diff --git "a/5401.jsonl" "b/5401.jsonl" new file mode 100644--- /dev/null +++ "b/5401.jsonl" @@ -0,0 +1,565 @@ +{"seq_id":"212918742","text":"import argparse\n\nfrom chainercv import transforms\nimport numpy as np\nimport tensorrt as trt\nfrom PIL import Image\nimport pycuda.driver as cuda\nimport pycuda.autoinit\n\nfrom local_lib.tensorrt import common\nfrom local_lib.utils.time import stop_watch\n\nTRT_LOGGER = trt.Logger(trt.Logger.INFO)\nlog_filename = \"tensorrt_vgg\"\n\n\ndef load_engine(engine_path):\n with open(engine_path, \"rb\") as f:\n with trt.Runtime(TRT_LOGGER) as runtime:\n return runtime.deserialize_cuda_engine(f.read())\n\n\n@stop_watch(log_filename)\ndef do_inference(context, bindings, inputs, outputs, stream, batch_size=1):\n [cuda.memcpy_htod(inp.device, inp.host) for inp in inputs]\n context.execute(\n batch_size=batch_size, bindings=bindings\n )\n [cuda.memcpy_dtoh(out.host, out.device) for out in outputs]\n return [out.host for out in outputs]\n\n\ndef transform_img(img, img_size):\n img = np.array(img, dtype=np.float32).transpose(2, 0, 1)\n img = transforms.resize(img, (img_size, img_size))\n img -= np.array((123, 117, 104)).reshape((-1, 1, 1))\n return img\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--engine_path', type=str, default=None)\n parser.add_argument('--img_path', type=str, default=None)\n parser.add_argument('--img_size', type=int, default=224)\n\n args = parser.parse_args()\n\n img = Image.open(args.img_path)\n img = transform_img(img, args.img_size)\n\n with load_engine(args.engine_path) as engine:\n with engine.create_execution_context() as context:\n inputs, outputs, bindings, stream = common.allocate_buffers(engine)\n inputs[0].host = img\n trt_outputs = do_inference(\n context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream\n )\n","sub_path":"src/tensorrt/infer_with_tensorrt.py","file_name":"infer_with_tensorrt.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"561012053","text":"# Setup\nimport pathlib\nfrom setuptools import setup\n\n\nHERE = pathlib.Path(__file__).parent\n\nREADME = (HERE / 'README.md').read_text()\n\nsetup(\n name='xlambda_helper',\n version='0.0.3',\n description='Helper library to handle warming requests from X-Lambda '\n '(more: https://bit.ly/xlambda).',\n long_description=README,\n long_description_content_type='text/markdown',\n url='https://github.com/dashbird/xlambda-helper-python/archive/0.0.3.tar.gz',\n author='Dashbird.io (Renato Byrro)',\n author_email='renato@dashbird.io',\n license='MIT',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n\n ],\n packages=[\n 'xlambda_helper',\n ],\n install_requires=[\n 'boto3',\n ],\n include_package_data=True,\n keywords=[\n 'x-lambda',\n 'xlambda',\n 'aws',\n 'aws lambda',\n 'cold start',\n 'warm',\n 'serverless',\n 'containers',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"376260696","text":"from cloudpathlib import CloudPath\n\n\ndef test_default_client_instantiation(rig):\n if not getattr(rig, \"is_custom_s3\", False):\n # Skip resetting the default client for custom S3 endpoint, but keep 
the other tests,\n # since they're still useful.\n rig.client_class._default_client = None\n\n # CloudPath dispatch\n p = CloudPath(f\"{rig.cloud_prefix}{rig.drive}/{rig.test_dir}/dir_0/file0_0.txt\")\n # Explicit class\n p2 = rig.create_cloud_path(\"dir_0/file0_0.txt\")\n # Default client CloudPath constructor\n p3 = rig.client_class.get_default_client().CloudPath(\n f\"{rig.cloud_prefix}{rig.drive}/{rig.test_dir}/dir_0/file0_0.txt\"\n )\n # Default client path-class-name constructor\n p4 = getattr(rig.client_class.get_default_client(), rig.path_class.__name__)(\n f\"{rig.cloud_prefix}{rig.drive}/{rig.test_dir}/dir_0/file0_0.txt\"\n )\n\n # Check that paths are the same\n assert p == p2 == p3 == p4\n\n # Check that client is the same instance\n assert p.client is p2.client is p3.client is p4.client\n\n # Check the file content is the same\n assert p.read_bytes() == p2.read_bytes() == p3.read_bytes() == p4.read_bytes()\n\n # should be using same instance of client, so cache should be the same\n assert p._local == p2._local == p3._local == p4._local\n\n\ndef test_different_clients(rig):\n p = rig.create_cloud_path(\"dir_0/file0_0.txt\")\n\n new_client = rig.client_class()\n p2 = new_client.CloudPath(f\"{rig.cloud_prefix}{rig.drive}/{rig.test_dir}/dir_0/file0_0.txt\")\n\n assert p.client is not p2.client\n assert p._local is not p2._local\n","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"512282449","text":"# import multiprocessing to avoid this bug\n# (http://bugs.python.org/issue15881#msg170215)\nimport multiprocessing\nimport re\nimport os\nfrom setuptools import setup, find_packages\n\nassert multiprocessing\nmodule_name = \"malcolm\"\n\n\ndef get_version():\n \"\"\"Extracts the version number from the version.py file.\n \"\"\"\n VERSION_FILE = os.path.join(module_name, 'version.py')\n txt = open(VERSION_FILE).read()\n mo = re.search(r'^__version__ = [\\'\"]([^\\'\"]*)[\\'\"]', txt, re.M)\n if mo:\n version = mo.group(1)\n bs_version = os.environ.get('MODULEVER', '0.0')\n assert bs_version == \"0.0\" or bs_version == version, \\\n \"Version {} specified by the build system doesn't match {} in \" \\\n \"version.py\".format(bs_version, version)\n return version\n else:\n raise RuntimeError('Unable to find version string in {0}.'\n .format(VERSION_FILE))\n\ninstall_requires = [\n # External\n \"enum34==1.1.6\",\n \"tornado>=5.1.1\",\n \"numpy==1.16.4\",\n \"ruamel.yaml==0.15.97\",\n \"h5py==2.9.0\",\n \"p4p==3.3.2\",\n \"pygelf==0.3.5\",\n \"plop==0.3.0\",\n \"typing==3.6.1\",\n # DLS developed\n \"annotypes==0.20\",\n \"cothread==2.16\",\n \"scanpointgenerator==2.3\",\n \"vdsgen==0.5.2\",\n ]\n\ntests_require = [\n 'mock>=2.0.0', 'nose>=1.3.0', 'coverage>=3.7.1', 'pytest>=3.10.1',\n 'pytest-cov>=2.6.1']\n\npackages = [x for x in find_packages() if x.startswith(\"malcolm\")]\nsetup(\n name=module_name,\n version=get_version(),\n description='Scanning in the middlelayer',\n long_description=open(\"README.rst\").read(),\n url='https://github.com/dls-controls/pymalcolm',\n author='Tom Cobb',\n author_email='tom.cobb@diamond.ac.uk',\n keywords='',\n packages=packages,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language 
:: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n license='APACHE',\n install_requires=install_requires,\n extras_require={\n 'websocket': ['tornado'],\n 'ca': ['cothread'],\n 'hdf5': ['h5py', 'vds-gen'],\n 'test': tests_require\n },\n include_package_data=True,\n package_data={'malcolm': ['modules/*/*/*.yaml', 'modules/*/*/*.svg']},\n data_files=[\n ('', ['README.rst', 'CHANGELOG.rst', 'LICENSE'])\n ],\n test_suite='nose.collector',\n tests_require=tests_require,\n zip_safe=False,\n entry_points={'console_scripts':\n [\"imalcolm = malcolm.imalcolm:main\"]\n },\n)\n","sub_path":"pypi_install_script/malcolm-4.1.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"489037303","text":"import unittest\nfrom public import base,read_xlsx\nfrom ddt import ddt,unpack,data\nfrom public.write_xls import *\nfrom public.base import md5_pwd\nimport json\nfrom public.Config import login_url\nimport requests\nimport os\nBASE_PATH = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]\n# print(BASE_PATH)#获取根目录\n# print(os.path.realpath(__file__)) #获取当前文件路径\n# print(os.path.dirname(os.path.realpath(__file__))) # 从当前文件路径中获取目录\n# print(os.path.basename(os.path.realpath(__file__))) #获取文件名\nEXCEL_PATH = os.path.join(BASE_PATH,'test_data','test_data.xls')\n#print(EXCEL_PATH)\ntest = read_xlsx.ExcelUtil(EXCEL_PATH, 'test').next()\nnewtable,newdata = ExcelUtil_write(EXCEL_PATH).new_tabe(0)\n\n\n# def myskip():\n# for i in range(len(test)):\n# yield (test[i]['Skip'])\n# skip = (test[i]['Skip'] for i in range(len(test)))\n\n\n@ddt\nclass InterfaceTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n data = {'username': 'huafeng', 'password': md5_pwd('000000')}\n rep = requests.post(login_url,data=data)\n cls.cookies_session = rep.cookies.get_dict()\n\n\n # @data(*test)\n # def setUp(self):\n # if self.data['Skip'] == \"True\":\n # b = True\n # else:\n # b = False\n # unittest.skipIf(b, \"跳过\")\n # print(1)\n\n # @data(*test)\n # def setUp(self,data):\n # data = {'username': 'huafeng', 'password': md5_pwd('000000')}\n # if data['is_login'] == \"True\":\n # rep = requests.post(login_url,data=data)\n # cookies_session = rep.cookies.get_dict()\n # else:\n # cookies_session = {}\n # print(cookies_session,22)\n\n\n @data(*test)\n def test_interface(self,data):\n '''接口测试'''\n url = base.get_url(data['endpoint'])\n #判断测试是否跳过\n print('idbegin'+ data['CaseId'] +'idend')\n print('smbegin'+ data['Describe'] + 'smend')\n if data['Skip'] == \"True\":\n self.skipTest(\"跳过测试\")\n if data['Params']:\n DataALL = eval(data['Params'])\n if data['MD5'] == \"True\":\n if data['MD5_parame']:\n if DataALL.get('data'):\n DataALL['data'][data['MD5_parame']] = md5_pwd(DataALL['data'][data['MD5_parame']])\n #print(DataALL['data'][data['MD5_parame']])\n if DataALL.get('json'):\n DataALL['json'][data['MD5_parame']] = md5_pwd(DataALL['json'][data['MD5_parame']])\n #print(DataALL['json'][data['MD5_parame']])\n if DataALL.get('params'):\n DataALL['params'][data['MD5_parame']] = md5_pwd(DataALL['params'][data['MD5_parame']])\n #print(DataALL['params'][data['MD5_parame']])\n else:\n DataALL = {}\n Method = data['RequestSend']\n if data['is_login'] == \"True\":\n cookies_session = InterfaceTest.cookies_session\n else:\n cookies_session = {}\n resp = base.get_response(url, Method,cookies_session, **DataALL)\n print(resp)\n\n #获取用例行,将响应结果写入表格中\n CaseId = data['CaseId']\n 
#print(CaseId)\n row = base.get_row(test,CaseId)\n #print(row)\n newtable.write(row, 7, str(resp))\n\n Expectedresult = eval(data['ExpectResult'])\n self.a=base.dict_bijiao(Expectedresult,resp)\n self.assertEqual(self.a,True)\n\n\n @classmethod\n def tearDownClass(cls):\n newdata.save(EXCEL_PATH)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test_case/test1_test.py","file_name":"test1_test.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"517975425","text":"from homepage import models as hmod\nfrom . import templater\n\ndef process_request(request):\n '''Shows the list of questions'''\n \n category = hmod.Category.objects.get(id=request.urlparams[0])\n type = hmod.Type.objects.filter(category=category)\n\n template_vars = {\n 'category': category,\n 'type': type\n }\n return templater.render_to_response(request, 'categories.html', template_vars)","sub_path":"homepage/views/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"71438848","text":"__author__ = 'imalkov'\n\n\ndef prepare_to_parse(path):\n res = []\n with open(path) as file:\n lines = file.readlines()\n #remove empty lines and full file comments\n # res = [(line.lstrip())[:-1] for line in lines]\n res = [(line.lstrip()).replace('\\n','') for line in lines]\n res = filter(lambda line: False if (len(line) == 0 or line[0] in ['$', '\\n']) else True, res)\n res = [line.split('$')[0].strip() for line in res]\n return res\n\n# print(prepare_to_parse('/home/imalkov/Dropbox/M.s/Research/DATA/SESSION_TREE/NODE02/Session2I/input/topo_parameters.txt'))","sub_path":"modeltools/modelinput/pecutils.py","file_name":"pecutils.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"392939112","text":"from __future__ import division\nimport discord\nimport asyncio\nimport random\nimport json\nimport urbandictionary as ud\nimport string\nimport win_unicode_console\n\nwin_unicode_console.enable()\n\nfrom bs4 import BeautifulSoup\nfrom discord.ext import commands\nfrom utils.calcparser import NumericStringParserForPython3\n\n\nclass Utilities:\n # Init with the bot reference, and a reference to the settings var\n def __init__(self, bot):\n self.bot = bot\n self.nsp = NumericStringParserForPython3()\n textmoji_strs = '🅰🅱🇨🇩🇪🇫🇬🇭🇮🇯🇰🇱🇲🇳🅾🅿🇶🇷🇸🇹🇺🇻🇼🇽🇾🇿'\n self.textmoji_table = dict((ord(char), trans) for char, trans in zip(string.ascii_lowercase, textmoji_strs))\n dev_server = discord.utils.find(lambda s: s.id == 365893884053553162, bot.guilds)\n self.feedback_channel = dev_server.get_channel(365893884053553162)\n self.guessed_wrong = [\n 'Not even close, the right number was:',\n 'Better luck next time, the number was:',\n 'How could you not have known that the number was:',\n 'Hmm, well, the right number was:',\n 'Not getting any better, the number was:',\n 'Right number was:'\n ]\n self.guessed_right = [\n 'You guessed correctly',\n 'Everyone knew you could do it',\n 'You got the right answer',\n 'History will remember you...'\n ]\n self.RPSLS = {'rock': \"\\N{RAISED FIST} **Rock!**\",\n 'paper': \"\\N{RAISED HAND WITH FINGERS SPLAYED} **Paper!**\",\n 'scissors': \"\\N{BLACK SCISSORS} **Scissors!**\",\n 'lizard': \"\\N{LIZARD} **Lizard!**\",\n 'spock': \"\\N{RAISED HAND WITH PART BETWEEN MIDDLE AND RING FINGERS} 
**Spock!**\"}\n\n @commands.command(aliases=['yt', 'vid'])\n async def video(self, ctx, *, search):\n \"\"\" Search for the first videos match on YouTube \"\"\"\n\n with await ctx.channel.typing():\n search = search.replace(' ', '+').lower()\n\n with await self.bot.session.get(f\"https://www.youtube.com/results?search_query={search}\") as resp:\n response = await resp.text()\n\n result = BeautifulSoup(response, \"lxml\")\n dir_address = f\"{result.find_all(attrs={'class': 'yt-uix-tile-link'})[0].get('href')}\"\n output = f\"**Top Result:**\\nhttps://www.youtube.com{dir_address}\"\n\n if not dir_address:\n return await ctx.errer(\"No results found!\")\n\n await ctx.send(output)\n\n @commands.command(aliases=['rock', 'paper', 'scissors', 'lizard', 'spock', 'rps'], no_pm=True)\n async def settle(self, ctx, opt: str = None):\n \"\"\" Play rock paper scissors, lizard spock\n\n Scissors cut paper, paper covers rock,\n rock crushes lizard, lizard poisons Spock,\n Spock smashes scissors, scissors decapitate lizard,\n lizard eats paper, paper disproves Spock,\n Spock vaporizes rock and, as it’s always been,\n rock crushes scissors.\n \"\"\"\n if opt is None:\n return await ctx.error(\"Please select one of rock, paper, scissors, lizard and spock\")\n player_choice = self.RPSLS[opt]\n a = ctx.message.author.display_name\n b = self.bot.user.name\n\n available = self.RPSLS['rock'], self.RPSLS['paper'], self.RPSLS['scissors'], self.RPSLS['lizard'], self.RPSLS['spock']\n bot_choice = random.choice(available)\n\n # I know, this is a mess, but trust me, it works correctly\n # each item wins or loses depending on the random choice\n # against the user's input.\n # Fun fact, this cond took me 15 min to write, and after that I slept\n # 18 hours straight!\n cond = {\n (self.RPSLS['rock'], self.RPSLS['paper']): False,\n (self.RPSLS['rock'], self.RPSLS['scissors']): True,\n (self.RPSLS['rock'], self.RPSLS['lizard']): True,\n (self.RPSLS['rock'], self.RPSLS['spock']): False,\n (self.RPSLS['paper'], self.RPSLS['rock']): True,\n (self.RPSLS['paper'], self.RPSLS['scissors']): False,\n (self.RPSLS['paper'], self.RPSLS['lizard']): False,\n (self.RPSLS['paper'], self.RPSLS['spock']): True,\n (self.RPSLS['scissors'], self.RPSLS['rock']): False,\n (self.RPSLS['scissors'], self.RPSLS['paper']): True,\n (self.RPSLS['scissors'], self.RPSLS['lizard']): True,\n (self.RPSLS['scissors'], self.RPSLS['spock']): False,\n (self.RPSLS['lizard'], self.RPSLS['rock']): False,\n (self.RPSLS['lizard'], self.RPSLS['paper']): True,\n (self.RPSLS['lizard'], self.RPSLS['scissors']): False,\n (self.RPSLS['lizard'], self.RPSLS['spock']): True,\n (self.RPSLS['spock'], self.RPSLS['rock']): True,\n (self.RPSLS['spock'], self.RPSLS['paper']): False,\n (self.RPSLS['spock'], self.RPSLS['scissors']): True,\n (self.RPSLS['spock'], self.RPSLS['lizard']): False\n }\n\n e = discord.Embed(colour=self.bot.user_color)\n e.add_field(name=f\"{a}'s choice:\", value=f'{player_choice.value}', inline=True)\n e.add_field(name=f\"{b}'s choice:\", value=f'{bot_choice.value}', inline=True)\n\n if bot_choice == player_choice:\n outcome = None\n else:\n outcome = cond[(player_choice, bot_choice)]\n\n if outcome:\n e.set_footer(text=f\"{a} wins, {b} loses...\")\n elif not outcome:\n e.set_footer(text=f\"{b} wins! 
{a} loses...\")\n else:\n e.set_footer(text=\"We're square\")\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n @commands.command(aliases=['post_channel'], no_pm=True)\n async def post(self, ctx, channel: discord.TextChannel, *, message: str = None):\n \"\"\" Send a message to any channel in Guild\n Usage: post #general Hello world! \"\"\"\n\n if channel is None:\n return await ctx.error('Use a channel ID or name to send a message from here.')\n\n if message is None:\n return await ctx.error(\"Please provide a message to send to the provided channel\")\n\n try:\n await channel.send(message)\n await ctx.channel.send('Success!')\n except discord.Forbidden:\n return await ctx.error('The bot does Not have enough permissions to send messages in that channel.')\n\n @commands.command()\n async def textmojify(self, ctx, *, msg):\n \"\"\" Convert text into emojis \"\"\"\n\n if msg is not None:\n text = msg.lower().translate(self.textmoji_table)\n await ctx.send(text)\n else:\n return await ctx.error('Please provide something to TextMojify.')\n\n '''@commands.command(description='To use the webapp go to http://eeemo.net/')\n async def zalgo(self, ctx, *, message: str=None):\n \"\"\"Fuck up text\n\n BROKEN!!! I'll fix it soon™\n \"\"\"\n try:\n await ctx.channel.trigger_typing()\n await ctx.message.delete()\n except discord.Forbidden:\n pass\n\n user = ctx.message.author.display_name\n if message != None:\n words = message.split()\n try:\n iterations = int(words[len(words) - 1])\n words = words[:-1]\n except Exception:\n iterations = 1\n\n if iterations > 100:\n iterations = 100\n if iterations < 1:\n iterations = 1\n\n zalgo = \" \".join(words)\n for i in range(iterations):\n if len(zalgo) > 2000:\n break\n zalgo = self._zalgo(zalgo)\n\n zalgo = zalgo[:2000]\n e = discord.Embed(colour=discord.Colour(0xed791d))\n e.set_author(name=user, icon_url=ctx.message.author.avatar_url)\n e.description = zalgo\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n await ctx.send('Unable to send embeds here!')\n else:\n await ctx.send(f'Usage: `{ctx.prefix}zalgo [your text]`', delete_after=5)'''\n\n @commands.command()\n async def esrever(self, ctx, *, msg: str = None):\n \"\"\" Write backwards because reasons, in Embed \"\"\"\n\n e = discord.Embed(colour=self.bot.user_color)\n\n if msg is None:\n return await ctx.error('Write a message after command!')\n else:\n e.description = f'`{msg.lower()[::-1]}` \\N{LEFTWARDS BLACK ARROW}'\n\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n @commands.command(aliases=['thisis'])\n async def thisistisis(self, ctx, *, text):\n \"\"\" Secret language for initiates only. Not! 
\"\"\"\n\n sis = text.replace('a', 'i').replace('A', 'I').replace('e', 'i').replace('E', 'I') \\\n .replace('o', 'i').replace('O', 'I').replace('u', 'i').replace('U', 'I')\n\n e = discord.Embed(colour=self.bot.user_color)\n e.add_field(name=f'~~*{text}*~~', value=f'```{sis}```')\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n @commands.command(aliases=['tu'])\n async def tinyurl(self, ctx, *, link: str = None):\n \"\"\"Shorten URLs\"\"\"\n\n if link is None:\n return await ctx.error('Please provide a link to shorten!')\n\n api_tiny = 'http://tinyurl.com/api-create.php?url='\n\n async with self.bot.session.get(api_tiny + link) as tiny:\n shortenurl = await tiny.read()\n\n shortenurl = shortenurl.decode(\"utf-8\")\n\n emb = discord.Embed(colour=self.bot.user_color)\n emb.add_field(name=\"\\U0001f30d Original Link\",\n value=link, inline=False)\n emb.add_field(name=\"\\U0001f517 Shortened Link\",\n value=shortenurl, inline=False)\n\n await ctx.send(embed=emb)\n\n @commands.command(aliases=['calc', 'maths'])\n async def calculate(self, ctx, *, formula=None):\n \"\"\"Python calculator command\n Usage: Add: 2+3, Sub: 2-3, Mul: 2*3, Div: 2/3, Exp: 2^3,\n Pi: PI, E: e, Sin: sin, Cos: cos, Tan: tan, Abs: abs,\n Tru: trunc, Rou: round, Sgn: sgn\n\n This command uses: Paul McGuire's fourFn.py.\"\"\"\n u = ctx.message.author.display_name\n\n if formula is None:\n # How can it calculate an empty message? Reee!\n msg = f'\\u200BUsage: `{ctx.prefix}{ctx.invoked_with} [any maths formula]`'\n e = discord.Embed(colour=self.bot.user_color)\n e.description = f'{msg}'\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n # Okay, so here it tries to solve the problem, so far it has received\n # some numbers and operators, it will try to parse the input into intelligeble\n # formulas and solve them...\n try:\n answer = self.nsp.eval(formula)\n\n except:\n # If there's a problem with the input, shows examples instead of hanging up\n e = discord.Embed(colour=self.bot.user_color)\n e.description = f'\\u200B\\N{THINKING FACE} wrong `{formula}` input {u}!.\\n' \\\n f'Available operations:\\n\\n' \\\n f'**Add:** `2+3`, **Sub:** `2-3`, **Mul:** `2*3`, **Div:** `2/3`,\\n' \\\n f'**Exp:** `2^3`, **Pi:** `PI`, **E:** `e`,\\n' \\\n f'**Sin:** `sin()`, **Cos:** `cos()`, **Tan:** `tan()`, **Abs:** `abs()`,\\n' \\\n f'**Tru:** `trunc()`, **Rou:** `round()`, **Sgn:** `sgn()`,\\n' \\\n f'**Int:** `0 to 9`'\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n # If we made it here, then the correct input prints correct answer\n # everything else was just to make sure the input was somewhat logical\n e = discord.Embed(colour=self.bot.user_color)\n e.add_field(name='Input:', value=f'```{formula}```', inline=True)\n e.add_field(name='Result:', value=f'```{round(answer, 2)}```', inline=True)\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n @commands.group(invoke_without_command=True, aliases=['ud', 'urbandict'])\n async def urban(self, ctx, *, query: str):\n \"\"\" Check UrbanDictionary for the meaning of a word \"\"\"\n try:\n resultlst = await self.bot.loop.run_in_executor(None, ud.define, query)\n item = resultlst[0]\n except IndexError:\n return await ctx.error(f'Unable to find definition for `{query}`.')\n\n em = 
discord.Embed(color=self.bot.user_color)\n em.set_author(name=\"\\U0001f4d6 Urban Dictionary\")\n em.add_field(name=\"Word\", value=item.word, inline=False)\n em.add_field(name=\"Definition\", value=item.definition, inline=False)\n em.add_field(name=\"Example(s)\", value=item.example, inline=False)\n\n await ctx.send(embed=em)\n\n @urban.command(aliases=['-s'])\n async def search(self, ctx, *, query: str):\n \"\"\" Search UrbanDictoinary for a Specific Word \"\"\"\n\n resultlst = await self.bot.loop.run_in_executor(None, ud.define, query)\n\n msg = []\n for number, option in enumerate(resultlst[:4]):\n msg.append(f\"{number + 1}. {option.word}\\n \" \n f\"{option.definition[:57]+'...' if len(option.definition)>65 else option.definition}\")\n send_msg = '\\n'.join(msg)\n em = discord.Embed(title=\"Results\", description=send_msg, color=self.bot.user_color)\n em.set_footer(text=\"Type 'exit' to leave the menu.\")\n menumsg = await ctx.send(embed=em)\n\n def check(m):\n return m.author == ctx.message.author and m.channel == ctx.message.channel and m.content.isdigit()\n response = await self.bot.wait_for('message', check=check)\n\n try:\n if response.content.lower() == 'exit':\n await response.delete()\n await menumsg.delete()\n return\n else:\n await response.delete()\n await menumsg.delete()\n item = resultlst[int(response.content) - 1]\n except IndexError:\n return await ctx.error('Invalid option!')\n\n em = discord.Embed(color=self.bot.user_color)\n em.set_author(name=\"\\U0001f4d6 Urban Dictionary\")\n em.add_field(name=\"Word\", value=item.word)\n em.add_field(name=\"Definition\", value=item.definition)\n em.add_field(name=\"Example(s)\", value=item.example)\n await ctx.send(embed=em)\n\n @urban.command(aliases=['-r'])\n async def random(self, ctx):\n \"\"\" Get a Random Word and its Meaning from UrbanDictionary \"\"\"\n item = await self.bot.loop.run_in_executor(None, ud.random)\n\n em = discord.Embed(color=self.bot.user_color)\n em.set_author(name=\"\\U0001f4d6 Urban Dictionary\")\n em.add_field(name=\"Word\", value=item[0].word)\n em.add_field(name=\"Definition\", value=item[0].definition)\n em.add_field(name=\"Example(s)\", value=item[0].example)\n\n await ctx.send(embed=em)\n\n @commands.command(aliases=['suggestion'])\n async def feedback(self, ctx, *, text: str = 'Sorry, forgot to write.'):\n \"\"\"Suggestions and feature requests\"\"\"\n\n user = ctx.message.author\n c = ctx.invoked_with\n\n e = discord.Embed(colour=self.bot.user_color)\n e.title = 'Feedback'\n e.set_author(name=f'{user.name}{user.discriminator}', icon_url=user.avatar_url)\n e.description = f'{text}'\n\n try:\n await self.feedback_channel.send(embed=e)\n await ctx.send(f'Feedback received successfully. 
Thank you!')\n except discord.Forbidden:\n return await ctx.error('Unable to send feedback, please join Support Server\\nhttps://discord.gg/9qgzkQV')\n\n @commands.command()\n async def guess(self, ctx, number: int = None):\n \"\"\" Guess a number between 1 and 11 \"\"\"\n answer = random.randint(1, 11)\n u = ctx.message.author.display_name\n\n e = discord.Embed(colour=self.bot.user_color)\n if number is None:\n return await ctx.send('Guess a number between 1 and 11')\n\n if number < answer or number > answer:\n q_mark = '\\N{BLACK QUESTION MARK ORNAMENT}'\n e.add_field(name=f'{q_mark} Your choice {u}: `{number}`',\n value=f'```{random.choice(self.guessed_wrong)} {answer}```', inline=True)\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n if number == answer:\n q_mark = '\\N{BLACK QUESTION MARK ORNAMENT}'\n\n e.add_field(name=f'{q_mark} Correct number: `{answer}`',\n value=f'```{random.choice(self.guessed_right)} {u}!```', inline=True)\n try:\n await ctx.send(embed=e)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n @commands.command(aliases=['prediction', 'crystalball', 'oracle', 'i-ching', 'fortune'])\n async def iching(self, ctx, *, member: discord.Member = None):\n \"\"\"Based on the ancient I Ching oracle,\n `Usage: command [member]>`\n use it as a guide\"\"\"\n\n ich = await ctx.send('Tossing 3 antique coins for your result.')\n await asyncio.sleep(2)\n\n with open('data/oracle.json') as f:\n choices = json.load(f)\n\n iching = 'http://i.imgur.com/biEvXBN.png'\n m = member or ctx.author\n p = ctx.invoked_with.title()\n\n e = discord.Embed(colour=self.bot.user_color)\n e.set_thumbnail(url=iching)\n e.set_footer(text=\"+-<§) Ancient Oracle's wisdom interpreted for now (§>-+\")\n e.set_author(name=f\"{p}'s inspiration for: {m.display_name} | {m}\", icon_url=ctx.message.author.avatar_url)\n e.description = f'Meditation:\\n{random.choice(choices)}'\n\n try:\n await ich.edit(embed=e, content=None)\n except discord.HTTPException:\n return await ctx.error('Unable to send embeds here!')\n\n @commands.command()\n async def clap(self, ctx, *, msg):\n \"\"\" Clap that message! 
\"\"\"\n if msg is not None:\n text = msg.lower().replace(' ', ' :clap: ')\n await ctx.send(text)\n else:\n return await ctx.error(':clap:')\n\n\ndef setup(bot):\n bot.add_cog(Utilities(bot))\n","sub_path":"cogs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"514302989","text":"# Kevin Trinh\n# Programming Paradigms\n# Webservice Primer\n# 03/03/2017\n\nimport requests\nimport json\n\nclass _webservice_primer:\n\n def __init__(self):\n self.SITE_URL = 'http://ash.campus.nd.edu:40001'\n self.MOVIE_URL = self.SITE_URL + '/movies/'\n self.RESET_URL = self.SITE_URL + '/reset/'\n\n def get_movie(self, movie_id):\n req = requests.get(self.MOVIE_URL + str(movie_id))\n movie_info = json.loads(req.content.decode('utf-8'))\n return movie_info\n\n def set_movie_title(self, movie_id, title):\n movie_info = self.get_movie(movie_id)\n movie_info['title'] = title\n movie_info = json.dumps(movie_info)\n req = requests.put(self.MOVIE_URL + str(movie_id), data = movie_info)\n result = json.loads(req.content.decode('utf-8'))\n return result\n\n def delete_movie(self, movie_id):\n req = requests.delete(self.MOVIE_URL + str(movie_id))\n result = json.loads(req.content.decode('utf-8'))\n return result\n \n def reset_movie(self, movie_id):\n req = requests.put(self.RESET_URL + str(movie_id))\n result = json.loads(req.content.decode('utf-8'))\n return result\n\nif __name__ == \"__main__\":\n MID = 110\n ws = _webservice_primer()\n\n movie = ws.get_movie(MID)\n if movie['result'] == 'success':\n print(\"Title:\\t%s\" % movie['title'])\n else:\n print(\"Error:\\t%s\" % movie['message'])\n\n\n","sub_path":"webservice_primer/_webservice_primer.py","file_name":"_webservice_primer.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"653860793","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 9 11:04:16 2018\n\n@author: rossspencer\n\"\"\"\n\n'=============== Imports =============='\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport train as trainingFunctions\n\n'a *function* that will run your testing code on an input data set X. Your test.py code should already be trained'\ndef findAllRedCarsTest(Y, X, x1, x2, y1, y2, stepsize):\n clf_trained = trainingFunctions.train(X)\n coords = []\n testingArr = []\n for i in range(x1, x2, stepsize):\n for j in range(y1, y2, stepsize):\n coords.append([i,j])\n testingArr.append(Y[i, j])\n \n predictions = clf_trained.predict(testingArr)\n \n##### this code is used to generate plots of the predicted red cars against the actual test image:\n plt.imshow(Y)\n for i in range(len(predictions)):\n if predictions[i] == 2:\n plt.plot([coords[i][0]], [coords[i][1]],'o')\n plt.xlim(x1, x2)\n plt.ylim(y1, y2)\n plt.show()\n \n red_cars = []\n for i in range(len(predictions)):\n if predictions[i] == 2:\n red_cars.append(coords[i])\n return red_cars\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"240520053","text":"#Time complexity - O(max(s,t)) where s is length of string S and t is length of string T\r\n#Space complexity - O(t)\r\n#Works on leetcode - yes\r\n#Approach - First we save the frequency of letters of T and then iterate through S. 
If any letter from S is in the dictionary, we add it with \r\n#its frequency to result and delete that letter from dictionary. If dictionary is not empty yet, we add the rest of letters to the result\r\nclass Solution:\r\n def customSortString(self, S: str, T: str) -> str:\r\n count = collections.Counter(T)\r\n res = []\r\n for i in S:\r\n if i in count:\r\n res.extend([i]*count[i])\r\n del count[i]\r\n for k,v in count.items():\r\n res.extend(k*v)\r\n return ''.join(res)","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"550320056","text":"import xml.etree.ElementTree as ET\r\nfrom tkinter import ttk\r\nfrom tkinter import *\r\nfrom tkinter.ttk import *\r\nimport numpy as np\r\nimport pandas as pd\r\nimport sqlite3\r\nimport tkinter as tk\r\nfrom tkscrolledframe import ScrolledFrame #Bewegbares Fesnter (Scrollbalken)\r\n\r\n# class _setit:\r\n# \"\"\"Internal class. It wraps the command in the widget OptionMenu.\"\"\"\r\n# def __init__(self, var, value, callback=None):\r\n# self.__value = value\r\n# self.__var = var\r\n# self.__callback = callback\r\n# def __call__(self, *args):\r\n# self.__var.set(self.__value)\r\n# if self.__callback:\r\n# self.__callback(self.__value, *args)\r\n\r\nclass MainGUI:\r\n def __init__(self, sql_database_name, sql_table_name):\r\n # self.master = master + in übergabe (self, master)\r\n #conn = sqlite3.connect(sql_database_name + '.db')\r\n conn = sqlite3.connect(sql_database_name)\r\n df = pd.read_sql_query(\"SELECT *, oid FROM \" + sql_table_name, conn)\r\n\r\n cursor = conn.cursor()\r\n cursor.execute(\"SELECT *, oid FROM \" + sql_table_name)\r\n self.db_records = cursor.fetchall()\r\n\r\n self.db_records_listing = []\r\n for self.db_record in self.db_records:\r\n self.db_records_listing.append(len(self.db_records))\r\n\r\n print(\"Anzahl Einträge: \" + str(len(self.db_records_listing)))\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n win = tk.Tk()\r\n win.title('Datenbank - Anzahl der Einträge: ' + str(len(self.db_records_listing)))\r\n\r\n # scrollable Frame\r\n self.sf_database = ScrolledFrame(win, width=500, height=500)\r\n self.sf_database.pack(expand=1, fill=\"both\")\r\n\r\n # Create a frame within the ScrolledFrame\r\n self.db_inner_frame = self.sf_database.display_widget(Frame)\r\n\r\n #win.resizable(width=0, height=0)\r\n self.tree = ttk.Treeview(self.db_inner_frame, selectmode=\"browse\", height=30)\r\n self.tree.pack(fill=\"both\", expand = 1)\r\n #self.tree.pack(side='left')\r\n\r\n #vsb = ttk.Scrollbar(win, orient=\"vertical\", command=self.tree.yview)\r\n #vsb.pack(side='right', fill='y')\r\n\r\n #self.tree.configure(yscrollcommand=vsb.set)\r\n\r\n hsb = ttk.Scrollbar(win, orient=\"horizontal\", command=self.tree.xview)\r\n hsb.pack(side='bottom', fill='x')\r\n\r\n\r\n self.tree['show'] = 'headings'\r\n self.tree[\"columns\"] = df.columns.values.tolist()\r\n for i, header in enumerate(df.columns.values.tolist()):\r\n self.tree.column(i, width=100)\r\n self.tree.heading(i, text=header)\r\n\r\n\r\n for row in df.iterrows():\r\n self.tree.insert(\"\", 'end', values=list(row[1]))\r\n\r\n # self.tree.bind(\"\", self.preClick)\r\n # self.tree.bind(\"\", self.onLeft)\r\n\r\n #self.tree[\"displaycolumns\"] = df.columns.values.tolist()[0:(len(self.db_records_listing)-7)]\r\n\r\n # Alle Spalten-Einträge in Datenbank Übersicht zeigen\r\n self.tree[\"displaycolumns\"] = df.columns.values.tolist()\r\n\r\n\r\n # 
def add_to_header(val):\r\n # new = list(self.tree[\"displaycolumns\"])\r\n # new.append(val)\r\n # self.tree[\"displaycolumns\"] = new\r\n # self.update_option_menu()\r\n #\r\n #\r\n # def rem_from_header(val):\r\n # print(val)\r\n # new = list(self.tree[\"displaycolumns\"])\r\n # new.remove(val)\r\n # self.tree[\"displaycolumns\"] = new\r\n # self.update_option_menu()\r\n #\r\n # add_var = StringVar(app)\r\n # add_var.set(self.tree[\"columns\"])\r\n # add = OptionMenu(win, add_var, *self.tree[\"columns\"], command=add_to_header)\r\n #\r\n # self.rem_var = StringVar(app)\r\n # self.rem_var.set(self.tree[\"displaycolumns\"])\r\n # self.rem = OptionMenu(win, self.rem_var, *self.tree[\"displaycolumns\"], command=rem_from_header)\r\n # Label(win, text=\"Spalte ausblenden:\", font=(\"bold\", 11)).pack()\r\n # add.pack(side=\"top\")\r\n # Label(win, text=\"Spalte einblenden:\", font=(\"bold\", 11)).pack()\r\n # self.rem.pack(side=\"top\")\r\n\r\n\r\n # def update_option_menu(self):\r\n # def rem_from_header(val):\r\n # print(val)\r\n # new = list(self.tree[\"displaycolumns\"])\r\n # new.remove(val)\r\n # self.tree[\"displaycolumns\"] = new\r\n # self.update_option_menu()\r\n # menu = self.rem[\"menu\"]\r\n # menu.delete(0, \"end\")\r\n # for string in self.tree[\"displaycolumns\"]:\r\n # menu.add_command(label=string, command=_setit(self.rem_var, string, rem_from_header))\r\n\r\n\r\n # def onRight(self):\r\n # cursorx = int(self.master.winfo_pointerx() - self.master.winfo_rootx())\r\n # cursory = int(self.master.winfo_pointery() - self.master.winfo_rooty())\r\n # self.menu = Canvas(self.win, width=150, height=40, highlightthickness=1, highlightbackground=\"black\")\r\n # self.menu.place(x=cursorx, y=cursory)\r\n # self.menu.pack_propagate(0)\r\n # delLabel = Label(self.menu, text=\" Markierte Zeile löschen\", cursor=\"hand2\", anchor=\"w\")\r\n # delLabel.pack(side=\"top\", padx=1, pady=1, fill=\"x\")\r\n # loadLabel = Label(self.menu, text=\" Markierte Frage in Tool laden\", cursor=\"hand2\", anchor=\"w\")\r\n # loadLabel.pack(side=\"top\", padx=1, pady=1, fill=\"x\")\r\n #\r\n #\r\n # def destroy():\r\n # self.menu.place_forget()\r\n #\r\n # def delete(*args):\r\n # selection = self.tree.selection()\r\n # self.tree.delete(selection)\r\n # destroy()\r\n #\r\n # delLabel.bind(\"\", delete)\r\n #\r\n # def preClick(self, *args):\r\n # try:\r\n # self.menu.place_forget()\r\n # self.onRight()\r\n # except AttributeError:\r\n # self.onRight()\r\n #\r\n # def onLeft(self, *args):\r\n # self.curItem = self.tree.focus()\r\n # print(self.tree.item(self.curItem)[\"values\"][-1])\r\n # try:\r\n # self.menu.place_forget()\r\n # except AttributeError:\r\n # pass\r\n #\r\n # def selectItem(self, a):\r\n # curItem = self.tree.focus()\r\n # print(self.tree.item(curItem)[\"values\"][-1])\r\n\r\n\r\n\r\n","sub_path":"Test_Generator_Module/test_generator_modul_datenbanken_anzeigen.py","file_name":"test_generator_modul_datenbanken_anzeigen.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"187011391","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@File : formatters.py\n@Time : 2019/7/1 21:57\n@Author : hrx\n@Email : hurx1116@gmail.com\n\"\"\"\nimport json\nimport logging\nfrom socket import gethostname\n\nfrom json.encoder import JSONEncoder\n\n\n\nclass JSONFormatter(logging.Formatter):\n \"\"\"\n 将日志记录格式化为JSON\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n include = kwargs.pop('include', 
None)\n exclude = kwargs.pop('exclude', None)\n super(JSONFormatter, self).__init__(*args, **kwargs)\n self.include = include\n self.exclude = exclude\n\n def format(self, record):\n data = record.__dict__.copy()\n\n if record.args:\n msg = record.msg % record.args\n else:\n msg = record.msg\n\n data.update(\n host=gethostname(),\n msg=msg,\n args=tuple(str(arg) for arg in record.args)\n )\n\n if 'exc_info' in data and data['exc_info']:\n data['exc_info'] = self.formatException(data['exc_info'])\n\n if self.include:\n data = {f: data[f] for f in self.include}\n elif self.exclude:\n for f in self.exclude:\n if f in data:\n del data[f]\n\n return json.dumps(data, cls=JSONEncoder)\n","sub_path":"log_rabbitmq/formatters.py","file_name":"formatters.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"110082747","text":"import sys\nimport os\nimport time\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport tensorflow as tf\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\n\n\n\ndef predict_image(model, images):\n prediction = model.predict(images)\n category = np.argmax(prediction)\n\n if category == 0:\n result = 'St.Pauls'\n elif category == 1:\n result = 'Marina Bay Sands'\n elif category == 2:\n result = 'Osaka Castle'\n elif category == 3:\n result = 'Statue Of Liberty'\n\n return result\n\n# print(os.path.join(os.path.realpath('.'), 'src', 'main', 'resources', 'static', 'uploaded' , sys.argv[1], sys.argv[2]))\n\n# ./webapps/ROOT/WEB-INF/classes/com/triplive/pythonconnect/CNN_model.py\n# model = load_model(os.path.join(os.path.realpath('.'), 'triplive', 'src', 'main', 'java', 'com', 'triplive', 'pythonconnect' , 'trainedModelForLandMark.h5'))\nmodel = load_model(os.path.join(os.path.realpath('.'), 'webapps', 'ROOT', 'WEB-INF', 'classes', 'com', 'triplive', 'pythonconnect' , 'trainedModelForLandMark.h5'))\n# model = load_model(os.path.join(os.path.realpath('.'), 'src', 'main', 'java', 'com', 'triplive', 'pythonconnect', 'trainedModelForLandMark.h5'))\n\n# img = cv2.imread(os.path.join(os.path.realpath('.'), 'triplive', 'src', 'main', 'resources', 'META-INF', 'resources', 'uploaded' , sys.argv[1], sys.argv[2]))\nimg = cv2.imread(os.path.join(os.path.realpath('.'), 'webapps', 'ROOT', 'WEB-INF', 'classes', 'static', 'uploaded' , sys.argv[1], sys.argv[2]))\n# img = cv2.imread(os.path.join(os.path.realpath('.'), 'src', 'main', 'resources', 'static', 'uploaded' , sys.argv[1], sys.argv[2]))\n\n\n# print(img)\n# img = cv2.imread(\"1.jpg\")\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nimg = cv2.resize(img, dsize=(224, 224))\nimg = np.expand_dims(img, axis=0)\nimg = tf.image.convert_image_dtype(img, tf.float32)\nresult = predict_image(model, img)\n\nprint(result)\n","sub_path":"triplive/src/main/java/com/triplive/pythonconnect/CNN_model.py","file_name":"CNN_model.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"545286790","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018-07-25\n\nThis module is supposed to hold configuration options that are used throughout the app, but it is also currently\n used to define and share test data across the app.\n\"\"\"\n \nimport os\nimport glob\nimport hashlib\nimport bz2\nfrom bokeh.models import ColumnDataSource\nfrom sqlalchemy import create_engine\n\n# Database connection using SQLAlchemy (DB abstraction)\n# 
http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlite\n# \"from config import engine\" to use it in the program\nengine = create_engine('sqlite:///anagreen.sqlite')\n\n#Variable contening the root path of the projet, useful for the upload method of the map\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\n ###################### BEGINNING OF CLASS ##################################\n\nclass CaseConfig:\n\n # will contain flux from DB\n data_flux = {\n \"id\": [],\n \"name\": [],\n \"hotcold\": [],\n \"color\": [],\n \"media\": [],\n \"posX\": [],\n \"posY\": [],\n \"posXend\": [],\n \"posYend\": [],\n \"fclass\": [],\n \"active\": [],\n \"ID_Study_Case\":[]\n }\n\n data_notouch_zones = {\n \"id\": [],\n \"x\": [],\n \"y\": [],\n \"w\": [],\n \"h\": [],\n \"name\": [],\n \"color\": [],\n \"ID_Study_Case\":[]\n }\n\n \n def __init__(self):\n self.source_notouch_zones = None\n self.souce_flux = None\n self.case_id = 0\n self.case_name = \"\"\n self.case_company = \"\"\n self.path_to_map = \"\"\n \n \n def setValues(self, id_study_case):\n self.case_id = id_study_case\n study_cases = { #array that will be displayed as a table in index\n \"ID_Study_Case\":[],\n \"Name\": [],\n \"Path_to_plan\":[],\n \"company_name\": []\n }\n query = \"SELECT ID_Study_Case, Name, Path_to_plan, company_name FROM study_case WHERE ID_Study_Case = \" +str(id_study_case)\n result = engine.execute(query)\n for row in result:\n for (key, val) in row.items():\n study_cases[key].append(val)\n \n for step in range(len(study_cases[\"ID_Study_Case\"])):\n self.case_name = study_cases[\"Name\"][step]\n self.path_to_map = study_cases[\"Path_to_plan\"][step]\n self.case_company = study_cases[\"company_name\"][step]\n \n def select_flux(self, id_study_case): #search flux and fills the DataSource for bokeh map\n query=\"SELECT id, name , hotcold, color, media, posX, posY, posXend, posYend, fclass, active, ID_Study_Case FROM flux WHERE ID_Study_Case =\"+str(id_study_case)+\" ORDER BY name COLLATE NOCASE\"\n result = engine.execute(query)\n for row in result:\n for (key, val) in row.items():\n self.data_flux[key].append(val)\n self.souce_flux = ColumnDataSource(self.data_flux, name=\"name_souce_flux\")\n\n def select_notouch_zones(self, id_study_case): #search notouch zones and fills the DataSource for bokeh map\n query=\"SELECT id, x, y, w, h, name, color, ID_Study_Case FROM notouch_zone WHERE ID_Study_Case =\"+str(id_study_case)+\" ORDER BY name COLLATE NOCASE\"\n result = engine.execute(query)\n for row in result:\n for (key, val) in row.items():\n self.data_notouch_zones[key].append(val)\n\n self.source_notouch_zones = ColumnDataSource(self.data_notouch_zones, name=\"name_souce_notouchezone\")\n\n def generate_ColumnDataSource_flux(self, title):\n return ColumnDataSource(self.data_flux, name=\"bokeh_souce_flux_\"+title)\n \n\n #def get_data_source_flux_end(self):\n #return ColumnDataSource(self.data_flux)\n\n\n\n \n ############################ END OF CLASS ####################################\n \n \n \n# minimum size for zones \nMIN_ZONE_SIZE_W = 0.5\nMIN_ZONE_SIZE_H = 0.5\n \n\"\"\"Parameter names to be used as string keys of dictionnaries. Thoses dictionnary will be used for passing data to \nCustomJS functions. 
Therefore, the strings here should not be the string representation of a number, or contain \nspaces, so as to avoid syntax errors from javascript.\"\"\"\nPARAMETER_PROG_NAMES = [\n \"Energysaved\",\n \"C02savings\",\n \"CAPEX\",\n \"ROI\",\n \"Operationalcosts\"\n]\nPARAMETER_DISPLAY_NAMES = [\n \"Puissance echangée\",\n \"CO2 évités (kt eq CO2)\",\n \"CAPEX\",\n \"ROI\",\n \"Cout des opérations\"\n]\nPARAMETER_DISPLAY_NAMES_UNITS = {\n \"Energysaved\" : \"Puissance échangée (kW)\",\n \"C02savings\" :\"CO2 évités (kt eq CO2)\",\n \"CAPEX\" : \"CAPEX (Euros)\",\n \"ROI\" : \"ROI (Years)\",\n \"Operationalcosts\" : \"Coût des opérations (Euros/an)\" ,\n}\n\n\n# map picture => 11 pixels, 1 meter\n# 1772 pixels width = 161.1 meters\n# 1044 pixels height = 94.9 meters\n#path_to_map = \"plan-isidio-ag.jpg\"\nmap_bottom_left_corner_location = {\"x\": 0, \"y\": 0}\nmap_upper_right_corner_location = {\"x\": 161.1, \"y\": 94.9}\n\n\n''' useful functions '''\n\n# return the index of the line where table['id'][index]==id\ndef get_index_by_id(id,table):\n result=-1\n print (\"Table ID here : \"+str(table['id']))\n for (index, val) in enumerate(table['id']):\n if val == id:\n result = index\n return result\n\n\n# check scripts signature, to apprend to end of file to prevent cache if files changed\ndef sha256_checksum(filename, block_size=65536):\n sha256 = hashlib.sha256()\n with open(filename, 'rb') as f:\n for block in iter(lambda: f.read(block_size), b''):\n sha256.update(block)\n return sha256.hexdigest()\nfiles_sig=''\nlistfiles=[]\nlistfiles += glob.glob(\"./static/js/*\")\nlistfiles += glob.glob(\"./static/styles/*\")\nfor files in listfiles:\n files_sig += sha256_checksum(files)\nfiles_sig = hashlib.sha256(files_sig.encode('utf-8')).hexdigest()\n\n\n# Calculate signature of input parameters and save them in save_parameters\ndef get_params_sig():\n signature=hashlib.sha256() # good practice: use sha2/3, sha1/md5 are not secure anymore\n save_parameters={}\n query=\"SELECT * FROM flux ORDER BY name\"\n result = engine.execute(query)\n save_parameters['flux']=[]\n for row in result:\n save_parameters['flux'].append(dict(row))\n for (key, val) in row.items():\n signature.update((str(key)+str(val)).encode('utf-8'))\n query=\"SELECT * FROM notouch_zone ORDER BY name\"\n result = engine.execute(query)\n save_parameters['notouch_zone']=[]\n for row in result:\n save_parameters['notouch_zone'].append(dict(row))\n for (key, val) in row.items():\n signature.update((str(key)+str(val)).encode('utf-8'))\n query=\"SELECT * FROM raw_TS ORDER BY id\"\n result = engine.execute(query)\n save_parameters['raw_TS']=[]\n for row in result:\n save_parameters['raw_TS'].append(dict(row))\n for (key, val) in row.items():\n signature.update((str(key)+str(val)).encode('utf-8'))\n query=\"SELECT * FROM parametrization ORDER BY id\"\n result = engine.execute(query)\n save_parameters['parametrization']=[]\n for row in result:\n save_parameters['parametrization'].append(dict(row))\n for (key, val) in row.items():\n signature.update((str(key)+str(val)).encode('utf-8'))\n signaturestr=signature.hexdigest() \n return (signaturestr, save_parameters)\n \n\n# We load the page empty, display a progress modal and make an ajax request to launch calculation and wait for result\nquery=\"SELECT value FROM configuration WHERE name = 'status'\"\nresult = engine.execute(query).fetchone()\nstatus = 
int(result[0])\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"315255468","text":"#! /usr/env/python\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_log_error\nfrom lightgbm import LGBMRegressor\n\nfeatures = ['Market', 'Day', 'Stock', 'x0', 'x1', 'x2', 'x3A', 'x3B', 'x3C', 'x3D', 'x3E', 'x4', 'x5', 'x6']\n\ndf = pd.read_csv('train.csv', index_col=0)\ndf = df.fillna(0) # replace NaN entries\ndf_test = pd.read_csv('test.csv', index_col=0)\ndf_test = df_test.fillna(0) # replace NaN entries\n\nweights = []\nfor index, row in df.iterrows():\n\tweights.append(float(row['Weight']))\n\n\nX = df[features]\nY = df['y']\n\nmodel = LGBMRegressor(n_estimators=10000, learning_rate=0.01, min_data_in_leaf=40, num_leaves=80, num_iterations=110)\n\nmodel.fit(X,Y, sample_weight = weights)\nyp = pd.Series(model.predict(df_test[features])).rename('y')\nyp.index.name = 'Index'\nprint(yp.head())\n\nyp.to_csv('GradientBoostedRegressor4c.csv', header=True)\n","sub_path":"gradient_boosted_regression4c.py","file_name":"gradient_boosted_regression4c.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"183701225","text":"from helpers.utils import Zone\nfrom helpers.parser import*\n\nfrom index.inverted_index import InvertedIndex\n\n# assume we have 3 zones: title, annotation and body\nZONES_NUMBER = 3\n\nclass ZoneIndex(InvertedIndex):\n def __init__(self, files_count):\n InvertedIndex.__init__(self, False)\n self.files_count = files_count\n self.zone_weights = { Zone.TITLE : 0.2,\n Zone.ANNOTATION : 0.3,\n Zone.BODY : 0.5\n }\n\n def addToIndex(self, term, file_index, zone):\n docs_list = self.index.get(term, dict())\n file_indices_list = list(docs_list.keys())\n if file_index not in file_indices_list:\n docs_list[file_index] = [zone]\n else:\n if zone not in docs_list[file_index]:\n docs_list[file_index].append(zone)\n\n self.index[term] = docs_list\n\n def _getFilesList(self, term):\n return list(self.index.get(term, dict()).keys())\n\n def _and(self, query_list):\n intersected_files = super(ZoneIndex, self)._and(query_list)\n scores = [0.0] * self.files_count\n\n for f in intersected_files:\n score = self.__getBooleanScore(f, query_list) if len(query_list) == 2 \\\n else self.__getRelativeScore(f, query_list)\n scores[f] = self.__weightedZone(score)\n\n return scores\n\n def __weightedZone(self, scores):\n weights = []\n for i in range(ZONES_NUMBER):\n weights.append(self.zone_weights[Zone(i)] * scores[i])\n return sum(weights)\n\n def __getRelativeScore(self, doc_id, query_list):\n zones_coeficients = []\n\n for z in Zone:\n zone_contain_terms_counter = len(query_list)\n\n for term in query_list:\n zones = self.index[term][doc_id]\n if z not in zones:\n zone_contain_terms_counter -= 1\n break\n\n zones_coeficients.append((1.0 * zone_contain_terms_counter) / len(query_list))\n\n return zones_coeficients\n\n def __getBooleanScore(self, doc_id, query_list):\n zones_contain_query = []\n\n for z in Zone:\n zone_contain_all_terms = 1.0\n\n for term in query_list:\n zones = self.index[term][doc_id]\n if z not in zones:\n zone_contain_all_terms = 0.0\n break\n\n zones_contain_query.append(zone_contain_all_terms)\n\n return zones_contain_query\n\n def _or(self, query_list):\n return list() # should not be called\n\n def _not(self, query_list):\n 
return list() # should not be called","sub_path":"index/zone_index.py","file_name":"zone_index.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"494161677","text":"from random import randint\nimport random\nimport DS\n\n\ndef gen_random_tx(q, p, g):\n serial_number = random.getrandbits(128)#randint(2**(128-1), 2**128-1)\n payee_alpha,payee_public_key = DS.Key_Gen(q, p, g)\n payer_alpha,payer_public_key = DS.Key_Gen(q, p, g)\n Amount = randint(1, 1000000)\n \"\"\"\n message = \"**** Bitcoin transaction ****\\n\"\n message += \"Serial number: \"\n message += str(serial_number)\n message += \"\\nPayer public key (beta): \\n\"\n message += str(payer_public_key)\n message += \"\\nPayee public key (beta): \\n\"\n message += str(payee_public_key)\n message += \"\\nAmount: \"\n message += str(Amount)\n print(message)\n \"\"\"\n message = \"*** Bitcoin transaction ***\\nSerial number: \" + str(serial_number) + \"\\nPayer public key (beta): \" + str(payer_public_key) + \"\\nPayee public key (beta): \" + str(payee_public_key) + \"\\nAmount: \" + str(Amount) +\"\\n\"\n\n\n signature_s, signature_r = DS.SignGen(message.encode('UTF-8'), q, p, g, payer_alpha)\n\n #print(message)\n\n return message + \"Signature (s): \" + str(signature_s) + \"\\nSignature (r): \" + str(signature_r) +\"\\n\"\n","sub_path":"Phase1 .py files/Tx.py","file_name":"Tx.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"52024563","text":"# coding: utf-8\nimport sys, os\n\nsys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정\nimport numpy as np\nfrom common.layers import *\nfrom common.gradient import numerical_gradient\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nfrom dataset.mnist import load_mnist\n\nfrom common.optimizer import *\n\nplt.rcParams['figure.figsize'] = (20, 10)\nplt.rcParams.update({'font.size': 20})\n\n\nclass TwoLayerNet:\n\n def __init__(self, input_size, hidden_size, output_size):\n self.params = {}\n self.params['W1'] = np.random.randn(input_size, hidden_size) / (np.sqrt(input_size / 2))\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = np.random.randn(hidden_size, output_size) / (np.sqrt(hidden_size / 2))\n self.params['b2'] = np.zeros(output_size)\n\n # 계층 생성\n self.layers = OrderedDict()\n self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])\n self.layers['BatchNorm1'] = BatchNormalization(gamma=1.0, beta=0.)\n self.layers['Relu1'] = Relu()\n self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])\n self.lastLayer = SoftmaxWithLoss()\n\n def predict(self, x):\n for layer in self.layers.values():\n x = layer.forward(x)\n\n return x\n\n # x : 입력 데이터, t : 정답 레이블\n def loss(self, x, t):\n y = self.predict(x)\n return self.lastLayer.forward(y, t)\n\n def accuracy(self, x, t):\n y = self.predict(x)\n y = np.argmax(y, axis=1)\n if t.ndim != 1: t = np.argmax(t, axis=1)\n accuracy = np.sum(y == t) / float(x.shape[0])\n return accuracy\n\n # x : 입력 데이터, t : 정답 레이블\n\n def numerical_gradient(self, x, t):\n loss_W = lambda W: self.loss(x, t)\n\n grads = {}\n grads['W1'] = numerical_gradient(loss_W, self.params['W1'])\n grads['b1'] = numerical_gradient(loss_W, self.params['b1'])\n grads['W2'] = numerical_gradient(loss_W, self.params['W2'])\n grads['b2'] = numerical_gradient(loss_W, self.params['b2'])\n\n return grads\n\n def gradient(self, x, t):\n # 
forward\n self.loss(x, t)\n # backward\n dout = 1\n dout = self.lastLayer.backward(dout)\n layers = list(self.layers.values())\n layers.reverse()\n\n for layer in layers:\n dout = layer.backward(dout)\n\n # 결과 저장\n grads = {}\n grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers['Affine1'].db\n grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers['Affine2'].db\n return grads\n\n\n# 데이터 읽기\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\n\n# 하이퍼파라미터\n\niters_num = 10000 # 반복 횟수를 적절히 설정한다.\ntrain_size = x_train.shape[0] # 60000 개\nbatch_size = 100 # 미니배치 크기\nlearning_rate = 0.1\n\ntrain_loss_list = []\ntrain_acc_list = []\ntest_acc_list = []\n\n# 1에폭당 반복 수\n\niter_per_epoch = max(train_size / batch_size, 1)\n\noptimizer = Adam()\n\nfor i in range(iters_num): # 10000\n # 미니배치 획득 # 랜덤으로 100개씩 뽑아서 10000번을 수행하니까 백만번\n batch_mask = np.random.choice(train_size, batch_size) # 100개 씩 뽑아서 10000번 백만번\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n\n # 기울기 계산\n # grad = network.numerical_gradient(x_batch, t_batch)\n grad = network.gradient(x_batch, t_batch)\n params = network.params\n optimizer.update(params, grad)\n\n # 매개변수 갱신\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n\n # 학습 경과 기록\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss) # cost 가 점점 줄어드는것을 보려고\n\n # 1에폭당 정확도 계산 # 여기는 훈련이 아니라 1에폭 되었을때 정확도만 체크\n if i % iter_per_epoch == 0: # 600 번마다 정확도 쌓는다.\n\n train_acc = network.accuracy(x_train, t_train)\n test_acc = network.accuracy(x_test, t_test)\n train_acc_list.append(train_acc) # 10000/600 개 16개 # 정확도가 점점 올라감\n test_acc_list.append(test_acc) # 10000/600 개 16개 # 정확도가 점점 올라감\n print(\"train acc, test acc | \" + str(train_acc) + \", \" + str(test_acc))","sub_path":"09-1. batch_noramlization.py","file_name":"09-1. batch_noramlization.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"246028980","text":"import tensorflow as tf\nimport tensorflow.keras as ks\n\nfrom kgcnn.layers.attention import AttentionHeadGAT\nfrom kgcnn.layers.casting import ChangeTensorType, ChangeIndexing\nfrom kgcnn.layers.keras import Concatenate, Dense, Average\nfrom kgcnn.layers.mlp import MLP\nfrom kgcnn.layers.pooling import PoolingNodes\nfrom kgcnn.ops.models import generate_standard_graph_input, update_model_args\n\n\ndef make_gat( # Input\n input_node_shape,\n input_edge_shape,\n input_embedd: dict = None,\n # Output\n output_embedd: dict = None,\n output_mlp: dict = None,\n # Model specific parameter\n depth=3,\n attention_heads_num=5,\n attention_heads_concat=False,\n attention_args: dict = None\n):\n \"\"\"\n Generate Interaction network.\n\n Args:\n input_node_shape (list): Shape of node features. If shape is (None,) embedding layer is used.\n input_edge_shape (list): Shape of edge features. If shape is (None,) embedding layer is used.\n input_embedd (dict): Dictionary of embedding parameters used if input shape is None. Default is\n {'input_node_vocab': 95, 'input_edge_vocab': 5, 'input_state_vocab': 100,\n 'input_node_embedd': 64, 'input_edge_embedd': 64, 'input_state_embedd': 64,\n 'input_type': 'ragged'}.\n output_embedd (dict): Dictionary of embedding parameters of the graph network. 
Default is\n            {\"output_mode\": 'graph', \"output_tensor_type\": 'padded'}.\n        output_mlp (dict): Dictionary of arguments for final MLP regression or classification layer. Default is\n            {\"use_bias\": [True, True, False], \"units\": [25, 10, 1],\n            \"activation\": ['relu', 'relu', 'sigmoid']}.\n        depth (int): Number of convolution layers. Default is 3.\n        attention_heads_num (int): Number of attention heads. Default is 5.\n        attention_heads_concat (bool): Concat attention. Default is False.\n        attention_args (dict): Layer arguments for attention layer. Default is\n            {\"units\": 32, 'is_sorted': False, 'has_unconnected': True}\n    Returns:\n        model (tf.keras.model): GAT model.\n    \"\"\"\n    # default values\n    model_default = {'input_embedd': {'input_node_vocab': 95, 'input_edge_vocab': 5, 'input_state_vocab': 100,\n                                      'input_node_embedd': 64, 'input_edge_embedd': 64, 'input_state_embedd': 64,\n                                      'input_tensor_type': 'ragged'},\n                     'output_embedd': {\"output_mode\": 'graph', \"output_tensor_type\": 'padded'},\n                     'output_mlp': {\"use_bias\": [True, True, False], \"units\": [25, 10, 1],\n                                    \"activation\": ['relu', 'relu', 'sigmoid']},\n                     'attention_args': {\"units\": 32, 'is_sorted': False, 'has_unconnected': True}\n                     }\n\n    # Update default values\n    input_embedd = update_model_args(model_default['input_embedd'], input_embedd)\n    output_embedd = update_model_args(model_default['output_embedd'], output_embedd)\n    output_mlp = update_model_args(model_default['output_mlp'], output_mlp)\n    attention_args = update_model_args(model_default['attention_args'], attention_args)\n    pooling_nodes_args = {}\n\n    # Make input embedding, if no feature dimension\n    node_input, n, edge_input, ed, edge_index_input, _, _ = generate_standard_graph_input(input_node_shape,\n                                                                                          input_edge_shape, None,\n                                                                                          **input_embedd)\n\n    edi = edge_index_input\n\n    nk = Dense(units=attention_args[\"units\"], activation=\"linear\")(n)\n    for i in range(0, depth):\n        heads = [AttentionHeadGAT(**attention_args)([nk, ed, edi]) for _ in range(attention_heads_num)]\n        if attention_heads_concat:\n            nk = Concatenate(axis=-1)(heads)\n        else:\n            nk = Average()(heads)\n\n    n = nk\n    if output_embedd[\"output_mode\"] == 'graph':\n        out = PoolingNodes(**pooling_nodes_args)(n)\n        output_mlp.update({\"input_tensor_type\": \"tensor\"})\n        out = MLP(**output_mlp)(out)\n        main_output = ks.layers.Flatten()(out)  # will be dense\n    else:  # node embedding\n        out = MLP(**output_mlp)(n)\n        main_output = ChangeTensorType(input_tensor_type=\"ragged\", output_tensor_type=\"tensor\")(out)\n\n    model = tf.keras.models.Model(inputs=[node_input, edge_input, edge_index_input], outputs=main_output)\n\n    return model\n","sub_path":"kgcnn/literature/GAT.py","file_name":"GAT.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"472404870","text":"from sklearn.decomposition import LatentDirichletAllocation\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\n\r\nspam_header = 'spam\\t'\r\nno_spam_header = 'ham\\t'\r\ndocuments = []\r\n\r\n# Unlike before, there is no need to build labels; we only extract the documents.\r\nwith open('SMSSpamCollection') as file_handle:\r\n    for line in file_handle:\r\n        if line.startswith(spam_header):\r\n            documents.append(line[len(spam_header):])\r\n        elif line.startswith(no_spam_header):\r\n            documents.append(line[len(no_spam_header):])\r\n\r\n# LDA works better with raw count features than with word-frequency features,\r\n# so we use CountVectorizer. It also automatically removes words that do not\r\n# help the topic model (stop_words).\r\nvectorizer = CountVectorizer(stop_words='english', max_features=2000)\r\nterm_counts = vectorizer.fit_transform(documents)\r\nvocabulary = vectorizer.get_feature_names_out()\r\n\r\n# Train the topic model.\r\ntopic_model = LatentDirichletAllocation(n_components=10)\r\ntopic_model.fit(term_counts)\r\n\r\n# Print the learned topics one by one.\r\ntopics = topic_model.components_\r\nfor topic_id, weights in enumerate(topics):\r\n    print('topic %d' % topic_id, end=': ')\r\n    pairs = []\r\n    for term_id, value in enumerate(weights):\r\n        pairs.append( (abs(value), vocabulary[term_id]) )\r\n    pairs.sort(key=lambda x: x[0], reverse=True)\r\n    for pair in pairs[:10]:\r\n        print(pair[1], end=',')\r\n    print()\r\n","sub_path":"chapter11/11-6-topic-modeling.py","file_name":"11-6-topic-modeling.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"547026962","text":"#!/usr/bin/env python2.7\n#\n# REST Interface to COCOMO library methods\n#\n\nimport web\n\nfrom lib import handler_cocomo\nfrom lib import handler_fe\n\nfrom version import app_version\n\nclass getVersion():\n\tdef GET(self):\n\t\treturn app_version\n\nmapping = ( \n\t\n\t\"/(basic|intermediate|cocomo2ed|cocomo2pa)/\", handler_cocomo.index,\n\t\"/fe/(basic|intermediate|cocomo2ed|cocomo2pa)/\", handler_fe.index,\n\t\n\t\"/version\", getVersion,\n) \n\nweb.config.debug = False\n\n\nif __name__ == \"__main__\":\n\tapp = web.subdir_application(mapping)\n\tapp.run()\n","sub_path":"cocomo_rest_api.py","file_name":"cocomo_rest_api.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"490805230","text":"from collections import Counter, defaultdict, OrderedDict, deque\nfrom bisect import bisect_left, bisect_right\nfrom functools import reduce, lru_cache\nfrom typing import List\nimport itertools\nimport math\nimport heapq\nimport string\ntrue = True\nfalse = False\nMIN, MAX, MOD = -0x3f3f3f3f, 0x3f3f3f3f, 1000000007\n\n\n#\n# @lc app=leetcode id=1552 lang=python3\n#\n# [1552] Magnetic Force Between Two Balls\n#\n# https://leetcode.com/problems/magnetic-force-between-two-balls/description/\n#\n# algorithms\n# Medium (38.24%)\n# Total Accepted: 3.3K\n# Total Submissions: 8.7K\n# Testcase Example: '[1,2,3,4,7]\\n3'\n#\n# In universe Earth C-137, Rick discovered a special form of magnetic force\n# between two balls if they are put in his new invented basket. Rick has n\n# empty baskets, the i^th basket is at position[i], Morty has m balls and needs\n# to distribute the balls into the baskets such that the minimum magnetic force\n# between any two balls is maximum.\n#\n# Rick stated that magnetic force between two different balls at positions x\n# and y is |x - y|.\n#\n# Given the integer array position and the integer m. Return the required\n# force.\n#\n#\n# Example 1:\n#\n#\n# Input: position = [1,2,3,4,7], m = 3\n# Output: 3\n# Explanation: Distributing the 3 balls into baskets 1, 4 and 7 will make the\n# magnetic force between ball pairs [3, 3, 6]. 
The minimum magnetic force is 3.\n# We cannot achieve a larger minimum magnetic force than 3.\n#\n#\n# Example 2:\n#\n#\n# Input: position = [5,4,3,2,1,1000000000], m = 2\n# Output: 999999999\n# Explanation: We can use baskets 1 and 1000000000.\n#\n#\n#\n# Constraints:\n#\n#\n# n == position.length\n# 2 <= n <= 10^5\n# 1 <= position[i] <= 10^9\n# All integers in position are distinct.\n# 2 <= m <= position.length\n#\n#\n#\nclass BS:\n '''General template for binary search \n '''\n def search(lv: int, rv: int, bool_func):\n left, right = lv, rv\n while left < right:\n mid = (left + right) // 2\n if bool_func(mid):\n left = mid + 1\n else:\n right = mid\n return left - 1\n\n\nclass Solution:\n def maxDistance(self, p: List[int], m: int) -> int:\n def __okay(distance):\n i32m, loc = m, p[0]\n while i32m > 0:\n i = bisect_left(p, loc)\n if i == len(p): return False\n loc = p[i] + distance\n i32m -= 1\n return True\n\n p.sort()\n return BS.search(1, p[-1], __okay)\n\n\nsol = Solution()\n\nposition, m = [1, 2, 3, 4, 7], 3\nposition, m = [5, 4, 3, 2, 1, 1000000000], 2\nprint(sol.maxDistance(position, m))\n","sub_path":"python_solutions/1552.magnetic-force-between-two-balls.py","file_name":"1552.magnetic-force-between-two-balls.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"62981375","text":"import os\nimport shutil\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import SGD, Adam\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom collections import OrderedDict\nfrom . import few_shot\n\n\n_log_path = None\n_log_filename = 'log.txt'\n\n\ndef one_hot(y, num_class):\n return torch.zeros((len(y), num_class)).to(y.device).scatter_(1, y.unsqueeze(1), 1)\n\ndef sd_parallelize(state_dict):\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = \"module.\"+k\n new_state_dict[name] = v\n return new_state_dict\n\ndef set_log_path(path):\n global _log_path\n _log_path = path\n\ndef set_log_filename(filename):\n global _log_filename\n _log_filename = filename\n\ndef log(obj, filename=None):\n print(obj)\n if _log_path is not None:\n if filename is not None:\n log_filename = filename\n else:\n log_filename = _log_filename\n with open(os.path.join(_log_path, log_filename), 'a') as f:\n print(obj, file=f)\n\n\nclass Averager():\n\n def __init__(self):\n self.n = 0.0\n self.v = 0.0\n\n def add(self, v, n=1.0):\n self.v = (self.v * self.n + v * n) / (self.n + n)\n self.n += n\n\n def item(self):\n return self.v\n\n\nclass Timer():\n\n def __init__(self):\n self.v = time.time()\n\n def s(self):\n self.v = time.time()\n\n def t(self):\n return time.time() - self.v\n\n\ndef set_gpu(gpu):\n print('set gpu:', gpu)\n os.environ['CUDA_VISIBLE_DEVICES'] = gpu\n\n\ndef ensure_path(path, remove=True,resume = None):\n basename = os.path.basename(path.rstrip('/'))\n if os.path.exists(path):\n if remove and (basename.startswith('_')\n or input('{} exists, remove? 
([y]/n): '.format(path)) != 'n'):\n            if resume is not None:\n                return\n            shutil.rmtree(path)\n            os.makedirs(path)\n    else:\n        os.makedirs(path)\n\n\ndef time_str(t):\n    if t >= 3600:\n        return '{:.1f}h'.format(t / 3600)\n    if t >= 60:\n        return '{:.1f}m'.format(t / 60)\n    return '{:.1f}s'.format(t)\n\n\ndef compute_logits(feat, proto, metric='dot', temp=1.0):\n    assert feat.dim() == proto.dim()\n\n    if feat.dim() == 2:\n        if metric == 'dot':\n            logits = torch.mm(feat, proto.t())\n        elif metric == 'cos':\n            logits = torch.mm(F.normalize(feat, dim=-1),\n                              F.normalize(proto, dim=-1).t())\n        elif metric == 'sqr':\n            logits = -(feat.unsqueeze(1) -\n                       proto.unsqueeze(0)).pow(2).sum(dim=-1)\n\n    elif feat.dim() == 3:\n        if metric == 'dot':\n            logits = torch.bmm(feat, proto.permute(0, 2, 1))\n        elif metric == 'cos':\n            logits = torch.bmm(F.normalize(feat, dim=-1),\n                               F.normalize(proto, dim=-1).permute(0, 2, 1))\n        elif metric == 'sqr':\n            logits = -(feat.unsqueeze(2) -\n                       proto.unsqueeze(1)).pow(2).sum(dim=-1)\n\n    return logits * temp\n\n\ndef compute_acc(logits, label, reduction='mean'):\n    ret = (torch.argmax(logits, dim=1) == label).float()\n    if reduction == 'none':\n        return ret.detach()\n    elif reduction == 'mean':\n        return ret.mean().item()\n\n\ndef compute_n_params(model, return_str=True):\n    tot = 0\n    for p in model.parameters():\n        w = 1\n        for x in p.shape:\n            w *= x\n        tot += w\n    if return_str:\n        if tot >= 1e6:\n            return '{:.1f}M'.format(tot / 1e6)\n        else:\n            return '{:.1f}K'.format(tot / 1e3)\n    else:\n        return tot\n\n\ndef make_optimizer(params, name, lr, weight_decay=None, milestones=None):\n    if weight_decay is None:\n        weight_decay = 0.\n    if name == 'sgd':\n        optimizer = SGD(params, lr, momentum=0.9, weight_decay=weight_decay)\n    elif name == 'adam':\n        optimizer = Adam(params, lr, weight_decay=weight_decay)\n    if milestones:\n        lr_scheduler = MultiStepLR(optimizer, milestones)\n    else:\n        lr_scheduler = None\n    return optimizer, lr_scheduler\n\n\ndef visualize_dataset(dataset, name, writer, n_samples=16):\n    demo = []\n    for i in np.random.choice(len(dataset), n_samples):\n        demo.append(dataset.convert_raw(dataset[i][0]))\n    writer.add_images('visualize_' + name, torch.stack(demo))\n    writer.flush()\n\n\ndef freeze_bn(model):\n    for m in model.modules():\n        if isinstance(m, nn.BatchNorm2d):\n            m.eval()\n\n","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"53479676","text":"import json\nfrom jiwer import wer\n\ndef wer_from_json(json_file):\n    with open(json_file, encoding='utf-8') as jf:\n        data = json.load(jf)\n    reference = []\n    hypothesis = []\n    for wav in data['utts']:\n        # recognized text: undo sentencepiece spacing and drop the trailing '<eos>' token\n        rec_text = data['utts'][wav]['output'][0]['rec_text']\n        rec_text = rec_text.replace('▁', ' ')[:-len('<eos>')].lstrip()\n        # text\n        text = data['utts'][wav]['output'][0]['text']\n\n        reference.append(text)\n        hypothesis.append(rec_text)\n\n    error = wer(reference, hypothesis)\n    print(error * 100)\n\ndef read_espnet_results(results_wrd_md):\n    # scan forward to the header line that contains 'Speaker' and print it\n    with open(results_wrd_md, 'r') as f:\n        x = f.readline()\n        while x and 'Speaker' not in x.split():\n            x = f.readline()\n        print(x)\n\n\n\nif __name__ == '__main__':\n    #wer_from_json('decode/atis.test/result.json')\n    #wer_from_json('decode/atis.train/result.json')\n    #wer_from_json('decode/atis.valid/result.json')\n    
read_espnet_results('decode/atis.test/results.wrd.md')\n","sub_path":"egs/librispeech/asr1/src/jiwer_calculate_wer.py","file_name":"jiwer_calculate_wer.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"305759758","text":"def game(ch1, ch2):\n    if ch1 == \"r\" or ch1 == \"R\":\n        if ch2 == \"s\" or ch2 == \"S\":\n            return 1\n        elif ch2 == \"p\" or ch2 == \"P\":  # paper beats rock\n            return 2\n    if ch1 == \"s\" or ch1 == \"S\":\n        if ch2 == \"p\" or ch2 == \"P\":\n            return 1\n        elif ch2 == \"r\" or ch2 == \"R\":\n            return 2\n    if ch1 == \"p\" or ch1 == \"P\":\n        if ch2 == \"r\" or ch2 == \"R\":\n            return 1\n        elif ch2 == \"s\" or ch2 == \"S\":\n            return 2\n\ndef main():\n    i = 1\n    count = 0\n    c1 = 0\n    c2 = 0\n    while count < 3:\n        print(\"Round # \", i)\n        ch1 = str(input(\"Player 1's Choice: \"))\n        ch2 = str(input(\"Player 2's Choice: \"))\n        if ch1 == ch2:\n            count += 0\n            print(\"No one wins this round\")\n        elif ch1 != ch2:\n            win = game(ch1, ch2)\n            print(\"Player\", win, \"wins this round\")\n            if win == 1:\n                c1 += 1\n            elif win == 2:\n                c2 += 1\n            count += 1\n        i += 1\n    if c1 > c2:\n        print(\"Player 1 wins the game\")\n    elif c2 > c1:\n        print(\"Player 2 wins the game\")\n\nif __name__ == '__main__':\n    main()\n","sub_path":"game1.py","file_name":"game1.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"373499769","text":"import os, sys\nimport re\nimport glob\nimport plotly\nimport datetime\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom itertools import chain\nimport matplotlib.ticker as ticker\nfrom gui import GUI\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n\nplt.rcParams['font.family'] ='sans-serif'#使用するフォント\nplt.rcParams['xtick.direction'] = 'in'#x軸の目盛線が内向き('in')か外向き('out')か双方向か('inout')\nplt.rcParams['ytick.direction'] = 'in'#y軸の目盛線が内向き('in')か外向き('out')か双方向か('inout')\nplt.rcParams['xtick.major.width'] = 1.0#x軸主目盛り線の線幅\nplt.rcParams['ytick.major.width'] = 1.0#y軸主目盛り線の線幅\nplt.rcParams['font.size'] = 8 #フォントの大きさ\nplt.rcParams['axes.linewidth'] = 1.0# 軸の線幅edge linewidth。囲みの太さ\n\n\nDEFAULT_SIZE_GUIDANCE = {\n \"distributions\": 500,\n \"images\": 4,\n \"audio\": 4,\n \"scalars\": 10000,\n \"histograms\": 1,\n \"tensors\": 100000,\n}\n\nclass TKEventGetter():\n \"\"\" tensorboardに表示したデータを取得し、グラフを作成する \"\"\"\n def __init__(self, events_list, prob, classification):\n dt_now = datetime.datetime.now()\n self.events_list = events_list\n self._prob = prob # 確率分布のグラフを作成するフラグ\n self._classification = classification # 回帰問題かどうか管理するフラグ\n self.log_dir = \"results/\" + dt_now.strftime(\"%y%m%d_%H%M%S\") + \"_events\"\n self.result_dic = {}\n self.graph_option = {}\n self._init_log()\n\n def _init_log(self):\n \"\"\" 結果を格納するdirectoryの作成 \"\"\"\n if tf.io.gfile.exists(self.log_dir):\n tf.io.gfile.remove(self.log_dir)\n tf.io.gfile.makedirs(self.log_dir)\n\n def __call__(self):\n # eventごとにデータを取得し、dictに格納する\n self.gather_result()\n\n # GUIの表示\n graph_select = self.use_gui()\n\n # 格納したdictから各項目ごとにグラフを作成する\n num = 0\n for key in self.result_dic:\n if graph_select[0]:\n self.make_graph(key, self.result_dic[key])\n if graph_select[1]:\n self.Plotly_make_graph(key, self.result_dic[key])\n if graph_select[2]:\n self.make_graph_moving_avg(key, self.result_dic[key])\n if graph_select[3]:\n 
self.Plotly_make_graph_moving_avg(key, self.result_dic[key])\n if self._prob:\n self.make_graph_prob(key, self.result_dic[key], num)\n self.Plotly_make_graph_prob(key, self.result_dic[key], num)\n num += 1\n self.logger()\n return\n \n def logger(self):\n \"\"\" どのファイルのeventを読み込んでグラフを作成したかテキストファイルに書き出す\"\"\"\n str_ = '\\n'.join(self.events_list)\n with open(self.log_dir + \"/filelist.txt\", 'wt') as f:\n f.write(str_)\n return\n\n def gather_result(self):\n \"\"\"\n directoryの中身を取得、項目ごとにdictに格納\n \"\"\"\n for event in self.events_list:\n # 名前の取得\n name = event.split('/')[1]\n self.get_scores(name, event)\n return\n \n def get_scores(self, name, path_file):\n \"\"\"指定されたファイルからスコアの系列を取得して返す\n\n parameters\n ----------\n name : result directory\n\n path_file : event path\n\n returns\n ----------\n \n \"\"\"\n accumulator = EventAccumulator(\n path=path_file,\n size_guidance=DEFAULT_SIZE_GUIDANCE\n )\n accumulator.Reload()\n\n tag_dict = accumulator.Tags() #'images', 'audio', 'histograms', 'scalars', 'distributions', 'tensors', 'graph', 'meta_graph', 'run_metadata'\n scalars_key = tag_dict['tensors']\n assert scalars_key is not None\n\n if not name in self.graph_option:\n self.graph_option.setdefault('all',{})\n self.graph_option['all'][name] = name\n \n for key in scalars_key:\n key_name = key.replace('/','_')\n if not key_name in self.result_dic:\n self.result_dic.setdefault(key_name,{})\n if not key_name in self.graph_option:\n self.graph_option.setdefault(key_name,{})\n self.graph_option[key_name]['x_axis'] = \"Epochs\"\n self.graph_option[key_name]['y_axis'] = key_name\n\n if not name in self.result_dic[key_name]:\n self.result_dic[key_name].setdefault(name,{})\n value = accumulator.Tensors(key)\n self.result_dic[key_name][name] = np.array([self.decode(tmp.tensor_proto) for tmp in value])\n \n return\n\n def decode(self, val):\n tensor_bytes = val.tensor_content\n tensor_dtype = val.dtype\n tensor_shape = [x.size for x in val.tensor_shape.dim]\n tensor_array = tf.io.decode_raw(tensor_bytes, tensor_dtype)\n tensor_array = tf.reshape(tensor_array, tensor_shape)\n return tensor_array\n\n def use_gui(self):\n self.gui = GUI(self.graph_option)\n self.graph_option, graph_select, self._classification, graph_option = self.gui()\n self._prob = graph_option[0]\n return graph_select\n\n def make_graph(self, name, values):\n \"\"\"\n 項目ごとにグラフを作成する\n\n parameters\n ----------\n name : result directory\n\n values : dict\n\n returns\n ----------\n \"\"\"\n fig = plt.figure(figsize=(10,5))\n colorlist = [\"r\", \"g\", \"b\", \"c\", \"m\", \"y\", \"k\"]\n ax = fig.add_subplot(1, 1, 1)\n i = 0\n if not len(values.keys()):\n return \n for key in values:\n array = values[key]\n plt.plot(range(array.shape[0]), array, linestyle='solid', color=colorlist[i], label=self.graph_option['all'][key], alpha=0.6)\n i += 1\n if re.search('accuracy', name) and self._classification:\n ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1.0))\n plt.grid(which='major',color='gray',linestyle='-')\n plt.xlabel(self.graph_option[name]['x_axis'])\n plt.ylabel(self.graph_option[name]['y_axis'])\n plt.legend()\n plt.savefig(self.log_dir + '/{}.png'.format(name))\n plt.close()\n return\n\n def Plotly_make_graph(self, name, values):\n \"\"\"\n plotlyで項目ごとにグラフを作成する\n\n parameters\n ----------\n name : result directory\n\n values : dict\n\n returns\n ----------\n \"\"\"\n colorlist = [\"rgba(255, 0, 0, 0.6)\", \"rgba(0, 255, 0, 0.6)\", \"rgba(0, 0, 255, 0.6)\", \"rgba(0, 174, 239, 0.6)\", \"rgba(236, 0, 140, 0.6)\", \"rgba(227, 
199, 0, 0.6)\", \"rgba(105, 105, 105, 0.6)\"]\n fig = make_subplots(rows=1, cols=1, subplot_titles=(\"epoch☓{}\".format(name)))\n if not len(values.keys()):\n return \n for i, key in enumerate(values):\n array = values[key]\n fig.add_trace(go.Scattergl(x=np.array(range(array.shape[0])), y=array, name=self.graph_option['all'][key], mode = 'lines', line_color='{}'.format(colorlist[i])), row=1, col=1)\n\n fig.update_xaxes(title_text=self.graph_option[name]['x_axis'], row=1, col=1)\n fig.update_yaxes(title_text=self.graph_option[name]['y_axis'], row=1, col=1)\n plotly.offline.plot(fig, filename=self.log_dir + \"/{}.html\".format(name))\n return\n\n\n def make_graph_moving_avg(self, name, values, rate=3):\n \"\"\"\n 項目ごとに移動平均したグラフを作成する\n\n parameters\n ----------\n name : result directory\n\n values : dict\n\n returns\n ----------\n \"\"\"\n fig = plt.figure(figsize=(10,5))\n colorlist = [\"r\", \"g\", \"b\", \"c\", \"m\", \"y\", \"k\"]\n ax = fig.add_subplot(1, 1, 1)\n i = 0\n if not len(values.keys()):\n return \n for key in values:\n array = np.convolve(values[key], np.ones(rate)/float(rate), 'valid')\n plt.plot(range(array.shape[0]), array, linestyle='solid', color=colorlist[i], label=self.graph_option['all'][key], alpha=0.6)\n i += 1\n if re.search('accuracy', name) and not self._classification:\n ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1.0))\n plt.grid(which='major',color='gray',linestyle='-')\n plt.xlabel(self.graph_option[name]['x_axis'])\n plt.ylabel(self.graph_option[name]['y_axis'])\n plt.legend()\n plt.savefig(self.log_dir + '/{}_moving_avg.png'.format(name))\n plt.close()\n return\n\n def Plotly_make_graph_moving_avg(self, name, values, rate=3):\n \"\"\"\n plotlyで項目ごとにグラフを作成する\n\n parameters\n ----------\n name : result directory\n\n values : dict\n\n returns\n ----------\n \"\"\"\n colorlist = [\"rgba(255, 0, 0, 0.6)\", \"rgba(0, 255, 0, 0.6)\", \"rgba(0, 0, 255, 0.6)\", \"rgba(0, 174, 239, 0.6)\", \"rgba(236, 0, 140, 0.6)\", \"rgba(227, 199, 0, 0.6)\", \"rgba(105, 105, 105, 0.6)\"]\n fig = make_subplots(rows=1, cols=1, subplot_titles=(\"epoch ☓ {}\".format(name)))\n if not len(values.keys()):\n return \n for i, key in enumerate(values):\n array = values[key]\n array = np.convolve(values[key], np.ones(rate)/float(rate), 'valid')\n fig.add_trace(go.Scattergl(x=np.array(range(array.shape[0])), y=array, name=self.graph_option['all'][key], mode='lines', line_color='{}'.format(colorlist[i])), row=1, col=1)\n\n fig.update_xaxes(title_text=self.graph_option[name]['x_axis'], row=1, col=1)\n fig.update_yaxes(title_text=self.graph_option[name]['y_axis'], row=1, col=1)\n plotly.offline.plot(fig, filename=self.log_dir + \"/{}.html\".format(name), auto_open=False)\n return\n\n def make_graph_prob(self, name, values, num):\n \"\"\"\n 項目ごとに確率分布のグラフを作成する\n\n parameters\n ----------\n name : result directory\n\n values : dict\n\n returns\n ----------\n \"\"\"\n fig = plt.figure(figsize=(10,5))\n colorlist = [\"r\", \"g\", \"b\", \"c\", \"m\", \"brown\", \"grey\", \"darkblue\"]\n ax = fig.add_subplot(1, 1, 1)\n key_num = len(values.keys())\n if not key_num:\n return\n key = list(values)\n all_results = np.zeros((key_num, len(values[key[0]])))\n for i in range(key_num):\n all_results[i] = values[key[i]]\n mean = np.mean(all_results, axis=0)\n std = np.std(all_results, axis=0)\n df = pd.DataFrame({'mean': mean, 'variance': std})\n df.to_csv(self.log_dir + '/{}_prob.csv'.format(name))\n \n plt.plot(range(mean.shape[0]), mean, linestyle='solid', color=colorlist[num])\n \n if 
re.search('accuracy', name) and not self._classification:\n plt.fill_between(range(mean.shape[0]) ,np.clip(mean - std,0,1), np.clip(mean + std,0,1),facecolor=colorlist[num],alpha=0.3)\n ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1.0))\n else:\n plt.fill_between(range(mean.shape[0]) ,mean - std, mean + std, facecolor=colorlist[num],alpha=0.3)\n plt.grid(which='major',color='gray',linestyle='-')\n plt.xlabel(self.graph_option[name]['x_axis'])\n plt.ylabel(self.graph_option[name]['y_axis'])\n plt.savefig(self.log_dir + '/{}_prob.png'.format(name))\n plt.close()\n return\n\n def Plotly_make_graph_prob(self, name, values, num):\n \"\"\"\n plotlyで項目ごとに確率分布のグラフを作成する\n \n parameters\n ----------\n name : result directory\n\n values : dict\n\n returns\n ----------\n \"\"\"\n colorlist = [\"rgba(255, 0, 0, 0.6)\", \"rgba(0, 255, 0, 0.6)\", \"rgba(0, 0, 255, 0.6)\", \"rgba(0, 174, 239, 0.6)\", \"rgba(236, 0, 140, 0.6)\", \"rgba(227, 199, 0, 0.6)\", \"rgba(105, 105, 105, 0.6)\"]\n std_colorlist = [\"rgba(255, 0, 0, 0.3)\", \"rgba(0, 255, 0, 0.3)\", \"rgba(0, 0, 255, 0.3)\", \"rgba(0, 174, 239, 0.3)\", \"rgba(236, 0, 140, 0.3)\", \"rgba(227, 199, 0, 0.3)\", \"rgba(105, 105, 105, 0.3)\"]\n fig = make_subplots(rows=1, cols=1, subplot_titles=(\"epoch☓{}\".format(name)))\n key_num = len(values.keys())\n if not key_num:\n return\n key = list(values)\n all_results = np.zeros((key_num, len(values[key[0]])))\n for i in range(key_num):\n all_results[i] = values[key[i]]\n mean = np.mean(all_results, axis=0)\n std = np.std(all_results, axis=0)\n index = np.array(range(mean.shape[0]))\n\n \n if re.search('accuracy', name) and not self._classification:\n upper_confidence = np.clip(mean + std,0,1)\n lower_confidence = np.clip(mean - std,0,1)\n else:\n upper_confidence = mean + std\n lower_confidence = mean - std\n\n fig.add_trace(go.Scattergl(x=index, y=mean, name='Average', line_color='{}'.format(colorlist[num])), row=1, col=1)\n fig.add_trace(go.Scatter(x=index, y=upper_confidence, name=\"Upper confidence\", fill= None, line_color='{}'.format(std_colorlist[num])), row=1, col=1)\n fig.add_trace(go.Scatter(x=index, y=lower_confidence, name=\"Lower confidence\", fill= 'tonexty', line_color='{}'.format(std_colorlist[num])), row=1, col=1)\n\n fig.update_xaxes(title_text=self.graph_option[name]['x_axis'], row=1, col=1)\n fig.update_yaxes(title_text=self.graph_option[name]['y_axis'], row=1, col=1)\n plotly.offline.plot(fig, filename=self.log_dir + \"/{}_prob.html\".format(name))\n return\n\n\ndef main(args):\n assert args.dir is not None\n events_list = list(chain.from_iterable([glob.glob(res_dir+\"tf_board/events.*\", recursive=True) for res_dir in args.dir]))\n evget = TKEventGetter(events_list, args.prob, args.classification)\n evget()\n return\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dir',nargs='*', help='tensorboard event directory')\n parser.add_argument('--prob', action='store_true', help='Probability distribution graph')\n parser.add_argument('--classification', action='store_true', help='Whether Regression task or not')\n args = parser.parse_args()\n main(args)","sub_path":"utility/tk_event_getter.py","file_name":"tk_event_getter.py","file_ext":"py","file_size_in_byte":14724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"62238692","text":"import hashlib\nimport random\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n# from books.models import User_info\nfrom 
tools.login import login_check\nfrom django. db import models\nfrom .models import *\nfrom django.http import HttpResponse\n\n\n\n# Create your views here.\n\ndef test_books(request,mail,number):\n print(number)\n # return HttpResponse('HG')\n # print('ggggg>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',mail,number)\n # return HttpResponse('FFFD')\n book = Novel.objects.filter(number=number).values('id')\n #获得书名类字典\n book_name = Novel.objects.filter(number=number).values('name')\n print('book_name>>>>>>>>>>>>>>>>>>>>>>>',book_name)\n book_names = book_name[0]['name']\n\n print('>>>>>>>>>>>',book)\n #获得书名\n messge_id = book[0]['id']\n print('messge>>>>>>>>>>',messge_id)\n try:\n author = NovelMessage.objects.filter(id=messge_id).values('author')[0]['author']\n introduce = NovelMessage.objects.filter(id=messge_id).values('introduce')[0]['introduce']\n up = NovelMessage.objects.filter(id=messge_id).values('up')[0]['up']\n serialize = NovelMessage.objects.filter(id=messge_id).values('serialize')[0]['serialize']\n vip_type = NovelMessage.objects.filter(id=messge_id).values('vip_type')[0]['vip_type']\n except:\n result = {'code':1001,'erro':'数据库中无此数据'}\n return JsonResponse(result)\n\n print('>>>>>>>>>>>>>>>>',author)\n print('>>>>>>>>>>>>>>>>', introduce)\n print('>>>>>>>>>>>>>>>>',up)\n print('>>>>>>>>>>>>>>>>',serialize)\n print('>>>>>>>>>>>>>>>>',vip_type)\n if serialize == True:\n serialize = '有更新'\n else:\n serialize = '暂无更新'\n if vip_type == True:\n vip_type = 'VIP'\n else:\n vip_type = '免费阅读'\n print('>>>>>>>>>>>>>>>>>',serialize,vip_type)\n\n email_id = Novelrack.objects.filter(number_id=number)\n if not email_id:\n book_rack = Novelrack()\n book_rack.number_id = number\n # print('dgggggg>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',book_names)\n book_rack.name = book_names\n book_rack.email = mail\n # print('>>>>>>>>>>>>>>>>>加入书架成功>>>>>>>>>>>>>>>>>',ema)\n book_rack.save()\n ema = '加入书架成功'\n result = {'code':200,'msg':ema}\n return JsonResponse(result)\n else:\n em = '已经加入书架,请勿重复加入'\n result = {'code':100,'error':em}\n return JsonResponse(result)\n\n\n\ndef test_input(request,email):\n # return HttpResponse(email)\n print(('>>>>>>>>>>>>>>>>>>>>>>', email))\n # 判断是不是有用户id存放\n book_name = Novelrack.objects.filter(email=email)\n print(book_name)\n #如果没有用户就返回无书籍\n if not book_name:\n result = {'code':1003,'error':'暂无书籍加入'}\n return JsonResponse(result)\n else:\n messge_id = Novelrack.objects.filter(email=email).values_list('number_id')\n print('messsge_id>>>>>>>>>>>>>三季度经济上电视看>>>>>>>>>>>>>.',messge_id)\n\n rack_info = []\n for book_id in messge_id:\n item = {}\n #邮箱-->获得小说编号--->获取小说id-->获取小说详情\n rack_id = (book_id[0])\n print('rasl_list>>>>>>>>>>>>>>>>>>>>>',rack_id)\n nvel_id = Novel.objects.filter(number = rack_id).values('id')[0]['id']\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print(nvel_id)\n try:\n author = NovelMessage.objects.filter(id = nvel_id).values('author')[0]['author']\n print(author)\n introduce = NovelMessage.objects.filter(id = nvel_id).values('introduce')[0]['introduce']\n up = NovelMessage.objects.filter(id=nvel_id).values('up')[0]['up']\n serialize = NovelMessage.objects.filter(id=nvel_id).values('serialize')[0]['serialize']\n vip_type = NovelMessage.objects.filter(id=nvel_id).values('vip_type')[0]['vip_type']\n except:\n result = {'code': 1005, 'erro': '无此数据'}\n return JsonResponse(result)\n\n print('>>>>>>>>>>>>>>>>', author)\n print('>>>>>>>>>>>>>>>>', introduce)\n print('>>>>>>>>>>>>>>>>', up)\n print('>>>>>>>>>>>>>>>>', serialize)\n print('>>>>>>>>>>>>>>>>', vip_type)\n if 
serialize == True :\n serialize = '有更新'\n vip_type = 'VIP'\n else:\n serialize = '暂无更新'\n vip_type = '免费阅读'\n str1 = str(introduce)\n if len(str1)>5:\n st = str1[:5] + '....'\n print('>>>>>>>>>>>>>>>>>', serialize, vip_type)\n item['author'] = author\n item['introduce'] = st\n item['up'] = up\n item['serialize'] = serialize\n item['vip_type'] = vip_type\n rack_info.append(item)\n\n result = {'code':200,'data':rack_info,'msg':'成功'}\n return JsonResponse(result)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"bookrack/booker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"33076010","text":"\"\"\"code from:\nhttps://blog.dreamshire.com/project-euler-60-solution/\"\"\"\n\nfrom utils.utils import primes_below, is_prime\nimport itertools as iter\n\nprimes = primes_below(10000)\nset_size = 5\n\n\ndef make_chain(chain):\n if len(chain) == set_size:\n return chain\n for p in primes:\n if p > chain[-1] and all_prime(chain + [p]):\n new_chain = make_chain(chain + [p])\n if new_chain:\n return new_chain\n return False\n\n\ndef all_prime(chain):\n return all(is_prime(int(str(p[0]) + str(p[1]))) for p in iter.permutations(chain, 2))\n\n\nchain = 0\nwhile not chain:\n chain = make_chain([primes[0]])\n primes = primes[1:]\n\nprint(sum(map(int, chain)))\n","sub_path":"codes/problems/problem60.py","file_name":"problem60.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"388739473","text":"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nimport math\nimport numpy as np\nimport net\n\n\nclass DygraphModel():\n def __init__(self):\n self.bucket = 100000\n self.absolute_limt = 200.0\n\n def rescale(self, number):\n if number > self.absolute_limt:\n number = self.absolute_limt\n elif number < -self.absolute_limt:\n number = -self.absolute_limt\n return (number + self.absolute_limt) / (self.absolute_limt * 2 + 1e-8)\n\n def create_model(self, config):\n item_emb_size = config.get(\"hyper_parameters.item_emb_size\", 64)\n cat_emb_size = config.get(\"hyper_parameters.cat_emb_size\", 64)\n act = config.get(\"hyper_parameters.act\", \"sigmoid\")\n is_sparse = config.get(\"hyper_parameters.is_sparse\", False)\n use_DataLoader = config.get(\"hyper_parameters.use_DataLoader\", False)\n item_count = config.get(\"hyper_parameters.item_count\", 63001)\n cat_count = config.get(\"hyper_parameters.cat_count\", 801)\n din_model = net.DINLayer(item_emb_size, cat_emb_size, act, is_sparse,\n use_DataLoader, item_count, cat_count)\n return din_model\n\n # define feeds which convert numpy of batch data to paddle.tensor\n def create_feeds(self, batch, config):\n hist_item_seq = batch[0]\n hist_cat_seq = batch[1]\n target_item = batch[2]\n target_cat = batch[3]\n label = 
paddle.reshape(batch[4], [-1, 1])\n mask = batch[5]\n target_item_seq = batch[6]\n target_cat_seq = batch[7]\n return hist_item_seq, hist_cat_seq, target_item, target_cat, label, mask, target_item_seq, target_cat_seq\n\n # define loss function by predicts and label\n def create_loss(self, raw_pred, label):\n avg_loss = paddle.nn.functional.binary_cross_entropy_with_logits(\n raw_pred, label, reduction='mean')\n return avg_loss\n\n # define optimizer\n def create_optimizer(self, dy_model, config):\n boundaries = [410000]\n base_lr = config.get(\n \"hyper_parameters.optimizer.learning_rate_base_lr\")\n values = [base_lr, 0.2]\n sgd_optimizer = paddle.optimizer.SGD(\n learning_rate=paddle.optimizer.lr.PiecewiseDecay(\n boundaries=boundaries, values=values),\n parameters=dy_model.parameters())\n return sgd_optimizer\n\n # define metrics such as auc/acc\n # multi-task need to define multi metric\n def create_metrics(self):\n metrics_list_name = [\"auc\"]\n #auc_metric = paddle.metric.Auc(num_thresholds=self.bucket)\n auc_metric = paddle.metric.Auc(\"ROC\")\n metrics_list = [auc_metric]\n return metrics_list, metrics_list_name\n\n # construct train forward phase\n def train_forward(self, dy_model, metrics_list, batch_data, config):\n hist_item_seq, hist_cat_seq, target_item, target_cat, label, mask, target_item_seq, target_cat_seq = self.create_feeds(\n batch_data, config)\n\n raw_pred = dy_model(hist_item_seq, hist_cat_seq, target_item,\n target_cat, label, mask, target_item_seq,\n target_cat_seq)\n loss = self.create_loss(raw_pred, label)\n predict = paddle.nn.functional.sigmoid(raw_pred)\n predict_2d = paddle.concat([1 - predict, predict], 1)\n label_int = paddle.cast(label, 'int64')\n metrics_list[0].update(\n preds=predict_2d.numpy(), labels=label_int.numpy())\n\n print_dict = {'loss': loss}\n return loss, metrics_list, print_dict\n\n def infer_forward(self, dy_model, metrics_list, batch_data, config):\n hist_item_seq, hist_cat_seq, target_item, target_cat, label, mask, target_item_seq, target_cat_seq = self.create_feeds(\n batch_data, config)\n raw_pred = dy_model(hist_item_seq, hist_cat_seq, target_item,\n target_cat, label, mask, target_item_seq,\n target_cat_seq)\n\n predict = paddle.nn.functional.sigmoid(raw_pred)\n predict_2d = paddle.concat([1 - predict, predict], 1)\n label_int = paddle.cast(label, 'int64')\n metrics_list[0].update(\n preds=predict_2d.numpy(), labels=label_int.numpy())\n\n return metrics_list, None\n","sub_path":"models/rank/din/dygraph_model.py","file_name":"dygraph_model.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"325276983","text":"import codecs\r\nimport socket\r\nimport traceback\r\nfrom struct import *\r\n\r\nfrom modules.api import definitions\r\nfrom modules.data import variables\r\n\r\nbuffSize = 128\r\n\r\n''\r\nclass HIL_socket:\r\n def __init__(self, ip, port):\r\n \"\"\"\r\n\r\n :param family:\r\n :param type:\r\n \"\"\"\r\n self.family = socket.AF_INET\r\n self.type = socket.SOCK_STREAM\r\n\r\n self.isconn = False\r\n\r\n self.Socket = socket.socket(self.family, self.type)\r\n self.Socket.settimeout(0.5)\r\n\r\n self.servicesDict = definitions.servicesDict\r\n self.errorsDict = definitions.errorsDict\r\n self.txMsgStructuresDict = definitions.txMsgStructures\r\n self.rxMsgStructureDict = definitions.rxMsgStructures\r\n\r\n self.debug_log = True\r\n\r\n self.ip = ip\r\n self.port = port\r\n\r\n def newSocket(self, timeout=0.5):\r\n self.Socket = 
socket.socket(self.family, self.type)\r\n self.Socket.settimeout(timeout)\r\n\r\n def is_connected(self):\r\n return self.isconn\r\n\r\n def reset_connection(self):\r\n self.isconn = False\r\n\r\n def connect(self):\r\n \"\"\"\r\n\r\n :param IP:\r\n :param port:\r\n :return:\r\n \"\"\"\r\n server_address = (self.ip, self.port)\r\n\r\n # if self.debug_log:\r\n variables.log2(self.__class__.__name__,'starting up on %s port %s' % server_address)\r\n \r\n try:\r\n self.Socket.connect(server_address)\r\n self.isconn = True\r\n return 0\r\n except:\r\n if self.debug_log:\r\n variables.log2(self.__class__.__name__,'Error starting connection on %s port %s' % server_address)\r\n variables.print_exception(self.__class__.__name__)\r\n self.isconn = False\r\n return 1\r\n\r\n def send(self, message):\r\n \"\"\"\r\n\r\n :param message:\r\n :return:\r\n \"\"\"\r\n if self.debug_log:\r\n # variables.log2(self.__class__.__name__, 'sending to \"%s\" \"%s\"' % (self.ip, message))\r\n # variables.log2(self.__class__.__name__, 'sending (parsed) to \"%s\" \"%s\"' % (self.ip, \"\".join(\"%s \" % (\"0x%0.2X\" % tup) for tup in message)))\r\n pass\r\n try:\r\n self.Socket.sendall(message)\r\n return 0\r\n except:\r\n if self.debug_log:\r\n variables.log2(self.__class__.__name__,'Error sending \"%s\"' % message)\r\n variables.print_exception(self.__class__.__name__)\r\n return 1\r\n\r\n def receive (self, bufsize):\r\n \"\"\"\r\n\r\n :param bufsize:\r\n :return:\r\n \"\"\"\r\n data = None\r\n try:\r\n data = self.Socket.recv(bufsize)\r\n if self.debug_log:\r\n # variables.log2(self.__class__.__name__, 'received from \"%s\" \"%s\"' % (self.ip, data))\r\n # variables.log2(self.__class__.__name__, 'received from \"%s\" \"%s\"' % (self.ip, \"\".join(\"%s \" % (\"0x%0.2X\" % tup) for tup in data)))\r\n pass\r\n except:\r\n if self.debug_log:\r\n variables.print_exception(self.__class__.__name__)\r\n return data\r\n\r\n def close(self):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n if self.debug_log:\r\n variables.log2(self.__class__.__name__, \"closing socket\")\r\n try:\r\n self.Socket.shutdown(socket.SHUT_RDWR)\r\n self.Socket.close()\r\n return 0\r\n except:\r\n if self.debug_log:\r\n variables.print_exception(self.__class__.__name__)\r\n return 1\r\n\r\n def request(self, tx_message):\r\n # if self.debug_log:\r\n # variables.log2(self.__class__.__name__, \"get data\")\r\n\r\n self.send((tx_message+\"\\n\").encode())\r\n rx_message = self.receive(buffSize)\r\n try:\r\n rx_message = rx_message.decode()\r\n rx_data = [int(rm) for rm in rx_message.split(\",\")]\r\n # get error code and data\r\n return (0, rx_data)\r\n except:\r\n if self.debug_log:\r\n variables.print_exception(self.__class__.__name__)\r\n return (-1, None)\r\n\r\n def request_hex(self, service_def):\r\n if self.debug_log:\r\n variables.log2(self.__class__.__name__, \"get data\")\r\n\r\n format = \"!BB\"\r\n tx_length = calcsize(format) - 1\r\n\r\n try:\r\n tx_message = pack(format, tx_length, self.servicesDict[service_def])\r\n # tx_message = pack(self.txMsgStructuresDict['DATA'], tx_length, self.servicesDict['SERVICE_DATA'])\r\n except:\r\n if self.debug_log:\r\n variables.print_exception(self.__class__.__name__)\r\n return (-1, None)\r\n\r\n self.send(tx_message)\r\n rx_message = self.receive(buffSize)\r\n try:\r\n rx_data = [rm for rm in rx_message]\r\n # get error code and data\r\n return (rx_data[2], rx_data)\r\n except:\r\n if self.debug_log:\r\n variables.print_exception(self.__class__.__name__)\r\n return (-1, None)\r\n\r\n","sub_path":"rpi 
backup/backend/modules/api/HIL_socket_API.py","file_name":"HIL_socket_API.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"2580688","text":"from .Command import Command\nfrom .Completers import citekeyCompleter, tagCompleter, authorCompleter\n\nclass List(Command):\n command = 'list'\n help = \"List all items in database\"\n\n def set_args(self, subparser):\n subparser.add_argument(\"--title\", \"-t\", metavar='REGEX', type=str, default=None)\n subparser.add_argument(\"--author\", \"-a\", metavar='REGEX', type=str, default=None).completer = authorCompleter\n subparser.add_argument(\"--year\", \"-y\", metavar='REGEX', type=str, default=None)\n subparser.add_argument(\"--tag\", \"-T\", metavar='TAG', type=str, default=None).completer = tagCompleter\n subparser.add_argument(\"--key\", \"-k\", metavar='REGEX', type=str, default=None).completer = citekeyCompleter\n\n def run(self, args):\n import re\n from ..Database import Database\n from ..AnsiBib import printBibliography\n\n db = Database(dataDir=args.data_dir)\n\n gen = iter(db.works)\n\n def match(g, f, r):\n if r:\n return filter(lambda x: getattr(x, f)() and re.search(r, getattr(x, f)(), re.I), g)\n return g\n\n gen = match(gen, 'title', args.title)\n gen = match(gen, 'author', args.author)\n gen = match(gen, 'year', args.year)\n gen = match(gen, 'key', args.key)\n if args.tag:\n gen = filter(lambda x: args.tag in x.tags, gen)\n\n\n printBibliography(sorted(gen, key=lambda x: x.key()))\n","sub_path":"pdfs/Commands/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"90952058","text":"def waterTrap(towerList):\n size = len(towerList)\n\n left = [0 for x in range(size)]\n right = [0 for x in range(size)]\n\n water = 0\n\n left[0] = towerList[0]\n for i in range(1, size):\n left[i] = max(left[i-1], towerList[i])\n\n right[size-1] = towerList[size-1]\n for i in range(size-2, -1, -1):\n right[i] = max(right[i+1], towerList[i])\n\n for i in range(size):\n water += min(left[i], right[i]) - towerList[i]\n\n return water\n\n\ntower = [3, 0, 0, 2, 0, 4]\ntower2 = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]\nprint(waterTrap(tower))\nprint(waterTrap(tower2))\n","sub_path":"interviews/python/waterTrap.py","file_name":"waterTrap.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"485840589","text":"import sys\nimport os\nimport itertools\nimport numpy as np\n\nn, m = 0, 0\n\n# FILE READ\n\nif len(sys.argv) != 2:\n\tprint(\"Wrong usage of script.\")\n\texit()\nelse:\n\tfile = open(sys.argv[1])\n\nfor i,line in enumerate(file):\n\tif i == 0:\n\t\tn = int(line.split()[0])\n\t\tm = int(line.split()[1])\n\t\tarray = [[0 for x in range(m)] for y in range(n)]\n\telse:\n\t\tfor v in range(m):\n\t\t\tarray[i-1][v] = int(line.split()[v])\n\narray.sort(key=sum, reverse=True)\n# array = zip(*array)\n\n# CMAX IN RANGE\n\n# def cmax(data, length):\n# \tdata = zip(*data)\n# \tlength = len(data)\n# \t# print(\"lool\")\n# \tif length > len(data):\n# \t\tprint(\"Wrong data length!\")\n# \t\texit()\n# \telse:\n# \t\tc_max = []\n# \t\tfor v in range(length):\n# \t\t\tif v > 0:\n# \t\t\t\tc_max.append(data[v][0]+c_max[v-1])\n# \t\t\telse:\n# \t\t\t\tc_max.append(data[v][0])\n# \t\tfor v in range(len(c_max)):\n# \t\t\tfor i in range(1, len(data[length-1])):\n# 
\t\t\t\tc_max[v] += data[v][i]\n# \t\treturn max(c_max)\n\ndef cmax(inp_array):\n q=0\n cmax_tab=[0 for x in range(m)]\n cmax_after=[[0 for x in range(m)] for y in range(len(inp_array))]\n for v in range(m):\n for i in range(len(inp_array)):\n if v==0:\n cmax_tab[v]+=inp_array[i][v]\n cmax_after[i][0]=cmax_tab[v]\n elif i==0 and not v==0:\n q=v\n while not q==-1:\n cmax_tab[v]+=inp_array[0][q]\n cmax_after[i][v]=cmax_tab[v]\n q-=1\n else:\n \n tmp1=max(cmax_tab[v],cmax_after[i][v-1])\n cmax_tab[v]+=(tmp1-cmax_tab[v])+inp_array[i][v]\n cmax_after[i][v]=cmax_tab[v]\n return max(cmax_tab)\n\n# NEH ALGORITHM\n\ndef neh(arr):\n\tif len(arr) < 2:\n\t\tprint(\"Wrong array!\")\n\t\texit()\n\telse:\n\t\tfor v in range(2, len(arr)+1):\n\t\t\tfinal = float(\"inf\")\n\t\t\ttmp = arr[:v]\n\t\t\tfor i in range(v, 0, -1):\n\t\t\t\ttmp2 = tmp[0:-1]\n\t\t\t\ttmp2.insert(i-1,tmp[-1])\n\t\t\t\tcm = cmax(tmp2)\n\t\t\t\tif cm < final:\n\t\t\t\t\tfinal = cm\n\t\t\t\t\ttmp3 = tmp2\n\t\t\tarr[:v] = tmp3\n\treturn final\nprint(neh(array))\n\n# print(array)\n# print(list(itertools.permutations(array))[0])\n# print(list(itertools.permutations(array))[1])","sub_path":"lab0/neh2.py","file_name":"neh2.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"503103504","text":"'''\n@author:lvming\n@time:2021/7/1\n'''\nimport jsonpath\n\n'''\n 课程回顾:\n 1MD5加密与MockServer\n 2.在接口端的关键字驱动实现模式是一种较为常态化的测试框架的定义\nUnitTest下的接口测试效果:\n 关键字驱动模式下的分层处理:\n\t1.配置有关键字模块\n\t2.用例模块\n\t3.数据模块\n\t4.配置模块\n\t5.日志模块\n\t6.测试报告、套件\n'''\n# 这是接口关键字驱动类,用于提供自动化接口测试的关键字方法。\n# 主要实现常用的关键字内容,并定义好所有的参数的内容即可\n# 接口中的常用关键字无非就是:\n# \t1.各种模拟请求方法:post/get/put/delete/header/….\n# 2.设置入参的默认值时,设置的参数必须放在最后\nimport requests\nimport json\nclass ApiKey:\n # get请求的封装 因为params可能会存在无值的情况,存放默认值None\n def do_get(self,url,params=None,headers=None,**kwargs):\n # 因为请求会默认返回一个响应,所以函数定义时需要return一下\n return requests.get(url=url,params=params,headers=headers,**kwargs)\n\n\n # post请求的封装 data默认也等于None\n def do_post(self,url,data=None,**kwargs):\n return requests.post(url=url,data=data,**kwargs)\n\n # 基于JsonPath获取数据的关键字:用于提取所有需要的内容\n def get_text(self,txt,key):\n try:\n txt = json.loads(txt)\n # jsonpath获取数据的表达式:成功则返回list,失败返回false\n # 对于json格式数据的获取,本身是存有目的性来获取的。\n value = jsonpath.jsonpath(txt,'$..{0}'.format(key))\n if value:\n if len(value)==1:\n return value[0]\n return value\n except Exception as e:\n return e\n return value\n # 结果的断言测试\n def assert_text(self,value1,value2):\n try:\n assert value1 == value2\n return True\n except:\n return False\n\n\n\nif __name__ == '__main__':\n ak = ApiKey()\n data = {\n \"username\":\"admin\",\n \"password\":\"123456\"\n }\n headers = {}\n res = ak.do_get(url='http://39.98.138.157:5000/api/getuserinfo',timeout=0.1)\n print(res.text)\n s= ak.do_post(url='http://39.98.138.157:5000/api/getuserinfo',json=data)\n print(s.text)","sub_path":"class13/class13_xuzhu/api_keyword/api_key.py","file_name":"api_key.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"230470557","text":"# !/usr/bin/python\n# -*- coding: UTF-8 -*-\nfrom __future__ import print_function\nimport sys\nimport collections as coll\n\n\n# lower\ndef get_word_to_list(file_name):\n word_list = []\n with open(file_name, 'r') as f:\n for line in f:\n lines = line.lower()\n word_list.append(lines.strip('\\n'))\n return word_list\n\n\ndef get_argument_word(word_list):\n if len(sys.argv) == 2:\n rack = 
sys.argv[1]\n lower_word = rack.lower()\n c = coll.Counter(lower_word)\n return [word for word in word_list if not (coll.Counter(word) - c)]\n else:\n print('Usage: scrabble_change.py ABC')\n sys.exit()\n\n\ndef get_scores(words):\n word_dic = coll.defaultdict(int)\n scores = {\"a\": 1, \"c\": 3, \"b\": 3, \"e\": 1, \"d\": 2, \"g\": 2,\n \"f\": 4, \"i\": 1, \"h\": 4, \"k\": 5, \"j\": 8, \"m\": 3,\n \"l\": 1, \"o\": 1, \"n\": 1, \"q\": 10, \"p\": 3, \"s\": 1,\n \"r\": 1, \"u\": 1, \"t\": 1, \"w\": 4, \"v\": 4, \"y\": 4,\n \"x\": 8, \"z\": 10}\n for word in words:\n for s in word:\n word_dic[word] += scores[s]\n return word_dic\n\n\ndef main():\n word_list = get_word_to_list('E:\\\\56\\\\python_code\\\\class\\\\c4\\\\sowpods.txt')\n valid_words = get_argument_word(word_list)\n d = get_scores(valid_words)\n for key, val in sorted(d.items(), key=lambda item: item[1], reverse=True):\n print(val, key)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Scrabble_challenge_my.py","file_name":"Scrabble_challenge_my.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"503103504","text":"\n\"\"\"\nA fast date parser library with timezone offset support.\n\"\"\"\n\nfrom dateutil import tz\nfrom datetime import datetime, timedelta\n\nimport importlib\nimport io\nimport os\nimport warnings\n\n# Python 2 compatibility\ntry:\n\tunicode\n\tStringIO = io.BytesIO\nexcept NameError:\n\tStringIO = io.StringIO\n\nre = importlib.import_module(os.getenv('PYTHON_NR_DATE_REGEX_BACKEND', 're'))\n\n__author__ = 'Niklas Rosenstein '\n__version__ = 
'1.0.0'\n\n\nclass BaseFormatOption(object):\n\n\tdef __init__(self, char, dest):\n\t\tself.char = char\n\t\tself.dest = dest\n\n\tdef parse(self, string):\n\t\traise NotImplementedError\n\n\tdef render(self, date):\n\t\traise NotImplementedError\n\n\nclass FormatOption(BaseFormatOption):\n\n\tdef __init__(self, char, dest, regex, parse, render):\n\t\tsuper(FormatOption, self).__init__(char, dest)\n\t\tself.regex = re.compile(regex)\n\t\tself.parse = parse\n\t\tself.render = render\n\n\nclass TimezoneFormatOption(BaseFormatOption):\n\n\tdef __init__(self, char='z', dest='tzinfo'):\n\t\tsuper(TimezoneFormatOption, self).__init__(char, dest)\n\t\tself.regex = re.compile(r'(?:Z|[-+]\\d{2}:?\\d{2})')\n\n\tdef parse(self, string):\n\t\tif string == 'Z':\n\t\t\treturn tz.UTC\n\t\telse:\n\t\t\t# the pattern allows an optional colon (e.g. +02:00), so normalize it away\n\t\t\tstring = string.replace(':', '')\n\t\t\tsign = -1 if string[0] == '-' else 1\n\t\t\thours = int(string[1:3])\n\t\t\tminutes = int(string[3:5])\n\t\t\tseconds = sign * (hours * 3600 + minutes * 60)\n\t\t\treturn tz.tzoffset(None, seconds)\n\n\tdef render(self, date):\n\t\tif date.tzinfo is None:\n\t\t\treturn ''\n\t\telif date.tzinfo == tz.UTC:\n\t\t\treturn 'Z'\n\t\telse:\n\t\t\toff = date.utcoffset()\n\t\t\t# NOTE Copied from CPython 3.7 datetime.py _format_offset()\n\t\t\tstring = ''\n\t\t\tif off is not None:\n\t\t\t\tif off.days < 0:\n\t\t\t\t\tsign = \"-\"\n\t\t\t\t\toff = -off\n\t\t\t\telse:\n\t\t\t\t\tsign = \"+\"\n\t\t\t\thh, mm = divmod(off, timedelta(hours=1))\n\t\t\t\tmm, ss = divmod(mm, timedelta(minutes=1))\n\t\t\t\tstring += \"%s%02d:%02d\" % (sign, hh, mm)\n\t\t\t\tif ss or ss.microseconds:\n\t\t\t\t\tstring += \":%02d\" % ss.seconds\n\t\t\t\t\tif ss.microseconds:\n\t\t\t\t\t\tstring += '.%06d' % ss.microseconds\n\t\t\treturn string\n\n\nclass FormatOptionSet(object):\n\n\tdef __init__(self, options=()):\n\t\tself._options = {}\n\t\tself._cache = {}\n\t\tfor option in options:\n\t\t\tself.add(option)\n\n\tdef __repr__(self):\n\t\treturn 'FormatOptionSet({})'.format(''.join(sorted(self._options)))\n\n\tdef __getitem__(self, char):\n\t\treturn self._options[char]\n\n\tdef __contains__(self, char):\n\t\treturn char in self._options\n\n\tdef add(self, option):\n\t\tif not isinstance(option, BaseFormatOption):\n\t\t\traise TypeError('expected BaseFormatOption')\n\t\tif option.char in self._options:\n\t\t\traise ValueError('format char {!r} already allocated'.format(option.char))\n\t\tself._options[option.char] = option\n\n\tdef create_date_format(self, fmt):\n\t\t# TODO @NiklasRosenstein Work around cyclic reference, eg. 
with a weakref?\n\t\ttry:\n\t\t\treturn self._cache[fmt]\n\t\texcept KeyError:\n\t\t\tobj = self._cache[fmt] = DateFormat(fmt, self)\n\t\t\treturn obj\n\n\tdef create_format_set(self, name, formats):\n\t\tformats = [self.create_date_format(x) for x in formats]\n\t\treturn DateFormatSet(name, formats)\n\n\tdef parse(self, string, fmt):\n\t\treturn self.create_date_format(fmt).parse(string)\n\n\tdef format(self, date, fmt):\n\t\treturn self.create_date_format(fmt).format(date)\n\n\nclass DateFormat(object):\n\t\"\"\"\n\tRepresents a fully compiled fixed date format ready to parse and\n\tformat dates.\n\t\"\"\"\n\n\tdef __init__(self, string, option_set):\n\t\tindex = 0\n\t\tpattern = StringIO()\n\t\toptions = []\n\t\tjoin_sequence = []\n\t\tdef write(char):\n\t\t\tpattern.write(re.escape(string[index]))\n\t\t\tif join_sequence and isinstance(join_sequence[-1], str):\n\t\t\t\tjoin_sequence[-1] += string[index]\n\t\t\telse:\n\t\t\t\tjoin_sequence.append(string[index])\n\t\twhile index < len(string):\n\t\t\tif string[index] == '%':\n\t\t\t\tchar = string[index+1]\n\t\t\t\tif char != '%' and char not in option_set:\n\t\t\t\t\traise ValueError('Invalid date format \"%{}\"'.format(char))\n\t\t\t\tfo = option_set[char]\n\t\t\t\tif char == '%':\n\t\t\t\t\twrite('%')\n\t\t\t\telse:\n\t\t\t\t\tpattern.write('(' + fo.regex.pattern + ')')\n\t\t\t\t\toptions.append(fo)\n\t\t\t\t\tjoin_sequence.append(fo)\n\t\t\t\tindex += 2\n\t\t\telse:\n\t\t\t\twrite(string[index])\n\t\t\t\tindex += 1\n\n\t\tself._string = string\n\t\tself._regex = re.compile(pattern.getvalue())\n\t\tself._join_sequence = join_sequence\n\t\tself._options = options\n\n\tdef __repr__(self):\n\t\treturn 'DateFormat(string={!r})'.format(self.string)\n\n\t@property\n\tdef string(self):\n\t\treturn self._string\n\n\tdef parse(self, string):\n\t\tmatch = self._regex.match(string)\n\t\tif not match:\n\t\t\traise ValueError('Date \"{}\" does not match format {!r}'.format(\n\t\t\t\tstring, self.string))\n\t\tkwargs = {'year': 1900, 'month': 1, 'day': 1, 'hour': 0}\n\t\tfor option, value in zip(self._options, match.groups()):\n\t\t\tkwargs[option.dest] = option.parse(value)\n\t\treturn datetime(**kwargs)\n\n\tdef format(self, date):\n\t\tresult = StringIO()\n\t\tfor item in self._join_sequence:\n\t\t\tif isinstance(item, str):\n\t\t\t\tresult.write(item)\n\t\t\telse:\n\t\t\t\tresult.write(item.render(date))\n\t\treturn result.getvalue()\n\n\nclass DateFormatSet(list):\n\t\"\"\"\n\tRepresents a set of date formats.\n\t\"\"\"\n\n\tdef __init__(self, name, formats):\n\t\tself.name = name\n\t\tsuper(DateFormatSet, self).__init__(formats)\n\n\tdef __repr__(self):\n\t\treturn 'DateFormatSet({!r}, {})'.format(\n\t\t\tself.name, super(DateFormatSet, self).__repr__())\n\n\tdef parse(self, string):\n\t\tfor fmt in self:\n\t\t\ttry:\n\t\t\t\treturn fmt.parse(string)\n\t\t\texcept ValueError as exc:\n\t\t\t\tpass\n\t\traise ValueError('Date \"{}\" does not match any of the {!r} formats'\n\t\t\t.format(string, self.name))\n\n\tdef format(self, date):\n\t\treturn self[0].format(date)\n\n\nroot_option_set = FormatOptionSet([\n FormatOption('Y', 'year', r'\\d{4}', int, lambda d: str(d.year).rjust(4, '0')),\n FormatOption('m', 'month', r'\\d{2}', int, lambda d: str(d.month).rjust(2, '0')),\n FormatOption('d', 'day', r'\\d{2}', int, lambda d: str(d.day).rjust(2, '0')),\n FormatOption('H', 'hour', r'\\d{2}', int, lambda d: str(d.hour).rjust(2, '0')),\n FormatOption('M', 'minute', r'\\d{2}', int, lambda d: str(d.minute).rjust(2, '0')),\n FormatOption('S', 'second', 
r'\\d{2}', int, lambda d: str(d.second).rjust(2, '0')),\n    FormatOption('f', 'microsecond', r'\\d+', lambda s: int(s) * (10 ** max(6-len(s), 0)), lambda d: str(d.microsecond).rjust(6, '0').rstrip('0') or '0'),\n    TimezoneFormatOption(),\n])\n\n\ndef register_format_option(option):\n\troot_option_set.add(option)\n\n\ndef parse_date(string, fmt):\n\treturn root_option_set.parse(string, fmt)\n\n\ndef format_date(date, fmt):\n\treturn root_option_set.format(date, fmt)\n\n\ndef create_format_set(name, formats):\n\treturn root_option_set.create_format_set(name, formats)\n\n\nISO_8601 = create_format_set('iso:8601', [\n\t'%Y-%m-%dT%H:%M:%S.%f%z', # RFC 3339\n\t'%Y-%m-%dT%H:%M:%S.%f', # ISO 8601 extended format\n\t'%Y%m%dT%H%M%S.%f', # ISO 8601 basic format\n\t'%Y%m%d', # ISO 8601 basic format, date only\n])\n\nJAVA_OFFSET_DATETIME = create_format_set('java:OffsetDateTime', [\n\t'%Y-%m-%dT%H:%M:%S.%f%z',\n\t'%Y-%m-%dT%H:%M:%S%z',\n\t'%Y-%m-%dT%H:%M%z',\n])\n","sub_path":"nr.date/src/nr/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"450476732","text":"class Board():\n\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef setBoard(self):\n\t\tself.board = [['#'] * self.y for _ in range(self.x)]\n\t\treturn self.board\n\n\tdef changeBoardMiss(self, row, col): self.board[row][col] = 'X'\n\t\t\n\tdef changeBoardHit(self, row, col): self.board[row][col] = '*'\n\n\tdef changeBoardShip(self, ship):\n\t\tprint(ship)\n\t\tif len(ship) == 1:\n\t\t\tfor coord in ship:\n\t\t\t\tcol, row = coord\n\t\t\t\tself.board[row][col] = '>'\n\n\t\telif ship[0][0] == ship[1][0]:\n\t\t\tfor coord in ship:\n\t\t\t\tcol, row = coord\n\t\t\t\tself.board[row][col] = '^'\n\t\telse:\n\t\t\tfor coord in ship:\n\t\t\t\tcol, row = coord\n\t\t\t\tself.board[row][col] = '>'\n\n\t\t\n\tdef printBoard(self):\n\t\tprint(' ', end = ' ')\n\t\tfor i in range(self.y):\n\t\t\tprint (string.ascii_letters[i], end = ' ')\n\t\tprint()\n\t\tfor i,j in enumerate(self.board):\n\t\t\tif i < 9:\n\t\t\t\tprint('0' + str(i + 1), ' '.join(j))\n\t\t\telse:\n\t\t\t\tprint(i + 1, ' '.join(j))\n\nclass Player(Board):\n\n\tdef __init__(self, name, x, y):\n\t\tsuper().__init__(x, y)\n\t\tself.name = name\n\t\tself.ships = []\n\t\tself.status = False\n\t\tself.win = False\n\n\tdef setShip(self):\n\n\t\tdef checkCoord(coord):\n\t\t\t# indices are 0-based, so x and y themselves are out of range\n\t\t\tif coord[0] >= self.y or coord[1] >= self.x or coord[0] < 0 or coord[1] < 0:\n\t\t\t\treturn False\n\n\t\t\tfor ship in self.ships:\n\t\t\t\tif coord in ship: # or coord[0] == ship[0] + 1 or coord[0] == ship[0] - 1 or coord[1] == ship[1] - 1 or coord[1] == ship[1] + 1:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\n\t\tdef getShip(coords):\n\t\t\tship = []\n\t\t\tfor coord in coords:\n\t\t\t\tcoord = coord.split('-')\n\t\t\t\tcoord[1] = int(coord[1]) - 1\n\t\t\t\ti = 0\n\t\t\t\tfor letter in string.ascii_letters:\n\t\t\t\t\tif coord[0] == letter:\n\t\t\t\t\t\tcoord[0] = i\n\t\t\t\t\t\tbreak\n\t\t\t\t\ti += 1\n\t\t\t\tif checkCoord(coord):\n\t\t\t\t\tship.append(coord)\n\t\t\t\telse:\n\t\t\t\t\tprint('A ship cannot be placed at the chosen coordinates!')\n\t\t\t\t\tcont()\n\t\t\t\t\treturn False\n\n\t\t\treturn ship\n\n\t\tsize = int(input('Choose a ship size\\n1. 1-deck\\n2. 2-deck\\n3. 3-deck\\n4. 
4-deck\\n\\nChoice: '))\n\n\t\tif size == 1:\n\t\t\tcoords = input('Enter coordinates (example: a-1): ')\n\t\t\tcoords = [coords]\n\t\t\tprint(coords)\n\t\t\tship = getShip(coords)\n\t\t\tif ship is False:\n\t\t\t\treturn False\n\n\t\telif size == 2:\n\t\t\tcoords = input('Enter coordinates (example: a-1;a-2): ').split(';')\n\t\t\tship = getShip(coords)\n\t\t\tif ship is False:\n\t\t\t\treturn False\n\n\t\telif size == 3:\n\t\t\tcoords = input('Enter coordinates (example: a-1;a-2;a-3): ').split(';')\n\t\t\tship = getShip(coords)\n\t\t\tif ship is False:\n\t\t\t\treturn False\n\n\t\telif size == 4:\n\t\t\tcoords = input('Enter coordinates (example: a-1;a-2;a-3;a-4): ').split(';')\n\t\t\tship = getShip(coords)\n\t\t\tif ship is False:\n\t\t\t\treturn False\n\n\t\tself.ships.append(ship)\n\n\tdef getMove(self, obj):\n\n\t\tmove = input('Move of player {} (input example: a-1): '.format(self.name)).split('-')\n\t\ttry:\n\t\t\tmove[1] = int(move[1]) - 1\n\t\texcept (ValueError, IndexError):\n\t\t\t# input without a '-' raises IndexError here, not ValueError\n\t\t\tprint('Invalid input!')\n\t\t\tcont()\n\t\t\treturn False\n\n\t\tif len(move) < 2 or len(move[0]) > 1 or move[1] > 26 or move[0] not in string.ascii_letters or move[1] < 0:\n\t\t\tprint('Invalid input!')\n\t\t\tcont()\n\t\t\treturn False\n\t\t\t\n\t\ti = 0\n\t\tfor letter in string.ascii_letters:\n\t\t\tif move[0] == letter:\n\t\t\t\tmove[0] = i\n\t\t\t\tbreak\n\t\t\ti += 1\n\n\t\tif int(move[1]) > self.x or int(move[0]) - 1 > self.y:\n\t\t\tprint('You went outside the board')\n\t\t\tcont()\n\t\t\treturn False\n\n\t\telse:\n\t\t\tprint(move)\n\t\t\tfor ship in obj.ships:\n\t\t\t\tprint(ship)\n\t\t\t\tif move in ship:\n\t\t\t\t\tprint('Hit!')\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\t# for-else: runs only when no ship contained the move\n\t\t\t\tprint('Turn passes')\n\nclass Game():\n\n\tdef setPlayers(self):\n\t\ttry:\n\t\t\tnum_of_players = int(input('Enter the number of players (1/2): '))\n\t\texcept ValueError:\n\t\t\tprint('Enter a number!')\n\t\t\tcont()\n\t\t\t# retry instead of falling through with num_of_players unbound\n\t\t\treturn self.setPlayers()\n\t\tif num_of_players < 1 or num_of_players > 2:\n\t\t\tprint('You must enter a number from 1 to 2!')\n\t\t\tcont()\n\t\t\tsys.exit()\n\n\t\treturn int(num_of_players)\n\t\n\tdef startGame(self):\n\t\tclear()\n\t\tnum_of_players = self.setPlayers()\n\t\tif num_of_players == 1:\n\t\t\tdef loop():\n\t\t\t\tsize = input('Enter the board size (example: 10x10): ').split('x')\n\t\t\t\tif len(size) != 2 or not size[0].isdigit() or not size[1].isdigit() or int(size[0]) > 26 or int(size[1]) > 26 or int(size[0]) <= 0 or int(size[1]) <= 0:\n\t\t\t\t\tprint('Invalid input!')\n\t\t\t\t\tloop()\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\telif num_of_players == 2:\n\t\t\tplayer1_name = input('Enter a name (Player1): ')\n\t\t\tplayer2_name = input('Enter a name (Player2): ')\n\n\t\t\tif player1_name == '':\n\t\t\t\tplayer1_name = 'Player1'\n\t\t\tif player2_name == '':\n\t\t\t\tplayer2_name = 'Player2'\n\n\t\t\tdef loop():\n\t\t\t\tsize = input('\\nEnter the board size (example: 10x10): ').split('x')\n\t\t\t\tif len(size) != 2 or not size[0].isdigit() or not size[1].isdigit() or int(size[0]) > 26 or int(size[1]) > 26 or int(size[0]) <= 0 or int(size[1]) <= 0:\n\t\t\t\t\tprint('Invalid input!')\n\t\t\t\t\t# retry and stop: without this return an invalid size fell\n\t\t\t\t\t# through to Board() below and crashed\n\t\t\t\t\treturn loop()\n\n\t\t\t\tboard = Board(int(size[0]), int(size[1]))\n\t\t\t\tplayer1 = Player(player1_name, int(size[0]), int(size[1]))\n\t\t\t\tplayer2 = Player(player2_name, int(size[0]), int(size[1]))\n\t\t\t\tplayer1.setBoard()\n\t\t\t\tplayer2.setBoard()\n\n\n\t\t\t\tplayers = [player1, player2]\n\n\t\t\t\twhile True:\n\t\t\t\t\tfor player in players:\n\t\t\t\t\t\tfor _ 
in range(2):\n\t\t\t\t\t\t\tclear()\n\t\t\t\t\t\t\tprint('Ship placement for {}'.format(player.name))\n\t\t\t\t\t\t\tplayer.printBoard()\n\t\t\t\t\t\t\tplayer.setShip()\n\t\t\t\t\t\t\tfor ship in player.ships:\n\t\t\t\t\t\t\t\tplayer.changeBoardShip(ship)\n\n\n\n\t\t\tloop()\n\n\t\t# \tsize = input('Enter the board size (example: 10x10): ').split('x')\n\t\t# \tif len(size) != 2 or not size[0].isdigit() or not size[1].isdigit() or int(size[0]) > 26 or int(size[1]) > 26 or int(size[0]) <= 0 or int(size[1]) <= 0:\n\t\t# \t\tprint('Invalid input!')\n\t\t# \t\tloop()\n\n\t\t# \telse:\n\t\t# \t\tplayers = self.setPlayers()\n\t\t# \t\tboard = Board(int(size[0]), int(size[1]))\n\t\t# \t\tboard.setBoard()\n\t\t# \t\tship_col, ship_row = board.setShip()\n\t\t# \t\tdone = False\n\t\t# \t\twhile not done:\n\t\t# \t\t\tfor name in players:\n\t\t\t\t\t\t\n\t\t# \t\t\t\t\tclear()\n\t\t# \t\t\t\t\tboard.printBoard()\n\t\t# \t\t\t\t\tplayer = Player(name, int(size[0]), int(size[1]))\n\t\t# \t\t\t\t\ttry:\n\t\t# \t\t\t\t\t\trow, col = player.getMove(ship_row, ship_col)\n\t\t# \t\t\t\t\texcept TypeError:\n\t\t# \t\t\t\t\t\tmain()\n\n\t\t# \t\t\t\t\tif player.status == True:\n\t\t# \t\t\t\t\t\tboard.changeBoardWin(row, col)\n\t\t# \t\t\t\t\t\tboard.printBoard()\n\t\t# \t\t\t\t\t\tprint('\\n...Game over...')\n\t\t# \t\t\t\t\t\tcont()\n\t\t# \t\t\t\t\t\tdone = True\n\t\t# \t\t\t\t\t\tsys.exit()\n\t\t# \t\t\t\t\telse:\n\t\t# \t\t\t\t\t\tclear()\n\t\t# \t\t\t\t\t\tboard.changeBoardMiss(row, col)\n\t\t# \t\t\t\t\t\tboard.printBoard()\n\t\t# \t\t\t\t\t\tprint('\\nMiss!')\n\t\t# \t\t\t\t\t\tcont()\n\t\t# \t\t\t\tmain()\n\t\t# loop()\nif __name__ == '__main__':\n\timport string, sys, os\n\n\tdef cont(): return input('\\n.....Enter any character to continue.....')\n\tdef clear(): \n\t\tif sys.platform.startswith('win'):\n\t\t\treturn os.system('cls')\n\t\telse:\n\t\t\treturn os.system('clear')\n\n\tgame = Game()\n\tgame.startGame()\n\n","sub_path":"battleOOP2.0.py","file_name":"battleOOP2.0.py","file_ext":"py","file_size_in_byte":7143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"409650812","text":"import unittest\nfrom task_1_exp import remove_text_by_regex\nfrom task_1 import remove_text\n\nclass TestFirstTask(unittest.TestCase):\n    '''\n    Tests for the first task\n    '''\n    def setUp(self):\n        self.cases = [\n            ['', ''],\n            ['()', ''],\n            ['asdflj (kla (inner) asd) port (another ))(unclosed', 'asdflj port )(unclosed'],\n            ['(adsguwa asd ( (jh jk ) h )', '(adsguwa asd '],\n            ['( adf) space ( ) 11(9)asdf ()', ' space 11asdf '],\n            ['( asdf )f asdf((( asdfasdf)) ()) df ))', 'f asdf df ))'],\n        ]\n\n    def test_regex(self):\n        ''' Test of the regex-based function '''\n        for case in self.cases:\n            with self.subTest(case=case):\n                
self.assertEqual(remove_text_by_regex(case[0]), case[1])\n\n    def test_custom_method(self):\n        ''' Test of the hand-written function '''\n        for case in self.cases:\n            with self.subTest(case=case):\n                self.assertEqual(remove_text(case[0]), case[1])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"task_1/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"491298695","text":"from django.urls import path\nfrom app import views\n \nurlpatterns=[\n    path('GameCategories/',views.getGameCategories,name='GameCategories'),\n    path('Games',views.getGames,name='Games'),\n    path('GameCategories//',views.getGamesByCategory,name='GamesByCategory'),\n    # path('UsersPost/',views.addUser,name='addUser')\n    path('Games//',views.getGameByName,name='GameByName'),\n    path('Users',views.getUsers,name='Users'),\n    path('Users/',views.getUserByName,name=\"UserByName\"),\n    path('Users/UserByID/',views.getUserByID,name=\"UserByID\"),\n    path('Comments',views.comments,name=\"Comments\")\n]","sub_path":"geeko_backend/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"586270583","text":"from prototype.prototype_1 import Prototype\nfrom copy import deepcopy\n\n\nclass Archer(Prototype):\n    def __init__(self, level):\n        self.unit_type = \"Archer\"\n        filename = \"{}_{}.dat\".format(self.unit_type, level)\n        with open(filename, \"r\") as parameter_file:\n            lines = parameter_file.read().split(\"\\n\")\n            self.life = lines[0]\n            self.speed = lines[1]\n            self.attack_power = lines[2]\n            self.attack_range = lines[3]\n            self.weapon = lines[4]\n\n    def __str__(self):\n        return \"Life: {0}\\n\" \\\n               \"Speed: {1}\\n\" \\\n               \"Attack Power: {2}\\n\" \\\n               \"Attack Range: {3}\\n\" \\\n               \"Weapon: {4}\".format(self.life, self.speed, self.attack_power, self.attack_range, self.weapon)\n\n    def clone(self):\n        return deepcopy(self)\n","sub_path":"prototype/archer.py","file_name":"archer.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"275025640","text":"import math\n\n\ndef is_prime(num):\n    prime = True\n    for i in range(2, int(math.sqrt(num) + 1)):\n        if num % i == 0:\n            prime = False\n            break\n\n    return prime\n\n\ndef specific_prime(n_prime):\n    prime_count = 0\n    for i in range(2, 1000000):\n        if is_prime(i):\n            prime_count = prime_count + 1\n        if prime_count >= n_prime:\n            return i\n\n    return -1\n\n\nprint(specific_prime(10001))","sub_path":"euler/problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"16914484","text":"# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport json\nimport sys\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PySide6.QtWidgets import QApplication\nfrom PySide6.QtWidgets import QLabel\nfrom PySide6.QtWidgets import QWidget\nfrom PySide6.QtWidgets import QLineEdit\nfrom PySide6.QtWidgets import QHBoxLayout\nfrom PySide6.QtCore import QObject, Signal, Slot\n\n\nurl = ''\n\n\ndef print_hi(name):\n    # Use a breakpoint in the code line below to debug your script.\n    print(f'Hi, {name}') # Press ⌘F8 to toggle the 
breakpoint.\n\n\ndef download_douyin_video(link_url):\n    # url = 'https://v.douyin.com/edMM3xM/'\n    headers = {\n        \"authority\": \"v.douyin.com\",\n        \"method\": \"GET\",\n        # \":path\": \"/edMM3xM/\"\n        \"scheme\": \"https\",\n        \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n        \"accept-encoding\": \"gzip, deflate, br\",\n        \"accept-language\": \"en,zh-CN;q=0.9,zh;q=0.8,ja;q=0.7\",\n        \"cache-control\": \"no-cache\",\n        \"dnt\": \"1\",\n        \"pragma\": \"no-cache\",\n        \"sec-ch-ua\": \"\\\"Chromium\\\";v=\\\"88\\\", \\\"Google Chrome\\\";v=\\\"88\\\", \\\";Not A Brand\\\";v=\\\"99\\\"\",\n        # sec-ch-ua-mobile: ?0\n        # sec-fetch-dest: document\n        # sec-fetch-mode: navigate\n        # sec-fetch-site: none\n        # sec-fetch-user: ?1\n        \"upgrade-insecure-requests\": \"1\",\n        \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36\"\n    }\n    # Request the initial short link; it answers with a 302 redirect\n    r = requests.get(link_url, headers=headers)\n    # print(r.url)\n    # The redirected URL is different and contains the id needed for the follow-up request\n    r = requests.get(r.url)\n    # print(r.url)\n    # Parse the URL to extract that id\n    parsed_url = urlparse(r.url)\n    url_path = parsed_url.path\n    end = url_path.rindex('/')\n    start = url_path.rindex('/', 0, end - 1)\n    id = url_path[start + 1: end]\n\n    request_url = 'https://www.iesdouyin.com/web/api/v2/aweme/iteminfo/?item_ids={0}'\n\n    headers = {\n        \"authority\": \"www.iesdouyin.com\",\n        \"method\": \"GET\",\n        \"path\": \"/web/api/v2/aweme/iteminfo/?item_ids={0}\".format(id),\n        \"scheme\": \"https\",\n        \"accept\": \"*/*\",\n        \"cookie\": \"_ba=BA0.2-20190804-5199e-ju4rlZVUr3DUAKoGuCzg; _ga=GA1.2.2052427955.1564850335;\",\n        \"referer\": r.url,\n        \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36\",\n        \"x-requested-with\": \"XMLHttpRequest\"\n    }\n    # 'https://www.iesdouyin.com/web/api/v2/aweme/iteminfo/?item_ids=6936055700275694879'\n    r = requests.get(request_url.format(id), headers=headers)\n    data = json.loads(r.text)\n    download_url = data['item_list'][0]['video']['play_addr']['url_list'][0]\n    download_url_no_wm = str(download_url).replace('playwm', 'play')\n\n    r = requests.get(download_url_no_wm, stream=True)\n\n    file_name = '{0}.mp4'.format(id)\n    with open(file_name, 'wb') as f:\n        for chunk in r.iter_content(chunk_size=1024 * 1024):\n            if chunk:\n                f.write(chunk)\n    print(\"{0} has been downloaded!\".format(file_name))\n\n\nclass Downloader(QWidget):\n    def __init__(self):\n        super(Downloader, self).__init__()\n        self.download_url = ''\n        lay = QHBoxLayout()\n        lineEdit = QLineEdit()\n        lineEdit.setMinimumWidth(300)\n        lineEdit.textChanged.connect(self.change_text)\n        lineEdit.returnPressed.connect(self.start_download)\n        lay.addWidget(lineEdit)\n        self.setLayout(lay)\n        self.setWindowTitle(\"Douyin Downloader\")\n\n    @Slot(str)\n    def change_text(self, text):\n        self.download_url = text\n        # print(url, len(url))\n\n    def start_download(self):\n        if len(self.download_url) > 28:\n            print('start to download {0}.'.format(self.download_url))\n            download_douyin_video(self.download_url)\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    # w = QWidget()\n    # lay = QHBoxLayout()\n    # lineEdit = QLineEdit()\n    # lineEdit.setMinimumWidth(300)\n    # lineEdit.textChanged.connect(change_text)\n    # 
lineEdit.returnPressed.connect(start_download)\n    # lay.addWidget(lineEdit)\n    # w.setLayout(lay)\n    # w.show()\n    w = Downloader()\n    w.show()\n    sys.exit(app.exec_())\n    # url = 'https://v.douyin.com/edMM3xM/'\n    # if len(sys.argv) < 2:\n    #     download_douyin_video(url)\n    # else:\n    #     for link_url in sys.argv[1:]:\n    #         download_douyin_video(link_url)\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"549905643","text":"# creating a part manager application\nfrom tkinter import *\nfrom tkinter import messagebox\n\nimport dbss\n\n# create window object\n\napp = Tk()\n\napp.title('Part Manager')\napp.geometry('700x350')\n\n\n# functions\n\n\ndef populate_list():\n    parts_list.delete(0, END)\n    for row in dbss.fetch():\n        parts_list.insert(END, row)\n    # print('populate')\n\n\ndef add_item():\n    if part_text.get() == \"\" or customer_text.get() == \"\" or retailer_text.get() == \"\" or price_text.get() == \"\":\n        messagebox.showerror('Required Fields', 'Please include all fields.')\n        return\n    dbss.insert(part_text.get(), customer_text.get(), retailer_text.get(), price_text.get())\n    parts_list.delete(0, END)\n    parts_list.insert(END, (part_text.get(), customer_text.get(), retailer_text.get(), price_text.get()))\n    clear_text()\n    populate_list()\n\n\ndef select_item(event):\n    try:\n        global selected_item\n        index = parts_list.curselection()[0]\n        selected_item = parts_list.get(index)\n        # print('selected')\n\n        part_entry.delete(0, END)\n        part_entry.insert(END, selected_item[1])\n        customer_entry.delete(0, END)\n        customer_entry.insert(END, selected_item[2])\n        retailer_entry.delete(0, END)\n        retailer_entry.insert(END, selected_item[3])\n        price_entry.delete(0, END)\n        price_entry.insert(END, selected_item[4])\n    except IndexError as ie:\n        print(ie)\n\n\ndef remove_item():\n    try:\n        dbss.remove(selected_item[0])\n        clear_text()\n        populate_list()\n    except NameError as ne:\n        print(ne)\n\n\ndef update_item():\n    try:\n        dbss.update(selected_item[0], part_text.get(), customer_text.get(), retailer_text.get(), price_text.get())\n        populate_list()\n    except NameError:\n        pass\n\n\ndef clear_text():\n    part_entry.delete(0, END)\n\n    customer_entry.delete(0, END)\n\n    retailer_entry.delete(0, END)\n\n    price_entry.delete(0, END)\n\n\n# the menus\nmenu_bar = Menu(app)\nfile = Menu(menu_bar, tearoff=0)\nfile.add_command(label='New')\nfile.add_command(label='Open Folder')\nfile.add_command(label='Save')\nfile.add_command(label='Save As')\n\nfile.add_separator()\nfile.add_command(label='Exit', command=app.quit)\nmenu_bar.add_cascade(label='File', menu=file)\n\n# display the menu\napp.config(menu=menu_bar)\n\n# parts\n\n\npart_text = StringVar()\n\npart_label = Label(app, text='Part Name', font=('courier', 14, 'bold'), pady=20)\npart_label.grid(row=0, column=0, sticky=W)\npart_entry = Entry(app, textvariable=part_text)\npart_entry.grid(row=0, column=1)\n\n# customer\ncustomer_text = StringVar()\n\npart_label = Label(app, text='Customer', font=('courier', 14, 'bold'), pady=20)\npart_label.grid(row=0, column=2, sticky=W)\ncustomer_entry = Entry(app, textvariable=customer_text)\ncustomer_entry.grid(row=0, column=3)\n\n# retailer\nretailer_text = StringVar()\n\npart_label = Label(app, text='Retailer', font=('courier', 14, 'bold'), pady=20)\npart_label.grid(row=1, column=0, 
sticky=W)\nretailer_entry = Entry(app, textvariable=retailer_text)\nretailer_entry.grid(row=1, column=1)\n\n# price\nprice_text = StringVar()\n\npart_label = Label(app, text='Price', font=('courier', 14, 'bold'), pady=20)\npart_label.grid(row=1, column=2, sticky=W)\nprice_entry = Entry(app, textvariable=price_text)\nprice_entry.grid(row=1, column=3)\n\n# scrollbar\n\nscrollbar = Scrollbar(app)\nscrollbar.grid(row=3, column=3)\n\n# part list : list box\n\nparts_list = Listbox(app, height=8, width=50, border=0, bg='#ecf3f4',\n                     cursor='arrow', font=('Helvetica', 10, 'normal'))\nparts_list.grid(row=3, column=0, columnspan=3, rowspan=6, pady=20, padx=25)\n\n# set scrollbar to listbox\nparts_list.configure(yscrollcommand=scrollbar.set)\nscrollbar.config(command=parts_list.yview)\n\n# bind select (the standard tkinter virtual event for Listbox selection)\nparts_list.bind('<<ListboxSelect>>', select_item)\n\n# buttons\nadd_btn = Button(app, text='Add part', activeforeground=('#07bafc'), activebackground=('#d8f2e7'),\n                 width=12, relief=RIDGE, bg='#0091f9', command=add_item)\nadd_btn.grid(row=2, column=0, pady=20)\n\nremove_btn = Button(app, text='Remove part', activeforeground=('#07bafc'),\n                    activebackground=('#d8f2e7'), width=12, relief=RIDGE, bg='#0091f9', command=remove_item)\nremove_btn.grid(row=2, column=1, pady=20)\n\nupdate_btn = Button(app, text='Update part', activeforeground=('#07bafc'),\n                    activebackground=('#d8f2e7'), width=12, relief=RIDGE, bg='#0091f9', command=update_item)\nupdate_btn.grid(row=2, column=2, pady=20)\n\nclear_btn = Button(app, text='Clear Input', activeforeground=('#07bafc'),\n                   activebackground=('#d8f2e7'), width=12, relief=RIDGE, bg='#0091f9', command=clear_text)\nclear_btn.grid(row=2, column=3, pady=20)\n\n# populate data\n\npopulate_list()\n\n# start program\napp.mainloop()\n","sub_path":"PartManager/part manager.py","file_name":"part manager.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"356655064","text":"with open('air.txt') as file_air:\n    lines = file_air.readlines()\nstr_ = ''\nlist_str = []\nfor line in lines:\n    str_ += line.strip().replace('.', '').replace(',', '').replace(' ', '').lower()\n\nfor i in str_:\n    list_str.append(i)\n\nlist_d = {}.fromkeys(list_str, 0)\nfor a in list_str:\n    list_d[a] += 1\nmax_value = max(list_d.values())\nfinal_dict = {k:v for k, v in list_d.items() if v == max_value} \nprint(list_d)\nprint('Max Value:')\nprint(final_dict)\n\nfile_air.close()\n","sub_path":"task1_air_txt.py","file_name":"task1_air_txt.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"449636608","text":"# #connect to database\n# #create a cursor object\n# #Write an SQL query\n# #Commit changes\n# #close the connection\nimport os\nimport sqlite3\n\ndef create_table():\n    BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n    # join directory and file name; sqlite3.connect() takes a single path\n    conn = sqlite3.connect(os.path.join(BASE_DIR, \"lite.db\"))\n    cur = conn.cursor()\n    cur.execute(\"CREATE table if NOT EXISTS 'store'(item TEXT, quantity INTEGER, price REAL)\")\n    conn.commit()\n    conn.close()\n\ndef insert(item,quantity,price):\n    conn = sqlite3.connect(\"lite.db\")\n    cur = conn.cursor()\n    cur.execute(\"INSERT INTO store VALUES(?,?,?)\",(item,quantity,price))\n    conn.commit()\n    conn.close()\n\ninsert(\"cofee bar\", 10, 5)\n\ndef view():\n    conn = sqlite3.connect(\"lite.db\")\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM store\")\n    rows = cur.fetchall()\n    conn.close()\n    return rows\n\nprint(view())\n\n# import 
mysql.connector\n# word = input(\"Enter a word in English and press Enter: \")\n# con = mysql.connector.connect(\n# user=\"ardit700_student\", \n# password = \"ardit700_student\", \n# host=\"108.167.140.122\", \n# database = \"ardit700_pm1database\"\n# )\n# cursor = con.cursor()\n# query = cursor.execute(\"SELECT * FROM Dictionary WHERE Expression = '%s'\" % word)\n# results = cursor.fetchall()\n# if results:\n# for result in results:\n# print(result[1])\n# else:\n# print(\"We couldn't find any results about that.\")\n","sub_path":"Database_interaction/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"471175524","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.utils import shuffle\nfrom DataImporting import ImportData\nfrom sklearn.neural_network import MLPClassifier\n# feature selection\nfrom sklearn.feature_selection import RFECV\n\n# model selection\nfrom sklearn import model_selection\n\n# models\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n\nfrom sklearn.preprocessing import MinMaxScaler\n\n#defining enviroment variables\nSEED = 7\n#SCORING = 'accuracy'\nSCORING = 'f1'\nPATH =r\"C:\\‏‏PycharmProjects\\AnxietyClassifier\\ExtractedFeatures\\data_features_for_each_matrix_features_processed_v2.csv\"\nSHEET_NAME = \"Sheet1\"\n\ngroup_column = 'group'\nsubject_number_column = 'Subject_Number'\n\n\ndef get_data(dateset):\n X_train = dateset.drop([group_column,subject_number_column], 1)\n Y_train = dateset[group_column]\n\n return X_train,Y_train\n\n\n\ndef grid_search(pipe, search_space, X, Y):\n clf = GridSearchCV(pipe, search_space, cv=10, scoring='f1')\n best_model = clf.fit(X, Y)\n #print(clf.best_score_,\"\\nclf.best_score_\")\n #print(best_model.best_estimator_.get_params()['classifier'], \"\\nbest classifier\")\n return best_model\n\ndef get_pipeline():\n # Create a pipeline\n pipe = Pipeline([('classifier', RandomForestClassifier())])\n # Create space of candidate learning algorithms and their hyperparameters\n search_space = [#{'classifier': [RandomForestClassifier()]},\n # {'classifier': [AdaBoostClassifier()]},\n # {'classifier': [GradientBoostingClassifier()]},\n # {'classifier': [LinearDiscriminantAnalysis()]},\n # {'classifier': [KNeighborsClassifier()]},\n # {'classifier': [SVC(kernel='rbf')]},\n # {'classifier': [SVC(kernel='linear')]},\n # {'classifier': [QuadraticDiscriminantAnalysis()]},\n # {'classifier': [LogisticRegression()]},\n {'classifier': [MLPClassifier(solver='adam', hidden_layer_sizes=(50, 50, 50, 1))]}\n ]\n return pipe, search_space\n\n\ndef get_RFE_pipeline(X_train, Y_train):\n lda = SVC(kernel='linear')\n rfecv = RFECV(estimator=lda, step=1, scoring=SCORING)\n rfecv.fit_transform(X_train, Y_train)\n cv_results = model_selection.cross_val_score(rfecv, X_train, Y_train, scoring=SCORING)\n print(cv_results)\n return rfecv\n\n\ndef choose_label(best_model, X_test):\n label_dict = {0:0, 1:0}\n for index, X in X_test.iterrows():\n y = 
best_model.predict(X.reshape(1,24))\n label_dict[int(y[0])] += 1\n key_max = max(label_dict.keys(), key=(lambda k: label_dict[k]))\n print(\"label_dict\\n\", label_dict)\n return key_max\n\n\ndef LOO(df):\n acc_list = []\n\n i = 0\n for sub in df[subject_number_column].unique():\n i+=1\n train_df = df[df[subject_number_column] != sub]\n X_train = train_df.drop([group_column, subject_number_column], 1)\n Y_train = train_df[group_column]\n #print(\"i\", i)\n test_df = df[df[subject_number_column] == sub]\n X_test = test_df.drop([group_column,subject_number_column], 1)\n Y_test =test_df[group_column][test_df.index[0]]\n\n pipe, search_space = get_pipeline()\n best_model = grid_search(pipe, search_space, X_train, Y_train)\n Y_hat = choose_label(best_model, X_test)\n #print(\"y hat\", Y_hat, \"y test\", Y_test)\n acc_list.append(Y_test == Y_hat)\n #print(\"tmp acc\", np.array(acc_list).mean())\n return np.array(acc_list).mean()\n\ndef rfecv_LOO(df):\n acc_list = []\n\n i = 0\n for sub in df[subject_number_column].unique():\n i+=1\n train_df = df[df[subject_number_column] != sub]\n X_train = train_df.drop([group_column, subject_number_column], 1)\n Y_train = train_df[group_column]\n #print(\"i\", i)\n test_df = df[df[subject_number_column] == sub]\n X_test = test_df.drop([group_column,subject_number_column], 1)\n Y_test =test_df[group_column][test_df.index[0]]\n\n best_model = get_RFE_pipeline(X_train, Y_train)\n Y_hat = choose_label(best_model, X_test)\n #print(\"y hat\", Y_hat, \"y test\", Y_test)\n acc_list.append(Y_test == Y_hat)\n #print(\"tmp acc\", np.array(acc_list).mean())\n return np.array(acc_list).mean()\n\n\ndef runner(path,sheet_name):\n\n dataset = ImportData.refactor_labels(ImportData.get_data(path, sheet_name, csv=1), \"group\")\n dataset = shuffle(dataset)\n print(\"LOO score\", LOO(dataset))\n\n\n\n\n\n\nrunner(PATH,SHEET_NAME)","sub_path":"RunningAlgorithms/loo_rfe_each_matrix.py","file_name":"loo_rfe_each_matrix.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"424600422","text":"#!/usr/bin/env python3\nimport rclpy\nfrom rclpy.node import Node\n\n\nclass MyCustomNode(Node): # MODIFY NAME\n def __init__(self):\n super().__init__(\"node_name\") # MODIFY NAME\n\n\ndef main(args=None):\n rclpy.init(args=args)\n node = MyCustomNode() # MODIFY NAME\n rclpy.spin(node)\n rclpy.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"template/rclpy/node_oop_template.py","file_name":"node_oop_template.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"557413574","text":"import class_stack\n\n\nclass carPark : \n def __init__(self):\n self.car = class_stack.stack()\n def arrive(self,i):\n if self.car.isFull():\n return 'SOI FULL'\n else: \n self.car.push(i)\n space = 4 - self.car.size()\n return 'space left ' + str(space) \n def depart(self,i) :\n if self.car.isEmpty() :\n return \"SOI EMPTY\"\n else:\n if i in self.car.items:\n temp = class_stack.stack()\n for c in range(self.car.size(),-1,-1):\n if c != i:\n temp.push(c)\n self.car.items.pop()\n else:\n self.car.items.pop()\n break\n print('\\tpop ' + str(c), end =' ')\n print('pop ' + str(i), end = '')\n for c in range(len(temp.items)-1,-1,-1):\n print('push ' +str(temp.items[c]),end='')\n self.car.push(temp.items[c])\n space = 4 - len(self.car.items)\n print('\\nspace left ' + str(space))\n else : \n return 'No Car ' + 
str(i)\n\n\ns = carPark()\n\n\nprint(s.depart(6))\nprint(s.arrive(1))\nprint(s.arrive(2))\nprint(s.arrive(3))\nprint(s.arrive(4))\nprint(s.arrive(5))\nprint(s.car.items)\nprint(s.depart(7))\ns.depart(2)\nprint(s.car.items)\ns.depart(3)\nprint(s.car.items)\n\n\n\n\n","sub_path":"Lab/Data-Struct-Lab-2D/02_stack/Stack_Lab_3.py","file_name":"Stack_Lab_3.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"22891419","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', include('home.urls', namespace='home')),\n    path('account/', include('account.urls', namespace='account')),\n    path('post/', include('post.urls', namespace='post')),\n    path('category/', include('category.urls', namespace='category')),\n    path('activity/', include('activity.urls', namespace='activity')),\n    path('ckeditor/', include('ckeditor_uploader.urls')),\n]\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"virgool/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"93516369","text":"from django.shortcuts import render, get_object_or_404 \nfrom datetime import datetime\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom models import MessageModel\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nimport pusher\n\n@login_required\ndef board(request):\n    messages = MessageModel.objects.order_by('-creation_date')[:6]\n    return render(request, 'message_board/board.html', {'board_page_active': 'active',\n        'messages' : messages})\n\n@login_required\ndef message_ajax(request):\n    if request.method == 'POST':\n        message = request.POST.get('message', '')\n        if len(message) > 0:\n            username = request.user.username\n            #now = timezone.now()\n            now = datetime.now()\n            m = MessageModel(username=username, message=message, creation_date=now)\n            m.save()\n\n            # create push event with the data of a new message\n            pusher_client = pusher.Pusher(\n                app_id='187082',\n                key='5729b7b62f550fc1c6ff',\n                secret='a267a54361c086c8107d',\n                cluster='eu',\n                ssl=True\n            )\n            pusher_client.trigger('message_channel', 'message_save_event', \n                {'username': m.username,\n                'message': m.message,\n                'date_string': m.date_string\n                })\n            return HttpResponse('message sent through pusher')\n    # old no real time shit\n    #messages = MessageModel.objects.order_by('-creation_date')[:5]\n    #return render(request, 'message_board/messages_template.html', {\n    #    'messages': messages })\n    return HttpResponse('message not created, not sent')\n","sub_path":"mysite/message_board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"589465099","text":"#!/usr/bin/env python3\n# -*-encoding: utf-8-*-\n\nimport cgi\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom structure import *\n\nform = cgi.FieldStorage()\n\n\nif COST in form or REVENUE in form:\n    item_type = COST if COST in form else REVENUE\n    diagram = DIAGRAM_COSTS if COST in form else DIAGRAM_REVENUES\n\n    # get the dictionary 
of categories {Name1: id1, ...}\n    categories = name_dict(data_connector.get_categories(item_type))\n    counter = {}\n\n    # compute the sum for each category\n    for k, v in categories.items():\n        counter[k] = data_connector.get_sum(item_type=item_type, Category_id=v)\n\n    # build the bar chart (taken from stackoverflow)\n    labels, values = zip(*counter.items())\n\n    indexes = np.arange(len(labels))\n    width = 0.6\n\n    fig = plt.figure(figsize=(9, 7))\n    plt.bar(indexes, values, width)\n    plt.xticks(indexes + width * 0.5, labels)\n\n    plt.savefig(diagram)\n    with open(DIAGRAM_PATTERN, 'r', encoding='utf-8') as file:\n        page = file.read()\n\n    print(change_html(page.format(type=item_type, diagram=diagram), mode=STRING_MODE))\n","sub_path":"math_projects/bogdan/cgi_bin/build_diagram.py","file_name":"build_diagram.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"600445824","text":"\"\"\"changed author detail ID column to author detail group ID to accommodate for multiple author profiles\n\nRevision ID: dedf61559215\nRevises: 5975cb87844c\nCreate Date: 2020-01-18 16:03:40.001948\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'dedf61559215'\ndown_revision = '5975cb87844c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('fact_researchers', sa.Column('scopus_author_detail_groups_id', sa.Integer(), nullable=True))\n    op.drop_constraint('fact_researchers_scopus_author_detail_id_fkey', 'fact_researchers', type_='foreignkey')\n    op.create_foreign_key(None, 'fact_researchers', 'dim_scopus_author_detail_groups', ['scopus_author_detail_groups_id'], ['id'])\n    op.drop_column('fact_researchers', 'scopus_author_detail_id')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('fact_researchers', sa.Column('scopus_author_detail_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'fact_researchers', type_='foreignkey')\n op.create_foreign_key('fact_researchers_scopus_author_detail_id_fkey', 'fact_researchers', 'dim_scopus_author_detail', ['scopus_author_detail_id'], ['id'])\n op.drop_column('fact_researchers', 'scopus_author_detail_groups_id')\n # ### end Alembic commands ###\n","sub_path":"masterlist_dw/app/migrations/versions/dedf61559215_changed_author_detail_id_column_to_.py","file_name":"dedf61559215_changed_author_detail_id_column_to_.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"532999290","text":"# Interact with Core\n#\n# Input events: from config\n# Output events: coretickets.ticket_created\n# coretickets.ticket_closed\n# coretickets.error\n\nimport xmlrpclib\n\nfrom siren.common.framework import *\n\n\nclass CoreTickets(Plugin):\n def _publish(self, class_type, msg):\n event_class = \"{0}.{1}\".format(self.plugin_name, class_type)\n scan_result_event = self.new_event(event_class, msg)\n self.publish(scan_result_event)\n\n def init(self):\n\n ### CHECK IF THE TOKEN IS VALID\n\n # Set up the connection to core and authenticate\n self.core_server = self.config.get(\"core_url\")\n try:\n auth = xmlrpclib.ServerProxy('%s/Auth' % self.core_server)\n self.auth_token = auth.systemLogin(core_user, core_pass)\n except:\n self.error(\"Error: Authenticating with CORE failed\")\n self.base_url = self.core_server + self.auth_token\n\n # Read the server number\n try:\n with open('/root/.servernumber') as f:\n self.server_number = f.readline().strip()\n except:\n self.error(\"Unable to find Server Number file /root/.servernumber\")\n\n # Get account and server information\n try:\n contact = xmlrpclib.ServerProxy(\n '{0}/Contact/::session_id::'.format(self.base_url))\n contacts = contact.getCustomerContactsForAccount(self.account)\n self.contacts = contacts['Techical']\n server = xmlrpclib.ServerProxy('{0}/Computer/::session_id::'.format(\n self.base_url))\n server_info = server.getDetailsByComputers([self.server_number])\n except:\n self.error(\"Failed to pull server and account information from Core\")\n self.account_id = server_info['customer']\n self.datacenter = server_info['datacenter']\n\n @Subscribe(\"removed_drives.success\")\n def create_ticket(self, event):\n \"\"\"\n Create a ticket for the failed device.\n\n :returns: If error code is 0 then returns ticket number, but if\n an error occurs it will return a message\n \"\"\"\n msg_queue = defaultdict(list)\n\n # TODO: Need to parse drive info from the event data (it's a list!)\n\n # Ticket Attributes\n category_id = 2288 # Hardware\n subcategory_id = 13503 # Hard Drive\n severity_id = 2 # Urgent\n is_private = False\n private_first_message = False\n assignee = 0\n send_message_text = True\n status = 1\n contact_email_type = 1\n priority = 1\n\n\n requester_info = self.contacts.blahblahblah # Bookmark\n requester_name = requester_info['assignee']\n requester_id = requester_info['assignee_id']\n\n recipients = create_opsid_list(ops)\n subject = ticket_template['subject']\n body = ticket_template['body']\n\n queue_ids = {'DFW1': {'name': 'DCOPS (DFW1)', 'id': 49},\n 'DFW2': {'name': 'DCOPS (DFW2)', 'id': 475},\n 'ORD1': {'name': 'DCOPS (ORD1)', 'id': 391},\n 'LON3': {'name': 'DCOPS (LON3)', 'id': 189},\n 'SYD2': {'name': 'DCOPS (SYD2)', 'id': 523},\n 'IAD3': 
{'name': 'DCOPS (IAD3)', 'id': 525},\n 'cloudfs_ops': {'name': 'Cloud Files Ops', 'id': 413},\n 'cloud_infra': {'name': 'CLOUD INFRASTRUCTURE', 'id': 360}}\n queue_id = queue_ids[self.config['datacenter']]\n for failed_drive in event.data:\n logical_drive, physical_drive = failed_drive\n try:\n # Create a new Core ticket\n ticket = xmlrpclib.ServerProxy('{0}/Ticket/::session_id::'.format(\n self.base_url))\n ticket_number = ticket.createTicket(\n queue_id['id'],\n severity_id,\n subcategory_id,\n subject,\n body,\n is_private,\n private_first_message,\n recipients,\n requester_id,\n int(server_info['customer']),\n [self.server_number],\n assignee,\n send_message_text,\n status,\n contact_email_type,\n priority\n )\n except:\n msg_queue['error'].append((\n \"Unable to create ticket\",\n logical_drive,\n physical_drive\n ))\n else:\n msg_queue['ticket_created'].append((\n \"Ticket created\",\n logical_drive,\n physical_drive\n ))\n # Now publish all the events\n for class_type, msg in msg_queue.items():\n self._publish(class_type, msg)\n\n","sub_path":"siren/plugins/coretickets.py","file_name":"coretickets.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"602434676","text":"# -*- coding: utf-8 -*-\n#\n# ramstk.models.commondb.RAMSTKFailureMode.py is part of The RAMSTK\n# Project\n#\n# All rights reserved.\n# Copyright 2007 - 2017 Doyle Rowland doyle.rowland reliaqual com\n\"\"\"RAMSTKFailureMode Table Module.\"\"\"\n\n# Third Party Imports\nfrom sqlalchemy import Column, Float, ForeignKey, Integer, String\nfrom sqlalchemy.orm import relationship\n\n# RAMSTK Package Imports\nfrom ramstk.db import RAMSTK_BASE\nfrom ramstk.models import RAMSTKBaseTable\n\n\nclass RAMSTKFailureMode(RAMSTK_BASE, RAMSTKBaseTable):\n \"\"\"Class to represent ramstk_failuremode in the RAMSTK Common database.\"\"\"\n\n __defaults__ = {\n 'description': 'Failure Mode Description',\n 'mode_ratio': 1.0,\n 'source': ''\n }\n __tablename__ = 'ramstk_failure_mode'\n __table_args__ = {'extend_existing': True}\n\n category_id = Column(\n 'fld_category_id',\n Integer,\n ForeignKey('ramstk_category.fld_category_id'),\n nullable=False,\n )\n subcategory_id = Column(\n 'fld_subcategory_id',\n Integer,\n ForeignKey('ramstk_subcategory.fld_subcategory_id'),\n nullable=False,\n )\n mode_id = Column(\n 'fld_failuremode_id',\n Integer,\n primary_key=True,\n autoincrement=True,\n nullable=False,\n )\n description = Column(\n 'fld_description',\n String(512),\n default=__defaults__['description'],\n )\n mode_ratio = Column('fld_mode_ratio',\n Float,\n default=__defaults__['mode_ratio'])\n source = Column('fld_source', String(128), default=__defaults__['source'])\n\n # Define the relationships to other tables in the RAMSTK Program database.\n category = relationship( # type: ignore\n 'RAMSTKCategory', back_populates='mode')\n subcategory = relationship( # type: ignore\n 'RAMSTKSubCategory', back_populates='mode')\n\n def get_attributes(self):\n \"\"\"Retrieve current values of RAMSTKFailureMode data model attributes.\n\n :return: {failuremode_id, description, type} pairs\n :rtype: dict\n \"\"\"\n _attributes = {\n 'category_id': self.category_id,\n 'subcategory_id': self.subcategory_id,\n 'mode_id': self.mode_id,\n 'description': self.description,\n 'mode_ratio': self.mode_ratio,\n 'source': self.source,\n }\n\n return 
_attributes\n","sub_path":"src/ramstk/models/commondb/ramstkfailuremode.py","file_name":"ramstkfailuremode.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"645964086","text":"#!/usr/bin/env python3\n#\n# spiro.py -\n# script for time-lapse imaging of Petri dishes, with a focus on plants\n# (i.e. it is adapted to day/night cycles).\n#\n# - Jonas Ohlsson \n#\n\nimport os\nfrom picamera import PiCamera\nfrom spiro.hwcontrol import HWControl\nfrom spiro.config import Config\nfrom spiro.logger import log, debug\nimport spiro.webui as webui\nimport argparse\nimport textwrap\nimport sys\nimport signal\nimport RPi.GPIO as gpio\n\nparser = argparse.ArgumentParser(\n description=textwrap.dedent(\"\"\"\\\n SPIRO control software.\n Running this command without any flags starts the web interface.\n Specifying flags will perform those actions, then exit.\"\"\"))\nparser.add_argument('--reset-config', action=\"store_true\", dest=\"reset\",\n help=\"reset all configuration values to defaults\")\nparser.add_argument('--reset-password', action=\"store_true\", dest=\"resetpw\",\n help=\"reset web UI password\")\nparser.add_argument('--install-service', action=\"store_true\", dest=\"install\",\n help=\"install systemd user service file\")\nparser.add_argument('--toggle-debug', action=\"store_true\", dest=\"toggle_debug\",\n help=\"toggles additional debug logging on or off\")\noptions = parser.parse_args()\n\ndef initCam():\n cam = PiCamera()\n # cam.framerate dictates longest exposure (1/cam.framerate)\n cam.framerate = 5\n cam.iso = 50\n cam.resolution = cam.MAX_RESOLUTION\n cam.rotation = 90\n cam.image_denoise = False\n hw.focusCam(cfg.get('focus'))\n return cam\n\n\ndef installService():\n try:\n os.makedirs(os.path.expanduser('~/.config/systemd/user'), exist_ok=True)\n except OSError as e:\n print(\"Could not make directory (~/.config/systemd/user):\", e)\n try:\n with open(os.path.expanduser('~/.config/systemd/user/spiro.service'), 'w') as f:\n if (os.path.exists('/home/pi/.local/bin/spiro')): exe = '/home/pi/.local/bin/spiro'\n else: exe = '/usr/local/bin/spiro'\n f.write(textwrap.dedent(\"\"\"\\\n [Unit]\n Description=SPIRO control software\n [Service]\n ExecStart={}\n Restart=always\n [Install]\n WantedBy=default.target\n \"\"\").format(exe))\n except OSError as e:\n print(\"Could not write file (~/.config/systemd/user/spiro.service):\", e)\n print(\"Systemd service file installed.\")\n\n\ndef terminate(sig, frame):\n global shutdown\n if sig == signal.SIGALRM:\n # force shutdown\n debug(\"Shut down time-out, force-quitting.\")\n debug(\"If the software locks up at this point, a reboot is needed.\")\n debug(\"This is due to a bug in the underlying camera code.\")\n cam.close()\n sys.exit()\n\n if not shutdown:\n # give the app 10 seconds to shut down, then force it\n shutdown = True\n signal.alarm(10)\n\n log(\"Signal \" + str(sig) + \" caught -- shutting down.\")\n webui.stop()\n hw.motorOn(False)\n cam.close()\n hw.cleanup()\n sys.exit()\n\n\nshutdown = False\ncfg = Config()\ncam = None\nhw = HWControl()\nfor sig in [signal.SIGTERM, signal.SIGINT, signal.SIGQUIT, signal.SIGHUP, signal.SIGALRM]:\n signal.signal(sig, terminate)\n\n# start here.\ndef main():\n if options.reset:\n print(\"Clearing all configuration values.\")\n try:\n os.remove(os.path.expanduser('~/.config/spiro/spiro.conf'))\n except OSError as e:\n print(\"Could not remove file (~/.config/spiro/spiro.conf):\", e.strerror)\n raise\n 
if options.install:\n print(\"Installing systemd service file.\")\n installService()\n if options.resetpw:\n print(\"Resetting web UI password.\")\n cfg.set('password', '')\n if options.toggle_debug:\n cfg.set('debug', not cfg.get('debug'))\n if cfg.get('debug'):\n print(\"Debug mode on.\")\n else:\n print(\"Debug mode off\")\n if any([options.reset, options.resetpw, options.install, options.toggle_debug]):\n sys.exit()\n\n # no options given, go ahead and start web ui\n global cam\n gpio.setmode(gpio.BCM)\n hw.GPIOInit()\n cam = initCam()\n log('Starting web UI.')\n webui.start(cam, hw)\n","sub_path":"spiro/spiro.py","file_name":"spiro.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"367839152","text":"from setuptools import setup\n\nrequires = [\n 'pyramid',\n 'pyramid_chameleon',\n 'sqlalchemy',\n 'pyramid_tm',\n 'zope.sqlalchemy',\n]\n\nsetup(name='bitcoinbiz',\n install_requires=requires,\n entry_points=\"\"\"\\\n [paste.app_factory]\n main = bitcoinbiz:main\n [console_scripts]\n initialize_bitcoinbiz_db = bitcoinbiz.initialize_db:main\n \"\"\",\n)\n","sub_path":"package/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"134425243","text":"from setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom pathlib import Path\nimport site\n\ncustomize_code = \"\"\"\n###############################################################################\n# Add peepshow to built-ins\n###############################################################################\n\ntry:\n import peepshow\nexcept:\n import warnings\n warnings.warn(\"peepshow seems to be uninstalled, please manually \" \\\\\n \"remove corresponding entry form \" + __file__)\nelse:\n import builtins\n import os\n builtins.peep = peepshow.peep\n builtins.show = peepshow.show\n builtins.peep_ = peepshow.peep_\n builtins.show_ = peepshow.show_\n peepshow.enable_except_hook(consider_env=True)\n\n\"\"\"\n\nclass CustomInstallCommand(install):\n\n user_options = install.user_options + [\n ('add-builtins', None, 'Add \"peep\" and \"show\" keywords to builtins'),\n ]\n\n def initialize_options(self):\n install.initialize_options(self)\n self.add_builtins = 0\n\n def do_add_builtins(self):\n if self.user:\n customize_path = Path(site.getusersitepackages()) / 'usercustomize.py'\n else:\n customize_path = Path(site.getsitepackages()[0]) / 'sitecustomize.py'\n\n with open(customize_path, 'a') as fh:\n fh.write(customize_code)\n\n print('PeepShow utils added to builtins through: ', customize_path)\n\n def run(self):\n super().run()\n if self.add_builtins:\n self.do_add_builtins()\n\nreadme_path = Path(__file__).parent / 'README.rst'\n\nwith open(readme_path) as fh:\n long_description = fh.read()\n\nsetup(\n name = 'peepshow',\n version = '0.2.2',\n url = 'https://github.com/gergelyk/peepshow',\n author = 'Grzegorz Krason',\n author_email = 'grzegorz@krason.me',\n description = 'Data Explorer',\n packages = find_packages(),\n keywords = 'debug data explore programming'.split(),\n long_description = long_description,\n python_requires = '>=3.6,<3.9',\n package_data = {'': ['peepshow.1']},\n cmdclass = {\n 'install': CustomInstallCommand,\n },\n classifiers = [\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: 
Unix',\n        'Programming Language :: Python',\n        'Topic :: Software Development :: Debuggers',\n        'Topic :: Utilities',\n    ],\n    install_requires = [\n        'astor~=0.7',\n        'astunparse~=1.6',\n        'colorama~=0.4',\n        'getch~=1.0',\n        'miscutils~=1.1',\n        'pprintpp~=0.4',\n        'pygments~=2.2',\n    ],\n)\n","sub_path":"pypi_install_script/peepshow-0.2.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"185511059","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"emotion predictor\",\n    version=\"0.0.1\",\n    author=\"Kilian & wshop19 \",\n    description=\"Emotion predictor + GEIST data\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/jaros1024/emotion-predictor\",\n    packages=setuptools.find_packages(),\n    install_requires=['pandas', 'matplotlib', 'sklearn',\n                      'biosppy', 'pyEDFlib', 'NeuroKit'],\n    dependency_links=['git+https://github.com/neuropsychology/NeuroKit.py/zipball/master'],\n    scripts=['bin/preprocess_geist', 'bin/emotion_predictor_main'],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n    ],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"370066585","text":"import cv2\nimport numpy as np\nimport random\n\nGRID_WIDTH = 10\nGRID_HEIGHT = 10\nBACKGROUND = (128, 128, 128)\nNUM_SEGMENTS = 150\nSEG_RADIUS = 20\nBUFFER = 2 \nNUM_ELEMENTS = 8\n\nimg = cv2.imread('original/1.png') #background = [128, 128, 128]\ncv2.imshow('image', img)\ncv2.waitKey()\n\ndef getEdges(img, thresh1, thresh2):\n    out = cv2.Canny(img, thresh1, thresh2)\n    return out\n\ndef getObject(img, background):\n    out = np.zeros((img.shape[0], img.shape[1], 1), np.uint8)\n    for i in range(img.shape[0]):\n        for j in range(img.shape[1]):\n            if (img[i, j] == background).all():\n                out[i, j] = (0)\n            else:\n                out[i, j] = (255)\n    return out\n\ndef cropCircularSegment(img, pos, rad): #returns image only displaying circular region of radius rad around position pos\n    # np.zeros expects a shape, not an array; zeros_like matches img's shape and dtype\n    mask = np.zeros_like(img)\n    out = np.zeros_like(img)\n    cv2.circle(mask, pos, rad, (255, 255, 255), thickness=-1)\n    cv2.bitwise_and(img, mask, out)\n    return out\n\ndef makeTransparent(img, background):\n    transparent = img\n    for i in range(img.shape[0]):\n        for j in range(img.shape[1]):\n            if (img[i, j][0: 3] == (background)).all():\n                transparent[i, j][3] = 0\n    return transparent\n\ndef getContours(img): #returns dictionary of orientation : contours, center and radius of the minimum enclosing circle\n    contour_dict = {}\n    seg_dict = {}\n    bin_img = getObject(img, BACKGROUND)\n    trash, contours, hierarchy = cv2.findContours(bin_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n    (x, y), rad = cv2.minEnclosingCircle(contours[0])\n    x, y, rad = int(x), int(y), int(rad)\n    segment = img[y - rad: y + rad, x - rad: x + rad]\n    bin_segment = getObject(segment, BACKGROUND)\n    segment = cv2.cvtColor(segment, cv2.COLOR_BGR2BGRA)\n    segment = makeTransparent(segment, BACKGROUND)\n    trash, contours, hierarchy = cv2.findContours(bin_segment, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n    out = np.zeros_like(bin_segment)\n    for i in range(360):\n        cv2.drawContours(out, contours, -1, (255, 255, 255))\n        mask = cv2.getRotationMatrix2D((rad, rad), i + 1, 1)\n        out = cv2.warpAffine(out, mask, (2 * rad, 2 * rad))\n        seg_out = 
cv2.warpAffine(segment, mask, (2 * rad, 2 * rad), borderValue=BACKGROUND)\n contour_dict[i] = out\n seg_dict[i] = seg_out\n out = np.zeros_like(bin_img)\n return contour_dict, seg_dict, (x, y), rad\n\ndef constructScreen(contour_dict, seg_dict, center, rad, num_elements):\n screen = np.zeros((700, 1200), np.uint8)\n main_screen = np.full((700, 1200), BACKGROUND[0], np.uint8)\n main_screen = cv2.cvtColor(main_screen, cv2.COLOR_GRAY2BGRA)\n i = 0\n num_loops = 0\n while i < num_elements:\n curr_screen = screen\n index = random.randint(0, 359)\n x, y = random.randint(0, 700 - 2 * rad), random.randint(0, 1200 - 2 * rad)\n trash, contours, hierarchy = cv2.findContours(contour_dict[index], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n mask = np.zeros_like(screen)\n cv2.drawContours(mask, contours, -1, (255, 255, 255), offset=(y, x), thickness=cv2.FILLED)\n check = np.zeros_like(screen)\n curr_screen = np.bitwise_xor(curr_screen, mask)\n np.bitwise_and(curr_screen, mask, check)\n if np.sum(check) == np.sum(mask):\n for c in range(0, 3):\n main_screen[x: x + 2 * rad, y: y + 2 * rad, c] = seg_dict[index][:, :, c] * (seg_dict[index][:, :, 3]/255.0) + main_screen[x: x + 2 * rad, y: y + 2 * rad, c] * (1.0 - seg_dict[index][:, :, 3]/255.0)\n cv2.imshow('main', main_screen)\n cv2.waitKey(30)\n cv2.drawContours(screen, contours, -1, (255, 255, 255), offset=(y, x), thickness=-1)\n i += 1\n if num_loops > 10000 and not i == num_elements:\n break\n num_loops += 1\n if num_loops > 10000 and not i == num_elements:\n return constructScreen(contour_dict, seg_dict, center, rad, num_elements)\n return main_screen, screen\n \ncontour_dict, seg_dict, center, rad = getContours(img)\nimg, bin = constructScreen(contour_dict, seg_dict, center, rad, NUM_ELEMENTS)\ncv2.imshow('img', img)\ncv2.waitKey()\ncv2.imshow('rip', bin)\ncv2.waitKey()\n\nheight = bin.shape[0]\nwidth = bin.shape[1]\n\ni = 0\nbin = cv2.cvtColor(bin, cv2.COLOR_GRAY2RGBA)\nmain_mask = np.zeros_like(bin)\n \nwhile i < NUM_SEGMENTS:\n mask = np.zeros_like(bin)\n result = np.zeros_like(bin)\n x = random.randint(SEG_RADIUS + BUFFER, height - SEG_RADIUS - BUFFER)\n y = random.randint(SEG_RADIUS + BUFFER, width - SEG_RADIUS - BUFFER)\n cv2.circle(mask, (y, x), SEG_RADIUS + BUFFER, (255, 255, 255), thickness=-1)\n np.bitwise_and(bin, mask, result)\n empty = np.zeros_like(bin)\n \n if np.sum(result) == np.sum(mask) or np.sum(result) == np.sum(empty):\n overlap = np.zeros((height, width, 4), np.uint8)\n np.bitwise_and(main_mask, mask, overlap)\n check = np.zeros((height, width, 4), np.uint8)\n if (np.sum(overlap) == np.sum(check)):\n cv2.circle(main_mask ,(y, x), SEG_RADIUS, (255, 255, 255), thickness=-1)\n cv2.circle(bin ,(y, x), SEG_RADIUS + BUFFER, (255, 255, 255), thickness=-1)\n i += 1\n \ncv2.imshow('main', main_mask)\ncv2.waitKey()\nout = np.zeros_like(img)\nnp.bitwise_and(img, main_mask, out)\ncv2.imshow('out', out)\ncv2.waitKey()\n\n# screen = np.zeros((600, 1240))\n# for i in range(6):\n# dict = random.randint(0, 360)\n# x = random.randint(0, 300)\n# y = random.randint(0, 620)\n# img, contours, hierarchy = cv2.findContours(contour_dict[dict], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n# cv2.drawContours(screen, contours, -1, (255, 255, 255), offset=(x, y))\n# \n# img, contours, hierarchy = cv2.findContours(contour_dict[1], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n# (x, y), rad = cv2.minEnclosingCircle(contours[0])\n# x = int(x)\n# y = int(y)\n# rad = int(rad)\n# cv2.circle(screen, (x, y), rad, (255, 255, 255))\n# print x, y\n# cv2.drawContours(screen, 
contours, -1, (255, 255, 255))\n# cv2.imshow('lol', screen)\n# cv2.waitKey()\n# \n# cv2.imshow('hi', screen[( y-rad):(y+rad), (x - rad): (x + rad)])\n# cv2.waitKey()\n# \n# cv2.imshow('screen', screen)\n# cv2.waitKey()\n# \n# \n# \n# img2 = getEdges(img, 0, 200)\n# cv2.imshow('image2', img2)\n# cv2.waitKey()\n# \n# \n# img3 = np.zeros_like(img2)\n# img2, contours, hierarchy = cv2.findContours(img2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n# cv2.drawContours(img3, contours, -1, (255, 255, 255))\n# \n# cv2.imshow('image3', img3)\n# cv2.waitKey()\n# img4 = cv2.getRotationMatrix2D((300, 300), 40, 1)\n# img4 = cv2.warpAffine(img3, img4, (600, 600))\n# cv2.imshow('image4', img4)\n# cv2.waitKey()\n# img5, contours, hierarchy = cv2.findContours(img4, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n# cv2.drawContours(img5, contours, -1, (255, 255, 255))\n# cv2.imshow('image5', img5)\n# cv2.waitKey()\n# img4 = cv2.getRotationMatrix2D((300, 300), 40, 1)\n# img4 = cv2.warpAffine(img5, img4, (600, 600))\n# cv2.imshow('image4', img4)\n# cv2.waitKey()\n# img5, contours, hierarchy = cv2.findContours(img4, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n# cv2.drawContours(img5, contours, -1, (255, 255, 255))\n# cv2.imshow('image5', img5)\n# cv2.waitKey()\n# \n# \n# height = img2.shape[0]\n# width = img2.shape[1]\n# main_mask = np.zeros((height, width, 3), np.uint8)\n# \n# i = 0\n# \n# while i < NUM_SEGMENTS:\n# mask = np.zeros((height, width), np.uint8)\n# result = np.zeros((height, width), np.uint8)\n# result2 = np.zeros((height, width), np.uint8)\n# x = random.randint(SEG_RADIUS + BUFFER, height - SEG_RADIUS - BUFFER)\n# y = random.randint(SEG_RADIUS + BUFFER, width - SEG_RADIUS - BUFFER)\n# cv2.circle(mask, (x, y), SEG_RADIUS + BUFFER, (255, 255, 255), thickness=-1)\n# np.bitwise_xor(img2, mask, result)\n# np.bitwise_and(result, mask, result2)\n# \n# if np.sum(result2) == np.sum(mask):\n# mask = np.zeros((height, width, 3), np.uint8)\n# cv2.circle(mask, (x, y ), SEG_RADIUS + BUFFER, (255, 255, 255), thickness=-1)\n# in_object = np.zeros((height, width, 3), np.uint8)\n# np.bitwise_and(img, mask, in_object)\n# check = np.zeros((height, width, 3), np.uint8)\n# cv2.circle(check, (x, y), SEG_RADIUS + BUFFER, BACKGROUND, thickness=-1)\n# if (not np.sum(in_object) == np.sum(check)):\n# cv2.circle(main_mask ,(x, y), SEG_RADIUS, (255, 255, 255), thickness=-1)\n# cv2.circle(img2 ,(x, y), SEG_RADIUS + BUFFER, (255, 255, 255), thickness=-1)\n# i += 1\n# \n# cv2.imshow('image2', img2)\n# cv2.waitKey()\n# cv2.imshow('main_mask ', main_mask)\n# cv2.waitKey()\n# final = np.zeros((height, width, 3), np.uint8)\n# np.bitwise_and(img, main_mask, final)\n# cv2.imshow('final', final)\n# cv2.waitKey()\n# \n# \n# \n# # \n# # def drawGrid(img, width, height): #returns dictionary of grid location: image segment \n# # mask = np.zeros((height, width, 3), np.uint8)\n# # img_dict = {}\n# # for i in range(GRID_WIDTH):\n# # for j in range(GRID_HEIGHT):\n# # cv2.circle(mask, (width/10 * i + width/20, height/10 * j + height/20), width/20, (255, 255, 255), thickness=-1)\n# # cv2.imshow('image2', mask)\n# # out = np.zeros((height, width, 3), np.uint8)\n# # np.bitwise_and(img, mask, out)\n# # cv2.imshow('final', out)\n# # cv2.waitKey()\n# # for i in range(GRID_WIDTH):\n# # for j in range(GRID_HEIGHT):\n# # crop = out[width/10 * i: width/10 * (i + 1), height/10 * j: height/10 * (j + 1)]\n# # img_dict[(i, j)] = crop\n# # return img_dict\n# # \n# # def findFilled(img_dict): #returns dictionary of grid location: image with segment of volume in it\n# # 
has_obj_dict = {}\n# # for key in img_dict.keys():\n# # crop = img_dict[key]\n# # crop_width = crop.shape[0]\n# # crop_height = crop.shape[1]\n# # mask2 = np.zeros((crop_width, crop_height, 3), np.uint8)\n# # check = np.zeros((crop_width, crop_height, 3), np.uint8)\n# # cv2.circle(mask2, (crop_width/2, crop_height/2), crop_width/2, BACKGROUND, thickness=-1)\n# # np.bitwise_xor(crop, mask2, check)\n# # if not np.sum(check) == 0:\n# # has_obj_dict[key] = crop\n# # return has_obj_dict\n# # \n# # height = img.shape[0]\n# # width = img.shape[1]\n# # img_dict = drawGrid(img, width, height)\n# # has_obj_dict = findFilled(img_dict)\n# # \n# # # TEST CONCATENATION\n# # # i = 0\n# # # for key in has_obj_dict.keys():\n# # # if i == 0:\n# # # out = has_obj_dict[key]\n# # # i += 1\n# # # else:\n# # # out = np.concatenate((out, has_obj_dict[key]), axis=1)\n# # # cv2.imshow('concat', out)\n# # # cv2.waitKey()\n# # # cv2.imshow('out', out)\n# # # cv2.waitKey()\n# # \n# # def rotate(img, deg): #return image rotated by deg degrees\n# # height = img.shape[0]\n# # width = img.shape[1]\n# # rot_img = cv2.getRotationMatrix2D((height/2, width/2), deg, 1)\n# # rot_img = cv2.warpAffine(img, rot_img, (height, width))\n# # return rot_img\n# # \n# # \n# # \n# # # TEST JITTER\n# # cv2.imshow('rot_test', has_obj_dict[3, 3])\n# # cv2.waitKey() \n# # for angle in range(360):\n# # rot_img = rotate(has_obj_dict[3, 3], angle)\n# # cv2.imshow('rot_test', rot_img)\n# # cv2.waitKey()\n# # if not rot_img.shape[0] == has_obj_dict[3, 3].shape[0] or not rot_img.shape[1] == has_obj_dict[3, 3].shape[1] or not rot_img.shape[2] == has_obj_dict[3, 3].shape[2]:\n# # print rot_img.shape\n# # print has_obj_dict[3, 3].shape\n\n\n","sub_path":"generateStimuli.py","file_name":"generateStimuli.py","file_ext":"py","file_size_in_byte":11558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"437690561","text":"from typing import Dict, List, Tuple, Set, Optional\n\n\nclass F1_triplet(object):\n def __init__(self):\n self.A = 1e-10\n self.B = 1e-10\n self.C = 1e-10\n\n self.tag_A = 1e-10\n self.tag_B = 1e-10\n self.tag_C = 1e-10\n\n def reset(self) -> None:\n self.A = 1e-10\n self.B = 1e-10\n self.C = 1e-10\n\n self.tag_A = 1e-10\n self.tag_B = 1e-10\n self.tag_C = 1e-10\n\n def get_metric(self, reset: bool = False):\n if reset:\n self.reset()\n\n f1, p, r = 2 * self.A / (self.B +\n self.C), self.A / self.B, self.A / self.C\n\n '''\n tag_f1, tag_p, tag_r = 2 * self.tag_A / (self.tag_B +\n self.tag_C), self.tag_A / self.tag_B, self.tag_A / self.tag_C\n\n \n result = {\"precision\": p, \"recall\": r, \"fscore\": f1, \\\n \"tag_precision\": tag_p, \"tag_recall\": tag_r, \"tag_fscore\": tag_f1}\n '''\n result = {\"precision\": p, \"recall\": r, \"fscore\": f1}\n\n return result\n\n '''\n def __call__(self, predictions: List[List[Dict[str, str]]],\n gold_labels: List[List[Dict[str, str]]]):\n '''\n def __call__(self, predictions, gold_labels):\n\n for g, p in zip(gold_labels['selection_triplets'], predictions['spo_gold']):\n g_set = set('_'.join((gg['object'], gg['predicate'],\n gg['subject'])) for gg in g)\n p_set = set('_'.join((pp['object'], pp['predicate'],\n pp['subject'])) for pp in p)\n self.A += len(g_set & p_set)\n self.B += len(p_set)\n self.C += len(g_set)\n\n '''\n for g, p in zip(gold_labels['tag_gold'], predictions['tag_pred']):\n for i in range(len(g)):\n if g[i] != 2 and g[i] != 5:\n self.tag_C += 1\n\n if p[i] != 2 and g[i] != 5:\n self.tag_B += 1\n if p[i] == g[i]:\n self.tag_A += 
1\n        '''\n        ","sub_path":"lib/metrics/F1_score.py","file_name":"F1_score.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"312280620","text":"import unittest\nimport os\nimport SegundoParcial\nfrom unittest.mock import MagicMock\nfrom SegundoParcial import Alumnos\n\nclass TestAlumnos(unittest.TestCase):\n    def testMostrar(self):\n        entrada = [(\"Jose_Lopez\",\"quimica\", 99.08),\n                (\"Jose_Lopez\",\"matematicas\", 95.37),\n                (\"Maria_Martinez\",\"fisica\", 85.60),\n                (\"Maria_Martinez\",\"español\", 80.20)]\n\n        salida_esperada = [(\"Jose_Lopez\", 99.08),\n                (\"Jose_Lopez\", 95.37),\n                (\"Maria_Martinez\", 85.60),\n                (\"Maria_Martinez\", 80.20)]\n\n        Mock = MagicMock()\n        Mock.mostrar.return_value = entrada\n\n        real = Alumnos(Mock)\n        self.assertEqual(salida_esperada, real)\n\n    def setUp(self):\n        # Write the fixture with the .txt extension so tearDown can remove it,\n        # and close the handle so the data is flushed before the test runs.\n        archivo = open(\"alumnos_test.txt\", \"w\")\n        archivo.write(\"Jose_Lopez quimica 99.08\"\"\\nJose_Lopez matematicas 95.37\"\"\\nMaria_Martinez fisica 85.60\"\"\\nMaria_Martinez español 80.20\")\n        archivo.close()\n\n    def tearDown(self):\n        os.remove(\"alumnos_test.txt\")\n\n    def test_integration(self):\n        # Integration path: read the file written in setUp and feed it to\n        # Alumnos through the same mock interface used in testMostrar.\n        entrada = []\n        with open(\"alumnos_test.txt\") as archivo:\n            for linea in archivo:\n                nombre, materia, calificacion = linea.split()\n                entrada.append((nombre, materia, float(calificacion)))\n\n        salida_esperada = [(\"Jose_Lopez\", 99.08),\n                (\"Jose_Lopez\", 95.37),\n                (\"Maria_Martinez\", 85.60),\n                (\"Maria_Martinez\", 80.20)]\n\n        lector = MagicMock()\n        lector.mostrar.return_value = entrada\n\n        real = Alumnos(lector)\n        self.assertEqual(salida_esperada, real)\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"ene-jun-2019/Luis Ornelas/Parcial 2/SegundoParcial_Test.py","file_name":"SegundoParcial_Test.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"582383139","text":"# Credentials\nadmin_credentials = {'userName': 'Administrator', 'password': 'wpsthpvse1'}\noa_credentials = {'username': 'Administrator', 'password': 'hpvse14'}\nilo_credentials = {'username': 'Administrator', 'password': 'hpvse123'}\ncliq_credentials = {\n    'mgmt_ip': '16.71.149.173',\n    'username': 'admin',\n    'password': 'admin'}\nssh_credentials = {'username': 'root', 'password': 'hpvse1'}\n\n# Resource types for X-API-Version=1000\nSERVER_PROFILE_TYPE = 'ServerProfileV10'\n\n# Enclosures, Interconnects, Server Hardware, LIG, EG, and LE\n# Enclosures\nENC1 = 'CN75120D7B'\nENC2 = 'CN75120D77'\nENC3 = 'CN750163KD'\n# Potash and Chloride\nENC1ICBAY3 = '%s, interconnect 3' % ENC1  # Potash\nENC1ICBAY6 = '%s, interconnect 6' % ENC1\nENC2ICBAY3 = '%s, interconnect 3' % ENC2  # Potash\nENC2ICBAY6 = '%s, interconnect 6' % ENC2\nENC3ICBAY3 = '%s, interconnect 3' % ENC3\nENC3ICBAY6 = '%s, interconnect 6' % ENC3\n# Natasha SAS interconnects\nENC1SASICBAY1 = '%s, interconnect 1' % ENC1\nENC1SASICBAY4 = '%s, interconnect 4' % ENC1\n# Drive Enclosures (Bigbird)\nENC1DEBAY1 = '%s, bay 1' % ENC1\n# Server Hardware\nENC1SHBAY3 = '%s, bay 3' % ENC1  # SY680 Gen9\nENC1SHBAY5 = '%s, bay 5' % ENC1  # SY660 Gen9\nENC1SHBAY6 = '%s, bay 6' % ENC1  # SY480 Gen10\nENC1SHBAY7 = '%s, bay 7' % ENC1  # SY480 Gen9\nENC1SHBAY8 = '%s, bay 8' % ENC1  # SY480 Gen10\nENC2SHBAY1 = '%s, bay 1' % ENC2  # SY480 Gen9\nENC2SHBAY5 = '%s, bay 5' % ENC2  # SY660 Gen9\nENC2SHBAY7 = '%s, bay 7' % ENC2  # SY480 Gen10\nENC2SHBAY8 = '%s, bay 8' % ENC2  # SY480 Gen10\nENC3SHBAY1 = '%s, bay 1' % ENC3  # SY480 Gen9\nENC3SHBAY5 = '%s, bay 5' % ENC3  # SY680 Gen9\n# LIG, EG and LE\nLIG_NAME = 'LIG1'\nSASLIG_NAME = 'SASLIG1'\nEG_NAME = 'EG1'\nLE_NAME = 'LE1'\n\n# Name prefixes\nNAME_PREFIX = 'Synergy-Ring2-OVF2101-'\nPROFILE_NAME_PREFIX = NAME_PREFIX\n# SH\nSERVER = 
ENC1SHBAY8\nGEN9_SERVER = ENC1SHBAY7\n# Profile\nPROFILE_NAME = PROFILE_NAME_PREFIX + 'pts2-profile'\nPROFILE_SERVER = SERVER\nPROFILE_EG = EG_NAME\nPTS2_PROFILE_NAME = PROFILE_NAME_PREFIX + 'pts2-profile'\nPTS2_PROFILE_SERVER = SERVER\nPTS2_PROFILE_EG = EG_NAME\n\nsuite_setup_profile = {\n \"type\": SERVER_PROFILE_TYPE, \"name\": PROFILE_NAME,\n \"serverHardwareUri\": 'SH:' + PROFILE_SERVER, \"enclosureGroupUri\": 'EG:' + PROFILE_EG,\n \"iscsiInitiatorNameType\": \"AutoGenerated\", \"serialNumberType\": \"Virtual\", \"macType\": \"Virtual\", \"wwnType\": \"Virtual\", \"affinity\": \"Bay\",\n \"connectionSettings\": {\"connections\": []},\n \"boot\": {\"manageBoot\": False, \"order\": []},\n \"bootMode\": {\"manageMode\": False, \"mode\": None, \"secureBoot\": \"Unmanaged\", \"pxeBootPolicy\": None},\n \"firmware\": {\"manageFirmware\": False, },\n \"bios\": {\"manageBios\": False, \"overriddenSettings\": []},\n \"sanStorage\": {'manageSanStorage': False, 'volumeAttachments': []},\n \"hideUnusedFlexNics\": True, \"osDeploymentSettings\": None,\n \"localStorage\": {\n \"sasLogicalJBODs\": [],\n \"controllers\": [\n {\n \"deviceSlot\": \"Embedded\",\n \"mode\": \"Mixed\",\n \"initialize\": True,\n \"importConfiguration\": False,\n \"driveWriteCache\": None,\n \"logicalDrives\": []\n }\n ]\n },\n\n}\n\npts2_profile_create = {\n \"type\": SERVER_PROFILE_TYPE, \"name\": PTS2_PROFILE_NAME,\n \"serverHardwareUri\": 'SH:' + PTS2_PROFILE_SERVER, \"enclosureGroupUri\": 'EG:' + PTS2_PROFILE_EG,\n \"iscsiInitiatorNameType\": \"AutoGenerated\", \"serialNumberType\": \"Virtual\", \"macType\": \"Virtual\", \"wwnType\": \"Virtual\", \"affinity\": \"Bay\",\n \"connectionSettings\": {\"connections\": []},\n \"boot\": {\"manageBoot\": False, \"order\": []},\n \"bootMode\": {\"manageMode\": False, \"mode\": None, \"secureBoot\": \"Unmanaged\", \"pxeBootPolicy\": None},\n \"firmware\": {\"manageFirmware\": False, },\n \"bios\": {\"manageBios\": False, \"overriddenSettings\": []},\n \"sanStorage\": {'manageSanStorage': False, 'volumeAttachments': []},\n \"hideUnusedFlexNics\": True, \"osDeploymentSettings\": None,\n \"localStorage\": {\n \"sasLogicalJBODs\": [],\n \"controllers\": [\n {\n \"deviceSlot\": \"Embedded\",\n \"mode\": \"Mixed\",\n \"initialize\": True,\n \"importConfiguration\": False,\n \"driveWriteCache\": \"Enabled\",\n \"logicalDrives\": [\n {\n \"name\": \"LD1\",\n \"raidLevel\": \"RAID0\",\n \"bootable\": False,\n \"numPhysicalDrives\": 1,\n \"driveTechnology\": \"SasHdd\",\n \"sasLogicalJBODId\": None,\n \"accelerator\": \"ControllerCache\",\n },\n {\n \"name\": \"LD2\",\n \"raidLevel\": \"RAID0\",\n \"bootable\": False,\n \"numPhysicalDrives\": 1,\n \"driveTechnology\": \"SasHdd\",\n \"sasLogicalJBODId\": None,\n \"accelerator\": \"ControllerCache\",\n }\n ]\n }\n ]\n }\n}\n\nsuite_setup_profiles = [suite_setup_profile.copy()]\n\npts2_profiles_create = [pts2_profile_create.copy()]\n","sub_path":"robo4.2/fusion/tests/RIST/API/OVF2101/Regression_Data2.py","file_name":"Regression_Data2.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"485185094","text":"from rest_framework.routers import DefaultRouter\nfrom account.api.views import StudentProfileViewSet, MentorProfileViewSet\n\napp_name = 'account'\n\nrouter = DefaultRouter()\nrouter.register(r'student-profile', StudentProfileViewSet, base_name='student-profile')\nrouter.register(r'mentor-profile', MentorProfileViewSet, 
base_name='mentor-profile')\n\nurlpatterns = router.urls\n","sub_path":"src/account/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"397604278","text":"# -*- coding: utf-8 -*- \n# @Time : 2019/12/18 10:09\n# @Author : hangzhouwh \n# @Email: hangzhouwh@gmail.com\n# @File : simi_song_proc.py \n# @Software: PyCharm\n\nfrom music163.analysis import lyric_analysis\nfrom music163.proc.douyin_lyric_proc import make_summary\nfrom music163.tool import json_tool\n\n\ndef wash_111():\n\tfile = '../data/csps_lyric/csps_lyrics.json'\n\tdatas = json_tool.load_json(file)\n\n\t# for data in reversed(datas):\n\t# \ttemp = data['song_name']\n\t# \tdata['song_name'] = data['artist_name']\n\t# \tdata['artist_name'] = temp\n\n\t# for data in reversed(datas):\n\t# \tcds = ['许嵩', 'G.E.M.邓紫棋', '徐秉龙', '沈以诚', '陈粒', '群星', '蔡健雅', '汪苏泷', '田馥甄', '孙燕姿', '赵方婧',\n\t# \t\t '王菲', '陈雪凝', '杨千嬅', '任然', 'Beyond', '程佳佳', '杨宗纬', '郭顶', '五月天', '王嘉尔', '房东的猫']\n\t# \tif data['artist_name'] in cds:\n\t# \t\tdatas.remove(data)\n\n\tfor data in reversed(datas):\n\t\tlyric = data['lyric']\n\t\tlyric_list = lyric_analysis.wash(lyric)\n\t\tdata['lyric'] = '.'.join(lyric_list)\n\t# \tlyric_list = lyric_analysis.word_spilt(lyric_list)\n\t# \tlyric_list = lyric_analysis.filter_stopwords(lyric_list)\n\n\tjson_tool.write_json(datas, '../data/csps_lyric/csps_lyrics_washed.json')\n\n\ndef get_simi_song_summary():\n\tfile = 'D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\data\\\\simi\\\\simi_song_lyric.json'\n\tsongs = json_tool.load_json(file)\n\n\tword_lists = []\n\tsummary = []\n\tfor song in songs:\n\t\tlyric = song['lyric']\n\t\tsong_name = song['song_name']\n\t\tartist_name = song['artist_name']\n\t\twash_list = lyric_analysis.wash(lyric)\n\t\tword_list = lyric_analysis.word_spilt(wash_list)\n\t\tword_list = lyric_analysis.filter_stopwords(word_list)\n\t\ttext = ''.join(word_list)\n\t\ttfidf, textrank = make_summary(text)\n\n\t\ttfidf_txt = ','.join(tfidf[0])\n\t\ttextrank_txt = ','.join(textrank[0])\n\t\tsong_dict = {'song_name': song_name, 'artist_name': artist_name, 'tfidf': tfidf_txt, 'textrank': textrank_txt}\n\t\tsummary.append(song_dict)\n\n\tfile_output = 'D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\song_lyric_summary.json'\n\tjson_tool.write_json(summary, file_output)\n\n\nif __name__ == \"__main__\":\n\t# file = 'D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\data\\\\simi\\\\simi_song_lyric.json'\n\t# datas = json_tool.load_json(file)\n\n\twash_111()\n\t# for data in reversed(datas):\n\t# \tlyric = data['lyric']\n\t# \tlyric_list = lyric_analysis.wash(lyric)\n\t# \tlyric_list = lyric_analysis.word_spilt(lyric_list)\n\t# \tlyric_list = lyric_analysis.filter_stopwords(lyric_list)\n","sub_path":"music163/proc/simi_song_proc.py","file_name":"simi_song_proc.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"247525190","text":"from agent.network import SceneSpecificNetwork, SharedNetwork, ActorCriticLoss\nfrom agent.environment import Environment, HabitatDiscreteEnvironment\nimport torch.nn as nn\nfrom typing import Dict, Collection\nimport signal\nimport random\nimport torch\nfrom torchvision import transforms\nfrom agent.replay import ReplayMemory, Sample\nfrom collections import namedtuple\nimport torch.multiprocessing as mp\nimport torch.nn.functional as F\nimport numpy as 
np\nimport logging\nfrom multiprocessing import Condition\n\nTrainingSample = namedtuple('TrainingSample', ('state', 'policy', 'value', 'action_taken', 'goal', 'R', 'temporary_difference'))\nfrom matplotlib import pyplot as plt\n\n\nclass TrainingThread(mp.Process):\n def __init__(self,\n id : int,\n optimizer,\n device,\n network : torch.nn.Module,\n scene_glb : str,\n saver,\n max_t,\n terminal_image):\n\n super(TrainingThread, self).__init__()\n\n # Initialize the environment\n self.env = None\n self.scene_glb = scene_glb\n self.saver = saver\n self.max_t = max_t\n self.local_backbone_network = SharedNetwork().to(device)\n self.id = id\n self.terminal_image = terminal_image\n\n self.master_network = network\n self.optimizer = optimizer\n self.device = device\n\n self.fig = plt.figure()\n self.time_list = []\n self.reward_list = []\n\n def _sync_network(self):\n self.policy_network.load_state_dict(self.master_network.state_dict())\n\n def _ensure_shared_grads(self):\n for param, shared_param in zip(self.policy_network.parameters(), self.master_network.parameters()):\n if shared_param.grad is not None:\n return \n shared_param._grad = param.grad \n \n def get_action_space_size(self):\n return len(self.env.actions)\n\n def _initialize_thread(self):\n # self.logger = logging.getLogger('agent')\n # self.logger.setLevel(logging.INFO)\n #self.init_args['h5_file_path'] = lambda scene: h5_file_path.replace('{scene}', scene)\n #self.env = THORDiscreteEnvironment(self.scene, **self.init_args)\n self.env = HabitatDiscreteEnvironment(self.scene_glb, terminal_image=self.terminal_image)\n self.gamma = 0.99\n self.grad_norm = 40.0\n entropy_beta = 0.01\n self.local_t = 0\n self.action_space_size = self.get_action_space_size()\n\n self.criterion = ActorCriticLoss(entropy_beta)\n self.policy_network = nn.Sequential(SharedNetwork(), SceneSpecificNetwork(self.get_action_space_size())).to(self.device)\n # Initialize the episode\n self._reset_episode()\n self._sync_network()\n\n def _reset_episode(self):\n self.episode_reward = 0\n self.episode_length = 0\n self.episode_max_q = -np.inf\n self.env.reset()\n\n def _forward_explore(self):\n # Does the evaluation end naturally?\n is_terminal = False\n terminal_end = False\n\n results = { \"policy\":[], \"value\": []}\n rollout_path = {\"state\": [], \"action\": [], \"rewards\": [], \"done\": []}\n\n # Plays out one game to end or max_t\n for t in range(self.max_t):\n state = { \n \"current\": self.env.render(),\n \"goal\": self.env.render_target(),\n }\n\n x_processed = torch.Tensor(state[\"current\"])\n x_processed = x_processed.permute(0, 3, 1, 2).to(self.device)\n \n goal_processed = torch.Tensor(state[\"goal\"])\n goal_processed = goal_processed.permute(0, 3, 1, 2).to(self.device)\n \n (policy, value) = self.policy_network((x_processed, goal_processed,))\n\n # Store raw network output to use in backprop\n results[\"policy\"].append(policy)\n results[\"value\"].append(value)\n\n with torch.no_grad():\n (_, action,) = policy.max(0)\n action = F.softmax(policy, dim=0).multinomial(1).item()\n \n policy = policy.cpu().data.numpy()\n value = value.cpu().data.numpy()\n \n # Makes the step in the environment\n print(\"Stepping Agent with, \", action)\n #self.f.write(\"Stepping Action with\" + str(action))\n self.env.step(action)\n\n # Receives the game reward\n is_terminal = self.env.is_terminal\n is_collision = self.env.is_collision\n\n # ad-hoc reward for navigation\n #reward = 10.0 if is_terminal else -0.01\n if is_terminal:\n reward = 10.0\n elif is_collision:\n 
reward = -0.1\n else:\n reward = -0.01\n\n # Max episode length\n if self.episode_length > 500 : is_terminal = True\n if is_collision : is_terminal = True\n\n # Update episode stats\n self.episode_length += 1\n self.episode_reward += reward\n self.episode_max_q = max(self.episode_max_q, np.max(value))\n\n # clip reward\n reward = np.clip(reward, -1, 1)\n\n # Increase local time\n self.local_t += 1\n\n rollout_path[\"state\"].append(state)\n rollout_path[\"action\"].append(action)\n rollout_path[\"rewards\"].append(reward)\n rollout_path[\"done\"].append(is_terminal)\n\n if is_terminal:\n # Logging the training stats\n print('playout finished')\n with open(\"log.txt\", \"a\") as myfile:\n myfile.write(\"appended text\")\n myfile.write(\"Local Time: \" + str(self.local_t))\n myfile.write(\"\\n\")\n\n print('Episode Length: ', self.episode_length)\n myfile.write(\"Episode Length: \" + str(self.episode_length))\n myfile.write(\"\\n\")\n\n print('Episode Reward: ', self.episode_reward)\n myfile.write(\"Episode Reward \" + str(self.episode_reward))\n myfile.write(\"\\n\")\n \n print('Episode max_q', self.episode_max_q)\n myfile.write(\"Episode max_q \" + str(self.episode_max_q))\n myfile.write(\"\\n\") \n \n self.time_list.append(self.local_t)\n self.reward_list.append(self.episode_reward)\n\n terminal_end = True\n plt.plot(self.time_list, self.reward_list)\n plt.savefig('reward.png')\n self._reset_episode()\n break\n\n if terminal_end:\n return 0.0, results, rollout_path\n else:\n x_processed = torch.Tensor(self.env.render())\n x_processed = x_processed.permute(0, 3, 1, 2).to(self.device)\n \n goal_processed = torch.Tensor(self.env.render_target())\n goal_processed = goal_processed.permute(0, 3, 1, 2).to(self.device)\n\n (_, value) = self.policy_network((x_processed, goal_processed,))\n return value, results, rollout_path\n #return value.data.item(), results, rollout_path\n f.close()\n f1.close()\n \n def _optimize_path(self, playout_reward: float, results, rollout_path):\n policy_batch = []\n value_batch = []\n action_batch = []\n temporary_difference_batch = []\n playout_reward_batch = []\n\n for i in reversed(range(len(results[\"value\"]))):\n reward = rollout_path[\"rewards\"][i]\n value = results[\"value\"][i]\n action = rollout_path[\"action\"][i]\n\n playout_reward = reward + self.gamma * playout_reward\n temporary_difference = playout_reward - value.data.item()\n\n policy_batch.append(results['policy'][i])\n value_batch.append(results['value'][i])\n action_batch.append(action)\n temporary_difference_batch.append(temporary_difference)\n playout_reward_batch.append(playout_reward)\n \n policy_batch = torch.stack(policy_batch, 0)\n value_batch = torch.stack(value_batch, 0)\n action_batch = torch.from_numpy(np.array(action_batch, dtype=np.int64))\n temporary_difference_batch = torch.from_numpy(np.array(temporary_difference_batch, dtype=np.float32))\n playout_reward_batch = torch.from_numpy(np.array(playout_reward_batch, dtype=np.float32))\n \n # Compute loss\n loss = self.criterion.forward(policy_batch, value_batch, action_batch.to(self.device), temporary_difference_batch.to(self.device), playout_reward_batch.to(self.device))\n loss = loss.sum()\n\n loss_value = loss.cpu().detach().numpy()\n self.optimizer.optimize(loss, \n self.policy_network.parameters(), \n self.master_network.parameters())\n\n def run(self, master = None):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n print(\"Thread \", self.id, ' ready')\n\n self._initialize_thread()\n \n if not master is None:\n print('Master 
Thread ', self.id, \" started\")\n else:\n print('Thread ', self.id, \" started\")\n\n try:\n self.env.reset()\n while True:\n self._sync_network()\n # Plays some samples\n playout_reward, results, rollout_path = self._forward_explore()\n # Train on collected samples\n self._optimize_path(playout_reward, results, rollout_path)\n \n print(\"Step Finished\", self.optimizer.get_global_step())\n\n # Trigger save or other\n self.saver.after_optimization() \n pass\n except Exception as e:\n print(e)\n # TODO: add logging\n #self.logger.error(e.msg)\n raise e\n","sub_path":"agent/training_thread.py","file_name":"training_thread.py","file_ext":"py","file_size_in_byte":9972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"7879399","text":"\"\"\" A refinement plan for an analytical response function.\n\"\"\"\n\nimport numpy\nfrom scipy import stats\nfrom spotlight import plan\n\nclass Plan(plan.BasePlan):\n\n # required to have solution_file, state_file, and num_solvers\n configuration = {\n \"solution_file\" : \"solution.db\",\n \"state_file\" : \"state.db\",\n \"checkpoint_stride\" : 1,\n }\n\n # required to have local solver and sampling method\n # all other special options get added to a Solver instance\n # any non-special options are passed to the Solver.solve function\n solver = {\n \"local_solver\" : \"powell\",\n \"stop_change\" : 0.1,\n \"stop_generations\" : 5,\n \"sampling_method\" : \"uniform\",\n }\n\n # parameters names and bounds\n # in compute function use self.get(\"x\") to use optimizer's value for \"x\"\n parameters = {\n \"x\" : [-9.5, 9.5],\n \"y\" : [-9.5, 9.5],\n }\n\n def initialize(self):\n \"\"\" Executed once at the beginning to set up the problem.\n \"\"\"\n pass\n\n def compute(self):\n \"\"\" Executed for each set of drawn parameters in the optimization search.\n \"\"\"\n\n # get the x and y values from Mystic\n x, y = self.get(\"x\"), self.get(\"y\")\n\n # get value at Gaussian function x and y\n var = stats.multivariate_normal(mean=[0, 0], cov=[[0.5, 0],[0, 0.5]])\n gauss = -50.0 * var.pdf([x, y])\n\n # get value at volcano function x and y\n r = numpy.sqrt(x**2 + y**2)\n mu, sigma = 5.0, 1.0\n stat = 25.0 * (numpy.exp(-r / 35.0) + 1.0 /\n (sigma * numpy.sqrt(2.0 * numpy.pi)) *\n numpy.exp(-0.5 * ((r - mu) / sigma) ** 2)) + gauss\n\n # whether to flip sign of function\n # a positive lets you search for minimum\n # a negative lets you search for maximum\n stat *= self.surface.sign if hasattr(self, \"surface\") else 1.0\n\n return stat\n","sub_path":"examples/volcano/config_analytical.py","file_name":"config_analytical.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"3791638","text":"from tkinter import *\nimport tkinter.messagebox\nfrom tkinter.ttk import Combobox\nfrom sql_backend import *\nfrom sql_backend import *\nimport difflib\n\n\n\ndef showDisease():\n text_1 = Text(height = 5, width = 50)\n diseases_str = \"\"\n for d in predicted_diseases:\n diseases_str = diseases_str + d + \"\\n\"\n text_1.insert(END, diseases_str)\n text_1.place(x=90,y=350)\n\ndef historyCallback(username):\n text_1 = Text(height = 5, width = 50)\n diseases_str = \"\"\n for d in predicted_diseases:\n diseases_str = diseases_str + d + \"\\n\"\n text_1.insert(END, diseases_str)\n text_1.place(x=90,y=350)\n\ndef submitCallback(username, name, age, gender, blood_group, sym1, sym2, sym3):\n addUser(username, name, age, gender, blood_group) 
\n getDiseaseFromSymptoms(sym1, sym2, sym3)\n showDisease()\n addDiseasesToTable(username)\n\ndef dropdown1():\n text = entry_6.get()\n if (text != \"\"):\n sym1_dropdown[\"values\"] = difflib.get_close_matches(text, all_symptoms, n=10, cutoff=0.1)\n\ndef dropdown2():\n text = entry_7.get()\n if (text != \"\"):\n sym2_dropdown[\"values\"] = difflib.get_close_matches(text, all_symptoms, n=10, cutoff=0.1)\n\ndef dropdown3():\n text = entry_8.get()\n if (text != \"\"):\n sym3_dropdown[\"values\"] = difflib.get_close_matches(text, all_symptoms, n=10, cutoff=0.1)\n\n\nwindow = Tk()\nwindow.title('Disease prediction system')\nwindow.geometry(\"800x800\")\n\ngetAllSymptoms()\n\nlabel_1 = Label(text=\"Username\")\nentry_1 = Entry(window)\nlabel_1.place(x=90,y=60)\nentry_1.place(x=200,y=60)\n\nlabel_2 = Label(text=\"Name\")\nentry_2 = Entry(window)\nlabel_2.place(x=90,y=100)\nentry_2.place(x=200,y=100)\n\nlabel_3 = Label(text=\"Age\")\nentry_3 = Entry(window)\nlabel_3.place(x=90,y=140)\nentry_3.place(x=200,y=140)\n\nlabel_4 = Label(text=\"Gender\")\nentry_4 = StringVar()\nmale_4 = Radiobutton(window, text='Male', value='male', variable=entry_4, tristatevalue=0)\nfemale_4 = Radiobutton(window, text='Female', value='female', variable=entry_4, tristatevalue=0)\nlabel_4.place(x=90,y=180)\nmale_4.place(x=200,y=180)\nfemale_4.place(x=300, y=180)\n\nlabel_5 = Label(text=\"Blood group\")\nentry_5 = StringVar()\nbg_options = [\"A+\", \"A-\", \"B+\", \"B-\", \"O+\", \"O-\", \"AB+\", \"AB-\"]\nbg_dropdown = Combobox(window, textvariable=entry_5, width=10, values=bg_options)\nlabel_5.place(x=90,y=220)\nbg_dropdown.place(x=200, y=220)\n\nlabel_6 = Label(text=\"Symptom 1\")\nentry_6 = StringVar()\nsym1_options = all_symptoms\nsym1_dropdown = Combobox(window, textvariable=entry_6, width=20, values=sym1_options, postcommand=dropdown1)\nlabel_6.place(x=90, y=260)\nsym1_dropdown.place(x=90, y=280)\n\nlabel_7 = Label(text=\"Symptom 2\")\nentry_7 = StringVar()\nsym2_options = all_symptoms\nsym2_dropdown = Combobox(window, textvariable=entry_7, width=20, values=sym2_options, postcommand=dropdown2)\nlabel_7.place(x=290, y=260)\nsym2_dropdown.place(x=290, y=280)\n\nlabel_8 = Label(text=\"Symptom 3\")\nentry_8 = StringVar()\nsym3_options = all_symptoms\nsym3_dropdown = Combobox(window, textvariable=entry_8, width=20, values=sym3_options, postcommand=dropdown3)\nlabel_8.place(x=490, y=260)\nsym3_dropdown.place(x=490, y=280)\n\nbtn1=Button(window, text=\"Get History\", command= lambda: historyCallback(entry_1.get()))\n\nbtn2=Button(window, text=\"Submit\", command= lambda:\n submitCallback(entry_1.get(), entry_2.get(), entry_3.get(), entry_4.get(),\n entry_5.get(), entry_6.get(), entry_7.get(), entry_8.get()))\n\nbtn2.place(x=400, y=500)\n\nwindow.mainloop()\n\n","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"114049599","text":"import sys\n\n\"\"\"\n* Complete the function below.\n* DO NOT MODIFY CODE OUTSIDE THIS FUNCTION!\n\"\"\"\ndef twins(a, b):\n aa = list(zip(a,b))\n res = []\n for i in aa:\n x1 = i[0] \n y1 = i[1] \n x2 = []\n x3 = []\n y2 = []\n y3 = []\n if len(x1) == len(y1):\n for i in range(len(x1)):\n if i % 2 == 0:\n x2.append(x1[i])\n y2.append(y1[i])\n else:\n x3.append(x1[i])\n y3.append(y1[i])\n z2 = set(x2)\n w2 = set(y2)\n z3 = set(x3)\n w3 = set(y3)\n if z2 == w2 and z3== w3:\n res.append(\"Yes\")\n else:\n res.append(\"No\")\n else:\n res.append(\"No\")\n 
return res\n\n\"\"\"\n* DO NOT MODIFY CODE BELOW THIS POINT!\n\"\"\"\ndef main():\n data = sys.stdin.readlines()\n \n pos = 0\n\n a = []\n b = []\n\n for a_i in range(pos + 1, int(data[pos]) + 1):\n a.append(data[a_i])\n\n pos = len(a) + 1\n\n for b_i in range(pos + 1, int(data[pos]) + pos + 1):\n b.append(data[b_i])\n \n result = twins(a, b)\n \n for val in result:\n print(val)\n\nmain()\n","sub_path":"crossover1.py","file_name":"crossover1.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"101516324","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# -----------------------------------------------------------------------------\n#\n# P A G E B O T\n#\n# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens & Font Bureau\n# www.pagebot.io\n# Licensed under MIT conditions\n#\n# Supporting usage of DrawBot, www.drawbot.com\n# Supporting usage of Flat, https://github.com/xxyxyz/flat\n# -----------------------------------------------------------------------------\n#\n# introduction.py\n#\nfrom __future__ import division # Make integer division result in float.\n\nfrom pagebot.elements.pbtextbox import TextBox\n\nclass Introduction(TextBox):\n\n def build_html(self, view, origin=None, drawElements=True):\n u\"\"\"Build a page wide in introduction box for large type, if there is any content.\"\"\"\n if self.bs.s: # Ignore if no content.\n b = self.context.b\n self.build_css(view)\n b.div(cssClass='container %s' % (self.cssClass or 'introduction'))\n b.div(cssClass='row')\n b.div(cssClass='twelvecol last')\n\n b.addHtml(self.bs.s)\n if drawElements:\n for e in self.elements:\n e.build_html(view, origin)\n b._div() # .twelvecol last\n b._div() # .row\n b._div() # .container .introduction\n\nif __name__ == '__main__':\n import doctest\n import sys\n sys.exit(doctest.testmod()[0])\n","sub_path":"Lib/pagebot/elements/web/simplesite/introduction.py","file_name":"introduction.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"193679982","text":"\"\"\"People Counter.\"\"\"\n\"\"\"\n Copyright (c) 2018 Intel Corporation.\n Permission is hereby granted, free of charge, to any person obtaining\n a copy of this software and associated documentation files (the\n \"Software\"), to deal in the Software without restriction, including\n without limitation the rights to use, copy, modify, merge, publish,\n distribute, sublicense, and/or sell copies of the Software, and to\n permit person to whom the Software is furnished to do so, subject to\n the following conditions:\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport os\nimport time\nimport cv2\nimport numpy as np\n\nfrom inference import Network\nfrom argparse import ArgumentParser\nfrom yolo_v3_utils import ParseYOLOV3Output, IntersectionOverUnion\n\n\nLABELS = (\"person\", \"bicycle\", \"car\", \"motorbike\", \"aeroplane\",\n \"bus\", \"train\", \"truck\", \"boat\", \"traffic light\",\n \"fire hydrant\", \"stop sign\", \"parking meter\", \"bench\", \"bird\",\n \"cat\", \"dog\", \"horse\", \"sheep\", \"cow\",\n \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"backpack\",\n \"umbrella\", \"handbag\", \"tie\", \"suitcase\", \"frisbee\",\n \"skis\", \"snowboard\", \"sports ball\", \"kite\", \"baseball bat\",\n \"baseball glove\", \"skateboard\", \"surfboard\",\"tennis racket\", \"bottle\",\n \"wine glass\", \"cup\", \"fork\", \"knife\", \"spoon\",\n \"bowl\", \"banana\", \"apple\", \"sandwich\", \"orange\",\n \"broccoli\", \"carrot\", \"hot dog\", \"pizza\", \"donut\",\n \"cake\", \"chair\", \"sofa\", \"pottedplant\", \"bed\",\n \"diningtable\", \"toilet\", \"tvmonitor\", \"laptop\", \"mouse\",\n \"remote\", \"keyboard\", \"cell phone\", \"microwave\", \"oven\",\n \"toaster\", \"sink\", \"refrigerator\", \"book\", \"clock\",\n \"vase\", \"scissors\", \"teddy bear\", \"hair drier\", \"toothbrush\")\n\n\ndef build_argparser():\n \"\"\"\n Parse command line arguments.\n\n :return: command line arguments\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=False, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n parser.add_argument(\"-iou\", \"--iou_threshold\",\n help=\"Intersection over union threshold\"\n \"(0.4 by default)\", type=float, default=0.2)\n parser.add_argument(\"-img_dir\", \"--image_directory\",\n help=\"The path to the directory of images. 
Ignores --input if set\", type=str,\n required=False)\n parser.add_argument(\"-max_imgs\", \"--maximum_images\",\n help=\"Stop at this much images processed\", type=int, required=False, default=100)\n\n return parser\n\n\ndef pre_process(frame, new_w, new_h, net_h, net_w):\n p_frame = cv2.resize(frame, (new_w, new_h))\n canvas = np.full((net_h, net_w, 3), 128)\n canvas[(net_h - new_h) // 2:\n (net_h - new_h) // 2 + new_h, (net_w - new_w) // 2:\n (net_w - new_w) // 2 + new_w, :] = p_frame\n pp_img = canvas\n pp_img = pp_img.transpose((2, 0, 1))\n pp_img = pp_img.reshape(1, *pp_img.shape) # Batch size axis add & NHWC to NCHW\n\n return pp_img\n\n\ndef post_conversion_benchmark(frame, model, cpu_extension, device, prob_threshold, iou_threshold, network,\n net_input_shape):\n height, width, channels = frame.shape\n\n net_w = net_input_shape[2]\n net_h = net_input_shape[3]\n\n new_w = int(width * min(net_w / width, net_h / height))\n new_h = int(height * min(net_w / width, net_h / height))\n\n pp_img = pre_process(frame, new_w, new_h, net_h, net_w)\n\n inference_start_time = time.time()\n network.exec_net(pp_img)\n if network.wait() == 0:\n inference_end_time = time.time()\n total_inference_time = inference_end_time - inference_start_time\n\n outputs = network.get_output()\n objects = []\n for output in outputs.values():\n objects = ParseYOLOV3Output(output, new_h, new_w, height, width, prob_threshold, objects)\n\n # Filtering overlapping boxes\n obj_len = len(objects)\n for i in range(obj_len):\n if objects[i].confidence == 0.0:\n continue\n for j in range(i + 1, obj_len):\n if IntersectionOverUnion(objects[i], objects[j]) >= iou_threshold:\n objects[j].confidence = 0\n\n # Drawing boxes\n conf = 0\n for obj in objects:\n if obj.confidence < prob_threshold:\n continue\n lbl = obj.class_id\n label = LABELS[lbl]\n if label != 'person':\n continue\n else:\n conf = obj.confidence * 100\n break\n\n return conf, round(total_inference_time * 1000, 3)\n\n\ndef main():\n args = build_argparser().parse_args()\n\n scores = list()\n scores_wout_mispredictions = list()\n inference_time = list()\n\n network = Network()\n network.load_model(args.model, cpu_extension=args.cpu_extension, device=args.device)\n net_input_shape = network.get_input_shape()\n\n if args.image_directory is not None:\n if os.path.exists(args.image_directory):\n for root, dirs, files in os.walk(args.image_directory, topdown=False):\n counter = 0\n for name in files:\n if counter >= args.maximum_images:\n break\n\n frame = cv2.imread(os.path.join(root, name))\n res, inf_time = post_conversion_benchmark(frame, args.model, args.cpu_extension, args.device,\n args.prob_threshold, args.iou_threshold, network,\n net_input_shape)\n\n if res > 0:\n scores_wout_mispredictions.append(res)\n\n scores.append(res)\n inference_time.append(inf_time)\n\n counter += 1\n else:\n res, inf_time = post_conversion_benchmark(args.input_img, args.model, args.cpu_extension, args.device,\n args.prob_threshold, args.iou_threshold, network,\n net_input_shape)\n\n if res > 0:\n scores_wout_mispredictions.append(res)\n\n scores.append(res)\n inference_time.append(inf_time)\n\n print(\"Average score across all images: \" + str(np.mean(scores)))\n print(\"Max score across all images: \" + str(np.max(scores)))\n print(\"Average inference time: \" + str(np.mean(inference_time)) + \"ms\")\n print(\"Average score disregarding mis-predictions: \" + str(np.mean(scores_wout_mispredictions)))\n print(\"Minimum score disregarding mis-predictions: \" + 
str(np.min(scores_wout_mispredictions)))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":8385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"226585276","text":"# Activity 1 from Dynamic Arrays / Lists\n# This program asks the user to enter grade scores to figure out the average grade score.\n# It displays the total points, highest score entered, lowest score entered, the average grade score, and the letter grade.\n\n\ndef get_scores():\n test_scores = []\n while True:\n try:\n score = float(input(\"Enter a score(press enter to exit): \"))\n except ValueError:\n break\n test_scores.append(score)\n return test_scores\n\n\ndef get_total(test_scores):\n total = sum(test_scores)\n return total\n\n\ndef get_maximum(test_scores):\n maximum = max(test_scores)\n return maximum\n\n\ndef get_minimum(test_scores):\n minimum = min(test_scores)\n return minimum\n\n\ndef get_average(test_scores, total):\n average = round(total/len(test_scores), 2)\n return average\n\n\ndef get_letter_grade(average):\n if 90 <= average:\n return \"A\"\n elif 80 <= average:\n return \"B\"\n elif 70 <= average:\n return \"C\"\n elif 60 <= average:\n return \"D\"\n else:\n return \"F\"\n\n\ndef display_result(total, test_scores, maximum, minimum, average, letter_grade):\n print(\"The total points entered are: \", total)\n print(\"The highest score is: \", maximum)\n print(\"The lowest score is: \", minimum)\n print(\"The average score is: \", average)\n print(\"The letter grade is: \", letter_grade)\n\n\ndef main():\n test_scores = get_scores()\n total = get_total(test_scores)\n maximum = get_maximum(test_scores)\n minimum = get_minimum(test_scores)\n average = get_average(test_scores, total)\n letter_grade = get_letter_grade(average)\n display_result(total, test_scores, maximum, minimum, average, letter_grade)\n\n \nmain()\n","sub_path":"Assignment 12/Activity 1.py","file_name":"Activity 1.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"389545375","text":"\"\"\"\nBase class for all IXN package tests.\n\n@author yoram@ignissoft.com\n\"\"\"\n\nfrom os import path\nimport pytest\n\nfrom trafficgenerator.test.test_tgn import TestTgnBase\n\nfrom ixnetwork.ixn_app import init_ixn\n\n\nclass TestIxnBase(TestTgnBase):\n\n TestTgnBase.config_file = path.join(path.dirname(__file__), 'IxNetwork.ini')\n\n def setup(self):\n super(TestIxnBase, self).setup()\n self._get_config()\n\n self.ixn = init_ixn(self.api, self.logger, self.config.get('IXN', 'install_dir'))\n self.ixn.connect(self.server_ip, self.server_port)\n\n def teardown(self):\n for port in self.ixn.root.get_objects_or_children_by_type('vport'):\n port.release()\n self.ixn.disconnect()\n super(TestIxnBase, self).teardown()\n\n def test_hello_world(self, api):\n pass\n\n #\n # Auxiliary functions, no testing inside.\n #\n\n def _get_config(self):\n\n server_ip = pytest.config.getoption('--server') # @UndefinedVariable\n self.server_ip = server_ip.split(':')[0]\n self.server_port = server_ip.split(':')[1] if len(server_ip.split(':')) == 2 else 8009\n chassis = pytest.config.getoption('--chassis') # @UndefinedVariable\n self.port1 = '{}/{}'.format(chassis, pytest.config.getoption('--port1')) # @UndefinedVariable\n self.port2 = '{}/{}'.format(chassis, pytest.config.getoption('--port2')) # @UndefinedVariable\n\n def _load_config(self, 
config_name):\n version = self.config.get('IXN', 'config_version')\n config_file = path.join(path.dirname(__file__), 'configs/{}_{}.ixncfg'.format(config_name, version))\n self.ixn.new_config()\n self.ixn.load_config(config_file)\n self.ixn.commit()\n","sub_path":"ixnetwork/test/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"358490267","text":"def create_input(number):\n start_input = number * \">\" + \"_\" + number * \"<\"\n return start_input\n\n\ndef game_expected_exit(inp):\n helper = inp.split(\"_\")\n ex = helper[1] + \"_\" + helper[0]\n return ex\n\n\ndef next_possible_jumps(inp, possible_ans):\n index = inp.find(\"_\")\n if index > 1 and inp[index - 2] == \">\":\n way = inp[:index - 2] + \"_\" + inp[index - 1] + \">\" + inp[index + 1:]\n possible_ans.append(way)\n if index > 0 and inp[index - 1] == \">\":\n way = inp[:index - 1] + \"_\" + \">\" + inp[index + 1:]\n possible_ans.append(way)\n if index < len(inp) - 1 and inp[index + 1] == \"<\":\n way = inp[:index] + \"<\" + \"_\" + inp[index + 2:]\n possible_ans.append(way)\n if index < len(inp) - 2 and inp[index + 2] == \"<\":\n way = inp[:index] + \"<\" + inp[index + 1] + \"_\" + inp[index + 3:]\n possible_ans.append(way)\n return possible_ans\n\n\n# we use wrong_way for nodes we have visited and nodes that are deadlock\ndef game(tree, wanted_result, path_to_tree, wrong_way):\n helper = tree[0]\n if helper in wrong_way:\n tree.pop(0)\n if helper in path_to_tree:\n path_to_tree.remove(helper)\n return game(tree, wanted_result, path_to_tree, wrong_way)\n\n possible_pos = next_possible_jumps(helper, [])\n if wanted_result in possible_pos:\n path_to_tree.append(helper)\n path_to_tree.append(wanted_result)\n return path_to_tree\n\n if len(possible_pos) == 0 or (len(possible_pos) == 1 and len(next_possible_jumps(possible_pos[0], [])) == 0):\n wrong_way.append(helper)\n tree.pop(0)\n return game(tree, wanted_result, path_to_tree, wrong_way)\n\n path_to_tree.append(helper)\n wrong_way.append(helper)\n\n for pos_elm in possible_pos:\n if len(next_possible_jumps(pos_elm, [])) == 0:\n wrong_way.append(pos_elm)\n if pos_elm not in wrong_way:\n tree.insert(0, pos_elm)\n return game(tree, wanted_result, path_to_tree, wrong_way)\n\n\ndef print_path(pth):\n for i in range(len(pth) - 1):\n print(pth[i], \" ~JUMP~ \", end='')\n print(pth[len(pth) - 1])\n\n\ndef create_game():\n all_frogs = int(input(\"How many frogs do we have?: \"))\n frogs = int(all_frogs / 2)\n print(frogs)\n game_begin = create_input(frogs)\n wanted_result = game_expected_exit(game_begin)\n print(\"START POSITION: \", game_begin)\n print(\"WANTED POSITION: \", wanted_result)\n tree = next_possible_jumps(game_begin, [])\n path = game(tree, wanted_result, [], [])\n return path\n\n\ncorrect_alg = create_game()\nprint_path(correct_alg)\nprint(\"JUMPS MADE : \", len(correct_alg))\n","sub_path":"week8/recursive_frog.py","file_name":"recursive_frog.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"618462928","text":"import logging\n \nlogging.basicConfig(\n stream=open('/var/log/scalarizr.lettuce.log', 'w'),\n level=logging.DEBUG)\n \n \n# convenience functions\n# should be moved to commons.py if we'll have one\n# terrain problem: imports automatically only if test were run from this dir\n \nfrom lettuce import world\nfrom lettuce.core import 
Feature, Scenario\n \n \nclass ThisFeatureOnly(object):\n \"\"\"\n Usage:\n from lettuce import world\n this_feature_only = world.ThisFeatureOnly(\"your feature name\")\n \n @before.each_scenario\n @this_feature_only\n def setup(scenario):\n ...\n \n Please use 'this_feature_only' name for readability.\n \n Can be used with:\n @before.each_scenario\n @after.each_scenario\n @before.each_feature\n @after.each_feature\n \"\"\"\n \n def __init__(self, feature):\n self.feature = feature\n \n def __call__(self, f):\n def wrapper(arg):\n if isinstance(arg, Scenario):\n feature_name = arg.feature.name\n elif isinstance(arg, Feature):\n feature_name = arg.name\n else:\n raise Exception(\"this_feature_only is supposed to decorate only \"\n \"before/after each feature/scenario functions that\"\n \" accept one argument of type Feature or Scenario\")\n if feature_name == self.feature:\n return f(arg)\n return wrapper\n \nworld.absorb(ThisFeatureOnly)\n \n","sub_path":"tests/acceptance/terrain.py","file_name":"terrain.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"92156414","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport random\nfrom twython import Twython\n\n\nCONSUMER_KEY='XXXX'\nCONSUMER_SECRET='XXXXXXXX'\nACCESS_TOKEN='XXXXXXXXXXXXXX'\nACCESS_SECRET='XXXXXXXXXX'\n\ndef varoitukset():\n\tvaroitus = open('varoitukset_2.txt','r')\n\tvaroitus_data = varoitus.read().splitlines()\n\tvaroitus_str = random.choice(varoitus_data)\n\tvaroitus.close()\n\treturn varoitus_str\n\ndef alueet():\n\talue = open('alueet_2.txt','r')\n\talue_data = alue.read().splitlines()\n\talue_str = random.choice(alue_data)\n\talue.close()\n\treturn alue_str\n\ndef paikat_genetiivi():\n\tpaikat = open('paikat_n_2.txt','r')\n\tpaikat_data = paikat.read().splitlines()\n\tpaikat_str = random.choice(paikat_data)\n\tpaikat.close()\n\treturn paikat_str\n\ndef paikat_elatiivi():\n\tpaikat = open('paikat_lla_2.txt','r')\n\tpaikat_data = paikat.read().splitlines()\n\tpaikat_str = random.choice(paikat_data)\n\tpaikat.close()\n\treturn paikat_str\n\ndef aineet():\n\taineet = open('aineet_2.txt','r')\n\taineet_data = aineet.read().splitlines()\n\taineet_str = random.choice(aineet_data)\n\taineet.close()\n\treturn aineet_str\n\ndef sataa():\n\tsataa = open('sataa_2.txt','r')\n\tsataa_data = sataa.read().splitlines()\n\tsataa_str = random.choice(sataa_data)\n\tsataa.close()\n\treturn sataa_str\n\ndef skenaario_1():\n\ttwiitti= (varoitukset() + '\\n' + alueet() + \" \" + paikat_elatiivi() + \" \" + sataa() + \" \" + aineet() + \" ja lämpötila pysyttelee \" + str(random.randint(-40,50)) + \" asteen tuntumassa.\")\n\tapi = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)\n\tapi.update_status(status=twiitti)\n\tprint (twiitti)\n\ndef skenaario_2():\n\ttwiitti = (alueet() + \" \" + paikat_elatiivi() + \" lämpötila vaihtelee \" + str(random.randint(-40,50)) + \" ja \" + str(random.randint(-40,50)) + \" asteen välillä. 
\" + \"Alueella on \" + str(random.randint(0,100)) + \"% mahdollisuus, että taivaalta voi sataa \" + aineet() + \".\")\n\tapi = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)\n\tapi.update_status(status=twiitti)\n\tprint (twiitti)\n\ndef skenaario_3():\n\ttwiitti= (varoitukset() + '\\n' + aineet() + \", \" + aineet() + \" ja \" + aineet() + \" on odotettavissa \" + paikat_genetiivi() + \" \" + paikat_elatiivi() + \".\")\n\tapi = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)\n\tapi.update_status(status=twiitti)\n\tprint (twiitti)\n\ndef skenaario_4():\n\ttwiitti= (\"Koko Suomessa on odotettavissa seuraavien \" + str(random.randint(2,12)) + \" tunnin sisällä mm. \" + aineet() + \" \" + paikat_elatiivi() + \", \" + aineet() + \" \" + paikat_elatiivi() + \" sekä \" + aineet() + \" \" + paikat_elatiivi() + \".\")\n\tapi = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)\n\tapi.update_status(status=twiitti)\n\tprint (twiitti)\n\n#Valitaan satunnainen luku 1-4, jonka perusteella twiitataan skenaario.\nnumber = random.randint(1,4)\n\nif number == 1:\n\tskenaario_1()\nelif number == 2:\n\tskenaario_2()\nelif number == 3:\n\tskenaario_3()\nelif number == 4:\n\tskenaario_4()\n\n\n\n\n\n\n\n","sub_path":"weather_tweet.py","file_name":"weather_tweet.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"646584634","text":"\nimport tika\nfrom tika import parser\n\ntika.TikaClientOnly = True\n\nfile_to_proc = \"/Users/tylerskluzacek/Desktop/conference-proceeding.pdf\"\nfile_to_proc = \"/Users/tylerskluzacek/Desktop/king-cholera.jpeg\"\n\n# opening pdf file\nparsed_pdf = parser.from_file(file_to_proc)\n\n# saving content of pdf\n# you can also bring text only, by parsed_pdf['text']\n# parsed_pdf['content'] returns string\ndata = parsed_pdf['metadata']\n\n# Printing of content\nprint(data)\n\n# \nprint(type(data))\n","sub_path":"extractors/xtract_tika.py","file_name":"xtract_tika.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"37607938","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 16 00:53:17 2020\r\n@author: shenpaul\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\n\r\ntarget=' activity inference' #target data name\r\nthreshold=0.03 # to choose those data whose correlation coefficient lower than threshold\r\ntestdata_per=0.4 #the percentage for test data\r\npath='result/result_u00.pkl'\r\n#arrange the data into X(data set) and Y(target set)\r\ndef arrangeXY(DF,features,target):\r\n X=DF[features[0]]\r\n Y=DF[target]\r\n for i in range(1,len(features)):\r\n print(features[i])\r\n X_Old=X;\r\n X_New=testDF[features[i]];\r\n #joint data frame \r\n X = pd.concat([X_Old,X_New], axis=1, sort=False) \r\n X=X.drop([target,],axis=1)#delete target set in data set\r\n print(\"\\ndata set:\",X)\r\n print(X.columns)\r\n return (X,Y)\r\n#\"Load data\"\r\ndef Loaddata(path):\r\n print(\"Loading... 
data\")\r\n DF = pd.read_pickle(path, compression='xz') #Read dataset\r\n DF.replace([np.inf, -np.inf], np.nan, inplace=True) #replace infinity and nan value with mean value\r\n DF.fillna(DF.mean(), inplace=True) \r\n #DF=DF.drop(['next_activity'],axis=1) #I am still thinking about this.....\r\n return DF\r\n\r\ndef Randomforest(X,Y,testdata_per):\r\n # Split the data into 40% test and 60% training\r\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=testdata_per, random_state=0)\r\n clf = RandomForestRegressor(n_estimators=100, max_depth=20,random_state=0, n_jobs=-1)\r\n print(\"\\nTraining...Regressor\")\r\n # Train ......\r\n clf.fit(X_train, Y_train)\r\n print(\"\\nprediction\")\r\n Y_pred = clf.predict(X_test)\r\n print(\"R2 score : %.2f\" % r2_score(Y_test,Y_pred))\r\n print(\"Mean squared error: %.2f\" % mean_squared_error(Y_test,Y_pred))\r\n \r\ndef Pearson_Sele(testDF):\r\n #pearson selection\r\n df = pd.DataFrame(data= testDF) # use dataframe, because of the use of building pearson method\r\n corr = df.corr(method ='pearson')[' activity inference']#Pearson correlation coefficient\r\n # print(corr)#print out the result of the corrleation after pearson correaltion \r\n feat_labels = corr.index #here, already filter out first time some data in which with text message\r\n abs_corr = abs(corr).sort_values(ascending=True,)[1:]# absolute for positive values\r\n print(abs_corr,\"\\n\")\r\n relevant_features = abs_corr[abs_corr> threshold]#filter out those correalation coefficient is lower than 0.08\r\n return (feat_labels,relevant_features)\r\n#Main\r\ntestDF=Loaddata(path)\r\n\r\n#pearson selection\r\n(feat_labels,relevant_features)=Pearson_Sele(testDF)\r\nprint(\"relevant_features:\",relevant_features.index)\r\n#arrange the data for the convenience of division of test set and training set\r\nprint(\"\\nspilt original data to training set and test set\")\r\n(X,Y)=arrangeXY(testDF,feat_labels,target)\r\nRandomforest(X,Y,testdata_per)\r\n\r\nprint(\"\\nChange... 
set\") #use the features after filtered out\r\nprint(\"\\nspilt limited data to training set and test set\")\r\n(X_limited,Y)=arrangeXY(testDF,relevant_features.index,target) \r\nRandomforest(X_limited,Y,testdata_per)","sub_path":"Pearson Correlation/Pearson_Correalation_1.0.py","file_name":"Pearson_Correalation_1.0.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"366251652","text":"import unittest\n\nfrom src.grammar import Grammar\nfrom src.rules import lit, eps, eof, plus, star, reg, seqn, alt, InvalidRegexDefinitionError, lazy, Rule\n\n\nclass TestGrammarMethods(unittest.TestCase):\n\n def test_add(self):\n g = Grammar()\n r = alt(\"MyAlt\", lit('a'), lit('b'))\n g.add(r)\n self.assertEqual(3, len(g.rules))\n\n rec = lazy()\n C = seqn(\"At\", rec, lit('t'))\n B = seqn(\"Cd\", C, lit('d'))\n A = alt(\"A\", seqn(\"Br\", B, lit('r')), eps())\n rec.set_rule(A)\n g.add(A)\n self.assertEqual(11, len(g.rules))\n\n g.set_nullables()\n for r in g.rules.values():\n print(r, r.is_nullable)\n print(\"---\")\n g.set_left_recursives()\n for r in g.rules.values():\n print(r, r.is_left_recursive)\n","sub_path":"test/grammar_test.py","file_name":"grammar_test.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"318739432","text":"\"\"\"\nFrom: https://oj.leetcode.com/problems/recover-binary-search-tree/\nAuthor: Jing Zhou\nDate: Sep 01, 2014\nThought: used the same method to traverse the tree in order, then find the abnormal nodes\nTags: BST, tree, recursion\n\"\"\"\n\n\n\nclass Solution:\n # @param root, a tree node\n # @return a tree node\n def __init__(self):\n \"\"\" Okay I did so many problems now and I\n \"\"\"\n self.first = None\n self.second = None\n self.pre = None\n def recoverTree(self, root):\n if not root:\n return root\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val\n return root\n def inorder(self, root):\n if not root:\n return\n self.inorder(root.left)\n if not self.pre:\n self.pre = root\n else:\n if self.pre.val > root.val:\n if not self.first:\n self.first = self.pre\n self.second = root\n self.pre = root\n self.inorder(root.right)\n","sub_path":"week22/Jing/p_recover_BST.py","file_name":"p_recover_BST.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"452952900","text":"from django.urls import path\nfrom . 
import views\nurlpatterns = [\n path('', views.home, name='home'),\n path('index/', views.index, name='index'),\n #path('index1/', views.index1, name='index1'),\n path('register/', views.register, name='register'),\n path('login/', views.SignIn, name='signIn'),\n path('details/', views.details, name='details'),\n path('edit_details/', views.edit_details, name='edit_details'),\n path('education/', views.education, name='education'),\n path('edit_education//', views.edit_education, name='edit_education'),\n path('project/', views.project, name='project'),\n path('edit_project//', views.edit_project, name='edit_project'),\n path('personaldetails/', views.personaldetails, name='personaldetails'),\n path('edit_personaldetails//', views.edit_personaldetails, name='edit_personaldetails'),\n path('logout/', views.signout, name='logout'),\n path('resume/', views.resume, name='resume'),\n\n\n\n]","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"555360075","text":"import argparse\nimport os\n\nimport numpy as np\nimport time\n\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom dataset import Dataset\nfrom model import Model\n\n\ndef _adjust_learning_rate(optimizer, step, initial_lr, decay_steps, decay_rate):\n lr = initial_lr * (decay_rate ** (step // decay_steps))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef _train(path_to_data_dir, path_to_logs_dir, path_to_restore_checkpoint_file):\n batch_size = 16\n initial_learning_rate = 1e-3\n decay_steps = 8000\n decay_rate = 0.1\n num_steps_to_show_loss = 20\n num_steps_to_snapshot = 1000\n num_steps_to_train = 30000\n\n dataset = Dataset(path_to_data=path_to_data_dir, mode=Dataset.Mode.TRAIN)\n dataloader = DataLoader(dataset, batch_size, shuffle=True, num_workers=8)\n model, step = Model(), 0\n losses = []\n\n optimizer = optim.SGD(model.parameters(), lr=initial_learning_rate, momentum=0.9, weight_decay=0.0005)\n if path_to_restore_checkpoint_file is not None:\n step = model.load(path_to_restore_checkpoint_file, optimizer=optimizer)\n losses = np.load('logs/losses.npy').tolist()\n losses = list(filter(lambda x: x[0] <= step, losses))\n print('Model restored from file: %s' % path_to_restore_checkpoint_file)\n model.cuda()\n\n start_time, num_samples = time.time(), 0\n should_continue = True\n num_obj_samples = np.array([7228, 746, 342, 197, 190, 832, 811, 230, 529, 1166, 129, 87, 14, 32, 65, 51, 64, 125, 613, 458, 546, 438, 33, 66])\n obj_weight = torch.FloatTensor(1 - (num_obj_samples / sum(num_obj_samples))).cuda()\n\n while should_continue:\n for batch_index, (head_images, hand_images, fa_labels, ges_labels, obj_labels) in enumerate(dataloader):\n hand_images = Variable(hand_images).cuda()\n head_images = Variable(head_images).cuda()\n fa_labels = Variable(fa_labels).cuda()\n ges_labels = Variable(ges_labels).cuda()\n obj_labels = Variable(obj_labels).cuda()\n\n fa_logits, ges_logits, obj_logits = model.train().forward(hand_images, head_images)\n\n fa_cross_entropy, ges_cross_entropy, obj_cross_entropy = Model.loss(fa_logits, ges_logits, obj_logits, fa_labels, ges_labels, obj_labels, obj_weight)\n loss = fa_cross_entropy + ges_cross_entropy + obj_cross_entropy\n\n learning_rate = _adjust_learning_rate(optimizer, step=step, initial_lr=initial_learning_rate,\n decay_steps=decay_steps, 
decay_rate=decay_rate)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n step += 1\n num_samples += len(obj_labels)\n\n if step % num_steps_to_show_loss == 0:\n elapsed_time = time.time() - start_time\n examples_per_sec = num_samples / elapsed_time\n start_time, num_samples, duration = time.time(), 0, 0.0\n print('step = {:d}, loss = {:f}, learning_rate = {:f} ({:.1f} examples/sec)'.format(\n step, loss.data[0], learning_rate, examples_per_sec))\n print('=> fa_cross_entropy = {:f}, ges_cross_entropy = {:f}, obj_cross_entropy = {:f}'.format(\n fa_cross_entropy.data[0], ges_cross_entropy.data[0], obj_cross_entropy.data[0]))\n losses.extend([[step, loss.data[0]]])\n\n if step % num_steps_to_snapshot == 0 or step % num_steps_to_train == 0:\n np.save(os.path.join(path_to_logs_dir, 'losses.npy'), np.array(losses))\n path_to_checkpoint_file = model.save(path_to_dir=path_to_logs_dir,\n step=step, optimizer=optimizer)\n print('=> Model saved to file: %s' % path_to_checkpoint_file)\n\n if step % num_steps_to_train == 0:\n should_continue = False\n break\n\n\nif __name__ == '__main__':\n def main(args):\n path_to_data_dir = args.data_dir\n path_to_logs_dir = args.logs_dir\n path_to_restore_checkpoint_file = args.restore_checkpoint\n\n if not os.path.exists(path_to_logs_dir):\n os.mkdir(path_to_logs_dir)\n\n print('Start training')\n _train(path_to_data_dir, path_to_logs_dir, path_to_restore_checkpoint_file)\n print('Done')\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--data_dir', default='./data', help='path to data directory')\n parser.add_argument('-l', '--logs_dir', default='./logs', help='path to logs directory')\n parser.add_argument('-r', '--restore_checkpoint', default=None, help='path to restore checkpoint file, e.g., ./logs/model-100.pth')\n\n main(parser.parse_args())\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"126665605","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2020 Jarosław Stańczyk \nSource code presented in the lectures \"Python programming language\"\n\n04/write.py\n\"\"\"\nfrom __future__ import print_function\n\n\nif __name__ == \"__main__\":\n\tf = open(\"05.write.txt\", 'w')\n\tf.write(\"Here's a string that ends with \" + str(2017) + \"\\n\")\n\tf.close()\n\n\tf = open(\"05.write.txt\", 'a')\n\tfor i in range(1, 11):\n\t\tprint(\"i is:\", i, file=f)\n\tf.close()\n\n# eof.\n","sub_path":"04/05.write.py","file_name":"05.write.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"127228457","text":"class Solution:\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n arr1 = []\n arr2 = []\n if len(nums1) < len(nums2):\n arr1 = nums1\n arr2 = nums2\n else:\n arr1 = nums2\n arr2 = nums1\n\n intersect = []\n for item in arr1:\n if item in intersect:\n continue\n elif item in arr2:\n intersect.append(item)\n return intersect\n","sub_path":"349.py","file_name":"349.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"645415006","text":"# -*- coding: UTF-8 -*-\nimport random\nimport re\n\nfrom os import listdir\n\ndef textParse(bigText):\n # re.split supports regular expressions and splitting on multiple delimiter characters\n listOfWords = re.split(r'\\W+',bigText)\n 
return [tok.lower() for tok in listOfWords if len(tok) > 2]\n\ndef createVocabList(dataSet):\n uniqueWord = set([])\n for wordList in dataSet:\n uniqueWord = set(wordList) | uniqueWord\n return list(uniqueWord)\n\n\ndef setOfWords2Vec(vocabList, wordList):\n vocabVec = [0] * len(vocabList)\n for word in wordList:\n if word in vocabList:\n vocabVec[vocabList.index(word)] = 1\n return vocabVec\n\n\nif __name__ == '__main__':\n docList = []\n classList = []\n fullText = []\n fileDir = 'email/ham'\n fileDir1 = 'email/spam'\n trainingFileList = listdir(fileDir)\n m = len(trainingFileList)\n\n for i in range(m):\n wordList = textParse(open(fileDir + '/' +trainingFileList[i], 'r' , encoding= 'GBK').read())\n docList.append(wordList)\n fullText.append(wordList)\n classList.append(0)\n\n trainingFileList = listdir(fileDir1)\n m = len(trainingFileList)\n for i in range(m):\n wordList = textParse(open(fileDir1 + '/' + trainingFileList[i], 'r', encoding='GBK').read())\n docList.append(wordList)\n fullText.append(wordList)\n classList.append(1)\n\n vocabList = createVocabList(docList)\n trainingSet = list(range(50))\n testSet = []\n for i in range(10):\n randomIndex = int(random.uniform(0,len(trainingSet)))\n testSet.append(trainingSet[randomIndex])\n del(trainingSet[randomIndex])\n\n trainMat = []\n trainClasses = []\n for docIndex in trainingSet:\n trainMat.append(setOfWords2Vec(vocabList,docList[docIndex]))\n trainClasses.append(classList[docIndex])","sub_path":"com/fp/myBayes/bayes-modify.py","file_name":"bayes-modify.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"19126508","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n# author:Administrator\n# datetime:2018/9/21 20:50\n# software: PyCharm\nclass Solution:\n def findDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n length = len(nums)\n result = []\n count = 0\n while count 1:\n lo = heappop(heap)\n hi = heappop(heap)\n for pair in lo[1:]:\n pair[1] = '0' + pair[1]\n for pair in hi[1:]:\n pair[1] = '1' + pair[1]\n heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:])\n return symb2freq, sorted(heappop(heap)[1:], key=lambda p: (len(p[-1]), p))\n\nif __name__ == '__main__':\n txt = \"this is an example for huffman encoding\"\n symb2freq, huff = encode(txt)\n print('Symbol\\tWeight\\tHuffman Code')\n for p in huff:\n print('%s\\t%s\\t%s' % (p[0], symb2freq[p[0]], p[1]))\n","sub_path":"algorithm/huffmancoding.py","file_name":"huffmancoding.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"262005490","text":"import os, pandas, sys\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cd_site.settings\")\nimport django\ndjango.setup()\nfrom catholicData.models import WorldPopData\nimport schedule\n\n\ndef job():\n cpop=WorldPopData.objects.all()[0].pop\n cpop = 1 + int(cpop)\n WorldPopData.objects.all().delete()\n p=WorldPopData( pop=cpop )\n p.save()\n print(\"cpop running...done\")\n print(cpop)\n\n\nschedule.every(1.39).seconds.do(job)\n\nwhile True:\n schedule.run_pending()","sub_path":"scripts/update_cathpop.py","file_name":"update_cathpop.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"262838404","text":"import csv\nfrom numpy.linalg import norm\nfrom scipy import *\nfrom pylab import plot, show, 
legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog\nfrom numpy import ones\n\n\n#wdir = \"../../../../data/raw/Joebigsmooth/o1/13/11/\"\n#wdir = \"../../../../../../PhDold/project/code/data/Joe/alldb/o3/\"\n\n#wdir = \"../../../../data/dbchris/o3femh110h01testsmallt/\"\n\n\n#wdir = \"../../../../data/raw/Cserre/solitonothers/collDMcopy/o3/\"\n#wdir = \"../../../../data/raw/bigsmoothalphainf/o3/10/0/\"\n\n#wdir = \"../../../../data/raw/trackleadsola10new/o3/\"\n\n#wdir = \"../../../../data/raw/DBASPECTRAT/o3/10/10/8.0/\"\n\n#wdir = \"../../../../data/raw/longcontactdiscdx9diff10fileio/o3/9/0/\"\n#wdir = \"../../../../data/raw/longcontactdiscdx8diff10fileio/o3/8/0/\"\n\n#wdir = \"../../../../data/raw/LCD/o3/dx=9/diff=1000.0/\"\n\n\n#wdir = \"../../../../data/raw/longcontactdisc/o3/dx=9/diff=10.0/\"\n#wdir = \"../../../../data/raw/Joebigsmooth/o3/9/6/\"\n\n#wdir = \"../../../../gitproj/data/raw/longcontactdiscdx9diff10fileio/o3/6/0/\"\n\n#wdir = \"../../../../data/raw/DBSTEEPNoLimiter/o2/0/\"\n\n#wdir = \"../../../../data/raw/longtimedambreakNEW/FDc/\"\n\n#wdir = \"../../../../data/raw/Joebigsmooth/FDcent/14/12/\"\n\nwdir = \"../../../../../data/raw/solslopelarger10p1R/o2/\"\n\n#wdir = \"../../../data/bigsmoothtargettedNEW1/FDcent/13/12/\"\n\ntimeinsecs = 30\n\ngap = 1\ng = 9.81\n \n#time = 0.0995978291745\nfilen = 400*500\n\n#s = wdir + \"saveoutputts\" + str(int(filen)) + \".txt\"\n \n#s = wdir + \"saveoutputtslast.txt\"\ns = wdir + \"out\" +str(filen)+ \".txt\"\nwith open(s,'r') as file1:\n readfile = csv.reader(file1, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n \n h = []\n bed = []\n u = []\n he = []\n ue = []\n x = []\n ht = []\n ut = []\n j = -1\n for row in readfile: \n if (j >= 0):\n \n \n dx =float(row[0])\n dt =float(row[1])\n t =float(row[2])\n x.append(float(row[3]))\n h.append(float(row[4]))\n u.append(float(row[6]))\n #he.append(float(row[8]))\n #ue.append(float(row[9]))\n bed.append(float(row[7]))\n #diffuse = float(row[8])\n \n \n \"\"\"\n dx =float(row[0])\n dt =float(row[1])\n t =float(row[2])\n x.append(float(row[5]))\n h.append(float(row[6]))\n u.append(float(row[8]))\n #diffuse = float(row[7])\n \"\"\"\n \n \n \"\"\"\n \n dx =float(row[0])\n dt =float(row[1])\n t =float(row[2])\n x.append(float(row[4]))\n h.append(float(row[5]))\n u.append(float(row[7]))\n \"\"\"\n \n\n \n \n \n j = j + 1\n\n n = len(x) \n x = array(x)\n u = array(u)\n h = array(h)\n bed = array(bed)\n \nu2 = 1.074975\nh2 = 1.36898\nx2 = 500 + 30*u2\n ","sub_path":"CODE/postprocessing/readplot/investigate/solitonexampleplot.py","file_name":"solitonexampleplot.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"125389903","text":"import os\nimport datetime\nimport json\n\n#Dictionary for book\n#{author: {book: [highlights], date_of_last_highlight: date}}\n\n\nclass KindleDictionary(dict):\n \"\"\"\n A class that creates a dictionary that contain all the highlights within the Amazon Kindle clippings file\n The dictionary format is {author: {book: [highlights]}, date_of_last_highlight: date}\n \"\"\"\n\n def __init__(self, highlights, dictionary_filepath_to_load=\"\"):\n \"\"\"\n constructor which creates the dictionary object with the highlights\n :param highlights: the highlights in the amazon clippings file\n :param dictionary_filepath_to_load: file path to dictionary saved in JSON format\n :return: none\n \"\"\"\n\n if highlights == \"\":\n raise Exception(\"This file is empty\")\n\n 
if dictionary_filepath_to_load == \"\":\n self.highlights_dictionary = {\"date_of_last_highlight\": datetime.datetime.strptime(\"1900 1 1\", \"%Y %M %d\")}\n self.highlights = highlights\n self.booklist = []\n self.authorlist = []\n self.last_modified_date = 0\n self._add_highlights(highlights, self.highlights_dictionary[\"date_of_last_highlight\"])\n else:\n if not dictionary_filepath_to_load.lower().endswith(\".txt\"):\n raise Exception(\"Please make sure your file is a .txt file\")\n elif os.stat(dictionary_filepath_to_load).st_size == 0:\n raise Exception(\"This file is empty\")\n else:\n with open(dictionary_filepath_to_load, 'r') as infile:\n self.highlights_dictionary = json.load(infile)\n self.highlights_dictionary[\"date_of_last_highlight\"] = \\\n datetime.datetime.strptime(self.highlights_dictionary[\"date_of_last_highlight\"], \"%Y-%m-%d %H:%M:%S\")\n\n self._add_highlights(highlights, self.highlights_dictionary[\"date_of_last_highlight\"])\n self.booklist = self._get_book_list()\n self.authorlist = self._get_author_list()\n self.last_modified_date = self._get_last_modified_date()\n\n def get_book_list_by_author(self, author_name):\n \"\"\"\n :param author_name: the name of the author\n :return: the books belonging to that author\n \"\"\"\n return self.highlights_dictionary[author_name]\n\n def get_highlights_by_book(self, book_name):\n \"\"\"\n :param book_name: the name of the book\n :return: all the highlights in that book\n \"\"\"\n for author in self.authorlist:\n if book_name in self.highlights_dictionary[author]:\n return self.highlights_dictionary[author][book_name]\n\n return \"This book was not found. Please check the spelling.\"\n\n def save_dict(self, dictionary_file_to_save_full_path):\n \"\"\"\n :param dictionary_file_to_save_full_path: file path to save dictionary\n :return: None\n \"\"\"\n if not dictionary_file_to_save_full_path.lower().endswith(\".txt\"):\n raise Exception(\"Please make sure your file is a .txt file\")\n\n if os.path.isfile(dictionary_file_to_save_full_path):\n if os.stat(dictionary_file_to_save_full_path).st_size == 0:\n raise Exception(\"This file is empty\")\n\n with open(dictionary_file_to_save_full_path, 'w') as outfile:\n self.highlights_dictionary[\"date_of_last_highlight\"] = str(self.highlights_dictionary[\"date_of_last_highlight\"])\n json.dump(self.highlights_dictionary, outfile)\n\n def _get_last_modified_date(self):\n \"\"\"\n :return: the last time the dictionary was modified\n \"\"\"\n return self.highlights_dictionary[\"date_of_last_highlight\"]\n\n def _get_author_list(self):\n \"\"\"\n :return: the list of authors in the dictionary\n \"\"\"\n author_list = list(self.highlights_dictionary.keys())\n author_list.remove(\"date_of_last_highlight\")\n return author_list\n\n def _get_book_list(self):\n \"\"\"\n :return: all the books from all the authors in the dictionary\n \"\"\"\n all_books = []\n author_list = self._get_author_list()\n for author in author_list:\n all_books.extend((list(self.highlights_dictionary[author].keys())))\n\n return all_books\n\n def _extract_author_name(self, book_and_author_string):\n \"\"\"\n :param book_and_author_string: string that contains the book and the author's name\n :return: the author's name\n \"\"\"\n open_bracket = book_and_author_string.rfind(\"(\")\n close_bracket = book_and_author_string.rfind(\")\")\n\n if open_bracket == -1:\n return \"unknown\"\n\n author_full_name = book_and_author_string[open_bracket+1:close_bracket].split(',')\n if len(author_full_name) == 1:\n return 
author_full_name[0]\n else:\n first_name = author_full_name[1].strip()\n last_name = author_full_name[0].strip()\n return \" \".join([first_name, last_name])\n\n def _extract_date(self, date_line):\n \"\"\"\n :param date_line: the string that contains the date of the highlight in the kindle file\n :return: date the highlight was made\n \"\"\"\n index_pos = len(\"day, \") + date_line.find('day, ')\n return datetime.datetime.strptime(date_line[index_pos:], \"%d %B %Y %H:%M:%S\")\n\n def _extract_book_title(self, book_and_author_string):\n \"\"\"\n :param book_and_author_string: string that contains the book and the author's name\n :return: title of the book\n \"\"\"\n open_bracket = book_and_author_string.rfind(\"(\")\n\n if open_bracket == -1:\n return book_and_author_string.strip()\n else:\n return book_and_author_string[:open_bracket]\n\n def _test_author_empty(self, author_name):\n try:\n self.highlights_dictionary[author_name]\n except KeyError:\n self.highlights_dictionary[author_name] = {}\n\n def _test_book_empty(self, book_name, author_name):\n try:\n self.highlights_dictionary[author_name][book_name]\n except KeyError:\n self.highlights_dictionary[author_name][book_name] = []\n\n def _add_highlights(self, highlights, start_date):\n \"\"\"\n This function adds the clippings and the associated date to each book.\n It also updates the KindleDictionary object's attributes\n :param highlights: The highlights from the kindle file\n :param start_date: The date from which to start updating the highlights\n :return: None\n \"\"\"\n reset_line = \"==========\"\n line_number = 1\n book_and_author = \"\"\n clip = []\n date = start_date\n start_index = 0\n\n date_element_indexes = [index-1 for index, line in enumerate(highlights) if line == \"\"]\n for index in date_element_indexes:\n if self._extract_date(highlights[index]) > date:\n start_index = index\n break\n\n highlights = highlights[start_index-1:]\n for line in highlights:\n if line == reset_line:\n line_number = 0\n\n if not clip == []:\n book = self._extract_book_title(book_and_author).strip()\n author = self._extract_author_name(book_and_author).strip()\n self._test_author_empty(author)\n self._test_book_empty(book, author)\n self.highlights_dictionary[author][book].append(\" \".join(clip))\n clip = []\n\n if self.highlights_dictionary[\"date_of_last_highlight\"] < date:\n self.highlights_dictionary[\"date_of_last_highlight\"] = date\n\n if line_number == 1:\n book_and_author = line\n elif line_number == 2:\n date = self._extract_date(line)\n elif line_number >= 4:\n clip.append(line)\n else:\n pass\n\n line_number += 1\n\n self.booklist = self._get_book_list()\n self.authorlist = self._get_author_list()\n self.last_modified_date = self._get_last_modified_date()\n\n def __str__(self):\n return str(self.highlights_dictionary)\n\n def write_clippings_to_file(self, book_name, file_path=\"\"):\n \"\"\"\n This function writes all the clippings in the book specified to a text file with the title\n \n :param book_name: name of the book to search in the dictionary\n :param file_path: the location where the file will be saved excluding file name\n :return: None\n \"\"\"\n\n if file_path == \"\":\n directory = os.getcwd()\n file_name = book_name + \"_clippings.txt\"\n elif os.path.isdir(file_path):\n directory = file_path\n file_name = file_path + book_name + \"_clippings.txt\"\n else:\n raise Exception(\"{} is not a valid file path\".format(file_path))\n\n for author in self.authorlist:\n if book_name in self.highlights_dictionary[author]:\n 
file = open(file_name, 'w')\n for clip in self.highlights_dictionary[author][book_name]:\n file.write(clip + '\\n\\n')\n file.close()\n return \"Highlights saved to '{0}' with the file name '{1}'\".format(directory, file_name)\n\n return \"This book was not found. Please check the spelling.\"\n\n\ndef extract_highlights(file_name):\n \"\"\"\n This function extracts all the highlights you have made from the text file where the Amazon Kindle book highlights\n are kept\n :param file_name: text file with highlight\n :return: highlights\n \"\"\"\n if not file_name.lower().endswith(\".txt\"):\n raise Exception(\"Please make sure your file is a .txt file\")\n elif os.stat(file_name).st_size == 0:\n raise Exception(\"This file is empty\")\n else:\n file_contents = open(file_name, 'r', encoding=\"utf-8\")\n highlights = file_contents.readlines()\n file_contents.close()\n highlights = list(map(str.strip, highlights))\n return highlights\n","sub_path":"KindleHighlightsExtractor.py","file_name":"KindleHighlightsExtractor.py","file_ext":"py","file_size_in_byte":10248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"360494934","text":"from csv import reader, writer\nfrom sklearn.cluster import KMeans\n\nimport matplotlib.pyplot as plt\nimport numpy\n\ndef read_csv(name,nl=\"\\n\",dl=\",\"):\n cloud=[]\n with open(name,newline=nl) as csvfile:\n csvreader=reader(csvfile,delimiter=dl)\n for xx, yy, zz in csvreader:\n cloud.append([float(xx), float(yy), float(zz)])\n return cloud\n\ndef write_csv(file_name, cloud_points):\n with open(file_name, 'w', encoding='utf-8', newline='\\n') as csvfile:\n csvwriter=writer(csvfile)\n for p in cloud_points:\n csvwriter.writerow(p)\n\ncloud=read_csv(\"pointsdata.xyz\")\nclusterer=KMeans(n_clusters=3)\nX=numpy.array(cloud)\ny_pred=clusterer.fit_predict(X)\n\nred=y_pred==0\nblue=y_pred==1\ncyan=y_pred==2\n\nfig=plt.figure()\nax=fig.add_subplot(111, projection='3d')\nax.scatter(X[red,0],X[red,1],X[red,2],c='r')\nax.scatter(X[blue,0],X[blue,1],X[blue,2],c='b')\nax.scatter(X[cyan,0],X[cyan,1],X[cyan,2],c='c')\nplt.show()\n\n\nr=[]\nb=[]\nc=[]\n\nfor i in range(len(cloud)):\n if red[i]:\n r.append(cloud[i])\n elif blue[i]:\n b.append(cloud[i])\n elif cyan[i]:\n c.append(cloud[i])\n else:\n print(cloud[i])\n\nwrite_csv(\"cloud_r.xyz\",r)\nwrite_csv(\"cloud_b.xyz\",b)\nwrite_csv(\"cloud_c.xyz\",c)","sub_path":"K_means.py","file_name":"K_means.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"129742177","text":"'''\n zeroby0's code\n'''\n\n'''\nInterleaved\n===========\n\nFast and efficient methods to extract\nInterleaved CSI samples in PCAP files.\n\n~230k samples per second.\n\nSuitable for bcm43455c0 and bcm4339 chips.\n\nRequires Numpy.\n\nUsage\n-----\n\nimport decoders.interleaved as decoder\n\nsamples = decoder.read_pcap('path_to_pcap_file')\n\nBandwidth is inferred from the pcap file, but\ncan also be explicitly set:\nsamples = decoder.read_pcap('path_to_pcap_file', bandwidth=40)\n'''\n\n__all__ = [\n 'read_pcap'\n]\n\nimport os\nimport numpy as np\n\n# Indexes of Null and Pilot OFDM subcarriers\n# https://www.oreilly.com/library/view/80211ac-a-survival/9781449357702/ch02.html\nnulls = {\n 20: [x+32 for x in [\n -32, -31, -30, -29,\n 31, 30, 29, 0\n ]],\n\n 40: [x+64 for x in [\n -64, -63, -62, -61, -60, -59, -1, \n 63, 62, 61, 60, 59, 1, 0\n ]],\n\n 80: [x+128 for x in [\n 
-128, -127, -126, -125, -124, -123, -1,\n 127, 126, 125, 124, 123, 1, 0\n ]],\n\n 160: [x+256 for x in [\n -256, -255, -254, -253, -252, -251, -129, -128, -127, -5, -4, -3, -2, -1,\n 255, 254, 253, 252, 251, 129, 128, 127, 5, 4, 3, 2, 1, 0 \n ]]\n}\n\npilots = {\n 20: [x+32 for x in [\n -21, -7,\n 21, 7\n ]],\n\n 40: [x+64 for x in [\n -53, -25, -11, \n 53, 25, 11\n ]],\n\n 80: [x+128 for x in [\n -103, -75, -39, -11,\n 103, 75, 39, 11\n ]],\n\n 160: [x+256 for x in [\n -231, -203, -167, -139, -117, -89, -53, -25,\n 231, 203, 167, 139, 117, 89, 53, 25\n ]]\n}\n\nclass SampleSet(object):\n '''\n A helper class to contain data read\n from pcap files.\n '''\n def __init__(self, samples, bandwidth):\n self.mac, self.seq, self.css, self.csi = samples\n\n self.nsamples = self.csi.shape[0]\n self.bandwidth = bandwidth\n\n def get_mac(self, index):\n return self.mac[index*6: (index+1)*6]\n\n def get_seq(self, index):\n sc = int.from_bytes( #uint16: SC\n self.seq[index*2: (index+1)*2],\n byteorder = 'little',\n signed = False\n )\n fn = sc % 16 # Fragment Number\n sc = int((sc - fn)/16) # Sequence Number\n\n return (sc, fn)\n \n def get_css(self, index):\n return self.css[index*2: (index+1)*2]\n\n def get_csi(self, index, rm_nulls=False, rm_pilots=False):\n csi = self.csi[index].copy()\n if rm_nulls:\n csi[nulls[self.bandwidth]] = -10000 # changed by jji, default == 0\n if rm_pilots:\n csi[pilots[self.bandwidth]] = 0\n\n # Added by jji\n csi = np.delete(csi, np.where(csi == -10000), axis=0)\n\n return csi\n\n # Added by jji\n def get_subcarrier(self, index):\n\n sub = list()\n idx = index + 32\n\n for i in range(0, len(self.csi)):\n packet = self.csi[i].copy()\n sub.append(packet[idx])\n\n return np.array(sub)\n\n # Added by jji\n def get_all_csi(self, rm_nulls=False, rm_pilots=False):\n new_csi = list()\n for i in range(0, len(self.csi)):\n packet = self.csi[i].copy()\n\n if rm_nulls:\n packet[nulls[self.bandwidth]] = -10000 # changed by jji, default == 0\n if rm_pilots:\n packet[pilots[self.bandwidth]] = 0\n\n # Added by jji\n packet = np.delete(packet, np.where(packet == -10000), axis=0)\n\n new_csi.append(packet)\n\n\n return new_csi\n\n def print(self, index):\n # Mac ID\n macid = self.get_mac(index).hex()\n macid = ':'.join([macid[i:i+2] for i in range(0, len(macid), 2)])\n\n # Sequence control\n sc, fn = self.get_seq(index)\n\n # Core and Spatial Stream\n css = self.get_css(index).hex()\n\n print(\n f'''\nSample #{index}\n---------------\nSource Mac ID: {macid}\nSequence: {sc}.{fn}\nCore and Spatial Stream: 0x{css}\n '''\n )\n\n\ndef __find_bandwidth(incl_len):\n '''\n Determines bandwidth\n from length of packets.\n \n incl_len is the 4 bytes\n indicating the length of the\n packet in packet header\n https://wiki.wireshark.org/Development/LibpcapFileFormat/\n\n This function is immune to small\n changes in packet lengths.\n '''\n\n pkt_len = int.from_bytes(\n incl_len,\n byteorder='little',\n signed=False\n )\n\n # The number of bytes before we\n # have CSI data is 60. 
By adding\n # 128-60 to frame_len, bandwidth\n # will be calculated correctly even\n # if frame_len changes +/- 128\n # Some packets have zero padding.\n # 128 = 20 * 3.2 * 4\n nbytes_before_csi = 60\n pkt_len += (128 - nbytes_before_csi)\n\n bandwidth = 20 * int(\n pkt_len // (20 * 3.2 * 4)\n )\n\n return bandwidth\n\n\n\ndef __find_nsamples_max(pcap_filesize, nsub):\n '''\n Returns an estimate for the maximum possible number\n of samples in the pcap file.\n\n The size of the pcap file is divided by the size of\n a packet to calculate the number of samples. However,\n some packets have a padding of a few bytes, so the value\n returned is slightly higher than the actual number of\n samples in the pcap file.\n '''\n\n # PCAP global header is 24 bytes\n # PCAP packet header is 12 bytes\n # Ethernet + IP + UDP headers are 46 bytes\n # Nexmon metadata is 18 bytes\n # CSI is nsub*4 bytes long\n #\n # So each packet is 12 + 46 + 18 + nsub*4 bytes long\n nsamples_max = int(\n (pcap_filesize - 24) / (\n 12 + 46 + 18 + (nsub*4)\n )\n )\n\n return nsamples_max\n\ndef read_pcap(pcap_filepath, bandwidth=0, nsamples_max=0):\n '''\n Reads CSI samples from\n a pcap file. A SampleSet\n object is returned.\n\n Bandwidth and maximum samples\n are inferred from the pcap file by\n default, but you can also set them explicitly.\n '''\n\n pcap_filesize = os.stat(pcap_filepath).st_size\n with open(pcap_filepath, 'rb') as pcapfile:\n fc = pcapfile.read()\n \n if bandwidth == 0:\n bandwidth = __find_bandwidth(\n # 32-36 is where the incl_len\n # bytes for the first frame are\n # located.\n # https://wiki.wireshark.org/Development/LibpcapFileFormat/\n fc[32:36]\n )\n # Number of OFDM sub-carriers\n nsub = int(bandwidth * 3.2)\n\n if nsamples_max == 0:\n nsamples_max = __find_nsamples_max(pcap_filesize, nsub)\n\n # Preallocating memory\n mac = bytearray(nsamples_max * 6)\n seq = bytearray(nsamples_max * 2)\n css = bytearray(nsamples_max * 2)\n csi = bytearray(nsamples_max * nsub * 4)\n\n # Pointer to current location in file.\n # This is faster than using file.tell()\n # =24 to skip pcap global header\n ptr = 24\n\n nsamples = 0\n while ptr < pcap_filesize:\n # Read frame header\n # Skip over Eth, IP, UDP\n ptr += 8\n frame_len = int.from_bytes(\n fc[ptr: ptr+4],\n byteorder='little',\n signed=False\n )\n ptr += 50\n\n # 4 bytes: Magic Bytes @ 0 - 4\n # 6 bytes: Source Mac ID @ 4 - 10\n # 2 bytes: Sequence Number @ 10 - 12\n # 2 bytes: Core and Spatial Stream @ 12 - 14\n # 2 bytes: ChanSpec @ 14 - 16\n # 2 bytes: Chip Version @ 16 - 18\n # nsub*4 bytes: CSI Data @ 18 - 18 + nsub*4\n\n mac[nsamples*6: (nsamples+1)*6] = fc[ptr+4: ptr+10]\n seq[nsamples*2: (nsamples+1)*2] = fc[ptr+10: ptr+12]\n css[nsamples*2: (nsamples+1)*2] = fc[ptr+12: ptr+14]\n csi[nsamples*(nsub*4): (nsamples+1)*(nsub*4)] = fc[ptr+18: ptr+18 + nsub*4]\n\n ptr += (frame_len - 42)\n nsamples += 1\n\n # Convert CSI bytes to numpy array\n csi_np = np.frombuffer(\n csi,\n dtype = np.int16,\n count = nsub * 2 * nsamples\n )\n\n # Cast numpy 1-d array to matrix\n csi_np = csi_np.reshape((nsamples, nsub * 2))\n\n # Convert csi into complex numbers\n csi_cmplx = np.fft.fftshift(\n csi_np[:nsamples, ::2] + 1.j * csi_np[:nsamples, 1::2], axes=(1,)\n )\n\n return SampleSet(\n (mac,\n seq,\n css,\n csi_cmplx),\n bandwidth\n )\n\nif __name__ == \"__main__\":\n samples = 
read_pcap('pcap_files/output-40.pcap')\n","sub_path":"decoders/interleaved.py","file_name":"interleaved.py","file_ext":"py","file_size_in_byte":8463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"582981685","text":"import sqlite3\nimport telebot\n\nfrom telebot import types\nfrom telebot.types import InlineKeyboardButton, InlineKeyboardMarkup\n\nbot = telebot.TeleBot(\"1757847078:AAF9MlsiRs3Z1ChLzLJfrxnaA4sIPjAOIk4\")\nnewUser = True\n\n\ndef start(message):\n conn = sqlite3.connect('DUral.db')\n cur = conn.cursor()\n\n global newUser\n\n setUserID = str(message.from_user.id)\n setUserName = str(message.from_user.username)\n\n searchID = \"SELECT COUNT(DISTINCT id_user) FROM users WHERE id_user = '\" + setUserID + \"'; \"\n\n cur.execute(searchID)\n results = cur.fetchone()\n # print(results)\n\n for i in results:\n if i == 0:\n setUserData = \"INSERT INTO users VALUES ('\" + setUserID + \"', '\" + setUserName + \"', NULL, NULL, NULL, \" \\\n \"NULL); \"\n cur.execute(setUserData)\n conn.commit()\n else:\n newUser = False\n\n \"\"\"bot.send_message(message.chat.id, \"Привет, я ... и я помогу тебе разобраться в Урале\\n\"\n \"спроси меня где находиться Чердынь\\n\"\n \"или где купить футболку как у Васи\\n\"\n \"и постораюсь тебе помочь\"\n )\"\"\"\n\n send_mess = (\"Привет, я ... и я помогу тебе разобраться в Урале\\n\"\n \"проси меня где находиться Чердынь\\n\"\n \"или расскажи про каменный город\\n\"\n \"и постораюсь тебе помочь\")\n\n user_markup = telebot.types.ReplyKeyboardMarkup(True, False)\n user_markup.row('Где находится Хохловка')\n user_markup.row('Расскажи про Каменный город')\n bot.send_message(message.from_user.id, send_mess, reply_markup=user_markup)\n","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"391931837","text":"# -*- coding: utf-8 -*-\nfrom pathlib import Path\n\nimport mock\nimport pytest\nfrom mock import call\nfrom pydantic import ValidationError\n\nfrom as3ninja.settings import NinjaSettings, NinjaSettingsLoader\n\n\nclass Test_NinjaSettings:\n @staticmethod\n def test_required_attributes():\n njs = NinjaSettings()\n assert \"GITGET_TIMEOUT\" in njs.dict()\n assert \"GITGET_SSL_VERIFY\" in njs.dict()\n assert \"GITGET_PROXY\" in njs.dict()\n assert \"SCHEMA_BASE_PATH\" in njs.dict()\n assert \"SCHEMA_GITHUB_REPO\" in njs.dict()\n assert \"VAULT_SSL_VERIFY\" in njs.dict()\n\n @staticmethod\n def test_forbid_extra_attributes():\n with pytest.raises(ValidationError):\n NinjaSettings(EXTRA_ATTRIBUTES_ARE_FORBIDDEN=True)\n\n @staticmethod\n def test_attributes_from_env(monkeypatch):\n \"\"\"\n Test environment variables override defaults\n \"\"\"\n monkeypatch.setenv(\"AS3N_GITGET_TIMEOUT\", \"1234\", prepend=False)\n monkeypatch.setenv(\"AS3N_GITGET_SSL_VERIFY\", \"False\", prepend=False)\n monkeypatch.setenv(\"AS3N_GITGET_PROXY\", \"http://proxy:8080\", prepend=False)\n monkeypatch.setenv(\n \"AS3N_SCHEMA_GITHUB_REPO\", \"SCHEMA_GITHUB_REPO\", prepend=False\n )\n monkeypatch.setenv(\"AS3N_VAULT_SSL_VERIFY\", \"False\", prepend=False)\n\n njs = NinjaSettings()\n assert njs.dict()[\"GITGET_TIMEOUT\"] == 1234\n assert njs.dict()[\"GITGET_SSL_VERIFY\"] is False\n assert njs.dict()[\"GITGET_PROXY\"] == \"http://proxy:8080\"\n assert njs.dict()[\"SCHEMA_GITHUB_REPO\"] == \"SCHEMA_GITHUB_REPO\"\n assert njs.dict()[\"VAULT_SSL_VERIFY\"] is False\n\n\nclass 
Test_NinjaSettingsLoader_methods:\n @staticmethod\n def test_save_config(mocker):\n \"\"\"\n Test config write functionality\n Also: Must not write RUNTIME_CONFIG keys\n \"\"\"\n mocked_open = mocker.patch(\"builtins.open\", mock.mock_open())\n mocked_self = mock.Mock\n mocked_self.AS3NINJA_CONFIGFILE_NAME = \"as3ninja.config.json\"\n mocked_self.RUNTIME_CONFIG = [\"SCHEMA_BASE_PATH\"]\n mocked_self._settings = mock.Mock()\n mocked_self._settings.dict.return_value = {\n \"FOO\": \"BAR\",\n \"SCHEMA_BASE_PATH\": \"SCHEMA_BASE_PATH\",\n }\n NinjaSettingsLoader._save_config(mocked_self)\n\n mocked_open_handle = mocked_open()\n mocked_open_handle.write.assert_called_once_with(\n '{\\n \"FOO\": \"BAR\"\\n}'\n ) # check this is written to the config file\n\n @staticmethod\n def test_detect_schema_base_path(mocker):\n mP_exists = mocker.patch.object(Path, \"exists\", return_value=False)\n mP_home = mocker.patch.object(Path, \"home\")\n mP_mkdir = mocker.patch.object(Path, \"mkdir\")\n\n # returns str (a path)\n assert isinstance(NinjaSettingsLoader._detect_schema_base_path(), str)\n\n # Path.exists() called once\n assert mP_exists.call_count == 1\n\n # Path.home() called twice\n assert mP_home.call_count == 2\n\n # Path.mkdir() called twice with the options listed\n mP_mkdir.assert_has_calls(\n [\n call(mode=448, parents=True, exist_ok=True),\n call(mode=448, parents=True, exist_ok=True),\n ]\n )\n\n @staticmethod\n def test_detect_schema_base_path__exists(mocker):\n mP_exists = mocker.patch.object(Path, \"exists\", return_value=True)\n\n # returns str (a path)\n _schema_base_path = NinjaSettingsLoader._detect_schema_base_path()\n assert isinstance(_schema_base_path, str)\n assert NinjaSettingsLoader.AS3_SCHEMA_DIRECTORY in _schema_base_path\n\n # Path.exists() called once\n assert mP_exists.call_count == 1\n\n @staticmethod\n def test_detect_config_file__noConfigFile(mocker):\n mP_is_file = mocker.patch.object(\n Path, \"is_file\", return_value=False\n ) # No config file exists at all\n mocker.patch.object(Path, \"home\")\n mP_mkdir = mocker.patch.object(Path, \"mkdir\")\n mP_touch = mocker.patch.object(Path, \"touch\")\n\n # returns None\n assert NinjaSettingsLoader._detect_config_file() is None\n\n # Path.is_file() called twice\n assert mP_is_file.call_count == 2\n\n # Path.touch() called with the options listed\n mP_touch.assert_has_calls([call(mode=384, exist_ok=True)])\n\n # Path.mkdir() called with the options listed\n mP_mkdir.assert_has_calls([call(mode=448, parents=True, exist_ok=True)])\n\n @staticmethod\n def test_detect_config_file__exists(mocker):\n mP_is_file = mocker.patch.object(\n Path, \"is_file\", return_value=True\n ) # config file found on first try\n mocker.patch.object(Path, \"home\")\n mP_mkdir = mocker.patch.object(Path, \"mkdir\")\n mP_touch = mocker.patch.object(Path, \"touch\")\n\n # returns config file\n _cfgfile = NinjaSettingsLoader._detect_config_file()\n assert isinstance(_cfgfile, str)\n assert \"as3ninja\" in _cfgfile\n\n # Path.is_file() called once only (file found)\n assert mP_is_file.call_count == 1\n\n # Path.touch() not called\n assert mP_touch.call_count == 0\n\n # Path.mkdir() not called\n assert mP_mkdir.call_count == 0\n\n\nclass Test_NinjaSettingsLoader:\n @staticmethod\n def test_no_config_file(mocker):\n \"\"\"\n Test code path for non-existing config file in __init__\n \"\"\"\n mocker.patch.object(\n NinjaSettingsLoader, \"_detect_config_file\", return_value=None\n )\n mocker.patch.object(NinjaSettingsLoader, \"_save_config\")\n\n NSL = 
NinjaSettingsLoader()\n\n # check that NinjaSettings is returned\n assert isinstance(NSL(), NinjaSettings)\n\n @staticmethod\n def test_config_file_exists(mocker):\n \"\"\"\n Test code path for existing config file in __init__\n \"\"\"\n mocker.patch.object(\n NinjaSettingsLoader,\n \"_detect_config_file\",\n return_value=\"path/to/config.file\",\n )\n mocker.patch.object(NinjaSettingsLoader, \"_detect_schema_base_path\")\n mocker.patch(\"as3ninja.settings.NinjaSettings\")\n mocked_deserialize = mocker.patch(\"as3ninja.settings.deserialize\")\n\n NSL = NinjaSettingsLoader()\n _ = NSL()\n\n # deserialize called with return value from _detect_config_file\n mocked_deserialize.assert_called_once_with(\"path/to/config.file\")\n","sub_path":"tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"180546204","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport grpc\nimport time\n# import dronecore_pb2 as dc\n# import dronecore_pb2_grpc\nimport action_pb2 as dc_action\nimport action_pb2_grpc\nfrom google.protobuf import empty_pb2\n\n\ndef run():\n channel = grpc.insecure_channel('0.0.0.0:50051')\n action_stub = action_pb2_grpc.ActionRPCStub(channel)\n\n arm_result = action_stub.Arm(empty_pb2.Empty())\n if arm_result.result == dc_action.ActionResult.SUCCESS:\n print(\"arming ok\")\n else:\n print(\"arming failed: \" + arm_result.result_str)\n\n time.sleep(2)\n\n takeoff_result = action_stub.TakeOff(empty_pb2.Empty())\n if takeoff_result.result == dc_action.ActionResult.SUCCESS:\n print(\"takeoff ok\")\n else:\n print(\"takeoff failed: \" + takeoff_result.result_str)\n\n time.sleep(5)\n\n land_result = action_stub.Land(empty_pb2.Empty())\n if land_result.result == dc_action.ActionResult.SUCCESS:\n print(\"landing ok\")\n else:\n print(\"landing failed: \" + land_result.result_str)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"grpc/python_client/sync_client.py","file_name":"sync_client.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"104844693","text":"from flask import g, request\\\n\t, url_for, redirect, flash\nfrom ... import session, updateUser, UserProfile\\\n\t, getUserById, login_required , getEmailStatus\n\nfrom . 
import mod_profile\n\n@mod_profile.route('/users/email/activate')\n@login_required\ndef activate_email():\n\tg.kwargs['pagetitle'] = 'Email verified'\n\ttoken = request.args.get('code')\n\tif not token:\n\t\tflash(\"Make sure you came here by following reset link sent to your email.\")\n\t\treturn redirect(url_for('home.landing_page'))\n\t# check if the token is provided as username\n\tuser_id = UserProfile.verify_token(token)\n\tif user_id:\n\t\tuser = getUserById(user_id)\n\t\tsts_obj = getEmailStatus(g.user.id)\n\t\tif user.email or sts_obj.status_code == '1':\n\t\t\tflash(\"Email is already verified.\")\n\t\t\treturn redirect(url_for('home.landing_page'))\n\t\tparams = dict(user = user)\n\t\ttry:\n\t\t\tparams['updatetype']='vemail'\n\t\t\tuser_id = updateUser(**params)\n\t\texcept Exception as e:\n\t\t\tsession.rollback()\n\t\t\tparams['dberror'] = \"DB Error-\" + str(e)\n\t\t\tflash(\"Internal DB Error: \" + str(e))\n\t\t\treturn redirect(url_for('home.landing_page'))\n\t\tflash(\"Your email is verified.\")\n\telse:\n\t\tflash(\"Activation link is no more valid, please request again.\")\n\treturn redirect(url_for('home.landing_page'))\n","sub_path":"app/views/mod_profile/activate_email.py","file_name":"activate_email.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"166027274","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\np = 0.01\nb = 500 * 8000\nN = math.pow(10, -9)\n\nmodevalue = np.sqrt(2 / np.pi) * math.pow(10, -3)\n\nH = np.random.rayleigh(modevalue)\nH2 = H/3\n\nW = math.pow(10, 6)\n\nee= []\nee2 = []\npp= []\ntt= []\nwhile p < 1:\n r = W * math.log2(1 + p * H * H / N)\n t = b / r\n e = t * p\n ee.append(e/1)\n pp.append(p)\n tt.append(t/2)\n p += 0.01\n\nplt.plot(pp, ee)\nplt.plot(pp, tt)\n\nplt.show()","sub_path":"MOBIHOC/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"264541865","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 6 22:44:50 2020\r\n\r\n@author: kbhandari\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom datetime import timedelta\r\nimport os\r\nimport gc\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\nRunType = 'Model'\r\n\r\nwd = \"C:/My Desktop/Hackathon/\"\r\nos.chdir(wd)\r\n\r\nraw=pd.read_csv('train.csv')\r\nraw['InvoiceDate']=pd.to_datetime(raw['InvoiceDate'])\r\nraw.dropna(inplace=True)\r\nraw.head()\r\n\r\ndef generate_universe(raw_data, RunType='Model'):\r\n sub=pd.read_csv('submission.csv')\r\n target=[x[7:] for x in sub.columns[1:]]\r\n \r\n #if RunType.upper() == 'MODEL':\r\n # rd=raw['InvoiceDate'].max()-timedelta(days=30)\r\n #else:\r\n # rd=raw['InvoiceDate'].max()\r\n \r\n if RunType.upper() == 'MODEL':\r\n raw.sort_values(['InvoiceDate'],ascending=False,inplace=True)\r\n raw['rank']=raw.groupby(['customer_id'])['InvoiceDate'].rank(ascending=0,method='dense')\r\n\r\n # DV Universe\r\n dv = raw[(raw['item_id'].isin(target)) & (raw['rank']<=1) & (raw['item_qty']>0)]\r\n dv.drop('rank', axis=1, inplace=True)\r\n dv.loc[:, 'Target'] = 1\r\n \r\n dv=dv.groupby(['customer_id','item_id']).agg({'Target':'count'}).unstack()\r\n for i in dv.columns:\r\n dv[i]=dv[i].apply(lambda x: 1 if x>0 else 0)\r\n dv.columns = dv.columns.get_level_values(1)\r\n dv.reset_index(level=['customer_id'],inplace=True)\r\n \r\n dv_df = pd.melt(dv, id_vars 
=['customer_id'], value_vars = target, var_name='item_id', value_name=\"Target\")\r\n \r\n if RunType.upper() == 'MODEL':\r\n # IV Universe\r\n iv_full = raw[raw['rank']>1]\r\n iv_full.drop('rank', axis=1, inplace=True)\r\n else:\r\n iv_full = raw.copy()\r\n \r\n # Net Price \r\n iv_full['Net_Price'] = iv_full['item_amt'] * iv_full['item_qty'] \r\n \r\n # DL Feature Creation\r\n iv_full['categorical_sequence_items'] = 'ITEM_' + iv_full['item_id']\r\n agg_df = iv_full.sort_values('InvoiceDate', ascending=False).groupby(['customer_id']).agg({\r\n 'categorical_sequence_items': lambda s: ', '.join(s)\r\n }).reset_index()\r\n \r\n impp = iv_full.groupby(['customer_id', 'item_id'],as_index=False)['transaction_id'].count().rename(columns={'transaction_id': 'count'}).sort_values(['count'], ascending=False)\r\n impp['rank'] = impp.groupby(['customer_id'], as_index=False)['count'].rank(ascending=0,method='dense')\r\n impp = impp[impp['rank']==1].drop(['rank'],axis=1)\r\n impp.drop_duplicates(subset=['customer_id'], keep='first', inplace=True)\r\n impp.columns = ['customer_id', 'IMPP', 'IMPP_count']\r\n \r\n # Reference Dates\r\n r=iv_full.groupby(['customer_id'],as_index=False)['InvoiceDate'].max().rename(columns={'InvoiceDate':'rd'})\r\n iv_full=pd.merge(iv_full,r,how='left',on=['customer_id'])\r\n iv_full['rd1']=iv_full['rd']-timedelta(days=1)\r\n iv_full['rd7']=iv_full['rd']-timedelta(days=7)\r\n iv_full['rd30']=iv_full['rd']-timedelta(days=30)\r\n iv_full['rd60']=iv_full['rd']-timedelta(days=60)\r\n iv_full['rd90']=iv_full['rd']-timedelta(days=90)\r\n iv_full['rd360']=iv_full['rd']-timedelta(days=360)\r\n iv_full['rd720']=iv_full['rd']-timedelta(days=720)\r\n \r\n def cust_freq(dataset,reference_date,f,g,l,k):\r\n return dataset[dataset['InvoiceDate']>=reference_date].groupby(['customer_id'],as_index=False).agg({'transaction_id':'count','item_amt':'mean','item_qty':'sum','Net_Price': 'sum'}).rename(columns={'transaction_id':f,'item_amt':g,'item_qty':l,'Net_Price':k})\r\n \r\n c1=cust_freq(iv_full,iv_full['rd1'],'CF1','CM1','CQ1','CN1')\r\n c7=cust_freq(iv_full,iv_full['rd7'],'CF7','CM7','CQ7','CN7')\r\n c30=cust_freq(iv_full,iv_full['rd30'],'CF30','CM30','CQ30','CN30')\r\n c60=cust_freq(iv_full,iv_full['rd60'],'CF60','CM60','CQ60','CN60')\r\n c90=cust_freq(iv_full,iv_full['rd90'],'CF90','CM90','CQ90','CN90')\r\n c360=cust_freq(iv_full,iv_full['rd360'],'CF360','CM360','CQ360','CN360')\r\n c720=cust_freq(iv_full,iv_full['rd720'],'CF720','CM720','CQ720','CN720')\r\n \r\n # Last 3 transaction average, stdev customer level\r\n iv_full['C_PrevInvoiceDate'] = iv_full.sort_values(['InvoiceDate'],ascending=True).groupby(['customer_id'])['InvoiceDate'].shift(1)\r\n iv_full['C_T2InvoiceDate'] = iv_full.sort_values(['InvoiceDate'],ascending=True).groupby(['customer_id'])['InvoiceDate'].shift(2)\r\n iv_full['C_T3InvoiceDate'] = iv_full.sort_values(['InvoiceDate'],ascending=True).groupby(['customer_id'])['InvoiceDate'].shift(3)\r\n \r\n iv_full['C_DayDiff'] = (iv_full['InvoiceDate'] - iv_full['C_PrevInvoiceDate']).dt.days\r\n iv_full['C_DayDiff2'] = (iv_full['InvoiceDate'] - iv_full['C_T2InvoiceDate']).dt.days\r\n iv_full['C_DayDiff3'] = (iv_full['InvoiceDate'] - iv_full['C_T3InvoiceDate']).dt.days\r\n \r\n c_tx_day_diff = iv_full.groupby(['customer_id']).agg({'C_DayDiff': ['mean', 'std'], 'C_DayDiff2': ['mean', 'std'], 'C_DayDiff3': ['mean', 'std']}).reset_index()\r\n c_tx_day_diff.columns = ['customer_id', 'C_DayDiffMean', 'C_DayDiffStd', 'C_DayDiff2Mean', 'C_DayDiff2Std', 'C_DayDiff3Mean', 
'C_DayDiff3Std']\r\n \r\n # Exclude Non Target Items\r\n iv_full = iv_full[(iv_full['item_id'].isin(target))]\r\n \r\n # TFIDF Feature Creation\r\n iv_full['tfidf_item'] = 'ITEM_' + iv_full['item_id']\r\n agg_df_2 = iv_full.sort_values('InvoiceDate', ascending=False).groupby(['customer_id']).agg({\r\n 'tfidf_item': lambda s: ', '.join(s)\r\n }).reset_index()\r\n \r\n # Reference Dates\r\n iv_full.drop('rd', axis=1, inplace=True)\r\n r=iv_full.groupby(['customer_id'],as_index=False)['InvoiceDate'].max().rename(columns={'InvoiceDate':'rd'})\r\n iv_full=pd.merge(iv_full,r,how='left',on=['customer_id'])\r\n iv_full['rd1']=iv_full['rd']-timedelta(days=1)\r\n iv_full['rd7']=iv_full['rd']-timedelta(days=7)\r\n iv_full['rd30']=iv_full['rd']-timedelta(days=30)\r\n iv_full['rd60']=iv_full['rd']-timedelta(days=60)\r\n iv_full['rd90']=iv_full['rd']-timedelta(days=90)\r\n iv_full['rd360']=iv_full['rd']-timedelta(days=360)\r\n iv_full['rd720']=iv_full['rd']-timedelta(days=720)\r\n \r\n def freq(dataset,reference_date,f,g,l,k):\r\n return dataset[dataset['InvoiceDate']>=reference_date].groupby(['customer_id','item_id'],as_index=False).agg({'transaction_id':'count','item_amt':'sum','item_qty':'sum', 'Net_Price': 'sum'}).rename(columns={'transaction_id':f,'item_amt':g,'item_qty':l, 'Net_Price':k})\r\n \r\n f1=freq(iv_full,iv_full['rd1'],'F1','M1','Q1','N1')\r\n f7=freq(iv_full,iv_full['rd7'],'F7','M7','Q7','N7')\r\n f30=freq(iv_full,iv_full['rd30'],'F30','M30','Q30','N30')\r\n f60=freq(iv_full,iv_full['rd60'],'F60','M60','Q60','N60')\r\n f90=freq(iv_full,iv_full['rd90'],'F90','M90','Q90','N90')\r\n f360=freq(iv_full,iv_full['rd360'],'F360','M360','Q360','N360')\r\n f720=freq(iv_full,iv_full['rd720'],'F720','M720','Q720','N720') \r\n \r\n # Last 3 transaction average, stdev item level\r\n iv_full['PrevInvoiceDate'] = iv_full.sort_values(['InvoiceDate'],ascending=True).groupby(['customer_id','item_id'])['InvoiceDate'].shift(1)\r\n iv_full['T2InvoiceDate'] = iv_full.sort_values(['InvoiceDate'],ascending=True).groupby(['customer_id','item_id'])['InvoiceDate'].shift(2)\r\n iv_full['T3InvoiceDate'] = iv_full.sort_values(['InvoiceDate'],ascending=True).groupby(['customer_id','item_id'])['InvoiceDate'].shift(3)\r\n \r\n iv_full['DayDiff'] = (iv_full['InvoiceDate'] - iv_full['PrevInvoiceDate']).dt.days\r\n iv_full['DayDiff2'] = (iv_full['InvoiceDate'] - iv_full['T2InvoiceDate']).dt.days\r\n iv_full['DayDiff3'] = (iv_full['InvoiceDate'] - iv_full['T3InvoiceDate']).dt.days\r\n \r\n tx_day_diff = iv_full.groupby(['customer_id','item_id']).agg({'DayDiff': ['mean', 'std'], 'DayDiff2': ['mean', 'std'], 'DayDiff3': ['mean', 'std']}).reset_index()\r\n tx_day_diff.columns = ['customer_id', 'item_id', 'DayDiffMean', 'DayDiffStd', 'DayDiff2Mean', 'DayDiff2Std', 'DayDiff3Mean', 'DayDiff3Std'] \r\n \r\n \r\n \r\n f=pd.merge(pd.merge(pd.merge(pd.merge(pd.merge(pd.merge(f1,f7,how='outer',on=['customer_id','item_id']),f30,how='outer',on=['customer_id','item_id']),f60,how='outer',on=['customer_id','item_id']),f90,how='outer',on=['customer_id','item_id']),f360,how='outer',on=['customer_id','item_id']),f720,how='outer',on=['customer_id','item_id'])\r\n f = pd.merge(f, tx_day_diff, how='outer', on=['customer_id','item_id'])\r\n \r\n c=pd.merge(pd.merge(pd.merge(pd.merge(pd.merge(pd.merge(c1,c7,how='outer',on=['customer_id']),c30,how='outer',on=['customer_id']),c60,how='outer',on=['customer_id']),c90,how='outer',on=['customer_id']),c360,how='outer',on=['customer_id']),c720,how='outer',on=['customer_id'])\r\n \r\n# cids = 
pd.DataFrame(raw.customer_id.unique(), columns=['customer_id'])\r\n \r\n # c_r_l=data.groupby(['customer_id','item_id'],as_index=False).agg({'InvoiceDate':[lambda x: (rd-x.max()).days]})\r\n c_r_f = iv_full.groupby(['customer_id','item_id'],as_index=False).agg({'InvoiceDate':[lambda x: (x.max()-x.min()).days]})\r\n \r\n # c_r_l.columns=c_r_l.columns.map('_'.join).str.strip('_')\r\n c_r_f.columns=c_r_f.columns.map('_'.join).str.strip('_')\r\n \r\n # c_r_l.rename(columns={'InvoiceDate_':'R_L'},inplace=True)\r\n c_r_f.rename(columns={'InvoiceDate_':'R_F'},inplace=True)\r\n \r\n # c_T_L=data.groupby(['customer_id'],as_index=False).agg({'InvoiceDate':[lambda x: (rd-x.max()).days]})\r\n c_T_F = iv_full.groupby(['customer_id'],as_index=False).agg({'InvoiceDate':[lambda x: (x.max()-x.min()).days]})\r\n \r\n # c_T_L.columns=c_T_L.columns.map('_'.join).str.strip('_')\r\n c_T_F.columns=c_T_F.columns.map('_'.join).str.strip('_')\r\n \r\n # c_T_L.rename(columns={'InvoiceDate_':'T_L'},inplace=True)\r\n c_T_F.rename(columns={'InvoiceDate_':'T_F'},inplace=True)\r\n \r\n iv_df_ci = pd.merge(f,c_r_f,how='outer',on=['customer_id','item_id']) \r\n iv_df_ci.fillna(999 ,inplace=True)\r\n \r\n iv_df_c = pd.merge(pd.merge(c_T_F, c,how='outer',on=['customer_id']),c_tx_day_diff,how='outer',on=['customer_id']) \r\n iv_df_c = pd.merge(pd.merge(pd.DataFrame(raw['customer_id'].unique(), columns=['customer_id']),iv_df_c,how='outer',on=['customer_id']), impp, how=\"outer\", on=['customer_id'])\r\n iv_df_c.fillna(999 ,inplace=True)\r\n \r\n iv_df_c = pd.merge(pd.merge(iv_df_c, agg_df, how='outer', on=['customer_id']), agg_df_2, how='outer', on=['customer_id']).fillna('')\r\n \r\n \r\n if RunType.upper() == 'MODEL':\r\n iv_df = pd.merge(iv_df_ci, dv_df, how='right',on=['customer_id', 'item_id'])\r\n iv_df.fillna(999, inplace=True)\r\n iv_df = pd.merge(iv_df, iv_df_c, how='left',on=['customer_id'])\r\n iv_df['categorical_sequence_items'].fillna('' ,inplace=True)\r\n iv_df.fillna(999 ,inplace=True)\r\n else:\r\n # Cross Join\r\n items = pd.DataFrame(iv_df_ci.item_id.unique(), columns=['item_id'])\r\n cids = pd.DataFrame(iv_df_ci.customer_id.unique(), columns=['customer_id'])\r\n items.loc[:,'constant'] = 1\r\n cids.loc[:,'constant'] = 1\r\n item_cid_combinations = pd.merge(cids, items, how='outer', on=['constant'])\r\n iv_df_ci = pd.merge(iv_df_ci, item_cid_combinations, how='outer', on=['customer_id', 'item_id'])\r\n iv_df = pd.merge(iv_df_ci, iv_df_c, how='left',on=['customer_id'])\r\n iv_df['categorical_sequence_items'].fillna('' ,inplace=True)\r\n iv_df.fillna(999 ,inplace=True)\r\n \r\n print(iv_df.head())\r\n return iv_df\r\n\r\n\r\nuniv_1 = generate_universe(raw, RunType='Model')\r\n\r\nraw.sort_values(['InvoiceDate'],ascending=False,inplace=True)\r\nraw['rank']=raw.groupby(['customer_id'])['InvoiceDate'].rank(ascending=0,method='dense')\r\nraw=raw[raw['rank']>1]\r\nraw.drop(\"rank\", axis=1, inplace=True)\r\nuniv_2= generate_universe(raw, RunType='Model')\r\n\r\nraw.sort_values(['InvoiceDate'],ascending=False,inplace=True)\r\nraw['rank']=raw.groupby(['customer_id'])['InvoiceDate'].rank(ascending=0,method='dense')\r\nraw=raw[raw['rank']>1]\r\nraw.drop(\"rank\", axis=1, inplace=True)\r\nuniv_3 = generate_universe(raw, RunType='Model')\r\n\r\nraw.sort_values(['InvoiceDate'],ascending=False,inplace=True)\r\nraw['rank']=raw.groupby(['customer_id'])['InvoiceDate'].rank(ascending=0,method='dense')\r\nraw=raw[raw['rank']>1]\r\nraw.drop(\"rank\", axis=1, inplace=True)\r\nuniv_4 = generate_universe(raw, 
RunType='Model')\r\n\r\nraw.sort_values(['InvoiceDate'],ascending=False,inplace=True)\r\nraw['rank']=raw.groupby(['customer_id'])['InvoiceDate'].rank(ascending=0,method='dense')\r\nraw=raw[raw['rank']>1]\r\nraw.drop(\"rank\", axis=1, inplace=True)\r\nuniv_5 = generate_universe(raw, RunType='Model')\r\n\r\nraw.sort_values(['InvoiceDate'],ascending=False,inplace=True)\r\nraw['rank']=raw.groupby(['customer_id'])['InvoiceDate'].rank(ascending=0,method='dense')\r\nraw=raw[raw['rank']>1]\r\nraw.drop(\"rank\", axis=1, inplace=True)\r\nuniv_6 = generate_universe(raw, RunType='Model')\r\n\r\ncols_to_keep = univ_1.columns.intersection(univ_2.columns).intersection(univ_3.columns).intersection(univ_4.columns)#.intersection(univ_5.columns).intersection(univ_6.columns)\r\nuniv_1 = univ_1[cols_to_keep]\r\nuniv_2 = univ_2[cols_to_keep]\r\nuniv_3 = univ_3[cols_to_keep]\r\nuniv_4 = univ_4[cols_to_keep]\r\nuniv_5 = univ_5[cols_to_keep]\r\nuniv_6 = univ_6[cols_to_keep]\r\n\r\ndata = pd.concat([univ_1, univ_2, univ_3, univ_4, univ_5, univ_6], axis=0)\r\n#data = data[data.columns[data.isnull().mean() < 0.95]]\r\ndata.fillna(999 ,inplace=True)\r\ndel univ_1, univ_2, univ_3, univ_4, univ_5, univ_6\r\ngc.collect()\r\n\r\nvectorizer = TfidfVectorizer()\r\ntf_idf = vectorizer.fit_transform(data.tfidf_item.values)\r\ntf_idf_df = pd.DataFrame(tf_idf.toarray(), columns=vectorizer.get_feature_names())\r\ntf_idf_df = tf_idf_df.add_prefix('tfidf_')\r\ndata.drop('tfidf_item', axis=1, inplace=True)\r\n\r\ndata.reset_index(drop=True, inplace=True)\r\ntf_idf_df.reset_index(drop=True, inplace=True)\r\ndata = pd.concat([data, tf_idf_df], axis=1)\r\n\r\n# Don't use this line for deep learning\r\n#items = pd.get_dummies(data['item_id'], prefix='Item_')\r\n#data = pd.concat([data, items], axis=1)\r\n\r\ndata.to_csv(\"DL_MDL_UNIV.csv\", header=True, index=False)\r\n\r\n# Scoring Univ\r\nraw = pd.read_csv('train.csv')\r\nsub = pd.read_csv('submission.csv')\r\nraw['InvoiceDate']=pd.to_datetime(raw['InvoiceDate'])\r\nraw.dropna(inplace=True)\r\nraw.head()\r\n\r\ntest = generate_universe(raw, RunType='Score')\r\n\r\ntf_idf = vectorizer.transform(test.tfidf_item.values)\r\ntf_idf_df = pd.DataFrame(tf_idf.toarray(), columns=vectorizer.get_feature_names())\r\ntf_idf_df = tf_idf_df.add_prefix('tfidf_')\r\ntest.drop('tfidf_item', axis=1, inplace=True)\r\n\r\ntest.reset_index(drop=True, inplace=True)\r\ntf_idf_df.reset_index(drop=True, inplace=True)\r\ntest = pd.concat([test, tf_idf_df], axis=1)\r\n\r\n# Don't use this line for deep learning\r\n#items = pd.get_dummies(test['item_id'], prefix='Item_')\r\n#test = pd.concat([test, items], axis=1)\r\n\r\ncols_to_keep = [col for col in data.columns if 'Target' not in col]\r\ntest = test[cols_to_keep]\r\ntest = test[test['customer_id'].isin(sub.customer_id)]\r\ntest.fillna(999, inplace=True)\r\ntest.to_csv(\"DL_MDL_UNIV_SCORE.csv\", header=True, index=False)\r\n\r\n\r\na = raw.groupby(['customer_id', 'item_id'],as_index=False)['transaction_id'].count().rename(columns={'transaction_id': 'count'}).sort_values(['count'], ascending=False)\r\na['rank'] = a.groupby(['customer_id'], as_index=False)['count'].rank(ascending=0,method='dense')\r\na = a[a['rank']==1].drop(['rank'],axis=1)\r\na.drop_duplicates(subset=['customer_id'], keep='first', inplace=True)\r\n\r\n\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn import preprocessing\r\nfrom 
sklearn.metrics import roc_auc_score\r\nfrom scipy.sparse import vstack, csr_matrix, save_npz, load_npz\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_selection import SelectFromModel\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\r\nfrom sklearn.linear_model import LogisticRegressionCV\r\n\r\ndef warn(*args, **kwargs):\r\n pass\r\nimport warnings\r\nwarnings.warn = warn\r\n\r\n\r\nif \"customer_id\" in data.columns:\r\n data.drop(\"customer_id\", axis=1, inplace=True)\r\nif \"item_id\" in data.columns:\r\n data_item_id = pd.DataFrame(data['item_id'])\r\n test_item_id = pd.DataFrame(test['item_id'])\r\n data.drop(\"item_id\", axis=1, inplace=True)\r\n \r\ndependent_variables = [col for col in data.columns if 'Target' in col]\r\n \r\nparameters = {\r\n'solver': ['lbfgs'],\r\n'C': [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.05, 0.01],\r\n'l1_ratio': [0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\r\n}\r\n\r\nparameters = {\r\n'solver': ['lbfgs'],\r\n'C': [0.01],\r\n'l1_ratio': [0.001]\r\n}\r\n\r\ntest_preds = pd.DataFrame(test[['customer_id', 'item_id']])\r\nfor n, col in enumerate(dependent_variables):\r\n print(n, col)\r\n \r\n# # Downsampling \r\n# count_class_0, count_class_1 = data[col].value_counts()\r\n# df_class_0 = data[data[col] == 0]\r\n# df_class_1 = data[data[col] == 1] \r\n# df_class_0_under = df_class_0.sample(count_class_1)\r\n# df_under = pd.concat([df_class_0_under, df_class_1], axis=0) \r\n# print('Random under-sampling:')\r\n# print(df_under[col].value_counts())\r\n \r\n X_train = data[[col for col in data.columns if col not in dependent_variables]].values\r\n \r\n X_test = test[[col for col in test.columns if 'customer_id' not in col and 'item_id' not in col]].values\r\n print(X_train.shape)\r\n \r\n sc = StandardScaler()\r\n X_train = sc.fit_transform(X_train)\r\n X_test = sc.transform(X_test)\r\n \r\n y_train = data[[col]].values\r\n \r\n X_train_matrix = csr_matrix(X_train, dtype='float32')\r\n X_test_matrix = csr_matrix(X_test, dtype='float32')\r\n \r\n selector = SelectFromModel(estimator=LogisticRegression()).fit(X_train_matrix, y_train)\r\n X_train_matrix = selector.transform(X_train_matrix)\r\n X_test_matrix = selector.transform(X_test_matrix)\r\n \r\n LR = LogisticRegression()\r\n clf = GridSearchCV(LR, parameters)\r\n clf.fit(X_train_matrix, y_train)\r\n best_parameters = clf.best_params_\r\n best_score = clf.best_score_\r\n\r\n print (\"Best %s:\" % (best_score))\r\n print (\"Best parameters set:\", best_parameters)\r\n \r\n model_lr = LogisticRegression(**best_parameters)\r\n model_lr.fit(X_train_matrix, y_train) \r\n roc=roc_auc_score(y_train, model_lr.predict_proba(X_train_matrix)[:,1])\r\n print('Train AUC: ', roc)\r\n \r\n test_preds[col] = model_lr.predict_proba(X_test_matrix)[:,1]\r\n\r\n","sub_path":"ALL_PROJECTS/2nd Place Recommendation Hackathon/Deep_Learning_Universe_Augmentation.py","file_name":"Deep_Learning_Universe_Augmentation.py","file_ext":"py","file_size_in_byte":18752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"265970633","text":"from __future__ import absolute_import, print_function\n\n__author__ = 'michaelcaraccio'\n\nfrom tweepy.streaming import StreamListener\nfrom tweepy import Stream\nimport tweepy\n\n# Comsumer and access token/key\nfrom 
authentification import authentification\n\nclass StdOutListener(StreamListener):\n \"\"\" A listener handles tweets are the received from the stream.\n This is a basic listener that just prints received tweets to stdout.\n \"\"\"\n def on_status(self, status):\n print(status)\n return True\n\n def on_error(self, status):\n print(status)\n\nif __name__ == '__main__':\n\n # Get access and key from another class\n auth = authentification()\n\n consumer_key=auth.getconsumer_key()\n consumer_secret=auth.getconsumer_secret()\n\n access_token=auth.getaccess_token()\n access_token_secret=auth.getaccess_token_secret()\n\n # Authentification\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.secure = True\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n\n l = StdOutListener()\n\n stream = Stream(auth, l)\n stream.filter(track=['michael'])","sub_path":"streaming.py","file_name":"streaming.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"348356221","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# The MIT License (MIT)\n\n# Copyright (c) 2017 Juan Cabral\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n# =============================================================================\n# FUTURE\n# =============================================================================\n\nfrom __future__ import unicode_literals\n\n\n# =============================================================================\n# DOC\n# =============================================================================\n\n__doc__ = \"\"\"\"\"\"\n\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nimport math\n\nimport numpy as np\n\nfrom .core import Extractor\n\n\n# =============================================================================\n# EXTRACTOR CLASS\n# =============================================================================\n\nclass FluxPercentileRatioMid20(Extractor):\n\n data = ['magnitude']\n features = [\"FluxPercentileRatioMid20\"]\n\n def fit(self, magnitude):\n sorted_data = np.sort(magnitude)\n lc_length = len(sorted_data)\n\n F_60_index = int(math.ceil(0.60 * lc_length))\n F_40_index = int(math.ceil(0.40 * lc_length))\n F_5_index = int(math.ceil(0.05 * lc_length))\n F_95_index = int(math.ceil(0.95 * lc_length))\n\n F_40_60 = sorted_data[F_60_index] - sorted_data[F_40_index]\n F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]\n F_mid20 = F_40_60 / F_5_95\n\n return {\"FluxPercentileRatioMid20\": F_mid20}\n\n\nclass FluxPercentileRatioMid35(Extractor):\n\n data = ['magnitude']\n features = [\"FluxPercentileRatioMid35\"]\n\n def fit(self, magnitude):\n sorted_data = np.sort(magnitude)\n lc_length = len(sorted_data)\n\n F_325_index = int(math.ceil(0.325 * lc_length))\n F_675_index = int(math.ceil(0.675 * lc_length))\n F_5_index = int(math.ceil(0.05 * lc_length))\n F_95_index = int(math.ceil(0.95 * lc_length))\n\n F_325_675 = sorted_data[F_675_index] - sorted_data[F_325_index]\n F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]\n F_mid35 = F_325_675 / F_5_95\n\n return {\"FluxPercentileRatioMid35\": F_mid35}\n\n\nclass FluxPercentileRatioMid50(Extractor):\n\n data = ['magnitude']\n features = [\"FluxPercentileRatioMid50\"]\n\n def fit(self, magnitude):\n sorted_data = np.sort(magnitude)\n lc_length = len(sorted_data)\n\n F_25_index = int(math.ceil(0.25 * lc_length))\n F_75_index = int(math.ceil(0.75 * lc_length))\n F_5_index = int(math.ceil(0.05 * lc_length))\n F_95_index = int(math.ceil(0.95 * lc_length))\n\n F_25_75 = sorted_data[F_75_index] - sorted_data[F_25_index]\n F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]\n F_mid50 = F_25_75 / F_5_95\n\n return {\"FluxPercentileRatioMid50\": F_mid50}\n\n\nclass FluxPercentileRatioMid65(Extractor):\n\n data = ['magnitude']\n features = [\"FluxPercentileRatioMid65\"]\n\n def fit(self, magnitude):\n sorted_data = np.sort(magnitude)\n lc_length = len(sorted_data)\n\n F_175_index = int(math.ceil(0.175 * lc_length))\n F_825_index = int(math.ceil(0.825 * lc_length))\n F_5_index = int(math.ceil(0.05 * lc_length))\n F_95_index = int(math.ceil(0.95 * lc_length))\n\n F_175_825 = sorted_data[F_825_index] - sorted_data[F_175_index]\n F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]\n F_mid65 = F_175_825 / F_5_95\n\n return 
{\"FluxPercentileRatioMid65\": F_mid65}\n\n\nclass FluxPercentileRatioMid80(Extractor):\n\n data = ['magnitude']\n features = [\"FluxPercentileRatioMid80\"]\n\n def fit(self, magnitude):\n sorted_data = np.sort(magnitude)\n lc_length = len(sorted_data)\n\n F_10_index = int(math.ceil(0.10 * lc_length))\n F_90_index = int(math.ceil(0.90 * lc_length))\n F_5_index = int(math.ceil(0.05 * lc_length))\n F_95_index = int(math.ceil(0.95 * lc_length))\n\n F_10_90 = sorted_data[F_90_index] - sorted_data[F_10_index]\n F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]\n F_mid80 = F_10_90 / F_5_95\n\n return {\"FluxPercentileRatioMid80\": F_mid80}\n","sub_path":"feets/extractors/ext_flux_percentile_ratio.py","file_name":"ext_flux_percentile_ratio.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"307510333","text":"import random\nimport os\nimport glob\n\n\nclass GreetServer(object):\n def __init__(self):\n pass\n\n def get_greet(self, name='NoName'):\n lucky_number = random.randint(1, 100000)\n return \"Hello {}, this is your lucky number {}\".format(name, lucky_number)\n\n def create(self, filename, text):\n if filename is None or filename == \"\":\n return \"Filename is not defined\"\n try:\n with open(filename, 'x') as file:\n file.write(text)\n return \"{} created\".format(filename)\n except Exception as e:\n return e\n\n def read(self, filename):\n if filename is None or filename == \"\":\n return \"Filename is not defined\"\n try:\n with open(filename, 'r') as file:\n return file.read()\n except Exception as e:\n return e\n\n def update(self, filename, text):\n if filename is None or filename == \"\":\n return \"Filename is not defined\"\n try:\n with open(filename, 'x') as file:\n file.write(text)\n return \"{} updated\".format(filename)\n except Exception as e:\n return e\n\n def delete(self, filename):\n if filename is None or filename == \"\":\n return \"Filename is not defined\"\n try:\n os.remove(filename)\n return \"{} deleted\".format(filename)\n except Exception as e:\n return e\n\n def list(self, directory):\n try:\n files = [f for f in glob.glob(directory + \"**/*\", recursive=True)]\n list_files = \"\\nFile Lists:\"\n for file in files:\n list_files += \"\\n\"+file\n\n return list_files\n except Exception as e:\n return e\n\n\nif __name__ == '__main__':\n k = GreetServer()\n print(k.get_greet('royyana'))\n","sub_path":"c0/greet.py","file_name":"greet.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"305382448","text":"def min_sum(list):\r\n num1=list[0]\r\n num2=list[1]\r\n sum=list[0]+list[1]\r\n for i in range(len(list)):\r\n for j in range(len(list)):\r\n if i==j:\r\n continue\r\n else:\r\n compare=list[i]+list[j]\r\n if sum>compare:\r\n sum=compare\r\n num1=list[i]\r\n num2=list[j]\r\n return num1,num2\r\n \r\narray=input(\"Enter the Numbers: \")\r\n\r\nlist_num=list(map(int,array.split()))\r\n\r\nprint(\"\\nList is: \",list_num)\r\n\r\nprint(\"\\nNumbers are: \",list(min_sum(list_num)))\r\n\r\n","sub_path":"day3/Akshay_Day3.py","file_name":"Akshay_Day3.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"182527087","text":"import pretty_midi\n\n# Creating chord arrays\nD_min7 = ['D5', 'F5', 'A5', 'C5']\nG_dom7 = ['G5', 'B5', 'D5', 'F6']\nC_maj7 = ['C5', 'E5', 'G5', 'B5']\n\n# Create 
a PrettyMIDI object\npiano_c_chord = pretty_midi.PrettyMIDI()\n\n# Create an Instrument instance for a piano instrument\npiano_program = pretty_midi.instrument_name_to_program('Acoustic Grand Piano')\npiano = pretty_midi.Instrument(program=piano_program)\n\n# Iterate over note names, which will be converted to note number later\nfor note_name in D_min7:\n # Convert given note name to its MIDI number\n note_number = pretty_midi.note_name_to_number(note_name)\n # Create a Note instance\n note = pretty_midi.Note(\n velocity=100, pitch=note_number, start=0, end=2)\n # Add it to our piano instrument instance\n piano.notes.append(note)\n\nfor note_name in (G_dom7):\n note_number = pretty_midi.note_name_to_number(note_name)\n note = pretty_midi.Note(\n velocity=100, pitch=note_number, start=2, end=4)\n piano.notes.append(note)\n\nfor note_name in (C_maj7):\n note_number = pretty_midi.note_name_to_number(note_name)\n note = pretty_midi.Note(\n velocity=100, pitch=note_number, start=4, end=8)\n piano.notes.append(note)\n\n# Add the piano instrument to the PrettyMIDI object\npiano_c_chord.instruments.append(piano)\n# Write the MIDI data to a file\npiano_c_chord.write('piano-ii-V-I-arpeggio.mid')","sub_path":"make_midi.py","file_name":"make_midi.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"251005389","text":"import bluetooth\nimport time # used by the send loop below; previously reached only through a star import\nfrom tkinter import *\nfrom flexduino_coordinates import *\n\n# Search for bluetooth device\nprint(\"Searching for devices...\\n\")\n\n# Create an array with discovered devices\nnearby_devices = bluetooth.discover_devices()\nif not len(nearby_devices):\n print(\"No Devices Found\")\n\nprint(\"Select your device by entering its corresponding number...\")\nfor i in range(len(nearby_devices)):\n print(i, \": \" , bluetooth.lookup_name(nearby_devices[i]))\n\n# Select the appropriate bluetooth module\nselection = input(\"Enter Device ID: \")\nprint(\"You have selected\", bluetooth.lookup_name(nearby_devices[int(selection)]))\nbd_addr, port = nearby_devices[int(selection)], 1\n\n# Bluetooth send data\nsock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\nsock.connect((bd_addr, port))\nwhile True:\n pos = queryMousePosition()\n data = processCoords(pos[0], pos[1])\n print('Sending data', data, '...')\n sock.send(data)\n time.sleep(1.5)\n","sub_path":"emoji/data/ProjectFlexduino_NIrJMGHKlA/files/flexduino_servo_control_1480981271977413.py","file_name":"flexduino_servo_control_1480981271977413.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"646467729","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"sport-scraper\",\n version=\"0.0.1\",\n author=\"Cristobal Mitchell\",\n author_email=\"cristobalmitchell@gmail.com\",\n description=\"Simple web scraper package for gathering sports data from ESPN.com\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/cristobalmitchell/sports-data-web-scraper\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n 
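# note: python_requires below makes pip refuse installation on interpreters older than 3.6\n 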
python_requires='>=3.6',\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"129305920","text":"import h5py\nimport numpy as np\nimport os\nimport warnings\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\n\nwarnings.filterwarnings('ignore')\n\n\n# parameters\nnum_trees = 100 # number of trees to create in the Random Forest Classifier\ntest_size = 0.10 # parameter used when splitting the data\nseed = 9 # parameter for the classifiers' random_state\ntrain_path = \"Dataset/PetImages/Train\"\nh5_data = 'Output/data.h5'\nh5_labels = 'Output/labels.h5'\nscoring = \"accuracy\"\n\n# get the training labels\ntrain_labels = os.listdir(train_path)\n\n# sort the training labels\ntrain_labels.sort()\n\n# create all the machine learning models\nmodels = []\nmodels.append(('LR', LogisticRegression(random_state=seed)))\nmodels.append(('LDA', LinearDiscriminantAnalysis()))\nmodels.append(('KNN', KNeighborsClassifier()))\nmodels.append(('CART', DecisionTreeClassifier(random_state=seed)))\nmodels.append(('RF', RandomForestClassifier(n_estimators=num_trees, random_state=seed)))\nmodels.append(('NB', GaussianNB()))\nmodels.append(('SVM', SVC(random_state=seed)))\n\n# variables to hold the results and the names\nresults = []\nnames = []\n\n# import the feature vector and the labels\nh5f_data = h5py.File(h5_data, 'r')\nh5f_label = h5py.File(h5_labels, 'r')\n\nglobal_features_string = h5f_data['dataset_1']\nglobal_labels_string = h5f_label['dataset_1']\n\n# convert the .h5 file into a numpy array\nglobal_features = np.array(global_features_string)\nglobal_labels = np.array(global_labels_string)\n\nh5f_data.close()\nh5f_label.close()\n\n\n# split the data for training and testing\n(trainDataGlobal, testDataGlobal, trainLabelsGlobal, testLabelsGlobal) = train_test_split(np.array(global_features),\n np.array(global_labels),\n test_size=test_size,\n random_state=seed)\n\nprint(\"\\n[STATUS] Starting validation...\\n\")\n\n# 10-fold cross validation\nfor name, model in models:\n kfold = KFold(n_splits=10, random_state=seed)\n cv_results = cross_val_score(model, trainDataGlobal, trainLabelsGlobal, cv=kfold, scoring=scoring)\n results.append(cv_results)\n names.append(name)\n msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std())\n print(msg)\n\nprint(\"\\n[STATUS] Finished printing the results!\")\n","sub_path":"main_models.py","file_name":"main_models.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"302062989","text":"import sys\nimport math\nimport re\nimport numpy as np\n\n\n\nlon = float(re.sub(\",\",\".\",input()))\nlat = float(re.sub(\",\",\".\",input()))\nn = int(input())\nDISTANCE=[[\"\",\"\",\"\"]for x in range(n)]\nfor i in range(n):\n defib = input()\n NOM=defib.split(\";\")[1]\n LON=defib.split(\";\")[4]\n LON=float(re.sub(\",\",\".\",LON))\n LAT=defib.split(\";\")[5]\n 
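# the puzzle input uses comma decimal separators, so swap them for dots before float()\n 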
LAT=float(re.sub(\",\",\".\",LAT))\n DISTANCE[i]=[NOM,LON,LAT]\n\nprint(lon,file=sys.stderr)\n\nD=[]\nfor i in DISTANCE:\n D_x=(i[1]-lon)*math.cos((i[2]+lat)/2)\n D_y=i[2]-lat\n D.append(math.sqrt(D_x*D_x+D_y*D_y)*6371)\n\nidx = np.argmin(D)\n\nprint(DISTANCE[idx][0])\n","sub_path":"CLASSIC PUZZLE - EASY/defibrillators.py","file_name":"defibrillators.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"588568514","text":"import scrapy\nimport time\n\n\n# 自定义爬虫news的下载中间件\nclass NewsDownloaderMiddleware:\n\n def process_response(self, request, response, spider):\n # 判断哪些响应对象是属于5大分类的请求返回的,如果是就对响应对象进行处理\n if response.url in spider.urls:\n brower = spider.brower\n brower.get(response.url)\n\n # 鼠标滚动到底部\n brower.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n time.sleep(2)\n brower.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n time.sleep(2)\n brower.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n time.sleep(2)\n\n # 获取携带了新闻数据的页面的html源码\n page_html = brower.page_source\n\n # 实例化一个新的响应对象\n new_response = scrapy.http.HtmlResponse(url=response.url, body=page_html, encoding='utf-8', request=request)\n return new_response\n else:\n return response","sub_path":"Day38code/wangyi/wangyi/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"635678288","text":"# Find the difference between the sum of the squares of\n# the first one hundred natural nums and the square of the sum.\n\n\nsum_squares = sum(x ** 2 for x in range(1, 101))\nsquare_sum = sum(x for x in range(1, 101)) ** 2\n\ndifference = square_sum - sum_squares\n\nprint(difference)\n","sub_path":"python/euler_6.py","file_name":"euler_6.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"230210337","text":"#!/bin/python\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the solve function below.\ndef solve(a, b):\n Alice=0\n Bob=0\n for i in range(0,len(a)):\n if a[i]>b[i]:\n Alice +=1\n elif a[i]='A' and lst[i][j]<='Z') or (lst[i][j]>='a' and lst[i][j]<='z'):\n judgeWord = True\n str += lst[i][j]\n theLine.append(str)\n j += 1\n input.append(theLine)\n\nstr1 = input[0].copy()\nstr2 = input[1].copy()\n\ni = 0\nj = 0\nwhile i < len(str2) and j < len(str1):\n if str2[i:].count(str1[j]) > 0:\n before = str2[0:i]\n behind = str2[i:]\n behind.remove(str1[j])\n before.insert(i,str1[j])\n str2 = before + behind\n i += 1\n else:\n j += 1\n\nprint(\"\".join(str2))\n","sub_path":"Code/CodeRecords/2530/60581/250327.py","file_name":"250327.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"166213330","text":"# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport math\nimport os\nimport pickle\nimport random\n\nimport numpy as np\nimport paddle\nimport six\nfrom PIL import Image, ImageEnhance\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\nfrom io import BytesIO\n\nrandom.seed(0)\n\nDATA_DIM = 112\nTHREAD = 8\nBUF_SIZE = 10240\n\n#TEST_LIST = 'lfw,cfp_fp,agedb_30,cfp_ff'\nTEST_LIST = 'lfw'\n\n\ndef get_train_image_list(data_dir):\n train_list_file = os.path.join(data_dir, 'label.txt')\n train_list = open(train_list_file, \"r\").readlines()\n random.shuffle(train_list)\n train_image_list = []\n trainer_id = int(os.getenv(\"PADDLE_TRAINER_ID\", \"0\"))\n trainer_count = int(os.getenv(\"PADDLE_TRAINERS_NUM\", \"1\"))\n per_node_lines = (len(train_list) + trainer_count - 1) // trainer_count\n train_list += train_list[0:per_node_lines * trainer_count - len(\n train_list)]\n lines = train_list[trainer_id * per_node_lines:(trainer_id + 1) *\n per_node_lines]\n print(\"read images from %d, length: %d, total: %d\" %\n (trainer_id * per_node_lines, per_node_lines, len(train_list)))\n\n for i, item in enumerate(lines):\n path, label = item.strip().split()\n label = int(label)\n train_image_list.append((path, label))\n print(\"train_data size:\", len(train_image_list))\n return train_image_list\n\n\nimg_mean = np.array([127.5, 127.5, 127.5]).reshape((3, 1, 1))\nimg_std = np.array([128.0, 128.0, 128.0]).reshape((3, 1, 1))\n\n\ndef resize_short(img, target_size):\n percent = float(target_size) / min(img.size[0], img.size[1])\n resized_width = int(round(img.size[0] * percent))\n resized_height = int(round(img.size[1] * percent))\n img = img.resize((resized_width, resized_height), Image.BILINEAR)\n return img\n\n\ndef scale(img, size):\n w, h = img.size\n if (w <= h and w == size) or (h <= w and h == size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return img.resize((ow, oh), Image.BILINEAR)\n else:\n oh = size\n ow = int(size * w / h)\n return img.resize((ow, oh), Image.BILINEAR)\n\n\ndef center_crop(img, size):\n w, h = img.size\n th, tw = int(size), int(size)\n x1 = int(round((w - tw) / 2.))\n y1 = int(round((h - th) / 2.))\n return img.crop((x1, y1, x1 + tw, y1 + th))\n\n\ndef crop_image(img, target_size, center):\n width, height = img.size\n size = target_size\n if center == True:\n w_start = (width - size) / 2\n h_start = (height - size) / 2\n else:\n w_start = random.randint(0, width - size)\n h_start = random.randint(0, height - size)\n w_end = w_start + size\n h_end = h_start + size\n img = img.crop((w_start, h_start, w_end, h_end))\n return img\n\n\ndef random_resized_crop(img, size):\n for attempt in range(10):\n area = img.size[0] * img.size[1]\n target_area = random.uniform(0.08, 1.0) * area\n aspect_ratio = random.uniform(3. / 4, 4. 
/ 3)\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if random.random() < 0.5:\n w, h = h, w\n\n if w <= img.size[0] and h <= img.size[1]:\n x1 = random.randint(0, img.size[0] - w)\n y1 = random.randint(0, img.size[1] - h)\n\n img = img.crop((x1, y1, x1 + w, y1 + h))\n assert (img.size == (w, h))\n\n return img.resize((size, size), Image.BILINEAR)\n\n w = min(img.size[0], img.size[1])\n i = (img.size[1] - w) // 2\n j = (img.size[0] - w) // 2\n img = img.crop((i, j, i + w, j + w))\n img = img.resize((size, size), Image.BILINEAR)\n return img\n\n\ndef random_crop(img, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)):\n aspect_ratio = math.sqrt(random.uniform(*ratio))\n w = 1. * aspect_ratio\n h = 1. / aspect_ratio\n\n bound = min((float(img.size[0]) / img.size[1]) / (w**2),\n (float(img.size[1]) / img.size[0]) / (h**2))\n scale_max = min(scale[1], bound)\n scale_min = min(scale[0], bound)\n\n target_area = img.size[0] * img.size[1] * random.uniform(scale_min,\n scale_max)\n target_size = math.sqrt(target_area)\n w = int(target_size * w)\n h = int(target_size * h)\n\n i = random.randint(0, img.size[0] - w)\n j = random.randint(0, img.size[1] - h)\n\n img = img.crop((i, j, i + w, j + h))\n img = img.resize((size, size), Image.BILINEAR)\n return img\n\n\ndef rotate_image(img):\n angle = random.randint(-10, 10)\n img = img.rotate(angle)\n return img\n\n\ndef distort_color(img):\n def random_brightness(img, lower=0.8, upper=1.2):\n e = random.uniform(lower, upper)\n return ImageEnhance.Brightness(img).enhance(e)\n\n def random_contrast(img, lower=0.8, upper=1.2):\n e = random.uniform(lower, upper)\n return ImageEnhance.Contrast(img).enhance(e)\n\n def random_color(img, lower=0.8, upper=1.2):\n e = random.uniform(lower, upper)\n return ImageEnhance.Color(img).enhance(e)\n\n ops = [random_brightness, random_contrast, random_color]\n random.shuffle(ops)\n\n img = ops[0](img)\n img = ops[1](img)\n img = ops[2](img)\n\n return img\n\n\ndef process_image_imagepath(sample, class_dim, color_jitter, rotate,\n rand_mirror, normalize):\n imgpath = sample[0]\n img = Image.open(imgpath)\n\n if rotate:\n img = rotate_image(img)\n img = random_resized_crop(img, DATA_DIM)\n\n if color_jitter:\n img = distort_color(img)\n\n if rand_mirror:\n if random.randint(0, 1) == 1:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n\n if img.mode != 'RGB':\n img = img.convert('RGB')\n\n img = np.array(img).astype('float32').transpose((2, 0, 1))\n\n if normalize:\n img -= img_mean\n img /= img_std\n\n assert sample[1] < class_dim, \\\n \"label of train dataset should be less than the class_dim.\"\n\n return img, sample[1]\n\n\ndef arc_iterator(data,\n class_dim,\n data_dir,\n shuffle=False,\n color_jitter=False,\n rotate=False,\n rand_mirror=False,\n normalize=False):\n def reader():\n if shuffle:\n random.shuffle(data)\n for j in range(len(data)):\n path, label = data[j]\n path = os.path.join(data_dir, path)\n yield path, label\n\n mapper = functools.partial(\n process_image_imagepath,\n class_dim=class_dim,\n color_jitter=color_jitter,\n rotate=rotate,\n rand_mirror=rand_mirror,\n normalize=normalize)\n return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE)\n\n\ndef load_bin(path, image_size):\n if six.PY2:\n bins, issame_list = pickle.load(open(path, 'rb'))\n else:\n bins, issame_list = pickle.load(open(path, 'rb'), encoding='bytes')\n data_list = []\n for flip in [0, 1]:\n data = np.empty(\n (len(issame_list) * 2, 3, image_size[0], 
image_size[1]))\n data_list.append(data)\n for i in range(len(issame_list) * 2):\n _bin = bins[i]\n if six.PY2:\n if not isinstance(_bin, six.string_types):\n _bin = _bin.tostring()\n img_ori = Image.open(StringIO(_bin))\n else:\n img_ori = Image.open(BytesIO(_bin))\n for flip in [0, 1]:\n img = img_ori.copy()\n if flip == 1:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if img.mode != 'RGB':\n img = img.convert('RGB')\n img = np.array(img).astype('float32').transpose((2, 0, 1))\n img -= img_mean\n img /= img_std\n data_list[flip][i][:] = img\n if i % 1000 == 0:\n print('loading bin', i)\n print(data_list[0].shape)\n return data_list, issame_list\n\n\ndef arc_train(data_dir, class_dim):\n train_image_list = get_train_image_list(data_dir)\n return arc_iterator(\n train_image_list,\n class_dim=class_dim,\n data_dir=data_dir,\n shuffle=True,\n color_jitter=False,\n rotate=False,\n rand_mirror=True,\n normalize=True)\n\n\ndef test(data_dir, datasets):\n test_list = []\n test_name_list = []\n for name in datasets.split(','):\n path = os.path.join(data_dir, name + \".bin\")\n if os.path.exists(path):\n data_set = load_bin(path, (DATA_DIM, DATA_DIM))\n test_list.append(data_set)\n test_name_list.append(name)\n print('test', name)\n return test_list, test_name_list\n\n\nclass TrainDataset(paddle.io.Dataset):\n def __init__(self,\n data_dir,\n class_dim,\n color_jitter=False,\n rotate=False,\n rand_mirror=False,\n normalize=False):\n self.data_dir = data_dir\n self.class_dim = class_dim\n self.color_jitter = color_jitter\n self.rotate = rotate\n self.rand_mirror = rand_mirror\n self.normalize = normalize\n self.sample_list = get_train_image_list(data_dir)\n\n def __getitem__(self, idx):\n img_path, label = self.sample_list[idx]\n img_path = os.path.join(self.data_dir, img_path)\n img = Image.open(img_path)\n if self.rotate:\n img = rotate_image(img)\n img = random_resized_crop(img, DATA_DIM)\n\n if self.color_jitter:\n img = distort_color(img)\n\n if self.rand_mirror:\n if random.randint(0, 1) == 1:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n\n if img.mode != 'RGB':\n img = img.convert('RGB')\n\n img = np.array(img).astype('float32').transpose((2, 0, 1))\n\n if self.normalize:\n img -= img_mean\n img /= img_std\n\n assert label < self.class_dim, \\\n \"label of train dataset should be less than the class_dim.\"\n\n return img, label\n\n def __len__(self):\n return len(self.sample_list)\n\n\nclass TestDataset(paddle.io.Dataset):\n def __init__(self, data_dir, datasets):\n self.data_dir = data_dir\n self.datasets = datasets.split(',')\n self.sample_list = []\n\n def __getitem__(self, idx):\n img_path, label = self.sample_list[idx]\n return img, label\n\n def __len__(self):\n return len(self.sample_list)\n","sub_path":"plsc/utils/jpeg_reader.py","file_name":"jpeg_reader.py","file_ext":"py","file_size_in_byte":11043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"124927009","text":"\ndef find_path(grid):\n def dfs(row, col):\n print(row, col)\n if row == len(grid) - 1 and col == len(grid[0]) - 1:\n return True\n \n if row == len(grid):\n return False\n \n if col == len(grid[0]):\n return False\n \n if not grid[row][col]:\n return False\n \n grid[row][col] = False\n for next_row, next_col in [[row + 1, col], [row, col + 1]]:\n if dfs(next_row, next_col):\n return True\n \n return False\n\n return dfs(0, 0)\n\ntest = [\n [True, True, False],\n [False, True, False],\n [False, True, 
True]\n]\n\nprint(find_path(test))","sub_path":"ctci-python/ch_08/robot_grid.py","file_name":"robot_grid.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"197542204","text":"# run with python3\nimport os\nfrom timeit import default_timer as timer\nimport swiftclient\nimport pickle\n\n_authurl = 'https://chi.tacc.chameleoncloud.org:5000'\n_auth_version = '3'\n_user = 'pcpeng'\n_key = 'xxxxx'\n_os_options = {\n 'user_domain_name': 'Default',\n 'project_domain_name': 'Default',\n 'project_name': 'CH-818141'\n}\n\nconn = swiftclient.Connection(\n authurl=_authurl,\n user=_user,\n key=_key,\n os_options=_os_options,\n auth_version=_auth_version\n)\n\n\ncontainer_name = \"cifar10\"\n\nstart = timer()\ndata = {}\n\ni = 0\nwhile i < 50000: \n filename = \"img\" + str(i)\n obj_tuple = conn.get_object(container_name, filename)\n data[filename] = obj_tuple[1]\n i += 1\n \nend = timer()\nprint(\"download time: \", end - start) \n\ndict = {}\nstart2 = timer()\nfor key, value in data.items():\n dict[key] = pickle.loads(value) \nend2 = timer()\nprint(\"deser time: \", end2 - start2) \n\n","sub_path":"Cifar10/ceph_cham/down.py","file_name":"down.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"201216266","text":"import sys\nimport inspect\n\nfrom kanapy.api import APIClient\nimport logging\n\n\nLIST = type([])\nDICT = type({})\nPRIMITIVE = (int, str, bool, type(None), LIST, DICT)\n\n\ndef to_upper_camel(snake):\n lst = snake.split(\"_\")\n return \"\".join(map(lambda x: x[0].upper() + x[1:], lst))\n\n\ndef from_dict(variable, module_source=__name__):\n return dict([(k, deserialize(v, module_source=module_source)) for k, v in variable.items()])\n\n\ndef deserialize(variable, module_source=__name__):\n if type(variable) not in [LIST, DICT]:\n return variable\n\n if type(variable) == LIST:\n return [deserialize(v, module_source=module_source) for v in variable]\n\n className = to_upper_camel(variable['resource_type'])\n current_module = sys.modules[module_source]\n cls = None\n try:\n cls = getattr(current_module, className)\n except AttributeError:\n setattr(current_module, className, type(className, (Resource,), {}))\n cls = getattr(current_module, className)\n\n return cls(**from_dict(variable, module_source=module_source))\n\n\nclass Resource:\n resource_type = None\n\n def __init__(self, id=None, created_at=None, updated_at=None, resource_url=None, resource_type=None, **kwargs):\n self.id = id\n self.created_at = created_at\n self.updated_at = created_at\n self.resource_url = resource_url\n self.resource_type = resource_type\n\n @classmethod\n def create(cls, obj):\n raise NotImplementedError()\n\n @classmethod\n def delete(cls, id_):\n \"\"\"A HTTP DELETE has not been implemented\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def get(cls, id=None, params=None):\n c = APIClient()\n url = c.get_url(cls, id)\n response = c.http.get(url, params=params, verify=False)\n data = response.json()\n c.use_session(data.get('session_id'))\n\n return deserialize(data[\"data\"])\n\n @classmethod\n def update(cls, obj):\n \"\"\"A HTTP PUT has not been implemented\"\"\"\n raise NotImplementedError()\n\n def get_fields(self):\n d = vars(self)\n return { key: d[key] for key in d if not key.startswith('_')}\n\n def _serialize(self, val):\n if type(val) not in PRIMITIVE and issubclass(val.__class__, Resource):\n return 
val.serializable()\n if type(val) is DICT:\n return {k: self._serialize(v) for k, v in val.items()}\n if type(val) is LIST:\n return [self._serialize(v) for v in val]\n return val\n\n def serializable(self):\n return {k: self._serialize(v) for k, v in self.__dict__.items()}\n\n\nclass User(Resource):\n _resource_base_url = \"/users\"\n\n def __init__(self, **kwargs):\n self.__dict__ = kwargs\n\n\nclass UserMinimal(Resource):\n _resource_base_url = \"/users\"\n\n def __init__(self, full_name=None, last_active_at=None, last_seen_at=None, avatar=None, presence_channel=None, **kwargs):\n super().__init__(**kwargs)\n self.full_name = full_name\n self.last_active_at = last_active_at\n self.last_seen_at = last_seen_at\n self.avatar = avatar\n self.presence_channel = presence_channel\n\n @classmethod\n def from_user(cls, user):\n return UserMinimal(\n id=user.id,\n full_name=user.full_name,\n last_active_at=user.last_active_at,\n last_seen_at=user.last_seen_at,\n avatar=user.avatar,\n presence_channel=user.presence_channel,\n )\n\n @classmethod\n def get(cls, id=None, params=None):\n if not id:\n users = super().get(params=params)\n return [cls.from_user(usr) for usr in users]\n\n user = super().get(id, params=params)\n return cls.from_user(user)\n\n\nclass LocaleField(Resource):\n _resource_base_url = \"/locale/fields\"\n\n def __init__(self, locale=None, translation=None, parent_id=None, field=None, **kwargs):\n super().__init__(**kwargs)\n self.locale = locale\n self.translation = translation\n self.parent_id = parent_id\n self.field = field\n\n\nclass Brand(Resource):\n _resource_base_url = \"/brands\"\n\n\nclass Locale(Resource):\n _resource_base_url = \"/locales\"\n","sub_path":"kanapy/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"270715444","text":"# -*- coding: utf-8 -*-\r\nfrom django.utils.translation import ugettext_lazy as _\r\n\r\nfrom cms.plugin_pool import plugin_pool\r\nfrom cms.plugin_base import CMSPluginBase\r\n\r\nfrom .models import AnimateOnScroll_Element, AnimateOnScroll_Anchor\r\nfrom .forms import AnimateOnScroll_Element_Form\r\n\r\nclass AnimateOnScroll_Anchor_Plugin(CMSPluginBase):\r\n model = AnimateOnScroll_Anchor\r\n name = _('AnimateOnScroll - Anchor')\r\n module = _('Animate On Scroll')\r\n render_template = 'djangocms_animate_on_scroll/aos_anchor.html'\r\n\r\n fieldsets = (\r\n (None, {\r\n 'fields': (\r\n 'id_name',\r\n )\r\n }),\r\n )\r\n\r\n\r\nplugin_pool.register_plugin(AnimateOnScroll_Anchor_Plugin)\r\n\r\n\r\nclass AnimateOnScroll_Element_Plugin(CMSPluginBase):\r\n model = AnimateOnScroll_Element\r\n name = _('AnimateOnScroll - Element')\r\n module = _('Animate On Scroll')\r\n render_template = 'djangocms_animate_on_scroll/aos_element.html'\r\n allow_children = True\r\n form = AnimateOnScroll_Element_Form\r\n\r\n fieldsets = (\r\n (None, {\r\n 'fields': (\r\n 'aos_animation',\r\n 'aos_easing',\r\n )\r\n }),\r\n (_('AOS Anchor settings'), {\r\n 'fields': (\r\n 'aos_anchor_placement',\r\n 'aos_anchor',\r\n ),\r\n }),\r\n (_('AOS Advanced settings'), {\r\n 'classes': ('collapse',),\r\n 'fields': (\r\n ('aos_offset',\r\n 'aos_duration',\r\n 'aos_delay',\r\n 'aos_once'),\r\n ),\r\n }),\r\n (_('Advanced Element settings'), {\r\n 'classes': ('collapse',),\r\n 'fields': (\r\n 'id_name',\r\n 'additional_classes',\r\n 'attributes',\r\n ),\r\n }),\r\n 
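# each (label, options) pair above renders as one fieldset section of the plugin's admin form\r\n 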
)\r\n\r\n\r\nplugin_pool.register_plugin(AnimateOnScroll_Element_Plugin)\r\n","sub_path":"env/lib/python3.8/site-packages/djangocms_animate_on_scroll/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"479555411","text":"from recommendations.models import Recommendation, User\n\nclass CreateRecommendationForMe: \n\n @classmethod\n def execute(cls, payload):\n user = User.objects.get(phone=payload['phone'])\n recommendation = Recommendation(recommender=user, recommendee=user, name=payload['name'], accepted=True)\n recommendation.full_clean()\n recommendation.save()\n return {'message': f\"'{recommendation.name}' was added to your list.\"}","sub_path":"recommendations/actions/create_recommendation_for_me.py","file_name":"create_recommendation_for_me.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"653542680","text":"import requests\nfrom lxml import etree\nimport re\nimport time\n\ndef origin_txt(pathname):\n ''' Write an empty string to initialise the content and log files '''\n with open(pathname, 'w', encoding='utf-8') as file:\n file.write('')\n\ndef get_source(url):\n ''' Fetch the page source for the given URL '''\n html = requests.get(url)\n html.encoding = 'utf-8'\n return html.text\n\ndef page_url(origin_url):\n ''' The blog index spans 7 pages whose URLs differ only in the final digit.\n Build all 7 index-page URLs with a regex substitution. '''\n page_list = []\n for i in range(1, 8):\n page_list.append(re.sub('\\d.html', '{}.html'.format(i), origin_url))\n return page_list\n\ndef url_in_page(page):\n '''Collect the URL of every article listed on one index page '''\n html = get_source(page)\n selector = etree.HTML(html)\n try:\n content_field = selector.xpath('//div[@class=\"articleList\"]')[0]\n url_in_page_list = content_field.xpath('div/p/span[@class=\"atc_title\"]/a/@href')\n return url_in_page_list\n except:\n return False\n\ndef get_article(url):\n ''' Given an article URL, extract the title, timestamp and body text,\n then write the content and a matching log line to the txt files.'''\n html = get_source(url)\n selector = etree.HTML(html)\n dict = {}\n try:\n dict['title'] = selector.xpath('//div[@id=\"articlebody\"]/div/h2/text()')[-1]\n dict['time'] = selector.xpath('//div[@id=\"articlebody\"]/div/span/text()')[-1]\n try:\n graph_field = selector.xpath('//div[@id=\"articlebody\"]/div[3]/div')[0]\n except:\n graph_field = selector.xpath('//div[@id=\"articlebody\"]/div[3]')[0]\n dict['graph'] = graph_field.xpath('string(.)').replace('\\n\\n', '').replace('\\t', '')\n towrite(dict)\n log = time.ctime() + ' fetched OK: ' + str(url)\n except:\n log = time.ctime() + ' fetch failed: ' + str(url)\n writelog(log)\n\ndef towrite(dict):\n file = open(txtpath, 'a', encoding='utf-8')\n for key, value in dict.items():\n file.writelines(key + ': ' + str(value) + '\\n')\n file.close()\n\ndef writelog(log):\n file = open(logpath, 'a', encoding='utf-8')\n file.writelines(log + '\\n')\n print(log)\n\nif __name__ == '__main__':\n start = time.ctime()\n print('Initialising...')\n logpath = r'hanBlog\\log.txt'\n txtpath = r'hanBlog\\hanBlog.txt'\n origin_url = 'http://blog.sina.com.cn/s/articlelist_1191258123_0_1.html'\n origin_txt(logpath)\n origin_txt(txtpath)\n\n page_url = page_url(origin_url)\n urllist = []\n for page in page_url:\n pageurllist = url_in_page(page)\n if pageurllist:\n urllist.extend(pageurllist)\n log = 'page: ' + str(page) + ' initialised OK'\n else:\n log = 'page: ' + str(page) + ' failed to initialise'\n writelog(log)\n\n print('Crawling...')\n for url in urllist:\n get_article(url)\n\n print('Run finished')\n end = time.ctime()\n print(start)\r\n 
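# start/end hold the time.ctime() stamps taken before and after the crawl\n 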
print(end)","sub_path":"hanhan_blog/blog_spider.py","file_name":"blog_spider.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"609315047","text":"# przykladowy_slownik = {\n# 'klucz': 1235,\n# 'klucz': 345678,\n# 4: 'inna wartosc',\n# 123: 123,\n# }\n# print(przykladowy_slownik)\n\n### DICT COMPREHENSION\n# slownik = {\n# x: y\n# for x, y in cokolwiek\n# }\n###\ntekst = \"alamakota\"\nwystapienia = {\n literka: tekst.count(literka)\n for literka in tekst\n}\nprint(wystapienia)\n","sub_path":"03_oop/1_dzien/zad4.py","file_name":"zad4.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"464888348","text":"import os\nimport gensim\nimport numpy as np\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom nltk.corpus import brown\nfrom keras.engine import Input\nfrom keras.preprocessing import sequence\nfrom keras.layers import Embedding, LSTM, Dense\nfrom keras.models import Model, Sequential, load_model\nfrom keras.utils import plot_model\n\n# config\nW2V_MODEL_SAVE_PATH = \"w2v_text.txt\"\nKERAS_MODEL_SAVE_PATH = \"model.h5\"\nMAX_INPUT_SEQUENCE_LENGTH = 5 # cut sentences longer than this, pad sentences shorter than this\n\n# load sentences list to generate embeddings\nsentences_list = brown.sents()\nprint(\"Loaded corpus: {0} sentences, starts from {1} ...\".format(len(sentences_list), sentences_list[0][:5]))\n\n# additional pre-processing for the dictionary (make all words lowercase)\n# TODO: remove numbers, punctuation etc\nsentences_list = [list(map(lambda w: w.lower(), sentence)) for sentence in sentences_list]\n\n# try loading pre-trained embeddings\nmodel_exists = False\nif os.path.isfile(W2V_MODEL_SAVE_PATH):\n w2v_model = KeyedVectors.load_word2vec_format(W2V_MODEL_SAVE_PATH, binary=False)\n model_exists = True\n print(\"Loaded existing embeddings from {0}\".format(W2V_MODEL_SAVE_PATH))\n for word in ['the', 'beautiful', 'listen']:\n print(\"W2V representation for '{0}': {1}...\".format(word, w2v_model.word_vec(word)[:5]))\nelse:\n print(\"Couldn't find existing embeddings at {0}, re-training\".format(W2V_MODEL_SAVE_PATH))\n\nif not model_exists: \n # train word2vec embeddings\n w2v_model = gensim.models.Word2Vec(sentences_list)\n print(\"Finished generating embeddings\")\n\n # save embeddings\n print(\"Saving embeddings to {0}\".format(W2V_MODEL_SAVE_PATH))\n w2v_model.wv.save_word2vec_format(W2V_MODEL_SAVE_PATH, binary=False)\n\n# verify embeddings\nembedding_matrix = w2v_model.wv.syn0\nvocabulary_size = embedding_matrix.shape[0] # rows\nembedding_size = embedding_matrix.shape[1] # columns\nprint(\"Dimension of w2v weights (embedding matrix): {0}x{1} (vocabulary_size)x(embedding_size)\".format(vocabulary_size, embedding_size))\n\n# get and verify vocabulary\nvocab_words = w2v_model.wv.vocab.keys()\nword_to_idx = dict([(k, v.index) for k, v in w2v_model.wv.vocab.items()])\nidx_to_word = dict([(v, k) for k, v in word_to_idx.items()])\nprint(\"Word indices: \")\nfor word, i in list(word_to_idx.items())[:5]:\n print(\"{0} : {1}\".format(word, i))\n\nprint(\"Word indices reversed: \")\nfor i, word in list(idx_to_word.items())[:5]:\n print(\"{0} : {1}\".format(i, word))\n\n# build/train keras model or load existing one\nmodel_found = False\ntry:\n model = load_model(KERAS_MODEL_SAVE_PATH)\n model_found = True\n print(\"Found the model at {0}\".format(KERAS_MODEL_SAVE_PATH))\nexcept:\n 
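# load_model raises (e.g. OSError) when no saved model file exists yet\n 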
print(\"Couldn't find the model at {0}\".format(KERAS_MODEL_SAVE_PATH))\n\nif not model_found:\n ''' model = Sequential()\n model.add(Embedding(input_dim=vocabulary_size, output_dim=embedding_size, weights=[embedding_matrix], trainable=False))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['acc']) '''\n\n model = Sequential()\n model.add(Embedding(input_dim=vocabulary_size, output_dim=embedding_size, weights=[embedding_matrix], input_length=MAX_INPUT_SEQUENCE_LENGTH, trainable=False))\n model.add(LSTM(128)) # LSTM cell matrix is 128x128\n model.add(Dense(vocabulary_size, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n # dump model graph\n plot_model(model, to_file='model.png', show_shapes=True)\n\n # prepare training data\n clipped_sentences_list = []\n for i, sentence in enumerate(sentences_list):\n if len(sentence) >= (MAX_INPUT_SEQUENCE_LENGTH + 1):\n clipped_sentence = [word_to_idx[word] for word in sentence if word in vocab_words][:(MAX_INPUT_SEQUENCE_LENGTH + 1)]\n if len(clipped_sentence) == (MAX_INPUT_SEQUENCE_LENGTH + 1):\n clipped_sentences_list.append(clipped_sentence)\n\n train_x = np.zeros((len(clipped_sentences_list), MAX_INPUT_SEQUENCE_LENGTH), dtype=np.int32)\n train_y = np.zeros((len(clipped_sentences_list), vocabulary_size), dtype=np.int32)\n for i, sentence in enumerate(clipped_sentences_list):\n train_x[i] = sentence[:-1]\n train_y[i][sentence[-1]] = 1 # one hot representing the output word for each sentence\n\n # verify right construction of training data \n print(\"Shape of input: {0}, output: {1}\".format(train_x.shape, train_y.shape))\n for i in range(10):\n # (print word_ids of sentences and their next word prediction as word id)\n print(\"Input: {0}, output: {1}\".format(train_x[i], next(j for (j,v) in enumerate(train_y[i]) if v==1)))\n\n # train model\n model.fit(train_x, train_y, batch_size=15, epochs=5)\n\n # save the model\n model.save('model.h5')\n\n# feed some data into model\ntest_input = \"The Fulton County Grand Jury\"\nmodel_input = np.asarray([word_to_idx[word.lower()] for word in test_input.split()]).reshape(1,5)\nprint(\"Input: {0} with shape {1}\".format(model_input, model_input.shape))\nmodel_output = model.predict(model_input, verbose=1)[0]\nmodel_output_max_probability_vocab_indices = sorted(np.argpartition(model_output, -5)[-5:]) # argpartition(x, -K)[-K:] will sort and get 5 max indices (!unsorted!), therefore call sorted()\nprint(model_output_max_probability_vocab_indices)\npredicted_words = [idx_to_word[idx] for idx in model_output_max_probability_vocab_indices] \nprint(\"Model output: {0}\".format(predicted_words)) \n","sub_path":"NLP/keras-rnn-prediction/next_word_pred.py","file_name":"next_word_pred.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"370136324","text":"import requests\n\n# requests.request(method, url, **kwargs) # 构造一个请求\n# method:请求方式,对应get/put/post等7种\n# GET, HEAD, POST, PUT, PATCH, delete, OPTIONS\n\n# ---------------------**kwargs参数-----------------------\n# params:字典或字节序列,作为参数增加到url中\nkv = {'key1': 'value1', 'key2': 'value2'}\nr = requests.request('GET', 'http://python123.io/ws', params=kv)\nprint(r.url) # https://python123.io/ws?key1=value1&key2=value2\n\n# data:字典、字节序列或文件对象,作为Request的内容\nr = requests.request('POST', 'http://python123.io/ws', data=kv)\nbody = '主题内容'\n# r = requests.request('POST', 
'http://python123.io/ws',\n# data=body.encode('utf-8'))\n\n# json:JSON格式的数据,作为Request的内容\nr = requests.request('POST', 'http://python123.io/ws', json=kv)\n\n# headers:字典,HTTP定制头\nhd = {'user-agent': 'chrome/10'} # 模拟浏览器\nr = requests.request('POST', 'http://python123.io/ws', headers=hd)\nprint(r.headers)\n\n# cookies:字典或CookieJar,Request中的cookie\n# auth:元组,支持HTTP认证功能\n\n# files:字典类型,传输文件\nfs = {'file': open('data.xls', 'rb')}\nr = requests.request('POST', 'http://python123.io/ws', files=fs)\n\n# timeout:设定超时时间,秒为单位\nr = requests.request('GET', 'http://python123.io/ws', timeout=10)\n\n# proxies:字典类型,设定访问代理服务器,可以增加登陆认证\n# 掩盖原ip,防止爬虫逆追踪\npxs = {'http': 'http//user:pass@10.10.10.1:1234',\n 'https': 'https//10.10.10.1:4321'}\nr = requests.request('GET', 'http://www.baidu.com', proxies=pxs)\n\n# allow_redirects:True/False,默认为True,重定向开关\n# stream:True/False,默认为True,获取内容立即下载开关\n# verify:True/False,默认为True,认证SSL证书开关\n# cert:本地SSL证书路径\n# ------------------------------------------------------\n\n\n# requests.get(url, params=None, **kwargs)\n# params:url中的额外参数,字典或字节流格式,可选\n# **kwargs:12个控制访问参数\nr = requests.get('http://www.baidu.com', params=None)\nprint(r.status_code) # 状态码\nprint(r.headers)\n# r.status_code HTTP请求的返回状态,200成功,404失败\n# r.text HTTP响应内容的字符串形式,即url对应的页面内容\n# r.encoding 从HTTP header中猜测的响应内容编码方式\n# r.apparent_encoding 从内容中分析出的相应内容编码方式\n# r.content HTTP响应内容的二进制形式\nprint(r.encoding) # 只从charset中读取,没有则为ISO-8859-1\nprint(r.apparent_encoding) # 从内容分析,更准确\nprint(r.text)\nr.encoding = 'utf-8'\nprint(r.text)\n\n\n# 爬取网页的通用代码框架\n# requests.ConnectionError 网络连接错误异常,如DNS查询失败、拒绝连接等\n# requests.HTTPError HTTP错误异常\n# requests.URLRequired URL缺失异常\n# requests.TooManyRedirects 超过最大重定向次数,产生重定向异常\n# requests.ConnectTimeout 连接远程服务器超时异常\n# requests.Timeout 请求URL超时,产生超时异常\ndef get():\n try:\n r = requests.get('http://www.baidu.com', timeout=30)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n return \"产生异常\"\n","sub_path":"网络爬虫/requests库/requests库.py","file_name":"requests库.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"444508982","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\n\nimport re\nimport uuid\n\nimport time\nimport tornado.httpserver\nfrom tornado.websocket import WebSocketHandler\nfrom tornado.web import RequestHandler\nfrom tornado.options import define, options\n\nMAX_ROOMS = 100\nMAX_USERS_PER_ROOM = 100\n\n\nclass RoomHandler(object):\n def __init__(self):\n # 房间信息 {room:[{'client_id':client_id, 'nick':'nick'}, {'client_id':client_id, 'nick':'nick'},], room1:[conn2, conn3]...}\n self.rooms_info = dict()\n # 临时连接信息 {client_id:{nick:'11', room:'222'}}\n self.temp_conns = dict()\n # 客户端信息 {client_id:{'conn':wsconn, 'nick':'11', 'room':'222'}}\n self.clients_info = dict()\n\n def add_room(self, room, nick):\n \"\"\"\n 添加Room:\n 1. 校验room相关信息\n 2. 初始化room队列,并在pending中增加room信息,待到实际连接时再添加到room中\n 3. 
返回client_id,用于前台使用\n\n :param room: 房间名字\n :param nick: 用户名字\n :return: client_id\n \"\"\"\n # 校验是否满足需求(数量限制,名字限制)\n if len(self.rooms_info) > MAX_ROOMS:\n raise RuntimeError(\n \"The maximum number of rooms (%d) has been reached.\\n\\nPlease try again later.\" % MAX_ROOMS)\n if room in self.rooms_info and len(\n self.rooms_info[room]) >= MAX_USERS_PER_ROOM:\n raise RuntimeError(\n \"The maximum number of users in this room (%d) has been reached.\\n\\nPlease try again later.\" % MAX_USERS_PER_ROOM)\n roomvalid = re.match(r'[\\w-]+$', room)\n nickvalid = re.match(r'[\\w-]+$', nick)\n if roomvalid == None:\n raise RuntimeError(\n \"The room name provided was invalid. It can only contain letters, numbers, - and _.\\nPlease try again.\")\n if nickvalid == None:\n raise RuntimeError(\n \"The nickname provided was invalid. It can only contain letters, numbers, - and _.\\nPlease try again.\")\n\n # 添加到房间中\n client_id = str(uuid.uuid4().int)\n if not self.rooms_info.get(room):\n self.rooms_info[room] = []\n\n # 验证nick是否已经重复\n nicks = list(map(lambda x: x['nick'], self.rooms_info[room]))\n\n suffix = 1\n while True:\n if nick in nicks:\n nick += str(1)\n else:\n break\n suffix += 1\n\n # 添加到临时连接\n self.temp_conns[client_id] = dict(room=room, nick=nick)\n return client_id\n\n def add_client(self, client_id, wsconn):\n \"\"\"\n 添加client_ws_conn\n 1. 删除pending中的client\n 2. 在room中实际添加连接\n 3. 在client中实际添加信息\n 4. 向房间所有人广播信息,同时更新房间人数列表\n :param client_id:\n :param wsconn:\n :return:\n \"\"\"\n # 在临时中取出client信息,并添加到clients_info中\n client_info = self.temp_conns.pop(client_id)\n client_info['conn'] = wsconn\n self.clients_info[client_id] = client_info\n\n # 在room中添加上连接信息\n\n self.rooms_info[client_info['room']].append(dict(\n nick=client_info['nick'],\n conn=wsconn,\n client_id=client_id\n ))\n # 通知同一房间所有人某人加入\n self.send_msg(client_id, msg_type='join')\n # 房间在线人数列表刷新\n self.send_msg(client_id, msg_type='nicks')\n\n def remove_client(self, client_id):\n \"\"\"\n 1. 在clients_info中移除client_id\n 2. 在rooms_info中移除client,若是房间里没有人了,房间一并删除\n 3. 通知房间内所有人。**离开房间\n 4. 
刷新房间在线人数列表\n :param client_id: 根据client_id通知信息\n :return:\n \"\"\"\n # 在clients_info中移除client_id\n if client_id not in self.clients_info:\n return\n\n client_info = self.clients_info.get(client_id)\n room = client_info['room']\n\n # 在rooms_info中移除room中的client\n room_client = list(filter(lambda x: x['client_id'] == client_id,\n self.rooms_info[room]))\n\n # 将当前client在rooms中移除\n self.rooms_info[room].remove(room_client[0])\n\n # 通知信息\n self.send_msg(client_id, msg_type='leave')\n self.send_msg(client_id, msg_type='nicks')\n\n # 将client在clients中移除\n del self.clients_info[client_id]\n\n # 移除room\n if len(self.rooms_info[room]) == 0:\n del self.rooms_info[room]\n\n def send_msg(self, client_id, msg_type=\"join\", message=None):\n \"\"\"\n\n :param client_id: 客户端ID\n :param msg_type: 发送消息类型 join, leave, nicks\n :return:\n \"\"\"\n client_info = self.clients_info.get(client_id)\n if client_info:\n room = client_info['room']\n nick = client_info['nick']\n conns = list(map(lambda x: x['conn'], self.rooms_info[room]))\n msg = dict(time='%10.6f' % time.time(),\n msg_type=msg_type)\n if msg_type.lower() == 'join':\n msg['nick'] = nick\n msg['msg'] = 'joined the chat room.'\n elif msg_type.lower() == 'leave':\n msg['nick'] = nick\n msg['msg'] = 'left the chat room.'\n elif msg_type.lower() == 'nicks':\n msg['nick'] = nick\n msg['nicks'] = list(\n map(lambda x: x['nick'], self.rooms_info[room]))\n elif msg_type.lower() == 'msg':\n msg['nick'] = nick\n msg['msg'] = message\n\n msg = json.dumps(msg)\n for conn in conns:\n conn.write_message(msg)\n\n\nclass MainHandler(RequestHandler):\n def initialize(self, room_handler):\n self.room_handler = room_handler\n\n def get(self, *args, **kwargs):\n \"\"\"\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n try:\n room = self.get_argument('room')\n nick = self.get_argument('nick')\n client_id = self.room_handler.add_room(room, nick)\n self.set_cookie(\"client_id\", client_id)\n self.render(\"mian.html\", room_name=room)\n except tornado.web.MissingArgumentError:\n self.render(\"index.html\")\n except RuntimeError as e:\n self.render(\"error.html\", msg=str(e))\n\n\nclass ChatHandler(WebSocketHandler):\n def initialize(self, room_handler):\n self.room_handler = room_handler\n\n def open(self, *args, **kwargs):\n \"\"\"\n 将pending中的client_id和wsconn关联起来\n \"\"\"\n # 根据登录后设置的cookie信息获取room和nick信息,并关联wsconn\n self.client_id = self.get_cookie('client_id', 0)\n self.room_handler.add_client(self.client_id, self)\n\n\n def on_message(self, message):\n \"\"\"\n 向同一房间所有的连接发送信息\n \"\"\"\n msg = json.loads(message)\n self.room_handler.send_msg(self.client_id, msg_type='msg', message=msg['message'])\n\n def on_close(self):\n self.room_handler.remove_client(self.client_id)\n\n\ndefine(\"address\", default='0.0.0.0', help=\"监听地址\", type=str)\ndefine(\"port\", default=8100, help=u\"监听端口\")\n\nconf = {\n \"debug\": True,\n}\n\nif __name__ == \"__main__\":\n room_handler = RoomHandler()\n\n conf['handlers'] = [\n (r\"/\", MainHandler, {\"room_handler\": room_handler}),\n (r\"/ws\", ChatHandler, {\"room_handler\": room_handler})\n ]\n conf['cookie_secret'] = \"bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=\"\n conf['template_path'] = os.path.join(os.path.dirname(__file__), \"templates\")\n conf['static_path'] = os.path.join(os.path.dirname(__file__), \"static\")\n conf['compiled_template_cache'] = False\n conf['autoreload'] = False\n\n tornado.options.parse_command_line()\n\n app = tornado.web.Application(**conf)\n server = tornado.httpserver.HTTPServer(app)\n\n 
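# bind to the address/port parsed from the command line via tornado.options\n 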
server.listen(options.port, address=options.address)\n\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"214254582","text":"#!/bin/python\nimport subprocess\n\ndef main():\n sizes_process = [100, 1000, 10000, 100000, 1000000, 5000000, 10000000]\n types_process = [\"i32\", \"i32\"]\n bounds_process = [ [-10000, 10000]\n , [-10000, 10000]\n ]\n\n sizes_montecarlo = [100, 10000, 1000000]\n types_montecarlo = [\"f32\", \"f32\"]\n bounds_montecarlo = [ [0, 2]\n , [0, 2]\n ]\n\n sizes_segmented = [100, 10000, 1000000]\n types_segmented = [\"i32\", \"bool\"]\n bounds_segmented = None\n\n generate(sizes_process, types_process, bounds_process, \"process\")\n generate(sizes_montecarlo, types_montecarlo, bounds_montecarlo, \"montecarlo\")\n generate(sizes_segmented, types_segmented, bounds_segmented, \"segmented\")\n\ndef generate(sizes, types, bounds, set):\n for size in sizes:\n filename = \"data-\" + set + \"-\" + str(size) + \".dat\"\n command = [ \"futhark-dataset\"\n , \"-b\"\n ]\n for i in range(len(types)):\n if(bounds != None):\n command += [\"--\" + types[i] + \"-bounds=\" + str(bounds[i][0]) + \":\" + str(bounds[i][1])]\n\n command += [ \"-g\"\n , \"[\" + str(size) + \"]\" + types[i]\n ]\n\n file = open(filename, \"w\")\n subprocess.call(command, stdout=file, stderr=subprocess.STDOUT)\n file.close()\n\nmain()\n","sub_path":"labB/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"274852135","text":"\"\"\"Updates the cache for the `covidcast_meta` endpiont.\"\"\"\n\n# standard library\nimport argparse\nimport json\n\n# first party\nfrom delphi.epidata.acquisition.covidcast.database import Database\nfrom delphi.epidata.client.delphi_epidata import Epidata\n\n\ndef get_argument_parser():\n \"\"\"Define command line arguments.\"\"\"\n\n # there are no flags, but --help will still work\n return argparse.ArgumentParser()\n\n\ndef main(args, epidata_impl=Epidata, database_impl=Database):\n \"\"\"Update the covidcast metadata cache.\n\n `args`: parsed command-line arguments\n \"\"\"\n\n # fetch live (not cached) metadata\n response = epidata_impl.covidcast_meta()\n args = (response['message'], response['result'])\n print('covidcast_meta result: %s (code %d)' % args)\n\n if response['result'] != 1:\n print('unable to cache epidata')\n return\n\n # serialize the data\n epidata_json = json.dumps(response['epidata'])\n\n # update the cache\n database = database_impl()\n database.connect()\n try:\n database.update_covidcast_meta_cache(epidata_json)\n print('successfully cached epidata')\n finally:\n # no catch block so that an exception above will cause the program to\n # fail after the following cleanup\n database.disconnect(True)\n\n\nif __name__ == '__main__':\n main(get_argument_parser().parse_args())\n","sub_path":"src/acquisition/covidcast/covidcast_meta_cache_updater.py","file_name":"covidcast_meta_cache_updater.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"245955979","text":"from resolve_others_param import identify_model_param\nfrom edit_distance import calculate_Edit_Distance\nimport re\nimport os\nfrom merge import merge\nfrom resolve_others_param import 
identify_model\nimport csv\nimport dataloader\n\n\ndef merge_similar():\n for brand in os.listdir('./model'):\n print(brand)\n brand_dir = './model/' + brand\n model_files = os.listdir(brand_dir)\n models = []\n for file_name in model_files:\n model, _ = file_name.split('.')\n models.append(model)\n\n dig_ext = re.compile('\\d+')\n for i in range(len(models)):\n for j in range(i+1, len(models)):\n if calculate_Edit_Distance(models[i], models[j]) < 3:\n if dig_ext.findall(models[i]) != dig_ext.findall(models[j]):\n continue\n if models[i][:2] != models[j][:2] or models[i][-1] == models[j][-1]:\n continue\n param_i = identify_model(brand, models[i])\n param_j = identify_model(brand, models[j])\n tup_i_1, tup_i_2 = param_i\n if not tup_i_1 or not tup_i_2:\n continue\n if param_i != (None, None) and param_i == param_j:\n print(models[i], models[j])\n print(identify_model(brand, models[i]))\n # merge(models[i], models[j], brand_dir, 0)\n\n\ndef extract_same_model(brand):\n brand_dir = './model/' + brand\n model_files = os.listdir(brand_dir)\n\n same_models = set()\n for file_name in model_files:\n model, _ = file_name.split('.')\n model_file_path = brand_dir + '/' + model + '.csv'\n with open(model_file_path, encoding='UTF-8') as model_file:\n reader = csv.reader(model_file)\n is_first_row = True\n for row in reader:\n if is_first_row:\n is_first_row = False\n else:\n model_label_content = dataloader.load_model(row[0])\n if re.match('.* / .*', model_label_content):\n same_models.add(model_label_content)\n for same_pair in same_models:\n print(same_pair)\n\n\nif __name__ == '__main__':\n extract_same_model('Panasonic')\n # merge_similar()\n\n\n\n\n\n\n","sub_path":"Rule_Based_Nan/merge_models.py","file_name":"merge_models.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"115124431","text":"from pyvirtualdisplay import Display\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nimport time\n\nHOST_BASE=\"http://localhost:9000\"\n\nprofile = webdriver.firefox.firefox_profile.FirefoxProfile()\n\n# Fiddle with FF preferences\nset_pref = profile.set_preference\nset_pref(\"signed.applets.codebase_principal_support\", True)\nset_pref(\"capability.principal.codebase.p0.granted\", \"UniversalXPConnect\");\nset_pref(\"capability.principal.codebase.p0.id\", HOST_BASE);\nset_pref(\"capability.principal.codebase.p0.subjectName\", \"\");\nset_pref(\"webgl.prefer-native-gl\", True);\nset_pref(\"webgl.force-enabled\", True);\nset_pref(\"dom.max_script_run_time\", 60);\n\n# https://developer.mozilla.org/En/Same-origin_policy_for_file%3A_URIs\n# http://www.generalinterface.org/docs/display/DEVBLOG/2010/04/15/Stopping+the+repetitious+security+prompt+on+Firefox+GI+Builder\nset_pref(\"security.fileuri.strict_origin_policy\", False);\n\n# Set anti-aliasing\nset_pref(\"webgl.msaa-force\", True);\nset_pref(\"webgl.msaa-level\", 4);\n\ndisplay = Display(visible=0, size=(1024, 768))\ndisplay.start()\n\nbrowser = webdriver.Firefox(firefox_profile=profile)\n\npage = \"/plugins/resource/1?page=SoftVis3D\"\n\nbrowser.get(HOST_BASE + page)\nassert 'SonarQube' in browser.title\n\ntime.sleep(10)\nbrowser.save_screenshot('/home/vagrant/screenshots/startScreen.png')\n\n# show city 
view\nbrowser.find_element_by_xpath(\"//*[@id='city-loader']/button\").click()\n\ntime.sleep(20)\nbrowser.save_screenshot('/home/vagrant/screenshots/cityModel.png')\n\n# browser.find_element_by_xpath(\"//*[@id='loader-buttons']/div[2]\").click()\n# browser.find_element_by_xpath(\"//*[@id='dependency-loader']/div[2]/button\").click()\n\n# browser.implicitly_wait(10)\n# browser.save_screenshot('/home/vagrant/screenshots/dependencyView.png')\n\nbrowser.quit()\ndisplay.stop()\n","sub_path":"ansible/provisioning/roles/test/files/checkPlugin.py","file_name":"checkPlugin.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"619726420","text":"import math\n\n\nclass Solution:\n def consecutiveNumbersSum(self, N):\n \"\"\"\n :type N: int\n :rtype: int\n \"\"\"\n cnt = 0\n\n n1 = (math.sqrt(1 + 8 * N) + 1) / 2\n for i in range(1, math.ceil(n1), 2):\n if N % i == 0:\n cnt += 1\n\n n2 = (math.sqrt(1 + 8 * N) - 1) / 2\n for i in range(2, math.ceil(n2)+1, 2):\n if (2 * N - i) % (2 * i) == 0 and (2 * N) % i == 0:\n cnt += 1\n\n return cnt\n","sub_path":"829.py","file_name":"829.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"366902935","text":"import argparse\nimport warnings\n\nfrom tqdm import tqdm\n\nfrom models import ICBF_OCC,HYBRID_CBF_ICBF\nfrom data_loader import DATA_LOADER\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef main():\n\n warnings.filterwarnings(\"ignore\")\n\n parser = argparse.ArgumentParser(description=\"execute inference.py\")\n parser.add_argument('--model_type', type=str, default=None, help='select model type [icbf | hybrid]')\n parser.add_argument('--is_valid', type=str2bool, help='select dataset [True=val.json | False=test.json]')\n\n args = parser.parse_args()\n model_type = args.model_type\n is_valid = args.is_valid\n\n files_dict = {'song_meta': 'song_meta.json', 'train': 'train.json', 'val':'val.json','test': 'test.json'}\n data_dict = dict()\n data_loader = DATA_LOADER()\n print(f\"Load DataFiles: {files_dict.values()}\")\n for key,item in tqdm(files_dict.items()):\n data_dict[key] = data_loader.load_json_to_pandas(item)\n print(f\"\\n{item} is loaded successfully.\")\n\n if is_valid:\n target = data_dict[\"val\"]\n else:\n target = data_dict[\"test\"]\n\n\n if model_type == \"icbf\":\n icbf_model = ICBF_OCC(train_df=data_dict[\"train\"],test_df=target)\n icbf_rcomm_result = icbf_model.execute_recommendation()\n data_loader.write_json(icbf_rcomm_result,'icbf_rcomm_results.json')\n\n\n if model_type == \"hybrid\":\n hybrid_model = HYBRID_CBF_ICBF(train_df=data_dict[\"train\"], test_df=target)\n hybrid_rcomm_result = hybrid_model.execute_recommendation()\n data_loader.write_json(hybrid_rcomm_result, 'hybrid_rcomm_results.json')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"14536023","text":"import PySimpleGUI as sg \r\nimport time, random\r\nnum='000'\r\nlista_num = []\r\nintentos = 10\r\ngano = False # controls the game state\r\ndef instrucciones (): \r\n return 'What, you do not know how to play?! Unbelievable!!! \\nIt is very simple. 
The computer will think of a random 3 (three) digit number. \\nNone of its digits will be repeated. \\nYou must enter a 3 (three) digit number. \\nThe computer will tell you how many digits you guessed CORRECTLY and which ones are right but MISPLACED.\\nGood luck!!'\r\nsg.theme ('Dark Purple6')\r\n\r\ncontenido = [ \r\n [sg.Text ('WELCOME TO THE NUMBER GUESSING GAME',size=(40,2),justification='center',font=('Impact',20),text_color='gray')],\r\n [sg.Text(instrucciones(),size = (47,7),justification='center',font=('Arial',15),text_color='white',key='instrucciones')],\r\n [sg.Text ('Enter 3 digits:', size = (18,1), justification='left',font=('Arial',10)),sg.InputText(key='_numJugador_',size=(5,1)),sg.Button('Try it'),sg.Text(key='lista_num',size=(9,5))],\r\n [sg.Text ('Correct digits:', size = (19,1),font=('Arial',10)),sg.Text(key='_ok_'),sg.Text ('Misplaced digits: ', size = (22,1),font=('Arial',10)),sg.Text(key='_mal_')],\r\n [sg.Text ('Secret number:', size = (18,1), justification='left',font=('Arial',10)),sg.Text(size=(4,1),key='_DISPLAY_'),sg.Text(size = (4,1)),sg.Text(size=(4,1),key='_vidas_')],\r\n [sg.Text ('', size = (10,1)),sg.Text (size = (15,1),justification='center',font=('Arial',30),text_color='white',key='_ganador_')],\r\n [sg.Button('New Secret Number'),sg.Text ('', size = (40,1)),sg.Button('Exit')] \r\n ]\r\n# Displays the secret number\r\ndef actualizar_pantalla(valor_pantalla:str):\r\n try:\r\n ventana['_DISPLAY_'].update(value='{}'.format(valor_pantalla))\r\n except:\r\n ventana['_DISPLAY_'].update(value=valor_pantalla)\r\n\r\ndef actualizar_ok(valor_pantalla:str):\r\n try:\r\n ventana['_ok_'].update(value='{}'.format(valor_pantalla))\r\n except:\r\n ventana['_ok_'].update(value=valor_pantalla)\r\n# Displays the number of remaining attempts\r\ndef actualizar_vidas():\r\n try:\r\n ventana['_vidas_'].update(value=str(intentos))\r\n except:\r\n pass\r\n# Displays whether the player won or lost\r\ndef actualizar_ganador(valor):\r\n try:\r\n ventana['_ganador_'].update(value=valor)\r\n except:\r\n pass\r\n# Displays the misplaced digits\r\ndef actualizar_mal(valor_pantalla:str):\r\n try:\r\n ventana['_mal_'].update(value='{}'.format(valor_pantalla))\r\n except:\r\n ventana['_mal_'].update(value=valor_pantalla)\r\n\r\ndef ver_ganador(ok): \r\n global intentos\r\n global gano\r\n if (intentos > 0) and (ok == 3): # if every digit matched, the player wins and the game ends\r\n actualizar_ganador('YOU WIN')\r\n gano = True\r\n else: \r\n intentos = intentos - 1\r\n actualizar_vidas()\r\n if intentos == 0: # out of attempts: show the loss and end the game\r\n actualizar_ganador('YOU LOSE')\r\n actualizar_pantalla(num)\r\n gano = True\r\n# Displays the numbers already tried \r\ndef actualizar_lista_numeros(valor_pantalla:str):\r\n try:\r\n ventana['lista_num'].update(value=valor_pantalla)\r\n except:\r\n ventana['lista_num'].update(value=valor_pantalla)\r\n# Compares the chosen number against the secret number, digit by digit\r\ndef validar():\r\n casi = 0\r\n ok = 0\r\n listado='' \r\n numJugador = values['_numJugador_'] \r\n if len(numJugador)==3:\r\n for i in range(3):\r\n if numJugador[i] == num[i]:\r\n ok= ok +1\r\n elif numJugador[i] in num:\r\n casi = casi +1 \r\n lista_num.append(str(numJugador))\r\n for i in range(len(lista_num)):\r\n listado = listado + ' | '+ str(lista_num[i])\r\n actualizar_lista_numeros (listado)\r\n actualizar_ok(ok)\r\n actualizar_mal(casi)\r\n ventana['_numJugador_'].update(value='') \r\n 
ver_ganador(ok)\r\n# Creates the secret number\r\ndef num_secreto():\r\n global num\r\n global intentos \r\n num=''\r\n numeros=(list(range(10)))\r\n random.shuffle(numeros)\r\n for i in range(3):\r\n num += str(numeros[i])\r\n return num\r\n# Initializes the interface and the game variables\r\ndef inicializar(num):\r\n global intentos\r\n global lista_num \r\n global gano\r\n gano = False \r\n lista_num=[]\r\n actualizar_lista_numeros ('')\r\n intentos = 10\r\n actualizar_pantalla('***')\r\n actualizar_ganador('')\r\n actualizar_vidas()\r\n\r\nventana = sg.Window('Guess the Number', layout=contenido)\r\n\r\nwhile True:\r\n event, values = ventana.read()\r\n if num =='000':\r\n num_secreto()\r\n if event in (None, 'Exit'): # Exit closes the window\r\n break\r\n if event == 'New Secret Number':\r\n num_secreto()\r\n inicializar(num)\r\n if gano == False:\r\n if event == 'Try it':\r\n validar ()\r\n elif (intentos < 0) and (gano == False):\r\n num_secreto()\r\n inicializar(num)\r\nventana.close()","sub_path":"adivinar_numero_interfaz.py","file_name":"adivinar_numero_interfaz.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"87070586","text":"import contextlib\nimport enum\nimport math\nimport os\nimport pathlib\nimport random\nimport sys\nimport tempfile\nfrom typing import Any, Dict, Iterator, List, Optional, Tuple, Type, cast\n\nimport determined as det\nimport determined_common.api.authentication as auth\nfrom determined import constants, errors, gpu, horovod, load, workload\nfrom determined_common import api, check, context, util\n\n\nclass Mode(enum.Enum):\n \"\"\"\n The mode used to create an experiment.\n\n See :py:func:`determined.create()`.\n \"\"\"\n\n CLUSTER = \"cluster\"\n LOCAL = \"local\"\n\n\ndef _in_ipython() -> bool:\n import __main__\n\n if hasattr(__main__, \"__file__\"):\n return False\n try:\n import IPython\n\n IPython\n except ImportError:\n return False\n return True\n\n\ndef _get_current_args() -> List:\n return sys.argv[1:]\n\n\ndef set_command_default(\n context_dir: pathlib.Path, command: Optional[List[str]] = None\n) -> List[str]:\n if not command or len(command) == 0:\n if _in_ipython():\n raise errors.InvalidExperimentException(\n \"Must specify the location of the notebook file \"\n \"relative to the context directory when in notebook.\"\n )\n\n exp_path = pathlib.Path(sys.argv[0]).resolve()\n exp_rel_path = exp_path.relative_to(context_dir.resolve())\n if exp_rel_path.suffix in {\"py\", \"ipynb\"}:\n raise errors.InvalidExperimentException(\n \"Command must begin with a file with the suffix .py or .ipynb. \"\n \"Found {}\".format(command)\n )\n\n command = [str(exp_rel_path), *_get_current_args()]\n\n return command\n\n\ndef create_experiment(\n config: Optional[Dict[str, Any]],\n context_dir: str,\n command: Optional[List[str]],\n test_mode: bool = False,\n master_url: Optional[str] = None,\n) -> Optional[int]:\n \"\"\"Submit an experiment to the Determined master.\n\n Alternatively, use det.create() with a mode argument of \"submit\".\n\n Args:\n master_url (Optional[str]): The URL of the Determined master node. 
If None\n (default), then the master address will be inferred from the\n environment.\n\n Returns:\n The ID of the created experiment.\n \"\"\"\n if context_dir == \"\":\n raise errors.InvalidExperimentException(\"Cannot specify the context directory to be empty.\")\n\n context_path = pathlib.Path(context_dir)\n config = {**constants.DEFAULT_EXP_CFG, **(config or {})}\n config.setdefault(\"internal\", {})\n config[\"internal\"][\"native\"] = {\"command\": set_command_default(context_path, command)}\n print(\"Creating an experiment with config: {}\".format(config))\n\n if master_url is None:\n master_url = util.get_default_master_address()\n\n exp_context = context.Context.from_local(context_path)\n\n # When a requested_user isn't specified to initialize_session(), the\n # authentication module will attempt to use the token store to grab the\n # current logged-in user. If there is no logged in user found, it will\n # default to constants.DEFAULT_DETERMINED_USER.\n auth.initialize_session(master_url, requested_user=None, try_reauth=True)\n\n if test_mode:\n exp_id = api.create_test_experiment(master_url, config, exp_context)\n else:\n exp_id = api.create_experiment(master_url, config, exp_context)\n print(\"Created experiment {}\".format(exp_id))\n\n return exp_id\n\n\ndef get_gpus() -> Tuple[bool, List[str], List[str]]:\n gpu_ids, gpu_uuids = gpu.get_gpu_ids_and_uuids()\n use_gpu = len(gpu_uuids) > 0\n return use_gpu, gpu_uuids, gpu_ids\n\n\ndef generate_test_hparam_values(config: Dict[str, Any]) -> Dict[str, Any]:\n def generate_random_value(hparam: Any) -> Any:\n if isinstance(hparam, Dict):\n if hparam[\"type\"] == \"const\":\n return hparam[\"val\"]\n elif hparam[\"type\"] == \"int\":\n return random.randint(hparam[\"minval\"], hparam[\"maxval\"])\n elif hparam[\"type\"] == \"double\":\n return random.uniform(hparam[\"minval\"], hparam[\"maxval\"])\n elif hparam[\"type\"] == \"categorical\":\n return hparam[\"vals\"][random.randint(0, len(hparam[\"vals\"]) - 1)]\n elif hparam[\"type\"] == \"log\":\n return math.pow(hparam[\"base\"], random.uniform(hparam[\"minval\"], hparam[\"maxval\"]))\n else:\n raise Exception(f\"Wrong type of hyperparameter: {hparam['type']}\")\n elif isinstance(hparam, (int, float, str)):\n return hparam\n else:\n raise Exception(f\"Wrong type of hyperparameter: {type(hparam)}\")\n\n hparams_def = config.get(\"hyperparameters\", {})\n hparams = {name: generate_random_value(hparams_def[name]) for name in hparams_def}\n return hparams\n\n\ndef make_test_workloads(\n checkpoint_dir: pathlib.Path, config: det.ExperimentConfig\n) -> workload.Stream:\n interceptor = workload.WorkloadResponseInterceptor()\n\n print(\"Training one batch\")\n yield from interceptor.send(workload.train_workload(1), [config.batches_per_step()])\n metrics = interceptor.metrics_result()\n batch_metrics = metrics[\"batch_metrics\"]\n check.eq(len(batch_metrics), config.batches_per_step())\n if util.debug_mode():\n print(f\"Finished training, metrics: {batch_metrics}\")\n\n print(\"Validating one step\")\n yield from interceptor.send(workload.validation_workload(1), [])\n validation = interceptor.metrics_result()\n v_metrics = validation[\"validation_metrics\"]\n if util.debug_mode():\n print(f\"Finished validating, validation metrics: {v_metrics}\")\n\n print(f\"Saving a checkpoint to {checkpoint_dir}.\")\n yield workload.checkpoint_workload(), [checkpoint_dir], workload.ignore_workload_response\n print(f\"Finished saving a checkpoint to {checkpoint_dir}.\")\n\n yield 
workload.terminate_workload(), [], workload.ignore_workload_response\n print(\"The test experiment passed.\")\n\n\ndef make_local_experiment_config(input_config: Optional[Dict[str, Any]]) -> Dict[str, Any]:\n \"\"\"\n Create a local experiment configuration based on an input configuration and\n defaults. Use a shallow merging policy to overwrite our default\n configuration with each entire subconfig specified by a user.\n\n The defaults and merging logic is not guaranteed to match the logic used by\n the Determined master. This function also does not do experiment\n configuration validation, which the Determined master does.\n \"\"\"\n\n input_config = input_config or {}\n config_keys_to_ignore = {\n \"bind_mounts\",\n \"checkpoint_storage\",\n \"environment\",\n \"resources\",\n \"optimizations\",\n }\n for key in config_keys_to_ignore:\n if key in input_config:\n print(\n \"'{}' configuration key is not supported by LOCAL mode and will be ignored\".format(\n key\n )\n )\n del input_config[key]\n\n return {**constants.DEFAULT_EXP_CFG, **input_config}\n\n\ndef make_test_experiment_env(\n checkpoint_dir: pathlib.Path, config: Optional[Dict[str, Any]]\n) -> Tuple[det.EnvContext, workload.Stream, det.RendezvousInfo, horovod.HorovodContext]:\n config = det.ExperimentConfig(make_local_experiment_config(config))\n hparams = generate_test_hparam_values(config)\n use_gpu, container_gpus, slot_ids = get_gpus()\n local_rendezvous_ports = (\n f\"{constants.LOCAL_RENDEZVOUS_PORT},{constants.LOCAL_RENDEZVOUS_PORT+1}\"\n )\n\n env = det.EnvContext(\n master_addr=\"\",\n master_port=1,\n container_id=\"test_mode\",\n experiment_config=config,\n hparams=hparams,\n initial_workload=workload.train_workload(1, 1, 1),\n latest_checkpoint=None,\n use_gpu=use_gpu,\n container_gpus=container_gpus,\n slot_ids=slot_ids,\n debug=config.debug_enabled(),\n workload_manager_type=\"\",\n det_rendezvous_ports=local_rendezvous_ports,\n det_trial_runner_network_interface=constants.AUTO_DETECT_TRIAL_RUNNER_NETWORK_INTERFACE,\n det_trial_id=\"1\",\n det_experiment_id=\"1\",\n det_cluster_id=\"test_mode\",\n trial_seed=config.experiment_seed(),\n )\n workloads = make_test_workloads(checkpoint_dir.joinpath(\"checkpoint\"), config)\n rendezvous_ports = env.rendezvous_ports()\n rendezvous_info = det.RendezvousInfo(\n addrs=[f\"0.0.0.0:{rendezvous_ports[0]}\"], addrs2=[f\"0.0.0.0:{rendezvous_ports[1]}\"], rank=0\n )\n hvd_config = horovod.HorovodContext.from_configs(\n env.experiment_config, rendezvous_info, env.hparams\n )\n\n return env, workloads, rendezvous_info, hvd_config\n\n\ndef _stop_loading_implementation() -> None:\n raise det.errors.StopLoadingImplementation()\n\n\ndef create_trial_instance(\n trial_def: Type[det.Trial], checkpoint_dir: str, config: Optional[Dict[str, Any]] = None\n) -> det.Trial:\n \"\"\"\n Create a trial instance from a Trial class definition. This can be a useful\n utility for debugging your trial logic in any development environment.\n\n Arguments:\n trial_def: A class definition that inherits from the det.Trial interface.\n checkpoint_dir:\n The checkpoint directory that the trial will use for loading and\n saving checkpoints.\n config:\n An optional experiment configuration that is used to initialize the\n :class:`determined.TrialContext`. 
If not specified, a minimal default\n is used.\n \"\"\"\n env, workloads, rendezvous_info, hvd_config = make_test_experiment_env(\n checkpoint_dir=pathlib.Path(checkpoint_dir), config=config\n )\n trial_context = trial_def.trial_context_class(env, hvd_config)\n return trial_def(trial_context)\n\n\ndef create(\n trial_def: Type[det.Trial],\n config: Optional[Dict[str, Any]] = None,\n mode: Mode = Mode.CLUSTER,\n context_dir: str = \"\",\n command: Optional[List[str]] = None,\n master_url: Optional[str] = None,\n) -> None:\n # TODO: Add a reference to the local development tutorial.\n \"\"\"\n Create an experiment.\n\n Arguments:\n trial_def:\n A class definition implementing the ``det.Trial`` interface.\n config:\n A dictionary representing the experiment configuration to be\n associated with the experiment.\n mode:\n The :py:class:`determined.experimental.Mode` used when creating\n an experiment\n\n 1. ``Mode.CLUSTER`` (default): Submit the experiment to a remote\n Determined cluster.\n\n 2. ``Mode.LOCAL``: Test the experiment in the calling\n Python process for local development / debugging purposes.\n Run through a minimal loop of training, validation, and checkpointing steps.\n\n context_dir:\n A string filepath that defines the context directory. All model\n code will be executed with this as the current working directory.\n\n In CLUSTER mode, this argument is required. All files in this\n directory will be uploaded to the Determined cluster. The total\n size of this directory must be under 96 MB.\n\n In LOCAL mode, this argument is optional and assumed to be the\n current working directory by default.\n command:\n A list of strings that is used as the entrypoint of the training\n script in the Determined task environment. When executing this\n function via a python script, this argument is inferred to be\n ``sys.argv`` by default. When executing this function via IPython\n or Jupyter notebook, this argument is required.\n\n Example: When creating an experiment by running \"python train.py\n --flag value\", the default command is inferred as [\"train.py\",\n \"--flag\", \"value\"].\n\n master_url:\n An optional string to use as the Determined master URL in submit\n mode. 
If not specified, will be inferred from the environment\n variable ``DET_MASTER``.\n \"\"\"\n\n if Mode(mode) == Mode.CLUSTER:\n if load.RunpyGlobals.is_initialized():\n load.RunpyGlobals.set_runpy_trial_result(\n trial_def, cast(Type[det.TrialController], trial_def.trial_controller_class)\n )\n _stop_loading_implementation()\n\n else:\n create_experiment(\n config=config, context_dir=context_dir, command=command, master_url=master_url\n )\n\n elif Mode(mode) == Mode.LOCAL:\n context_path = pathlib.Path(context_dir) if context_dir else pathlib.Path.cwd()\n test_one_batch(context_path, trial_class=trial_def, config=config)\n else:\n raise errors.InvalidExperimentException(\"Must use either local mode or cluster mode.\")\n\n\ndef _init_native(\n controller_cls: Type[det.TrialController],\n native_context_cls: Type[det.NativeContext],\n config: Optional[Dict[str, Any]] = None,\n mode: Mode = Mode.CLUSTER,\n context_dir: str = \"\",\n command: Optional[List[str]] = None,\n master_url: Optional[str] = None,\n) -> Any:\n if Mode(mode) == Mode.CLUSTER:\n if load.RunpyGlobals.is_initialized():\n controller_cls.pre_execute_hook(\n env=load.RunpyGlobals.get_instance().env,\n hvd_config=load.RunpyGlobals.get_instance().hvd_config,\n )\n context = native_context_cls(\n env=load.RunpyGlobals.get_instance().env,\n hvd_config=load.RunpyGlobals.get_instance().hvd_config,\n )\n load.RunpyGlobals.set_runpy_native_result(context, controller_cls)\n context._set_train_fn(_stop_loading_implementation)\n return context\n\n else:\n create_experiment(\n config=config, context_dir=context_dir, command=command, master_url=master_url\n )\n print(\"Exiting the program after submitting the experiment.\")\n sys.exit(0)\n\n elif Mode(mode) == Mode.LOCAL:\n print(\"Running a minimal test experiment locally\")\n checkpoint_dir = tempfile.TemporaryDirectory()\n env, workloads, rendezvous_info, hvd_config = make_test_experiment_env(\n checkpoint_dir=pathlib.Path(checkpoint_dir.name), config=config\n )\n print(f\"Using hyperparameters: {env.hparams}\")\n if util.debug_mode():\n print(f\"Using a test experiment config: {env.experiment_config}\")\n\n controller_cls.pre_execute_hook(env=env, hvd_config=hvd_config)\n context = native_context_cls(env=env, hvd_config=hvd_config)\n\n def train_fn() -> None:\n controller = controller_cls.from_native(\n context=context,\n env=env,\n workloads=workloads,\n load_path=None,\n rendezvous_info=rendezvous_info,\n hvd_config=hvd_config,\n )\n controller.run()\n checkpoint_dir.cleanup()\n\n context._set_train_fn(train_fn)\n return context\n\n else:\n raise errors.InvalidExperimentException(\"Must use either local mode or cluster mode.\")\n\n\n@contextlib.contextmanager\ndef local_execution_manager(new_directory: pathlib.Path) -> Iterator:\n \"\"\"\n A context manager that temporarily moves the current working directory and\n appends it to syspath.\n \"\"\"\n\n # TODO(DET-2719): Add context dir to TrainContext and remove this function.\n current_directory = os.getcwd()\n\n try:\n os.chdir(new_directory)\n yield\n finally:\n os.chdir(current_directory)\n\n\ndef test_one_batch(\n context_path: pathlib.Path,\n trial_class: Optional[Type[det.Trial]] = None,\n config: Optional[Dict[str, Any]] = None,\n) -> None:\n # Override the batches_per_step value to 1.\n # TODO(DET-2931): Make the validation step a single batch as well.\n config = {**(config or {}), \"batches_per_step\": 1}\n\n print(\"Running a minimal test experiment locally\")\n checkpoint_dir = tempfile.TemporaryDirectory()\n env, 
workloads, rendezvous_info, hvd_config = make_test_experiment_env(\n checkpoint_dir=pathlib.Path(checkpoint_dir.name), config=config\n )\n print(f\"Using hyperparameters: {env.hparams}\")\n if util.debug_mode():\n print(f\"Using a test experiment config: {env.experiment_config}\")\n\n with local_execution_manager(context_path):\n if not trial_class:\n if util.debug_mode():\n print(\"Loading trial class from experiment configuration\")\n trial_class = load.load_trial_implementation(env.experiment_config[\"entrypoint\"])\n\n controller = load.load_controller_from_trial(\n trial_class=trial_class,\n env=env,\n workloads=workloads,\n load_path=None,\n rendezvous_info=rendezvous_info,\n hvd_config=hvd_config,\n )\n controller.run()\n\n checkpoint_dir.cleanup()\n print(\"Note: to submit an experiment to the cluster, change mode argument to Mode.CLUSTER\")\n","sub_path":"harness/determined/experimental/_native.py","file_name":"_native.py","file_ext":"py","file_size_in_byte":17046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"362113100","text":"from django.db.models import (\n ForeignKey, \n OneToOneField, \n ImageField, \n FileField, \n SET_NULL,\n signals\n)\nfrom django.core import checks\nfrom image import utils\nfrom image import form_fields\n\n\nclass ImageFileField(ImageField):\n '''\n A (model) ImageField.\n ImageFileField allows any length of file path. It adds some\n extra parameters and associated verification.\n \n max_size\n in bytes\n '''\n #NB In stock Django,\n # - The model ImageField does routing, not validation, not even \n # through FileField\n # - The form FileField read-checks files. It checks if a file exists, \n # has a size, and that the filename is not too long.\n # - The form ImageField goes a little further. It checks Pillow can \n # read the file, that Pillow.verify() does not think it is broken, \n # that the MIME type is coherent, then validates the extension \n # against Pillow data.\n # - Interesting, ImageFile itself has a little checking, as it must \n # rescue dimension data from Pillow.\n\n # Class mainly exists for contribute_to options, faking abstract\n # classes. But adds some init vals, bytesize handling, and it's \n # formfield belongs to this app\n default_validators = []\n \n def __init__(self, \n verbose_name=None,\n name=None, \n bytesize_field=None,\n accept_formats=None,\n form_limit_filepath_length = True,\n max_size=None,\n **kwargs\n ):\n self.bytesize_field = bytesize_field\n self.accept_formats = accept_formats\n self.form_limit_filepath_length = form_limit_filepath_length\n self.max_size = max_size\n self.default_validators = []\n super().__init__(verbose_name, name, **kwargs)\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n # upload_to and max_length handled by super()\n if self.bytesize_field:\n kwargs['bytesize_field'] = self.bytesize_field \n kwargs['max_size'] = self.max_size\n kwargs['form_limit_filepath_length'] = self.form_limit_filepath_length\n return name, path, args, kwargs\n\n def contribute_to_class(self, cls, name, private_only=False):\n # This is not contribute to class at all. It is class \n # contribute to field. That said, Django does this for \n # verbose names.\n # \n # Point is, this method is run by the metaclass\n # way before the classes are constructed. And it is run on\n # every class in MRO, from base to subclasses. So class \n # attributes referenced here can influence actions in an \n # imitation of overriding. 
Which Python cannot otherwise do. \n super().contribute_to_class(cls, name, private_only=False)\n\n # Since this predates init, and steps from base, we can simply\n # overwrite values\n # Must use hasattr() defence because migration builds \n # '__fake__' classes\n if (hasattr(cls, 'filepath_length')):\n self.max_length = cls.filepath_length\n if (hasattr(cls, 'form_limit_filepath_length')):\n self.form_limit_filepath_length = cls.form_limit_filepath_length\n if (hasattr(cls, 'max_upload_size')):\n self.max_size = utils.mb2bytes(cls.max_upload_size)\n if (hasattr(cls, 'accept_formats')):\n self.accept_formats = cls.accept_formats\n\n # slightly more normal\n if not cls._meta.abstract:\n signals.post_init.connect(self.update_bytesize_field, sender=cls)\n\n def update_bytesize_field(self, instance, force=False, *args, **kwargs):\n # Nothing to update if the field doesn't have the attribute or if\n # the field is deferred.\n if not self.bytesize_field or self.attname not in instance.__dict__:\n return\n\n # getattr will call the ImageFileDescriptor's __get__ method, which\n # coerces the assigned value into an instance of self.attr_class\n # (ImageFieldFile in this case).\n file = getattr(instance, self.attname)\n \n # Nothing to update if we have no file and not being forced to update.\n if not file and not force:\n return\n update_field = self.bytesize_field and not(getattr(instance, self.bytesize_field))\n \n # When the field has a value, we are most likely loading\n # data from the database or updating an image field that already had\n # an image stored. In the first case, we don't want to update the\n # field because we are already getting the value from the\n # database. In the second case, we do want to update the\n # field and will skip this return because force will be True since we\n # were called from ImageFileDescriptor.__set__.\n if not(update_field) and not force:\n return\n\n # file should be an instance of ImageFieldFile or should be None.\n bytesize = None\n if file:\n bytesize = file.size\n \n # ok, update\n if self.bytesize_field:\n setattr(instance, self.bytesize_field, bytesize)\n\n def formfield(self, **kwargs):\n # if max_len is None, the formfield is unlimited length\n max_length = None\n if (self.form_limit_filepath_length):\n max_length = self.max_length\n return super().formfield(**{\n 'form_class': form_fields.FreePathImageField,\n 'max_length': max_length,\n 'max_size': self.max_size,\n 'accept_formats' : self.accept_formats,\n **kwargs,\n })\n\n\n\nclass ReformFileField(FileField):\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs['max_length'] = self.max_length\n return name, path, args, kwargs\n \n def contribute_to_class(self, cls, name, private_only=False):\n super().contribute_to_class(cls, name, private_only=False)\n if (hasattr(cls, 'image_model') and hasattr(cls.image_model, 'filepath_length')):\n self.max_length = cls.image_model.filepath_length \n\n\n\nclass ImageRelationFieldMixin():\n \n def _check_relation_model_is_image_model(self):\n # These checks are run in 'show migrations' and 'runmigrations'.\n # By this check, the related model must exist.\n remote_class = self.remote_field.model\n\n # ...but that attribute may be by lazy string, which means\n # rooting about for a model class\n rel_is_string = isinstance(remote_class, str)\n if (rel_is_string):\n remote_class = self.opts.apps.all_models[remote_class]\n\n if (not(issubclass(remote_class, AbstractImage))):\n # More rooting about. Fun, huh? 
\n model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name\n return [\n checks.Error(\n \"ImageSingleField defines a relation with model '%s', which is \"\n \"not a subclass of AbstractImage.\" % model_name,\n obj=self,\n id='fields.E300',\n )\n ]\n return []\n\n\n\nclass ImageManyToOneField(ImageRelationFieldMixin, ForeignKey):\n '''\n A preconfigured ManyToOne model field for Images.\n\n This is a ForeignKey, so several models can refer to one image.\n It is suitable for referring to images intended as a pool e.g. \n for gallery use.\n \n The field has some dafaults, which can be overridden,\n - The field is nullable\n - Deletion of the image sets the field to null\n - Deletion of the model will not delete the image \n - The image can not refer back to the model\n '''\n def __init__(self, to, related_query_name=None,\n limit_choices_to=None, parent_link=False,\n db_constraint=True, **kwargs):\n\n # not kwrd, set a default\n on_delete = kwargs.get('on_delete', SET_NULL)\n kwargs['blank'] = kwargs.get('blank', True) \n kwargs['null'] = kwargs.get('null', True) \n related_name = kwargs.get('related_name', '+') \n to_field = None\n super().__init__(to, on_delete, related_name, related_query_name,\n limit_choices_to, parent_link, to_field,\n db_constraint, **kwargs)\n \n def check(self, **kwargs):\n #NB run after the core checks\n return [\n *super().check(**kwargs),\n *self._check_relation_model_is_image_model(),\n ]\n \n \n \n\nclass ImageOneToOneField(ImageRelationFieldMixin, OneToOneField):\n '''\n A preconfigured OneToOne model field for Images.\n \n This is a OneToOneField, so suitable when the model containing the \n field is locked to one image (which other models can not use) e.g.\n 'Sales product' -> 'Product image'.\n\n The field has some dafaults, which can be overridden,\n - The field is nullable\n - Deletion of the image sets the field to null\n - Deletion of the model deletes the image \n - The image can not refer back to the model\n\n auto_delete\n delete image model on deletion of a model with this field.\n Needs to be enabled in signals. 
Default True\n '''\n auto_delete = True\n\n def __init__(self, to, **kwargs):\n if ('auto_delete' in kwargs):\n self.auto_delete = kwargs['auto_delete']\n \n # not a kwarg, set a default\n on_delete = kwargs.get('on_delete', SET_NULL)\n kwargs['blank'] = kwargs.get('blank', True) \n kwargs['null'] = kwargs.get('null', True) \n kwargs['related_name'] = kwargs.get('related_name', '+') \n kwargs['to_field'] = None\n super().__init__(to, on_delete, **kwargs)\n \n def check(self, **kwargs):\n #NB run after the core checks\n return [\n *super().check(**kwargs),\n *self._check_relation_model_is_image_model(),\n ]\n","sub_path":"env/Lib/site-packages/image/model_fields.py","file_name":"model_fields.py","file_ext":"py","file_size_in_byte":10008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"255050962","text":"from pyspark import SparkConf, SparkContext\r\nimport sys\r\nimport random\r\nimport operator\r\n\r\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\r\n\r\n# add more functions as necessary\r\n\r\ndef get_euler(partitionList):\r\n \r\n total_for_this_partition = 0 \r\n \r\n for element in partitionList:\r\n random.seed(element) \r\n sum = 0.0\r\n while sum < 1:\r\n sum += random.random()\r\n total_for_this_partition += 1\r\n return total_for_this_partition\r\n\r\ndef main(samples):\r\n rangeNumOfSamplesRdd = sc.range(samples,numSlices=40).glom() # RDD of ints from 0 to numberOfSamples; divide it into a list of lists using glom\r\n eulerRDD = rangeNumOfSamplesRdd.map(get_euler) # estimate e on each partition; no. of calls = no. of partitions\r\n numberOfIterations = eulerRDD.reduce(operator.add) # sum all partitions\r\n euler_constant = numberOfIterations / samples\r\n print(\"Euler constant is : \" + str(euler_constant))\r\n\r\nif __name__ == '__main__':\r\n conf = SparkConf().setAppName('example code')\r\n sc = SparkContext(conf=conf)\r\n assert sc.version >= '2.3' # make sure we have Spark 2.3+\r\n samples = sys.argv[1] # number of samples to estimate e\r\n main(int(samples))\r\n","sub_path":"a3/euler.py","file_name":"euler.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"509362862","text":"def max_sub_prob(arr):\n\tactual = arr[0]\n\tfor i in range(len(arr)):\n\t\tsum = arr[i]\n\t\tactual = max(actual,sum)\n\t\tfor j in range(i+1,len(arr)):\n\t\t\tsum += arr[j]\n\t\t\tactual = max(actual,sum)\n\treturn actual\n\nt = [13,-3,-25,20,-3,-16,-23,18,20,-7,12,-5,-22,15,-4,7]\nprint(max_sub_prob(t))\n","sub_path":"4-Divide_and_Conquer/brute-max-subarray-problem.py","file_name":"brute-max-subarray-problem.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"222583140","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass LoadDimensionOperator(BaseOperator):\n \"\"\"\n Custom operator to load a dimensional table from a staging table.\n \n args:\n redshift_conn_id: the connection id for the redshift connection stored in airflow\n sql_query: select query returning a result set that will be inserted into the target_table\n target_table: target table into which to insert the results\n target_columns: optional list of columns named in the INSERT statement\n truncate: enable delete-insert instead of append. If true, the table will be truncated before inserting the results of sql_query\n \"\"\"\n ui_color = '#80BD9E'\n\n 
@apply_defaults\n def __init__(self,\n redshift_conn_id,\n sql_query,\n target_table,\n target_columns,\n truncate,\n *args, **kwargs):\n\n super(LoadDimensionOperator, self).__init__(*args, **kwargs)\n \n self.redshift_conn_id=redshift_conn_id\n self.sql_query=sql_query\n self.target_table=target_table\n self.target_columns=target_columns\n self.truncate=truncate\n\n def execute(self, context):\n db = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n \n if self.truncate:\n self.log.info(f'Truncating target table: {self.target_table}')\n\n db.run(f\"TRUNCATE TABLE {self.target_table}\")\n if self.target_columns is None:\n insert_columns = \"\"\n else:\n insert_columns = '(' + \",\".join(self.target_columns) + ')'\n \n insert_sql = f\"\"\"\nINSERT INTO {self.target_table} {insert_columns}\n{self.sql_query}\"\"\"\n \n self.log.info(f'Loading dimension target table: {self.target_table}, insert_sql: {insert_sql}')\n\n db.run(insert_sql) \n \n","sub_path":"plugins/operators/load_dimension.py","file_name":"load_dimension.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"28728048","text":"import boto3\nimport json\nimport logging\nimport os\nfrom boto3.dynamodb.conditions import Key\n\n# Loggin\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\n\n# AWS\nDYNAMODB = boto3.resource('dynamodb')\nS3 = boto3.client('s3')\n\n# Initialize DynamoDB tables\nPAT_DB = DYNAMODB.Table(os.environ['PATS_DB_NAME'])\nREC_DB = DYNAMODB.Table(os.environ['RECS_DB_NAME'])\n\ndef validate_call(event_path):\n \"\"\" Check if all the parameters are given from API call\n \n Args:\n event (dict): incoming event from API\n context (dict): info about incoming event from AWS\n \"\"\"\n\n if 'pat_id' not in event_path or 'rec_id' not in event_path:\n return False\n \n return True\n \n\ndef lambda_handler(event,context):\n \"\"\" Main Function\n Args:\n event (dict): incoming event from API\n context (dict): info about incoming event from AWS\n \"\"\"\n LOGGER.info(event)\n event_path = event['pathParameters']\n\n if not validate_call(event_path):\n body = {\n \"error\": \"Invalid data given in body.\"\n }\n # Create response Body\n response = {\n \"isBase64Encoded\": False,\n \"statusCode\": 409,\n \"headers\": {},\n \"body\": json.dumps(body)\n }\n\n return response\n\n # Get Patient Info\n pat = PAT_DB.query(\n KeyConditionExpression=Key('id').eq(event_path['pat_id'])\n )\n\n if pat['Count'] == 0:\n body = {\n \"error\": \"Patient Not Found\",\n \"id\": event_path['pat_id']\n }\n # Create response Body\n response = {\n \"isBase64Encoded\": False,\n \"statusCode\": 404,\n \"headers\": {},\n \"body\": json.dumps(body)\n }\n\n return response\n\n # Remove Patient Recordings\n rec_id = event_path['rec_id']\n # Remove from DB\n REC_DB.delete_item(\n Key={'id': rec_id}\n )\n # Remove from S3\n S3.delete_object(\n Bucket=os.environ['S3_NAME'],\n Key=rec_id + \".wav\"\n )\n\n # Create new list of recordings without old recording\n if rec_id in pat['Items'][0]['recordings']:\n recordings = pat['Items'][0]['recordings'].remove(rec_id)\n if recordings is None:\n recordings = []\n else:\n recordings = pat['Items'][0]['recordings']\n\n # Remove Recording from Patient\n PAT_DB.update_item(\n Key={'id': event_path['pat_id']},\n UpdateExpression=(\n 'SET recordings=:set_recordings'\n ),\n ExpressionAttributeValues={\n ':set_recordings': recordings\n }\n )\n\n # Create response Body\n response = {\n \"isBase64Encoded\": 
False,\n \"statusCode\": 200,\n \"headers\": {}\n }\n\n return response","sub_path":"backend-prd/modules/lambda/code/del_rec.py","file_name":"del_rec.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"367922493","text":"def problem1(upper):\r\n counter = 0\r\n sum = 0\r\n for i in range(upper):\r\n if i % 3 == 0 or i % 5 == 0:\r\n counter += 1\r\n sum += i\r\n print(counter)\r\n print(sum)\r\n\r\ndef sumMultiples(upper,div): #finds sum of all multiples of div strictly less than upper\r\n stop = (upper - 1) // div\r\n return (stop * (stop +1)) * (div/2)\r\n\r\ndef problem1alt(upper):\r\n print(sumMultiples(upper,3)+sumMultiples(upper,5)-sumMultiples(upper,15))\r\n\r\nproblem1(1000)\r\nproblem1alt(1000)\r\n","sub_path":"001.py","file_name":"001.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"92714310","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render, redirect\nfrom .forms import CourierForm\nfrom .models import CourierDetails\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\n\n\n# Create your views here.\n\n# from ..login.models import UserProfile\n@login_required(login_url=\"/login/\")\ndef courier_form(request):\n form = CourierForm()\n if request.method == 'POST':\n form = CourierForm(request.POST)\n if form.is_valid():\n form.save()\n courier_nam = form.cleaned_data.get('courier_name')\n courier_info = CourierDetails.objects.get(courier_name=courier_nam)\n return render(request, 'courier/courier_just_recorded.html', {'courier_info': courier_info})\n # return redirect('success')\n else:\n return render(request, 'courier/record.html', {'form': form})\n\n else:\n return render(request, 'courier/record.html', {'form': form})\n\n@login_required(login_url=\"/login/\")\ndef courier_detail(request):\n branchs = request.user.userprofile.branch\n print(branchs)\n courier_info = CourierDetails.objects.all()\n courier_pending = CourierDetails.objects.filter(courier_status='Pending')\n courier_delivered = CourierDetails.objects.filter(courier_status='Delivered')\n pending = CourierDetails.objects.filter(courier_status='Pending').count()\n delivered = CourierDetails.objects.filter(courier_status='Delivered').count()\n # { % url\n # 'edit_courier'\n # courier.courier_id %}\n\n return render(request, 'courier/courierdetail.html', {'courier_delivered': courier_delivered,'courier_pending': courier_pending,'courier_info': courier_info, 'pending': pending, 'delivered': delivered})\n\n\n\ndef courier_tracking(request):\n\n tracking_id = request.GET.get('courier_id')\n # print(tracking_id)\n\n if tracking_id:\n try:\n courier_details = CourierDetails.objects.get(courier_tracking_id=tracking_id)\n return render(request, 'courier/tracked_courier.html', {'courier_details': courier_details})\n except ObjectDoesNotExist:\n raise Http404('Courier with this Tracking ID is not found. 
Check Tracking ID, Thank you !!')\n else:\n return redirect('success')\n@login_required(login_url=\"/login/\")\ndef edit_courier(request, pk):\n courier = get_object_or_404(CourierDetails, courier_id=pk)\n if request.method == \"POST\":\n form= CourierForm(request.POST, instance=courier)\n if form.is_valid():\n form.save()\n return redirect('courier_detail')\n else:\n form = CourierForm(instance=courier)\n return render(request, 'courier/edit_courier.html', {'form': form})\n\n\n\n\n\n\n # def courier_form(request):\n # form = CourierForm()\n # if request.method == 'POST':\n # form = CourierForm(request.POST)\n # if form.is_valid():\n # form.save()\n # courier_name = form.cleaned_data.get('courier_id')\n # print(courier_name)\n # return redirect('success')\n # else:\n # return render(request, 'courier/record.html', {'form': form})\n #\n # else:\n # return render(request, 'courier/record.html', {'form': form})","sub_path":"courier/courier_branch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"618266027","text":"#\r\n# component.py\r\n#\r\nimport logging\r\nlogger = logging.getLogger(\"component\")\r\n\r\nclass ComponentAlreadyRegistered(Exception):\r\n pass\r\n\r\nclass Component(object):\r\n\r\n def __init__(self, name, depend=None):\r\n self._component_name = name\r\n self._component_depend = depend\r\n self._component_state = \"Stopped\"\r\n componentManager.register(self)\r\n\r\n\r\n def _component_start(self):\r\n if self._component_state == \"Stopped\":\r\n if hasattr(self, \"start\"):\r\n self._component_state = \"Starting\"\r\n self.start()\r\n self._component_state = \"Started\"\r\n return True;\r\n else:\r\n return False\r\n \r\n elif self._component_state == \"Started\":\r\n return True\r\n \r\n\r\n logger.error(\"Cannot start a component not in a Stopped state!\")\r\n return False;\r\n\r\n def _component_stop(self):\r\n\r\n if self._component_state != \"Stopped\" and self._component_state != \"Stopping\":\r\n if hasattr(self, \"stop\"):\r\n self._component_state = \"Stopping\"\r\n self.stop()\r\n self._component_state = \"Stopped\"\r\n return True\r\n else:\r\n return False\r\n\r\n elif self._component_state == \"Stopped\":\r\n return True\r\n\r\n logger.error(\"Cannot start a component not in a Started state!\")\r\n return False\r\n\r\n def _component_shutdown(self):\r\n\r\n if hasattr(self, \"shutdown\"):\r\n return self.shutdown()\r\n \r\n return self._component_stop()\r\n\r\nclass ComponentManager(object):\r\n\r\n def __init__(self):\r\n self.components = {}\r\n\r\n def register(self, obj):\r\n \"\"\"\r\n Registers a component object with the registry. This is done\r\n automatically when a Component object is instantiated.\r\n\r\n :param obj: the Component object\r\n :type obj: object\r\n\r\n :raises ComponentAlreadyRegistered: if a component with the same name is already registered.\r\n\r\n \"\"\"\r\n name = obj._component_name\r\n if name in self.components:\r\n raise ComponentAlreadyRegistered(\"Component already registered with name %s\" % name)\r\n\r\n self.components[obj._component_name] = obj\r\n\r\n def unregister(self, name):\r\n \"\"\"\r\n unregisters a component from the registry. 
A stop will be\r\n issued to the component prior to deregistering it.\r\n\r\n :param name: the name of the component\r\n :type name: string\r\n\r\n \"\"\"\r\n\r\n if name in self.components:\r\n logger.debug(\"Unregistering Component: %s\", name)\r\n self.stop([name])\r\n del self.components[name]\r\n\r\n def start(self, names=[]):\r\n if not names:\r\n names = self.components.keys()\r\n elif isinstance(names, str):\r\n names = [names]\r\n\r\n for name in names:\r\n self.components[name]._component_start()\r\n\r\n def stop(self, names=[]):\r\n if not names:\r\n names = self.components.keys()\r\n elif isinstance(names, str):\r\n names = [names]\r\n\r\n for name in names:\r\n self.components[name]._component_stop()\r\n\r\n def shutdown(self):\r\n names = self.components.keys()\r\n\r\n for name in names:\r\n self.components[name]._component_shutdown()\r\n\r\n\r\ncomponentManager = ComponentManager()\r\n\r\ndef get(name):\r\n \"\"\"\r\n Return a reference to a component.\r\n\r\n :param name: the Component name to get\r\n :type name: string\r\n\r\n :returns: the Component object\r\n :rtype: object\r\n\r\n :raises KeyError: if the Component does not exist\r\n\r\n \"\"\"\r\n return componentManager.components[name]","sub_path":"reactor/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"537549097","text":"from contents.getAPI import Api\nfrom common.request_src import RequestSRC\nfrom contents.getToken import get_token, getToken, get_QRcode\nfrom common.logfile import log\nimport unittest\n\n\nlogger = log()\n\n\nclass CancelOrder(unittest.TestCase):\n \"\"\"\n Cancel a user's order\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n # Prepare the test data\n cls.api = Api()\n cls.header = cls.api.app_header\n cls._url, cls._payload = cls.api.export_api('取消用户订单')\n\n t = getToken(get_token()) # fetch the login token\n cls.header['loginToken'] = t.token # singleton: only one instance is ever created\n\n def test_cancel_order_01(self):\n \"\"\"\n Case 1: cancel an order with an empty QR code\n :return:\n \"\"\"\n\n self._payload['payQrCodeNo'] = ''\n r = RequestSRC.request_post(url=self._url, headers=self.header,\n data=self.api.data_json(self._payload))\n logger.info('Request URL: %s' % self._url)\n logger.info('Request headers: %s' % self.header)\n logger.info('Request payload: %s' % self._payload)\n logger.info('Response body: %s' % r)\n self.assertNotEqual(r['code'], 200)\n\n def test_cancel_order_02(self):\n \"\"\"\n Case 2: cancel an order with a QR code that does not exist\n :return:\n \"\"\"\n\n self._payload['payQrCodeNo'] = 'wx:dada24234dsdfds'\n r = RequestSRC.request_post(url=self._url, headers=self.header,\n data=self.api.data_json(self._payload))\n logger.info('Request URL: %s' % self._url)\n logger.info('Request headers: %s' % self.header)\n logger.info('Request payload: %s' % self._payload)\n logger.info('Response body: %s' % r)\n self.assertNotEqual(r['code'], 200)\n\n def test_cancel_order_03(self):\n \"\"\"\n Case 3: cancel an order with a QR code that has not generated an order yet\n :return:\n \"\"\"\n\n self._payload['payQrCodeNo'] = 'xwallet:FB5A5151CEFE4042930F8E001F8AB7A5'\n r = RequestSRC.request_post(url=self._url, headers=self.header,\n data=self.api.data_json(self._payload))\n logger.info('Request URL: %s' % self._url)\n logger.info('Request headers: %s' % self.header)\n logger.info('Request payload: %s' % self._payload)\n logger.info('Response body: %s' % r)\n self.assertNotEqual(r['code'], 
200)\n\n\n\"\"\"\nQR codes that already have a generated order are not covered yet.\n\"\"\"\n","sub_path":"testcase/testXwallet/CancelOrder_case.py","file_name":"CancelOrder_case.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"81534258","text":"import sys\n\ndef MaximalNonBranchingPaths(adjDict, inCount, outCount):\n paths = []\n \n for node in list(adjDict.keys()):\n # if node is not a 1-in-1-out\n if inCount[node] != 1 or outCount[node] != 1:\n if outCount[node] > 0:\n for outNode in adjDict[node]:\n present = 0\n nonBranchingPath = []\n nonBranchingPath.append(node)\n nonBranchingPath.append(outNode)\n while inCount[outNode] == 1 and outCount[outNode] == 1:\n nextNode = adjDict[outNode][0]\n if nextNode == node:\n break \n nonBranchingPath.append(nextNode)\n outNode = nextNode\n\n for path in paths:\n if sorted(nonBranchingPath) == sorted(path):\n present = 1\n if not present:\n paths.append(nonBranchingPath)\n \n # handling isolated cycles\n if inCount[node] == 1 and outCount[node] == 1:\n for outNode in adjDict[node]:\n present = 0\n nonBranchingPath = []\n nonBranchingPath.append(node)\n nonBranchingPath.append(outNode)\n while inCount[outNode] == 1 and outCount[outNode] == 1:\n nextNode = adjDict[outNode][0]\n nonBranchingPath.append(nextNode)\n if nextNode == node:\n for path in paths:\n if sorted(nonBranchingPath[1:]) == sorted(path[1:]):\n present = 1\n if not present:\n paths.append(nonBranchingPath)\n break\n outNode = nextNode\n\n \n return paths\n \n\ndef FindInOutCount(adjDict):\n # We modify the list and close the loop\n outCount = {}\n inCount = {}\n\n keys = adjDict.keys()\n\n possibleNodes = []\n\n for key in keys:\n possibleNodes += [item for item in adjDict[key]]\n\n possibleNodes = possibleNodes + list(set(keys) - set(possibleNodes)) \n\n unbalancedIn = unbalancedOut = 0\n\n for key1 in possibleNodes:\n if key1 in keys:\n outCount[key1] = len(adjDict[key1])\n else:\n outCount[key1] = 0\n inCount[key1] = 0\n for key2 in keys:\n inCount[key1] += adjDict[key2].count(key1)\n \n return inCount, outCount\n\ndef DeBruijn(strings):\n presufDict = {}\n\n for string in strings:\n presufDict[Prefix(string)] = []\n\n for string in strings:\n presufDict[Prefix(string)].append(Suffix(string))\n\n adjMatrix = []\n\n for item in presufDict:\n adjMatrix.append(item + \" -> \" + \",\".join(presufDict[item]))\n\n return adjMatrix\n\ndef Prefix(string):\n return string[:-1]\n\ndef Suffix(string):\n return string [1:]\n\ndef rotate(l, n):\n return l[n:] + l[:n]\n\nadjDict = {}\n'''\ndata = sys.stdin.read().split('\\n')\n'''\n\nwith open(\"data.txt\", \"r\") as f:\n data = f.readlines()\n\ncleanData = []\n\nfor item in data:\n item = item.replace('\\n', '').strip()\n cleanData.append(item)\n\nadjData = cleanData\n\n#adjData = DeBruijn(cleanData)\n\nfor item in adjData:\n adjDict[item.split(\"->\")[0].strip()] = item.split(\"->\")[1].strip().split(\",\")\n\ninCount, outCount = FindInOutCount(adjDict)\n'''\nprint adjDict\n'''\npaths = MaximalNonBranchingPaths(adjDict, inCount, outCount)\nfor path in paths:\n print(\" -> \".join(path))\n","sub_path":"bioinfo2/week2/5NonBranchingEx.py","file_name":"5NonBranchingEx.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"376015565","text":"from core.dal.users import User\nfrom eme.data_access import get_repo\n\n\nclass UsersCommand():\n def __init__(self, server):\n self.server = server\n self.users = 
get_repo(User)\n\n self.commands = {\n 'users:list': {\n 'help': 'Lists users',\n 'short': {},\n 'long': []\n },\n 'users:setadmin': {\n 'help': 'Sets admin',\n 'short': {},\n 'long': ['username=']\n },\n }\n\n def runList(self):\n dusers = self.users.list_all()\n\n for user in dusers:\n print(user.uid, user.email, user.created_at)\n\n def runSetadmin(self, username):\n user = self.users.find_user(username=username)\n\n user.admin = not user.admin\n\n self.users.save()\n if user.admin:\n print(\"User is admin:\", user.username, user.uid)\n else:\n print(\"User admin revoked:\", user.username, user.uid)\n","sub_path":"cliapp/commands/UsersCommand.py","file_name":"UsersCommand.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"164712602","text":"import numpy as np\ndef unique(m,n):\n arr = np.zeros((n,m), dtype=np.int32)\n if n==1 or m==1:\n return 1\n for i in range((m)):\n arr[0][i] = 1\n for i in range((n)):\n arr[i][0] = 1\n for i in range(1,n):\n for j in range(1,m):\n arr[i][j] = arr[i-1][j] + arr[i][j-1]\n return arr[n-1][m-1]\n\n\nprint(unique(7,3))\n\n\n ","sub_path":"leetcode/uniquePathsI.py","file_name":"uniquePathsI.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"45729194","text":"from boofuzz import *\n\nhost = '127.0.0.1' # windows VM\nport = 9999 # vulnserver port\n\n\ndef main():\n\n session = Session(target=Target(\n connection=SocketConnection(host, port, proto='tcp')))\n\n s_initialize(\"TRUN\") # just giving our session a name, \"TRUN\"\n # these strings are fuzzable by default, so here instead of blank\n s_string(\"TRUN\", fuzzable=False)\n # we don't want to fuzz the space between \"TRUN\" and our arg\n s_delim(\" \", fuzzable=False)\n # This value is arbitrary as we did not specify 'False' for fuzzable. Boofuzz will fuzz this string now\n s_string(\"FUZZ\")\n\n # having our 'session' variable connect following the guidelines we established in \"TRUN\"\n session.connect(s_get(\"TRUN\"))\n session.fuzz() # calling this function actually performs the fuzzing\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"fuzz.py","file_name":"fuzz.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"9678110","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.13-x86_64/egg/reviewboard/reviews/models/diff_comment.py\n# Compiled at: 2020-02-11 04:03:56\nfrom __future__ import unicode_literals\nfrom django.db import models\nfrom django.utils import six\nfrom django.utils.translation import ugettext_lazy as _\nfrom reviewboard.diffviewer.models import FileDiff\nfrom reviewboard.reviews.models.base_comment import BaseComment\n\nclass Comment(BaseComment):\n \"\"\"A comment made on a diff.\n\n A comment can belong to a single filediff or to an interdiff between\n two filediffs. 
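The `uniquePathsI.py` record above fills an n-by-m DP table, but the count has a closed form: a monotone path makes m+n-2 moves in total, and choosing which m-1 of them go right fixes the path completely. So the whole table collapses to one binomial coefficient:

from math import comb

def unique_paths(m, n):
    # choose positions of the (m-1) right-moves among all (m+n-2) moves
    return comb(m + n - 2, m - 1)

assert unique_paths(7, 3) == 28   # same answer as the DP version in the record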
It can also have multiple replies.\n \"\"\"\n anchor_prefix = b'comment'\n comment_type = b'diff'\n filediff = models.ForeignKey(FileDiff, verbose_name=_(b'file diff'), related_name=b'comments')\n interfilediff = models.ForeignKey(FileDiff, verbose_name=_(b'interdiff file'), blank=True, null=True, related_name=b'interdiff_comments')\n first_line = models.PositiveIntegerField(_(b'first line'), blank=True, null=True)\n num_lines = models.PositiveIntegerField(_(b'number of lines'), blank=True, null=True)\n last_line = property(lambda self: self.first_line + self.num_lines - 1)\n\n def get_absolute_url(self):\n revision_path = six.text_type(self.filediff.diffset.revision)\n if self.interfilediff:\n revision_path += b'-%s' % self.interfilediff.diffset.revision\n return b'%sdiff/%s/?file=%s#file%sline%s' % (\n self.get_review_request().get_absolute_url(),\n revision_path, self.filediff.id, self.filediff.id,\n self.first_line)\n\n class Meta(BaseComment.Meta):\n db_table = b'reviews_comment'\n verbose_name = _(b'Diff Comment')\n verbose_name_plural = _(b'Diff Comments')","sub_path":"pycfiles/ReviewBoard-3.0.17-py2.7/diff_comment.py","file_name":"diff_comment.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"151657635","text":"import numpy, csv\n\n\ndef read_csv(filecsv, skip_rows=0, delimiter=',', skip_column_left=0, skip_column_right=0, last_column_is_label=True,\n num_label=1):\n \"\"\"\n Read file.csv filecsv with delimiter delimiter skipping the first skip_rows and skipping the first skip_column_left\n and the first skip_column_right from the left and the right respectively\n precondition:the labels are in the last columns\n :param filecsv: string,path to csv file\n :param skip_rows:int,row to skip\n :param last_column_is_label:boolean,if last_column_is_label==True the label must be read from last column(after\n remove the skip_column_right column from the right),otherwise is the first label(after remove the skip_column_left\n column from the left\n :return features set: X,labels set:Y\n \"\"\"\n with open(filecsv) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=delimiter)\n for i in range(skip_rows):\n next(readCSV)\n data = list(readCSV)\n for i in range(len(data)):\n data[i] = [x for x in data[i] if x]\n result = numpy.array(data)\n num_row = len(result)\n num_col = len(result[skip_rows])\n if last_column_is_label:\n X = result[:, skip_column_left:num_col - skip_column_right - num_label] # data without columns of the labels\n Y = result[:, num_col - skip_column_right - num_label:] # array of label\n else: # first column is label\n X = result[:, 0 + skip_column_left + num_label:num_col - skip_column_right]\n Y = result[:,\n 0 + skip_column_left:skip_column_left + num_label] # array of label in the first num_label columns\n return X, Y\n\n\ndef convert_label_values(Y, list_old_label, list_new_label):\n \"\"\"\n Convert label's value in label set Y from oldvalue(taken from list old_label)\n to new value(taken from list new_label)\n list_old_label[i] became list_new_label[i]\n :param Y:label set\n :param list_old_label: list of label to convert\n :param list_new_label: list of new label\n :return: Y convertito\n \"\"\"\n for j in range(len(Y)):\n for i in range(len(list_old_label)):\n if Y[j] == list_old_label[i]:\n Y[j] = list_new_label[i]\n Y = convert_type_to_float(Y)\n return Y\n\n\ndef convert_type_to_float(data):\n \"\"\"\n convert numeric values in float64,needed to scikit-learn 
library\n :param data:any,data to convert\n :return: data converted in float64\n \"\"\"\n data = data.astype('float64')\n return data\n","sub_path":"CSV.py","file_name":"CSV.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"393624723","text":"# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.\n# Licensed to PSF under a Contributor Agreement.\n\n# Modifications:\n# Copyright David Halter and Contributors\n# Modifications are dual-licensed: MIT and PSF.\n\nfrom parso.python.tokenize import tokenize\nfrom parso.utils import parse_version_string\nfrom parso.python.token import PythonTokenTypes\n\n\nclass GrammarParser():\n \"\"\"\n The parser for Python grammar files.\n \"\"\"\n def __init__(self, bnf_grammar):\n self._bnf_grammar = bnf_grammar\n self.generator = tokenize(\n bnf_grammar,\n version_info=parse_version_string('3.6')\n )\n self._gettoken() # Initialize lookahead\n\n def parse(self):\n # grammar: (NEWLINE | rule)* ENDMARKER\n while self.type != PythonTokenTypes.ENDMARKER:\n while self.type == PythonTokenTypes.NEWLINE:\n self._gettoken()\n\n # rule: NAME ':' rhs NEWLINE\n self._current_rule_name = self._expect(PythonTokenTypes.NAME)\n self._expect(PythonTokenTypes.OP, ':')\n\n a, z = self._parse_rhs()\n self._expect(PythonTokenTypes.NEWLINE)\n\n yield a, z\n\n def _parse_rhs(self):\n # rhs: items ('|' items)*\n a, z = self._parse_items()\n if self.value != \"|\":\n return a, z\n else:\n aa = NFAState(self._current_rule_name)\n zz = NFAState(self._current_rule_name)\n while True:\n # Add the possibility to go into the state of a and come back\n # to finish.\n aa.add_arc(a)\n z.add_arc(zz)\n if self.value != \"|\":\n break\n\n self._gettoken()\n a, z = self._parse_items()\n return aa, zz\n\n def _parse_items(self):\n # items: item+\n a, b = self._parse_item()\n while self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING) \\\n or self.value in ('(', '['):\n c, d = self._parse_item()\n # Need to end on the next item.\n b.add_arc(c)\n b = d\n return a, b\n\n def _parse_item(self):\n # item: '[' rhs ']' | atom ['+' | '*']\n if self.value == \"[\":\n self._gettoken()\n a, z = self._parse_rhs()\n self._expect(PythonTokenTypes.OP, ']')\n # Make it also possible that there is no token and change the\n # state.\n a.add_arc(z)\n return a, z\n else:\n a, z = self._parse_atom()\n value = self.value\n if value not in (\"+\", \"*\"):\n return a, z\n self._gettoken()\n # Make it clear that we can go back to the old state and repeat.\n z.add_arc(a)\n if value == \"+\":\n return a, z\n else:\n # The end state is the same as the beginning, nothing must\n # change.\n return a, a\n\n def _parse_atom(self):\n # atom: '(' rhs ')' | NAME | STRING\n if self.value == \"(\":\n self._gettoken()\n a, z = self._parse_rhs()\n self._expect(PythonTokenTypes.OP, ')')\n return a, z\n elif self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING):\n a = NFAState(self._current_rule_name)\n z = NFAState(self._current_rule_name)\n # Make it clear that the state transition requires that value.\n a.add_arc(z, self.value)\n self._gettoken()\n return a, z\n else:\n self._raise_error(\"expected (...) 
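A usage sketch for `read_csv` and `convert_label_values` from the `CSV.py` record above. The file name `iris.csv` and the three species labels are hypothetical placeholders chosen for illustration, not part of the record:

# hypothetical file: header row, 4 numeric columns, species name in the last column
X, Y = read_csv('iris.csv', skip_rows=1, last_column_is_label=True)
Y = convert_label_values(Y, ['setosa', 'versicolor', 'virginica'], [0, 1, 2])
X = convert_type_to_float(X)
print(X.shape, Y.shape)   # e.g. (150, 4) (150, 1)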
or NAME or STRING, got %s/%s\",\n self.type, self.value)\n\n def _expect(self, type_, value=None):\n if self.type != type_:\n self._raise_error(\"expected %s, got %s [%s]\",\n type_, self.type, self.value)\n if value is not None and self.value != value:\n self._raise_error(\"expected %s, got %s\", value, self.value)\n value = self.value\n self._gettoken()\n return value\n\n def _gettoken(self):\n tup = next(self.generator)\n self.type, self.value, self.begin, prefix = tup\n\n def _raise_error(self, msg, *args):\n if args:\n try:\n msg = msg % args\n except:\n msg = \" \".join([msg] + list(map(str, args)))\n line = self._bnf_grammar.splitlines()[self.begin[0] - 1]\n raise SyntaxError(msg, ('', self.begin[0],\n self.begin[1], line))\n\n\nclass NFAArc(object):\n def __init__(self, next_, nonterminal_or_string):\n self.next = next_\n self.nonterminal_or_string = nonterminal_or_string\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self.nonterminal_or_string)\n\n\nclass NFAState(object):\n def __init__(self, from_rule):\n self.from_rule = from_rule\n self.arcs = [] # List[nonterminal (str), NFAState]\n\n def add_arc(self, next_, nonterminal_or_string=None):\n assert nonterminal_or_string is None or isinstance(nonterminal_or_string, str)\n assert isinstance(next_, NFAState)\n self.arcs.append(NFAArc(next_, nonterminal_or_string))\n\n def __repr__(self):\n return '<%s: from %s>' % (self.__class__.__name__, self.from_rule)\n","sub_path":"contrib/python/parso/py2/parso/pgen2/grammar_parser.py","file_name":"grammar_parser.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"108350544","text":"from oauthlib.oauth2 import BackendApplicationClient\nfrom requests_oauthlib import OAuth2Session\n\n\nclass Connection:\n def __init__(self, client_id, client_secret, region):\n self.client_id = client_id\n self.client_secret = client_secret\n self.region = region\n\n def get_token(self):\n client = BackendApplicationClient(client_id=self.client_id)\n oauth = OAuth2Session(client=client)\n token = dict(oauth.fetch_token(token_url='https://{}.battle.net/oauth/token'.format(self.region),\n client_id=self.client_id, client_secret=self.client_secret))\n return {'Authorization': 'Bearer {}'.format(token['access_token'])}\n","sub_path":"WoWAPI/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"28811291","text":"from player import Player\nfrom question import Question\nfrom pprint import pprint\nimport random\n\ndef gen_board(double_jeopardy):\n '''\n param double_jeopardy: Boolean, true if board should be double jeopardy\n '''\n board = [[Question((400 if double_jeopardy else 200) * p) for p in range(1, 6)] for i in range(6)]\n add_daily_double(board, double_jeopardy)\n return board\n\n# https://preview.redd.it/tzssic6y6ad11.png?width=2283&format=png&auto=webp&s=79d794d04d27a7465701877c5273a01bc4e430e1\nDAILY_DOUBLE_DISTR = [\n [0.04, 2.23, 6.06, 7.71, 4.72], # category 1\n [0.03, 1.24, 3.77, 5.09, 2.69], # category 2\n [0.04, 1.80, 5.22, 7.26, 4.35], # category 3\n [0.03, 1.59, 5.01, 6.48, 4.21], # category 4\n [0.03, 1.17, 4.89, 6.95, 3.93], # category 5\n [0.03, 1.26, 3.65, 4.75, 3.20], # category 6\n]\n\ndef add_daily_double(board, double_jeopardy):\n probs = [q for category in DAILY_DOUBLE_DISTR for q in category]\n indices = range(0, 30)\n dd1 = 
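A usage sketch for the `Connection` class in the `WoWAPI/connection.py` record above. The credentials are placeholders, and the endpoint shown is one public Blizzard API route picked for illustration; substitute whichever resource you actually need:

import requests

conn = Connection(client_id='YOUR_ID', client_secret='YOUR_SECRET', region='eu')
headers = conn.get_token()   # {'Authorization': 'Bearer <token>'}
resp = requests.get('https://eu.api.blizzard.com/data/wow/token/index',
                    params={'namespace': 'dynamic-eu'}, headers=headers)
print(resp.status_code)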
random.choices(indices, probs)[0]\n category = dd1 // 5\n question = dd1 % 5\n board[category][question].daily_double = True\n\n if double_jeopardy:\n probs[dd1] = 0\n dd2 = random.choices(indices, probs)[0]\n category = dd2 // 5\n question = dd2 % 5\n board[category][question].daily_double = True\n\ndef round_over(board):\n for col in board:\n for q in col:\n if not q.answered:\n return False\n \n return True\n\ndef pick_q(board):\n for col in board:\n for q in col:\n if not q.answered:\n return q\n\ndef do_round(board, in_control, players):\n while not round_over(board):\n question = players[in_control].pick_question_strat(board)\n participants = [player for player in players if player.know_question()]\n \n do_question(participants, question)\n \n question.answered = True\n \ndef do_question(participants, question):\n while len(participants) > 0:\n buzz_index = random.choices(range(len(participants)), [p.buzzer_skill for p in participants])[0]\n curr = participants[buzz_index]\n \n if not question.daily_double:\n if curr.is_correct():\n curr.money += question.value\n in_control = participants.index(curr)\n break\n else:\n curr.money -= question.value\n participants.pop(buzz_index)\n \n else: # Daily Double\n bet = curr.daily_double_strat(curr.money)\n if curr.is_correct():\n curr.money += bet\n else:\n curr.money -= bet\n break\n\ndef do_final_jeopardy(players):\n participants = [player for player in players if player.money > 0]\n\n for participant in participants:\n bet = participant.final_jeopardy_strat(participant.money)\n if participant.is_correct():\n participant.money += bet\n else:\n participant.money -= bet\n\ndef sim():\n p1 = Player(0.7, 0.9, 1, lambda x : x, lambda x : x, pick_q)\n p2 = Player(0.7, 0.9, 1, lambda x : x / 2, lambda x : x / 2, pick_q)\n p3 = Player(0.7, 0.9, 1, lambda x : x / 3, lambda x : x / 3, pick_q)\n\n players = [p1, p2, p3]\n in_control = 0\n\n board = gen_board(False)\n do_round(board, in_control, players) # Normal jeopardy\n\n board = gen_board(True)\n do_round(board, in_control, players) # Double jeopardy\n\n do_final_jeopardy(players)\n\n return p1.money, p2.money, p3.money\n\np1Money = 0\np2Money = 0\np3Money = 0\n\nn = 1000\nfor i in range(n):\n vals = sim()\n p1Money += vals[0]\n p2Money += vals[1]\n p3Money += vals[2]\n\nprint(p1Money / n)\nprint(p2Money / n)\nprint(p3Money / n)","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"24164674","text":"import numpy as np\n\n\ndef get_stochastic_transition_matrix(A):\n \"\"\" Calculates the stochastic transition matrix\n\n :param A: adjacency matrix with edge strengths\n :type A: numpy.array\n :return: stochastic transition matrix\n :rtype: numpy.array\n \"\"\"\n Q_prim = 1 / np.sum(A, axis=1).reshape(-1, 1) * A\n return Q_prim\n\n\ndef get_transition_matrix(Q_prim, start_node, alpha):\n \"\"\" Calculate the transition matrix from given stochastic transition matrix,\n start node and restart probability\n\n :param Q_prim: stochastic transition matrix\n :type Q_prim: numpy.array\n :param start_node: index of the start node\n :type start_node: int\n :param alpha: restart probability\n :type alpha: float\n :return: transition matrix\n :rtype: numpy.array\n \"\"\"\n one = np.zeros(Q_prim.shape)\n one[:, start_node] = 1\n return (1 - alpha) * Q_prim + alpha * one\n\n\ndef iterative_page_rank(trans, epsilon, max_iter):\n \"\"\" Iterative power-iterator like computation of 
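`add_daily_double` in the `game.py` record above flattens the 6x5 placement table row-major and draws one weighted index with `random.choices`; `divmod` is the exact inverse of that flattening. A self-contained check of the mapping, reusing the record's `DAILY_DOUBLE_DISTR`:

import random

probs = [p for category in DAILY_DOUBLE_DISTR for p in category]   # 30 weights
idx = random.choices(range(30), probs)[0]   # weights need not sum to 1
category, question = divmod(idx, 5)         # undoes idx = category * 5 + question
assert 0 <= category < 6 and 0 <= question < 5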
PageRank vector p\n\n :param trans: transition matrix\n :type trans: numpy.array\n :param epsilon: tolerance parameter\n :type epsilon: float\n :param max_iter: maximum number of iterations\n :type max_iter: int\n :return: stationary distribution\n :rtype: numpy.array\n \"\"\"\n p = np.ones((1, trans.shape[0])) / trans.shape[0]\n p_new = np.dot(p, trans)\n for t in range(max_iter):\n if np.allclose(p, p_new, rtol=0, atol=epsilon):\n break\n p = p_new\n p_new = np.dot(p, trans)\n return p_new[0]\n\n\ndef random_walk(A, source, alpha=0.3, max_iter=100):\n \"\"\" Random walk with given parameters and directed graph\n\n :param A: adjacency matrix\n :type A: numpy.array\n :param source: index of source node\n :type source: int\n :param alpha: restart probability\n :type alpha: float\n :param max_iter: maximum number of iterations\n :type max_iter: int\n :return: p vector for every source node\n :rtype: numpy.array\n \"\"\"\n epsilon = 1e-12\n\n Q_prim = get_stochastic_transition_matrix(A)\n Q = get_transition_matrix(Q_prim, source, alpha)\n p = iterative_page_rank(Q, epsilon, max_iter)\n\n return p\n","sub_path":"IIS_2020_2021/link_prediction/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"332040175","text":"\"\"\"\nCreated on 2018/6/11 22:43\n\n\"\"\"\nfrom collections import namedtuple\nfrom functools import wraps\n\nfrom flask import current_app, g, request\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired\n\nfrom app.libs.error_code import AuthFailed\n\n\n__Author__ = '阿强'\n\nUser = namedtuple('User', ['uid'])\n\n\ndef token_decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n token = request.headers.get(\"token\")\n user_info = verify_auth_token(token)\n if not user_info:\n return False\n else:\n g.user = user_info\n return func(*args, **kwargs)\n return wrapper\n\n\ndef verify_auth_token(token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except BadSignature:\n raise AuthFailed(msg='invalid token', error_code=1002)\n except SignatureExpired:\n raise AuthFailed(msg='token is expiration', error_code=1003)\n uid = data['uid']\n return User(uid)\n","sub_path":"app/libs/token_auth.py","file_name":"token_auth.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"114498472","text":"\r\nSTATUS = (\r\n\t(1, 'Active'),\r\n\t(2, 'Inactive'),\r\n\t(3, 'Blocked'),\r\n\t(4, 'Submitted'),\r\n\t(5, 'Archive'),\r\n\t(6, 'Draft'),\r\n\t(7, 'Rejected'),\r\n\t(8, 'Survey'),\r\n\t(9, 'Verified'),\r\n\t(10, 'Regitered'),\r\n\t(11, 'Insert'),\r\n\t(12, 'Bupati'),\r\n\t(13, 'Perbaikan'),\r\n\t(14, 'Rejected-dinkes'),\r\n)\r\n\r\n# Status Data : \r\n# 1. Active: Ketika izin selesai\r\n# 2. Inactive: Kalo tidak ada survey kabid mengubah status dari Submitted ke Inactive\r\n# 3. Blocked: \r\n# 4. Submitted: Operator mengubah Draft ke Submitted\r\n# 5. Archive: untuk pembayaran\r\n# 6. Draft: default\r\n# 7. Rejected:\r\n# 8. Survey: Dan jika ada survey mengubah status Submitted ke Survey dan jika Survey selesai kabid merubah status menjadi Inactive\r\n# 9. Verified:\r\n# 10. Regitered:\r\n# 11. Insert:\r\n# 12. Bupati: Izin perlu di verifikasi oleh Bupati\r\n# 13. Perbaikan: Izin Kembali ke kabid untuk di cek kembali\r\n# \r\n# Status Data surat:\r\n# 1. 
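A toy run of `random_walk` from the `random_walk.py` record above on a 3-node directed triangle; the result should be a probability vector biased toward the restart node:

import numpy as np

A = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])
p = random_walk(A, source=0, alpha=0.3)
assert np.isclose(p.sum(), 1.0)   # stationary distribution over the 3 nodes
print(p)                          # node 0, the restart target, gets the most mass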
Surat dibuat default status Draft\r\n# 2. kabid merubah status Draft ke Submitted\r\n# 3. \r\n\r\nKETERANGAN_PEKERJAAN = (\r\n\t('',''),\r\n\t('Dokter', 'Dokter'),\r\n\t('Petani', 'Petani'),\r\n\t('Guru', 'Guru'),\r\n\t('TNI', 'TNI'),\r\n\t('Polisi', 'Polisi'),\r\n\t('Nelayan', 'Nelayan'),\r\n\t('Dosen', 'Dosen'),\r\n\t('Direktur', 'Direktur'),\r\n\t('Bidan', 'Bidan'),\r\n\t('Apoteker', 'Apoteker'),\r\n\t('Wartawan', 'Wartawan'),\r\n)\r\n\r\ndef get_status_color(obj):\r\n\twarna = \"\"\r\n\tif obj.status == 2:\r\n\t\twarna = ' warning'\r\n\telif obj.status == 4:\r\n\t\twarna = ' danger'\r\n\telif obj.status == 5:\r\n\t\twarna = ' success'\r\n\telif obj.status == 6:\r\n\t\twarna = ' info'\r\n\treturn warna\r\n\r\ndef api_val(data):\r\n\tif not data:\r\n\t\treturn None\r\n\tif data == '':\r\n\t\treturn None\r\n\tif data == 'None':\r\n\t\treturn None\r\n\treturn data\r\n\r\n#Kurang Foto\r\ndef save_sync_siabjo(user, data):\r\n\r\n\treturn True","sub_path":"accounts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"29377836","text":"import numpy as np\nimport Network\nfrom game import *\n\n\ndef Evaluate(strategies, juegos = 10):\n Jugadores = []\n for i in range(len(strategies)):\n Jugadores.append(Jugador(500, strategies[i]))\n for h in range(juegos):\n random.shuffle(Jugadores)\n for i in range(0,len(Jugadores)//9, 9):\n #print(len(Jugadores[i*9:(i+1)*9]))\n Juego(Jugadores[i*9:(i+1)*9], 10, juegos).torneo()\n print(\"Tournament\", i);\n print(\"Tournament round %d finished\" %h);\n for x in Jugadores:\n x.fichas = 500\n\n fitness = [x.puesto for x in Jugadores]\n x = np.array([x.getStrategy() for x in Jugadores])\n return x[np.argsort(fitness)]","sub_path":"Try.py","file_name":"Try.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"348623484","text":"from POM.Home_page import Home_Page\nfrom Lib import Lib\nfrom POM.Contact_Us import Contact_Us\nfrom selenium.webdriver.support.ui import Select\nimport json\n\n\n# Go to URL\n# Click \"Contact us\"\n# Fill all field in Contact us besides Message field and click on Send button\n# Check that Validate message displays\n# Close browser\n\ndef test_3():\n try:\n obj_lib=Lib()\n #open browser\n browser=obj_lib.open_browser()\n #load page\n obj_lib.page_load(browser)\n obj_home=Home_Page(browser)\n #find and click on Contact us button\n browser.find_element(*obj_home.contact_us).click()\n obj_contact=Contact_Us(browser)\n obj_lib.wait_for_element(browser, obj_contact.subject_heading)\n #find and select Subject Heading field\n element = browser.find_element(*obj_contact.subject_heading)\n element.click()\n subject_name=obj_lib.get_data(key='subject_heading')\n select=Select(element)\n select.select_by_visible_text(subject_name)\n \n #find and input in email field\n \n with open(\"config.json\") as f:\n data=json.load(f)\n browser.find_element(*obj_contact.email_address).send_keys(data[\"eMail\"])\n \n #find Send button and cklick\n send=browser.find_element(*obj_contact.sent_bnt)\n obj_lib.move_to_element(browser,send)\n send.click()\n #assert seccess message\n success_message=obj_lib.get_data(key=\"contact_us_error_message\")\n assert success_message in browser.page_source\n except:\n print(\"Contact us page with not full fills fields test is fail \")\n obj_lib.save_screenshot(browser)\n finally:\n #close browse\n 
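The tournament loop in the `Try.py` record above both divides by 9 in the range (`range(0, len(Jugadores)//9, 9)`) and multiplies by 9 in the slice, so with, say, 810 players only tables 0, 9, 18, ... ever play. A corrected chunking sketch, assuming the intent is one 9-player table per consecutive group:

n_tables = len(Jugadores) // 9
for t in range(n_tables):
    mesa = Jugadores[t * 9:(t + 1) * 9]   # players 0-8, then 9-17, ...
    Juego(mesa, 10, juegos).torneo()
    print("Tournament", t)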
obj_lib.close_browser(browser)\n\n\n#Nel, use raise in except block\n#there are parts which you use in positive case, it means code duplication, would be better to keep in POM\n\n\n\n\n\n\n\n \n\n\n \n\n\n \n\n","sub_path":"Marina/Test/test_contact_us_fail.py","file_name":"test_contact_us_fail.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"88529408","text":"from __future__ import absolute_import, division, print_function\nimport os\n\ntry:\n from gensim import corpora, models\n TFIDF = True\nexcept ImportError:\n TFIDF = False\nimport pytest\n\nfrom memex_dossier.models.features import sip\nfrom memex_dossier.models.tests import kvl, store\n\n\n@pytest.fixture\ndef tfidf():\n if not TFIDF:\n return\n doc1 = u'Andrew likes Diet Pepsi.'\n doc2 = u'Andrew knows the muffin man.'\n doc3 = u'Andrew lives near the muffin man on Shirley Lane.'\n corpus = map(sip.noun_phrases, [doc1, doc2, doc3])\n dictionary = corpora.Dictionary(corpus)\n bows = [dictionary.doc2bow(tokens) for tokens in corpus]\n return models.TfidfModel(bows, id2word=dictionary)\n\n@pytest.fixture\ndef tfidf_path():\n dir_name = os.path.dirname(__file__)\n return os.path.join(dir_name, 'tfidf_model.dump')\n\n# def test_fc_generator(tfidf):\n # print(tfidf)\n # print(tfidf[u'Andrew likes Diet Pepsi'])\n # assert False\n","sub_path":"memex_dossier/models/tests/test_web.py","file_name":"test_web.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"173394910","text":"h, w = map(int, input().split())\nboard = [[0]*(w+2)] + [[0]+list(map(int, input().split()))+[0] for _ in range(h)] + [[0]*(w+2)]\ndone = [[False for _ in range(w+2)] for _ in range(h+2)]\n\ndxy = [(1,0),(-1,0),(0,1),(0,-1)]\n\nans = 0\nfor r in range(1,h+1):\n for c in range(1,w+1):\n if done[r][c]: continue\n v = board[r][c]\n if v <= 1: continue\n q = [(r,c)]\n ok = True\n while len(q) > 0:\n x, y = q.pop()\n if done[x][y] is True: continue\n done[x][y] = True\n for dx, dy in dxy:\n if board[x+dx][y+dy] > v: ok = False\n if board[x+dx][y+dy] == v: q.append((x+dx,y+dy))\n if ok: ans += 1\n\nprint(ans)\n","sub_path":"ICPC/ukiepc2020problems/efficientlyelevated/submissions/accepted/ragnar.py","file_name":"ragnar.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"25553719","text":"import apache_beam as beam\nimport logging\nfrom apache_beam.options.pipeline_options import PipelineOptions, GoogleCloudOptions, StandardOptions, SetupOptions\n\nclass TemplateOptions(PipelineOptions):\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_value_provider_argument(\"--input\",\n type=str,\n help='Input csv to process',\n dest='input',\n required=False)\n parser.add_value_provider_argument(\"--output\",\n type=str,\n help='Output path to write to in bq or gcs',\n dest='output',\n required=False)\n parser.add_value_provider_argument(\"--mode\",\n type=str,\n help='mode either `bq` or `gcs`',\n default='gcs',\n dest='mode',\n required=False)\n\ndef run():\n options = PipelineOptions()\n options.view_as(StandardOptions).runner = 'DataFlowRunner'\n options.view_as(GoogleCloudOptions).project = 'bigquery-cp-project'\n options.view_as(GoogleCloudOptions).temp_location = 'gs://beam-stg/temp'\n options.view_as(GoogleCloudOptions).region = 'europe-west2'\n 
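The `ragnar.py` record above zero-pads the grid, flood-fills each connected region of equal height, and counts the regions (height > 1) with no strictly higher neighbour. A hand-checked example of what that computes, fed via stdin (traced against the code path, not run against a judge):

# echo '3 3
# 1 2 2
# 1 3 2
# 1 1 1' | python3 ragnar.py
#
# prints 1: the lone 3 is the only plateau higher than all of its neighbours;
# the 2-region touches the higher 3, and cells of height <= 1 are skipped outright.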
options.view_as(GoogleCloudOptions).job_name = 'flex-load-test'\n user_args = options.view_as(TemplateOptions)\n\n with beam.Pipeline(options=options) as p:\n read_data = (p | 'read_csv' >> beam.io.ReadFromText(file_pattern=user_args.input, skip_header_lines=0))\n if user_args.mode == 'gcs':\n write_gcs = (read_data | 'write_storage' >> beam.io.WriteToText(file_path_prefix=f\"gs://cp-csv-bucket/{user_args.output}\", file_name_suffix='.txt'))\n else:\n formatted_row = (read_data | 'format_as_row' >> beam.Map(lambda x: {\"id\": 1, \"row_content\": str(x)}))\n write_bq = (formatted_row | 'write_bq' >> beam.io.WriteToBigQuery(f\"bigquery-cp-project:beam_results.{user_args.output}\",schema='id:INTEGER,row:STRING',create_disposition='CREATE_IF_NEEDED', write_disposition='WRITE_EMPTY'))\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()","sub_path":"pipeline/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"489247926","text":"\n# list comprehension\n# a comprehension is a compact way of creating a python data structure from one\n# or more iterators.\n\nlist_comprehension = [number for number in range(0,9) if number % 2 == 0]\nprint(list_comprehension)\n\n# dictionary comprehension\nword = \"letter\"\ncount_dic = {letter : word.count(letter) for letter in word}\nprint(count_dic)\n\n# \n","sub_path":"Introducing Python/day_004/comprehension.py","file_name":"comprehension.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"400066129","text":"from django.urls import path, include\nfrom django.contrib.auth import views as auth_views\n\nfrom .views import SignUpView, UserProfileView, ActiveAccountView\n\napp_name = 'accounts'\n\n\nurlpatterns = [\n path('signup/', SignUpView.as_view(), name='sign_up'),\n path('active///', ActiveAccountView.as_view(), name='activate_account'),\n path('stu//', UserProfileView.as_view(), name='user_profile'),\n path('login/', auth_views.LoginView.as_view(redirect_authenticated_user=True), name='login'),\n path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"192576490","text":"#(g)\n\nfrom collections import namedtuple\n\nCount = namedtuple('Count', 'letter number')\n#Count attributes : letter, number of times that letter is repeated\n\ndef character_count(string: str, char: str) -> Count:\n ''' takes in a string and a single character. returns a single Count namedtuple\n with char and the number of times char occurs in string'''\n result = 0\n for i in string:\n if i == char:\n result += 1\n return Count(char, result)\nassert character_count('hello, how are you?', 'o') == Count(letter='o', number=3)\nassert character_count('o\\n m\\n g\\n', '\\n') == Count(letter='\\n', number=3)\n\ndef letter_count(s1: str, s2: str) -> 'list of Count':\n '''takes in two strings. counts the number of letters in s1 specified by s2\n using Count. 
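The `comprehension.py` record above trails off after list and dict comprehensions; the two remaining standard forms, for completeness:

# set comprehension: deduplicates while it builds
unique_letters = {letter for letter in 'letter'}
print(unique_letters)    # {'l', 'e', 't', 'r'} in some order

# generator expression: same syntax with (), but lazy; values are produced on demand
squares = (number * number for number in range(5))
print(sum(squares))      # 30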
returns a list of Count namedtuples.'''\n result = []\n for i in s2:\n result.append(character_count(s1, i))\n return result \nassert letter_count('The cabbage has baggage', 'abcd') == [Count(letter='a',number=5),\n Count(letter='b',number=3),\n Count(letter='c',number=1),\n Count(letter='d',number=0)]\nassert letter_count('The \\t cabbAge has \\n bAggage', '\\t\\nAa') == [Count(letter='\\t',number=1),\n Count(letter='\\n',number=1),\n Count(letter='A',number=2),\n Count(letter='a',number=3)]\n","sub_path":"ICS 31/Lab 5/lab5_part_g.py","file_name":"lab5_part_g.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"637140615","text":"from omega import *\r\nfrom cyclops import *\r\nfrom pointCloud import *\r\n\r\n#Directions\r\n#Point Cloud should be the same size as the plane\r\n\r\n#----------------------------------------------------------------------------\r\n#Planeview code\r\nimgResRatioX = 0.18/(float(10260)/32064)\r\nimgResRatioY = 0.18/(float(9850)/30780)\r\nplane = PlaneShape.create(imgResRatioX*10260, imgResRatioY*9850)\r\nplane.setPosition(Vector3(imgResRatioX*10260/2, imgResRatioY*9850/2, 0))\r\nplane.setEffect(\"textured -v emissive -d 50Island.png\")\r\n\r\n\r\n#-----------------------------------------------------------------------------\r\n#PointCloud code\r\nscene = getSceneManager()\r\nscene.addLoader(BinaryPointsLoader())\r\n\r\nsetNearFarZ(0.1, 1000000)\r\n\r\npointProgram = ProgramAsset()\r\npointProgram.name = \"points\"\r\npointProgram.vertexShaderName = \"islandShaders/Sphere.vert\"\r\npointProgram.fragmentShaderName = \"islandShaders/Sphere.frag\"\r\npointProgram.geometryShaderName = \"islandShaders/Sphere.geom\"\r\npointProgram.geometryOutVertices = 4\r\npointProgram.geometryInput = PrimitiveType.Points\r\npointProgram.geometryOutput = PrimitiveType.TriangleStrip\r\nscene.addProgram(pointProgram)\r\n\r\npointScale = Uniform.create('pointScale', UniformType.Float, 1)\r\npointScale.setFloat(1)\r\nglobalAlpha = Uniform.create('globalAlpha', UniformType.Float, 2)\r\nglobalAlpha.setFloat(1)\r\n\r\npointCloudModel = ModelInfo()\r\npointCloudModel.name = 'pointCloud'\r\npointCloudModel.path = 'hmColorHigh.xyzb'\r\n#pointCloudModel.options = \"10000 100:1000000:5 20:100:4 6:20:2 0:5:1\"\r\npointCloudModel.options = \"10000 100:1000000:20 20:100:10 6:20:5 0:5:5\"\r\n#pointCloudModel.options = \"10000 0:1000000:1\"\r\nscene.loadModel(pointCloudModel)\r\n\r\npointCloud = StaticObject.create(pointCloudModel.name)\r\n# attach shader uniforms\r\nmat = pointCloud.getMaterial()\r\nmat.setProgram(pointProgram.name)\r\nmat.attachUniform(pointScale)\r\nmat.attachUniform(globalAlpha)\r\ngetDefaultCamera().setPosition(imgResRatioX*10260/2, imgResRatioY*9850/2, 2500)\r\n\r\n#---------------------------------------------------------------------------\r\n# Movement point cloud code\r\n\r\n#filters\r\nstartDay = Uniform.create('startDay', UniformType.Int, 1)\r\nendDay = Uniform.create('endDay', UniformType.Int, 1)\r\n\r\nmyStartDay = 0\r\nmyEndDay = 1\r\ndayIncrement = 1\r\nnumberOfDays = 84\r\n\r\nstartDay.setInt(myStartDay)\r\nendDay.setInt(myEndDay)\r\n\r\ncolorBy = Uniform.create('colorBy', UniformType.Int, 1) #if 1, shaders turn on. If 0, shaders turn off\r\ncolorBy.setInt(0)\r\n\r\nselectedIndividual1 = Uniform.create('selectedIndividual1', UniformType.Int, 1) #if 1, shaders turn on. 
If 0, shaders turn off\r\nselectedIndividual1.setInt(4693)\r\n\r\nselectedIndividual2 = Uniform.create('selectedIndividual2', UniformType.Int, 1) #if 1, shaders turn on. If 0, shaders turn off\r\nselectedIndividual2.setInt(4693)\r\n\r\nmovePointScale = Uniform.create('movePointScale', UniformType.Float, 1)\r\nmovePointScale.setFloat(8.0)\r\n\r\n#Point cloud created here- makes sure it is different name from James point cloud\r\nmovePointProgram = ProgramAsset()\r\nmovePointProgram.name = \"movePoints\"\r\nmovePointProgram.vertexShaderName = \"movementShaders/Sphere.vert\" #here are our shaders\r\nmovePointProgram.fragmentShaderName = \"movementShaders/Sphere.frag\"\r\nmovePointProgram.geometryShaderName = \"movementShaders/mySphere.geom\"\r\nmovePointProgram.geometryOutVertices = 4\r\nmovePointProgram.geometryInput = PrimitiveType.Points\r\nmovePointProgram.geometryOutput = PrimitiveType.TriangleStrip\r\nscene.addProgram(movePointProgram)\r\n\r\nmovePointCloudModel = ModelInfo()\r\nmovePointCloudModel.name = 'movePointCloud'\r\nmovePointCloudModel.path = 'all.xyzb'#'XY_Chibi_Christmas_Parsed.xyzb'#'Chibi_Christmas_Parsed.xyzb' #'newpng.xyzb'\r\n#movePointCloudModel.options = \"10000 100:1000000:5 20:100:4 6:20:2 0:5:1\"\r\nmovePointCloudModel.options = \"10000 100:1000000:20 20:100:10 6:20:5 0:5:5\"\r\n#movePointCloudModel.options = \"10000 0:1000000:1\"\r\nscene.loadModel(movePointCloudModel)\r\n\r\nmovePointCloud = StaticObject.create(movePointCloudModel.name)\r\n# attach shader uniforms\r\nmoveMat = movePointCloud.getMaterial()\r\nmoveMat.setProgram(movePointProgram.name)\r\n\r\nmoveMat.attachUniform(movePointScale)\r\nmoveMat.attachUniform(startDay)\r\nmoveMat.attachUniform(endDay)\r\nmoveMat.attachUniform(selectedIndividual1)\r\nmoveMat.attachUniform(selectedIndividual2)\r\nmoveMat.attachUniform(colorBy)\r\n\r\n#---------------------------------------------------------------------------\r\n#Menu items\r\n#PointSize slider created by: Alessandro\r\nmm = MenuManager.createAndInitialize()\r\nmm.getMainMenu().addLabel(\"Point Size\")\r\npointss = mm.getMainMenu().addSlider(40, \"onPointSizeSliderValueChanged(%value%)\")\r\npointSlider = pointss.getSlider()\r\npointSlider.setValue(1)\r\n\r\n#Controls alpha values of points created by: Alessandro\r\nmm.getMainMenu().addLabel(\"Point Transparency\")\r\nalphass = mm.getMainMenu().addSlider(11, \"onAlphaSliderValueChanged(%value%)\")\r\nalphaSlider = alphass.getSlider()\r\nalphaSlider.setValue(10)\r\n\r\n#SUBMENU CAMERA\r\nss = mm.getMainMenu().addSubMenu(\"Camera Options\")\r\nvbtn = ss.addButton(\"Vertical View\", \"viewVertical(1)\")\r\nhbtn = ss.addButton(\"Horizontal View\", \"viewHorizontal(1)\")\r\n\r\n#SUBMENU STEPTHRO\r\nss2 = mm.getMainMenu().addSubMenu(\"Step Through Options\")\r\nbtnOneUp = ss2.addButton(\"Forward a day\", \"oneDayStepUp(1)\")\r\nbtnOneDown = ss2.addButton(\"Backward a day\", \"oneDayStepDown(1)\")\r\nbtnSvnUp = ss2.addButton(\"7 Days Forward\", \"sevenDayStepUp(1)\")\r\nbtnSvnUp = ss2.addButton(\"7 Days Backward\", \"sevenDayStepDown(1)\")\r\nss2.addLabel(\"--------------------\")\r\nbtnAll = ss2.addButton(\"All Days\", \"allDay(1)\")\r\n\r\n#SUBMENU COLOR\r\nss3 = mm.getMainMenu().addSubMenu(\"Color Options\")\r\nbtnGrad1 = ss3.addButton(\"Hour Gradient 1\", \"setColorBy(0)\")\r\nbtnGrad2 = ss3.addButton(\"Hour Gradient 2\", \"setColorBy(1)\")\r\nbtnGrad3 = ss3.addButton(\"Hour Gradient 3\", \"setColorBy(2)\")\r\nbtnGrad4 = ss3.addButton(\"Day Gradient 1\", \"setColorBy(3)\")\r\nbtnGrad5 = ss3.addButton(\"Day 
Gradient 2\", \"setColorBy(4)\")\r\nbtnGrad6 = ss3.addButton(\"Color by individual\", \"setColorBy(5)\")\r\n\r\nss4 = mm.getMainMenu().addSubMenu(\"selected Individual 1\")\r\n#ss4.setStyleValue('fill', '#954FEA')\r\nbtn1 = ss4.addButton(\"Veruca 4690\", \"setSelInd1(4690)\")\r\nbtn2 = ss4.addButton(\"Chibi 4693\", \"setSelInd1(4693)\")\r\nbtn3 = ss4.addButton(\"Abby 4652\", \"setSelInd1(4652)\")\r\nbtn4 = ss4.addButton(\"Ben Bob 4653\", \"setSelInd1(4653)\")\r\nbtn5 = ss4.addButton(\"Bonnie 4658\", \"setSelInd1(4658)\")\r\nbtn6 = ss4.addButton(\"Chloe 4052\", \"setSelInd1(4052)\")\r\nbtn7 = ss4.addButton(\"Clementina 4672\", \"setSelInd1(4672)\")\r\nbtn8 = ss4.addButton(\"Ellie 4668\", \"setSelInd1(4668)\")\r\nbtn9 = ss4.addButton(\"Gillian 4671\", \"setSelInd1(4671)\")\r\nbtn10 = ss4.addButton(\"Ornette 4669\", \"setSelInd1(4669)\")\r\nbtn11 = ss4.addButton(\"Pliny 4675\", \"setSelInd1(4675)\")\r\nbtn12 = ss4.addButton(\"Ripley 4650\", \"setSelInd1(4650)\")\r\nbtn13 = ss4.addButton(\"Serge 4670\", \"setSelInd1(4670)\")\r\nbtn14 = ss4.addButton(\"Sofie 4674\", \"setSelInd1(4674)\")\r\nbtn15 = ss4.addButton(\"Greg 4689\", \"setSelInd1(4689)\")\r\nbtn16 = ss4.addButton(\"Ibeth 4654\", \"setSelInd1(4654)\")\r\nbtn17 = ss4.addButton(\"Olga 4657\", \"setSelInd1(4657)\")\r\nbtn18 = ss4.addButton(\"Mimi 4660\", \"setSelInd1(4660)\")\r\nbtn19 = ss4.addButton(\"Kyle 4692\", \"setSelInd1(4692)\")\r\nbtn20 = ss4.addButton(\"Atlas 4673\", \"setSelInd1(4673)\")\r\nbtn21 = ss4.addButton(\"Vielle 4670\", \"setSelInd1(4670)\")\r\nbtn22 = ss4.addButton(\"Judy 4656\", \"setSelInd1(4656)\")\r\nbtn23 = ss4.addButton(\"Merk 4665\", \"setSelInd1(4665)\")\r\n\r\nss5 = mm.getMainMenu().addSubMenu(\"selected Individual 2\")\r\n#ss5.setStyleValue('fill', '#5588F4')\r\nbtn1 = ss5.addButton(\"Veruca 4690\", \"setSelInd2(4690)\")\r\nbtn2 = ss5.addButton(\"Chibi 4693\", \"setSelInd2(4693)\")\r\nbtn3 = ss5.addButton(\"Abby 4652\", \"setSelInd2(4652)\")\r\nbtn4 = ss5.addButton(\"Ben Bob 4653\", \"setSelInd2(4653)\")\r\nbtn5 = ss5.addButton(\"Bonnie 4658\", \"setSelInd2(4658)\")\r\nbtn6 = ss5.addButton(\"Chloe 4052\", \"setSelInd2(4052)\")\r\nbtn7 = ss5.addButton(\"Clementina 4672\", \"setSelInd2(4672)\")\r\nbtn8 = ss5.addButton(\"Ellie 4668\", \"setSelInd2(4668)\")\r\nbtn9 = ss5.addButton(\"Gillian 4671\", \"setSelInd2(4671)\")\r\nbtn10 = ss5.addButton(\"Ornette 4669\", \"setSelInd2(4669)\")\r\nbtn11 = ss5.addButton(\"Pliny 4675\", \"setSelInd2(4675)\")\r\nbtn12 = ss5.addButton(\"Ripley 4650\", \"setSelInd2(4650)\")\r\nbtn13 = ss5.addButton(\"Serge 4670\", \"setSelInd2(4670)\")\r\nbtn14 = ss5.addButton(\"Sofie 4674\", \"setSelInd2(4674)\")\r\nbtn15 = ss5.addButton(\"Greg 4689\", \"setSelInd2(4689)\")\r\nbtn16 = ss5.addButton(\"Ibeth 4654\", \"setSelInd2(4654)\")\r\nbtn17 = ss5.addButton(\"Olga 4657\", \"setSelInd2(4657)\")\r\nbtn18 = ss5.addButton(\"Mimi 4660\", \"setSelInd2(4660)\")\r\nbtn19 = ss5.addButton(\"Kyle 4692\", \"setSelInd2(4692)\")\r\nbtn20 = ss5.addButton(\"Atlas 4673\", \"setSelInd2(4673)\")\r\nbtn21 = ss5.addButton(\"Vielle 4670\", \"setSelInd2(4670)\")\r\nbtn22 = ss5.addButton(\"Judy 4656\", \"setSelInd2(4656)\")\r\nbtn23 = ss5.addButton(\"Merk 4665\", \"setSelInd2(4665)\")\r\n\r\n\r\n\r\n#btnAll.setRadio(True)\r\n\r\n\r\n\r\n#--------------------------------------------------------------------------------------\r\n#Functions\r\ndef oneDayStepUp(value):\r\n\tglobal myStartDay\r\n\tglobal myEndDay\r\n\tglobal numberOfDays\r\n\r\n\tmyStartDay = myStartDay + 1\r\n\tif myStartDay > 
numberOfDays:\r\n\t\tmyStartDay = 0\r\n\tmyEndDay = myStartDay + 1\r\n\tendDay.setInt(myEndDay)\r\n\tstartDay.setInt(myStartDay)\r\n\r\n\t# print( \"one day step up\" + myStartDay)\r\n\r\ndef oneDayStepDown(value):\r\n    global myStartDay\r\n    global myEndDay\r\n    global numberOfDays\r\n\r\n    myStartDay = myStartDay - 1\r\n    if myStartDay < 0:\r\n        myStartDay = numberOfDays-1\r\n    myEndDay = myStartDay + 1\r\n    endDay.setInt(myEndDay)\r\n    startDay.setInt(myStartDay)\r\n\r\n    # print( \"one day step down \" + myStartDay)\r\n\r\ndef sevenDayStepUp(value):\r\n    global myStartDay\r\n    global myEndDay\r\n    global numberOfDays\r\n\r\n    myStartDay = myStartDay + 7\r\n    if myStartDay > numberOfDays:\r\n        myStartDay = 0\r\n    myEndDay = myStartDay + 7\r\n    endDay.setInt(myEndDay)\r\n    startDay.setInt(myStartDay)\r\n\r\n    # print( \"seven day step up\" + myStartDay)\r\n\r\ndef sevenDayStepDown(value):\r\n    global myStartDay\r\n    global myEndDay\r\n    global numberOfDays\r\n\r\n    myStartDay = myStartDay - 7\r\n    if myStartDay < 0:\r\n        myStartDay = numberOfDays-7\r\n    myEndDay = myStartDay + 7\r\n    endDay.setInt(myEndDay)\r\n    startDay.setInt(myStartDay)\r\n\r\n    # print( \"seven day step down \" + myStartDay)\r\n\r\ndef allDay(value):\r\n\tglobal numberOfDays\r\n\tendDay.setInt(numberOfDays)\r\n\tstartDay.setInt(0)\r\n\r\n\t# print( \"one day step \" + myStartDay)\r\n\r\ndef setColorBy(value):\r\n    colorBy.setInt(value)\r\n\r\n    # print( \"set color by \" + value)\r\n\r\ndef setSelInd1(value):\r\n    # fixed: the menu buttons call setSelInd1/setSelInd2, but only a misnamed\r\n    # setSelIn1 existed, and it wrote to colorBy instead of the individual uniform\r\n    selectedIndividual1.setInt(value)\r\n\r\ndef setSelInd2(value):\r\n    selectedIndividual2.setInt(value)\r\n\r\ndef onPointSizeSliderValueChanged(value):\r\n    if (value != 0):\r\n        size = .95 + value * .05\r\n    else:\r\n        size = 0.0\r\n    pointScale.setFloat(size)\r\n\r\ndef onAlphaSliderValueChanged(value):\r\n    if (value != 0):\r\n        a = value/10.0\r\n    else:\r\n        a = 0.0\r\n    #globalAlpha.setFloat(a)\r\n    pointCloud.getMaterial().setAlpha(a)\r\n\r\n# def handleEvent():\r\n#     e = getEvent()\r\n#     print(getDefaultCamera().getPosition()) #prints location of camera\r\n#     if (e.isButtonDown(EventFlags.ButtonDown)):\r\n#         viewVertical(1)\r\n#     if (e.isButtonDown(EventFlags.ButtonUp)):\r\n#         viewHorizontal(1)\r\n# setEventFunction(handleEvent)\r\n\r\ndef viewVertical(value):\r\n    if (value == 1):\r\n        getDefaultCamera().setPosition(Vector3(imgResRatioX*10260/2, imgResRatioY*9850/2, 2500))\r\n        getDefaultCamera().setPitchYawRoll(Vector3(0,0,0))\r\n\r\ndef viewHorizontal(value):\r\n    if (value == 1):\r\n        getDefaultCamera().setPitchYawRoll(Vector3(45,0,0))\r\n        getDefaultCamera().setPosition(Vector3(imgResRatioX*10260/2, 0, 500))\r\n\r\n\r\n\r\n#--Event handler\r\n# EVENT HANDLERS\r\n# def handleEvent():\r\n#     e = getEvent()\r\n    # if(e.isButtonDown(EventFlags.ButtonLeft)): \r\n    #     print(\"Left button pressed \")\r\n    #     myStartDay = myStartDay + dayIncrement\r\n    #     myEndDay = myEndDay + dayIncrement\r\n    #     if( myStartDay > numberOfDays ):\r\n    #         myStartDay = 0\r\n    #         myEndDay = dayIncrement\r\n    #     endDay.setInt(myEndDay)\r\n    #     startDay.setInt(myStartDay)\r\n    # if(e.isButtonDown(EventFlags.ButtonRight)):\r\n    #     myStartDay = myStartDay - dayIncrement\r\n    #     myEndDay = myEndDay - dayIncrement\r\n    #     if( myStartDay < 0 ):\r\n    #         myStartDay = numberOfDays - dayIncrement\r\n    #         myEndDay = numberOfDays\r\n    #     endDay.setInt(myEndDay)\r\n    #     startDay.setInt(myStartDay)\r\n    # if(e.isButtonDown(EventFlags.ButtonUp)): \r\n    #     print(\"Up button pressed turning off white\")\r\n    # if(e.isButtonDown(EventFlags.ButtonDown)):\r\n    #     print(\"Up button pressed turning on white\")\r\n\r\n# 
setEventFunction(handleEvent)\r\n","sub_path":"v1/islandAndMovement.py","file_name":"islandAndMovement.py","file_ext":"py","file_size_in_byte":12698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"618438507","text":"from bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nfrom wordcloud import WordCloud, ImageColorGenerator, STOPWORDS\nimport requests\nfrom io import BytesIO\nfrom time import sleep, time \n\nwhile True: \n \n while True:\n id = input(\"[*] Digite el ID del usuario para mostrar la nube de palabras: \")\n \n if id.isdigit () != True:\n print (\"\\n\\nDato no valido.\")\n else:\n break\n \n url = 'https://es.stackoverflow.com/users/' + id + \"/?tab=tags\"\n page = requests.get (url) # requests revisa si el url es funcional\n \n soup = BeautifulSoup (page.content, \"html.parser\") # BeautifulSoup analiza el htlm\n lbls = soup.find_all ('a', class_='post-tag') # lbls = etiquetas de usuario\n sleep(5)\n labels = list ()\n \n for i in lbls:\n labels.append(i.text)\n c = None #Verificara si el usurio cuenta con tags \n msj = \"\"\n \n \n for i in labels:\n if i != None:\n c = True \n break\n \n if c == True:\n\n url = \"https://media.istockphoto.com/photos/monarch-butterfly-in-rainbow-colors-isolated-on-white-picture-id1196565484?k=6&m=1196565484&s=170667a&w=0&h=-H8O0dSlwlyFWvzRK1RR5VDV4fZo63dNoWXveFMm7JE=\"\n \n unique_string = (\" \").join (labels)\n response = requests.get (url)\n creation = np.asarray (Image.open (BytesIO (response.content))) #np transforma la imagen en un array\n wordcloud = WordCloud (background_color = \"black\", mask=creation, contour_width = 0, regexp=r\"\\S[\\S']+\").generate (unique_string)\n colors = ImageColorGenerator (creation)\n wordcloud.recolor (color_func = colors)\n plt.figure (figsize = (15, 8))\n plt.imshow (wordcloud)\n plt.axis (\"off\")\n plt.show ()\n plt.close ()\n\n else:\n print(\"Este usuario no posee tags que mostrar\")\n \n msj = input(\"\\nDesea acabar la ejecución si/no: \")\n msj.lower() #En caso de que escriba en mayuscula esta la transformara en minuscula\n\n if msj == \"si\":\n break\n print(\"\\n\")\n\n\n\n","sub_path":"wordcloud.py","file_name":"wordcloud.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"138708415","text":"'''\nleetcode 40 组合总和II\n顺序不同的组合算同一种组合\n每个数字在每个组合中只能使用一次\nbeat 69%\n\n思路:递归 + 动态规划\n'''\n\n\nclass Solution(object):\n def combinationSum2(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n candidates.sort()\n # 储存结果\n self.ansList = []\n self.DFS(candidates, target, 0, [])\n return self.ansList\n\n def DFS(self, candidates, target, start, valuelist):\n if target == 0:\n if valuelist not in self.ansList:\n return self.ansList.append(valuelist) # 应对candidates = [1,1,2,5], target =8,防止将[1,2,5]算两遍\n for i in range(start, len(candidates)):\n # 若剩下的最小的候选人都比target小,则组合失败\n if candidates[i] > target:\n return\n self.DFS(candidates, target - candidates[i], i + 1, valuelist + [candidates[i]])\n # 仅把上题的i改成i+1\n\n\n'''40的测试程序'''\nnums = [2,5,2,1,2]\ntarget = 5\na = Solution()\nprint(a.combinationSum2(nums, target))","sub_path":"40组合总和II.py","file_name":"40组合总和II.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"420995630","text":"import 
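The `40组合总和II.py` record above (LeetCode 40, "Combination Sum II"; its Chinese header notes that reordered combinations count as one and each number is used at most once) deduplicates by testing `valuelist not in self.ansList`, which is quadratic in the number of results. The standard alternative never generates a duplicate in the first place: after sorting, skip any candidate equal to its left sibling at the same recursion depth. A sketch:

def combination_sum2(candidates, target):
    candidates.sort()
    ans = []
    def dfs(start, remaining, chosen):
        if remaining == 0:
            ans.append(chosen[:])
            return
        for i in range(start, len(candidates)):
            if candidates[i] > remaining:
                break                      # sorted, so nothing later can fit
            if i > start and candidates[i] == candidates[i - 1]:
                continue                   # same value at same depth: duplicate branch
            chosen.append(candidates[i])
            dfs(i + 1, remaining - candidates[i], chosen)
            chosen.pop()
    dfs(0, target, [])
    return ans

assert combination_sum2([2, 5, 2, 1, 2], 5) == [[1, 2, 2], [5]]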
sys\nfrom itertools import starmap\nfrom typing import Sequence, Optional, Callable, Mapping, Text, IO\n\nimport numpy as np\nfrom fn.func import identity\nfrom keras import callbacks, backend as K\n\n\ndef load_validator(log_file_handle):\n pass\n\n\nclass Validator(callbacks.Callback):\n modes = (\"max\", \"min\")\n\n # TODO docs\n\n def __init__(self,\n inputs: Sequence[np.ndarray],\n output: np.ndarray,\n batch_size: int,\n metrics: Mapping[Text, Callable[[np.ndarray, np.ndarray], float]],\n transform: Callable[[np.ndarray], np.ndarray] = identity,\n monitor: Optional[Text] = None,\n mode: Text = \"max\",\n prefix: Text = None,\n stream: IO = sys.stderr):\n \"\"\"\n :param inputs:\n :param output:\n :param batch_size:\n :param metrics: a mapping between names and functions; the functions\n must have the following signature: f(true, predicted) -> float\n :param transform:\n :param monitor:\n :param mode:\n :param prefix:\n \"\"\"\n super().__init__()\n if mode not in self.modes:\n raise ValueError(\"`mode` must be either 'max' or 'min'\")\n if monitor and monitor not in metrics:\n raise ValueError(\"`monitor` is not in metrics\")\n if monitor and not prefix:\n raise ValueError(\"you must provide a path prefix when monitoring\")\n self.inputs = inputs\n self.output = output\n self.epoch = None\n self.batch_size = batch_size\n self.metrics = metrics\n self.mode = mode\n self.transform = transform\n self.monitor = monitor\n self.best = float(\"-inf\") if mode == \"max\" else float(\"inf\")\n self.prefix = prefix\n self.stream = stream\n\n def _estimate_metrics(self):\n pred = self.transform(self.model.predict(self.inputs, self.batch_size))\n return {name: f(self.output, pred) for name, f in self.metrics.items()}\n\n @staticmethod\n def _format_score_log(scores: Mapping[Text, float]):\n template = \"{} - {:.3f}\"\n return \" | \".join(starmap(template.format, scores.items()))\n\n def _improved(self, score: float):\n return score > self.best if self.mode == \"max\" else score < self.best\n\n def on_epoch_end(self, epoch, logs=None):\n self.epoch = epoch\n scores = self._estimate_metrics()\n log = self._format_score_log(scores)\n print(\"\\n\" + log, file=self.stream)\n if self.monitor and self._improved(scores[self.monitor]):\n path = \"{}-{:02d}-{:.3f}.hdf5\".format(self.prefix, self.epoch, scores[self.monitor])\n print(\"{} improved from {} to {}; saving weights to {}\".format(\n self.monitor, self.best, scores[self.monitor], path),\n end=\"\\n\\n\", file=self.stream)\n self.best = scores[self.monitor]\n self.model.save_weights(path)\n elif self.monitor:\n print(\"{} didn't improve\".format(self.monitor), end=\"\\n\\n\", file=self.stream)\n self.stream.flush()\n\n\ndef precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n prec = true_positives / (predicted_positives + K.epsilon())\n return prec\n\n\ndef recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n rec = true_positives / (possible_positives + K.epsilon())\n return rec\n\n\ndef fbeta_score(y_true, y_pred, beta):\n \"\"\"\n Calculates the F score, the weighted harmonic mean of precision and recall.\n This is useful for multi-label classification, where input samples can be\n classified as sets of labels. By only using accuracy (precision) a model\n would achieve a perfect score by simply assigning every class to every\n input. 
In order to avoid this, a metric should penalize incorrect class\n assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)\n computes this, as a weighted mean of the proportion of correct class\n assignments vs. the proportion of incorrect class assignments.\n With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning\n correct classes becomes more important, and with beta > 1 the metric is\n instead weighted towards penalizing incorrect class assignments.\n \"\"\"\n if beta < 0:\n raise ValueError('The lowest choosable beta is zero (only precision).')\n\n # If there are no true positives, fix the F score at 0 like sklearn.\n if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:\n return 0\n\n p = precision(y_true, y_pred)\n r = recall(y_true, y_pred)\n bb = beta ** 2\n fbeta = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())\n return fbeta\n\n\ndef fmeasure(y_true, y_pred):\n \"\"\"\n Calculates the f-measure, the harmonic mean of precision and recall.\n \"\"\"\n return fbeta_score(y_true, y_pred, beta=1)\n\n\ndef recall_softmax(y_true, y_pred):\n labels_true = K.argmax(y_true, axis=-1)\n labels_pred = K.argmax(y_pred, axis=-1)\n positive_true = K.cast(K.equal(labels_true, 1), dtype=K.floatx())\n positive_pred = K.cast(K.equal(labels_pred, 1), dtype=K.floatx())\n true_positives = K.sum(positive_true * positive_pred) + K.epsilon()\n return true_positives / (K.sum(positive_true) + K.epsilon())\n\n\ndef precision_softmax(y_true, y_pred):\n labels_true = K.argmax(y_true, axis=-1)\n labels_pred = K.argmax(y_pred, axis=-1)\n positive_true = K.cast(K.equal(labels_true, 1), dtype=K.floatx())\n positive_pred = K.cast(K.equal(labels_pred, 1), dtype=K.floatx())\n true_positives = K.sum(positive_true * positive_pred) + K.epsilon()\n return true_positives / (K.sum(positive_pred) + K.epsilon())\n\n\ndef fmeasure_softmax(y_true, y_pred):\n p = precision_softmax(y_true, y_pred)\n r = recall_softmax(y_true, y_pred)\n return 2 * p * r / (p + r)\n","sub_path":"src/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"79962924","text":"__author__ = 'Reijer'\r\n\r\nimport random\r\n\r\n\r\nhandvalue = [\"Royal Straight Flush\", \"Straight Flush\", \"Four of a Kind\", \"Full House\", \"Flush\", \"Straight\", \"Three of a Kind\", \"Two Pairs\", \"One Pair\", \"High Card\"]\r\ncardsvalue = ('2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A')\r\ncardssuit = ('D', 'H', 'S', 'C')\r\nallhands = ['8C TS KC 9H 4S', '8C TS 9C 8H 4S', '8C TS TC 8H 4S', '8C 8S TC 8H 4S', '2C 3S 4C 5H 6S', 'AC 2S 3C 4H 5S', '8D TD KD 9D 4D' ,'8C TS TC 8H 8S', '8C 8D TC 8H 8S', '9D TD JD QD KD', 'AC 2C 3C 4C 5C', 'TD JD QD KD AD']\r\n\r\n\r\n\r\ndef dealcard(deck):\r\n card = random.choice(cardsvalue)+random.choice(cardssuit)\r\n if card not in deck:\r\n deck.append(card)\r\n yield card\r\n\r\n\r\ndef main():\r\n deck = []\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"Texas Holdem.py","file_name":"Texas Holdem.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"429338455","text":"from torch_geometric.nn import SAGEConv\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass GCN(nn.Module):\n def __init__(self, in_channels, out_channels, batch_size, dropout=0.0):\n super(GCN, self).__init__()\n\n self.num_layers = 1 # k=1\n self.batch_size = 
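The precision/recall/F-beta helpers in the `metrics.py` record above operate on Keras tensors; a plain-NumPy spot check of the same formulas on a toy batch, worked by hand:

import numpy as np

y_true = np.array([1, 1, 0, 0, 1])
y_pred = np.array([1, 0, 1, 0, 1])    # TP=2, FP=1, FN=1

tp = np.sum(y_true * y_pred)
prec = tp / np.sum(y_pred)            # 2/3
rec = tp / np.sum(y_true)             # 2/3
f1 = 2 * prec * rec / (prec + rec)
assert np.isclose(f1, 2 / 3)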
batch_size\n self.dropout = dropout\n\n self.convs = nn.ModuleList()\n self.convs.append(SAGEConv(in_channels, 16, normalize=False, aggr='add'))\n self.fc1 = nn.Linear(16, 16, bias=True)\n self.fc2 = nn.Linear(16, 16, bias=True)\n self.fc3 = nn.Linear(16, out_channels, bias=True)\n \n def forward(self, x, edge_index):\n # `train_loader` computes the k-hop neighborhood of a batch of nodes,\n # and returns, for each layer, a bipartite graph object, holding the\n # bipartite edges `edge_index`, the index `e_id` of the original edges,\n # and the size/shape `size` of the bipartite graph.\n # Target nodes are also included in the source nodes so that one can\n # easily apply skip-connections or add self-loops.\n x_target = x[:self.batch_size] # Target nodes are always placed first.\n x = self.convs[0]((x, x_target), edge_index)\n x = F.leaky_relu(x)\n x = F.dropout(x, p=self.dropout)\n x = self.fc1(x)\n x = F.leaky_relu(x)\n x = F.dropout(x, p=self.dropout)\n x = self.fc2(x)\n return x","sub_path":"extranet/models/gcn.py","file_name":"gcn.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"611733902","text":"import operator\n\nfrom user import User\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk import corpus\nfrom nltk import pos_tag\nfrom nltk.stem import WordNetLemmatizer\n\n\nclass ChiUser(User):\n\n def __init__(self, username):\n super().__init__(username)\n\n def process_data(self, clean):\n lemmatizer = WordNetLemmatizer()\n\n # Tag words with part of speech\n words = pos_tag(word_tokenize(clean))\n\n # Get stop words\n stop = stopwords.words(\"english\")\n\n # Get english words\n english = set(corpus.words.words())\n\n # Count the frequency of each word that is english, a verb or noun, and not a stop word\n raw_data = {}\n for word in words:\n lower_word = word[0].lower()\n if lower_word not in stop:\n if lower_word in english and len(lower_word) > 2:\n if \"NN\" in word[1] or \"VB\" in word[1]:\n # lemma_word = lemmatizer.lemmatize(lower_word)\n # Try passing in part of speech\n if \"NN\" in word[1]:\n lemma_word = lemmatizer.lemmatize(lower_word, 'n')\n if \"VB\" in word[1]:\n lemma_word = lemmatizer.lemmatize(lower_word, 'v')\n if lemma_word not in raw_data:\n raw_data[lemma_word] = 1\n else:\n raw_data[lemma_word] += 1\n\n # Select the top five interests\n # top5 = {}\n # for i in range(5):\n # max_key = max(raw_data.items(), key=operator.itemgetter(1))[0]\n # print(max_key)\n # top5[max_key] = raw_data[max_key]\n # del raw_data[max_key]\n\n # Delete interests generated if they have a hit score of 3 or less\n # keys_to_delete = []\n # for key in raw_data:\n # if raw_data[key] <= 10:\n # keys_to_delete.append(key)\n # for key in keys_to_delete:\n # del raw_data[key]\n\n return raw_data\n","sub_path":"chiuser.py","file_name":"chiuser.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"504117059","text":"\nimport re\nimport unittest\n\n\nclass MoveRoversException(Exception):\n pass\n\n\ndef move_rovers(input_string):\n\n # strip any surrounding white space from the input string\n input_string = input_string.strip()\n\n # make a list of the lines and strip each line of any surrounding white space\n input_lines = [x.strip() for x in input_string.split('\\n')]\n\n # check to see if the number of lines in the input string is odd and at least 3\n line_count = 
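Two details stand out in the `gcn.py` record above: `fc3` is built in `__init__` but never called, so `forward` returns 16-dimensional features and `out_channels` goes unused; and `F.dropout(x, p=...)` defaults to `training=True`, so dropout stays active even under `model.eval()`. A sketch of the usual ending for `forward`; that `fc3` was meant to be applied is inferred from the unused `out_channels`, not stated in the record:

# inside forward(), after fc2:
x = F.leaky_relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)  # disabled in eval mode
x = self.fc3(x)                                           # project to out_channels
return x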
len(input_lines)\n if line_count % 2 != 1 or line_count < 3:\n raise MoveRoversException('The input string must contain an odd '\n 'number of at least three lines')\n\n first_line = input_lines[0]\n\n # check to see if the first line is in an acceptable format\n if re.match(r'^[1-9]\d* [1-9]\d*$', first_line) is None:\n raise MoveRoversException('The first line of the input string must be '\n 'in the format \"X Y\" where X and Y are '\n 'any unsigned integers greater than zero')\n\n # NOTE: the following variables are named as though we're looking down on the plateau from above\n plateau_width, plateau_height = [int(x) for x in first_line.split()]\n\n # split the rest of the input string into a list of dicts, each dict representing\n # a rover (its current coordinates and movement instructions).\n rovers = [{\n 'current_coordinates': {\n 'x': int(input_lines[i].split()[0]),\n 'y': int(input_lines[i].split()[1]),\n 'facing': input_lines[i].split()[2],\n },\n 'movement_instructions': input_lines[i + 1]\n } for i in range(len(input_lines)) if i % 2 == 1]\n\n # the following will be used to identify rovers by number\n n = 0\n\n # the following is used to store rovers' current positions so that collision\n # detection and avoidance can be enforced\n rover_current_positions = {}\n\n # check to see if each rover's coordinates and movement instructions are\n # in an acceptable format and the coordinates are within the dimensions of the plateau\n\n for rover in rovers:\n\n rover['id'] = n\n\n errors = 0\n\n if not isinstance(rover['current_coordinates']['x'], int):\n errors += 1\n elif rover['current_coordinates']['x'] > plateau_width:\n raise MoveRoversException('A rover cannot exist outside of the X axis of the plateau')\n\n if not isinstance(rover['current_coordinates']['y'], int):\n errors += 1\n elif rover['current_coordinates']['y'] > plateau_height:\n raise MoveRoversException('A rover cannot exist outside of the Y axis of the plateau')\n\n if re.match(r'^[NESW]$', rover['current_coordinates']['facing']) is None:\n errors += 1\n\n if errors:\n raise MoveRoversException(f'A rover\\'s coordinates must be in the format \"X Y D\" '\n f'where X and Y are unsigned integers '\n f'and where D is either N, E, S or W. The X and Y '\n f'coordinates must also be within the dimensions of the plateau. 
'\n f'{str(rover[\"current_coordinates\"])} is invalid.')\n\n if re.match(r'^[LMR]+$', rover['movement_instructions']) is None:\n raise MoveRoversException('A rover\\'s movement instructions '\n 'must only contain the characters L, M and/or R')\n\n # store rover's current position\n rover_current_positions[n] = (\n rover['current_coordinates']['x'],\n rover['current_coordinates']['y']\n )\n\n # increment rover id tag\n n += 1\n\n # ================================================================================\n\n # we have established that the supplied data is in a valid format,\n # so we can now proceed with the computations\n\n # the following function does the actual leg work\n def get_new_position(from_x, from_y, from_facing, move):\n\n # deal with a left rotation\n if move == 'L':\n left_rotation = {\n 'N': 'W',\n 'E': 'N',\n 'S': 'E',\n 'W': 'S',\n }\n # since we are just rotating, x and y stay the same\n return from_x, from_y, left_rotation[from_facing]\n\n # deal with a right rotation\n elif move == 'R':\n right_rotation = {\n 'N': 'E',\n 'E': 'S',\n 'S': 'W',\n 'W': 'N',\n }\n # since we are just rotating, x and y stay the same\n return from_x, from_y, right_rotation[from_facing]\n\n # deal with a forward movement (the logic states that the move must equal M)\n if from_facing == 'N':\n # move the rover north (potentially)\n now_y = from_y + 1\n # if the rover is already at the northern most point of the plateau,\n # the rover should not move any further north\n if from_y == plateau_height:\n now_y = plateau_height\n return from_x, now_y, from_facing\n if from_facing == 'E':\n # move the rover east (potentially)\n now_x = from_x + 1\n # if the rover is already at the eastern most point of the plateau,\n # the rover should not move any further east\n if from_x == plateau_width:\n now_x = plateau_width\n return now_x, from_y, from_facing\n if from_facing == 'S':\n # move the rover south (potentially)\n now_y = from_y - 1\n # if the rover is already at the southern most point of the plateau,\n # the rover should not move any further south\n if from_y == 0:\n now_y = 0\n return from_x, now_y, from_facing\n if from_facing == 'W':\n # move the rover west (potentially)\n now_x = from_x - 1\n # if the rover is already at the western most point of the plateau,\n # the rover should not move any further west\n if from_x == 0:\n now_x = 0\n return now_x, from_y, from_facing\n\n output_strings = []\n for rover in rovers:\n for instruction in rover['movement_instructions']:\n new_x, new_y, new_facing = get_new_position(\n rover['current_coordinates']['x'],\n rover['current_coordinates']['y'],\n rover['current_coordinates']['facing'],\n instruction,\n )\n # ========================================================\n # avoid a collision with another rover\n if (new_x, new_y) in [v for k, v in rover_current_positions.items() if k != rover['id']]:\n raise MoveRoversException('A potential collision has been avoided.')\n\n # ========================================================\n rover['current_coordinates']['x'] = new_x\n rover['current_coordinates']['y'] = new_y\n rover['current_coordinates']['facing'] = new_facing\n\n rover_current_positions[rover['id']] = (new_x, new_y)\n\n output_strings.append(f\"{rover['current_coordinates']['x']} \"\n f\"{rover['current_coordinates']['y']} \"\n f\"{rover['current_coordinates']['facing']}\")\n\n return '\\n'.join(output_strings)\n\n\nclass RoverTests(unittest.TestCase):\n\n def test_correct_input(self):\n\n # this is the sample 
input and output data from the test\n input_string = \"\"\"\n 5 5\n 1 2 N\n LMLMLMLMM\n 3 3 E\n MMRMMRMRRM\n \"\"\"\n expected_output = \"1 3 N\\n5 1 E\"\n output = move_rovers(input_string)\n self.assertEqual(output, expected_output)\n\n # ==========================================================\n\n input_string = \"\"\"\n 12 8\n 9 5 E\n MMLMLMMMRM\n 2 5 E\n MMRMMRMRML\n \"\"\"\n expected_output = \"8 7 N\\n3 4 W\"\n output = move_rovers(input_string)\n self.assertEqual(output, expected_output)\n\n def test_incorrect_plateau_dimensions(self):\n\n input_string = \"\"\"\n 1A B7\n 9 3 W\n MLMRLRM\n \"\"\"\n try:\n move_rovers(input_string)\n self.fail()\n except MoveRoversException:\n pass\n\n def test_incorrect_line_count(self):\n\n input_string = \"\"\"\n 1A B7\n 9 3 W\n MLMRLRM\n 0 234\n \"\"\"\n try:\n move_rovers(input_string)\n self.fail()\n except MoveRoversException:\n pass\n\n def test_rover_initial_position_should_not_be_outside_plateau(self):\n\n input_string = \"\"\"\n 5 5\n 6 5 N\n MLMRLRM\n \"\"\"\n try:\n move_rovers(input_string)\n self.fail()\n except MoveRoversException:\n pass\n\n # ==========================================================\n\n input_string = \"\"\"\n 5 5\n 5 6 N\n MLMRLRM\n \"\"\"\n try:\n move_rovers(input_string)\n self.fail()\n except MoveRoversException:\n pass\n\n def test_rover_tries_to_move_too_far_north(self):\n\n input_string = \"\"\"\n 5 5\n 5 5 N\n MM\n \"\"\"\n expected_output = \"5 5 N\"\n output = move_rovers(input_string)\n self.assertEqual(output, expected_output)\n\n def test_rover_tries_to_move_too_far_east(self):\n\n input_string = \"\"\"\n 5 5\n 5 5 E\n MM\n \"\"\"\n expected_output = \"5 5 E\"\n output = move_rovers(input_string)\n self.assertEqual(output, expected_output)\n\n def test_rover_tries_to_move_too_far_south(self):\n\n input_string = \"\"\"\n 5 5\n 5 2 S\n MMMM\n \"\"\"\n expected_output = \"5 0 S\"\n output = move_rovers(input_string)\n self.assertEqual(output, expected_output)\n\n def test_rover_tries_to_move_too_far_west(self):\n\n input_string = \"\"\"\n 5 5\n 0 0 W\n MMMM\n \"\"\"\n expected_output = \"0 0 W\"\n output = move_rovers(input_string)\n self.assertEqual(output, expected_output)\n\n def test_incorrect_movement_instructions(self):\n\n input_string = \"\"\"\n 5 5\n 5 6 N\n MLMFAK@RLRM\n \"\"\"\n try:\n move_rovers(input_string)\n self.fail()\n except MoveRoversException:\n pass\n\n def test_incorrect_facing(self):\n\n input_string = \"\"\"\n 5 5\n 5 6 R\n MMMM\n \"\"\"\n try:\n move_rovers(input_string)\n self.fail()\n except MoveRoversException:\n pass\n\n def test_collision_prevention(self):\n\n input_string = \"\"\"\n 5 5\n 5 4 S\n M\n 5 3 N\n M\n \"\"\"\n try:\n move_rovers(input_string)\n self.fail()\n except MoveRoversException:\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"317712696","text":"#-*- coding: utf-8 -*-\n\nfrom datetime import datetime, date\nfrom json import load\nfrom random import choice\nfrom uuid import uuid4\nfrom smtplib import SMTP\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\nfrom flask import url_for\nfrom sqlalchemy import UniqueConstraint, Column, Boolean, Integer, String, Date, DateTime, ForeignKey, Text\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import relationship\nfrom 
sqlalchemy.ext.hybrid import hybrid_property\nfrom config import DEBUG, MAIL_FROM, MAIL_DEBUG\nfrom config.mail import *\nfrom . import Base\n\ndef sendmail(addr_from, addr_to, subject, msg, attachment=None, attachment_name=None):\n if not attachment:\n mail = MIMEText(msg)\n else:\n mail = MIMEMultipart()\n mail.attach(MIMEText(msg))\n app = MIMEApplication(attachment)\n app['Content-Disposition'] = 'attachment; filename=\"%s\"' % attachment_name\n mail.attach(app)\n\n mail[\"Subject\"] = subject\n mail[\"From\"] = addr_from\n if type(addr_to) == str:\n mail[\"To\"] = addr_to\n else:\n mail[\"To\"] = ', '.join(addr_to)\n\n with SMTP(\"localhost\") as s:\n s.send_message(mail)\n\nclass Reminder(Base):\n __tablename__ = \"reminders\"\n\n id = Column(Integer, primary_key=True)\n phone_number = Column(String(20), unique=True, nullable=False)\n time = Column(Integer)\n date_added = Column(Date, nullable=False)\n last_called = Column(DateTime)\n times_called = Column(Integer, nullable=False)\n times_forwarded = Column(Integer, nullable=False)\n time_connected = Column(Integer, nullable=False)\n\n def __init__(self, phone_number, time=None):\n self.phone_number = phone_number\n self.time = time\n self.date_added = date.today()\n self.last_called = None\n self.times_called = 0\n self.times_forwarded = 0\n self.time_connected = 0\n\n def __repr__(self):\n return \"<Reminder {}>\".format(self.phone_number)\n\nclass Mail(Base):\n __tablename__ = \"mails\"\n\n id = Column(Integer, primary_key=True)\n sender_id = Column(Integer, ForeignKey(\"senders.id\"))\n recipient = Column(String(5), nullable=False)\n date_sent = Column(DateTime)\n\n __table_args__ = (\n UniqueConstraint(sender_id, recipient),\n )\n\n def __init__(self, sender, recipient):\n self.sender = sender\n self.recipient = recipient\n\n def send(self):\n self.date_sent = datetime.now()\n\n rep = reps.get_representative_by_id(self.recipient)\n\n addr_from = '\"' + MAIL_FROM + '\" <' + MAIL_FROM + '>'\n if DEBUG:\n addr_to = '\"' + MAIL_DEBUG + '\" <' + MAIL_DEBUG + '>'\n else:\n addr_to = str(rep) + \" <\" + rep.contact.mail + \">\"\n subject = \"Sicherheitspaket\"\n msg = MAIL_DISCLAIMER.format(name_user=self.sender.name, mail_user=self.sender.email_address) + \"\\n\" * 2\n msg = msg + MAIL_REPRESENTATIVE.format(name_rep=str(rep), name_user=self.sender.name, salutation=rep.salutation)\n sendmail(addr_from, addr_to, subject, msg)\n\nclass Sender(Base):\n __tablename__ = \"senders\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String(256), nullable=False)\n email_address = Column(String(254), unique=True, nullable=False)\n mails = relationship(\"Mail\", order_by=\"Mail.id\", backref=\"sender\")\n hash = Column(String(64), unique=True, nullable=False)\n date_validated = Column(DateTime)\n date_requested = Column(DateTime, nullable=False)\n newsletter = Column(Boolean(False), nullable=False)\n\n def __init__(self, name, email_address, newsletter=False):\n self.name = name\n self.email_address = email_address\n self.request_validation()\n self.newsletter = newsletter\n\n def validate(self):\n self.date_validated = datetime.now()\n\n addr_from = '\"' + MAIL_FROM + '\" <' + MAIL_FROM + '>'\n addr_to = self.name + \" <\" + self.email_address + \">\"\n subject = \"Vielen Dank für Ihre Teilnahme auf überwachungspaket.at\"\n msg = MAIL_WELCOME.format(name_user=self.name)\n sendmail(addr_from, addr_to, subject, msg)\n\n def request_validation(self):\n self.hash = uuid4().hex\n self.date_requested = datetime.now()\n\n addr_from = '\"' + MAIL_FROM 
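For reference, a minimal way to exercise the sendmail helper above. All addresses are placeholders, and it assumes an SMTP daemon is listening on localhost, which is what the helper connects to:

if __name__ == "__main__":
    sendmail(
        '"Sender" <sender@example.org>',
        ["one@example.org", "two@example.org"],  # a list exercises the join branch
        "Test subject",
        "Plain-text body",
        attachment=b"arbitrary bytes",  # optional; triggers the multipart branch
        attachment_name="info.txt",
    )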
+ '\" <' + MAIL_FROM + \">\"\n addr_to = self.name + \" <\" + self.email_address + \">\"\n subject = \"Bestätigung für überwachungspaket.at\"\n url = url_for(\"validate\", hash=self.hash, _external=True)\n msg = MAIL_VALIDATE.format(name_user=self.name, url=url)\n sendmail(addr_from, addr_to, subject, msg)\n\nclass Representatives():\n def __init__(self):\n self.parties = load_parties()\n self.teams = load_teams()\n self.representatives = load_representatives(\"representatives.json\", self.parties, self.teams)\n self.government = load_representatives(\"government.json\", self.parties, self.teams, True)\n\n def get_representative_by_id(self, id):\n representatives = self.representatives + self.government\n if id == \"00000\":\n return choice([rep for rep in representatives if rep.important])\n\n try:\n rep = [rep for rep in representatives if rep.id == id][0]\n except IndexError:\n rep = None\n return rep\n\n def get_representative_by_name(self, prettyname):\n representatives = self.representatives + self.government\n try:\n rep = [rep for rep in representatives if rep.name.prettyname == prettyname][0]\n except IndexError:\n rep = None\n return rep\n\n def get_party(self, shortname):\n return self.parties[shortname]\n\nclass Contact():\n def __init__(self, mail, phone, facebook, twitter):\n self.mail = mail\n self.phone = phone\n self.facebook = facebook\n self.twitter = twitter\n\nclass Party():\n def __init__(self, handle, name, shortname, prettyname, color, contact):\n self.handle = handle\n self.name = name\n self.shortname = shortname\n self.prettyname = prettyname\n self.color = color\n self.contact = contact\n\nclass Name():\n def __init__(self, firstname, lastname, prettyname, prefix, suffix):\n self.firstname = firstname\n self.lastname = lastname\n self.prettyname = prettyname\n self.prefix = prefix\n self.suffix = suffix\n\nclass Image():\n def __init__(self, url, copyright):\n self.url = url\n self.copyright = copyright\n\nclass Team():\n def __init__(self, name, prettyname):\n self.name = name\n self.prettyname = prettyname\n\n def __repr__(self):\n return self.name\n\nclass Representative():\n def __init__(self, id, name, contact, image, party, team, sex, important, salutation, state, is_government):\n self.id = id\n self.name = name\n self.contact = contact\n self.image = image\n self.party = party\n self.team = team\n self.sex = sex\n self.is_male = sex == 'male'\n self.is_female = sex == 'female'\n self.important = important\n self.salutation = salutation\n self.state = state\n self.is_government = is_government\n\n if not self.contact.mail:\n self.contact.mail = party.contact.mail\n if not self.contact.phone:\n self.contact.phone = party.contact.phone\n if not self.contact.facebook:\n self.contact.facebook = party.contact.facebook\n if not self.contact.twitter:\n self.contact.twitter = party.contact.twitter\n\n def __repr__(self):\n return self.name.firstname + \" \" + self.name.lastname\n\n def fullname(self):\n return (self.name.prefix + \" \" if self.name.prefix else \"\") + self.name.firstname + \" \" + self.name.lastname + (\" \" + self.name.suffix if self.name.suffix else \"\")\n\ndef load_parties():\n parties = {}\n\n with open(\"ueberwachungspaket/data/parties.json\", \"r\") as f:\n lparties = load(f)\n\n for prettyname in lparties:\n lparty = lparties[prettyname]\n lcontact = lparty[\"contact\"]\n contact = Contact(lcontact[\"mail\"], lcontact[\"phone\"], lcontact[\"facebook\"], lcontact[\"twitter\"])\n party = Party(prettyname, lparty[\"name\"], 
lparty[\"shortname\"], prettyname, lparty[\"color\"], contact)\n parties[prettyname] = party\n\n return parties\n\ndef load_teams():\n teams = {}\n\n with open(\"ueberwachungspaket/data/teams.json\", \"r\") as f:\n lteams = load(f)\n\n for prettyname in lteams:\n lteam = lteams[prettyname]\n team = Team(lteam[\"name\"], prettyname)\n teams[prettyname] = team\n\n return teams\n\ndef load_representatives(filename, parties, teams, is_government=False):\n representatives = []\n\n with open(\"ueberwachungspaket/data/\" + filename, \"r\") as f:\n lrepresentatives = load(f)\n\n for lrep in lrepresentatives:\n lname = lrep[\"name\"]\n name = Name(lname[\"firstname\"], lname[\"lastname\"], lname[\"prettyname\"], lname[\"prefix\"], lname[\"suffix\"])\n lcontact = lrep[\"contact\"]\n contact = Contact(lcontact[\"mail\"], lcontact[\"phone\"], lcontact[\"facebook\"], lcontact[\"twitter\"])\n image = Image(lrep[\"image\"][\"url\"], lrep[\"image\"][\"copyright\"])\n party = parties[lrep[\"party\"]]\n team = teams[lrep[\"team\"]]\n representative = Representative(lrep[\"id\"], name, contact, image, party, team, lrep[\"sex\"], lrep[\"important\"], lrep[\"salutation\"], lrep[\"state\"], is_government)\n representatives.append(representative)\n\n return representatives\n\nreps = Representatives()\n\n\ndef load_consultation_issues():\n with open(\"ueberwachungspaket/data/consultation_issues.json\", \"r\") as f:\n consultation_issues = load(f)\n return consultation_issues\n\nclass ConsultationSender(Base):\n __tablename__ = \"consultation_senders\"\n\n id = Column(Integer, primary_key=True)\n first_name = Column(String(256), nullable=False)\n last_name = Column(String(256), nullable=False)\n email_address = Column(String(254), unique=True, nullable=False)\n bmi_text = Column(Text)\n bmj_text = Column(Text)\n confidential_submission = Column(Boolean(True), nullable=False)\n hash = Column(String(64), unique=True, nullable=False)\n date_validated = Column(DateTime)\n date_requested = Column(DateTime, nullable=False)\n newsletter = Column(Boolean(False), nullable=False)\n\n def __init__(self, first_name, last_name, email_address, bmi_text, bmj_text, confidential_submission, newsletter=False):\n self.first_name = first_name\n self.last_name = last_name\n self.email_address = email_address\n self.bmi_text = bmi_text\n self.bmj_text = bmj_text\n self.confidential_submission = confidential_submission\n self.request_validation()\n self.newsletter = newsletter\n\n def validate(self):\n self.date_validated = datetime.now()\n\n def request_validation(self):\n self.hash = uuid4().hex\n self.date_requested = datetime.now()\n\n addr_from = '\"' + MAIL_FROM + '\" <' + MAIL_FROM + '>'\n addr_to = '\"' + self.first_name + ' ' + self.last_name + '\" <' + self.email_address + '>'\n subject = \"Bestätigung für überwachungspaket.at\"\n url = url_for(\"consultation_complete\", hash=self.hash, _external=True)\n msg = CONSULTATION_MAIL_VALIDATE.format(first_name=self.first_name, last_name=self.last_name, url=url)\n sendmail(addr_from, addr_to, subject, msg)\n\nclass Opinion(Base):\n __tablename__ = \"opinions\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String(256), nullable=False)\n logo_filename = Column(String(256))\n date = Column(Date, nullable=False)\n link_bmi_parliament = Column(String(256))\n link_bmi_pdf = Column(String(256))\n originality_bmi = Column(Integer)\n link_bmj_parliament = Column(String(256))\n link_bmj_pdf = Column(String(256))\n originality_bmj = Column(Integer)\n addresses_bundestrojaner = 
Column(Boolean, nullable=False)\n addresses_netzsperren = Column(Boolean, nullable=False)\n addresses_vds_video = Column(Boolean, nullable=False)\n addresses_ueberwachung_strassen = Column(Boolean, nullable=False)\n addresses_vds_quickfreeze = Column(Boolean, nullable=False)\n addresses_anonyme_simkarten = Column(Boolean, nullable=False)\n addresses_imsi_catcher = Column(Boolean, nullable=False)\n addresses_lauschangriff_auto = Column(Boolean, nullable=False)\n comment = Column(Text)\n\n def __init__(self,\n name,\n logo_filename,\n date,\n link_bmi_parliament,\n link_bmi_pdf,\n originality_bmi,\n link_bmj_parliament,\n link_bmj_pdf,\n originality_bmj,\n addresses_bundestrojaner,\n addresses_netzsperren,\n addresses_vds_video,\n addresses_ueberwachung_strassen,\n addresses_vds_quickfreeze,\n addresses_anonyme_simkarten,\n addresses_imsi_catcher,\n addresses_lauschangriff_auto,\n comment):\n self.name = name\n self.logo_filename = logo_filename\n self.date = date\n self.link_bmi_parliament = link_bmi_parliament\n self.link_bmi_pdf = link_bmi_pdf\n self.originality_bmi = originality_bmi\n self.link_bmj_parliament = link_bmj_parliament\n self.link_bmj_pdf = link_bmj_pdf\n self.originality_bmj = originality_bmj\n self.addresses_bundestrojaner = addresses_bundestrojaner\n self.addresses_netzsperren = addresses_netzsperren\n self.addresses_vds_video = addresses_vds_video\n self.addresses_ueberwachung_strassen = addresses_ueberwachung_strassen\n self.addresses_vds_quickfreeze = addresses_vds_quickfreeze\n self.addresses_anonyme_simkarten = addresses_anonyme_simkarten\n self.addresses_imsi_catcher = addresses_imsi_catcher\n self.addresses_lauschangriff_auto = addresses_lauschangriff_auto\n self.comment = comment\n\n @hybrid_property\n def originality(self):\n return max(self.originality_bmi, self.originality_bmj)\n\n @originality.expression\n def originality(cls):\n return func.greatest(cls.originality_bmi, cls.originality_bmj)\n\n def name_pretty(self):\n if len(self.name) > 48:\n return self.name[0:49] + \"...\"\n else:\n return self.name\n\n def date_pretty(self):\n return \"{}.{}.{}\".format(self.date.day, self.date.month, self.date.year)\n\n def originality_pretty(self):\n if self.originality > 1073741822:\n return 100\n else:\n return int(99 / 9273 * self.originality)\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"logoFilename\": url_for(\"static\", filename=\"img/logo/\" + self.logo_filename) if self.logo_filename is not None else None,\n \"name\": self.name_pretty(),\n \"date\": self.date_pretty(),\n \"linkBmi\": self.link_bmi_pdf,\n \"linkBmj\": self.link_bmj_pdf,\n \"addressesBundestrojaner\": self.addresses_bundestrojaner,\n \"addressesNetzsperren\": self.addresses_netzsperren,\n \"addressesVdsVideo\": self.addresses_vds_video,\n \"addressesUeberwachungStrassen\": self.addresses_ueberwachung_strassen,\n \"addressesVdsQuickfreeze\": self.addresses_vds_quickfreeze,\n \"addressesAnonymeSimkarten\": self.addresses_anonyme_simkarten,\n \"addressesImsiCatcher\": self.addresses_imsi_catcher,\n \"addressesLauschangriffAuto\": self.addresses_lauschangriff_auto,\n \"originality\": self.originality_pretty(),\n \"comment\": self.comment\n }\n\nclass Activist(Base):\n __tablename__ = \"activists\"\n\n id = Column(Integer, primary_key=True)\n email = Column(String(255), unique=True, nullable=False)\n phone = Column(String(20))\n date_entered = Column(DateTime, nullable=False)\n\n def __init__(self, email, phone, date_entered):\n self.email = email\n self.phone = phone\n self.date_entered 
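The originality hybrid property defined above evaluates with Python's max() on instances and compiles to SQL GREATEST() inside queries (so queries need a backend such as PostgreSQL that provides GREATEST). A hypothetical query, assuming a SQLAlchemy session is available:

top = (
    session.query(Opinion)
    .order_by(Opinion.originality.desc())  # ORDER BY greatest(originality_bmi, originality_bmj) DESC
    .limit(10)
    .all()
)
for op in top:
    print(op.name_pretty(), op.originality_pretty())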
= date_entered\n","sub_path":"database/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"82119996","text":"import json\r\nimport sqlite3\r\nfrom firebase import firebase\r\n\r\n\r\n# ===============================================================\r\n# Database Manager Class\r\n\r\nclass DatabaseManager():\r\n def __init__(self):\r\n self.firebase = firebase.FirebaseApplication('address of firebase')\r\n\r\n def add_update_db_record(self, data):\r\n self.firebase.post('Air quality', data)\r\n return\r\n\r\n\r\n# ===============================================================\r\n# Functions to push Sensor Data into Database\r\n\r\n# Function to save Air Quality data to DB Table\r\ndef Air_Quality_Data_Handler(jsonData):\r\n # Parse Data\r\n json_Dict = json.loads(jsonData)\r\n Sec = json_Dict[0]['Sec']\r\n PM0_3 = json_Dict[0]['DB0_3']\r\n PM0_5 = json_Dict[0]['DB0_5']\r\n PM1 = json_Dict[0]['DB1']\r\n PM2_5 = json_Dict[0]['DB2_5']\r\n PM5 = json_Dict[0]['DB5']\r\n PM10 = json_Dict[0]['DB10']\r\n\r\n air_quality_data = {'Sec': Sec, 'PM0_3': PM0_3, 'PM0_5': PM0_5, 'PM1': PM1, 'PM2_5': PM2_5\r\n , 'PM5': PM5, 'PM10': PM10}\r\n # Push into DB Table\r\n dbObj = DatabaseManager()\r\n dbObj.add_update_db_record(air_quality_data)\r\n print(\"Inserted Air Quality Data into Database.\")\r\n print(\"\")\r\n\r\n\r\n# ===============================================================\r\n# Master Function to Select DB Function based on MQTT Topic\r\n\r\ndef sensor_Data_Handler(jsonData):\r\n Air_Quality_Data_Handler(jsonData)\r\n","sub_path":"MQTT/store_Sensor_Data_to_DB.py","file_name":"store_Sensor_Data_to_DB.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"48903842","text":"import socket\nimport threading\nimport select\nimport sys\n\nclass Cliente():\n\n def conecta(self, host, puerto):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self.socket.connect((str(host), int(puerto)))\n info = input(\"\")\n self.socket.send(bytes(info, 'utf-8'))\n\n confirmacion = self.socket.recv(1)\n # If entry is refused because the username already exists, 
keep asking\n # until the server accepts it.\n while confirmacion == b'\\x00': # assumes the server replies with a 0x00 byte on rejection\n info = input(\"Ese nombre de usuario ya está en uso, introduce otro: \")\n self.socket.send(bytes(info, 'utf-8'))\n confirmacion = self.socket.recv(1)\n\n\n def entrada(self):\n while True:\n lista_socket = [sys.stdin, self.socket]\n\n lee_socket, escribe_socket, error_socket = select.select(lista_socket, [], [])\n for socks in lee_socket:\n if(socks == self.socket):\n mensaje = socks.recv(2048)\n print(mensaje.decode('utf-8'))\n else:\n mensaje = sys.stdin.readline()\n self.socket.send(bytes(mensaje, 'utf-8'))\n \t\t\t# Erase the line we just typed, purely for aesthetics\n CURSOR_UP = '\\033[F'\n ERASE_LINE = '\\033[K'\n print(CURSOR_UP + ERASE_LINE)\n sys.stdout.write(\"Tú: \")\n sys.stdout.write(mensaje)\n sys.stdout.flush()\n self.socket.close()\n\n\ndef main():\n c = Cliente()\n host = sys.argv[1]\n puerto = sys.argv[2]\n c.conecta(host, puerto)\n c.entrada()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"541403612","text":"import re\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#Finds words over 15 letters long, and plots them in a histogram.\ndef long_word(words):\n\t#opens text file\n\tfile = open(words, 'r', encoding=\"utf8\")\n\t#creates a list of words for each line in the text\n\ttxt_split = [line.rstrip('\\n') for line in file]\n\n\t#joins text into one item\n\ttxt_joined = \" \".join(txt_split)\n\t#splits each word of the document to an item in a list\n\ttxt_list = txt_joined.split(\" \")\n\t# create empty dictionary for word:frequency pairs\n\tfreq = {}\n\n\t#finds words over 15 characters and places them in the dictionary\n\tfor item in txt_list:\n\n\t\t#removes punctuation\n\t\titem = re.sub('\\.|\\!|\\?|\\,|\\(|\\)|\\;|\\:', '', item)\n\t\t#removes left and right quotations\n\t\titem = re.sub(u'[\\u201c\\u201d]', '', item)\n\n\n\t\tif len(item) >= 15:\n\t\t\t#skips over dashed and em-dashed words\n\t\t\tif \"-\" in item:\n\t\t\t\tcontinue\n\t\t\telif \"—\" in item:\n\t\t\t\tcontinue\n\t\t\telif item in freq:\n\t\t\t\tfreq[item] += 1\n\t\t\telse:\n\t\t\t\tfreq[item] = 1\n\tprint(freq)\n\n\t#Turns dictionary to tuples so they can be sorted\n\twords, count = zip(*freq.items())\n\t#provides the index sort to apply to both arrays, descending by count, cutting list off at 10 items\n\tindexSort = np.argsort(count)[::-1]\n\twords = np.array(words)[indexSort]\n\twords = words[0:10]\n\tcount = np.array(count)[indexSort]\n\tcount = count[0:10]\n\tindicies = np.arange(len(words[:10]))\n\n\tplt.bar(indicies, count)\n\n\tplt.xticks(indicies, words)\n\n\tplt.show()\n\t#final = freq.most_common\n\treturn freq\n\n\nif __name__ == \"__main__\":\n\tlong_word(\"wap_notoc.txt\")\n","sub_path":"wordlength15.py","file_name":"wordlength15.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"98464144","text":"from flask import Flask, render_template, session, url_for, flash\r\nfrom flask import make_response\r\nfrom flask import redirect\r\nimport os\r\nfrom flask.ext.sqlalchemy import SQLAlchemy\r\nfrom flask.ext.bootstrap import Bootstrap\r\nfrom flask.ext.wtf import Form\r\nfrom wtforms import StringField, SubmitField\r\nfrom wtforms.validators import Required\r\n\r\nbasedir = os.path.abspath(os.path.dirname(__file__))\r\napp=Flask(__name__) 
\r\napp.config['SQLALCHEMY_DATABASE_URI']='sqlite:///'+os.path.join(basedir,'data.sqlite')\r\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\r\ndb = SQLAlchemy(app)\r\napp.config['SECRET_KEY'] = 'GCXDL'\r\nbootstrap = Bootstrap(app)\r\n\r\ndoor_state={'state':0}\r\n\r\n#db.create_all()\r\n\r\n@app.route('/',methods=['GET','POST'])\r\ndef index():\r\n\tform = NameForm()\r\n\tif form.validate_on_submit():\r\n\t\told_name = session.get('name')\r\n\t\tif old_name is not None and old_name != form.name.data:\r\n\t\t\tflash('Looks like you changed your name!')\r\n\t\tsession['name']=form.name.data\r\n\t\t#door_state['state']=100\r\n\t\tform.name.data = ''\r\n\t\treturn redirect(url_for('index'))\r\n\treturn render_template('index.html',form=form, name=session.get('name'))\r\n\r\n@app.route('/user/<name>')\r\ndef user(name):\r\n return render_template('user.html', name=name)\r\n\r\n@app.route('/user/id/<ie>')\r\ndef id(ie):\r\n \treturn 'id is %s' %ie\r\n\r\n@app.route('/door')\r\ndef door():\r\n\treturn render_template('door_state.html',door_state=door_state.get('state'))\r\n\r\n\r\n'''class Role(db.Model):\r\n\t__tablename__ = 'roles'\r\n\tid = db.Column(db.Integer, primary_key=True)\r\n\tname = db.Column(db.String(64), unique=True)\r\n\r\n\tdef __repr__(self):\r\n\t\treturn ', Sergio J. Rey and Elijah Knaap \"\n\nimport numpy as np\n\nfrom .._base import MultiGroupIndex, SpatialImplicitIndex\n\nnp.seterr(divide=\"ignore\", invalid=\"ignore\")\n\n\ndef _multi_local_simpson_interaction(data, groups):\n \"\"\"\n Calculation of Local Simpson Interaction index for each unit\n\n Parameters\n ----------\n\n data : a pandas DataFrame of n rows\n \n groups : list of strings.\n The variable names in data of the groups of interest of the analysis.\n\n Returns\n -------\n\n statistics : np.array(n)\n Local Simpson Interaction values for each unit\n \n core_data : a pandas DataFrame\n A pandas DataFrame that contains the columns used to perform the estimate.\n\n Notes\n -----\n Based on the local version of Equation 1 of page 37 of Reardon, Sean F., and Glenn Firebaugh. \"Measures of multigroup segregation.\" Sociological methodology 32.1 (2002): 33-67.\n \n Simpson's interaction index can be simply interpreted as the probability that two individuals chosen at random and independently from the population will be found to not belong to the same group.\n\n Higher values mean less segregation.\n \n Simpson's Concentration + Simpson's Interaction = 1\n \n Reference: :cite:`reardon2002measures`.\n\n \"\"\"\n\n core_data = data[groups]\n\n df = np.array(core_data)\n\n ti = df.sum(axis=1)\n pik = df / ti[:, None]\n\n local_SI = np.nansum(pik * (1 - pik), axis=1)\n\n return local_SI, core_data, groups\n\n\nclass MultiLocalSimpsonInteraction(MultiGroupIndex, SpatialImplicitIndex):\n \"\"\"Multigroup Local Simpson Interaction Index.\n\n Parameters\n ----------\n data : pandas.DataFrame or geopandas.GeoDataFrame, required\n dataframe or geodataframe if spatial index holding data for location of interest\n groups : list, required\n list of columns on dataframe holding population totals for each group\n w : libpysal.weights.KernelW, optional\n libpysal spatial kernel weights object used to define an egohood\n network : pandana.Network\n pandana Network object representing the study area\n distance : int\n Maximum distance (in units of geodataframe CRS) to consider the extent of the egohood\n decay : str\n type of decay function to apply. 
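A tiny numeric illustration of _multi_local_simpson_interaction above: per unit, the score is the probability that two randomly drawn members belong to different groups, so a perfectly mixed two-group row scores 0.5 and a single-group row scores 0:

import numpy as np

counts = np.array([[50, 50],   # evenly mixed unit
                   [100, 0]])  # single-group unit
ti = counts.sum(axis=1)
pik = counts / ti[:, None]
print(np.nansum(pik * (1 - pik), axis=1))  # [0.5, 0.0]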
Options include\n precompute : bool\n Whether to precompute the pandana Network object\n\n Attributes\n ----------\n statistics : np.array\n Local Simpson Interaction value for each unit\n core_data : a pandas DataFrame\n DataFrame that contains the columns used to perform the estimate.\n \"\"\"\n\n def __init__(\n self,\n data,\n groups,\n w=None,\n network=None,\n distance=None,\n decay=None,\n precompute=None,\n function='triangular'\n ):\n \"\"\"Init.\"\"\"\n\n MultiGroupIndex.__init__(self, data, groups)\n if any([w, network, distance]):\n SpatialImplicitIndex.__init__(self, w, network, distance, decay, function, precompute)\n aux = _multi_local_simpson_interaction(self.data, self.groups)\n\n self.statistics = aux[0]\n self.data = aux[1]\n self.groups = aux[2]\n self._function = _multi_local_simpson_interaction\n","sub_path":"segregation/local/local_multi_simpsons_interaction.py","file_name":"local_multi_simpsons_interaction.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"111214072","text":"import requests\nimport json\nfrom flask import Flask, request, jsonify\napp = Flask(__name__)\n\ndef sum_up(list_of_numbers):\n total = 0\n for number in list_of_numbers:\n if type(number) != int and type(number) != float:\n raise TypeError('number in list (' + str(number) +') expected to be int/float, instead was ' + str(type(number)))\n total = total + number\n return(total)\n\n@app.route('/total', methods=['POST'])\ndef total():\n data = request.get_json()\n print(data['listOfNumbers'])\n list_of_numbers = data['listOfNumbers']\n try:\n total = sum_up(list_of_numbers)\n except TypeError:\n return('Please only use int/floats in your list of numbers', 400)\n except:\n return('Internal server error', 500)\n return{'total':total}","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"575188452","text":"def linear_search(list,n):\n i = 0\n for i in range(len(list)):\n if list[i] == int(n):\n globals()['pos'] = i\n return True\n return False\n\npos = -1\n\nlist = [21,33,45,9,24,9,34,7,88,96,54,35,66,77]\n\nn =input(\"Find The Number => \")\n\nif linear_search(list,int(n)):\n print(f\"\\nNumber {n} Is Present At Index => {pos+1}\")\nelse:\n print(f\"\\nNumber {n} Not Found\") ","sub_path":"Personel/Sandesh/Python/Sorting/LinerSearch.py","file_name":"LinerSearch.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"352465171","text":"from socket import *\nimport msg\nfrom homework.log.server_log_config import *\n\ns = socket(AF_INET, SOCK_STREAM)\ns.bind(('127.0.0.1', 7777))\ns.listen(5)\nwhile True:\n client, addr = s.accept()\n data = client.recv(1000000)\n try:\n server_info_logger(msg.un_byte(data))\n except TypeError as e:\n raise TypeError(e, server_error_logger(e))\n message = msg.to_byte(msg.response)\n client.send(message)\n client.close()\n","sub_path":"homework/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"21103284","text":"import random\r\nimport math\r\n\r\ngamenumber = int(input(\"How many problems do you want?\\n\"))\r\n\r\ndef main(gamenumber):\r\n random.seed()\r\n count = 0\r\n correct = 0\r\n while count < gamenumber:\r\n num_1 = 
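The /total endpoint above can be exercised with a small client; assuming the app is running on Flask's default development port:

import requests

resp = requests.post(
    "http://localhost:5000/total",
    json={"listOfNumbers": [1, 2, 3.5]},
)
print(resp.status_code, resp.text)  # expect 200 and a JSON body {"total": 6.5}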
random.randint(1,12)\r\n num_2 = random.randint(1,12)\r\n guess = int(input(\"What is \" + str(num_1) + \" x \" + str(num_2) + \"\\n\"))\r\n answer = num_1*num_2\r\n count += 1\r\n if guess == answer:\r\n correct += 1\r\n print(\"Good Job! Edward or Amy\")\r\n else:\r\n print(\"Sorry! The answer is\", answer, \".\")\r\n\r\n if gamenumber > 1:\r\n result = correct * 100./gamenumber\r\n\r\n print(\"You got\", \"%.1f%%\" % result, \"of the problems.\")\r\n \r\nmain(gamenumber)\r\n","sub_path":"multi_game001.py","file_name":"multi_game001.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"202364884","text":"import torch\nfrom robolearn.torch.utils import pytorch_util as ptu\n\nfrom robolearn.utils.data_management.replay_buffer import ReplayBuffer\n\n\nclass SimpleReplayBuffer(ReplayBuffer):\n def __init__(\n self, max_size, obs_dim, action_dim,\n ):\n if not max_size > 1:\n raise ValueError(\"Invalid Maximum Replay Buffer Size: {}\".format(\n max_size)\n )\n\n max_size = int(max_size)\n\n self._obs_buffer = torch.zeros((max_size, obs_dim),\n dtype=torch.float32,\n device=ptu.device)\n self._next_obs_buffer = torch.zeros((max_size, obs_dim),\n dtype=torch.float32,\n device=ptu.device)\n self._acts_buffer = torch.zeros((max_size, action_dim),\n dtype=torch.float32,\n device=ptu.device)\n self._rewards_buffer = torch.zeros((max_size, 1),\n dtype=torch.float32,\n device=ptu.device)\n self._terminals_buffer = torch.zeros((max_size, 1),\n dtype=torch.float32,\n device=ptu.device)\n\n self._obs_dim = obs_dim\n self._action_dim = action_dim\n self._max_size = max_size\n self._top = 0\n self._size = 0\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n self._obs_buffer[self._top] = torch.as_tensor(observation)\n self._acts_buffer[self._top] = torch.as_tensor(action)\n self._rewards_buffer[self._top] = torch.as_tensor(reward)\n self._terminals_buffer[self._top] = torch.as_tensor(terminal.astype(float))\n self._next_obs_buffer[self._top] = torch.as_tensor(next_observation)\n self._advance()\n\n def terminate_episode(self):\n pass\n\n def _advance(self):\n self._top = (self._top + 1) % self._max_size\n if self._size < self._max_size:\n self._size += 1\n\n def random_batch(self, batch_size):\n if batch_size > self._size:\n raise AttributeError('Not enough samples to get. %d bigger than '\n 'current %d!' 
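A minimal usage sketch for the SimpleReplayBuffer ring buffer defined here (the NumPy inputs are illustrative; note that add_sample calls terminal.astype, so terminals must be passed as arrays):

import numpy as np

buf = SimpleReplayBuffer(max_size=1000, obs_dim=4, action_dim=2)
for _ in range(64):
    buf.add_sample(
        observation=np.random.randn(4),
        action=np.random.randn(2),
        reward=np.array([0.0]),
        terminal=np.array([False]),
        next_observation=np.random.randn(4),
    )
batch = buf.random_batch(32)
print(batch["observations"].shape)  # torch.Size([32, 4])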
% (batch_size, self._size))\n\n indices = torch.randint(0, self._size, (batch_size,), dtype=torch.long,\n device=ptu.device)\n return dict(\n observations=self.buffer_index(self._obs_buffer, indices),\n actions=self.buffer_index(self._acts_buffer, indices),\n rewards=self.buffer_index(self._rewards_buffer, indices),\n terminals=self.buffer_index(self._terminals_buffer, indices),\n next_observations=self.buffer_index(self._next_obs_buffer, indices),\n )\n\n def available_samples(self):\n return self._size\n\n @staticmethod\n def buffer_index(buffer, indices):\n return torch.index_select(buffer, dim=0, index=indices)\n","sub_path":"robolearn/torch/utils/data_management/simple_replay_buffer.py","file_name":"simple_replay_buffer.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"319792525","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom qiandao.Mytool import *\n\n\ndef mail():\n ret = True\n try:\n msg = MIMEText(mail_msg, 'html', 'utf-8')\n msg['From'] = formataddr([\"FromKinCC\", my_sender]) # sender nickname and sender email account\n msg['To'] = ','.join(my_user) # recipient email account(s)\n msg['Subject'] = \"武科大教务处通知!\" # subject line of the email\n\n server = smtplib.SMTP_SSL(\"smtp.126.com\", 465) # SMTP server of the sender's mailbox; port 465 for SSL\n server.login(my_sender, my_pass) # sender email account and password\n server.sendmail(my_sender, my_user, msg.as_string()) # sender, recipients, and the message to send\n server.quit() # close the connection\n except Exception: # if anything in the try block fails, fall through to ret = False below\n ret = False\n return ret\n\n\nurl = 'http://202.114.242.231:8036/default.html'\nres = requests.get(url)\n# the page is GBK-encoded\nres.encoding = 'GBK'\n# parse with html.parser\nsoup = BeautifulSoup(res.text, 'html.parser')\n\nlink_list = soup.select(\".newslist_134259638016186242\")\n\nmy_sender = 'kin_cc@126.com' # sender email account\nmy_pass = 'jinchan1995' # sender email password\n'''\nmy_sender='1609085048@qq.com' # sender email account\nmy_pass = 'rzidmhqnmvjufeha' # sender email password\n'''\nmy_user = ['kin_cc@126.com']\n\nmail_msg = ''\nfor lin in link_list:\n ul = lin.select('ul')\n timess = lin.select(\".time\")\n href = ul[0].select('a')\n\nfor i in range(11):\n mail_msg = mail_msg + str(i + 1) + '、' + str(href[i]) + '\t' + str(timess[i]) + '<br>
' * 2\n\nwith open(r'C:\\kincc\\qiandao/jwc_info.txt', 'r') as f:\n if f.read() != mail_msg:\n with open(r'C:\\kincc\\qiandao/jwc_info.txt', 'w') as f:\n ret = mail()\n f.write(mail_msg)\n f.close()\n print(\"邮件发送成功!\")\n log_file('教务处')\n else:\n print(\"邮件重复,不发送!\")\n","sub_path":"qiandao/jwc_mail.py","file_name":"jwc_mail.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"595130655","text":"\n#\n# Copyright 2015-2019 University of Southern California\n# Distributed under the Apache License, Version 2.0. See LICENSE for more info.\n#\n\nimport re\nimport web\n\nfrom webauthn2.util import jsonReader\n\nfrom .. import core\nfrom .core import web_url, web_method, RestHandler, NoMethod, Conflict, NotFound, BadRequest, LengthRequired, \\\n PayloadTooLarge\n\n@web_url([\n # path, name, job, chunk, querystr\n '/((?:[^/:;?]+/)*)([^/:;?]+);upload/([^/:;?]+)/([^/:;?]+)[?](.*)',\n '/((?:[^/:;?]+/)*)([^/:;?]+);upload/([^/:;?]+)/([^/:;?]+)()'\n])\nclass ObjectTransferChunk (RestHandler):\n\n def __init__(self):\n RestHandler.__init__(self)\n\n @web_method()\n def PUT(self, path, name, job, chunk, querystr):\n \"\"\"Upload chunk of transfer job.\"\"\"\n try:\n chunk = int(chunk)\n except ValueError:\n raise BadRequest('Invalid chunk number %s.' % chunk)\n\n if chunk < 0:\n raise BadRequest('Invalid chunk number %s.' % chunk)\n \n try:\n nbytes = int(web.ctx.env['CONTENT_LENGTH'])\n except:\n raise LengthRequired()\n\n if nbytes > core.config.get(\"max_request_payload_size\", core.max_request_payload_size_default):\n raise PayloadTooLarge()\n\n metadata = {}\n\n for hdr, var in [\n ('content-md5', 'HTTP_CONTENT_MD5'),\n ('content-sha256', 'HTTP_CONTENT_SHA256')\n ]:\n val = web.ctx.env.get(var)\n if val is not None:\n metadata[hdr] = val\n \n upload = self.resolve_upload(path, name, job)\n upload.enforce_acl(['owner'], web.ctx.webauthn2_context)\n self.http_check_preconditions('PUT')\n upload.upload_chunk_from_file(\n chunk, \n web.ctx.env['wsgi.input'],\n web.ctx.webauthn2_context,\n nbytes,\n web.ctx.hatrac_directory.metadata_from_http(metadata)\n )\n return self.update_response()\n\n@web_url([\n # path, name, job, querystr\n '/((?:[^/:;?]+/)*)([^/:;?]+);upload/([^/:;?]+)/?[?](.*)',\n '/((?:[^/:;?]+/)*)([^/:;?]+);upload/([^/:;?]+)/?()'\n])\nclass ObjectTransfer (RestHandler):\n\n def __init__(self):\n RestHandler.__init__(self)\n\n @web_method()\n def POST(self, path, name, job, querystr):\n \"\"\"Update status of transfer job to finalize.\"\"\"\n upload = self.resolve_upload(path, name, job)\n self.http_check_preconditions('POST')\n version = upload.finalize(web.ctx.webauthn2_context)\n return self.create_response(version)\n\n @web_method()\n def DELETE(self, path, name, job, querystr):\n \"\"\"Cancel existing transfer job.\"\"\"\n upload = self.resolve_upload(path, name, job)\n self.http_check_preconditions('DELETE')\n upload.cancel(web.ctx.webauthn2_context)\n return self.update_response()\n\n def _GET(self, path, name, job, querystr):\n \"\"\"Get status of transfer job.\"\"\"\n upload = self.resolve_upload(path, name, job)\n self.http_check_preconditions()\n return self.get_content(upload, web.ctx.webauthn2_context)\n\n@web_url([\n # path, name, querystr\n '/((?:[^/:;?]+/)*)([^/:;?]+);upload/?[?](.*)',\n '/((?:[^/:;?]+/)*)([^/:;?]+);upload/?()'\n])\nclass ObjectTransfers (RestHandler):\n\n def __init__(self):\n RestHandler.__init__(self)\n\n @web_method()\n def POST(self, path, name, querystr):\n 
\"\"\"Create a new chunked transfer job.\"\"\"\n in_content_type = self.in_content_type()\n\n if in_content_type != 'application/json':\n raise BadRequest('Only application/json input is accepted for upload jobs.')\n try:\n job = jsonReader(web.ctx.env['wsgi.input'].read().decode())\n except ValueError as ev:\n raise BadRequest('Error reading JSON input:' % ev)\n if type(job) != dict:\n raise BadRequest('Job input must be a flat JSON object.')\n\n try:\n try:\n # backwards-compatibility\n chunksize = int(job['chunk_bytes'])\n except KeyError as ev:\n chunksize = int(job['chunk-length'])\n try:\n # backwards-compatibility\n nbytes = int(job['total_bytes'])\n except KeyError as ev:\n nbytes = int(job['content-length'])\n except KeyError as ev:\n raise BadRequest('Missing required field %s.' % ev)\n except ValueError as ev:\n raise BadRequest('Invalid count: %s.' % ev)\n\n metadata = {}\n\n for hdr, keys in [\n ('content-type', {'content_type', 'content-type'}),\n ('content-md5', {'content_md5', 'content-md5'}),\n ('content-sha256', {'content-sha256'}),\n ('content-disposition', {'content-disposition'})]:\n for key in keys:\n val = job.get(key)\n if val is not None:\n metadata[hdr] = val\n \n # create object implicitly or reuse existing object...\n try:\n params = self.parse_querystr(querystr)\n make_parents = params.get('parents', 'false').lower() == 'true'\n resource = web.ctx.hatrac_directory.create_name(\n self._fullname(path, name),\n True, # is_object\n make_parents,\n web.ctx.webauthn2_context\n )\n except core.Conflict as ev:\n try:\n resource = self.resolve(path, name).get_uploads()\n except core.NotFound as ev:\n raise Conflict('Name %s is not available for use.' % self._fullname(path, name))\n \n # say resource_exists=False as we always create a new one...\n self.http_check_preconditions('POST', False)\n upload = resource.create_version_upload_job(\n chunksize, web.ctx.webauthn2_context, nbytes, web.ctx.hatrac_directory.metadata_from_http(metadata)\n )\n return self.create_response(upload)\n\n def _GET(self, path, name, querystr):\n \"\"\"List outstanding chunked transfer jobs.\"\"\"\n resource = self.resolve(path, name).get_uploads()\n self.http_check_preconditions()\n return self.get_content(resource, web.ctx.webauthn2_context)\n \n\n\n","sub_path":"hatrac/rest/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":6310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"202201711","text":"import os\nimport locale\n\nfrom flask import Flask\nfrom flask_login import LoginManager\nfrom flask_migrate import Migrate\n\nfrom bamboo.extensions import db\n\n\nlocale.setlocale(locale.LC_TIME, 'ru_RU')\n\n\ndef create_app():\n\n __config = {\n \"development\": \"config.DevelopmentConfig\",\n \"testing\": \"config.TestingConfig\",\n \"production\": \"config.ProductionConfig\"\n }\n\n app = Flask(__name__)\n config_name = os.getenv('FLASK_CONFIGURATION', 'development')\n app.config.from_object(__config[config_name])\n db.init_app(app)\n return app\n\n\napp = create_app()\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login_view'\n\nmigrate = Migrate(app, db)\n\n\nimport bamboo.views, bamboo.models\n","sub_path":"bamboo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"524859982","text":"\nfrom PyQt5.QtWidgets import (QApplication, QWidget, 
QPushButton, QLabel,\n QLineEdit, QHBoxLayout, QVBoxLayout)\nfrom PyQt5.QtCore import QObject, pyqtSignal, Qt\n\n\nclass KeySetterWindow(QWidget):\n\n check_keys_signal = pyqtSignal(int, int, str, str)\n\n def __init__(self, client):\n super().__init__()\n self.direction = 'left'\n self.client = client\n self.init_GUI()\n self.right_key = None\n self.right_key_name = None\n self.left_key = None\n self.left_key_name = None\n\n def init_GUI(self):\n self.setGeometry(300, 200, 350, 300)\n self.msg1_label = QLabel('Choose the keys for movement', self)\n self.msg2_label = QLabel(f'Press a key to choose {self.direction} '\n f'movement')\n self.right_button = QPushButton('Right movement Key', self)\n self.right_button.clicked.connect(self.change_to_right_direction)\n self.right_key_label = QLabel('', self)\n self.left_button = QPushButton('Left movement Key', self)\n self.left_button.clicked.connect(self.change_to_left_direction)\n self.left_key_label = QLabel('', self)\n self.msg3_label = QLabel('', self)\n self.next_button = QPushButton('Next', self)\n self.next_button.clicked.connect(self.check_keys)\n\n vbox = QVBoxLayout()\n vbox.addStretch(1)\n vbox.addWidget(self.msg1_label)\n vbox.addWidget(self.msg2_label)\n vbox.addStretch(.5)\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.left_button)\n hbox.addWidget(self.right_button)\n vbox.addLayout(hbox)\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.left_key_label)\n hbox.addWidget(self.right_key_label)\n vbox.addLayout(hbox)\n vbox.addStretch(.4)\n vbox.addWidget(self.msg3_label)\n vbox.addStretch(.7)\n vbox.addWidget(self.next_button)\n vbox.addStretch(1)\n self.setLayout(vbox)\n\n self.check_keys_signal.connect(self.client.check_keys)\n\n\n def keyPressEvent(self, e):\n key_name = e.text().upper()\n if key_name == '':\n direction_keys = {16777235: 'Up', 16777236: 'Right',\n 16777234: 'Left', 16777237: 'Down'}\n key_name = direction_keys.get(e.key())\n elif key_name == ' ':\n key_name = 'Space'\n if self.direction == 'right':\n self.right_key = e.key()\n self.right_key_name = key_name\n self.right_key_label.setText(f'Chosen key: {key_name}')\n self.right_key_label.resize(self.right_key_label.sizeHint())\n elif self.direction == 'left':\n self.left_key = e.key()\n self.left_key_name = key_name\n self.left_key_label.setText(f'Chosen key: {key_name}')\n self.left_key_label.resize(self.left_key_label.sizeHint())\n\n def change_to_right_direction(self):\n self.direction = 'right'\n self.msg2_label.setText(f'Press a key to choose right movement')\n self.msg2_label.resize(self.msg2_label.sizeHint())\n\n def change_to_left_direction(self):\n self.direction = 'left'\n self.msg2_label.setText(f'Press a key to choose left movement')\n self.msg2_label.resize(self.msg2_label.sizeHint())\n\n def check_keys(self):\n self.check_keys_signal.emit(self.left_key, self.right_key,\n self.left_key_name, self.right_key_name)\n\n def open_waiting_room(self, state):\n if state == 'ok':\n self.msg3_label.setText('Ready')\n elif state == 'invalid':\n self.msg3_label.setText('One or more keys are invalid')\n elif state == 'same':\n self.msg3_label.setText('Left and right movent can not have the '\n 'same key')\n self.msg3_label.resize(self.msg3_label.sizeHint())\n\n\n","sub_path":"Tareas/T04/client/key_setter_frontend.py","file_name":"key_setter_frontend.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"310328000","text":"'''\nThis file contains some geometry-related 
functions I used for collision detection in a Pygame project I\nabandoned some years ago. While this isn't my most recent work, it does serve to demonstrate my tendency to\nreinvent the wheel rather than grapple with external libraries when I only need a few simple functions to\nget things up and running. I'm happy to do either, but sometimes writing things from scratch reduces future\nconfusion and saves time in the long run.\n'''\n\n#!/usr/local/bin/python3\n# Name: Simon Katzer\n# File: geom.py\n# Desc: This file defines a bunch of geometry-related functions.\n\nimport entity, wall, math\n\n#########################\n# SIMPLE MATH FUNCTIONS #\n#########################\n\ndef max(a, b):\n\treturn a if a>b else b\ndef min(a, b):\n\treturn a if a<b else b\ndef sign(n):\n\tif n > 0: return 1\n\telif n < 0: return -1\n\ndef slope(p1, p2): # returns the slope of the line defined by 2 points\n\treturn math.inf if p1[0] == p2[0] else (p2[1]-p1[1])/(p2[0]-p1[0]) # slope = delta y / delta x\ndef on_segment(point, line):\n\t# it's collinear\n\treturn (\n\t\t\t(slope(point, line[0]) == slope(point, line[1])) and\n\t\t\t# its x is in the bounding box\n\t\t\tpoint[0] >= min(line[0][0],line[1][0]) and\n\t\t\tpoint[0] <= max(line[0][0],line[1][0]) and\n\t\t\t# its y is in the bounding box\n\t\t\tpoint[1] >= min(line[0][1],line[1][1]) and\n\t\t\tpoint[1] <= max(line[0][1],line[1][1])\n\t)\ndef point_of_intersection(line1, line2):\n\t#print(\"Finding intersection of {0} and {1}... \".format(line1, line2),end=\"\")\n\t#find the equations for the two lines\n\tslope1 = slope(line1[0], line1[1]) # the slope (m) of line1\n\tslope2 = slope(line2[0], line2[1]) # the slope (m) of line2\n\tconst1 = 0 if (slope1 == math.inf) else (line1[0][1] - line1[0][0]*slope1) # the constant (b) of line1\n\tconst2 = 0 if (slope2 == math.inf) else (line2[0][1] - line2[0][0]*slope2) # the constant (b) of line2\n\n\tif slope1 == slope2: return False # lines are parallel\n\t\n\tif math.isinf(slope1):\n\t\tintersectionX = line1[0][0]\n\t\tintersectionY = slope2 * intersectionX + const2 # mx + b\n\telif math.isinf(slope2):\n\t\tintersectionX = line2[0][0]\n\t\tintersectionY = slope1 * intersectionX + const1 # mx + b\n\telse:\n\t\tslopeTemp = slope1 - slope2;\n\t\tconstTemp = const2 - const1;\n\t\tintersectionX = constTemp / slopeTemp;\n\t\tintersectionY = slope1 * intersectionX + const1;\n\t#print(\"result: {0}\".format((intersectionX, intersectionY)))\t\n\treturn (intersectionX, intersectionY)\n\n'''\n# https://stackoverflow.com/questions/2259476/rotating-a-point-about-another-point-2d\ndef rotate_point(cx, cy, angle, p):\n\ts = sin(angle)\n\tfloat c = cos(angle);\n\n\t# translate point back to origin:\n\tp[0] -= cx;\n\tp[1] -= cy;\n\n\t# rotate point\n\txnew = p.x * c - p.y * s;\n\tynew = p.x * s + p.y * c;\n\n\t# translate point back:\n\tp[0] = xnew + cx;\n\tp[1] = ynew + cy;\n\treturn p;\n'''\n\n###############################\n# FUNCTIONS INVOLVING CLASSES #\n###############################\n\ndef distance(a, b): # use the Pythagorean Theorem to find the distance between two points (tuples) or entities\n\tif isinstance(a, tuple) and isinstance(b, tuple): return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)\n\telif isinstance(a, tuple) and isinstance(b, entity.Entity): return math.sqrt((a[0]-b.x)**2 + (a[1]-b.y)**2)\n\telif isinstance(a, entity.Entity) and isinstance(b, tuple): return distance(b, a)\n\telif isinstance(a, entity.Entity) and isinstance(b, entity.Entity): return math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2)\n\n# returns a number indicating which direction 
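A quick numeric check of the slope, point_of_intersection and on_segment helpers above, using two segments that cross at the origin so the results are easy to verify by hand (it assumes the module's entity/wall imports resolve so geom can be imported at all):

line_a = ((-1, -1), (1, 1))   # the line y = x
line_b = ((-1, 1), (1, -1))   # the line y = -x
print(slope(*line_a))                         # -> 1.0
print(point_of_intersection(line_a, line_b))  # -> (0.0, 0.0)
print(on_segment((0, 0), line_a))             # -> True, (0, 0) lies on the segment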
this entity should face to look at the specified point. Not very elegant atm\ndef angle_to_point(entity_, point):\n\treturn -math.degrees(math.atan((point[1]-entity_.y)/(point[0]-entity_.x))) + (180 if (point[0]-entity_.x) < 0 else 0) % 360\n\ndef collision(a, b):\n\tif isinstance(a, entity.Entity) and isinstance(b, entity.Entity):\n\t\treturn distance(a.center(),b.center()) < a.COLLISION_RADIUS + b.COLLISION_RADIUS\n\telif isinstance(a, entity.Entity) and isinstance(b, wall.Wall):\n\t\t#rename them so the code is easier to read\n\t\tentity_ = a\n\t\twall_ = b\n\t\tfor i in range(0,wall_.num_vertices()): # for each side of the wall:\n\t\t\tside = (wall_.vertices[i],wall_.vertices[(i+1)%wall_.num_vertices()]) \t\t\t\t# define the side\n\t\t\ttemp_point = () \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# this point, plus the entity center, will define a line perpendicular to the side of the wall\n\t\t\tif abs(slope(side[0], side[1])) == 0: # if the line is horizontal\n\t\t\t\ttemp_point = (entity_.x, entity_.y + 1)\t\n\t\t\telif math.isinf(slope(side[0],side[1])): temp_point = (entity_.x + 1, entity_.y) \t# if the line is vertical\n\t\t\telse: temp_point = entity_.x, entity_.y + -1/slope(side[0], side[1]) \t\t\t\t# if the line is at some other angle\n\t\t\tintersection = point_of_intersection(side,(entity_.center(),temp_point));\n\t\t\t#if i == 0: print(\"Point of intersection of side {0},{1} and {2},{3}: {4}\".format(side[0], side[1], entity_.center(), temp_point, intersection))\n\t\t\tif (distance(entity_.center(), intersection) < entity_.COLLISION_RADIUS and on_segment(intersection, side)) or distance(entity_.center(), wall_.vertices[i]) < entity_.COLLISION_RADIUS:\n\t\t\t\treturn True\n\t\treturn False\n\telif isinstance(a, wall.Wall) and isinstance(b, entity.Entity): return collision(b, a)\n\n######################\n# ENTITY PATHFINDING #\n######################\n\n'''\nWhat I need to do here is to make a function which determines whether an entity has line of sight to a point (taking into account collision radius\nby drawing one line from each side of itself). If one of these raytraces hits a wall, it will attempt new sets of raytraces with offsets of\n1 degree ccw, 1 degree cw, 2 degrees ccw, 2 degrees cw, and so on. These new rays will go the same distance as the first one did when it hit\na wall and, if both are successful, it will check for line of sight to the target as if it had just teleported to its new location. 
If it\nstill can't see the target, it will attempt to go back to step 1 and find a new location to \"teleport\" to, and if none of these locations are\nsuccessful, it will give up.\n'''\n","sub_path":"geom.py","file_name":"geom.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"457538473","text":"from bs4 import BeautifulSoup\r\n\r\ndef generalParamForParserTable(tag:str,attrs:dict=None,splitStart:int=None,splitEnd:int=None) -> dict:\r\n    param = {'tag': tag, 'attrs': attrs, 'splitStart': splitStart, 'splitEnd': splitEnd}\r\n    return param\r\n\r\ndef parserTable(soup:BeautifulSoup,searchParams:list,curFloor = 0,searchData=None) -> list:\r\n    # avoid the classic shared-mutable-default-argument bug\r\n    if searchData is None:\r\n        searchData = []\r\n    if curFloor==len(searchParams):\r\n        return searchData\r\n    param = searchParams[curFloor]\r\n    curFloor+=1\r\n    tags = soup.find_all(param['tag'], attrs=param['attrs'])[param['splitStart']: param['splitEnd']]\r\n\r\n    dataItem = []\r\n    for tag in tags:\r\n        if curFloor == len(searchParams):\r\n            dataItem.append(tag.get_text().replace(\"\\xa0\",\"\"))\r\n        else:\r\n            parserTable(tag,searchParams,curFloor,searchData)\r\n    if dataItem:\r\n        searchData.append(dataItem)\r\n    return searchData\r\n","sub_path":"htmlparser.py","file_name":"htmlparser.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"572431683","text":"from .models import Country\n\ndef countries(request):\n    if request.user.is_authenticated:\n        if request.user.is_superuser:\n            countries = Country.objects.all()\n            if not request.session.get('country'):\n                request.session['country'] = Country.objects.all().first().id\n                choosen_country = Country.objects.all().first()\n            else:\n                choosen_country = Country.objects.filter(pk=request.session.get('country')).first()\n        else:\n            countries = request.user.countries.all()\n            # countries = Country.objects.all()\n            if request.session.get('country') and int(request.session.get('country')) in countries.values_list('id', flat=True):\n                choosen_country = Country.objects.filter(pk=request.session.get('country')).first()\n            elif countries.first():\n                choosen_country = countries.first()\n                request.session['country'] = countries.first().id\n            else:\n                choosen_country = request.user.countries.all().first()\n    else:\n        choosen_country = None\n        countries = None\n    return {'countries': countries,'choosen_country': choosen_country}\n\ndef is_arc(request):\n    context = {'is_arc':False}\n    if request.user.is_authenticated:\n        country = Country.objects.get(id=request.session.get('country'))\n        if country.name == 'United States':\n            context['is_arc'] = True\n    return context\n","sub_path":"main/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"557661961","text":"from math import*\n\nk=float(input(\"k: \"))\ncont=1\nn=0\nsoma=0\nwhile(cont<=k):\n\t\n\ttotal=soma+(1/factorial(n))\n\tn=n+1\n\tcont=cont+1\n\tsoma=total\nprint(round(total,8))\n\t\n\n\n","sub_path":"exs/1464-1137.py","file_name":"1464-1137.py","file_ext":"py","file_size_in_byte":165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"199559250","text":"\"\"\"\nExample of working with a user-defined series.\n\nThis example does not make much sense on its own, as the calculation should really be moved to\na separate library module. 
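(The pattern, as the code below shows: register a handler under a ticker in Providers['U'].SeriesMapper, and a later fetch('U@TICKER') call invokes it on demand.) 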
However, this shows the workflow.\n\n\"\"\"\n\n\nfrom econ_platform_core import fetch, Providers\nfrom econ_platform.start import quick_plot\n\n\n#---------------------------------------------------------------------------------------\n# Normally, the user-defined series would be handled in a library. Kept here to keep\n# Example self-contained.\n\ndef us_inflation(query_ticker):\n if not query_ticker == 'US_CPI_INFLATION':\n raise ValueError('Wrong series!')\n cpi_index = fetch('F@CPIAUCSL') # All -items, SA\n inf = cpi_index.pct_change(12)\n return inf\n# Push the handler into the UserProvider\n# Assume we are still using the default provider code\nuser_provider = Providers['U']\nuser_provider.SeriesMapper['US_CPI_INFLATION'] = us_inflation\n# End of code that should be in a library.\n#--------------------------------------------------------------------\n\n\n# Now we can fetch it.\n# Note that since this calculated series uses the US CPI index level, the system\n# automatically also creates the CPI series on the database when this is called.\ninf = fetch('U@US_CPI_INFLATION')\nquick_plot(inf)","sub_path":"examples/ex20190503_user_series.py","file_name":"ex20190503_user_series.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"544050301","text":"import os\nimport sys\nimport numpy as np\nimport pickle\n\nimport matplotlib\n\nmatplotlib.use(\"Agg\") # Can change to 'Agg' for non-interactive mode\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import cm\nimport matplotlib.patches as mpatches\nimport matplotlib.animation as animation\n\nfrom yw.tool import logger\nfrom yw.flow.plot import load_results\n\n\ndef visualize_potential_surface(ax, res):\n ct = ax.contour(*res[\"o\"], res[\"surf\"])\n try: # in case of nan\n plt.colorbar(ct, ax=ax)\n except:\n pass\n # ax.axis([-1, 0, -1, 0])\n ax.set_xlabel(\"s1\")\n ax.set_ylabel(\"s2\")\n for item in [ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels():\n item.set_fontsize(6)\n\n\ndef visualize_action(ax, res, plot_opts={}):\n for i in range(res[\"o\"].shape[0]):\n ax.arrow(*res[\"o\"][i], *(res[\"u\"][i] * 0.05), head_width=0.01, **plot_opts)\n ax.axis([-1, 0, -1, 0])\n ax.set_xlabel(\"s1\")\n ax.set_ylabel(\"s2\")\n for item in [ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels():\n item.set_fontsize(6)\n\n\ndef create_animate(frame, data):\n # use the default figure\n pl.figure(0)\n gs = gridspec.GridSpec(1, 1)\n # create a new axes for action\n ax = pl.subplot(gs[0, 0])\n ax.clear()\n # get the result\n res = data[frame]\n # plot\n visualize_action(ax, res)\n\n\ndef create_plot(frame, fig, all_results, query_ls):\n\n # Plot frame\n fig.text(0.01, 0.01, \"frame: {}\".format(frame), ha=\"left\", va=\"bottom\", fontsize=10, color=\"r\")\n\n # Load data\n data = {}\n for result in all_results:\n exp_name = result[\"params\"][\"config\"]\n load_dir = result[\"dirname\"]\n queries = {}\n for q_k in query_ls:\n if q_k in os.listdir(load_dir):\n q = os.path.join(load_dir, q_k)\n result_files = os.listdir(q)\n result_files.sort()\n if result_files != []:\n queries[q_k] = {\n **np.load(os.path.join(q, result_files[frame if frame < len(result_files) else -1]))\n }\n if queries != {}:\n data[exp_name] = queries\n\n # Initialization\n num_rows = len(data.keys())\n num_cols = 
len(query_ls)\n    gs = gridspec.GridSpec(num_rows, num_cols)\n    col = cm.jet(np.linspace(0, 1.0, num_rows))\n\n    for i, exp in enumerate(data.keys()):\n        for j, query in enumerate(query_ls):\n\n            if query not in data[exp].keys():\n                continue\n\n            ax = plt.subplot(gs[i, j])\n            ax.clear()\n\n            if query in [\n                \"query_policy\",\n                \"query_optimized_p_only\",\n                \"query_optimized_q_only\",\n                \"query_optimized_p_plus_q\",\n            ]:\n                visualize_action(ax, data[exp][query], plot_opts={\"color\": col[i]})\n                ax.legend(handles=[mpatches.Patch(color=col[i], label=exp)], loc=\"lower left\")\n\n            if query in [\"query_surface_p_only\", \"query_surface_q_only\", \"query_surface_p_plus_q\"]:\n                visualize_potential_surface(ax, data[exp][query])\n\n            fig.text(\n                0.02,\n                1.0 - (1.0 / (2 * num_rows) + i / num_rows),\n                exp,\n                ha=\"center\",\n                va=\"center\",\n                fontsize=10,\n                color=\"r\",\n                rotation=\"vertical\",\n            )\n\n\ndef main(directories, save, mode=\"plot\", **kwargs):\n\n    logger.configure()\n    assert logger.get_dir() is not None\n\n    # Allow load dir to be *\n    all_results = load_results(directories)\n    # Pre-parse the data\n    query_ls = [\n        \"query_surface_p_only\",\n        \"query_surface_q_only\",\n        \"query_surface_p_plus_q\",\n        # UNCOMMENT for simple environments\n        # \"query_optimized_p_only\",\n        # \"query_optimized_q_only\",\n        # \"query_optimized_p_plus_q\",\n        # \"query_policy\",\n    ]\n    data = {}\n    num_frames = 0\n    for result in all_results:\n        exp_name = result[\"params\"][\"config\"]\n        load_dir = result[\"dirname\"]\n        queries = {}\n        for q_k in query_ls:\n            if q_k in os.listdir(load_dir):\n                q = os.path.join(load_dir, q_k)\n                result_files = os.listdir(q)\n                num_frames = max(num_frames, len(result_files))\n                if result_files != []:\n                    queries[q_k] = 1\n        if queries != {}:\n            data[exp_name] = queries\n\n    # Initialization\n    fig = plt.figure(0)  # use the same figure\n    fig.subplots_adjust(left=0.05, right=0.95, bottom=0.1, top=0.9, wspace=0.35, hspace=0.35)\n    num_rows = len(data.keys())\n    num_cols = len(query_ls)\n    fig.set_size_inches(4 * num_cols, 4 * num_rows)\n\n    for i, k in enumerate(query_ls):\n        scale = 0.95\n        shift = (1.0 - scale) / 2.0\n        x = (1.0 / (2 * num_cols) + i / num_cols) * scale + shift\n        plt.figtext(\n            x, 0.95, k.replace(\"query_\", \"\"), ha=\"center\", va=\"center\", fontsize=14, color=\"r\", rotation=\"horizontal\"\n        )\n\n    if mode == \"plot\":\n\n        create_plot(frame=-1, fig=fig, all_results=all_results, query_ls=query_ls)\n\n        if save:\n            res_store_dir = os.path.join(directories[0], \"queries.png\")\n            print(\"Storing query plot to {}\".format(res_store_dir))\n            plt.savefig(res_store_dir, dpi=200)\n\n    elif mode == \"mv\":\n        # Plot animation\n        fig = plt.figure(0)  # create figure before hand\n        res = animation.FuncAnimation(\n            fig, create_plot, num_frames, fargs=(fig, all_results, query_ls), repeat=False, interval=300\n        )\n\n        if save:\n            res_store_dir = os.path.join(directories[0], \"queries.mp4\")\n            print(\"Storing query animation to {}\".format(res_store_dir))\n            res.save(res_store_dir, dpi=200)\n\n    else:\n        return\n\n    plt.show()  # close the figure and then continue\n\n\nif __name__ == \"__main__\":\n    from yw.util.cmd_util import ArgParser\n\n    ap = ArgParser()\n    ap.parser.add_argument(\"--mode\", help=\"plot the last or create an animation\", type=str, default=\"plot\")\n    ap.parser.add_argument(\n        \"--directory\", help=\"directories to load results from\", type=str, action=\"append\", default=None, dest=\"directories\"\n    )\n    ap.parser.add_argument(\"--save\", help=\"save figure\", type=int, default=0)\n    ap.parse(sys.argv)\n    
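# Example invocation (paths are illustrative, not from the original):\n    #   python visualize_query.py --mode mv --directory ./logs/exp1 --save 1\n    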
main(**ap.get_dict())\n","sub_path":"Package/yw/yw/flow/query/visualize_query.py","file_name":"visualize_query.py","file_ext":"py","file_size_in_byte":6524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"479705209","text":"'''\nCreated on 7May.,2018\n\nDjango Stats Middleware\n\n@author: Bernd Wechner, based on an old snippet here: https://code.djangoproject.com/wiki/PageStatsMiddleware\n@status: Beta - works and is in use on a dedicated project. Can't guarantee it works everywhere. Tested on Django 2.0 only with Python 3.6.\n\nInserts some basic performance stats just prior to the </body> tag in the response of every page served.\n\nTo use, add it to the MIDDLEWARE list in settings.py as follows:\n\nMIDDLEWARE = (\n    'django_stats_middleware.StatsMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n    'django.middleware.security.SecurityMiddleware' \n)\n\nCan be easily tweaked below to deliver whatever stats you like. \n\nThis information cannot be delivered to pages through the template context because timing information is \ncollected until the whole template is already rendered. To wit, we patch it into the content just above \nthe </body> tag. If your page has no such tag, stats won't appear on it of course.\n'''\n\n# Python Imports\nfrom time import time\nfrom operator import add\nfrom functools import reduce\nimport re\nfrom re import RegexFlag as ref # Specifically to avoid a PyDev Error in the IDE.\n\n# Django Imports\nfrom django.db import connection\nfrom django.conf import settings\n\nclass StatsMiddleware(object):\n\n    def __init__(self, get_response):\n        self.get_response = get_response\n        # One-time configuration and initialization.\n\n    def __call__(self, request):\n        if not settings.DEBUG:\n            return self.get_response(request)\n\n        # get number of db queries before we do anything\n        n = len(connection.queries)\n\n        # time the view\n        start = time()\n        response = self.get_response(request)\n        total_time = time() - start\n\n        # compute the db time for the queries just run\n        db_queries = len(connection.queries) - n\n        if db_queries:\n            db_time = reduce(add, [float(q['time'])\n                                   for q in connection.queries[n:]])\n        else:\n            db_time = 0.0\n\n        # and backout python time\n        python_time = total_time - db_time\n        \n        stats = br''.join((br'
<table id=\"stats\" style=\"position: fixed; bottom: 0; right: 0; background: #fff; border: 1px solid #000;\">'\n            br'<tr>',\n            br'<td>STATS:</td><td>Total Time:</td><td>', \"{:.1f} ms\".format(total_time*1000).encode(), br'</td><td>Python Time:</td><td>', \"{:.1f} ms\".format(python_time*1000).encode(), br'</td><td>DB Time:</td><td>', \"{:.1f} ms\".format(db_time*1000).encode(), br'</td><td>Number of Queries:</td><td>', \"{:,}\".format(db_queries).encode(), br'</td></tr></table>
\\1'))\n\n        # Insert the stats just prior to the body close tag (we need to update the Content-Length header or the browser won't render it all).\n        if response and getattr(response, 'content', False):\n            response.content = re.sub(br\"(</body>)\", stats, response.content, flags=ref.IGNORECASE)\n            response['Content-Length'] = str(len(response.content)) \n\n        return response\n    ","sub_path":"django_stats_middleware/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"368710125","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nfrom keras.callbacks import ModelCheckpoint\nfrom jitter import rand_jitter\nimport numpy as np\n\n\nclass Network(object):\n\n    def __init__(self, nb_filters=32, nb_conv=3, nb_pool=2):\n        model = Sequential()\n        model.add(Convolution2D(nb_filters, nb_conv, nb_conv,\n                                border_mode='valid',\n                                input_shape=(1, 5, 30)))\n        model.add(Activation('relu'))\n        model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n        model.add(Convolution2D(nb_filters//2, nb_conv, nb_conv))\n        model.add(Activation('relu'))\n        model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n        model.add(Dropout(0.2))\n\n        model.add(Flatten())\n        model.add(Dense(500))\n        model.add(Dropout(0.2))\n        model.add(Activation('relu'))\n        model.add(Dense(500))\n        model.add(Dropout(0.2))\n        model.add(Activation('relu'))\n        model.add(Dense(10))\n        model.add(Activation('softmax'))\n\n        model.compile(loss='categorical_crossentropy',\n                      optimizer='adadelta',\n                      metrics=['accuracy'])\n\n        self.model = model\n\n    def get_model(self):\n        return self.model\n\n    def train(self, train_data, validation_data, nb_epoch=50, batch_size=128, weight_file=\"weights.hdf5\"):\n        # Callback for model saving:\n        checkpointer = ModelCheckpoint(filepath=weight_file, \n                                       verbose=1, save_best_only=True)\n\n        X_train, Y_train = train_data[0], train_data[1]\n        X_test, Y_test = validation_data[0], validation_data[1]\n\n        # Training\n        for k in range(1, nb_epoch + 1):\n            print(\"epoch %s/%s:\" %(k,nb_epoch))\n            X_train_temp = np.copy(X_train) # Copy so we don't affect the originals\n            \n            # Add noise on later epochs\n            if k > 1:\n                for j in range(0, X_train_temp.shape[0]):\n                    X_train_temp[j,0, :, :] = rand_jitter(X_train_temp[j,0,:,:])\n\n            self.model.fit(X_train_temp, Y_train, nb_epoch=1, batch_size=batch_size, \n                           validation_data=(X_test, Y_test), \n                           callbacks=[checkpointer])\n\n    def load_weights(self, weight_file):\n        self.model.load_weights(weight_file)\n\n    def evaluate(self, X_data, Y_data):\n        score = self.model.evaluate(X_data, Y_data, verbose=0)\n        return score\n\n    def predict(self, X_test, batch_size=128):\n        predictions = self.model.predict(X_test, batch_size=batch_size)\n        return predictions\n\n\nif __name__ == '__main__':\n    a = Network()\n    model = a.get_model()\n\n","sub_path":"submit/network2.py","file_name":"network2.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"644402676","text":"import time\nfrom threading import Thread\nfrom urllib.request import urlopen\nimport PiConfigReader\nimport logging\n\nclass Manager:\n    def __init__(self, internet_connection_check_config):\n        self.internet_connection_check_config = internet_connection_check_config\n        self.internet_connection_threshold = 1\n\n    def 
start_internet_connection_thread(self):\n        self.internet_connection_thread = Thread(target=self.manage_internet_connection, args=())\n        self.internet_connection_thread.start()\n\n    def _connect_to_internet(self):\n        try:\n            url = self.internet_connection_check_config.get_url()\n            urlopen(url, timeout=5.0)\n            self.internet_connected = True\n        except Exception as error:\n            self.internet_connected = False\n\n    def manage_internet_connection(self):\n        # No debouncing. Alert users for momentary internet drops.\n        internet_connection_threshold = 1\n        internet_connected = False\n        internet_connected_stable = False\n        internet_connected_count = 0\n        internet_disconnected_count = 0\n        while True:\n            self._connect_to_internet()\n            if self.internet_connected:\n                if not internet_connected_stable:\n                    # We just connected the wifi\n                    internet_connected_count += 1\n                    if (internet_connected_count >= internet_connection_threshold):\n                        # We've consistently had wifi\n                        logging.info('Internet connection is stable')\n                        internet_connected_stable = True\n                        internet_disconnected_count = 0\n            else:\n                logging.info('Not connected to internet')\n                if internet_connected_stable:\n                    # We've just lost the wifi connection\n                    # Debounce this change in state\n                    internet_disconnected_count += 1\n                    if (internet_disconnected_count >= self.internet_connection_threshold):\n                        # We've consistently lost wifi\n                        logging.info('Internet connection is no longer stable')\n                        internet_connected_stable = False\n                        internet_connected_count = 0\n            time.sleep(1.0)\n\n","sub_path":"pi_demos/KnockoutTank/InternetConnection.py","file_name":"InternetConnection.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"34231280","text":"from trade_system.transactions.services import task\nfrom config import celery_app\n# from trade_system.transactions.services import search_offer\nfrom trade_system.offers import services\n\n@celery_app.task()\ndef perform_trade(seller, buyers):\n    task(seller, buyers)\n\n\n@celery_app.task()\ndef task_search_offer():\n    sellers = services.get_opened_sale_offers()\n    buyers = services.get_opened_purchase_offers()\n\n    for seller_offer in sellers:\n        perform_trade.delay(seller_offer, buyers)","sub_path":"trade_system/transactions/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"477823776","text":"import cv2\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('soltpepar1.png') \n\n#Averaging\navg = cv2.blur(img,(10,10)) \n#cv2.imshow('mean',avg)\n#cv2.waitKey(0)\n\n#cv2.destroyAllWindows()\n\nplt.subplot(2,1,1),plt.imshow(img,cmap = 'gray')\nplt.title('Original'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,1,2),plt.imshow(avg,cmap = 'gray')\nplt.title('Mean'), plt.xticks([]), plt.yticks([])\nplt.show()\n","sub_path":"IMG LAB FINAL/3rd_lab_31_aug/mean.py","file_name":"mean.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"322554760","text":"__author__ = 'jay'\n\n'''\nA program that counts how many a’s, b’s, c’s etc. there are in the string The quick brown fox jumped over the lazy\ndog. Case-fold the characters before counting; i.e., count the T in The as a t, just like a lower-case t. The program\ndisplays the characters (a-z) and their counts. 
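(For comparison, collections.Counter(c for c in s.lower() if c.isalpha()) from the standard library would compute the same counts in one line; the nested loop below spells the logic out by hand.) 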
E.g., it should show that there are four o’s.\n'''\n\ns = \"The quick brown fox jumped over the lazy dog\"\ns_lower = s.lower()\n\nindx = 0\nfor c in s_lower:\n    if c != \" \":\n        cntr = 1\n        inner_indx = 0\n        for checker in s_lower:\n            if inner_indx != indx and c == checker:\n                cntr += 1\n            inner_indx += 1\n        isAre = \"is\"\n        if cntr > 1:\n            isAre = \"are\"\n        print(\"There %s %d %s\" % (isAre, cntr, c))\n    indx += 1\n","sub_path":"bootcamp/ex1-count-chars.py","file_name":"ex1-count-chars.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"603196679","text":"# -*- coding: utf-8 -*-\n# @Author: cody\n# @Date: 2019-09-18 11:20:31\n# @Last Modified by: cody\n# @Last Modified time: 2019-09-18 13:04:41\n\n\nimport os, traceback, urllib.parse\nimport logging as log\nimport cherrypy, requests\n\nlog.root.setLevel(log.DEBUG)\n\nif 'OPENFAAS_URL' in os.environ:\n    OPENFAAS_URL = os.environ['OPENFAAS_URL']\nelse:\n    OPENFAAS_URL = 'http://127.0.0.1:8080'\n\nlog.debug('OPENFAAS_URL - %s', OPENFAAS_URL)\n\nassert '://' in OPENFAAS_URL, OPENFAAS_URL\n\ndef verify_non_empty_string(s):\n    ''' shortcut for asserting that a variable is a non-empty string '''\n    assert isinstance(s, str), s\n    assert s.strip(), s\n\ndef html_friendly_exception(ex):\n    '''returns a browser friendly view for a crash'''\n    log.exception(ex)\n    return ''.join(\n        traceback.format_exception(\n            type(ex),\n            ex,\n            ex.__traceback__\n        )\n    ).replace('\n', '<br/>
')\n\ndef request(f, q=None):\n    ''' maps url parameters to valid request data and returns the faas result '''\n    try:\n        yield from requests.get(\n            '{}/function/{}'.format(OPENFAAS_URL, f),\n            **({} if q is None else {'data':q})\n        )\n    except Exception as ex:\n        yield html_friendly_exception(ex)\n\nclass RequestGateway(object):\n    ''' contains all the logic for the server '''\n    @cherrypy.expose\n    def index(self, f, q=None):\n        verify_non_empty_string(f)\n        if q is not None:\n            verify_non_empty_string(q)\n            log.debug('before url parse - %s', q)\n            q = urllib.parse.unquote(q)\n            log.debug('after url parse - %s', q)\n\n        yield from request(f, q)\n\n\ncherrypy.config.update({\n    'server.socket_host': '0.0.0.0',\n    'server.socket_port': 8080\n})\n\ncherrypy.quickstart(RequestGateway())\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"79119598","text":"\n\nimport pygame\nfrom threading import Thread, Lock\n\nmutex = Lock()\n\nclass ovelha:\n    pos_x =0\n    pos_y =0\n    velocidade = 0\n    imagem = None\n    tamanho_y = 50\n    tamanho_x = 50\n    status = True\n    \n\n    def __init__(self, pos_x,pos_y, velocidade):\n        self.pos_x = pos_x\n        self.pos_y = pos_y\n        self.velocidade = velocidade\n        self.imagem = None\n        self.tamanho_y = 50\n        self.tamanho_x = 50\n        self.status = True\n    \n    def spawn_ovelha(pos_x,pos_y, velocidade):\n        o = ovelha(pos_x,pos_y,velocidade)\n        o.imagem = pygame.image.load('Animais/Ovelha/cima1.png')\n        return o\n    \n    def sheep_up(vel, o):\n        mutex.acquire(1)\n        try:\n            o.pos_y -= vel\n            o.imagem = pygame.image.load('Animais/Ovelha/cima1.png')\n        finally:\n            mutex.release()\n        return o\n    \n    def sheep_down(vel, o):\n        o.pos_y += vel\n        o.imagem = pygame.image.load('Animais/Ovelha/baixo1.png')\n        return o\n    \n    def sheep_left(vel, o):\n        o.pos_x -= vel\n        o.imagem = pygame.image.load('Animais/Ovelha/esquerda1.png')\n        return o\n    \n    def sheep_right(vel, o):\n        o.pos_x += vel\n        o.imagem = pygame.image.load('Animais/Ovelha/direita1.png')\n        return o\n    \n    def Comer(o):\n        o.pos_y -=5\n        return o\n\n","sub_path":"ComputacaoGrafica/Game.py/Jogo Python/ovelha.py","file_name":"ovelha.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"376419791","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\n#g01 - kicking scene - TRUNC binarization\r\nimport os\r\n\r\ntry:\r\n    os.makedirs('./g01-out/tru/')\r\nexcept:\r\n    print(\"OK:build=./g01-out/tru/\")\r\n\r\nfor i in range(9):\r\n    #read the first frame\r\n    if i == 0 :\r\n        img_name = \"./g01/g01\" + str(i + 1) + \".jpg\"\r\n        img_color = cv2.imread(img_name, cv2.IMREAD_COLOR)\r\n        img = cv2.imread(img_name, 0) # read directly as a grayscale image\r\n\r\n        titles = ['img_original', 'gray']\r\n        b, g, r = cv2.split(img_color)\r\n        img_color_merg = cv2.merge([r, g, b])\r\n        images = [img_color_merg,img]\r\n        #plot\r\n        plt.figure()\r\n        plt.subplot(1,2,1),plt.imshow(images[0])\r\n        plt.title(titles[0])\r\n        plt.subplot(1,2,2),plt.imshow(images[1],'gray')\r\n        plt.title(titles[1])\r\n        plt.xticks([]), plt.yticks([])\r\n        plt.ion()\r\n        plt.pause(0.4) #display duration in seconds\r\n        plt.close()\r\n    else:\r\n        img_name = \"./g01/g01\" + str(i + 1) + \".jpg\"\r\n        img_color = cv2.imread(img_name, cv2.IMREAD_COLOR)\r\n\r\n        img_name1= \"./g01/g01\" + str(i ) + \".jpg\"\r\n        img_name2= \"./g01/g01\" + str(i + 1) + \".jpg\"\r\n        #print(i+2,\"-YES\")\r\n        img_color_1 = cv2.imread(img_name1) # read the frame (color)\r\n        img_color_2 = cv2.imread(img_name2) # read the frame (color)\r\n        img_sub = 
cv2.subtract(img_color_1, img_color_2)\r\n        #accumulate the differences after subtraction (gray image)\r\n        if i == 1:\r\n            add_img_sub = img_sub\r\n        else:\r\n            add_img_sub = cv2.add(add_img_sub, img_sub)\r\n\r\n        #binarize after subtracting\r\n        img1 = cv2.imread(img_name1, 0) # read directly as a grayscale image\r\n        img2 = cv2.imread(img_name2, 0) # read directly as a grayscale image\r\n        img_sub = cv2.subtract(img1, img2)\r\n        ret_2, thresh3 = cv2.threshold(img_sub, 127, 255, cv2.THRESH_TRUNC)\r\n        #save thresh3\r\n\r\n        output_tru_name=\"./g01-out/tru/g01-out-tru-\"+str(i)+\".jpg\"\r\n        cv2.imwrite(output_tru_name,thresh3)\r\n        #accumulate the binarized frames\r\n        if i == 1:\r\n            tru_add_img_sub= thresh3\r\n        else:\r\n            tru_add_img_sub= cv2.add(tru_add_img_sub, thresh3)\r\n\r\n\r\n\r\n        img_name_pl=\"img\"+str(i+1)\r\n        titles = [img_name_pl, 'TRUNC-cut']\r\n\r\n        b, g, r = cv2.split(img_color)\r\n        img_color_merg = cv2.merge([r, g, b])\r\n        images = [img_color_merg, thresh3]\r\n        #plot\r\n        plt.figure()\r\n        plt.subplot(1,2,1),plt.imshow(images[0])\r\n        plt.title(titles[0])\r\n        plt.subplot(1,2,2),plt.imshow(images[1],'gray')\r\n        plt.title(titles[1])\r\n        plt.xticks([]), plt.yticks([])\r\n        plt.ion()\r\n        plt.pause(0.4) #display duration in seconds\r\n        plt.close()\r\n\r\n#show the accumulated subtraction results\r\n#color frames: differences accumulated in add_img_sub\r\n#binarized frames: accumulated in tru_add_img_sub\r\ncv2.imwrite(\"./g01-out/g01-color-out.jpg\", add_img_sub)\r\ncv2.imwrite(\"./g01-out/g01-tru-out.jpg\", tru_add_img_sub)\r\n#show the final result\r\ntitles = [\"all-add\", 'TRUNC-cut-all']\r\nb, g, r = cv2.split(add_img_sub)\r\nimg_color_merg_out = cv2.merge([r, g, b])\r\nimages = [img_color_merg_out, tru_add_img_sub]\r\n#plot\r\nplt.figure()\r\nplt.subplot(1,2,1),plt.imshow(images[0])\r\nplt.title(titles[0])\r\nplt.subplot(1,2,2),plt.imshow(images[1],'gray')\r\nplt.title(titles[1])\r\nplt.xticks([]), plt.yticks([])\r\nplt.ion()\r\nplt.pause(3) #display duration in seconds\r\nplt.close()\r\n\r\nprint(\"Done!\")","sub_path":"0104/test0101-g01-1.py","file_name":"test0101-g01-1.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"127468998","text":"import data_finder\nfrom pickle import dump, load\nimport random\n\nclass Auto_Tweeter:\n    def __init__ (self):\n        self.data_grab()\n        self.tweet_maker()\n\n    def data_grab(self):\n        # asks if you need to pull new data for this person\n        print ('What is the handle of the person you want to mimic?')\n        self.user_handle = input()\n\n        print ('Have you collected Data? 
(y/n)')\n        collect_data = input()\n        #pulls new data by calling data_finder\n        if collect_data == 'n':\n            self.statuses = data_finder.data_finder(self.user_handle)\n        self.dictionary_maker() # calls the dictionary_maker method\n    def dictionary_maker(self):\n        #sorts the data for Markov analysis\n        self.dictionary = {}\n        self.start = []\n        self.end = []\n        for tweet in self.statuses:\n            # collects all the starts of each tweet\n            self.each_start = [[tweet[0],tweet[1]]]\n            self.start = self.start + self.each_start\n        self.dictionary['START']= self.start\n        for tweet in self.statuses:\n            tweet += ['END'] # adding an END flag to the end of the tweet\n            i= len(tweet)\n            for x in range (0,(i-2)):\n                #forming a dictionary linking the previous two words to the next word\n                if tweet[x]+tweet[x+1] in self.dictionary:\n                    self.dictionary[tweet[x]+tweet[x+1]] += [tweet[x+2]]\n                else:\n                    self.dictionary[tweet[x]+tweet[x+1]] = [tweet[x+2]]\n\n        self.pickle()\n\n    def pickle(self):\n        #saves the data set in a file\n        Data_Set = open(str(self.user_handle)+'.txt','wb')\n        dump(self.dictionary, Data_Set)\n        Data_Set.close()\n\n\n    def tweet_maker(self):\n        Data_Set = open(str(self.user_handle)+'.txt','rb')\n        #loading the data\n        self.dictionary = load(Data_Set)\n        print(len(self.dictionary['START']))\n        self.new_tweet=[]\n        self.new_tweet += random.choice(self.dictionary['START'])\n        #starts the tweet off with a random beginning\n        #keeps adding randomly until we arrive at an END\n        while not self.new_tweet[-1] == 'END':\n            self.new_tweet += [random.choice(self.dictionary[self.new_tweet[-2]+self.new_tweet[-1]])]\n        self.final_tweet = ' '\n        #adds the words together and adds spaces\n        for word in self.new_tweet:\n            if not word =='END':\n                self.final_tweet += ' ' + word\n        #prints the new tweet\n        print(str(self.user_handle)+' says:'+ self.final_tweet)\n\n        print('Make another tweet (y/n)')\n        another= input()\n        if another == 'y':\n            self.tweet_maker()\n\nif __name__ == '__main__':\n    tweeter = Auto_Tweeter()\n","sub_path":"Auto_tweeter.py","file_name":"Auto_tweeter.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"242664035","text":"# -*- coding : utf-8- -*-\r\n__author__ = 'KimHippo'\r\n\r\nfrom keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D\r\nfrom keras.utils.np_utils import to_categorical\r\nfrom keras.callbacks import EarlyStopping\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom keras.models import Model\r\nimport keras_sub_pack as ksp\r\nfrom glob import glob\r\nimport os\r\n\r\n\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nsess = tf.Session(config = config)\r\n\r\nwith tf.device('/gpu:0'):\r\n    class cat_n_dog(Model):\r\n\r\n        def __init__(self, classes=2, in_shape=None):\r\n            self.classes, self.in_shape = classes, in_shape\r\n            self.model()\r\n            i, t = self.i, self.t\r\n\r\n            super().__init__(i, t)\r\n            self.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])\r\n\r\n        def model(self):\r\n            classes = self.classes\r\n            in_shape = self.in_shape\r\n\r\n            i = Input(shape=in_shape)\r\n            c = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(i)\r\n            c = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n            c = MaxPooling2D(pool_size=(2, 2))(c)\r\n\r\n            c = Conv2D(filters=128, kernel_size=(3, 
3), activation='relu', padding='same')(c)\r\n c = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = MaxPooling2D(pool_size=(2, 2))(c)\r\n\r\n c = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = MaxPooling2D(pool_size=(2, 2))(c)\r\n\r\n c = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = MaxPooling2D(pool_size=(2, 2))(c)\r\n\r\n c = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(c)\r\n c = MaxPooling2D(pool_size=(2, 2))(c)\r\n f = Flatten()(c)\r\n\r\n h = Dense(4096, activation='relu')(f)\r\n h = Dense(4096, activation='relu')(h)\r\n h = Dense(1000, activation='relu')(h)\r\n t = Dense(classes, activation='softmax')(h)\r\n\r\n self.i, self.t = i, t\r\n\r\n class DATA:\r\n\r\n def __init__(self, path, format='png', img_size=28, test_size=0.3, rand_stat=0):\r\n self.path = path\r\n self.format = format\r\n self.img_size = img_size\r\n\r\n self.preprocessing()\r\n in_data, tar_data = self.in_data, self.tar_data\r\n in_train, in_test, tar_train, tar_test = train_test_split(in_data, tar_data, test_size=test_size,\r\n random_state=rand_stat)\r\n self.in_train, self.in_test = in_train, in_test\r\n self.tar_train, self.tar_test = tar_train, tar_test\r\n self.gener = ImageDataGenerator(horizontal_flip=True, rotation_range=30, vertical_flip=True)\r\n\r\n def load(self):\r\n path, format = self.path, self.format\r\n img_path = glob(os.path.join(path, '*.' 
+ format))\r\n labels = [img_path[label][len(path) + 1] for label in range(len(img_path))]\r\n images = [plt.imread(img_path[imgs]) for imgs in range(len(img_path))]\r\n images = np.asarray(images)\r\n self.images, self.labels = images, labels\r\n\r\n def preprocessing(self):\r\n self.load()\r\n in_data, tar_data = self.images, self.labels\r\n classes = len(set(tar_data))\r\n\r\n in_data = in_data.astype('float32')\r\n in_shape = in_data.shape[1:]\r\n tar_data = to_categorical(tar_data, classes)\r\n\r\n self.in_data, self.tar_data = in_data, tar_data\r\n self.classes, self.in_shape = classes, in_shape\r\n\r\n\r\n class run:\r\n\r\n def __init__(self, path, set_name, format='png', epo=15, batch=30, fig=1):\r\n\r\n data = DATA(path=path, img_size=300, format=format)\r\n model = cat_n_dog(classes=data.classes, in_shape=data.in_shape)\r\n flow = data.gener.flow(data.in_train, data.tar_train, batch_size = batch)\r\n histo = model.fit_generator(flow, epochs = epo, steps_per_epoch=len(data.in_train) / 32,\r\n validation_data=(data.in_test, data.tar_test))\r\n loss_n_accu = model.evaluate(data.in_test, data.tar_test, batch_size=batch)\r\n print(f'Test loss : {loss_n_accu[0]} \\n accuracy : {loss_n_accu[1]}')\r\n\r\n if fig:\r\n plt.figure(figsize=(12,5))\r\n plot = ksp.plot(histo)\r\n plt.subplot(1, 2, 1)\r\n plot.plot_loss()\r\n\r\n plt.subplot(1, 2, 2)\r\n plot.plot_acc()\r\n plt.show()\r\n\r\n self.model = model\r\n self.set_name = set_name\r\n self.save_n_load()\r\n\r\n def save_n_load(self):\r\n\r\n model = self.model\r\n set_name = self.set_name\r\n fname = ksp.random_name()\r\n path = '../weights' + set_name\r\n if not os.path.isdir(path):\r\n os.mkdir(path)\r\n os.chdir(path)\r\n\r\n model.save_weights('{}_{}_kimhippo.h5'.format(fname, set_name))\r\n print('Train weights are saved in : ', path)\r\n\r\n\r\n path = '../Datas/cat_and_dog/using_model'\r\n set_name = 'cat_n_dog'\r\n format = 'png'\r\n\r\n run = run(path=path, set_name=set_name, format=format, epo = 50)","sub_path":"keras/케라스공부/3. CNN/VGG16_with_cats_and_dogs.py","file_name":"VGG16_with_cats_and_dogs.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"573703998","text":"# Author: Ashutosh Kumar, The University of Tokyo\n# Date: 2021/03/10\n# Program Version: 1.1.0\n\n# Please obtain API token from U-BLOX and IPINFO for Assisted GPS (A-GPS) service. They can be substituted on Line 46 and 47 of this program\n\n\nimport pprint\nimport ipinfo\nimport pynmea2\nimport serial\nimport requests\nimport time\nimport os\nimport threading\nimport multiprocessing\nfrom datetime import datetime\nimport subprocess\nfrom random import randint\n\n# Get a random digit of n digits. This is used to name video files.\ndef rand_n(n):\n range_start = 10**(n-1)\n range_end = (10**n)-1\n return randint(range_start, range_end)\n\n# This function uses location based on the IP address and passes it to the device to assist obtaining GPS location\ndef get_initial_location(ipinfo_access_token):\n\tapi_handler = ipinfo.getHandler(ipinfo_access_token)\n\tdetails = api_handler.getDetails()\n\n\tlatitude, longitude = details.details[\"loc\"].split(\",\")\n\n\treturn {\"latitude\": latitude, \"longitude\": longitude}\n\n# get_lat_lon function is used to get the latitude and longitude in real time and updates the global latitude and longitude variable. 
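Both latitude and longitude are multiprocessing.Value('d') doubles in shared memory, so each assignment to .value below is immediately visible to the main process. 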
This function runs on a separate thread.\ndef get_lat_lon(latitude, longitude):\n INTERNET_AVAILABLE = True\n GPS_FAILURE_TIME_LIMIT = 40\n COM_PORT = \"/dev/ttyUSB0\"\n BAUDRATE = 115200\n API_TOKEN = \"API_TOKEN_FOR_ASSIST_NOW\"\n IPINFO_ACCESS_TOKEN = \"API_TOKEN_FOR_IPINFO\"\n gps_failure_time = 0\n internet_failure_count = 0\n\n print(\"Running get_lat_lon function\")\n f = open(f\"/home/{os.getlogin()}/gps_data/{datetime.now().strftime('%Y%m%d%H%M%S')}.log\", \"a\")\n\n if INTERNET_AVAILABLE:\n a_gps = True\n while a_gps:\n try:\n initial_location = get_initial_location(IPINFO_ACCESS_TOKEN)\n http_response = requests.get(\"https://online-live1.services.u-blox.com/GetOnlineData.ashx?token=\" +\n API_TOKEN + \";gnss=gps,glo,qzss,bds,gal;datatype=eph,alm,aux,pos;lat=\" + initial_location[\"latitude\"] + \";lon=\" + initial_location[\"longitude\"] +\";alt=0.000000;pacc=5000.000000;filteronpos\")\n a_gps = False\n except:\n print(\"Internet is not available, trying again!\")\n time.sleep(5)\n internet_failure_count += 1\n if internet_failure_count >= 5:\n INTERNET_AVAILABLE = False\n break\n else:\n continue\n\n serial_port = serial.Serial(COM_PORT, BAUDRATE, timeout = None)\n\n # Wait until GPS is released\n pipe = True\n while pipe:\n pipe = serial_port.inWaiting()\n serial_port.read(pipe)\n\n # Write A-GPS Data\n if INTERNET_AVAILABLE:\n print(\"Sending A-GPS Data To GPS Module\")\n serial_port.write(http_response.content)\n print(\"Finished !\")\n else:\n print(\"Continue without internet or A-GPS\")\n\n try:\n while True:\n line = serial_port.readline()\n line2 = line.decode('latin-1')\n\n if line2.startswith(\"$GNGGA\"):\n msg = pynmea2.parse(line2.strip())\n latitude.value, longitude.value = msg.latitude, msg.longitude\n f.write(f\"{datetime.now().strftime('%Y:%m:%d:%H:%M:%S')} {msg.timestamp} {str(latitude.value)} {str(longitude.value)} \\n\")\n f.flush()\n else:\n pass\n except KeyboardInterrupt:\n serial_port.close()\n\n\ndef gstreamer_nano():\n\n cmd = f\"gst-launch-1.0 -e v4l2src device=/dev/video0 ! image/jpeg,width=1280,height=720,framerate=30/1 ! jpegdec ! videoflip method=rotate-180 ! nvvidconv ! queue ! clockoverlay ! omxh264enc ! splitmuxsink location=/home/{os.getlogin()}/video_data/{rand_n(5)}-%04d.mp4 max-size-time=60000000000 max-size-bytes=10000000\" \n process = subprocess.Popen(cmd, shell = True)\n\nif __name__ == \"__main__\":\n \n\n # Here we do not put any values to the latitude and longitude in the beginning. 
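multiprocessing.Value('d', 1.0) simply allocates a shared double with a placeholder value. 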
The values will be updated by get_lat_lon, which runs in a separate process and writes to these shared objects.\n    latitude = multiprocessing.Value('d', 1.0)\n    longitude = multiprocessing.Value('d', 1.0)\n\n    gps_thread = multiprocessing.Process(target = get_lat_lon, args=(latitude, longitude))\n    gps_thread.start()\n    \n    gstreamer_nano()","sub_path":"video_gps.py","file_name":"video_gps.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"552470407","text":"#!/usr/bin/python3\n\"\"\"\n\nFunction that prints a square with the character \"#\"\n\n\"\"\"\n\n\ndef print_square(size):\n    \"\"\"\n    Function that prints a square with the character \"#\"\n    \"\"\"\n    if not (isinstance(size, (int, float))):\n        raise TypeError(\"size must be an integer\")\n    if (isinstance(size, float) and size < 0):\n        raise TypeError(\"size must be an integer\")\n    if size < 0:\n        raise ValueError(\"size must be >= 0\")\n    if size == 0:\n        print()\n    for i in range(size):\n        for j in range(size):\n            print(\"#\", end=\"\")\n        print()\n","sub_path":"0x07-python-test_driven_development/4-print_square.py","file_name":"4-print_square.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"557138864","text":"\"\"\"program to accept a number from the user; then display the reverse of the entered number.\"\"\"\n\nn=int(input(\"Enter the value of n: \"))\nrev=0\ntemp=n\nwhile not n==0:\n    r=n%10\n    rev=(rev*10)+r\n    n=n//10\nprint(f\"given number is {temp}\")\nprint(f\"Reversed number is {rev}\")\n","sub_path":"internship/M_1/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"598001565","text":"import numpy as np\nimport glob\nimport os\nfrom definition import *\n\n\n\nPATH = '/Users/austinmcdonald/Desktop/ArgonXenon/'\n\nfolders = glob.glob(PATH+\"*/\")\nfolders.sort()\nfolders.remove('/Users/austinmcdonald/Desktop/ArgonXenon/Lamp/')\n#folders = [folders[1]]\n#folders = [folders[1]]\n#folders\n\n\n\n# arrival, sigma, max, RC\nDATA = []\nfor F in range(0,len(folders)):\n    \n    files = os.listdir(folders[F])\n    if '.DS_Store' in files: files.remove('.DS_Store')\n    info = np.loadtxt(folders[F]+'Run-info.txt')\n    files.remove('Run-info.txt')\n    files.sort()\n    N = 0\n    for fi in files:\n        print(\"starting on\")\n        print(fi)\n        Data = np.loadtxt(folders[F]+fi)\n        if np.mean(np.nan_to_num(Data[0]))!=0 and np.mean(np.nan_to_num(Data[1]))!=0:\n            xa,ya,opta,chia,cuta = FITTER_ANOD(Data[0],Data[2])\n            xg,yg,optg,chig,cutg = FITTER_GOLD(Data[0],Data[1])\n            \n            EE =int(fi.split('.txt')[0].split('-')[-1])\n            PP = int(fi.split('.txt')[0].split('-')[2].split('_')[0])\n            ident = None\n            if PP==1:\n                PP = info[0]\n                ident = '^'\n            elif PP==3:\n                PP = info[1]\n                ident = 'd'\n            elif PP==6:\n                PP = info[2]\n                ident = 's'\n            elif PP==9:\n                PP = info[3]\n                ident = '*'\n            \n            Prct = folders[F].split('/')[-2]\n            dt = opta[0]-optg[0]\n            sigma = opta[1]\n            #INFO = [Prct, EE, PP, dt, sigma, ident, opta, optg]\n            INFO = [float(Prct), EE, PP, dt, sigma, ident]\n            #INFO = np.array([int(Prct), EE, PP, dt, sigma, ident])\n            \n            DATA.append(INFO)\nDATA = 
np.array(DATA)\n#print(DATA[:,0])\nnp.save('XenonArgon.npy',DATA)\n","sub_path":"Electron-XeAr/Waveform-Fits.py","file_name":"Waveform-Fits.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"117467072","text":"from __future__ import print_function\nimport sugartensor as tf\nimport numpy as np\nfrom prepro import *\nfrom train import ModelGraph\nimport codecs\nimport readchar\n\ndef main(): \n g = ModelGraph(mode=\"test\")\n \n with tf.Session() as sess:\n tf.sg_init(sess)\n\n # restore parameters\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint('asset/train'))\n print(\"Restored!\")\n mname = open('asset/train/checkpoint', 'r').read().split('\"')[1] # model name\n\n char2idx, idx2char = load_char_vocab()\n word2idx, idx2word = load_word_vocab()\n\n\n previous = [0]*50 # a stack for previous words\n para = \"EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE\"\n ctx = [0]*50\n\n while True:\n key = readchar.readkey().lower()\n\n if key == readchar.key.BACKSPACE:\n ctx.insert(0, previous.pop())\n ctx.pop()\n previous.insert(0, 0)\n\n elif key == readchar.key.ESC:\n break\n\n else:\n key_idx = char2idx[key]\n ctx.append(key_idx)\n ctx.pop(0)\n\n logits = sess.run(g.logits, {g.x: np.expand_dims(ctx, 0)})\n preds = logits.argsort()[0][-3:]\n # pred = np.argmax(logits, -1)[0]\n predword1, predword2, predword3 = [idx2word.get(pred) for pred in preds]\n print(predword1, ' ', predword2, ' ', predword3)\n\n\n \nif __name__ == '__main__':\n main()\n print(\"Done\")\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"258462907","text":"import pygame\r\nimport time\r\nimport random\r\n\r\npygame.init()\r\n\r\n# define colors\r\nblack=(0,0,0)\r\nwhite=(255,255,255)\r\nred=(255,0,0)\r\ngreen=(0,255,0)\r\n\r\n\r\n# Create game environment\r\ndisplay_width=800\r\ndisplay_height=600\r\ngameDisplay=pygame.display.set_mode((display_width,display_height))\r\npygame.display.set_caption(\"Train The Model!\")\r\nclock=pygame.time.Clock()\r\n\r\n# Load in player background images\r\nplayer_img=pygame.image.load(r\"C:\\Users\\Oli\\Documents\\Python_Game_Devs\\cropped_rocket.png\")\r\nplayer_width = 60\r\nbg_1 = pygame.image.load(r\"C:\\Users\\Oli\\Documents\\Python_Game_Devs\\starry_night.png\")\r\nbg_2 = pygame.image.load(r\"C:\\Users\\Oli\\Documents\\Python_Game_Devs\\starry_night.png\")\r\n\r\n\r\n\r\ndef draw_scoreboard(count):\r\n font=pygame.font.SysFont(None,40)\r\n text=font.render(\"Score \"+str(count),True,white)\r\n gameDisplay.blit(text,(20,20))\r\n\r\n\r\ndef set_player_icon(x,y):\r\n gameDisplay.blit(player_img,(x,y))\r\n \r\n\r\ndef insert_obstacle(obstaclex,obstacley,obstaclew,obstacleh,color):\r\n pygame.draw.rect(gameDisplay,color,[obstaclex,obstacley,obstaclew,obstacleh])\r\n\r\n# Create the 'alien'\r\ndef insert_prize(obstaclex,obstacley,obstaclew,obstacleh,color):\r\n pygame.draw.circle(gameDisplay, color, [obstaclex, obstacley], 40)\r\n\r\n# Function for creating general text\r\ndef text_objects(text,font):\r\n textSurface=font.render(text, True, red)\r\n return textSurface, textSurface.get_rect()\r\n\r\n# Used for when users crash\r\ndef message_display(text):\r\n largeText=pygame.font.Font('freesansbold.ttf',115)\r\n TextSurf, TextRect=text_objects(text, largeText)\r\n TextRect.center=((display_width/2),(display_height/2))\r\n 
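# blit the rendered text at the centered rect computed above\r\n    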
gameDisplay.blit(TextSurf, TextRect)\r\n\r\n    pygame.display.update()\r\n\r\n    time.sleep(2)\r\n\r\n    game_loop()\r\n\r\n# Call when users crash into an obstacle\r\ndef crash():\r\n    message_display('You Crashed')\r\n\r\n# For now this is just a placeholder. It creates a lame white box with red writing - not an input box.\r\ndef make_input_box():\r\n\r\n    pygame.draw.rect(gameDisplay,black,[200,200,400,250], 8)\r\n    pygame.draw.rect(gameDisplay,white,[200,200,400,250])\r\n    pygame.display.update()\r\n\r\n    text = 'Type your example here:'\r\n    largeText=pygame.font.Font('freesansbold.ttf',20)\r\n    TextSurf, TextRect=text_objects(text, largeText)\r\n    TextRect.center=((display_width/2 - 65),(display_height/2-70))\r\n    gameDisplay.blit(TextSurf, TextRect)\r\n\r\n    pygame.display.update()\r\n\r\n    time.sleep(2)\r\n    game_loop()\r\n\r\n\r\n# This makes the rolling background. As one background image lowers, the other follows. When the first reaches\r\n# the end of the display screen it returns to its starting position. This cycle is continuous. \r\ndef update_background(bg_1_xpos, bg_2_xpos, bg_2, display_height):\r\n    if bg_1_xpos > display_height:\r\n        bg_1_xpos = 0\r\n        bg_2_xpos = bg_1_xpos - bg_2.get_size()[1]\r\n    else:\r\n        bg_1_xpos += 5\r\n        bg_2_xpos = bg_1_xpos - bg_2.get_size()[1]\r\n    return bg_1_xpos, bg_2_xpos\r\n\r\n\r\n# Function for producing a crash when the player hits an obstacle\r\ndef check_obstacle_crash(player_x, player_y, obstacle_startx, obstacle_starty,\\\r\n                         obstacle_height, obstacle_width, player_width):\r\n\r\n    # If the obstacle's y extent overlaps the player's y position\r\n    if player_y < obstacle_starty + obstacle_height:\r\n        print('y crossover')\r\n\r\n        # if the player position overlaps with the obstacle position - call the crash function\r\n        if player_x > obstacle_startx and player_x < obstacle_startx + obstacle_width\\\r\n           or player_x + player_width > obstacle_startx and player_x + player_width < obstacle_startx + obstacle_width:\r\n            print('x crossover')\r\n            crash()\r\n\r\n# Function that generates an input box when an 'alien' is hit\r\ndef check_prize_crash(player_x, player_y, prize_startx, prize_starty,\\\r\n                      prize_height, prize_width, player_width):\r\n\r\n    if player_y < prize_starty+prize_height:\r\n        print('y crossover')\r\n\r\n        if player_x > prize_startx and player_x < prize_startx + prize_width\\\r\n           or player_x + player_width > prize_startx and player_x + player_width < prize_startx + prize_width:\r\n            make_input_box()\r\n    \r\n\r\n\"\"\"\r\nBelow is the main game loop where all functions are called. \r\n\"\"\"\r\n\r\ndef game_loop():\r\n\r\n    # Define the background image's starting position\r\n    bg_1_xpos = 0\r\n    bg_2_xpos = 0 - bg_2.get_size()[1]\r\n\r\n    # define the players initial position\r\n    player_x = (display_width*0.48)\r\n    player_y = (display_height*0.79)\r\n\r\n    # define the player's initial movement - begins still so = 0\r\n    x_change = 0\r\n\r\n    # define the first obstacle's starting position and speed. Speed will be random between two integers.\r\n    obstacle_startx=random.randrange(200,display_width-200)\r\n    obstacle_starty=-600\r\n    obstacle_speed= random.randrange(5,10)\r\n    obstacle_width=100\r\n    obstacle_height=100\r\n\r\n    # define the first 'alien''s starting position and speed. 
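The 80 px margin in randrange keeps the 40 px-radius prize circle inside the horizontal bounds. 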
\r\n    prize_startx = random.randrange(80,display_width-80)\r\n    prize_starty = -300\r\n    prize_speed = random.randrange(5,7)\r\n    prize_width = 40\r\n    prize_height = 40\r\n\r\n    #Define player's current score\r\n    dodge_count=0\r\n\r\n    gameExit=False\r\n\r\n    while not gameExit:\r\n        \r\n        # Display background images at up-to-date x positions\r\n        gameDisplay.blit(bg_1, (0, bg_1_xpos))\r\n        gameDisplay.blit(bg_2, (0,bg_2_xpos))\r\n\r\n        # Receive user's movement commands\r\n        for event in pygame.event.get():\r\n            if event.type==pygame.QUIT:\r\n                pygame.quit()\r\n                quit()\r\n\r\n            if event.type==pygame.KEYDOWN:\r\n                if event.key==pygame.K_LEFT:\r\n                    x_change=-5\r\n                elif event.key==pygame.K_RIGHT:\r\n                    x_change=5\r\n\r\n            if event.type==pygame.KEYUP:\r\n                if event.key==pygame.K_LEFT or event.key==pygame.K_RIGHT:\r\n                    x_change=0\r\n        \r\n        # Update player's x-axis position based on movement commands. Display the player at this position.\r\n        player_x += x_change\r\n        set_player_icon(player_x, player_y)\r\n\r\n        # Display the obstacle and update its position on screen according to its pre-defined speed\r\n        insert_obstacle(obstacle_startx,obstacle_starty,obstacle_width,obstacle_height,red)\r\n        obstacle_speed = random.randrange(5,15)\r\n        obstacle_starty += obstacle_speed\r\n\r\n        # Display the 'alien' and update its position \r\n        insert_prize(prize_startx, prize_starty, prize_width, prize_height, green)\r\n        prize_speed = random.randrange(5,15)\r\n        prize_starty+=prize_speed\r\n\r\n        # Update the scoreboard depending on how many obstacles have been dodged.\r\n        # Scoring system will change when we're able to receive input messages.\r\n        draw_scoreboard(dodge_count)\r\n\r\n        # The player will crash if they stray beyond the display boundaries\r\n        if player_x > display_width-player_width or player_x < 0:\r\n            crash()\r\n\r\n\r\n        # Move the obstacle\r\n        # If the obstacle's current position is beyond the display height, then reset with a random x position \r\n        if obstacle_starty>display_height:\r\n            obstacle_starty=0-obstacle_height\r\n            obstacle_startx=random.randrange(0,display_width) # position of obstacle will be random within the boundary of the display\r\n            dodge_count+=1\r\n\r\n        # the player crashes if an obstacle is hit\r\n        check_obstacle_crash(player_x, player_y, obstacle_startx, obstacle_starty,\\\r\n                             obstacle_height, obstacle_width, player_width)\r\n        \r\n\r\n        # Move the 'alien' \r\n        if prize_starty>display_height:\r\n            prize_starty=0-prize_height\r\n            prize_startx=random.randrange(0,display_width) # position of prize will be random within the boundary of the display\r\n            dodge_count+=1\r\n\r\n        # Generate an input box when 'alien' is hit\r\n        check_prize_crash(player_x, player_y, prize_startx, prize_starty,\\\r\n                          prize_height, prize_width, player_width)\r\n\r\n\r\n        #Update the background image position to ensure a rolling background\r\n        bg_1_xpos, bg_2_xpos = update_background(bg_1_xpos, bg_2_xpos, bg_2, display_height) \r\n\r\n        pygame.display.update()\r\n        clock.tick(80)\r\n        \r\n\r\n\r\n# Run the game loop\r\ngame_loop()\r\npygame.quit()\r\nquit()\r\n","sub_path":"space_game.py","file_name":"space_game.py","file_ext":"py","file_size_in_byte":8439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"79863935","text":"from math import sin, cos, pow, tan, log\nfrom pprint import pprint\nimport dill as pickle\n\n\nassociativity = {\n    'x' : 'right',\n    'π' : 'right',\n    'sin' : 'right',\n    'cos' : 'right',\n    'tan' : 'right',\n    'log' : 'right',\n    '!' 
: 'left',\n '^' : 'right',\n '*' : 'left',\n '/' : 'left',\n '+' : 'left',\n '-' : 'left'\n}\n\nprecedence = {\n 'x' : 1,\n 'π' : 1,\n 'sin' : 2,\n 'cos' : 2,\n 'tan' : 2,\n 'log' : 2,\n '!' : 2,\n '^' : 3,\n '*' : 3,\n '/' : 3,\n '+' : 4,\n '-' : 4\n}\n\nop_type = {\n 'x' : 'variable',\n 'π' : 'constant',\n 'sin' : 'unary',\n 'cos' : 'unary',\n 'tan' : 'unary',\n 'log' : 'unary',\n '!' : 'unary',\n '^' : 'binary',\n '*' : 'binary',\n '/' : 'binary',\n '+' : 'binary',\n '-' : 'binary'\n}\n\noperators = {\n 'x' : lambda x: x,\n 'π' : lambda : 3.141592653589793,\n 'sin' : lambda x: sin(x),\n 'cos' : lambda x: cos(x),\n 'tan' : lambda x : tan(x),\n 'log' : lambda x : log(x),\n '!' : (lambda f: (lambda x: f(lambda v: x(x)(v)))(lambda x: f(lambda v: x(x)(v))))(lambda f: (lambda i: 1 if (i == 0) else i * f(i - 1))),\n '^' : lambda x, y: pow(x, y),\n '*' : lambda x, y: x * y,\n '/' : lambda x, y: x / y,\n '+' : lambda x, y: x + y,\n '-' : lambda x, y: x - y\n}\n\n\nif __name__ == '__main__':\n data = {}\n for token in operators:\n data[token] = {\n 'func' : operators[token],\n 'associativity' : associativity[token],\n 'precedence' : precedence[token],\n 'type' : op_type[token]\n }\n with open('opdata', 'wb') as file:\n pickle.dump(data, file)\n with open('opdata', 'rb') as file:\n pprint(pickle.load(file))\n","sub_path":"opgenerator.py","file_name":"opgenerator.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"278484142","text":"from PyQt5.QtGui import QIcon\nfrom piece import Piece\n\n\ndef set_val(parent):\n parent.setWindowTitle('Mensch Game')\n parent.setWindowIcon(QIcon('ressource/dice_6.png'))\n parent.setGeometry(200, 40, 956, 763)\n\n parent.bases = {\n 'yellow': [parent.base_yellow_0, parent.base_yellow_1, parent.base_yellow_2, parent.base_yellow_3],\n 'red': [parent.base_red_0, parent.base_red_1, parent.base_red_2, parent.base_red_3],\n 'blue': [parent.base_blue_0, parent.base_blue_1, parent.base_blue_2, parent.base_blue_3],\n 'green': [parent.base_green_0, parent.base_green_1, parent.base_green_2, parent.base_green_3]\n }\n\n parent.btn_list = [parent.add_player, parent.start_game, parent.new_game, parent.exit_game, parent.roll_dice]\n\n parent.color_code = {\n 'yellow': '#F8D823',\n 'red': '#EF0B0E',\n 'blue': '#1334C4',\n 'green': '#297C29'\n }\n\n parent.roll_nums = {\n 1: 'ressource/dice_1.png',\n 2: 'ressource/dice_2.png',\n 3: 'ressource/dice_3.png',\n 4: 'ressource/dice_4.png',\n 5: 'ressource/dice_5.png',\n 6: 'ressource/dice_6.png'\n }\n\n parent.players = {}\n\n parent.names = [parent.name_1, parent.name_2, parent.name_3, parent.name_4]\n\n parent.positions = {\n 'yellow': [parent.start_yellow, parent.yellow_1, parent.yellow_2, parent.yellow_3, parent.yellow_4,\n parent.yellow_5, parent.yellow_6, parent.yellow_7, parent.yellow_8, parent.yellow_9,\n parent.start_red, parent.red_1, parent.red_2, parent.red_3, parent.red_4, parent.red_5, parent.red_6,\n parent.red_7, parent.red_8, parent.red_9, parent.start_blue, parent.blue_1, parent.blue_2,\n parent.blue_3, parent.blue_4, parent.blue_5, parent.blue_6, parent.blue_7, parent.blue_8,\n parent.blue_9, parent.start_green, parent.green_1, parent.green_2, parent.green_3, parent.green_4,\n parent.green_5, parent.green_6, parent.green_7, parent.green_8, parent.green_9, parent.home_yellow_0,\n parent.home_yellow_1, parent.home_yellow_2, parent.home_yellow_3],\n 'red': [parent.start_red, parent.red_1, parent.red_2, parent.red_3, 
parent.red_4, parent.red_5, parent.red_6,\n parent.red_7, parent.red_8, parent.red_9, parent.start_blue, parent.blue_1, parent.blue_2,\n parent.blue_3, parent.blue_4, parent.blue_5, parent.blue_6, parent.blue_7, parent.blue_8,\n parent.blue_9, parent.start_green, parent.green_1, parent.green_2, parent.green_3, parent.green_4,\n parent.green_5, parent.green_6, parent.green_7, parent.green_8, parent.green_9, parent.start_yellow,\n parent.yellow_1, parent.yellow_2, parent.yellow_3, parent.yellow_4, parent.yellow_5, parent.yellow_6,\n parent.yellow_7, parent.yellow_8, parent.yellow_9, parent.home_red_0, parent.home_red_1,\n parent.home_red_2, parent.home_red_3],\n 'blue': [parent.start_blue, parent.blue_1, parent.blue_2, parent.blue_3, parent.blue_4, parent.blue_5,\n parent.blue_6, parent.blue_7, parent.blue_8, parent.blue_9, parent.start_green, parent.green_1,\n parent.green_2, parent.green_3, parent.green_4, parent.green_5, parent.green_6, parent.green_7,\n parent.green_8, parent.green_9, parent.start_yellow, parent.yellow_1, parent.yellow_2, parent.yellow_3,\n parent.yellow_4, parent.yellow_5, parent.yellow_6, parent.yellow_7, parent.yellow_8, parent.yellow_9,\n parent.start_red, parent.red_1, parent.red_2, parent.red_3, parent.red_4, parent.red_5, parent.red_6,\n parent.red_7, parent.red_8, parent.red_9, parent.home_blue_0, parent.home_blue_1, parent.home_blue_2,\n parent.home_blue_3],\n 'green': [parent.start_green, parent.green_1,\n parent.green_2, parent.green_3, parent.green_4, parent.green_5, parent.green_6, parent.green_7,\n parent.green_8, parent.green_9, parent.start_yellow, parent.yellow_1, parent.yellow_2,\n parent.yellow_3,\n parent.yellow_4, parent.yellow_5, parent.yellow_6, parent.yellow_7, parent.yellow_8, parent.yellow_9,\n parent.start_red, parent.red_1, parent.red_2, parent.red_3, parent.red_4, parent.red_5, parent.red_6,\n parent.red_7, parent.red_8, parent.red_9, parent.start_blue, parent.blue_1, parent.blue_2,\n parent.blue_3, parent.blue_4, parent.blue_5, parent.blue_6, parent.blue_7, parent.blue_8,\n parent.blue_9, parent.home_green_0, parent.home_green_1, parent.home_green_2, parent.home_green_3]\n }\n\n parent.pieces = {\n 'yellow': [Piece(parent.main_board, 'yellow', parent.bases['yellow'], parent.positions['yellow']) for _ in range(4)],\n 'red': [Piece(parent.main_board, 'red', parent.bases['red'], parent.positions['red']) for _ in range(4)],\n 'blue': [Piece(parent.main_board, 'blue', parent.bases['blue'], parent.positions['blue']) for _ in range(4)],\n 'green': [Piece(parent.main_board, 'green', parent.bases['green'], parent.positions['green']) for _ in range(4)]\n }\n","sub_path":"set_values.py","file_name":"set_values.py","file_ext":"py","file_size_in_byte":5242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"243122083","text":"from util import HttpRequestsUtil, DateTimeUtil\nfrom config import Env_IP\n\nenv_ip = Env_IP.api_test_env\nurl = env_ip + '/visit/shelves'\n\n\ndef do_get(token, store_sieble_uid, visit_uid, visit_date):\n params = {\n 'storeSiebleUid': store_sieble_uid,\n 'visitUid': visit_uid,\n 'visitDate': visit_date\n }\n return HttpRequestsUtil.do_get(url, token, params)\n\n\ndef do_put(token, visit_uid, action_type, uid=None):\n stamp = DateTimeUtil.get_time_stamp()\n basic = {\n 'errorInfo': {\n 'result': '单张',\n 'msg': '--',\n 'rePhotos': ['']\n },\n 'shelvesType': 'G_A_A_42',\n 'photoDescribe': '陈列描述123',\n 'picType': 'shelves_photo_pic_type_1',\n 'category': 'A_BDS_C_2',\n 'photo': 
'96798385-0df6-40ee-af34-dec242a5edac' + str(stamp) + '.jpeg'\n }\n free = {\n 'category': 'A_BDS_C_1',\n 'endDate': 1563897600000,\n 'photoDescribe': '空',\n 'startDate': 1563206400000,\n 'shelvesType': 'G_A_A_40',\n 'photo': 'd9cf5584-7ece-45ea-a182-c7d46113d721' + str(stamp) + '.jpeg',\n 'picType': 'shelves_photo_pic_type_2',\n 'errorInfo': {\n 'msg': '--',\n 'rePhotos': [''],\n 'result': '单张'\n }\n }\n paid = {\n 'category': 'A_BDS_C_10',\n 'quantity': 'A_FS_C_15',\n 'endDate': 1564502400000,\n 'photoDescribe': '空调的',\n 'startDate': 1563206400000,\n 'shelvesType': 'G_A_A_44',\n 'photo': '54b7ade8-72f9-42c1-a23c-87418f05b995' + str(stamp) + '.jpeg',\n 'picType': 'shelves_photo_pic_ty',\n 'errorInfo': {\n 'msg': '--',\n 'rePhotos': [''],\n 'result': '单张'\n },\n 'rowQuantity': 2\n }\n if action_type == 1:\n shelves = [basic, free, paid]\n params = {\n 'shelvesList': shelves,\n 'visitUid': str(visit_uid)\n }\n elif action_type == 2:\n basic['uid'] = uid['basic']\n basic['photoDescribe'] = '陈列描述new'\n free['uid'] = uid['free']\n shelves = [basic, free, paid]\n params = {\n 'shelvesList': shelves,\n 'visitUid': str(visit_uid)\n }\n elif action_type == 3:\n basic['uid'] = uid['basic']\n shelves = [basic]\n params = {\n 'shelvesList': shelves,\n 'visitUid': str(visit_uid)\n }\n else:\n params = {\n 'shelvesList': [],\n 'visitUid': str(visit_uid)\n }\n return HttpRequestsUtil.do_put(url, params, token)\n","sub_path":"api/visit/shelves.py","file_name":"shelves.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"152043298","text":"\"\"\"\nThis sample shows, how to create an XML configuration file \nfor a video capture device.\n\"\"\"\n\nimport ctypes\nimport tisgrabber as tis\n\nic = ctypes.cdll.LoadLibrary(\"./tisgrabber_x64.dll\")\ntis.declareFunctions(ic)\n\nic.IC_InitLibrary(0)\n\nhGrabber = ic.IC_ShowDeviceSelectionDialog(None)\n\nif(ic.IC_IsDevValid(hGrabber)):\n ic.IC_SaveDeviceStateToFile(hGrabber, tis.T(\"device.xml\"))\nelse:\n ic.IC_MsgBox(tis.T(\"No device opened\"), tis.T(\"Simple Live Video\"))\n\nic.IC_ReleaseGrabber(hGrabber)\n","sub_path":"Python/tisgrabber/samples/03-save-to-file.py","file_name":"03-save-to-file.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"353385376","text":"# Program to train the function for shooting\r\n\r\nimport win32api, win32con\r\nimport time\r\nimport pyscreenshot as ImageGrab\r\nimport threading\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\n#Make this where the blue botton is at the bottom\r\nblueXPos=790\r\nblueYPos=725\r\n\r\ndef startGame():\r\n\r\n #Click through the login screens\r\n time.sleep(3)\r\n click(blueXPos,blueYPos)\r\n print(\"Click 1\")\r\n time.sleep(2)\r\n click(blueXPos,blueYPos)\r\n print(\"Click 2\")\r\n time.sleep(5)\r\n click(blueXPos,blueYPos)\r\n print(\"Click 3\")\r\n \r\n#Click funtion for a click press \r\ndef click(x,y):\r\n win32api.SetCursorPos((x,y))\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)\r\n time.sleep(.01)\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)\r\n\r\n#Click and hold function\r\ndef clickTime(x,y,myTime):\r\n win32api.SetCursorPos((x,y))\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)\r\n time.sleep(myTime)\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)\r\n\r\n\r\n#Fire Arrow with pull time \r\ndef fireArrow(pullTime):\r\n\r\n 
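# Hold the left mouse button on the blue button for pullTime seconds;\r\n # the hold duration presumably controls how far the bow is drawn.\r\n 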
clickTime(blueXPos,blueYPos,pullTime)\r\n #time.sleep(3)\r\n #click(blueXPos,blueYPos)\r\n\r\n\r\n#Uses a 2D convolution on the image to find the arrowhead location, and records it\r\ndef calculateArrowPos(im):\r\n\r\n method = cv2.TM_SQDIFF\r\n\r\n # The original matched 'arrow.png' against the frame and then immediately\r\n # overwrote the result, so that dead first call is kept only as a comment.\r\n #small_image = cv2.imread('arrow.png',0)\r\n #result = cv2.matchTemplate(small_image, im, method)\r\n\r\n try:\r\n # match a fixed patch of the frame against the frame itself\r\n result = cv2.matchTemplate(im[:,:,0],im[10:40,50:80,0],method)\r\n mn,mx,mnLoc,mxLoc = cv2.minMaxLoc(result)\r\n MPx,MPy = mnLoc\r\n print(MPx)\r\n print(MPy)\r\n \r\n except:\r\n print(\"Failed to matchTemplate\") \r\n \r\n #use arrow tail instead\r\n #small_image = cv2.imread('tail.png',0)\r\n #result = cv2.matchTemplate(small_image, im, method)\r\n #mn,mx,mnLoc,mxLoc = cv2.minMaxLoc(result)\r\n #MPx,MPy = mnLoc\r\n\r\n #Save to CSV\r\n ######### DO I NEED TO WORRY ABOUT THE THREADS GETTING OUT OF ORDER AND SAVING WRONG ORDER IN CSV\r\n\r\n#Runs a 2D convolution on the image to find the object location\r\ndef determineObjectLocation():\r\n\r\n #Capture screen image\r\n large_image= np.array(ImageGrab.grab(bbox=(750,150,1250,1100)).convert('RGB')) # X1,Y1,X2,Y2\r\n\r\n # matchTemplate requires the template and the search image to have the\r\n # same number of channels, so match the grayscale template against a\r\n # grayscale copy of the capture\r\n large_gray = cv2.cvtColor(large_image, cv2.COLOR_RGB2GRAY)\r\n\r\n #Determine center of image (ideally this will be the \"sweet spot\")\r\n method = cv2.TM_SQDIFF\r\n\r\n # Read the images from the file\r\n\r\n small_image = cv2.imread('head.jpg',0)\r\n\r\n result = cv2.matchTemplate(small_image, large_gray, method)\r\n # We want the minimum squared difference\r\n mn,mx,mnLoc,mxLoc = cv2.minMaxLoc(result)\r\n\r\n\r\n # Draw the rectangle:\r\n # Extract the coordinates of our best match\r\n MPx,MPy = mnLoc\r\n print(MPy)\r\n\r\n #Ranges determine the size of the box above dude's head that we want to search\r\n x_range = 80 \r\n y_range = 80 \r\n\r\n patch = large_gray[MPy-y_range:MPy, MPx:MPx+x_range]\r\n cv2.imshow('Patch',patch)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n # the detector parameters must be configured first and passed into\r\n # SimpleBlobDetector_create; the original created the detector before\r\n # setting the parameters, so they were never applied\r\n params = cv2.SimpleBlobDetector_Params()\r\n params.blobColor = 255\r\n params.filterByColor = True\r\n detector = cv2.SimpleBlobDetector_create(params)\r\n blobs = detector.detect(patch)\r\n print(blobs)\r\n\r\n edges = cv2.Canny(patch, 100, 200)\r\n cv2.imshow('Edges',edges)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n edges = (edges == 255)\r\n maxY = 0\r\n\r\n for y in range(0,y_range):\r\n if(np.sum(edges[y,:]) > 0):\r\n maxY = y_range - y\r\n break\r\n\r\n center_y = (MPy + (MPy - maxY))/2\r\n\r\n center_x=1\r\n return (center_x,center_y)\r\n\r\n#Uses experimentally determined function for the arrow to calculate\r\n#how long we should click and hold\r\ndef determineArrowHoldTime(x,y):\r\n print(\"TODO\")\r\n return 1\r\n\r\n\r\ndef main():\r\n \r\n threads = []\r\n\r\n #time to ALT-Tab\r\n time.sleep(3)\r\n\r\n\r\n #startGame()\r\n\r\n # Run a 2D Convolution to determine X and Y pos \r\n (x,y) = determineObjectLocation()\r\n\r\n pullTime = determineArrowHoldTime(x,y)\r\n\r\n fireArrow(pullTime)\r\n print(\"Shots Fired *fire emoji*\")\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"137808411","text":"#! 
/usr/bin/python\n\nimport sys\nimport cPickle\nfrom collections import defaultdict\nimport sklearn.feature_extraction as skfe\n\nfrom Node import *\nfrom featuresBase import FeatureMap, prepareDescendents\n\nfrom reordererState import ReordererState\n\ncutoffCount = 100\n\ndef processDepSentence(sid, sent, acc):\n\t# Note: Accurate for Bigram features only\n\tprint >> sys.stderr, sid\n\n\ttrFM, taFM, reFM, pfFM = acc['transitionFeatureMap'], acc['targetFeatureMap'], acc['readoutFeatureMap'], acc['pathFeatureMap']\n\tnodes, root = createNodes(sent, sid)\n\tprepareDescendents(root)\n\treorderedNodes = sorted(nodes, key = lambda node: node.hint)\n\tm = len(reorderedNodes)\n\tstate = ReordererState((nodes, root))\n\tprevNode = None\n\tnextNode = None\n\tfor i in xrange(m):\n\t\tfor candidateNode in nodes:\n\t\t\tif candidateNode == prevNode:\n\t\t\t\tcontinue\n\t\t\tif candidateNode == reorderedNodes[i]:\n\t\t\t\tnextNode = candidateNode\n\t\t\t\tnextState = state.successorToNode(candidateNode)\n\t\t\ttrF = state.transitionFeaturesToNode(candidateNode)\n\t\t\tfor k, v in trF:\n\t\t\t\ttrFM.fit_feature(k, v)\n\t\t\ttaF = state.targetFeaturesToNode(candidateNode)\n\t\t\tfor k, v in taF:\n\t\t\t\ttaFM.fit_feature(k, v)\n\t\t\treF = state.readoutFeaturesToNode(candidateNode)\n\t\t\tfor k, v in reF:\n\t\t\t\treFM.fit_feature(k, v)\n\t\t\tpfFL = state.pathFeaturesToNode(candidateNode)\n\t\t\tfor pfF in pfFL:\n\t\t\t\tfor k, v in pfF:\n\t\t\t\t\tpfFM.fit_feature(k, v)\n\t\tprevNode = nextNode\n\t\tstate = nextState\t\n\ndef main():\n\targn = len(sys.argv)\n\tif argn != 2:\n\t\tusage()\n\t\tsys.exit(-1)\n\toutF = open(sys.argv[1], 'wb')\n\n\tacc = {'transitionFeatureMap' : FeatureMap(), 'targetFeatureMap' : FeatureMap(), 'readoutFeatureMap' : FeatureMap(), 'pathFeatureMap' : FeatureMap()}\n\tfor fm in acc.itervalues():\n\t\tfm.fit()\n\t\n\tfor i, sent in enumerate(readDepSentences(sys.stdin)):\n\t\tprocessDepSentence(i+1, sent, acc)\n\t\n\trv = {}\n\tfor k, fm in acc.iteritems():\n\t\tffm = fm.get_filtered_feature_map(cutoffCount)\n\t\trv[k] = ffm\n\tcPickle.dump(rv, outF, protocol=2)\n\t\ndef usage():\n\tprint >> sys.stderr, 'Usage:'\n\tprint >> sys.stderr, sys.argv[0], 'out-feature-maps-file'\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"simpleNLMR/preprocessTrain_path.py","file_name":"preprocessTrain_path.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"74470025","text":"import pandas as pd\nimport os \nimport cv2\nimport numpy as np\nimport re\nfrom collections import Counter \nfrom ffpyplayer.player import MediaPlayer\n\n#Tokenize and remove stop words\nfrom nltk.tokenize import sent_tokenize, word_tokenize \nfrom nltk.corpus import stopwords\n\n#Google speech to text\nimport wave\nfrom typing import Tuple\nfrom google.cloud import speech\nfrom google.cloud.speech import enums\nfrom google.cloud.speech import types\n\n#Threading\nimport threading\nimport time\n\n#Tesseract\nimport pytesseract\n\n\n#http://sebastiandahlgren.se/2014/06/27/running-a-method-as-a-background-thread-in-python/\n\ndef extract_text_frame(img):\n #Crop the bottom part\n #run tesseract on top of that\n height, width, channel = img.shape\n img = img[int(6/8*height):height,0:width]\n lower = np.array([0,0,205])\n upper = np.array([179,255,255])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, lower, upper)\n res = cv2.bitwise_and(img,img, mask= mask)\n\n custom_config = r'--oem 3 --psm 6'\n 
cv2.imwrite(\"temp.jpg\",res)\n try:\n print(pytesseract.image_to_string(res ))#, config=custom_config))\n except Exception as ex:\n print(ex)\n\n\ndef PlayVideo(video_path): \n video=cv2.VideoCapture(video_path)\n player = MediaPlayer(video_path)\n count = 0 \n while True:\n count = count + 1\n grabbed, frame=video.read()\n audio_frame, val = player.get_frame()\n \n if count % 15 == 0:\n th = threading.Thread(target=extract_text_frame, args=(frame,))\n th.start()\n\n if not grabbed:\n print(\"End of video\")\n break\n if cv2.waitKey(45) & 0xFF == ord(\"q\"):\n break\n cv2.imshow(\"Video\", frame)\n if val != 'eof' and audio_frame is not None:\n #audio\n img, t = audio_frame\n\n if count % 15 == 0:\n th.join()\n\n video.release()\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n \n video_path = \"1.mp4\"\n audio_path = \"audio.wav\"\n encoding = 'LINEAR16'\n lang ='hi-IN'\n subtitle_path = \"subtitle.txt\"\n #Extract audio from video using ffmpeg\n runVideo = PlayVideo(video_path)\n \n","sub_path":"textract.py","file_name":"textract.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"85856237","text":"import json\nimport importlib\nfrom definitions import DASHBOARD\nfrom utils import assignmentString\n\n\ndef block(name, contents):\n res = \"\"\n res += f'\\n {name} ' + '{'\n for key, val in contents.items():\n res += assignmentString(key, val)\n return res + '\\n } '\n\n\ndef block_list(name, contents):\n res = \"\"\n for elm in contents:\n res += block(name, elm)\n return res\n\n\n##################\ndef block_elements(name, content, converter):\n res =\"\"\n res += f'\\n {name} ' + '{'\n for key, val in content.items():\n res += converter(key, val)\n return res + '\\n } '\n\n\n################\ndef block_list_elements(name, content, converter):\n res = \"\"\n for elm in content:\n res += block_elements(name, elm, converter)\n return res\n\n\ndef template_variables(key, val):\n res = \"\"\n for elm in val:\n res += block(\"template_variable\", elm) \n return res\n\n\ndef template_variable_preset(key, val):\n res = \"\"\n for k, v in val.items():\n if k == \"name\":\n res += assignmentString(k, v)\n elif k == \"template_variables\":\n # res += template_variables(k, v)\n res += block_list_elements(\"template_variable\", v, assignmentString)\n return res\n\n\ndef convertSort(content):\n res = \"\"\n if isinstance(content,str):\n res += assignmentString(\"sort\", content)\n else:\n res += block_list(\"sort\", content)\n return res\n\n\ndef group_by_block(content):\n res = f'\\n group_by ' + '{'\n for k, v in content.items():\n if k in [\"sort\"]:\n res += block(\"sort_query\", v)\n elif k in [\"sort_query\"]:\n res += block(k,v)\n else:\n res += assignmentString(k,v)\n return f'{res} \\n }} '\n\n \ndef group_by_blocks(content):\n res = \"\"\n for elm in content:\n res += group_by_block(elm)\n return res\n\n\n######################\ndef group_by_schema(key, val):\n res = \"\"\n if key in [\"sort\"]:\n res += block_elements(\"sort_query\", val, assignmentString)\n elif key in [\"sort_query\"]:\n res += block_elements(\"sort_query\", val, assignmentString)\n else:\n res += assignmentString(key, val)\n return res\n\n\ndef log_query_block(content):\n res = f'\\n log_query ' + '{'\n for k, v in content.items():\n if k in [\"search\"]:\n res += assignmentString(\"search_query\",v[\"query\"])\n elif k in [\"compute\"]:\n res += block(\"compute_query\",v)\n elif k in [\"compute_query\"]:\n res 
+= block(k,v)\n elif k in [\"multi_compute\"]:\n pass\n elif k in [\"group_by\"]:\n res += group_by_blocks(v)\n else:\n res += assignmentString(k,v)\n return f'{res} \\n }} '\n\n\ndef rum_query_block(content):\n res = f'\\n rum_query ' + '{'\n for k, v in content.items():\n if k in [\"search\"]:\n res += assignmentString(\"search_query\",v[\"query\"])\n elif k in [\"compute\"]:\n res += block(\"compute_query\",v)\n elif k in [\"compute_query\"]:\n res += block(k,v)\n elif k in [\"multi_compute\"]:\n pass\n elif k in [\"group_by\"]:\n res += group_by_blocks(v)\n else:\n res += assignmentString(k,v)\n return f'{res} \\n }} '\n\n\ndef fill_block(content):\n res = f'\\n fill ' + '{'\n for k,v in content.items():\n if k in [\"apm_query\"]:\n pass\n elif k in [\"log_query\"]:\n res += log_query_block(v)\n elif k in [\"process_query\"]:\n pass\n elif k in [\"rum_query\"]:\n res += rum_query_block(v)\n elif k in [\"security_query\"]:\n pass\n else:\n res += assignmentString(k,v)\n return f'{res} \\n }} '\n\n \ndef requests_blocks(content):\n res = f'\\n request ' + '{'\n for k, v in content.items():\n if k in [\"style\"]:\n res += block(k,v)\n elif k in [\"metadata\",\"conditional_formats\"]:\n res += block_list(k,v)\n elif k in [\"log_query\"]:\n res += log_query_block(v)\n elif k in [\"rum_query\"]:\n res += rum_query_block(v)\n elif k in [\"fill\"]:\n res += fill_block(v)\n elif k in [\"profile_metrics_query\"]:\n pass ## Unsupported block type\n else:\n res += assignmentString(k,v)\n return res + '\\n } '\n\n\n##################\ndef request_block_nested(key, val):\n res = \"\"\n if key in [\"apm_query\"]:\n pass\n # elif key in [\"log_query\"]:\n # res += log_query_block(val)\n # elif key in [\"process_query\"]:\n # pass\n # elif key in [\"rum_query\"]:\n # res += rum_query_block(val)\n # elif key in [\"security_query\"]:\n # pass\n # elif key in [\"sort\"]:\n # res += block(\"sort_query\", val)\n # elif key in [\"sort_query\"]:\n # res += block(key,val)\n elif key in [\"search\"]:\n res += assignmentString(\"search_query\",val[\"query\"])\n elif key in [\"compute\"]:\n res += block(\"compute_query\",val)\n elif key in [\"compute_query\"]:\n res += block(key,val)\n elif key in [\"multi_compute\"]:\n pass\n elif key in [\"group_by\"]:\n # res += group_by_blocks(val)\n res += block_list_elements(\"group_by\", val, group_by_schema)\n else:\n res += assignmentString(key,val)\n return res\n\n\n###################\ndef request_block_converter(key, val):\n res = \"\"\n if key in [\"style\"]:\n # res += block(key,val)\n res += block_elements(key, val, assignmentString)\n elif key in [\"process_query\"]:\n res += block_elements(key, val, assignmentString)\n elif key in [\"apm_stats_query\"]:\n res += block_elements(key, val, assignmentString)\n elif key in [\"metadata\",\"conditional_formats\"]:\n # res += block_list(key,val)\n res += block_list_elements(key, val, assignmentString)\n elif key in [\"log_query\"]:\n # res += log_query_block(val)\n res += block_elements(\"log_query\", val, request_block_nested)\n elif key in [\"rum_query\"]:\n res += block_elements(\"rum_query\", val, request_block_nested)\n elif key in [\"apm_query\"]:\n res += block_elements(\"apm_query\", val, request_block_nested)\n elif key in [\"rum_query\"]:\n # res += rum_query_block(val)\n res += block_elements(\"rum_query\", val, request_block_nested)\n elif key in [\"security_query\"]:\n res += block_elements(\"security_query\", val, request_block_nested)\n elif key in [\"network_query\"]:\n res += 
block_elements(\"network_query\", val, request_block_nested)\n elif key in [\"fill\"]:\n # res += fill_block(val)\n # res += block_elements(\"fill\", val, request_block_nested)\n res += block_elements(\"fill\", val, request_block_converter)\n elif key in [\"size\"]:\n res += block_elements(\"size\", val, request_block_converter)\n elif key in [\"profile_metrics_query\"]:\n pass ## Unsupported block type\n else:\n res += assignmentString(key,val)\n return res\n\n\ndef requests(contents):\n res = \"\"\n for elm in contents:\n res += requests_blocks(elm)\n return res\n\n\ndef widgets_definition(contents):\n res = \"\"\n for k , v in contents.items():\n if k in [\"type\",\"legend_layout\",\"legend_columns\",\"global_time_target\",\"reflow_type\"]:\n pass\n elif k == \"custom_links\":\n # res += block_list(\"custom_link\",v)\n res += block_list_elements(\"custom_link\", v, assignmentString)\n elif k == \"requests\":\n # res += requests(v) if isinstance(v,list) else requests_blocks(v)\n res += block_list_elements(\"request\", v, request_block_converter) if isinstance(v,list) else block_elements(\"request\", v, request_block_converter)\n elif k == \"widgets\":\n # res += widgets(k,v)\n res += block_list_elements(\"widget\", v, widget_block)\n elif k == \"sort\":\n convertSort(v)\n elif k in [\"event\", \"right_yaxis\", \"widget_layout\", \"xaxis\", \"yaxis\", \"style\"]:\n res += block_elements(k, v, assignmentString) \n # elif k == \"event\":\n # # res += block(\"event\", v)\n # res += block_elements(\"event\", v, assignmentString)\n elif k == \"events\":\n # res += block_list(\"event\",v)\n res += block_list_elements(\"event\", v, assignmentString)\n elif k == \"markers\":\n # res += block_list(\"marker\",v)\n res += block_list_elements(\"marker\", v, assignmentString)\n # elif k == \"right_yaxis\":\n # # res += block(\"right_yaxis\", v)\n # res += block_elements(\"right_yaxis\", v, assignmentString)\n # elif k == \"widget_layout\":\n # # res += block(\"widget_layout\", v)\n # res += block_elements(\"widget_layout\", v, assignmentString)\n # elif k == \"xaxis\":\n # # res += block(\"xaxis\", v)\n # res += block_elements(\"xaxis\", v, assignmentString)\n # elif k == \"yaxis\":\n # # res += block(\"yaxis\", v)\n # res += block_elements(\"yaxis\", v, assignmentString)\n # elif k == \"style\":\n # # res += block(\"style\", v)\n # res += block_elements(\"style\", v, assignmentString)\n elif k == \"time\":\n res += assignmentString(\"live_span\",v[\"live_span\"])\n elif k == \"autoscale\":\n print(v)\n res += assignmentString(k,v)\n else:\n res += assignmentString(k,v)\n definition_type = \"service_level_objective\" if contents[\"type\"] == \"slo\" else contents[\"type\"]\n return f'\\n {definition_type}_definition ' + '{ ' + f'{res} \\n' + '}'\n\n\ndef widgets_blocks(val):\n res = f'\\n widget ' + '{'\n for k, v in val.items():\n if k in [\"id\"]:\n pass\n elif k == \"definition\":\n res += widgets_definition(v)\n elif k == \"layout\":\n res += block(\"widget_layout\", v)\n return res + '\\n } '\n\n\ndef widgets(key, val):\n res = \"\"\n for elm in val:\n res += widgets_blocks(elm)\n return res\n\n\n###########################\ndef widget_block(key, val):\n res = \"\"\n if key in [\"id\"]:\n pass\n elif key == \"definition\":\n res += widgets_definition(val)\n elif key == \"layout\":\n # res += block(\"widget_layout\", val)\n res += block_elements(\"widget_layout\", val, assignmentString)\n return res\n\n\ndef generateDashboardTerraformCode(dashboardData):\n res = \"\"\n for key, val in 
dashboardData.items():\n print(key, val)\n if key in [\"id\",\"reflow_type\"]:\n pass\n elif key == \"widgets\":\n # res += widgets(key, val)\n res += block_list_elements(\"widget\", val, widget_block)\n elif key == \"template_variables\":\n # res += template_variables(key, val)\n res += block_list_elements(\"template_variable\", val, assignmentString)\n elif key == \"template_variable_presets\":\n res += block_list_elements(\"template_variable_preset\", val, template_variable_preset)\n else:\n res += assignmentString(key,val)\n return res\n\n\ndef main():\n # with open(\"../project8/dashboard.json\") as f:\n # with open(\"../project8/mongodb.json\") as f:\n # with open(\"../project8/mongo.json\") as f:\n # with open(\"../project8/business.json\") as f:\n # with open(\"../project8/uptime.json\") as f:\n # with open(\"../project8/alb_performance.json\") as f:\n # with open(\"../project8/status_errors.json\") as f:\n # with open(\"../project8/status_errors.json\") as f:\n # with open(\"../project8/response_time.json\") as f:\n # with open(\"../project8/aws_elb.json\") as f:\n # with open(\"../project8/jvm_metrics.json\") as f:\n # with open(\"../project8/k8s_pods.json\") as f:\n # with open(\"../project8/aws_ec2.json\") as f:\n # with open(\"../project8/jvm_overview.json\") as f:\n # with open(\"../project8/alb_cloned.json\") as f:\n # with open(\"../project8/k8s_services.json\") as f:\n # with open(\"../project8/aws_ec2_cloned.json\") as f:\n # with open(\"../project8/trace_analytics.json\") as f:\n # with open(\"../project8/system_metrics.json\") as f:\n # with open(\"../project8/aws_mq.json\") as f:\n # with open(\"../project8/aws_autoscaling.json\") as f:\n # with open(\"../project8/aws_billing.json\") as f:\n # with open(\"../project8/aws_s3.json\") as f:\n # with open(\"../project8/azure_api.json\") as f:\n # with open(\"../project8/azure_overview.json\") as f:\n # with open(\"../project8/aws_document.json\") as f:\n # with open(\"../project8/redis.json\") as f:\n # with open(\"../project8/aws_kinesis.json\") as f:\n # with open(\"../project8/aws_kinesis_firehose.json\") as f:\n # with open(\"../project8/aws_lambda.json\") as f:\n # with open(\"../project8/aws_rds.json\") as f:\n # with open(\"../project8/aws_sqs.json\") as f:\n # with open(\"../project8/aws_step_functions.json\") as f:\n # with open(\"../project8/aws_trusted_advisor.json\") as f:\n # with open(\"../project8/azure_app_service.json\") as f:\n # with open(\"../project8/azure_batch.json\") as f:\n # with open(\"../project8/azure_cosmosdb.json\") as f:\n # with open(\"../project8/azur_dbmsql.json\") as f:\n # with open(\"../project8/azure_dbpostgres.json\") as f:\n # with open(\"../project8/azure_event_hub.json\") as f:\n # with open(\"../project8/azure_functions.json\") as f:\n # with open(\"../project8/azure_iot_hub.json\") as f:\n # with open(\"../project8/azure_loadbalancing.json\") as f:\n # with open(\"../project8/azure_logicapp.json\") as f:\n # with open(\"../project8/azure_overview#1.json\") as f:\n # with open(\"../project8/azure_databases.json\") as f:\n # with open(\"../project8/azure_usage.json\") as f:\n # with open(\"../project8/azure_vm.json\") as f:\n # with open(\"../project8/azure_vm_scale.json\") as f:\n # with open(\"../project8/azure_cont.json\") as f:\n # with open(\"../project8/azure_coredns.json\") as f:\n # with open(\"../project8/docker_overview.json\") as f:\n # with open(\"../project8/host_count.json\") as f:\n # with open(\"../project8/k8s_daemonset.json\") as f:\n # with 
open(\"../project8/k8s_deployment.json\") as f:\n # with open(\"../project8/k8s_replicaset.json\") as f:\n # with open(\"../project8/k8s_overview.json\") as f:\n # with open(\"../project8/run_errors.json\") as f:\n # with open(\"../project8/rum_mobile.json\") as f:\n # with open(\"../project8/system_diskio.json\") as f:\n # with open(\"../project8/system_networking.json\") as f:\n # with open(\"../project8/troubleshoot.json\") as f:\n with open(\"../project8/load_test.json\") as f:\n content = json.load(f)\n\n terraform_string = generateDashboardTerraformCode(content)\n terraform_string = \\\n f'resource \"datadog_dashboard\" \"test_dashboard\" {{\\n' + \\\n terraform_string + \\\n f'\\n}}'\n\n # print(terraform_string)\n\n # with open(\"dashboard.tf\",\"w\") as f:\n # with open(\"k8s_overview_dashboard.tf\",\"w\") as f:\n # with open(\"aws_elb_dashboard.tf\",\"w\") as f:\n # with open(\"load_test_dashboard_test.tf\",\"w\") as f:\n with open(\"test_dashboard.tf\",\"w\") as f:\n f.write(terraform_string)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"project9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"648981410","text":"# -*- coding: utf-8 -*-\n\nfrom .utils import versioned_reverse as reverse\nimport pytest\nfrom .utils import get\n\n\ndef get_detail(api_client, detail_pk, version='v1', data=None):\n detail_url = reverse('keywordset-detail', version=version, kwargs={'pk': detail_pk})\n return get(api_client, detail_url, data=data)\n\n\n@pytest.mark.django_db\ndef test_keywordset_excludes_hidden_keywords(api_client, keyword, keyword_set2):\n keyword.is_hidden = True\n keyword.save()\n keyword_set2.save()\n response = get_detail(api_client, keyword_set2.pk, data={'include': 'keywords'})\n assert response.status_code == 200\n for set_keyword in response.data.get('keywords'):\n assert set_keyword.get('id') != keyword.id\n\n\n@pytest.mark.django_db\ndef test_keywordset_does_not_exclude_non_hidden_keywords(api_client, keyword, keyword_set2):\n keyword.is_hidden = False\n keyword.save()\n keyword_set2.save()\n response = get_detail(api_client, keyword_set2.pk, data={'include': 'keywords'})\n assert response.status_code == 200\n found = False\n for set_keyword in response.data.get('keywords'):\n if set_keyword.get('id') == keyword.id:\n found = True\n assert found == True","sub_path":"events/tests/test_keywordset_get.py","file_name":"test_keywordset_get.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"526209707","text":"#!/usr/bin/python3\n\"\"\"\nPrototype: def text_indentation(text):\ntext must be a string, otherwise raise a TypeError\nexception with the message text must be a string\n\"\"\"\n\n\ndef text_indentation(text):\n \"\"\"\n prints 2 new lines after each of these characters: ., ? 
and :\n \"\"\"\n if type(text) is not str:\n raise TypeError(\"text must be a string\")\n for i in range(len(text)):\n if i > 0 and text[i] == ' ' and text[i - 1] in \".:?\":\n continue\n print(text[i], end='')\n if text[i] in \".:?\":\n print(\"\\n\")\n","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"583208625","text":"# 
-*- coding: utf-8 -*-\nfrom django.template import loader, RequestContext\nfrom django.http import HttpResponse\n\nfrom accounts.forms import register_form\n\n\ndef index(request):\n \"\"\"\n Homepage (index) view.\n Rendered when the user is not logged in.\n \"\"\"\n vt = loader.get_template(\"index.html\")\n c = RequestContext(\n request, {\n 'form': register_form(large_input=False)\n }\n\n )\n return HttpResponse(vt.render(c))\n\n","sub_path":"Bohoo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"88504792","text":"import math\nimport sys\nimport numpy as np\nimport cellprofiler\nimport cellprofiler.modules.overlayoutlines as overlay\nimport cellprofiler.modules.images as images\nimport cellprofiler.modules.metadata as metadata\nimport cellprofiler.modules.namesandtypes as namesandtypes\nimport cellprofiler.modules.identifyprimaryobjects as idprimary \nimport cellprofiler.modules.identifysecondaryobjects as idsecondary \nimport cellprofiler.modules.saveimages as save\nimport cellprofiler.modules.imagemath as imagemath\nimport cellprofiler.modules.identifyobjectsmanually as manual\nimport cellprofiler.modules.measureobjectsizeshape as size_shape_mod\nimport cv2 \nfrom matplotlib import pyplot as plt\n# np.nan is not a valid print threshold in modern NumPy; sys.maxsize keeps\n# the original intent of never truncating printed arrays\nnp.set_printoptions(threshold=sys.maxsize)\n\n#Environment Variables\n\nSIZE_RANGE_SETTING_TEXT = \"Typical diameter of objects, in pixel units (Min,Max)\"\nEXCLUDE_SIZE_SETTING_TEXT = \"Discard objects outside the diameter range?\"\nUN_INTENSITY = \"Intensity\"\nWA_INTENSITY = \"Intensity\"\nWANTS_COLOR = \"Color\"\nWANTS_GRAYSCALE = \"Grayscale\"\n\nM_PROPAGATION = \"Propagation\"\nM_WATERSHED_G = \"Watershed - Gradient\"\nM_WATERSHED_I = \"Watershed - Image\"\nM_DISTANCE_N = \"Distance - N\"\nM_DISTANCE_B = \"Distance - B\"\n\nO_ADD = \"Add\"\nO_SUBTRACT = \"Subtract\"\nO_DIFFERENCE = \"Absolute Difference\"\nO_MULTIPLY = \"Multiply\"\nO_DIVIDE = \"Divide\"\nO_AVERAGE = \"Average\"\nO_MINIMUM = \"Minimum\"\nO_MAXIMUM = \"Maximum\"\nO_INVERT = \"Invert\"\nO_COMPLEMENT = \"Complement\"\nO_LOG_TRANSFORM_LEGACY = \"Log transform (legacy)\"\nO_LOG_TRANSFORM = \"Log transform (base 2)\"\nO_NONE = \"None\"\n\nO_COMBINE = \"Combine\"\nO_OR = \"Or\"\nO_AND = \"And\"\nO_NOT = \"Not\"\nO_EQUALS = \"Equals\"\n\ndef cp_session_init(rna_file, cell_file, nuc_file): \n\n #INITIALIZE CP OBJECTS\n pipe, measurements, object_set = build_cp_objects()\n\n #BUILD CP IMAGE SET\n image_set, image_set_list = load_image_stack(nuc_file, \"Nucleus\", cell_file, \"CellBody\", rna_file, \"Fish\")\n\n #SAVE MODS\n save_nuc_mod = save_images(\"NucleusOverlay\", \"Nucleus_Out\")\n save_cell_mod = save_images(\"CellOverlay\", \"Cell_Out\")\n\n #BUILD WORKSPACE\n workspace = cp_workspace(pipe, measurements, object_set, image_set, save_nuc_mod, image_set_list)\n\n return workspace, measurements, object_set, pipe, save_nuc_mod, save_cell_mod\n\n\ndef build_cp_objects(): \n\n pipe = cellprofiler.pipeline.Pipeline()\n measurements = cellprofiler.measurement.Measurements()\n object_set = cellprofiler.object.ObjectSet()\n\n return pipe, measurements, object_set\n\n\ndef load_image_stack(nuc_path, nuc_image_name, cell_path, cell_image_name, rna_path, rna_image_name): \n\n #2 Channel Support \n \n #Read Nucleus Image\n nuc_image = cv2.imread(nuc_path)\n nuc_image = np.delete(nuc_image, np.s_[::2], 2)\n nuc_image = np.squeeze(nuc_image)\n nuc_image = cellprofiler.image.Image(image=nuc_image)\n image_set_list = 
cellprofiler.image.ImageSetList()\n image_set = image_set_list.get_image_set(0)\n\n #Read Cell Image\n cell_image = cv2.imread(cell_path)\n cell_image = np.delete(cell_image, np.s_[::2], 2)\n cell_image = np.squeeze(cell_image)\n cell_image = cellprofiler.image.Image(image=cell_image)\n\n rna_image = cv2.imread(rna_path)\n rna_image = np.delete(rna_image, np.s_[::2], 2)\n rna_image = np.squeeze(rna_image)\n rna_image = cellprofiler.image.Image(image=rna_image)\n\n\n\n image_set.add(nuc_image_name, nuc_image)\n image_set.add(cell_image_name, cell_image)\n image_set.add(rna_image_name, rna_image)\n\n\n return image_set, image_set_list\n\n\n\ndef id_primary_obj(manual_flag):\n\n if manual_flag: \n\n idp_mod = id_objects_manually()\n\n else: \n\n idp_mod = idprimary.IdentifyPrimaryObjects()\n\n #Settings\n idp_mod.x_name.value = \"Nucleus\"\n idp_mod.size_range = cellprofiler.setting.IntegerRange(\n SIZE_RANGE_SETTING_TEXT,\n (150, 500),\n minval=1)\n idp_mod.exclude_border_objects = cellprofiler.setting.Binary(\n \"Discard objects touching the border of the image?\",\n False)\n idp_mod.exclude_size = cellprofiler.setting.Binary(\n EXCLUDE_SIZE_SETTING_TEXT,\n True)\n\n #options: [UN_INTENSITY, UN_SHAPE, UN_NONE]\n idp_mod.unclump_method = cellprofiler.setting.Choice(\n 'Method to distinguish clumped objects',\n UN_INTENSITY)\n\n #options: [WA_INTENSITY, WA_SHAPE, WA_PROPAGATE, WA_NONE]\n idp_mod.watershed_method = cellprofiler.setting.Choice(\n 'Method to draw dividing lines between clumped objects',\n WA_INTENSITY)\n\n return idp_mod\n\ndef id_secondary_obj(manual_flag): \n\n\n ids_mod = idsecondary.IdentifySecondaryObjects()\n ids_mod.method = cellprofiler.setting.Choice(\n \"Select the method to identify the secondary objects\",\n [\n M_PROPAGATION,\n M_WATERSHED_G,\n M_WATERSHED_I,\n M_DISTANCE_N,\n M_DISTANCE_B\n ],\n M_PROPAGATION)\n ids_mod.regularization_factor = cellprofiler.setting.Float(\n \"Regularization factor\",\n .05,\n minval=0)\n\n\n #Inputs objects can come from the IdentifyPrimaryObjects module OR the IdentifyObjectsManually module\n\n #x_name is the input object\n\n if manual_flag: \n ids_mod.x_name.value = \"IdentifyObjectsManually\"\n else: \n ids_mod.x_name.value = \"IdentifyPrimaryObjects\"\n\n ids_mod.image_name = cellprofiler.setting.ImageNameSubscriber(\n \"Select the input image\",\n \"CellBody\")\n ids_mod.size_range = cellprofiler.setting.IntegerRange(\n SIZE_RANGE_SETTING_TEXT,\n (250, 750),\n minval=1)\n ids_mod.wants_discard_edge = cellprofiler.setting.Binary(\n \"Discard secondary objects touching the border of the image?\",\n True)\n ids_mod.wants_discard_primary = cellprofiler.setting.Binary(\n \"Discard the associated primary objects?\",\n True)\n # ids_mod.distance_to_dilate = cellprofiler.setting.Integer(\n # \"Number of pixels by which to expand the primary objects\",\n # 200,\n # minval=1)\n\n return ids_mod\n\n\ndef invert_image(): \n\n invert_mod = imagemath.ImageMath()\n\n\n invert_mod.images[0].image_name = cellprofiler.setting.ImageNameSubscriber(\n \"image_name\",\n \"CellBody\")\n\n\n invert_mod.operation = cellprofiler.setting.Choice(\n \"Operation\",\n [O_ADD, O_SUBTRACT, O_DIFFERENCE, O_MULTIPLY, O_DIVIDE, O_AVERAGE,\n O_MINIMUM, O_MAXIMUM, O_INVERT,\n O_LOG_TRANSFORM, O_LOG_TRANSFORM_LEGACY,\n O_AND, O_OR, O_NOT, O_EQUALS, O_NONE],\n O_INVERT)\n invert_mod.output_image_name = cellprofiler.setting.ImageNameProvider(\n \"Name the output image\", \"ImageAfterMath\")\n return invert_mod\n\n\n\n\n#image_subscriber is the image to overlay the 
outlines on\ndef overlay_outlines(object_subscriber, image_subscriber, output_image_name): \n\n outline_mod = overlay.OverlayOutlines()\n outline_mod.blank_image = cellprofiler.setting.Binary(\n \"Display outlines on a blank image?\",\n False)\n outline_mod.image_name = cellprofiler.setting.ImageNameSubscriber(\n \"objects_name\",\n image_subscriber)\n\n\n #module3.outlines[0].set_value(\"IdentifyPrimaryObjects\")\n\n outline_mod.outlines[0].objects_name = cellprofiler.setting.ObjectNameSubscriber(\n \"objects_name\",\n object_subscriber\n )\n outline_mod.outlines[0].color = cellprofiler.setting.Color(\n \"color\",\n \"Red\"\n )\n\n outline_mod.line_mode = cellprofiler.setting.Choice(\n \"How to outline\",\n [\"Inner\", \"Outer\", \"Thick\"],\n value=\"Inner\")\n outline_mod.output_image_name = cellprofiler.setting.ImageNameProvider(\n \"Name the output image\",\n output_image_name)\n outline_mod.wants_color = cellprofiler.setting.Choice(\n \"Outline display mode\",\n [WANTS_COLOR, WANTS_GRAYSCALE],\n WANTS_COLOR)\n\n return outline_mod\n\ndef object_size_shape(manual_flag): \n\n ss_mod = size_shape_mod.MeasureObjectSizeShape()\n\n #CONTROL FLOW FOR MANUAL VS AUTOMATIC\n\n ss_mod.object_groups[0].name.value = \"FilteredNuclei\"\n # if manual_flag == True: \n # ss_mod.object_groups[0].name.value = \"IdentifyObjectsManually\"\n # else: \n # ss_mod.object_groups[0].name.value = \"IdentifyPrimaryObjects\"\n\n ss_mod.add_object()\n\n ss_mod.object_groups[1].name.value = \"IdentifySecondaryObjects\"\n\n return ss_mod\n\n\ndef id_objects_manually(): \n\n manual_mod = manual.IdentifyObjectsManually()\n\n manual_mod.image_name = cellprofiler.setting.ImageNameSubscriber(\n \"Select the input image\",\n \"CellBody\")\n\n manual_mod.objects_name = cellprofiler.setting.ObjectNameProvider(\n\n \"Name the objects to be identified\", \"IdentifyObjectsManually\")\n\n return manual_mod\n\n\ndef save_images(image_to_save, file_name): \n\n SAVE_PATH = \"/home/clay\"\n\n save_mod = save.SaveImages()\n #save_mod.set_module_num(1)\n save_mod.save_image_or_figure.value = save.IF_IMAGE\n save_mod.image_name.value = image_to_save\n save_mod.file_name_method.value = save.FN_SINGLE_NAME\n save_mod.single_file_name.value = file_name\n save_mod.pathname.value = \"{}|{}\".format(cellprofiler.setting.ABSOLUTE_FOLDER_NAME, SAVE_PATH)\n save_mod.file_format.value = save.FF_TIFF\n save_mod.bit_depth.value = save.BIT_DEPTH_8\n \n return save_mod\n\ndef cp_workspace(pipe, measurements, object_set, image_set, initial_module, image_set_list): \n\n workspace = cellprofiler.workspace.Workspace(\n\n pipeline=pipe,\n module=initial_module,\n image_set=image_set,\n object_set=object_set,\n measurements=measurements,\n image_set_list=image_set_list)\n return workspace\n\n \n\n\n\n","sub_path":"cp_api.py","file_name":"cp_api.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"638552463","text":"''' Wrappers of pretrained models.\n'''\n\nimport os\n\nimport rlcard\nfrom rlcard.agents import CFRAgent\nfrom rlcard.models.model import Model\n\n# Root path of pretrained models\nROOT_PATH = os.path.join(rlcard.__path__[0], 'models/pretrained')\n\nclass LeducHoldemNFSPModel(Model):\n ''' A pretrained model on Leduc Holdem with NFSP\n '''\n\n def __init__(self):\n ''' Load pretrained model\n '''\n import tensorflow as tf\n from rlcard.agents import NFSPAgent\n self.graph = tf.Graph()\n self.sess = tf.Session(graph=self.graph)\n\n env = 
rlcard.make('leduc-holdem')\n with self.graph.as_default():\n self.nfsp_agents = []\n for i in range(env.player_num):\n agent = NFSPAgent(self.sess,\n scope='nfsp' + str(i),\n action_num=env.action_num,\n state_shape=env.state_shape,\n hidden_layers_sizes=[128,128],\n q_mlp_layers=[128,128])\n self.nfsp_agents.append(agent)\n\n check_point_path = os.path.join(ROOT_PATH, 'leduc_holdem_nfsp')\n with self.sess.as_default():\n with self.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(self.sess, tf.train.latest_checkpoint(check_point_path))\n\n @property\n def agents(self):\n ''' Get a list of agents for each position in the game\n\n Returns:\n agents (list): A list of agents\n\n Note: Each agent should be just like an RL agent with step and eval_step\n functioning well.\n '''\n return self.nfsp_agents\n\nclass LeducHoldemNFSPPytorchModel(Model):\n ''' A pretrained PyTorch model on Leduc Holdem with NFSP\n '''\n\n def __init__(self):\n ''' Load pretrained model\n '''\n import torch\n from rlcard.agents import NFSPAgentPytorch\n self.game = \"leduc-holdem\"\n env = rlcard.make(self.game)\n self.nfsp_agents = []\n for i in range(env.player_num):\n agent = NFSPAgentPytorch(scope='nfsp' + str(i),\n action_num=env.action_num,\n state_shape=env.state_shape,\n hidden_layers_sizes=[128,128],\n q_mlp_layers=[128,128],\n device=torch.device('cpu'))\n self.nfsp_agents.append(agent)\n\n check_point_path = os.path.join(ROOT_PATH, 'leduc_holdem_nfsp_pytorch/model.pth')\n checkpoint = torch.load(check_point_path)\n for agent in self.nfsp_agents:\n agent.load(checkpoint)\n\n @property\n def agents(self):\n ''' Get a list of agents for each position in the game\n\n Returns:\n agents (list): A list of agents\n\n Note: Each agent should be just like an RL agent with step and eval_step\n functioning well.\n '''\n return self.nfsp_agents\n\nclass LeducHoldemCFRModel(Model):\n ''' A pretrained model on Leduc Holdem with CFR\n '''\n\n def __init__(self):\n ''' Load pretrained model\n '''\n self.game = \"leduc-holdem\"\n env = rlcard.make(self.game)\n\n self.agent = CFRAgent(env, model_path=os.path.join(ROOT_PATH, 'leduc_holdem_cfr'))\n self.agent.load()\n\n @property\n def agents(self):\n ''' Get a list of agents for each position in the game\n\n Returns:\n agents (list): A list of agents\n\n Note: Each agent should be just like an RL agent with step and eval_step\n functioning well.\n '''\n return [self.agent, self.agent]\n\n","sub_path":"rlcard/models/pretrained_models.py","file_name":"pretrained_models.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"252879496","text":"import os\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndriver = webdriver.Chrome(ChromeDriverManager().install()) # Manage drivers with webdriver_manager\ncwd = os.getcwd() # Current working directory\ndriver.get(\"file://\" + cwd + \"/test_sites/Test.html\")\n\nmagic_element = driver.find_elements_by_tag_name(\"papa\")\n\n# list length method\nif len(magic_element) > 0:\n print(\"Element actually does exist\")\nelse:\n print(\"No such element found\")\n\n# try/except method\ntry:\n driver.find_element_by_tag_name(\"papa\")\n print(\"Element does exist\")\nexcept NoSuchElementException:\n print(\"No such element 
found\")\n\ndriver.quit()\n","sub_path":"Selenium/element_exists.py","file_name":"element_exists.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"463365381","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 20 19:06:08 2016\n\n@author: marcus\n\"\"\"\n\nimport sqlite3\nimport time\n\nabc =(time.strftime(\"%H:%M:%S\")) #local time\nabd =(time.strftime(\"%d/%m/%Y\")) #local date\n#for some reason the names time =.. and date =.. did not work\n\nprint(abd)\n\ndef query(sql, data):\n with sqlite3.connect(\"database.db\") as db:\n cursor = db.cursor()\n cursor.execute(sql,data)\n db.commit()\n\ndef insert_product_type_data(description):\n sql = \"INSERT INTO ProductType (Description) VALUES (?)\"\n query(sql, description)\n\ndef insert_product_data(records):\n sql = \"INSERT INTO Product (Name,Price,ProductTypeID) VALUES (?,?,?)\"\n print(records)\n query(sql,records)\n\ndef insert_customer_data(records):\n sql = \"INSERT INTO Customer(FirstName,LastName,Street,Town,PostCode,TelephoneNumber, \\\n EMailAddress)VALUES (?,?,?,?,?,?,?)\"\n print(records)\n query(sql,records)\n\ndef insert_order_customer_data(records):\n sql = \"INSERT INTO CustomerOrder(Date, Time, CustomerID, Quantity) VALUES (?,?,?,?)\"\n query(sql,(records))\n\n# def insert_order_item_data(records):\n# sql = \"INSERT INTO OrderItem(OrderID,ProductID,Quantity) VALUES (?,?,?)\"\n# for record in records:\n# print(record)\n# query(sql, record)\nif __name__ == '__main__':\n insert_product_type_data((\"test\",))\n#if __name__ == \"__main__\":\n# product_types = [(\"Coffee\",)]\n# products = [(\"Latte\", 1.35, 1)]\n# customer_data = [(\"Anton\",\"Hallman\",\"Den tredje lusvägen\",\"Ottawa\",9321,\"0921853769122\",\"swag@bing.net\")]\n# customer_order = [(abc,abd,1)]\n# order_item = [(1,1,1)]\n# insert_product_type_data(product_types)\n# insert_product_data(products)\n# insert_customer_data(customer_data)\n# insert_order_customer_data(customer_order)\n# insert_order_item_data(order_item)\n","sub_path":"databas/insert_data_relationships.py","file_name":"insert_data_relationships.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"508432555","text":"import maya.cmds as mc\nimport maya.mel as mm\n\nimport glTools.utils.channelState\nimport glTools.utils.curve\n\n_overrideId = {'lf':14,'rt':13,'cn':17}\n_controlType = ['anchor','arch','arcArrow','arrow','box','circle','crescent','diamond','eye','face','locator','pyramid','spiral','sphere','sphereAnchor','square','teardrop','text']\n\t\ndef create(controlType,controlName,translate=(0,0,0),rotate=(0,0,0),scale=1,colour=0,text=''):\n\t'''\n\tThis script builds curve control objects based on the arguments input by the user\n\t@param controlType: Type of control to build\n\t@type controlType: str\n\t@param controlName: Name of the resultant curve control\n\t@type controlName: str\n\t'''\n\t# Check Control Type\n\tif not _controlType.count(controlType):\n\t\traise Exception('Unsupported control shape type(\"'+controlType+'\")!!')\n\t\t\n\t# Create Control\n\tcontrol = ''\n\tif controlType == 'anchor': control = self.anchor()\n\telif controlType == 'arch': control = self.arch()\n\telif controlType == 'arcArrow': control = self.arcArrow()\n\telif controlType == 'arrow': control = self.arrow()\n\telif controlType == 'box': control = self.box()\n\telif controlType == 'circle': control = 
self.circle()\n\telif controlType == 'crescent': control = self.crescent()\n\telif controlType == 'diamond': control = self.diamond()\n\telif controlType == 'eye': control = self.eye()\n\telif controlType == 'face': control = self.face()\n\telif controlType == 'locator': control = self.locator()\n\telif controlType == 'pyramid': control = self.pyramid()\n\telif controlType == 'spiral': control = self.spiral()\n\telif controlType == 'sphere': control = self.sphere()\n\telif controlType == 'sphereAnchor': control = self.sphereAnchor()\n\telif controlType == 'square': control = self.square()\n\telif controlType == 'teardrop': control = self.teardrop()\n\telif controlType == 'text': control = self.text(text=text)\n\telse: raise Exception('Unsupported control shape type(\"'+controlType+'\")!!')\n\t\n\t# Check Control\n\tif not control: raise Exception('Error creating controls')\n\t\n\t# Rename Control\n\tcontrol = mc.rename(control,controlName+'#')\n\tcontrolShape = mc.listRelatives(control,s=1,ni=1,pa=True)\n\tif len(controlShape) == 1:\n\t\tcontrolShape = mc.rename(controlShape[0],control+'Shape')\n\telse:\n\t\tfor c in range(len(controlShape)):\n\t\t\tmc.rename(controlShape[c],control+'Shape'+str(c))\n\t\n\t# Color it\n\tprefix = controlName.split('_')[0]\n\tif _overrideId.has_key(prefix):\n\t\tmc.setAttr(controlShape+'.overrideEnabled',1)\n\t\tmc.setAttr(controlShape+'.overrideColor',_overrideId[prefix])\n\t\n\t# Position Control\n\tmc.move(translate[0],translate[1],translate[2],control,r=True)\n\tmc.rotate(rotate[0],rotate[1],rotate[2],control)\n\tmc.scale(scale,scale,scale,control)\n\tmc.makeIdentity(control,apply=True,translate=True,rotate=True,scale=True,normal=False)\n\t\n\t# Set channel states\n\tglTools.utils.channelState.ChannelState().setFlags([0,0,0,0,0,0,0,0,0,1],[control])\n\t\n\t# Return result\n\treturn str(control)\n\t\ndef anchor(self):\n\t'''\n\tCreate anchor control object\n\t'''\n\t# Create control object\n\tpts = [(0.000,0.000,0.000),(0.000,0.826,0.000),(0.087,0.826,0.000),(0.087,1.000,0.000),(-0.087,1.000,0.000),(-0.087,0.826,0.000),(0.000,0.826,0.000)]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef arch(self):\n\t'''\n\t'''\n\t# Create control object\n\tpts = [(0.100,0.000,-0.500),(-0.100,0.000,-0.500),(-0.100,0.250,-0.433),(-0.100,0.433,-0.250),(-0.100,0.500,0.000),(-0.100,0.433,0.250),(-0.100,0.250,0.433),(-0.100,0.000,0.500),(0.100,0.000,0.500),(0.100,0.250,0.433),(0.100,0.433,0.250),(0.100,0.500,0.000),(0.100,0.433,-0.250),(0.100,0.250,-0.433),(0.100,0.000,-0.500)]\n\tknots = [0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0]\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef arcArrow(self):\n\t'''\n\t'''\n\t# Create control object\n\tpts = [(0.0,0.414,-0.854),(0.0,0.487,-0.942),(0.0,0.148,-0.941),(0.0,0.226,-0.627),(0.0,0.293,-0.708),(0.0,0.542,-0.542),(0.0,0.708,-0.293),(0.0,0.767,0.0),(0.0,0.708,0.293),(0.0,0.542,0.542),(0.0,0.293,0.708),(0.0,0.235,0.607),(0.0,0.126,0.914),(0.0,0.445,0.967),(0.0,0.389,0.871),(0.0,0.678,0.678),(0.0,0.885,0.367),(0.0,0.958,0.0),(0.0,0.885,-0.367),(0.0,0.678,-0.678),(0.0,0.414,-0.854)]\n\tknots = [0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0]\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\t\ndef arrow(self):\n\t'''\n\tCreate arrow control object\n\t'''\n\t# Create control object\n\tpts = 
[\t(-0.333,0.0,-1.0), (0.333,0.0,-1.0), (0.333,0.0,0.333), (0.666,0.0,0.333),\n\t\t(0.0,0.0,1.0), (-0.666,0.0,0.333), (-0.333,0.0,0.333), (-0.333,0.0,-1.0) ]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef box(self):\n\t'''\n\tCreate box control object\n\t'''\n\t# Create control object\n\tpts = [\t(-0.5,0.5,0.5), (0.5,0.5,0.5), (0.5,-0.5,0.5),\n\t\t\t(-0.5,-0.5,0.5), (-0.5,0.5,0.5), (-0.5,0.5,-0.5),\n\t\t\t(-0.5,-0.5,-0.5), (-0.5,-0.5,0.5), (-0.5,0.5,0.5),\n\t\t\t(0.5,0.5,0.5), (0.5,0.5,-0.5), (-0.5,0.5,-0.5),\n\t\t\t(-0.5,-0.5,-0.5), (0.5,-0.5,-0.5), (0.5,0.5,-0.5),\n\t\t\t(0.5,0.5,0.5), (0.5,-0.5,0.5), (0.5,-0.5,-0.5)\t]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef circle(self):\n\t'''\n\tCreate circle control object\n\t'''\n\treturn mc.circle(c=(0,0,0),nr=(0,0,1),sw=360,r=0.5,d=3,ut=0,tol=0.01,s=8,ch=0)[0]\n\ndef crescent(self):\n\t'''\n\tCreate Crescent control object\n\t'''\n\t# Create control object\n\tcontrol = mc.curve(d=3,p=[(0.392,0.392,-0.000),(-0.000,0.554,-0.000),(-0.392,0.392,-0.000),(-0.554,0.000,-0.000),(-0.392,0.228,-0.000),(-0.000,0.323,-0.000),(0.392,0.228,-0.000),(0.554,-0.000,0.000),(0.392,0.392,-0.000),(-0.000,0.554,-0.000),(-0.392,0.392,-0.000)],k=[-0.25,-0.125,0.0,0.125,0.25,0.375,0.5,0.625,0.75,0.875,1.0,1.125,1.25])\n\t\n\t# Return control\n\treturn control\n\ndef diamond(self):\n\t'''\n\tCreate diamond control object\n\t'''\n\t# Create control object\n\tpts = [(0.0,0.5,0.0),(-0.25,0.0,0.0),(0.0,-0.5,0.0),(0.25,0.0,0.0),(0.0,0.5,0.0)]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef eye(self):\n\t'''\n\tCreate eye control object\n\t'''\n\t# Create control object\n\tpts = [(1.000,0.064,0.000),(-0.000,0.747,0.000),(-1.000,0.064,0.000),(-1.000,0.000,0.000),(-1.000,-0.064,0.000),(-0.000,-0.747,0.000),(1.000,-0.064,0.000),(1.000,-0.000,0.000),(1.000,0.064,0.000),(-0.000,0.747,0.000),(-1.000,0.064,0.000)]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef face(self):\n\t'''\n\tCreate face control object\n\t'''\n\t# Create control object\n\tpts = [(0.573,0.863,0.000),(-0.000,1.047,0.000),(-0.573,0.863,0.000),(-0.770,0.266,0.000),(-0.750,0.000,0.000),(-0.409,-0.656,0.000),(-0.322,-0.953,0.000),(-0.000,-1.020,0.000),(0.322,-0.953,0.000),(0.409,-0.656,0.000),(0.750,-0.000,0.000),(0.770,0.266,0.000),(0.573,0.863,0.000),(-0.000,1.047,0.000),(-0.573,0.863,0.000)]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef locator(self):\n\t'''\n\tCreate locator control object\n\t'''\n\t# Create control object\n\tpts = [\t(-0.5, 0.0, 0.0), (0.5, 0.0, 0.0), (0.0, 0.0, 0.0), (0.0, 0.5, 0.0), (0.0, -0.5, 0.0), (0.0, 0.0, 0.0), (0.0, 0.0, -0.5), (0.0, 0.0, 0.5)\t]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef pyramid(self):\n\t'''\n\tCreate pyramid control object\n\t'''\n\t# Create control object\n\tpts = [\t(-0.5,-0.5,0.5), (0.5,-0.5,0.5), (0.5,-0.5,-0.5), (-0.5,-0.5,-0.5), (-0.5,-0.5,0.5),\n\t\t(0.0,0.5,0.0), (-0.5,-0.5,-0.5), (0.5,-0.5,-0.5), (0.0,0.5,0.0), (0.5,-0.5,0.5)\t]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef sphere(self):\n\t'''\n\tCreate sphere control 
object\n\t'''\n\t# Create control object\n\tpts = [\t(0.5, 0.0, 0.0), (0.462, 0.0, 0.19), (0.35, 0.0, 0.35),\n\t\t(0.19, 0.0, 0.46), (0.0, 0.0, 0.5), (-0.19, 0.0, 0.46),\n\t\t(-0.35, 0.0, 0.35), (-0.46, 0.0, 0.19), (-0.5, 0.0, 0.0),\n\t\t(-0.46, 0.0, -0.19), (-0.35, 0.0, -0.35), (-0.19, 0.0, -0.46),\n\t\t(0.0, 0.0, -0.5), (0.19, 0.0, -0.46), (0.35, 0.0, -0.35),\n\t\t(0.46, 0.0, -0.19), (0.5, 0.0, 0.0), (0.46, -0.19, 0.0),\n\t\t(0.35, -0.35, 0.0), (0.19, -0.46, 0.0), (0.0, -0.5, 0.0), \n\t\t(-0.19, -0.46, 0.0), (-0.35, -0.35, 0.0), (-0.46, -0.19, 0.0), \n\t\t(-0.5, 0.0, 0.0), (-0.46, 0.19, 0.0), (-0.35, 0.35, 0.0), \n\t\t(-0.19, 0.46, 0.0), (0.0, 0.5, 0.0), (0.19, 0.46, 0.0), \n\t\t(0.35, 0.35, 0.0), (0.46, 0.19, 0.0), (0.5, 0.0, 0.0), \n\t\t(0.46, 0.0, 0.19), (0.35, 0.0, 0.35), (0.19, 0.0, 0.46), \n\t\t(0.0, 0.0, 0.5), (0.0, 0.24, 0.44), (0.0, 0.44, 0.24), \n\t\t(0.0, 0.5, 0.0), (0.0, 0.44, -0.24), (0.0, 0.24, -0.44), \n\t\t(0.0, 0.0, -0.5), (0.0, -0.24, -0.44), (0.0, -0.44, -0.24), \n\t\t(0.0, -0.5, 0.0), (0.0, -0.44, 0.24), (0.0, -0.24, 0.44), \n\t\t(0.0, 0.0, 0.5)\t]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef sphereAnchor(self):\n\t'''\n\tCreate sphereAnchor control object\n\t'''\n\t# Create control object\n\tpts = [\t(0.0, 1.0, -0.05), (0.0, 0.981, -0.0462), (0.0, 0.965, -0.035),\n\t\t(0.0, 0.954, -0.019), (0.0, 0.95, 0.0), (0.0, 0.954, 0.019),\n\t\t(0.0, 0.965, 0.035), (0.0, 0.981, 0.046), (0.0, 1.0, 0.05),\n\t\t(0.0, 1.019, 0.046), (0.0, 1.035, 0.035), (0.0, 1.046, 0.019),\n\t\t(0.0, 1.05, 0.0), (0.0, 1.046, -0.019), (0.0, 1.035, -0.035),\n\t\t(0.0, 1.019, -0.046), (0.0, 1.0, -0.05), (-0.019, 1.0, -0.046),\n\t\t(-0.035, 1.0, -0.035), (-0.046, 1.0, -0.019), (-0.05, 1.0, 0.0),\n\t\t(-0.046, 1.0, 0.019), (-0.035, 1.0, 0.035), (-0.019, 1.0, 0.046),\n\t\t(0.0, 1.0, 0.05), (0.019, 1.0, 0.046), (0.035, 1.0, 0.035),\n\t\t(0.046, 1.0, 0.019), (0.05, 1.0, 0.0), (0.046, 1.0, -0.019),\n\t\t(0.035, 1.0, -0.035), (0.019, 1.0, -0.046), (0.0, 1.0, -0.05),\n\t\t(0.0, 0.981, -0.046), (0.0, 0.965, -0.035), (0.0, 0.954, -0.019),\n\t\t(0.0, 0.95, 0.0), (0.024, 0.956, 0.0), (0.044, 0.976, 0.0),\n\t\t(0.05, 1.0, 0.0), (0.044, 1.024, 0.0), (0.024, 1.044, 0.0),\n\t\t(0.0, 1.05, 0.0), (-0.024, 1.044, 0.0), (-0.044, 1.024, 0.0),\n\t\t(-0.05, 1.0, 0.0), (-0.044, 0.976, 0.0), (-0.024, 0.956, 0.0),\n\t\t(0.0, 0.95, 0.0), (0.0, 0.0, 0.0) ]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\ndef spiral(self):\n\t'''\n\tCreate spiral control object\n\t'''\n\t# Build Point Array\n\tpts = [\t(0.0, 0.0, 0.0), (0.0, 0.1, 0.0), (0.0, 0.2, 0.0),\n\t\t(0.0, 0.28, 0.0), (0.0, 0.288, 0.0), (0.0, 0.325, 0.0),\n\t\t(0.0, 0.346, -0.05), (0.01, 0.35, -0.12), (0.13, 0.38, -0.11),\n\t\t(0.21, 0.4, -0.02), (0.16, 0.44, 0.14), (0.0, 0.46, 0.2),\n\t\t(-0.14, 0.5, 0.12), (-0.21, 0.5, -0.06), (-0.18, 0.525, -0.28),\n\t\t(0.0, 0.55, -0.39), (0.28, 0.576, -0.312), (0.4, 0.615, -0.09),\n\t\t(0.3, 0.67, 0.186), (0.0, 0.7, 0.28), (-0.28, 0.728, 0.187),\n\t\t(-0.4, 0.768, -0.09), (-0.336, 0.823, -0.428), (0.0, 0.847, -0.595),\n\t\t(0.425, 0.867, -0.486), (0.589, 0.9, -0.09), (0.435, 0.97, 0.311),\n\t\t(0.158, 0.997, 0.415), (0.0, 1.0, 0.407) ]\n\t\t\t\n\t# Build Knot Array\n\tknots = [0,0]\n\tknots.extend(range(len(pts)-2))\n\tknots.extend([len(pts)-3,len(pts)-3])\n\tdegree = 3\n\t\n\t# Create control object\n\tcontrol = mc.curve(d=degree,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn 
control\n\ndef square(self):\n\t'''\n\tCreate square control object\n\t'''\n\t# Create control object\n\tpts = [(-0.5,0.5,0.0),(-0.5,-0.5,0.0),(0.5,-0.5,0.0),(0.5,0.5,0.0),(-0.5,0.5,0.0)]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t\n\t# Return control name\n\treturn control\n\t\ndef teardrop(self):\n\t'''\n\t'''\n\t# Create control object\n\tpts = [(-0.000,0.554,0.000),(-0.015,0.548,0.000),(-0.554,0.109,0.000),(-0.392,-0.392,0.000),(-0.000,-0.554,0.000),(0.392,-0.392,0.000),(0.554,0.109,0.000),(0.015,0.548,0.000),(-0.000,0.554,0.000)]\n\tknots = range(len(pts))\n\tcontrol = mc.curve(d=1,p=pts,k=knots)\n\t#control = mc.rebuildCurve(control,ch=False,rt=0,rpo=True,end=True,kr=True,d=3,kcp=True)[0]\n\t\n\t# Return control name\n\treturn control\n\t\ndef text(self,text='text'):\n\t'''\n\tCreate text control object\n\t@param text: Text string\n\t@type: str\n\t'''\n\t# Check text string\n\tif not text: raise Exception('Empty string error!')\n\t\n\t# Create Text\n\ttextCurve = mc.textCurves(ch=False,f='Arial',t=text)\n\t\n\t# Parent shapes to single treansform\n\ttextShapes = mc.ls(mc.listRelatives(textCurve,ad=True),type='nurbsCurve')\n\tfor textShape in textShapes:\n\t\ttextXform = mc.listRelatives(textShape,p=True)[0]\n\t\ttextXform = mc.parent(textXform,w=True)\n\t\tmc.makeIdentity(textXform,apply=True,t=True,r=True,s=True,n=False)\n\t\tmc.parent(textShape,textCurve,r=True,s=True)\n\t\tmc.delete(textXform)\n\t\n\t# Delete unused transforms\n\ttextChildren = mc.listRelatives(textCurve,c=True,type='transform')\n\tif textChildren: mc.delete(textChildren)\n\t\n\t# Position text\n\tmc.select(textCurve)\n\tmm.eval('CenterPivot')\n\tpiv = mc.xform(textCurve,q=True,ws=True,rp=True)\n\tmc.move(-piv[0],-piv[1],-piv[2],textCurve,ws=True,r=True)\n\t\n\t# Scale text\n\twidth = (mc.getAttr(textCurve[0]+'.boundingBoxMaxX') - mc.getAttr(textCurve[0]+'.boundingBoxMinX'))\n\theight = (mc.getAttr(textCurve[0]+'.boundingBoxMaxY') - mc.getAttr(textCurve[0]+'.boundingBoxMinY'))\n\tif width > height: sc = 1.0/ width\n\telse: sc = 1.0/ height\n\tmc.scale(sc,sc,sc,textCurve)\n\t\n\t# Freeze Transforms\n\tmc.makeIdentity(textCurve,apply=True,t=True,r=True,s=True,n=False)\n\t\n\t# Return result\n\treturn textCurve\n\ndef controlShape(transform,controlType,translate=(0,0,0),rotate=(0,0,0),scale=1,text='',orient=True):\n\t'''\n\t'''\n\t# Control Builder\n\tcontrolBuilder = ControlBuilder()\n\t\n\t# Create Control\n\tif controlType == 'text':\n\t\tcontrol = controlBuilder.create(controlType,'temp_control_transform',text=text)\n\telse:\n\t\tcontrol = controlBuilder.create(controlType,'temp_control_transform')\n\tcontrolShapeList = mc.listRelatives(control,s=True)\n\t\n\t# Match Control\n\tif not orient: mc.setAttr(control+'.rotate',rotate[0],rotate[1],rotate[2])\n\tmc.delete(mc.pointConstraint(transform,control))\n\tmc.parent(control,transform)\n\tmc.setAttr(control+'.translate',translate[0],translate[1],translate[2])\n\tif orient: mc.setAttr(control+'.rotate',rotate[0],rotate[1],rotate[2])\n\tmc.setAttr(control+'.scale',scale,scale,scale)\n\tmc.makeIdentity(control,apply=True,t=1,r=1,s=1,n=0)\n\t\n\t# For each shape\n\tfor i in range(len(controlShapeList)):\n\t\t\n\t\t# Parent Control Shape\n\t\tcontrolShapeList[i] = mc.parent(controlShapeList[i],transform,r=True,s=True)[0]\n\t\tcontrolShapeList[i] = mc.rename(controlShapeList[i],transform+'Shape'+str(i+1))\n\t\n\t# Delete temp transform \n\tmc.delete(control)\n\t\n\t# Colour Control\n\tcolourControl(transform)\n\t\n\t# Return result\n\treturn 
controlShapeList\n\ndef colourControl(control):\n\t'''\n\t'''\n\t# Get control transform\n\tif not glTools.utils.transform.isTransform(control):\n\t\tcontrolParent = mc.listRelatives(control,p=True)\n\t\tif not controlParent:\n\t\t\traise Exception('Unable to determine controls transform!')\n\t\tcontrol = controlParent[0]\n\t\n\t# Determine Colour\n\tif control.startswith('cn'): colour = _overrideId['cn']\n\telif control.startswith('lf') or control.startswith('L'): colour = _overrideId['lf']\n\telif control.startswith('rt') or control.startswith('R'): colour = _overrideId['rt']\n\telse: colour = 17\n\t\n\t# Set colour\n\tcontrolShapes = mc.listRelatives(control,s=True)\n\tfor controlShape in controlShapes:\n\t\tmc.setAttr(controlShape+'.overrideEnabled',1)\n\t\tmc.setAttr(controlShape+'.overrideColor',colour)\n\t\n\t# Return result\n\treturn colour\n\ndef anchorCurve(control,anchor,template=True):\n\t'''\n\t'''\n\t# Check control\n\tif not mc.objExists(control):\n\t\traise Exception('Control \"'+control+'\" does not exist!')\n\tif not mc.objExists(anchor):\n\t\traise Exception('Anchor transform \"'+anchor+'\" does not exist!')\n\t\n\t# Create curve shape\n\tcrv = mc.curve(p=[(0,0,0),(0,1,0)],k=[0,1],d=1,n=control+'Anchor')\n\tcrvShape = mc.listRelatives(crv,s=True,pa=True)\n\tif not crvShape:\n\t\traise Exception('Unable to determine shape for curve \"'+crv+'\"!')\n\t\n\t# Create curve locators\n\tcrvLoc = glTools.utils.curve.locatorCurve(crv,locatorScale=0.0,local=True,prefix=control)\n\tmc.parent(crvLoc,control)\n\tmc.setAttr(crvLoc[0]+'.t',0,0,0)\n\tmc.setAttr(crvLoc[1]+'.t',0,0,0)\n\tmc.setAttr(crvLoc[0]+'.v',0)\n\tmc.setAttr(crvLoc[1]+'.v',0)\n\t\n\t# Rename and Parent curve shape\n\tcrvShape = mc.parent(crvShape[0],control,r=True,s=True)[0]\n\tcrvShape = mc.rename(crvShape,control+'Shape0')\n\t\n\t# Delete original curve transform\n\tmc.delete(crv)\n\t\n\t# Connect to anchor\n\tmc.pointConstraint(anchor,crvLoc[1])\n\t\n\t# Template\n\tif template: mc.setAttr(crvShape+'.template',1)\n\t\n\t# Set channel states\n\tglTools.utils.channelState.ChannelState().setFlags([2,2,2,2,2,2,2,2,2,1],crvLoc)\n\t\n\t# Return result\n\treturn crvShape\n\ndef freezeCtrlScale(ctrl):\n\t'''\n\t'''\n\tgrp = mc.listRelatives(ctrl,p=True)[0]\n\tmdn = ctrl.replace('ctrl','multiplyDivide')\n\tmdn = mc.createNode('multiplyDivide',n=mdn)\n\tmc.connectAttr(grp+'.s',mdn+'.input2',f=True)\n\tmc.setAttr(mdn+'.input1',1.0,1.0,1.0)\n\tmc.setAttr(mdn+'.operation',2)\n\tmc.connectAttr(mdn+'.output',ctrl+'.s',f=True)\n\t\ndef unfreezeCtrlScale(ctrl):\n\t'''\n\t'''\n\tmdn = mc.listConnections(ctrl+'.s',s=True,d=False)\n\tif mdn: mc.delete(mdn)\n\t\ndef setTranslateLimits(ctrl,tx=[],ty=[],tz=[]):\n\t'''\n\t'''\n\tif(tx): mc.transformLimits(ctrl,tx=(tx[0],tx[1]),etx=(1,1))\n\tif(ty): mc.transformLimits(ctrl,ty=(ty[0],ty[1]),ety=(1,1))\n\tif(tz): mc.transformLimits(ctrl,tz=(tz[0],tz[1]),etz=(1,1))\n","sub_path":"glTools-master/tools/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":17247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"108494883","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.neighbors import KNeighborsClassifier, DistanceMetric\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nimport sys\n\nsys.path.append(\"..\")\nfrom processData import 
loadDataDivided\nimport KNN\n\ndef runLDA(X_train, X_test, y_train, y_test, n_comp):\n print(\"\\nn_comp=%d\\n\"%(n_comp))\n transformer = LinearDiscriminantAnalysis(solver='svd', n_components=n_comp)\n transformer.fit(X_train, y_train)\n X_train_proj = transformer.transform(X_train)\n X_test_proj = transformer.transform(X_test)\n np.save('X_train_LDA', X_train_proj)\n np.save('X_test_LDA', X_test_proj)\n return X_train_proj, X_test_proj\n\ndef cosine(x, y):\n s = np.linalg.norm(x, ord=2) * np.linalg.norm(y, ord=2)\n if s == 0:\n return 0\n return 1 - np.dot(x, y) / s\n\ndef main():\n dim_range = [40]\n k_range = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n \n X_train, X_test, y_train, y_test = loadDataDivided(ifSubDir=True, ifScale=True, suffix='')\n for dim in dim_range:\n print(\"dim: %d, method: LDA, metric: %s\" % (dim, \"euclidean\"))\n X_train_proj, X_test_proj = runLDA(X_train, X_test, y_train, y_test, dim)\n KNN.runKNN(X_train_proj, X_test_proj, y_train, y_test, k_range, metric='euclidean', metric_params=None,\n label=str(dim) + '_LDA_euclidean')\n\nif __name__ == '__main__':\n main()\n","sub_path":"Prj2/Learning/LDA.py","file_name":"LDA.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"526119681","text":"while True:\n ans = 0\n n, x = map(int, input().split())\n\n if n == 0 and x == 0:\n break\n \n for i in range(1, n+1):\n for j in range(i+1, n+1):\n\n c = x - (i+j)\n\n if j < c and c <= n:\n ans += 1\n\n print(\"%d\"%(ans))","sub_path":"backet_python/jijiij.py","file_name":"jijiij.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"636203932","text":"from setuptools import setup, find_packages\n\nname = \"koansys.recipe.hooks\"\nsetup(\n name = name,\n version = open(\"version.txt\").read(),\n author = \"Chris Shenton\",\n author_email = \"chris@koansys.com\",\n description = \"zc.buildout recipe to run python methods as hooks\",\n long_description = open(\"README.txt\").read(),\n license = \"GPL\",\n keywords = \"buildout\",\n classifiers = [\n \"Framework :: Buildout\",\n ],\n url = 'http://koansys-recipe-hooks.googlecode.com/svn/trunk',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['koansys.recipe'],\n include_package_data=True,\n zip_safe=False,\n install_requires = ['zc.buildout', 'setuptools'],\n entry_points = {'zc.buildout':\n ['default = %s:Recipe' % name]},\n )\n","sub_path":"pypi_install_script/koansys.recipe.hooks-0.1-dev-r8.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"12335138","text":"import json\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\nfrom django.http import JsonResponse\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom .models import Users\nfrom inviteCode.models import InviteCode\nfrom udeskApi.utils import postApi, getApi, putApi\nfrom backend.config import BACKDOOR_INVITE_CODE\nfrom udeskApi.utils import webImSignature\n\n\ndef addUserToUdesk(customer):\n r = postApi(\"open_api_v1/customers\", customer)\n assert r.get(\"code\") == 1000\n\n\ndef api(req):\n if req.method == 'GET':\n return HttpResponse(\"no GET method\")\n elif req.method == 'POST': # 新建用户\n data = json.loads(req.body)\n if 
data.get(\"inviteCode\").startswith(BACKDOOR_INVITE_CODE): # 后门邀请码\n # 查重\n user_check = User.objects.filter(username=data.get(\"phone\"))\n if len(user_check):\n return JsonResponse({\n \"status\": 1,\n \"message\": \"手机号{}已注册, 如有问题请致电400咨询.\".format(data.get(\"phone\"))\n })\n user_check = User.objects.filter(username=data.get(\"email\"))\n if len(user_check):\n return JsonResponse({\n \"status\": 2,\n \"message\": \"邮箱{}已注册, 如有问题请致电400咨询.\".format(data.get(\"email\"))\n })\n user = User.objects.create_user(\n username = data.get(\"phone\"),\n last_name = data.get(\"name\"),\n email = data.get(\"email\"),\n password=data.get(\"passwd\"),\n )\n user.save()\n users = Users(\n user = user,\n inviteCode = 'backdoor',\n company = data.get(\"inviteCode\")[8:],\n title = data.get(\"title\"),\n )\n users.save()\n addUserToUdesk({\n \"customer\": {\n \"email\": data.get(\"email\"),\n \"nick_name\": data.get(\"name\"),\n \"cellphones\": [\n [None, data.get(\"phone\")]\n ],\n \"description\": data.get(\"inviteCode\")[8:]+\"/\"+data.get(\"title\")\n }\n })\n return JsonResponse({\n \"status\":0,\n \"message\":\"注册成功\"\n })\n else:\n # check invite code\n try:\n inviteCode = InviteCode.objects.get(code=data.get(\"inviteCode\"))\n except InviteCode.DoesNotExist:\n return JsonResponse({\n \"status\": 1,\n \"message\": '激活码{}不存在'.format(data.get(\"inviteCode\"))\n })\n if not inviteCode.active:\n return JsonResponse({\n \"status\": 2,\n \"message\": '激活码{}无效'.format(data.get(\"inviteCode\"))\n })\n else:\n # todo 使用事务\n # 查重\n user_check = User.objects.filter(username=data.get(\"phone\"))\n if len(user_check):\n return JsonResponse({\n \"status\": 1,\n \"message\": \"手机号{}已注册, 如有问题请致电400咨询.\".format(data.get(\"phone\"))\n })\n user_check = User.objects.filter(username=data.get(\"email\"))\n if len(user_check):\n return JsonResponse({\n \"status\": 2,\n \"message\": \"邮箱{}已注册, 如有问题请致电400咨询.\".format(data.get(\"email\"))\n })\n user = User.objects.create_user(\n username=data.get(\"phone\"),\n last_name=data.get(\"name\"),\n email=data.get(\"email\"),\n password=data.get(\"passwd\")\n )\n user.save()\n users = Users(\n user=user,\n inviteCode=inviteCode.code,\n company=inviteCode.company,\n title = data.get(\"title\"),\n )\n users.save()\n inviteCode.active = False\n inviteCode.users = users\n inviteCode.save()\n addUserToUdesk({\n \"customer\": {\n \"email\": data.get(\"email\"),\n \"nick_name\": data.get(\"name\"),\n \"cellphones\": [\n [None, data.get(\"phone\")]\n ],\n \"description\": inviteCode.company+\"/\"+data.get(\"title\")\n }\n })\n return JsonResponse({\n \"status\": 0,\n \"message\": \"注册成功\"\n })\n\n\ndef do_login(req):\n if req.method == \"POST\":\n data = json.loads(req.body)\n user = authenticate(username=data['phone'], password=data['passwd'])\n if user is not None:\n login(req, user)\n users = Users.objects.get(user=user)\n return JsonResponse({\n \"status\": 0,\n \"message\":{\n \"username\":user.username,\n \"name\":user.last_name,\n \"email\":user.email,\n \"company\":users.company,\n \"title\":users.title,\n \"webim_sign\": \"&\" + webImSignature(user.username)\n }\n })\n else:\n return JsonResponse({\n \"status\": 1,\n \"message\": \"手机号和/或密码不正确\"\n })\n\n\ndef do_logout(req):\n logout(req)\n return JsonResponse({\n \"status\": 0\n })\n\n\ndef updateUserInfo(req):\n if req.method == \"POST\":\n data = json.loads(req.body)\n user = req.user\n\n # 查重\n user_check = User.objects.filter(username = data.get(\"phone\"))\n if len(user_check) and user_check[0].id != user.id:\n return 
JsonResponse({\n \"status\": 1,\n \"message\": \"手机号{}已注册, 如有问题请致电咨询.\".format(user_check.phone)\n })\n user_check = User.objects.filter(username=data.get(\"email\"))\n if len(user_check) and user_check[0].id != user.id:\n return JsonResponse({\n \"status\": 2,\n \"message\": \"邮箱{}已注册, 如有问题请致电咨询.\".format(user_check.email)\n })\n else:\n users = Users.objects.get(user=req.user)\n # 同步udesk\n info = {\"customer\": {}}\n if user.username != data.get(\"phone\"):\n info[\"customer\"][\"cellphones\"] = [None, data.get(\"phone\")]\n if user.last_name != data.get(\"name\"):\n info[\"customer\"][\"nick_name\"] = data.get(\"name\")\n if user.email != data.get(\"email\"):\n info[\"customer\"][\"email\"] = data.get(\"email\")\n if users.title != data.get(\"title\"):\n info[\"customer\"][\"description\"] = users.company + \"/\" + data.get(\"title\")\n users.title = data.get(\"title\")\n users.save()\n if info[\"customer\"]:\n r = putApi(\"open_api_v1/customers/update_customer\", params={\n \"type\": \"cellphone\",\n \"content\": user.username\n }, data=info)\n assert r.get(\"code\") == 1000\n\n user.username = data.get(\"phone\") # 这三行加到上面去会导致user.username过早更新, putApi就没法弄了.\n user.last_name = data.get(\"name\")\n user.email = data.get(\"email\")\n if data.get(\"passwd\"):\n user.set_password(data.get(\"passwd\"))\n user.save()\n return JsonResponse({\n \"status\": 0,\n \"message\": \"\"\n })\n","sub_path":"backend/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"413950431","text":"from django.db import models\n\n\nclass Vacanci(models.Model):\n Title = models.TextField(\n verbose_name='Название',\n )\n Link = models.URLField(\n 'Ссылка на вакансию'\n )\n Salary = models.TextField(\n 'Зарплата'\n )\n No_resume = models.BooleanField(\n 'Можно ли откликаться без резюме'\n )\n First = models.BooleanField(\n 'Будете ли вы первым, откликаясь на эту вакансию'\n )\n Company = models.TextField(\n 'Компания'\n )\n Date = models.TextField(\n 'Дата публикации вакансии'\n )\n Address = models.TextField(\n 'Адрес'\n )\n\n class Meta:\n verbose_name = \"Вакансии\"\n verbose_name_plural = \"Вакансии\"\n\n\nclass Vacanci_2(models.Model):\n Title = models.TextField(\n verbose_name='Название',\n )\n Link = models.URLField(\n 'Ссылка на вакансию'\n )\n Price = models.TextField(\n 'Зарплата'\n )\n Number_of_otkliks = models.TextField(\n 'Количество откликов'\n )\n Number_of_vues = models.TextField(\n 'Количество просмотров другими пользователями'\n )\n\n class Meta:\n verbose_name = \"Вакансии_2\"\n verbose_name_plural = \"Вакансии_2\"\n","sub_path":"djadminka/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"89000562","text":"Frame(\"LayoutOptions\").rcgrid(0,0,sticky='nw')\n\n### CODE ===================================================\n\nLock()\n\n# -------------- this LabelFrame contains data for a layout value refresh ---------------------\n\n# the data reference Entries for x,y,row,column,side, and index depending an the layout type\nwidget(\"LayoutOptions\").mydata=[None,None,None,None,None,None]\n\n# -------------- receiver for message 'LAYOUT_VALUES_REFRESH' ---------------------\n\ndef values_refresh(widget = widget(\"LayoutOptions\")):\n mydata = widget.mydata\n if mydata[0] != None:\n mydata[0].delete(0,END)\t\n 
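# hedged sketch, not the author's code: the six near-identical delete/insert blocks in values_refresh could collapse into one loop, and '!= None' reads more idiomatically as 'is not None':\n        #   for i, opt in enumerate(('x', 'y', 'row', 'column', 'side', 'index')):\n        #       if mydata[i] is not None:\n        #           mydata[i].delete(0, END)\n        #           mydata[i].insert(0, getlayout(opt))\n        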
mydata[0].insert(0,getlayout(\"x\"))\n if mydata[1] != None:\n mydata[1].delete(0,END)\t\n mydata[1].insert(0,getlayout(\"y\"))\n if mydata[2] != None:\n mydata[2].delete(0,END)\t\n mydata[2].insert(0,getlayout(\"row\"))\n if mydata[3] != None:\n mydata[3].delete(0,END)\t\n mydata[3].insert(0,getlayout(\"column\"))\n if mydata[4] != None:\n mydata[4].delete(0,END)\t\n mydata[4].insert(0,getlayout(\"side\"))\n if mydata[5] != None:\n mydata[5].delete(0,END)\t\n mydata[5].insert(0,getlayout(\"index\"))\n\ndo_receive('LAYOUT_VALUES_REFRESH',values_refresh)\n\n# -------------- receiver for message 'SHOW_LAYOUT' - help functions ------------------------------------\n\n# another help function: for layout option sticky we show an info message box\n\n\n# for Return key or mouse klick: get active selection from the listbox, hide the listbox, set the layout and insert the text in the Entry for showing\n\ndef do_lbox_click(event,lbox,entry,isMouse):\n if isMouse: text = lbox.get(lbox.nearest(event.y))\n else: text = lbox.get(ACTIVE)\n setlayout(entry.mydata[0],text)\n entry.delete(0,END)\n entry.insert(0,text)\n lbox.unbind(\"\")\n lbox.unbind(\"\")\n lbox.unlayout()\n\ndef listbox_helpbutton(lbox,entry,lbox_click = do_lbox_click):\n lbox.select_clear(0,END) # clear a former listbox selection \n lbox_index = lbox.getStringIndex(getlayout(entry.mydata[0])) # get the listbox index for the layout option\n lbox.select_set(lbox_index) # preselect the current layout option in the listbox\n lbox.activate(lbox_index) # and set the selection cursor to it\n lbox.rcgrid(0,3) # show the listbox\n lbox.focus_set() # and focus it\n lbox.do_event(\"\",lbox_click,(lbox,entry,False),wishEvent=True) # bind Return key to the listbox\n lbox.do_event(\"\",lbox_click,(lbox,entry,True),wishEvent=True) # bind mouse click to the listbox\n\ndef listbox_selection(helpbutton = listbox_helpbutton):\n Button(text=\"?\").rcgrid(0,2) # create a help button for showing the listbox\n do_command(helpbutton,(widget(\"listbox\"),widget(\"Entry\")))\n\n# -------------- receiver for message 'SHOW_LAYOUT' ------------------------------------\n\n\ndef entry_event(me):\n setlayout(me.mydata[0],me.get())\n me.delete(0,END)\n me.insert(0,get_entry_as_string(getlayout(me.mydata[0])))\n me['bg']='gray'\n informLater(300,me,'color')\n send(\"LAYOUT_OPTIONS_CHANGED\",this())\n\nenable_flag = [False,False,False]\n\nRefDict = {}\n\ndef can_update(linfo, RefDict=RefDict,thisframe=widget(\"LayoutOptions\")):\n if len(RefDict) != len(linfo): return False\n for entry in linfo:\n if entry not in RefDict: return False\n for entry,value in linfo.items():\n RefDict[entry].mydata[1].set(get_entry_as_string(value))\n # reference update for a value refresh without new show layout option creation\n if entry == \"x\": thisframe.mydata[0]=RefDict[entry]\n elif entry == \"y\": thisframe.mydata[1]=RefDict[entry]\n elif entry == \"row\": thisframe.mydata[2]=RefDict[entry]\n elif entry == \"column\": thisframe.mydata[3]=RefDict[entry]\n elif entry == \"side\": thisframe.mydata[4]=RefDict[entry]\n elif entry == \"index\": thisframe.mydata[5]=RefDict[entry]\n return True\n\n\ndef show_layout(msg,onflag = enable_flag, cont = container(),thisframe=widget(\"LayoutOptions\"),e_event=entry_event,lbox_select=listbox_selection,entry_width=7,RefDict=RefDict,can_update=can_update):\n\n if isinstance(msg,bool):\n if msg:\n if not onflag[0]:\n onflag[0] = True\n send('SHOW_LAYOUT',this()) # resend message once more\n \n elif onflag[0]: #if shall switch off and SHOW_LAYOUT is on\n 
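# hedged reading of enable_flag, inferred from this function rather than stated by the author:\n            #   onflag[0] - layout display currently active\n            #   onflag[1] - remembered active state while the LayoutOptions frame is hidden\n            #   onflag[2] - frame currently hidden (set in the tuple-message branch below)\n            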
onflag[0]=False # switch flag to off\n cont.unlayout() # and unlayout the DetailedLayout frame\n thisframe.mydata=[None,None,None,None,None,None] # set references for value refresh to not active\n\n elif type(msg) is tuple:\n\n if msg[1]:\n thisframe.grid()\n onflag[0] = onflag[1]\n onflag[2] = False\n else:\n if not onflag[2]: onflag[1] = onflag[0]\n onflag[0] = False\n onflag[2] = True\n thisframe.unlayout()\n \n elif onflag[0]: # a correct message arrived and show layout is on\n # reset references for value refresh to not active\n thisframe.mydata = [None,None,None,None,None,None]\n # if the widget has a layout, then show it\n if msg.Layout & LAYOUTALL and msg.Layout != MENULAYOUT:\n\n cont.grid()\t\t\t\n linfo = layout_info()\n if can_update(linfo): return\n\n RefDict.clear()\n current_selection = Selection() # save current selection\n setWidgetSelection(msg) # set selection for current user widget\n maxlen = 0\n for entry in linfo: maxlen = max(maxlen,len(entry))\n\n # make a list of tuples of the layout dictionary and sort important options at the beginning\n layoutlist = []\n for entry in (\n\"y\",\n\"x\",\n\"row\",\n\"column\",\n\"rowspan\",\n\"columnspan\",\n\"side\",\n\"sticky\",\n\"width\",\n\"height\",\n\"anchor\",\n\"fill\",\n\"expand\",\n\"bordermode\",\n\"padx\",\n\"pady\",\n\"ipadx\",\n\"ipady\",\n\"relx\",\n\"rely\",\n\"relwidth\",\n\"relheight\"):\n if entry in linfo: layoutlist.append((entry,linfo.pop(entry)))\n for layoutname,entry in linfo.items():layoutlist.append((layoutname,entry))\n # now delete all widgets in frame LayoutOptions and set selection to this frame\n deleteAllWidgets(thisframe) # Frame\n setWidgetSelection(thisframe,thisframe)\n\n entry_row = 0\n for entry in layoutlist:\n # for each option, we make a frame an in this frame a label with the option name and an entry\n # for showing and changing the value\n Frame('Frame')\n goIn()\n Label(text=entry[0],width=maxlen,anchor=E).rcgrid(0,0)\n if entry[0] in (\n\"y\", # Place Layout\n\"x\", # Place Layout\n\"row\", # Grid Layout\n\"column\", # Grid Layout\n\"columnspan\", # Grid Layout (Integer default 1)\n\"rowspan\", \n\"width\", # Place Layout (Default leer \"\" oder Integer)\n\"height\", # Place Layout (Default leer \"\" oder Integer)\n\"expand\", # Pack Layout (Integer default 0)\n\"padx\", # Pack Layout und Grid Layout (Integer default 0)\n\"pady\", # Pack Layout und Grid Layout (Integer default 0)\n\"ipadx\", # Pack Layout und Grid Layout (Integer default 0)\n\"ipady\"): # Pack Layout und Grid Layout (Integer default 0)\n Spinbox(\"Entry\",from_=0,to=3000,increment=1,width=entry_width)\n do_command(e_event,wishWidget=True) # via return key the option value can be changed\n elif entry[0] in (\n\"relx\", # Place Layout (Integer default 0)\n\"rely\", # Place Layout (Integer default 0)\n\"relwidth\", # Place Layout (Default leer \"\" oder Integer)\n\"relheight\"): # Place Layout (Default leer \"\" oder Integer): \n Spinbox(\"Entry\",from_=0,to=1,increment=0.01,width=entry_width)\n do_command(e_event,wishWidget=True) # via return key the option value can be changed\n else: Entry(\"Entry\",width=entry_width)\n do_action('color',lambda me = this(): me.config(bg='white'))\n \n var = StringVar()\n var.set(get_entry_as_string(entry[1]))\n this().mydata=[entry[0],var] # mydata shall also contain the option name\n this()['textvariable'] = var\n RefDict[entry[0]] = this()\n rcgrid(0,1,sticky=E+W)\n\n do_event(\"\",e_event,wishWidget=True) # via return key the option value can be changed\n\n # reference update for a 
value refresh without new show layout option creation\n if entry[0] == \"x\": thisframe.mydata[0]=this()\n elif entry[0] == \"y\": thisframe.mydata[1]=this()\n elif entry[0] == \"row\": thisframe.mydata[2]=this()\n elif entry[0] == \"column\": thisframe.mydata[3]=this()\n elif entry[0] == \"side\": thisframe.mydata[4]=this()\n elif entry[0] == \"index\": thisframe.mydata[5]=this()\n\n # listboxes and readonly state for some options\n if entry[0] ==\"side\":\n Listbox(width=7,height=4).fillList((\"top\",\"bottom\",\"left\",\"right\"))\n lbox_select()\n\n elif entry[0] ==\"anchor\":\n Listbox(width=7,height=9).fillList((\"nw\",\"n\",\"ne\",\"e\",\"se\",\"s\",\"sw\",\"w\",\"center\"))\n lbox_select()\n\n elif entry[0] ==\"in\": this().config(state=\"readonly\")\n elif entry[0] ==\"fill\":\n Listbox(width=4,height=4).fillList((\"none\",\"x\",\"y\",\"both\"))\n lbox_select()\n\n elif entry[0] ==\"bordermode\":\n Listbox(width=7,height=3).fillList((\"inside\",\"outside\",\"ignore\"))\n lbox_select()\n\n # help info message box for sticky option\n elif entry[0] ==\"sticky\":\n Button(text=\"?\").rcgrid(0,2)\n do_command(lambda par = this(): messagebox.showinfo(\"Layout option 'sticky'\",\"The 'sticky' option may be empty or may contain one or more of these characters: 'n' 'e' 'w' 's'\",parent=par))\n\n goOut() # leaving the frame for the option entry and pack it\n rcgrid(entry_row,0,sticky='nw')\n entry_row += 1\n\n setSelection(current_selection)\n if thisframe['width'] < thisframe.winfo_reqwidth():\n thisframe['width'] = thisframe.winfo_reqwidth()\n cont.grid_columnconfigure(0,minsize = thisframe.winfo_reqwidth())\n\n else: # if the widget doesn't have a layout, then disable value refresh and hide the layout options\n cont.unlayout()\n thisframe.mydata=[None,None,None,None,None,None]\n\n\ndo_receive('SHOW_LAYOUT',show_layout,wishMessage = True)\n\n### ========================================================\n","sub_path":"GuiDesigner/guidesigner/DetailedLayout.py","file_name":"DetailedLayout.py","file_ext":"py","file_size_in_byte":10778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"168617790","text":"import sys\n\n\nDefault = open(sys.argv[1]).read().split(\"\\n\")\nAnnot = open(sys.argv[2]).read().split(\"\\n\")\n\nannotation_terms = []\nfor i in Annot:\n if \"NEIS\" in i:\n I = i.split(\"\\t\")\n start = i.index(\"fasta:\")\n end = i.index(\";locus\")\n NEIS = (i[start + len(\"fasta:\"):end])\n annotation_terms.append([I[3],I[4],NEIS])\n\n\ndata_frame = []\nfor i in Default:\n if \"product\" in i:\n site = i.index(\";product\")\n I = i.split(\"\\t\")\n coord1 = I[3]\n coord2 = I[4]\n l = 0\n for A in annotation_terms:\n if coord1 == A[0] and coord2 == A[1]:\n new_line = i[:site] + \";note=Equivalent NEIS number is = \" + A[2] + i[site:]\n data_frame.append(new_line)\n l = l + 1\n if l == 0:\n data_frame.append(i)\n else:\n data_frame.append(i)\n\n\n\nnew_file = open(sys.argv[3],\"w\")\nfor i in data_frame:\n new_file.write(i + \"\\n\")\nnew_file.close()\n\n\n\n\n\n","sub_path":"Custom_Annotation.py","file_name":"Custom_Annotation.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"214767358","text":"#!/usr/bin/python3\n\n\"\"\"\nCreate a new view for the link between Place objects and\nAmenity objects that handles\nall default RestFul API actions.\n\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, 
request\nfrom models import storage, storage_t\nfrom models.place import Place\nfrom models.amenity import Amenity\n\n\n@app_views.route('/places//amenities', methods=['GET'],\n strict_slashes=False)\ndef get_amen(place_id):\n \"\"\" Return all amenities \"\"\"\n place = storage.get(Place, place_id)\n if not place:\n abort(404, \"Not found\")\n if storage_t == \"db\":\n return jsonify([e.to_dict() for e in place.amenities])\n else:\n l = []\n for e in place.amenity_ids:\n l.append(storage.get(Amenity, e).to_dict())\n return jsonify(l)\n\n\n@app_views.route('/places//amenities/',\n methods=['DELETE'],\n strict_slashes=False)\ndef delete_amen(place_id, amenity_id):\n \"\"\" Delete an amenity \"\"\"\n pl = storage.get(Place, place_id)\n amn = storage.get(Amenity, amenity_id)\n if not pl or not amn:\n abort(404)\n if storage_t == \"db\":\n if amn not in pl.amenities:\n abort(404)\n pl.amenities.remove(amn)\n pl.save()\n else:\n if amn.id not in pl.amenity_ids:\n abort(404)\n pl.amenity_ids.remove(amenity_id)\n pl.save()\n return jsonify({}), 200\n\n\n@app_views.route('/places//amenities/', methods=['POST'],\n strict_slashes=False)\ndef link_amen(place_id, amenity_id):\n \"\"\" Link an amenity to a place \"\"\"\n place = storage.get(Place, place_id)\n amn = storage.get(Amenity, amenity_id)\n if not place or not amn:\n abort(404, \"Not found\")\n\n if storage_t == \"db\":\n if amn in place.amenities:\n return jsonify(amn.to_dict()), 200\n place.amenities.append(amn)\n place.save()\n else:\n if amn in place.amenity_ids:\n return jsonify(amn.to_dict()), 200\n place.amenity_ids.append(amenity_id)\n place.save()\n return jsonify(amn.to_dict()), 201\n","sub_path":"api/v1/views/places_amenities.py","file_name":"places_amenities.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"233382541","text":"from dymos.transcriptions.common.timeseries_output_comp import TimeseriesOutputCompBase\n\n\nclass SolveIVPTimeseriesOutputComp(TimeseriesOutputCompBase):\n \"\"\"\n Class definition for SolveIVPTimeseriesOutputComp.\n\n Parameters\n ----------\n **kwargs : dict\n Dictionary of optional arguments.\n \"\"\"\n def initialize(self):\n \"\"\"\n Declare component options.\n \"\"\"\n super(SolveIVPTimeseriesOutputComp, self).initialize()\n\n self.options.declare('output_nodes_per_seg', default=None, types=(int,), allow_none=True,\n desc='If None, results are provided at the all nodes within each'\n 'segment. 
If an int (n) then results are provided at n '\n 'equally distributed points in time within each segment.')\n\n def setup(self):\n \"\"\"\n Define the independent variables as output variables.\n \"\"\"\n grid_data = self.options['input_grid_data']\n if self.options['output_nodes_per_seg'] is None:\n self.num_nodes = grid_data.num_nodes\n else:\n self.num_nodes = grid_data.num_segments * self.options['output_nodes_per_seg']\n\n for (name, kwargs) in self._timeseries_outputs:\n units = kwargs['units']\n desc = kwargs['units']\n shape = kwargs['shape']\n self._add_output_configure(name, units, shape, desc)\n\n def _add_output_configure(self, name, units, shape, desc):\n \"\"\"\n Add a single timeseries output.\n\n Can be called by parent groups in configure.\n\n Parameters\n ----------\n name : str\n name of the variable in this component's namespace.\n shape : int or tuple or list or None\n Shape of this variable, only required if val is not an array.\n Default is None.\n units : str or None\n Units in which the output variables will be provided to the component during execution.\n Default is None, which means it has no units.\n desc : str\n description of the timeseries output variable.\n \"\"\"\n num_nodes = self.num_nodes\n\n input_name = f'all_values:{name}'\n self.add_input(input_name,\n shape=(num_nodes,) + shape,\n units=units, desc=desc)\n\n output_name = name\n self.add_output(output_name,\n shape=(num_nodes,) + shape,\n units=units, desc=desc)\n\n self._vars[name] = (input_name, output_name, shape)\n\n def compute(self, inputs, outputs):\n \"\"\"\n Compute component outputs.\n\n Parameters\n ----------\n inputs : `Vector`\n `Vector` containing inputs.\n outputs : `Vector`\n `Vector` containing outputs.\n \"\"\"\n for (input_name, output_name, _) in self._vars.values():\n outputs[output_name] = inputs[input_name]\n","sub_path":"dymos/transcriptions/solve_ivp/components/solve_ivp_timeseries_comp.py","file_name":"solve_ivp_timeseries_comp.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"272059754","text":"# coding: utf-8\n__author__ = 'Stiller'\n\nimport datetime\nfrom collections import OrderedDict\nfrom mis.mis import config\n\n\ndef set_teacher_event(cdt=None):\n \"\"\"设置老师的日历\"\"\"\n rs = []\n now = datetime.datetime.now()\n for c in cdt:\n e = OrderedDict()\n e['id'] = c.id\n e['title'] = \"%s > %s %s > %s > %s\" % (c.student.school.number,\n c.student.number or '',\n c.student.nickname,\n c.get_attendance_display(),\n c.course.en_name)\n e['start'] = c.start_time.strftime('%Y-%m-%d %H:%M:%S')\n end = c.end_time\n # if end.minute % 30 == 0:\n # delta = 0\n # else:\n # delta = (30 - end.minute >= 0) and 30 - end.minute or 60 - end.minute\n # e['end'] = (end + datetime.timedelta(minutes=delta)).strftime('%Y-%m-%d %H:%M:%S')\n e['end'] = end.strftime('%Y-%m-%d %H:%M:%S')\n e['className'] = config.Attendance.CLASSNAME.get(c.attendance)\n e['allDay'] = False\n e['durationEditable'] = False\n e['borderColor'] = 'white'\n e['extra'] = c.end_time > now and 'class' or ''\n rs.append(e)\n return rs\n\n\ndef set_student_event(cdt=None):\n \"\"\"设置老师的日历\"\"\"\n rs = []\n for c in cdt:\n e = OrderedDict()\n e['id'] = c.id\n e['title'] = c.get_attendance_display()\n e['start'] = c.start_time.strftime('%Y-%m-%d %H:%M:%S')\n end = c.end_time\n if end.minute % 30 == 0:\n delta = 0\n else:\n delta = (30 - end.minute >= 0) and 30 - end.minute or 60 - end.minute\n e['end'] = (end + 
datetime.timedelta(minutes=delta)).strftime('%Y-%m-%d %H:%M:%S')\n e['className'] = config.Attendance.CLASSNAME.get(c.attendance)\n e['borderColor'] = 'white'\n e['allDay'] = False\n e['durationEditable'] = False\n rs.append(e)\n return rs\n","sub_path":"mis/calendars/writer/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"201494503","text":"import os\nfrom IPython.core.pylabtools import figsize\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nimport math\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\nfrom setuptools.dist import Feature\nfrom tensorflow.python.ops.metrics_impl import root_mean_squared_error\nfrom idlelib.pyparse import trans\n\ndef preprocess_features(california_housing_dataframe):\n \n selected_features = california_housing_dataframe[\n ['latitude', \n 'longitude', \n 'housing_median_age', \n 'total_rooms',\n 'total_bedrooms',\n 'population',\n 'households',\n 'median_income' ]]\n processed_features = selected_features.copy()\n processed_features['total_rooms'] = clip(processed_features['total_rooms'],0, 9000)\n processed_features['total_bedrooms'] = clip(processed_features['total_bedrooms'],0, 2000)\n processed_features['rooms_per_person'] = processed_features['total_rooms'] / processed_features['population']\n processed_features['rooms_per_person'] = clip(processed_features['rooms_per_person'],0, 6)\n return processed_features\n\ndef normalize2(examples, z_score_params):\n normalized_features = pd.DataFrame()\n \n normalized_features['latitude'] = z_score_normalize(examples['latitude'], 'latitude', z_score_params)\n normalized_features['longitude'] = z_score_normalize(examples['longitude'], 'longitude', z_score_params)\n normalized_features['housing_median_age'] = z_score_normalize(examples['housing_median_age'], 'housing_median_age', z_score_params)\n normalized_features['total_rooms'] = z_score_normalize(examples['total_rooms'], 'total_rooms', z_score_params)\n normalized_features['total_bedrooms'] = z_score_normalize(examples['total_bedrooms'], 'total_bedrooms', z_score_params)\n normalized_features['households'] = z_score_normalize(examples['households'], 'households', z_score_params)\n normalized_features['median_income'] = z_score_normalize(examples['median_income'], 'median_income', z_score_params)\n normalized_features['population'] = log_normalize(examples['population'])\n \n return normalized_features\n\ndef preprocess_targets(california_housing_dataframe):\n output_targets = pd.DataFrame()\n output_targets['median_house_value'] = california_housing_dataframe['median_house_value'] / 1000\n return output_targets\n\ndef my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None): \n features = {key: np.array(value) for key, value in dict(features).items()}\n temp = (features,targets)\n ds = Dataset.from_tensor_slices((features, targets))\n ds = ds.batch(batch_size).repeat(num_epochs)\n if shuffle:\n ds = ds.shuffle(buffer_size = 1000)\n features, labels = ds.make_one_shot_iterator().get_next()\n return features, labels\n\ndef construct_feature_columns(input_features):\n return set([tf.feature_column.numeric_column(feature) for feature in input_features])\n\ndef predictions_to_numpy_array(predictions):\n 
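# each item yielded by Estimator.predict() is a dict whose 'predictions' entry\n    # holds a length-1 array for this single-output regressor; take the scalar\n    # from each and stack the results into one flat numpy array\n    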
return np.array([item['predictions'][0] for item in predictions])\n\ndef calculate_root_mean_squared_error(predictions, targets):\n return math.sqrt(metrics.mean_squared_error(predictions, targets))\n \ndef linear_scale(series, feature, scale_params):\n print(scale_params)\n return series.apply(lambda x: ((x -scale_params[feature]['min_value'])/scale_params[feature]['scale']) - 1)\n\ndef log_normalize(series):\n return series.apply(lambda x: math.log(x + 1))\n\ndef clip(series, clip_to_min, clip_to_max):\n return series.apply(lambda x: max(min(x, clip_to_max), clip_to_min))\n\ndef z_score_normalize(series, feature, z_score_params):\n return series.apply(lambda x: (x - z_score_params[feature]['mean'])/z_score_params[feature]['std'])\n\ndef binary_threshold(series, threshold):\n return series.apply(lambda x: 1 if x > threshold else 0)\n\ndef get_linear_scale_params(series):\n min_value = series.min()\n max_value = series.max()\n \n return {\n 'min_value': min_value,\n 'scale' : (max_value -min_value) / 2\n }\n\ndef get_z_score_params(series):\n \n return {\n 'mean': series.mean(),\n 'std' : series.std()\n }\n\ndef get_linear_scale_params_df(dataframe):\n scale_params = {}\n for feature in dataframe:\n scale_params[feature] = get_linear_scale_params(dataframe[feature])\n \n return scale_params\n\ndef get_z_score_params_df(dataframe):\n params = {}\n for feature in dataframe:\n params[feature] = get_z_score_params(dataframe[feature])\n \n return params\n\ndef normalize(examples, normalize_fn):\n normalized_features = pd.DataFrame()\n \n for feature in examples:\n normalized_features[feature] = normalize_fn(examples[feature], feature)\n \n return normalized_features\n\ndef train_model(optimizer, steps, batch_size, hidden_units, training_examples, training_targets, validation_examples, validation_targets):\n periods = 10\n steps_per_period = steps/periods\n \n my_label = 'median_house_value'\n \n feature_columns = construct_feature_columns(training_examples)\n \n optimizer = tf.contrib.estimator.clip_gradients_by_norm(optimizer, 5.0)\n \n dnn_regressor = tf.estimator.DNNRegressor(feature_columns = feature_columns, hidden_units=hidden_units, optimizer=optimizer)\n \n training_input_fn=lambda:my_input_fn(training_examples, training_targets, batch_size=batch_size)\n predict_training_input_fn = lambda: my_input_fn(training_examples, training_targets, shuffle=False, num_epochs=1)\n predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets, shuffle=False, num_epochs=1)\n \n print ('Training model...')\n print ('RMSE (on training data):')\n training_root_mean_squared_errors = []\n validation_root_mean_squared_errors = []\n for period in range(0, periods):\n dnn_regressor.train(input_fn=training_input_fn, steps=steps_per_period)\n training_predictions = dnn_regressor.predict(input_fn=predict_training_input_fn)\n validation_predictions = dnn_regressor.predict(input_fn=predict_validation_input_fn)\n training_predictions = predictions_to_numpy_array(training_predictions)\n validation_predictions = predictions_to_numpy_array(validation_predictions)\n training_rmse = calculate_root_mean_squared_error(training_predictions, training_targets)\n validation_rmse = calculate_root_mean_squared_error(validation_predictions, validation_targets)\n training_root_mean_squared_errors.append(training_rmse)\n validation_root_mean_squared_errors.append(validation_rmse)\n print(\"Root Mean squared error for period {} training set: {:.3f} validation set: {:.3f}\".format(period, training_rmse, 
validation_rmse))\n \n print(\"Model training finished.\")\n \n plt.ylabel('RMSE')\n plt.xlabel('Periods')\n plt.title('RMSE per Periods')\n plt.tight_layout()\n plt.plot(training_root_mean_squared_errors, label=\"training\")\n plt.plot(validation_root_mean_squared_errors, label=\"validation\")\n plt.show()\n return dnn_regressor\n\n \n '''calibration_data = pd.DataFrame()\n calibration_data[\"predictions\"] = pd.Series(predictions)\n calibration_data[\"targets\"] = pd.Series(targets)\n display.display(calibration_data.describe())\n \n plt.subplot(1,3,3)\n plt.title('Targets vs Predictions')\n plt.xlabel('predictions')\n plt.ylabel('targets')\n plt.scatter(calibration_data['predictions'], calibration_data['targets'])\n plt.show()\n \n print('Final RMSE: {:.3f}'.format(root_mean_squared_error))'''\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\ncalifornia_housing_dataframe = pd.read_csv(\"https://storage.googleapis.com/mledu-datasets/california_housing_train.csv\", sep=\",\")\ncalifornia_housing_dataframe = california_housing_dataframe.reindex(np.random.permutation(california_housing_dataframe.index))\n\n\n\ntraining_examples = preprocess_features(california_housing_dataframe.head(12000))\ntraining_examples.hist(bins=20, figsize=(18,12), xlabelsize=10)\nplt.show()\nscale_params = get_linear_scale_params_df(training_examples)\nz_score_params = get_z_score_params_df(training_examples)\n\nlinear_scale_normalize_fn = lambda x, feature, scale_params=scale_params: linear_scale(x, feature, scale_params)\nz_score_normalize_fn = lambda x, feature, z_score_params=z_score_params: z_score_normalize(x, feature, z_score_params)\n\nnormalize_fn = z_score_normalize_fn\n\ntraining_examples = normalize(training_examples, z_score_normalize_fn)\ntraining_examples.hist(bins=20, figsize=(18,12), xlabelsize=10)\nplt.show()\n\ntraining_targets = preprocess_targets(california_housing_dataframe.head(12000))\n\nvalidation_examples = normalize(preprocess_features(california_housing_dataframe.tail(5000)), normalize_fn)\nvalidation_targets = preprocess_targets(california_housing_dataframe.tail(5000))\n\ndnn_regressor = train_model(optimizer=tf.train.AdamOptimizer(learning_rate=0.007), steps=2000, batch_size=100, hidden_units=[10,10], training_examples=training_examples, training_targets=training_targets, validation_examples=validation_examples, validation_targets=validation_targets)\n\ncalifornia_housing_test_data = pd.read_csv(\"https://storage.googleapis.com/mledu-datasets/california_housing_test.csv\", sep=\",\")\n\ntest_examples = normalize(preprocess_features(california_housing_test_data), normalize_fn)\ntest_targets = preprocess_targets(california_housing_test_data)\npredict_test_input_fn = lambda: my_input_fn(test_examples, test_targets, shuffle=False, num_epochs=1)\n\ntest_predictions = dnn_regressor.predict(input_fn=predict_test_input_fn)\ntest_predictions = predictions_to_numpy_array(test_predictions)\ntest_rmse = calculate_root_mean_squared_error(test_predictions, test_targets)\nprint(\"Root Mean squared error for the test set: {:.3f} \".format(test_rmse))\n\n\n'''plt.figure(figsize=(13, 8))\nax = plt.subplot(1,2,1)\nax.set_title('Validation Data')\nax.set_autoscaley_on(False)\nax.set_ylim([32,43])\nax.set_autoscalex_on(False)\nax.set_xlim([-126,-112])\nplt.scatter(validation_examples['longitude'], validation_examples['latitude'], cmap='coolwarm', 
c=validation_targets['median_house_value']/validation_targets['median_house_value'].max())\n\nax = plt.subplot(1,2,2)\nax.set_title('Training Data')\nax.set_autoscaley_on(False)\nax.set_ylim([32,43])\nax.set_autoscalex_on(False)\nax.set_xlim([-126,-112])\nplt.scatter(training_examples['longitude'], training_examples['latitude'], cmap='coolwarm', c=training_targets['median_house_value']/training_targets['median_house_value'].max())\nplt.show() '''\n","sub_path":"tensorflow-neural-network-optimized.py","file_name":"tensorflow-neural-network-optimized.py","file_ext":"py","file_size_in_byte":10820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"653252707","text":"import boto3\nimport json\nimport decimal\nimport os\nimport shutil\nimport gc\nimport predict\nimport time\nimport random\n\ndef lambda_handler(event, context):\n '''\n Handles HTML generation.\n\n Input: JSON coordinates and boxes from detect handler\n Output: S3 keys for HTML and CSS files\n {\n 'html_key': string,\n 'css_key': string\n }\n '''\n # Json to list\n coordinates = []\n for key, value in event.items():\n coordinates.append(value)\n\n # Time in epoch for ID\n epoch_time = str(time.time())\n\n # S3 access\n s3 = boto3.client('s3')\n\n # Pick a random number to get the style file for\n rand_style = str(random.randint(1,11))\n style_file = \"style\" + rand_style + \".css\"\n css_key = epoch_time+\".css\"\n s3.download_file(\"cse110.html.css\", style_file, \"/tmp/\"+style_file)\n s3.upload_file(\"/tmp/\"+style_file, \"cse110.html.css\", css_key, ExtraArgs={'ContentType': 'text/css'})\n\n css_key = \"https://s3.us-east-2.amazonaws.com/cse110.html.css/\" + epoch_time + \".css\"\n \n # Write string to html file, then upload to S3\n html_string = predict.prediction(coordinates, css_key)\n with open(\"/tmp/temp.html\", \"w\") as text_file:\n text_file.write(html_string)\n html_key = epoch_time + \".html\"\n s3.upload_file(\"/tmp/temp.html\", \"cse110.html.html\", html_key, ExtraArgs={'ContentType': 'text/html'})\n\n # Deleting files in /tmp directory\n folder = '/tmp'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n print(\"deleting file: \"+file_path)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n #elif os.path.isdir(file_path): shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n \n print(\"Object leak: %s\" % str(gc.garbage))\n\n return { \"html_key\": html_key, \"css_key\": css_key }\n","sub_path":"Predict/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"529775262","text":"# -*- encoding: utf-8 -*-\n'''\n@File : annotation_tools.py\n@Time : 2019/12/02 11:14:56\n@Author : wangtf\n@Desc : None\n'''\n\n# here put the import lib\nimport os\nimport glob\nimport numpy as np\nfrom ._xml_parser import XmlParser\nimport matplotlib.pyplot as plt\n\n\ndef split_list(val, split_num):\n max_val = max(val)\n min_val = min(val)\n x = list(np.arange(min_val, max_val, (max_val - min_val) / split_num)) + [max_val]\n\n normalized_val = [(n - min_val) / (max_val - min_val) for n in val]\n normalized_val = [int(n * split_num) for n in normalized_val]\n y = [normalized_val.count(n) for n in range(split_num + 1)]\n return x, y\n\n\nclass AnnotationTools():\n \"\"\" deal with annotations in PascalVOC format dataset.\n\n Attributes:\n ann_dir: str, the dir including xmls.\n name_set: 
str, tha id list file.\n Raises:\n AssertError: can not find path.\n \"\"\"\n def __init__(self, ann_dir, name_set=None):\n self.ann_dir = ann_dir\n self.name_set = name_set\n self.ann_list = self.get_ann_list()\n print('Find {} xml files.'.format(len(self.ann_list)))\n\n def get_ann_list(self):\n if self.name_set is None:\n ann_list = glob.glob(os.path.join(self.ann_dir, '*.xml'))\n else:\n name_set_path = os.path.join(\n self.ann_dir, '../ImageSets/Main/{}.txt'.format(self.name_set))\n assert os.path.exists(\n name_set_path), 'Can not find file: {}'.format(name_set_path)\n with open(name_set_path) as f:\n name_list = f.read().strip().split('\\n')\n ann_list = [\n os.path.join(self.ann_dir, '{}.xml'.format(name))\n for name in name_list\n ]\n return ann_list\n\n def get_class_dict(self):\n name_dict = {}\n for xml_path in self.ann_list:\n xml_data = XmlParser().load(xml_path)\n xml_name_list = [obj['name'] for obj in xml_data['object']]\n for name in xml_name_list:\n if name not in name_dict:\n name_dict[name] = {'count': 0, 'included_file': []}\n name_dict[name]['count'] += 1\n file_name = os.path.basename(xml_path)\n if file_name not in name_dict[name]['included_file']:\n name_dict[name]['included_file'].append(file_name)\n return name_dict\n\n def get_bbox_info(self):\n id_bbox_map = {}\n for xml_path in self.ann_list:\n xml_data = XmlParser().load(xml_path)\n size = [\n int(xml_data['size']['heihgt']),\n int(xml_data['size']['width'])\n ]\n if 0 in size:\n print('Warrning: {} size error: {}'.format(xml_path, size))\n continue\n objects = xml_data['object']\n id_bbox_map[os.path.basename(xml_path)] = objects\n return id_bbox_map\n\n def iou_analyse(self, save_dir='./', split_num=100):\n \"\"\" Draw iou distribution in dataset.\n\n Args:\n save_dir: str, image save dir;\n split_num: int, the number of data partitions\n \"\"\"\n class_iou_map = {}\n for xml_path in self.ann_list:\n xml_data = XmlParser().load(xml_path)\n width = int(xml_data['size']['width'])\n height = int(xml_data['size']['height'])\n image_area = width * height\n\n for obj in xml_data['object']:\n if obj['name'] not in class_iou_map:\n class_iou_map[obj['name']] = []\n\n xmin = int(obj['bndbox']['xmin'])\n ymin = int(obj['bndbox']['ymin'])\n xmax = int(obj['bndbox']['xmax'])\n ymax = int(obj['bndbox']['ymax'])\n roi_area = (xmax - xmin) * (ymax - ymin)\n class_iou_map[obj['name']].append(roi_area / image_area)\n\n for key, val in class_iou_map.items():\n x, y = split_list(val, split_num)\n\n plt.figure(figsize=(8, 4))\n plt.plot(x, y, \"bD-\", linewidth=1)\n plt.xlabel(\"IOU(object area/image area)\")\n plt.ylabel(\"Times\")\n plt.title(\"The distribution of Objects' IOU in the dataset\")\n plt.savefig(\n os.path.join(save_dir, \"IOU_distribution-{}.jpg\".format(key)))\n return class_iou_map\n\n def height_analyse(self, save_dir='./', split_num=100):\n \"\"\" Draw height ratio distribution in dataset.\n\n Args:\n save_dir: str, image save dir;\n split_num: int, the number of data partitions\n \"\"\"\n class_iou_map = {}\n for xml_path in self.ann_list:\n xml_data = XmlParser().load(xml_path)\n height = int(xml_data['size']['height'])\n\n for obj in xml_data['object']:\n if obj['name'] not in class_iou_map:\n class_iou_map[obj['name']] = []\n\n ymin = int(obj['bndbox']['ymin'])\n ymax = int(obj['bndbox']['ymax'])\n class_iou_map[obj['name']].append((ymax - ymin) / height)\n\n for key, val in class_iou_map.items():\n x, y = split_list(val, split_num)\n\n plt.figure(figsize=(8, 4))\n plt.plot(x, y, \"bD-\", 
linewidth=1)\n plt.xlabel(\"Height ratio(object/image) percent/%\")\n plt.ylabel(\"Times\")\n plt.title(\"The distribution of Objects' height in the dataset\")\n plt.savefig(\n os.path.join(save_dir,\n \"Height_distribution-{}.jpg\".format(key)))\n return class_iou_map\n\n def width_analyse(self, save_dir='./', split_num=100):\n \"\"\" Draw width ratio distribution in dataset.\n\n Args:\n save_dir: str, image save dir;\n split_num: int, the number of data partitions\n \"\"\"\n class_iou_map = {}\n for xml_path in self.ann_list:\n xml_data = XmlParser().load(xml_path)\n width = int(xml_data['size']['width'])\n\n for obj in xml_data['object']:\n if obj['name'] not in class_iou_map:\n class_iou_map[obj['name']] = []\n\n xmin = int(obj['bndbox']['xmin'])\n xmax = int(obj['bndbox']['xmax'])\n width_ratio = (xmax - xmin) / width\n\n class_iou_map[obj['name']].append(width_ratio)\n\n for key, val in class_iou_map.items():\n x, y = split_list(val, split_num)\n\n plt.figure(figsize=(8, 4))\n plt.plot(x, y, \"bD-\", linewidth=1)\n plt.xlabel(\"Width ratio(object/image) percent/%\")\n plt.ylabel(\"Times\")\n plt.title(\"The distribution of Objects' width in the dataset\")\n plt.savefig(\n os.path.join(save_dir,\n \"Width_distribution-{}.jpg\".format(key)))\n return class_iou_map","sub_path":"pascal_voc_tools/annotation_tools.py","file_name":"annotation_tools.py","file_ext":"py","file_size_in_byte":6913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"612341144","text":"import json\r\nimport os\r\nimport re\r\nfrom pdb import set_trace as stop\r\n\r\nTEMPLATE_JSON_LOCATION = os.path.join(os.getcwd(), \"nsxjson.schemas\")\r\nTEMPLATE_STRUCT_LOCATION = os.path.join(os.getcwd(), \"nsxstruct.template\")\r\nFILE_GENERATED = os.path.join(os.getcwd(), \"inc\", \"nsxjson_gen.hpp\")\r\n\r\ndef generate_getter_string(item):\r\n ostr = \"\"\r\n if item[\"type\"] == \"string\":\r\n ostr += '\\tstring get_%s(const NSxTransac & ob) ' %item[\"name\"]\r\n ostr += 'const { return \"\\\\\"\" + ob.%s + \"\\\\\"\"; }\\n' %(item[\"name\"])\r\n else:\r\n ostr += \"\\tstring get_%s(const NSxTransac & ob) \" %item[\"name\"]\r\n ostr += \"const { return to_string(ob.%s); }\\n\" %(item[\"name\"])\r\n return ostr\r\n\r\ndef generate_getters(json_content):\r\n ostr = \"\"\r\n generated_getters = [] # not to generate the same getter twice\r\n for field, val in json_content[\"properties\"].items():\r\n if val[\"type\"] == \"array\":\r\n if isinstance(val[\"items\"], dict):\r\n if val[\"items\"][\"type\"] == \"array\":\r\n for item in val[\"items\"][\"items\"]:\r\n if item not in generated_getters:\r\n ostr += generate_getter_string(item)\r\n generated_getters.append(item)\r\n elif isinstance(val[\"items\"], list):\r\n for item in val[\"items\"]:\r\n if item not in generated_getters:\r\n ostr += generate_getter_string(item)\r\n generated_getters.append(item)\r\n # not an array but simply one element\r\n else:\r\n if val[\"items\"] not in generated_getters:\r\n ostr += generate_getter_string(val[\"items\"])\r\n generated_getters.append(val[\"items\"])\r\n return ostr\r\n\r\ndef get_struct_name(json_content):\r\n return json_content[\"name\"]\r\n\r\ndef fill_arr(arr, field, json_content):\r\n if isinstance(json_content[\"properties\"][field][\"items\"], dict):\r\n res = []\r\n if json_content[\"properties\"][field][\"items\"][\"type\"] == \"array\":\r\n for item in json_content[\"properties\"][field][\"items\"][\"items\"]:\r\n res.append('get_%s(item)' %item[\"name\"])\r\n 
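# hedged note: is_string() below depends on Python 2's 'unicode' builtin and would\r\n        # raise NameError on Python 3; a 2/3-compatible sketch (an assumption, not the\r\n        # author's code) could be:\r\n        #   def is_string(item):\r\n        #       try:\r\n        #           return isinstance(item, (str, unicode))  # Python 2\r\n        #       except NameError:\r\n        #           return isinstance(item, str)             # Python 3\r\n        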
arr.append(res)\r\n else:\r\n for item in json_content[\"properties\"][field][\"items\"]:\r\n arr.append('get_%s(item)' %item[\"name\"])\r\n # stop()\r\n\r\ndef is_string(item):\r\n if isinstance(item, str) or isinstance(item, unicode):\r\n return True\r\n return False\r\n\r\ndef generate_to_json(json_content):\r\n res = {}\r\n out = 'string out = \"{\";\\n'\r\n fields = json_content[\"required\"]\r\n for field in fields:\r\n if json_content[\"properties\"][field][\"type\"] == \"array\":\r\n res[field] = []\r\n fill_arr(res[field], field, json_content)\r\n else:\r\n res[field] = 'get_%s(item)' %json_content[\"properties\"][field][\"items\"][\"name\"]\r\n \r\n for key, val in res.items():\r\n if isinstance(val, list):\r\n out += '\\tstring %s = \"\\\\\"%s\\\\\": [\";' %(key, key)\r\n out += \"\\n\"\r\n elif is_string(val):\r\n out += '\\tstring %s;' %key\r\n out += \"\\n\"\r\n out += \"\\n\"\r\n\r\n out += \"\\tfor (auto const & item : m_series) {\\n\"\r\n for key, val in res.items():\r\n if isinstance(val[0], list):\r\n # out += \"\\t\\tvector %s_tmp;\\n\" %key\r\n for idx, item in enumerate(val[0]):\r\n if idx > 0:\r\n out += '%s + \", \" + ' %(item)\r\n else:\r\n out += '\\t\\t%s += \"[\" + %s + \", \" + ' %(key, item)\r\n out = out[:-7] + '\"], \"; '\r\n elif isinstance(val, list):\r\n out += '\\t\\t%s += %s + \", \"; \\n' %(key, val[0])\r\n else:\r\n out += '\\t\\t%s = \"\\\\\"%s\\\\\": \" + %s; \\n' %(key, key, val)\r\n\r\n out += \"\\n\"\r\n\r\n out += \"\\t}\\n\"\r\n\r\n for key, val in res.items():\r\n if isinstance(val, list):\r\n out += '\\n\\t%s.replace(%s.end()-2, %s.end(), \"]\");\\n' %(key, key, key)\r\n\r\n for key, val in res.items():\r\n out += '\\tout += %s + \", \";\\n' %(key)\r\n\r\n out += '\\n\\tout.replace(out.end()-2, out.end(), \"\");\\n'\r\n\r\n out += '\\tout += \"}\";\\n\\treturn out;';\r\n return out\r\n\r\ndef generate_headers():\r\n headers = \"#ifndef NS_X_STRUCT_TEMPLATE\\n\"\r\n headers += \"#define NS_X_STRUCT_TEMPLATE\\n\\n\"\r\n headers += \"#include \\n\"\r\n headers += '#include \"nsxtransac.hpp\"\\n\\n'\r\n headers += \"using namespace std;\"\r\n return headers\r\n\r\ndef main():\r\n with open(TEMPLATE_JSON_LOCATION, \"r\") as ifile:\r\n json_content = json.loads(ifile.read())\r\n\r\n with open(TEMPLATE_STRUCT_LOCATION, \"r\") as ifile:\r\n res = ifile.read()\r\n\r\n with open(FILE_GENERATED, \"w\") as ofile:\r\n ofile.write(\"\")\r\n\r\n headers = generate_headers()\r\n out = re.sub(\"{% HEADERS %}\", headers, res)\r\n\r\n for idx, schema in enumerate(json_content):\r\n getters = generate_getters(schema)\r\n name = get_struct_name(schema)\r\n tojson = generate_to_json(schema)\r\n\r\n if idx > 0:\r\n # generate include headers only once\r\n out = re.sub(\"{% HEADERS %}\", \"\", res)\r\n out = re.sub(\"{% NAME %}\", name, out)\r\n out = re.sub(\"{% GETTERS %}\", getters, out)\r\n out = re.sub(\"{% TO_JSON %}\", tojson, out)\r\n\r\n if idx == len(json_content) - 1:\r\n out += \"\\n#endif\\n\"\r\n\r\n with open(FILE_GENERATED, \"a+\") as ofile:\r\n ofile.write(out)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","sub_path":"backend/nsxjson.py","file_name":"nsxjson.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"437768372","text":"import os\nimport cv2\nimport numpy as np\n\nBASE_DIR = os.path.dirname(__file__)\nSYMBOL_DIR = os.path.join(BASE_DIR, \"Symbols3\")\nSYMBOL_DIR_EXTRA = os.path.join(BASE_DIR, \"SymbolExtra\")\nCHALLENGE_DIR = 
os.path.join(BASE_DIR, \"Challenge\")\nSYMBOL_PREFX = \"Slide\"\n\nnoise_max = 1000\ntitle_window = \"noise\"\n\ndef on_trackbar(val):\n    val = val/100\n    gauss = np.random.normal(0, val, img.size)\n    gauss = gauss.reshape(img.shape[0], img.shape[1], img.shape[2]).astype('uint8')\n    # Add the Gaussian noise to the image\n    img_gauss = cv2.add(img, gauss)\n    img_speck = img + img * gauss\n    cv2.putText(img_gauss, str(val), (0,img_gauss.shape[0]), cv2.FONT_HERSHEY_DUPLEX, 2, (0,0,255),1)\n    cv2.putText(img_speck, str(val), (0, img_gauss.shape[0]), cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 1)\n    cv2.imshow(title_window, img_gauss)\n    cv2.imshow(\"speck\", img_speck)\n\ntrackbar_name = 'sd x %d' % (noise_max/100)\n\nfor root, dirs, files in os.walk(CHALLENGE_DIR):\n    for file in files:\n        if file.endswith(\"png\") or file.endswith(\"jpg\") or file.endswith(\"jpeg\"):\n            img = cv2.imread(os.path.join(root, file))\n            cv2.imshow(\"ori\", img)\n\n            # Generate noise matrix\n            # Generate Gaussian noise\n            gauss = np.random.normal(0, 1, img.size)\n            gauss = gauss.reshape(img.shape[0], img.shape[1], img.shape[2]).astype('uint8')\n            # Add the Gaussian noise to the image\n            img_speck = img + img * gauss\n            img_gauss = cv2.add(img, gauss)\n            # Display the image\n            cv2.imshow(\"noise\", img_gauss)\n            cv2.imshow(\"speck\", img_speck)\n\non_trackbar(100)\ncv2.createTrackbar(trackbar_name, title_window , 0, noise_max, on_trackbar)\n\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"Eval/noiseGenerator.py","file_name":"noiseGenerator.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"237829809","text":"from typing import List\nimport numpy as np\nfrom mapadroid.db.DbWrapper import DbWrapper\nfrom mapadroid.route.RouteManagerBase import RoutePoolEntry\nfrom mapadroid.route.RouteManagerQuests import RouteManagerQuests\nfrom mapadroid.utils.collections import Location\nfrom mapadroid.utils.logging import get_logger, LoggerEnums\n\n\nlogger = get_logger(LoggerEnums.routemanager)\n\n\nclass RouteManagerLevelingRoutefree(RouteManagerQuests):\n    def __init__(self, db_wrapper: DbWrapper, dbm, area_id, coords: List[Location], max_radius: float,\n                 max_coords_within_radius: int, path_to_include_geofence: str, path_to_exclude_geofence: str,\n                 routefile: str, mode=None, init: bool = False, name: str = \"unknown\", settings: dict = None,\n                 level: bool = False, calctype: str = \"route\", joinqueue=None):\n        RouteManagerQuests.__init__(self, db_wrapper=db_wrapper, dbm=dbm, area_id=area_id, coords=coords,\n                                    max_radius=max_radius, max_coords_within_radius=max_coords_within_radius,\n                                    path_to_include_geofence=path_to_include_geofence,\n                                    path_to_exclude_geofence=path_to_exclude_geofence,\n                                    routefile=routefile, init=init,\n                                    name=name, settings=settings, mode=mode, level=level, calctype=calctype,\n                                    joinqueue=joinqueue\n                                    )\n\n    def _worker_changed_update_routepools(self):\n        with self._manager_mutex and self._workers_registered_mutex:\n            self.logger.info(\"Updating all routepools in level mode for {} origins\", len(self._routepool))\n            if len(self._workers_registered) == 0:\n                self.logger.info(\"No registered workers, aborting __worker_changed_update_routepools...\")\n                return False\n\n            any_at_all = False\n            for origin in self._routepool:\n                origin_local_list = []\n                entry: RoutePoolEntry = self._routepool[origin]\n\n                if len(entry.queue) > 0:\n                    self.logger.debug(\"origin {} already has a queue, do not touch...\", origin)\n                    continue\n                current_worker_pos = 
entry.current_pos\n unvisited_stops = self.db_wrapper.get_nearest_stops_from_position(geofence_helper=self.geofence_helper,\n origin=origin,\n lat=current_worker_pos.lat,\n lon=current_worker_pos.lng,\n limit=30,\n ignore_spinned=self.settings.get(\n \"ignore_spinned_stops\", True),\n maxdistance=5)\n if len(unvisited_stops) == 0:\n self.logger.info(\"There are no unvisited stops left in DB for {} - nothing more to do!\", origin)\n continue\n\n for coord in unvisited_stops:\n coord_location = Location(coord.lat, coord.lng)\n if coord_location in self._coords_to_be_ignored:\n self.logger.info('Already tried this Stop but it failed spinnable test, skip it')\n continue\n origin_local_list.append(coord_location)\n\n if len(unvisited_stops) > 0:\n self.logger.info(\"Recalc a route\")\n new_route = self._local_recalc_subroute(unvisited_stops)\n origin_local_list.clear()\n for coord in new_route:\n origin_local_list.append(Location(coord[\"lat\"], coord[\"lng\"]))\n\n # subroute is all stops unvisited\n self.logger.info(\"Origin {} has {} unvisited stops for this route\", origin, len(origin_local_list))\n entry.subroute = origin_local_list\n # let's clean the queue just to make sure\n entry.queue.clear()\n [entry.queue.append(i) for i in origin_local_list]\n any_at_all = len(origin_local_list) > 0 or any_at_all\n # saving new startposition of walker in db\n newstartposition: Location = entry.queue[0]\n self.db_wrapper.save_last_walker_position(origin=origin,\n lat=newstartposition.lat,\n lng=newstartposition.lng)\n return True\n\n def _local_recalc_subroute(self, unvisited_stops):\n to_be_route = np.zeros(shape=(len(unvisited_stops), 2))\n for i in range(len(unvisited_stops)):\n to_be_route[i][0] = float(unvisited_stops[i].lat)\n to_be_route[i][1] = float(unvisited_stops[i].lng)\n new_route = self.calculate_new_route(to_be_route, self._max_radius, self._max_coords_within_radius,\n False, 1,\n True)\n\n return new_route\n\n def _retrieve_latest_priority_queue(self):\n return None\n\n def _get_coords_post_init(self):\n return self.db_wrapper.stops_from_db(self.geofence_helper)\n\n def _cluster_priority_queue_criteria(self):\n pass\n\n def _priority_queue_update_interval(self):\n return 0\n\n def _recalc_route_workertype(self):\n self.recalc_route(self._max_radius, self._max_coords_within_radius, 1, delete_old_route=False,\n in_memory=True)\n self._init_route_queue()\n\n def _get_coords_after_finish_route(self) -> bool:\n self._manager_mutex.acquire()\n try:\n\n if self._shutdown_route:\n self.logger.info('Other worker shutdown route - leaving it')\n return False\n\n self._worker_changed_update_routepools()\n self._start_calc = False\n return True\n finally:\n self._manager_mutex.release()\n\n def _check_unprocessed_stops(self):\n self._manager_mutex.acquire()\n\n try:\n # We finish routes on a per walker/origin level, so the route itself is always the same as long as at\n # least one origin is connected to it.\n return self._stoplist\n finally:\n self._manager_mutex.release()\n\n def _start_routemanager(self):\n self._manager_mutex.acquire()\n try:\n if not self._is_started:\n self._is_started = True\n self.logger.info(\"Starting routemanager\")\n\n if self._shutdown_route:\n self.logger.info('Other worker shutdown route - leaving it')\n return False\n\n self._prio_queue = None\n self.delay_after_timestamp_prio = None\n self.starve_route = False\n self._start_check_routepools()\n\n return True\n\n finally:\n self._manager_mutex.release()\n\n return True\n\n def _recalc_stop_route(self, stops):\n 
self._clear_coords()\n        self.add_coords_list(stops)\n        self._overwrite_calculation = True\n        self._recalc_route_workertype()\n        self._init_route_queue()\n\n    def _delete_coord_after_fetch(self) -> bool:\n        return False\n\n    def _quit_route(self):\n        self.logger.info('Shutdown Route')\n        if self._is_started:\n            self._is_started = False\n            self._round_started_time = None\n            if self.init:\n                self._first_started = False\n            self._shutdown_route = False\n\n        # clear not processed stops\n        self._stops_not_processed.clear()\n        self._coords_to_be_ignored.clear()\n        self._stoplist.clear()\n\n    def _check_coords_before_returning(self, lat, lng, origin):\n        if self.init:\n            self.logger.debug('Init Mode - coord is valid')\n            return True\n        stop = Location(lat, lng)\n        self.logger.info('Checking Stop with ID {}', stop)\n        if stop in self._coords_to_be_ignored:\n            self.logger.info('Already tried this Stop and failed it')\n            return False\n        self.logger.info('DB knows nothing of this stop for {} lets try and go there', origin)\n        return True\n","sub_path":"mapadroid/route/RouteManagerLevelingRoutefree.py","file_name":"RouteManagerLevelingRoutefree.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"626831670","text":"from appium import webdriver\n\ncaps = {\n    \"platformName\": \"Android\",\n    \"platformVersion\": \"5.1.1\",\n    \"deviceName\": \"127.0.0.1:62001\",\n    \"browserName\":\"Chrome\"\n}\ndriver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", caps)\n\ndriver.get(\"http://baidu.com\")\ndriver.find_element_by_id(\"\")","sub_path":"practice/myLove/appium/Day03/App_browser.py","file_name":"App_browser.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"566117894","text":"\nfrom flask_restful import Resource\nfrom flask_jsonpify import jsonify\nimport logging\nfrom modules import DBConn\n\n\nclass MessageNumber(Resource):\n    def get(self, user_id):\n        try:\n            conn = DBConn.conn().connect()  # connect to database\n            sql_request = \"\"\"select count(*) as mes_num from \npublic.message s\nwhere to_id = {} and not is_read;\"\"\".format(user_id)\n            logging.info('Request to DB: {}'.format(sql_request))\n            query = conn.execute(sql_request)  # This line performs query and returns json result\n            result = dict(zip(tuple(query.keys()), query.fetchone()))\n            return jsonify(result)\n        except Exception as e:\n            logging.info('Exception: {}'.format(e))\n            return 'Exception: {}'.format(e), 500","sub_path":"modules/MessageNumber.py","file_name":"MessageNumber.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"470903825","text":"from module.windowControl import WindowController\nfrom module import handTrackingModule as htm\nfrom module import gestureModelModule as gmm\nfrom module import draw\nfrom tensorflow import keras\nimport cv2\n\nclass Gesture():\n    def __init__(self):\n        # webcam size settings\n        self.hCam, self.wCam = 640, 640 \n        # required variables\n        self.draw_arr = []\n        self.in_check = 0\n        self.out_check = 0\n        self.pyauto = WindowController()\n        # model-related variables\n        self.model_selection = 'CNN'\n        self.conf_limit = 0.75\n        self.detector = htm.HandDetector(maxHands=1, detectionCon=0.75)\n        # self.gesture_model = keras.models.load_model('model/vgg16_model_8cls_2dropnorm_randomsd.h5')\n        self.gesture_model = keras.models.load_model('model/VGGColab-2021-07-06_08-26-05.h5')\n\n    def predict(self, img, 
modeChange):\n        # runs when a hand is detected\n        img = self.detector.findHands(img)\n        self.landmark_list, _ = self.detector.findPosition(img, draw=False)\n        action = ''\n        imgCanvas = None\n        control_mode = False\n        if self.landmark_list:\n            self.out_check = 0\n            self.fingers = self.detector.fingersUp()\n            # control mode only while just the index finger is up\n            control_mode = (self.fingers[1]==1) and (1 not in self.fingers[2:])\n            if control_mode:\n                self.draw_arr.append(self.landmark_list[8][1:])\n                cv2.circle(img, tuple(self.landmark_list[8][1:]), 7, (255,0,0), cv2.FILLED)\n        else:\n            self.out_check += 1\n            if self.out_check == 10:\n                if 30 < len(self.draw_arr) <= 100:\n                    # build the model input from the saved coordinates\n                    # run model inference\n                    self.draw_arr = self.draw_arr[10:-7]  # exclude 10 frames from the front and back ## -5\n                    input_data, imgCanvas = gmm.trans_input(self.draw_arr, self.wCam, self.hCam, self.model_selection)\n                    pred, confidence = gmm.predict(self.gesture_model, input_data)\n                    \n                    if confidence > self.conf_limit:\n                        modes = [self.pyauto.youtube, self.pyauto.webMode, self.pyauto.presentMode]\n                        action = modes[modeChange](pred)\n                        draw.save_file(imgCanvas, self.draw_arr, pred)\n                self.draw_arr.clear()\n\n        return control_mode, action, imgCanvas","sub_path":"제출용/getGesture.py","file_name":"getGesture.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"355443093","text":"import os\nimport sys\nimport glob\n\n\ndef get_all_protos(proto_dir):\n    '''Get all proto files in the proto_dir\n\n    :return: the list of relative path of all proto files\n    '''\n    # get proto dirs\n    parse_paths = glob.glob('{dir}/**'.format(dir=proto_dir), recursive=False)\n    proto_paths = [path for path in parse_paths if path.endswith('.proto')]\n    return proto_paths\n\n\ndef get_all_subdirs(parse_dir):\n    parse_paths = glob.glob('{dir}/**'.format(dir=parse_dir), recursive=True)\n    suddirs = [path for path in parse_paths if os.path.isdir(path)]\n    return suddirs\n\n\ndef create_build_cmd_base(proto_dir, store_dir):\n    # run the protoc\n    py_exe = sys.executable\n    cmd = py_exe + ' -m grpc_tools.protoc '\n\n    # define where to find proto files\n    cmd += '--proto_path={proto_dir} '.format(proto_dir=proto_dir)\n    # cmd += '--proto_path=msg '\n\n    # define the location for python output\n    cmd += '--python_out={store_dir} '.format(store_dir=store_dir)\n\n    # define the location for grpc output\n    cmd += '--grpc_python_out={store_dir} '.format(store_dir=store_dir)\n\n    return cmd\n\n\ndef build_proto_file(base_cmd, proto_file_path):\n    ''' Protoc one proto file\n\n    :param base_cmd: the command containing everything needed beside the proto file to build one proto file\n    :param proto_file_path:\n    :return:\n    '''\n    cmd = base_cmd + '{path} '.format(path=proto_file_path)\n    print(cmd)\n    os.system(cmd)\n\n\ndef build_proto_files():\n    gen_list = ['algorithms', 'integration', 'services']\n    for gen_key in gen_list:\n        proto_dir = 'protos'\n        store_dir = '.'\n        base_cmd = create_build_cmd_base(proto_dir, store_dir)\n        proto_files = get_all_protos(proto_dir+'/{key}'.format(key=gen_key))\n        for proto_file in proto_files:\n            print(proto_file.split('/')[-1])\n            build_proto_file(base_cmd, proto_file)\n\n\nif __name__ == '__main__':\n    build_proto_files()\n","sub_path":"proto/build_proto/build_proto.py","file_name":"build_proto.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"518063425","text":"from django import template\nfrom django.template.defaultfilters import stringfilter\nfrom 
django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n@register.filter(needs_autoescape=True)\n@stringfilter\ndef highlight(value, search_term, autoescape=True):\n return mark_safe(replace_all(value, search_term))\n\n#replaces words regardless of captalization\ndef replace_all(value, s_term):\n\tvalue = value.replace(s_term, \"%s\" % s_term)\n\tvalue = value.replace(s_term.lower(), \"%s\" % s_term.lower())\n\tvalue = value.replace(s_term.upper(), \"%s\" % s_term.upper())\n\tvalue = value.replace(s_term.capitalize(), \"%s\" % s_term.capitalize())\n\treturn value","sub_path":"geography/templatetags/highlight.py","file_name":"highlight.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"64036620","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nfig, ax = plt.subplots(1, 1)\nx_range = np.array([-50, 50, 20]) # 1 indicates 0.01 nautical mile\ny_range = np.array([-25, 25, 25]) # 1 indicates 0.01 nautical mile\nax.plot([x_range[0], x_range[1]],[0, 0],'--r')\n\n\n\nax.plot(0, 5.1, '>k', label = 'aircraft 1')\nax.plot(0, 0, '\\w+)/$', views.ajax_make_move, name='ajax_make_move'),\n url(r'^game/$', views.game_page, name='game'),\n)","sub_path":"TicTacToe/game/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"565488316","text":"from graphene import ObjectType, Field, Boolean, String, List\nfrom ..models import LogisticsRequest, TransferRequest, LogisticsRequestPosition, TransferPosition\nfrom .types import PagedLogisticsRequestType, LogisticsRequestFilter, TransferRequestData, \\\n\tPagedTransferRequestsType, TransferRequestsFilter, PagedLogisticsRequestPositionType, LogisticsRequestPositionFilter,\\\n\tTransferRequestType, LogisticsRequestType, GoodLocationInfoType\nfrom crm.schema.types import PagedInput, IntID\nfrom warehouse.models import Good\n\nclass Query(ObjectType):\n\tpaged_logistics_request = Field(PagedLogisticsRequestType, paged=PagedInput(required=True), filters=LogisticsRequestFilter())\n\ttransfer_request = Field(TransferRequestData, requestId=IntID(required=True), sort_by=String(), desc=Boolean())\n\tget_logistics_request = Field(LogisticsRequestType, id=IntID())\n\tpaged_transfer_requests = Field(PagedTransferRequestsType, paged=PagedInput(required=True), filters=TransferRequestsFilter())\n\tpaged_logistics_request_position = Field(PagedLogisticsRequestPositionType, paged=PagedInput(required=True),\n\t filters=LogisticsRequestPositionFilter())\n\ttransfer_request_position_info = List(TransferRequestType, position_id=IntID(required=True))\n\tgood_location_info = List(GoodLocationInfoType, position_id=IntID(required=True))\n\tget_logistics_request_id = IntID()\n\tget_logistics_request_project_id = IntID()\n\n\tdef resolve_paged_logistics_request(self, info, paged, **kwargs):\n\t\tlogistics_requests, total_count = LogisticsRequest.objects.list_paged_logistics_requests(info.context.user,\n\t\t **paged, **kwargs)\n\t\treturn PagedLogisticsRequestType(logistics_requests=logistics_requests, total_count=total_count)\n\n\tdef resolve_transfer_request(self, info, **kwargs):\n\t\ttransfer_request, transfer_positions = TransferRequest.objects.transfer_request(info, **kwargs)\n\t\treturn TransferRequestData(transfer_request=transfer_request, transfer_positions=transfer_positions)\n\n\tdef 
resolve_get_logistics_request(self, info, id):\n\t\treturn LogisticsRequest.objects.get(id=id)\n\n\tdef resolve_paged_transfer_requests(self, info, paged, **kwargs):\n\t\ttransfer_requests, total_count = TransferRequest.objects.list_paged_transfer_requests(info.context.user,\n\t\t **paged, **kwargs)\n\t\treturn PagedTransferRequestsType(transfer_requests=transfer_requests, total_count=total_count)\n\n\tdef resolve_paged_logistics_request_position(self, info, paged, **kwargs):\n\t\tlogistics_request_positions, total_count = LogisticsRequestPosition.objects.list_paged_logistics_request_position(info.context.user,\n\t\t **paged, **kwargs)\n\t\treturn PagedLogisticsRequestPositionType(logistics_request_positions=logistics_request_positions, total_count=total_count)\n\n\tdef resolve_transfer_request_position_info(self, info, position_id):\n\t\ttry:\n\t\t\tposition = LogisticsRequestPosition.objects.get(id=position_id)\n\t\texcept LogisticsRequestPosition.DoesNotExist:\n\t\t\treturn []\n\t\ttransfer_requests = []\n\t\tfor tp in TransferPosition.objects.filter(good__good_kind=position.good_kind, good__unit=position.unit, transfer_request__completed=False).exclude(logistics_request_position=position):\n\t\t\ttransfer_request = tp.transfer_request\n\t\t\ttransfer_request.count = '%s %s' % (str(int(tp.count) if isinstance(tp.count, float) and tp.count.is_integer() else tp.count), tp.good.unit.short_name)\n\t\t\ttransfer_requests.append(transfer_request)\n\t\ttransfer_requests = list(set(transfer_requests))\n\t\treturn transfer_requests\n\n\tdef resolve_good_location_info(self, info, position_id):\n\t\ttry:\n\t\t\tposition = LogisticsRequestPosition.objects.get(id=position_id)\n\t\texcept LogisticsRequestPosition.DoesNotExist:\n\t\t\treturn []\n\t\tresult = []\n\t\tfor g in Good.objects.filter(good_kind=position.good_kind, unit=position.unit).exclude(location_id__in=(1, 646)):\n\t\t\tif (g.project and g.project.id == position.request.reason.project.id) or \\\n\t\t\t\t\t(g.location.project and g.location.project.id == position.request.reason.project.id):\n\t\t\t\tcontinue\n\t\t\tresult.append(GoodLocationInfoType(project=g.location.project, good=g))\n\t\treturn result\n\n\n\tdef resolve_get_logistics_request_id(self, info):\n\t\tlr = LogisticsRequest.objects.filter(logisticsrequestposition__transferposition__transfer_request__ready_to_go=False,\n\t\t logisticsrequestposition__transferposition__transfer_request__who_requested=info.context.user).distinct()\n\t\tif lr:\n\t\t\tlr = lr[0].id\n\t\t\treturn lr\n\t\telse:\n\t\t\treturn -1\n\n\tdef resolve_get_logistics_request_project_id(self, info):\n\t\tproj = None\n\t\ttr = TransferRequest.objects.filter(ready_to_go=False, who_requested=info.context.user)\n\t\tif tr:\n\t\t\ttp = TransferPosition.objects.filter(transfer_request=tr[0]).select_related('logistics_request_position__request')\n\t\t\tproj = tp[0].logistics_request_position.request.reason.project\n\n\t\tif proj:\n\t\t\tproj = proj.id\n\t\t\treturn proj\n\t\telse:\n\t\t\treturn -1\n","sub_path":"backend_v3/logistics/schema/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"491946350","text":"import pandas as pd\nimport numpy as np\npd.set_option('display.precision', 1)\npd.set_option('display.width', None) # None means all data displayed\npd.set_option('display.max_rows', None)\n\n'''\nCalculate the total time of all phone calls, taken from datafile\n'''\n\ndef main(): \n 
column_names = ['A', 'B', 'time', 'D', 'E']\n df = pd.read_csv(\"data/phonecalls.dat\", \n skiprows = 14,\n engine = 'python',\n names = column_names, \n sep = '(£|Calls to UK landlines|calls|mobiles)')\n \n# extract time column and split into hours, mins, secs\n df2 = df['time'].str.split(':',expand=True)\n df2.columns = ['hour','min','sec']\n df2 = df2.astype('int')\n hours, mins, secs = df2[['hour', 'min','sec']].sum()\n\n def convertTime(hours, mins, secs):\n mins += secs // 60\n secs = secs % 60\n hours += mins//60\n mins = mins %60\n print(f\"Total time = {hours:02}:{mins:02}:{secs:02}\") \n\n convertTime(hours, mins, secs)\n\nmain()\n","sub_path":"Level 2/13 Scientific Python/Pandas/12_more_on_dataframes.py","file_name":"12_more_on_dataframes.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"639448204","text":"import io, sys, os\nsys.path.append(os.environ['DE_PATH'])\nfrom django.test import RequestFactory\nfrom django.conf import settings\nfrom request_logging.middleware import MAX_BODY_LENGTH, LoggingMiddleware\nfrom request_logging.middleware import match_ignored\nimport request_logging\nimport unittest\nimport mock\n\n#settings.configure()\n\n@mock.patch.object(request_logging.middleware, \"request_logger\")\nclass ChunkedLogTestCase(unittest.TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.middleware = LoggingMiddleware()\n\n def test_does_not_error_with_binary_content_larger_than_chunk_size(self, mock_log):\n body = MAX_BODY_LENGTH * \"0\" + \"1\"\n datafile = io.StringIO(body)\n request = self.factory.post(\"/somewhere\", data={\"file\": datafile})\n self.middleware.process_request(request)\n self.assert_logged(mock_log, str(request.body[:MAX_BODY_LENGTH]))\n self.assert_not_logged(mock_log, body)\n\n def test_request_body_logged(self, mock_log):\n body = \"some body\"\n datafile = io.StringIO(body)\n request = self.factory.post(\"/somewhere\", data={\"file\": datafile})\n self.middleware.process_request(request)\n self.assert_logged(mock_log, \"some body\")\n\n def assert_logged(self, mock_log, expected_entry):\n calls = mock_log.log.call_args_list\n text = \" \".join([call[0][1] for call in calls])\n self.assertTrue(expected_entry in text)\n\n def assert_not_logged(self, mock_log, unexpected_entry):\n calls = mock_log.log.call_args_list\n text = \" \".join([call[0][1] for call in calls])\n self.assertTrue(unexpected_entry not in text)\n\nclass DigitalEventsTestCase(unittest.TestCase):\n \"\"\"\n First argument is path to match.\n Second argument is a list of paths to match exactly.\n Third argument is a list of paths to match only if initial starts with\n the pattern.\n \"\"\"\n def test_match_ignored_path_exact(self):\n self.assertTrue (match_ignored('/foo', ['/foo'], []))\n self.assertTrue (match_ignored('/foo', ['/foo', '/bar'], []))\n self.assertTrue (match_ignored('/foo/$?=', ['/bar/', '/foo/$?='], []))\n\n def test_match_ignored_path_startswith(self):\n self.assertTrue (match_ignored('/foobarbar', [], ['/foo']))\n self.assertTrue (match_ignored('/foo/bar/foobar', [], ['/foo/']))\n\n\n def test_no_match(self):\n self.assertFalse (match_ignored('/foobarbar', [], ['/oo']))\n self.assertFalse (match_ignored('/foobarbar', ['/foobar'], ['/oo']))\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
+{"seq_id":"259583864","text":"from django.conf.urls import patterns, url, include\nfrom django.conf import settings\n\n\nfrom taskmanager.api import UserResource\nfrom task.api import TaskResource\nfrom tastypie.api import Api\n\nv1_api = Api(api_name='v1')\nv1_api.register(UserResource())\nv1_api.register(TaskResource())\n\nurlpatterns = patterns('',\n \n # Landing Page\n url(r'^$', 'major.views.landing'),\n \n (r'^api/', include(v1_api.urls)),\n \n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$',\n 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT, }),\n )\n","sub_path":"taskmanager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"480495121","text":"import re\n\nfrom discord.ext import commands\nimport os\nimport logging\nimport pika\nimport json\n\n\nRABBIT_MQ_HOST = os.environ.get(\"RABBITMQ_SERVICE_SERVICE_HOST\", \"127.0.0.1\")\nRABBIT_MQ_PORT = os.environ.get(\"RABBITMQ_SERVICE_SERVICE_PORT\", 5672)\nbot = commands.Bot(command_prefix=\"!\")\n\n\n@bot.event\nasync def on_ready():\n logging.info(f\"[LISTENER]Authenticated as {bot.user.name}\")\n\n\ndef send_to_queue(msg):\n logging.info(f\"CONNECTING to {RABBIT_MQ_HOST} {RABBIT_MQ_PORT}\")\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=RABBIT_MQ_HOST, port=RABBIT_MQ_PORT))\n channel = connection.channel()\n channel.queue_declare(queue='amass_jobs')\n channel.basic_publish(exchange='', routing_key='amass_jobs', body=str.encode(msg))\n print(f\"[+] Sent '{msg}'\")\n connection.close()\n\n\n@bot.command()\nasync def amass(ctx, domain):\n print(f\"Triggering amass recon scan on {domain}\")\n if not bool(re.match(\"[a-zA-Z0-9][a-zA-Z0-9-]{1,61}[a-zA-Z0-9]\\.[a-zA-Z]{2,}\", domain)):\n await ctx.send(f\"'{domain}' did not pass domain validation\")\n return\n\n await ctx.send(f\"[+]Attempting to start amass scan for {domain}\")\n try:\n send_to_queue(domain)\n except Exception as e:\n logging.error(f\"Failed to send jobs to queue due to {e}\")\n await ctx.send(f\"[+] Failed to make api request due to {e}\")\n else:\n await ctx.send(f\"[+]{domain} has been submitted to the amass scanner\")\n logging.error(f\"[+]{domain} has been submitted to the amass scanner\")\n\n\n@bot.command()\nasync def queue_length(ctx, queue_name):\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=RABBIT_MQ_HOST, port=RABBIT_MQ_PORT))\n channel = connection.channel()\n q = channel.queue_declare(queue_name)\n q_len = q.method.message_count\n await ctx.send(f\"Queue '{queue_name}' as {q_len} messages\")\n\n\n@bot.command()\nasync def test(ctx, queue_name):\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=RABBIT_MQ_HOST, port=RABBIT_MQ_PORT))\n channel = connection.channel()\n method_frame, header_frame, body = channel.basic_get(queue_name)\n if method_frame:\n print(method_frame, header_frame, body)\n channel.basic_ack(method_frame.delivery_tag)\n else:\n print('No message returned')\n\n\n@bot.command()\nasync def config(ctx):\n \"\"\"\n Sends the current config to the discord servicer\n \"\"\"\n rabbit_mq = {\"mq_host\": RABBIT_MQ_HOST, \"mq_port\": RABBIT_MQ_PORT}\n await ctx.send(json.dumps(rabbit_mq))\n\n\ndef main():\n bot.run(os.environ.get(\"TOKEN\"))\n\n\n\nif __name__ == '__main__':\n 
main()\n\n\n","sub_path":"scanner/discord/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"441200819","text":"#!/usr/bin/env python\n\n# Copyright (c) 2018-2019 Intel Corporation\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see .\n\n\"\"\"\nWelcome to CARLA scenario_runner\n\nThis is the main script to be executed when running a scenario.\nIt loads the scenario configuration, loads the scenario and manager,\nand finally triggers the scenario execution.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport traceback\nimport argparse\nfrom argparse import RawTextHelpFormatter\nfrom datetime import datetime\nfrom distutils.version import LooseVersion\nimport importlib\nimport inspect\nimport time\nimport pkg_resources\n\nimport carla\n\nfrom srunner.scenariomanager.carla_data_provider import *\nfrom srunner.scenariomanager.scenario_manager import ScenarioManager\nfrom srunner.scenarios.background_activity import *\nfrom srunner.scenarios.control_loss import *\nfrom srunner.scenarios.follow_leading_vehicle import *\nfrom srunner.scenarios.maneuver_opposite_direction import *\nfrom srunner.scenarios.master_scenario import *\nfrom srunner.scenarios.no_signal_junction_crossing import *\nfrom srunner.scenarios.object_crash_intersection import *\nfrom srunner.scenarios.object_crash_vehicle import *\nfrom srunner.scenarios.opposite_vehicle_taking_priority import *\nfrom srunner.scenarios.other_leading_vehicle import *\nfrom srunner.scenarios.signalized_junction_left_turn import *\nfrom srunner.scenarios.signalized_junction_right_turn import *\nfrom srunner.scenarios.basic_scenario import BasicScenario\nfrom srunner.scenarios.open_scenario import OpenScenario\nfrom srunner.tools.config_parser import *\nfrom srunner.tools.openscenario_parser import OpenScenarioConfiguration\n\n# Version of scenario_runner\nVERSION = 0.5\n\n\n# Dictionary of all supported scenarios.\n# key = Name of config file in Configs/\n# value = List as defined in the scenario module\nSCENARIOS = {\n \"BackgroundActivity\": BACKGROUND_ACTIVITY_SCENARIOS,\n \"FollowLeadingVehicle\": FOLLOW_LEADING_VEHICLE_SCENARIOS,\n \"ObjectCrossing\": OBJECT_CROSSING_SCENARIOS,\n \"RunningRedLight\": RUNNING_RED_LIGHT_SCENARIOS,\n \"NoSignalJunction\": NO_SIGNAL_JUNCTION_SCENARIOS,\n \"VehicleTurning\": VEHICLE_TURNING_SCENARIOS,\n \"ControlLoss\": CONTROL_LOSS_SCENARIOS,\n \"OppositeDirection\": MANEUVER_OPPOSITE_DIRECTION,\n \"OtherLeadingVehicle\": OTHER_LEADING_VEHICLE_SCENARIOS,\n \"SignalizedJunctionRightTurn\": TURNING_RIGHT_SIGNALIZED_JUNCTION_SCENARIOS,\n \"SignalizedJunctionLeftTurn\": TURN_LEFT_SIGNALIZED_JUNCTION_SCENARIOS,\n \"MasterScenario\": MASTER_SCENARIO\n}\n\n\nclass ScenarioRunner(object):\n\n \"\"\"\n This is the core scenario runner module. 
It is responsible for\n running (and repeating) a single scenario or a list of scenarios.\n\n Usage:\n scenario_runner = ScenarioRunner(args)\n scenario_runner.run(args)\n del scenario_runner\n \"\"\"\n\n ego_vehicles = []\n\n # Tunable parameters\n client_timeout = 30.0 # in seconds\n wait_for_world = 20.0 # in seconds\n frame_rate = 20.0 # in Hz\n\n # CARLA world and scenario handlers\n world = None\n manager = None\n\n additional_scenario_module = None\n\n def __init__(self, args):\n \"\"\"\n Setup CARLA client and world\n Setup ScenarioManager\n \"\"\"\n\n # First of all, we need to create the client that will send the requests\n # to the simulator. Here we'll assume the simulator is accepting\n # requests in the localhost at port 2000.\n self.client = carla.Client(args.host, int(args.port))\n self.client.set_timeout(self.client_timeout)\n\n dist = pkg_resources.get_distribution(\"carla\")\n if LooseVersion(dist.version) < LooseVersion('0.9.6'):\n raise ImportError(\"CARLA version 0.9.6 or newer required. CARLA version found: {}\".format(dist))\n\n # Load additional scenario definitions, if there are any\n if args.additionalScenario != '':\n module_name = os.path.basename(args.additionalScenario).split('.')[0]\n sys.path.insert(0, os.path.dirname(args.additionalScenario))\n self.additional_scenario_module = importlib.import_module(module_name)\n\n def __del__(self):\n \"\"\"\n Cleanup and delete actors, ScenarioManager and CARLA world\n \"\"\"\n\n self.cleanup(True)\n if self.manager is not None:\n del self.manager\n if self.world is not None:\n del self.world\n\n def get_scenario_class_or_fail(self, scenario):\n \"\"\"\n Get scenario class by scenario name\n If scenario is not supported or not found, exit script\n \"\"\"\n\n for scenarios in SCENARIOS.values():\n if scenario in scenarios:\n if scenario in globals():\n return globals()[scenario]\n\n for member in inspect.getmembers(self.additional_scenario_module):\n if scenario in member and inspect.isclass(member[1]):\n return member[1]\n\n print(\"Scenario '{}' not supported ... 
Exiting\".format(scenario))\n sys.exit(-1)\n\n def cleanup(self, ego=False):\n \"\"\"\n Remove and destroy all actors\n \"\"\"\n\n CarlaDataProvider.cleanup()\n CarlaActorPool.cleanup()\n\n for i, _ in enumerate(self.ego_vehicles):\n if self.ego_vehicles[i]:\n if ego:\n self.ego_vehicles[i].destroy()\n self.ego_vehicles[i] = None\n self.ego_vehicles = []\n\n def prepare_ego_vehicles(self, config, wait_for_ego_vehicles=False):\n \"\"\"\n Spawn or update the ego vehicle according to\n its parameters provided in config\n\n As the world is re-loaded for every scenario, no ego exists so far\n \"\"\"\n\n if not wait_for_ego_vehicles:\n for vehicle in config.ego_vehicles:\n self.ego_vehicles.append(CarlaActorPool.setup_actor(vehicle.model,\n vehicle.transform,\n vehicle.rolename,\n True))\n else:\n ego_vehicle_missing = True\n while ego_vehicle_missing:\n self.ego_vehicles = []\n ego_vehicle_missing = False\n for ego_vehicle in config.ego_vehicles:\n ego_vehicle_found = False\n carla_vehicles = CarlaDataProvider.get_world().get_actors().filter('vehicle.*')\n for carla_vehicle in carla_vehicles:\n if carla_vehicle.attributes['role_name'] == ego_vehicle.rolename:\n ego_vehicle_found = True\n self.ego_vehicles.append(carla_vehicle)\n break\n if not ego_vehicle_found:\n ego_vehicle_missing = True\n break\n\n for i, _ in enumerate(self.ego_vehicles):\n self.ego_vehicles[i].set_transform(config.ego_vehicles[i].transform)\n\n # sync state\n CarlaDataProvider.get_world().tick()\n\n def analyze_scenario(self, args, config):\n \"\"\"\n Provide feedback about success/failure of a scenario\n \"\"\"\n\n current_time = str(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))\n junit_filename = None\n config_name = config.name\n if args.outputDir != '':\n config_name = os.path.join(args.outputDir, config_name)\n if args.junit:\n junit_filename = config_name + current_time + \".xml\"\n filename = None\n if args.file:\n filename = config_name + current_time + \".txt\"\n\n if not self.manager.analyze_scenario(args.output, filename, junit_filename):\n print(\"Success!\")\n else:\n print(\"Failure!\")\n\n def load_and_wait_for_world(self, args, config):\n \"\"\"\n Load a new CARLA world and provide data to CarlaActorPool and CarlaDataProvider\n \"\"\"\n\n if args.reloadWorld:\n self.world = self.client.load_world(config.town)\n settings = self.world.get_settings()\n settings.fixed_delta_seconds = 1.0 / self.frame_rate\n self.world.apply_settings(settings)\n else:\n # if the world should not be reloaded, wait at least until all ego vehicles are ready\n ego_vehicle_found = False\n if args.waitForEgo:\n while not ego_vehicle_found:\n vehicles = self.client.get_world().get_actors().filter('vehicle.*')\n for ego_vehicle in config.ego_vehicles:\n ego_vehicle_found = False\n for vehicle in vehicles:\n if vehicle.attributes['role_name'] == ego_vehicle.rolename:\n ego_vehicle_found = True\n break\n if not ego_vehicle_found:\n print(\"Not all ego vehicles ready. Waiting ... 
\")\n time.sleep(1)\n break\n\n self.world = self.client.get_world()\n CarlaActorPool.set_client(self.client)\n CarlaActorPool.set_world(self.world)\n CarlaDataProvider.set_world(self.world)\n\n # Wait for the world to be ready\n self.world.tick()\n\n if CarlaDataProvider.get_map().name != config.town:\n print(\"The CARLA server uses the wrong map!\")\n print(\"This scenario requires to use map {}\".format(config.town))\n return False\n\n return True\n\n def load_and_run_scenario(self, args, config, scenario):\n \"\"\"\n Load and run the given scenario\n \"\"\"\n\n # Set the appropriate weather conditions\n weather = carla.WeatherParameters(\n cloudyness=config.weather.cloudyness,\n precipitation=config.weather.precipitation,\n precipitation_deposits=config.weather.precipitation_deposits,\n wind_intensity=config.weather.wind_intensity,\n sun_azimuth_angle=config.weather.sun_azimuth,\n sun_altitude_angle=config.weather.sun_altitude\n )\n\n self.world.set_weather(weather)\n\n # Load scenario and run it\n self.manager.load_scenario(scenario)\n self.manager.run_scenario()\n\n # Provide outputs if required\n self.analyze_scenario(args, config)\n\n # Stop scenario and cleanup\n self.manager.stop_scenario()\n scenario.remove_all_actors()\n\n self.cleanup()\n\n def run(self, args):\n \"\"\"\n Run all scenarios according to provided commandline args\n \"\"\"\n\n if args.openscenario:\n self.run_openscenario(args)\n return\n\n # Setup and run the scenarios for repetition times\n for _ in range(int(args.repetitions)):\n\n # Load the scenario configurations provided in the config file\n scenario_configurations = None\n scenario_config_file = find_scenario_config(args.scenario, args.configFile)\n if scenario_config_file is None:\n print(\"Configuration for scenario {} cannot be found!\".format(args.scenario))\n continue\n\n scenario_configurations = parse_scenario_configuration(scenario_config_file, args.scenario)\n\n # Execute each configuration\n config_counter = 0\n for config in scenario_configurations:\n\n if not self.load_and_wait_for_world(args, config):\n self.cleanup()\n continue\n\n # Create scenario manager\n self.manager = ScenarioManager(self.world, args.debug)\n\n # Prepare scenario\n print(\"Preparing scenario: \" + config.name)\n scenario_class = self.get_scenario_class_or_fail(config.type)\n try:\n CarlaActorPool.set_world(self.world)\n self.prepare_ego_vehicles(config)\n scenario = scenario_class(self.world,\n self.ego_vehicles,\n config,\n args.randomize,\n args.debug)\n except Exception as exception:\n print(\"The scenario cannot be loaded\")\n if args.debug:\n traceback.print_exc()\n print(exception)\n self.cleanup()\n config_counter += 1\n continue\n\n self.load_and_run_scenario(args, config, scenario)\n\n config_counter += 1\n\n self.cleanup(ego=(not args.waitForEgo))\n\n print(\"No more scenarios .... 
Exiting\")\n\n def run_openscenario(self, args):\n \"\"\"\n Run openscenario\n \"\"\"\n\n # Load the scenario configurations provided in the config file\n if not os.path.isfile(args.openscenario):\n print(\"File does not exist\")\n self.cleanup()\n return\n\n config = OpenScenarioConfiguration(args.openscenario)\n\n if not self.load_and_wait_for_world(args, config):\n self.cleanup()\n return\n\n # Create scenario manager\n self.manager = ScenarioManager(self.world, args.debug)\n\n # Prepare scenario\n print(\"Preparing scenario: \" + config.name)\n try:\n CarlaActorPool.set_world(self.world)\n self.prepare_ego_vehicles(config, args.waitForEgo)\n scenario = OpenScenario(world=self.world,\n ego_vehicles=self.ego_vehicles,\n config=config,\n config_file=args.openscenario,\n timeout=100000)\n except Exception as exception:\n print(\"The scenario cannot be loaded\")\n if args.debug:\n traceback.print_exc()\n print(exception)\n self.cleanup()\n return\n\n self.load_and_run_scenario(args, config, scenario)\n\n self.cleanup(ego=(not args.waitForEgo))\n\n print(\"No more scenarios .... Exiting\")\n\n\nif __name__ == '__main__':\n\n DESCRIPTION = (\"CARLA Scenario Runner: Setup, Run and Evaluate scenarios using CARLA\\n\"\n \"Current version: \" + str(VERSION))\n\n PARSER = argparse.ArgumentParser(description=DESCRIPTION,\n formatter_class=RawTextHelpFormatter)\n PARSER.add_argument('--host', default='127.0.0.1',\n help='IP of the host server (default: localhost)')\n PARSER.add_argument('--port', default='2000',\n help='TCP port to listen to (default: 2000)')\n PARSER.add_argument('--debug', action=\"store_true\", help='Run with debug output')\n PARSER.add_argument('--output', action=\"store_true\", help='Provide results on stdout')\n PARSER.add_argument('--file', action=\"store_true\", help='Write results into a txt file')\n PARSER.add_argument('--junit', action=\"store_true\", help='Write results into a junit file')\n PARSER.add_argument('--outputDir', default='', help='Directory for output files (default: this directory)')\n PARSER.add_argument('--waitForEgo', action=\"store_true\", help='Connect the scenario to an existing ego vehicle')\n PARSER.add_argument('--configFile', default='', help='Provide an additional scenario configuration file (*.xml)')\n PARSER.add_argument('--additionalScenario', default='', help='Provide additional scenario implementations (*.py)')\n PARSER.add_argument('--reloadWorld', action=\"store_true\",\n help='Reload the CARLA world before starting a scenario (default=True)')\n # pylint: disable=line-too-long\n PARSER.add_argument(\n '--scenario', help='Name of the scenario to be executed. Use the preposition \\'group:\\' to run all scenarios of one class, e.g. 
ControlLoss or FollowLeadingVehicle')\n # pylint: enable=line-too-long\n PARSER.add_argument('--randomize', action=\"store_true\", help='Scenario parameters are randomized')\n PARSER.add_argument('--repetitions', default=1, help='Number of scenario executions')\n PARSER.add_argument('--list', action=\"store_true\", help='List all supported scenarios and exit')\n PARSER.add_argument('--listClass', action=\"store_true\", help='List all supported scenario classes and exit')\n PARSER.add_argument('--openscenario', help='Provide an OpenScenario definition')\n PARSER.add_argument('-v', '--version', action='version', version='%(prog)s ' + str(VERSION))\n ARGUMENTS = PARSER.parse_args()\n\n if ARGUMENTS.list:\n print(\"Currently the following scenarios are supported:\")\n print(*get_list_of_scenarios(ARGUMENTS.configFile), sep='\\n')\n sys.exit(0)\n\n if ARGUMENTS.listClass:\n print(\"Currently the following scenario classes are supported:\")\n print(*SCENARIOS.keys(), sep='\\n')\n sys.exit(0)\n\n if not ARGUMENTS.scenario and not ARGUMENTS.openscenario:\n print(\"Please specify a scenario using '--scenario SCENARIONAME'\\n\\n\")\n PARSER.print_help(sys.stdout)\n sys.exit(0)\n\n SCENARIORUNNER = None\n try:\n SCENARIORUNNER = ScenarioRunner(ARGUMENTS)\n SCENARIORUNNER.run(ARGUMENTS)\n finally:\n if SCENARIORUNNER is not None:\n del SCENARIORUNNER\n","sub_path":"scenario_runner.py","file_name":"scenario_runner.py","file_ext":"py","file_size_in_byte":17581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"188445718","text":"import numpy as np\n\nfrom revgraph.core.functions.operations.gradient.base.optimizer import Optimizer\n\n\nclass Adam(Optimizer):\n def __init__(self,\n lr=0.001,\n beta1=0.9,\n beta2=0.999,\n amsgrad=False,\n epsilon=1e-6,\n decay=0.0):\n super().__init__()\n self.lr = lr\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n self.decay = decay\n self.amsgrad = amsgrad\n self.iteration = 0\n\n def init_param_memory(self, param, memory, global_memory):\n memory['m'] = np.zeros_like(param.data)\n memory['v'] = np.zeros_like(param.data)\n\n if self.amsgrad:\n memory['vhat'] = np.zeros_like(param.data)\n else:\n memory['vhat'] = np.zeros(1)\n\n def before_update(self, global_memory):\n self.iteration += 1\n if self.decay > 0:\n lr = global_memory['lr'] = self.lr * (1.0 / (1.0 + self.decay *\n self.iteration))\n else:\n lr = global_memory['lr'] = self.lr\n\n t = float(self.iteration) + 1\n global_memory['lr_t'] = lr * (np.sqrt(1 - np.power(self.beta2, t)) /\n (1 - np.power(self.beta1, t)))\n\n def update(self, param, memory, global_memory):\n lr, lr_t = map(global_memory.__getitem__, ['lr', 'lr_t'])\n m, v, vhat = map(memory.__getitem__, ['m', 'v', 'vhat'])\n b1, b2 = self.beta1, self.beta2\n g = param.gradient\n m_t = memory['m'] = (b1 * m) + (1 - b1) * g\n v_t = memory['v'] = (b2 * v) + (1 - b2) * np.square(g)\n\n if self.amsgrad:\n vhat_t = memory['vhat'] = np.maximum(vhat, v_t)\n param.data -= lr_t * m_t / (np.sqrt(vhat_t) + self.epsilon)\n else:\n param.data -= lr_t * m_t / (np.sqrt(v_t) + self.epsilon)\n","sub_path":"revgraph/core/functions/operations/gradient/optimizers/adam.py","file_name":"adam.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"175991053","text":"from flask import Flask, render_template\r\nfrom flask.json import jsonify\r\nfrom datetime import datetime\r\nfrom flask_cors import CORS\r\n\r\n# 
Import our pymongo library, which lets us connect our Flask app to our Mongo database.\r\nimport pymongo\r\nimport json\r\nimport pandas as pd\r\n\r\n# Create an instance of our Flask app.\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n# Create connection variable\r\nconn = 'mongodb://localhost:27017'\r\n\r\n# Pass connection to the pymongo instance.\r\nclient = pymongo.MongoClient(conn)\r\n\r\n@app.route('/')\r\ndef index():\r\n    return render_template('index.html')\r\n\r\n@app.route('/posts')\r\ndef posts():\r\n    return render_template('posts.html')\r\n\r\n@app.route('/primary_school')\r\ndef primary_school():\r\n    return render_template('primary_school.html')\r\n\r\n@app.route('/poverty_rate')\r\ndef poverty_rate():\r\n    return render_template('poverty_rate.html')\r\n\r\n@app.route('/birth_rate')\r\ndef birth_rate():\r\n    return render_template('birth_rate.html')\r\n\r\n@app.route('/api')\r\ndef get_data():\r\n    db = client.poverty_db\r\n    try:\r\n        q = db.poverty.find({})\r\n        docs = [doc for doc in q]\r\n        print(len(docs))\r\n        return jsonify(docs)\r\n    except Exception as e:\r\n        return jsonify({\"message\":e})\r\n\r\n@app.route('/api/primary-school')\r\ndef get_primary_school():\r\n    db = client.primary_school_db\r\n    try:\r\n        q = db.primary_school.find({})\r\n        docs = [doc for doc in q]\r\n        print(len(docs))\r\n        return jsonify(docs)\r\n    except Exception as e:\r\n        return jsonify({\"message\":e})\r\n\r\n@app.route('/api/primary-completion')\r\ndef get_primary_completion():\r\n    db = client.primary_completion_db\r\n    try:\r\n        q = db.primary_completion.find({})\r\n        docs = [doc for doc in q]\r\n        print(len(docs))\r\n        return jsonify(docs)\r\n    except Exception as e:\r\n        return jsonify({\"message\":e})\r\n    \r\n\r\nif __name__ == \"__main__\":\r\n    app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)\r\n","sub_path":"FLASK/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"134332563","text":"class Solution:\n    # double loop / brute force\n    def maxProfit_1(self, prices):\n        max = 0\n        left, right = 0, 0\n        while left < len(prices):\n            right = left + 1\n            while right < len(prices):\n                if prices[right] > prices[left]:\n                    cur = prices[right] - prices[left]\n                    if cur > max: max = cur\n                right += 1\n            left += 1\n        return max\n\n    # single pass / sliding window\n    def maxProfit_2(self, prices):\n        minprice, maxprofit = float(\"inf\"), 0\n        for i in range(len(prices)):\n            if prices[i] < minprice:\n                minprice = prices[i]\n            else:\n                maxprofit = max(prices[i] - minprice, maxprofit)\n        return maxprofit\n\n    # monotonic stack\n    def maxProfit_3(self, prices):\n        maxprofit = 0\n        prices.append(-1)\n        stack = []\n        for i in range(len(prices)):\n            if not stack:\n                stack.append(prices[i])\n            else:\n                while stack and stack[-1] > prices[i]:\n                    popprice = stack.pop()\n                    if stack: maxprofit = max(maxprofit, popprice - stack[0])\n                stack.append(prices[i])\n        return maxprofit\n\n    # dynamic programming\n    def maxProfit(self, prices):\n        if len(prices) <= 1:\n            return 0\n        minprice, maxprofit = prices[0], 0\n        for i in range(1, len(prices)):\n            minprice = min(prices[i], minprice)\n            maxprofit = max(maxprofit, prices[i] - minprice)\n        return maxprofit\n\n\nif __name__ == '__main__':\n    print(Solution().maxProfit([3,2,4,0,3]))\n\n\n","sub_path":"python code/题库/121. 
买卖股票的最佳时机.py","file_name":"121. 买卖股票的最佳时机.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"303361343","text":"import numpy as np\nimport pandas as pd\n\ndata = pd.read_csv('cars.csv', header=None)\n\n# Choose a classification model: logistic regression, decision tree, RF, GBDT, AdaBoost\n# RF is used here (random forest: similar inputs produce similar outputs); other models would work too\n\n# Data preprocessing: apply label encoding to the current data\nimport sklearn.preprocessing as sp\n# Keep a separate copy of the data\ntrain_data = pd.DataFrame([])\nencoders = {}\nfor col_ind, col_val in data.items():\n    encoder = sp.LabelEncoder()\n    train_data[col_ind] = encoder.fit_transform(col_val)\n    encoders[col_ind] = encoder\n# Assemble the input and output sets\nx, y = train_data.iloc[:, :-1], train_data[6]\n# Create the classification model\nimport sklearn.ensemble as se\nimport sklearn.metrics as sm\nimport sklearn.model_selection as ms\nmodel = se.RandomForestClassifier(max_depth=13, n_estimators=15, random_state=12)\n# Run 5-fold cross-validation\nscores = ms.cross_val_score(model, x, y, cv=5, scoring='f1_weighted')\nprint(scores.mean())\n# Validation curve to pick the best hyperparameter n_estimators\n# params = np.arange(15,16,1)\n# train_scores, test_scores = ms.validation_curve(model, x, y, 'n_estimators', params, cv=5)\n# scores = test_scores.mean(axis=1)\n# Evaluation scores\n# print(scores)\n# Visualization\n# import matplotlib.pyplot as plt\n# plt.grid(linestyle=':')\n# plt.plot(params, scores, 'o-', color='orangered', label='n_estimators VC', linestyle='--')\n# plt.legend()\n# plt.show()\n\n# Validation curve to pick the best hyperparameter max_depth\n# params = np.arange(13,14,1)\n# train_scores, test_scores = ms.validation_curve(model, x, y, 'max_depth', params, cv=5)\n# scores = test_scores.mean(axis=1)\n# Evaluation scores\n# print(scores)\n# Visualization\n# import matplotlib.pyplot as plt\n# plt.grid(linestyle=':')\n# plt.plot(params, scores, 'o-', color='dodgerblue', label='max_depth VC', linestyle='--')\n# plt.legend()\n\n# Validation curve to pick the best hyperparameter random_state\n# params = np.arange(1,20,1)\n# train_scores, test_scores = ms.validation_curve(model, x, y, 'random_state', params, cv=5)\n# scores = test_scores.mean(axis=1)\n# Evaluation scores\n# print(scores)\n# Visualization\n# import matplotlib.pyplot as plt\n# plt.grid(linestyle=':')\n# plt.plot(params, scores, 'o-', color='yellow', label='random_state VC', linestyle='--')\n# plt.legend()\n# plt.show()\n\n\nmodel.fit(x, y)\npred_y = model.predict(x)\nprint(sm.classification_report(y, pred_y))\n\n# Model prediction\ndata = [\n    ['high', 'med', '5more', '4', 'big', 'low', 'unacc'],\n    ['high', 'high', '4', '4', 'med', 'med', 'acc'],\n    ['low', 'low', '2', '4', 'small', 'high', 'good'],\n    ['low', 'med', '3', '4', 'med', 'high', 'vgood']\n]\ntest_data = pd.DataFrame(data)\nfor col_ind, col_val in test_data.items():\n    encoder = encoders[col_ind]\n    encoded_col = encoder.transform(col_val)\n    test_data[col_ind] = encoded_col\n\ntest_x, test_y = test_data.iloc[:, :-1], test_data[6]\npred_y = model.predict(test_x)\nprint(encoders[6].inverse_transform(pred_y))","sub_path":"分类模型/09验证曲线.py","file_name":"09验证曲线.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"105190387","text":"\"\"\"\nReport on applied injectables.\n\nThere are uses to keeping a list of registrations. For example, if you\nhave a bunch of ``@config`` decorators, you can get a listing of them,\nalong with a \"shortname\" from an info structure.\n\"\"\"\n\nfrom wired_injector import InjectorRegistry\n\nfrom . 
import factories\nfrom .constants import Area, Kind\n\n\ndef test():\n # The app\n registry = InjectorRegistry(use_injectables=True)\n\n # System\n registry.scan(factories)\n registry.injectables.commit(area=Area.system)\n\n # All done, write the injectables to the registry\n registry.injectables.apply_injectables()\n\n # Get all the kind=config that have a shortname\n injectables = registry.injectables.get_info(kind=Kind.config)\n results = [\n (injectable.info['shortname'], injectable.for_)\n for injectable in injectables\n if 'shortname' in injectable.info\n ]\n result = results[0][0]\n expected = 'bob'\n\n return result, expected\n","sub_path":"examples/injectables/info/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"136036130","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 29 21:13:45 2017\n\n@author: Caiyd\n\"\"\"\n\nimport click\n\n\ndef loadqsm(samplelistfile):\n \"\"\"\n load query sample\n \"\"\"\n samplelist = []\n with open(samplelistfile) as f:\n for line in f:\n line = line.strip()\n if line:\n samplelist.append(line)\n return samplelist\n\n\ndef loadmsp(mspfile, samplelist, sourceindex):\n region_list = []\n smindex = []\n sourceindex = str(int(sourceindex))\n with open(mspfile) as f:\n header = f.readline()\n header = f.readline().strip().split()\n for sample in samplelist:\n hap1 = sample + '.0'\n hap2 = sample + '.1'\n smindex.append(header.index(hap1))\n smindex.append(header.index(hap2))\n for line in f:\n line = line.strip().split()\n for index in smindex:\n if line[index] == sourceindex:\n region_list.append('\\t'.join(line[:3]))\n break\n return region_list\n\n\n@click.command()\n@click.argument('msplistfile')\n@click.argument('samplelistfile')\n@click.argument('sourceindex', type=str)\n@click.argument('outfile')\ndef main(msplistfile, samplelistfile, sourceindex, outfile):\n mspfilelist = loadqsm(msplistfile)\n samplelist = loadqsm(samplelistfile)\n region_list = []\n with click.progressbar(mspfilelist, label='deal msp files') as bar:\n for mspfile in bar:\n region_list += loadmsp(mspfile, samplelist, sourceindex)\n with open(outfile, 'w') as f:\n with click.progressbar(region_list, label='output...') as bar:\n for line in bar:\n f.write(line + '\\n')\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"snp_tools/rfmix_querysminterval.py","file_name":"rfmix_querysminterval.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"261400662","text":"# coding: utf-8\n\n\"\"\"\n Cost Management\n\n The API for Project Koku and OpenShift cost management. You can find out more about Project Koku at [https://github.com/project-koku/](https://github.com/project-koku/). 
# noqa: E501\n\n The version of the OpenAPI document: 1.0.2\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom openapi_client.configuration import Configuration\n\n\nclass SourceOutAllOf(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'id': 'int',\n 'uuid': 'str',\n 'name': 'str',\n 'source_type': 'str',\n 'authentication': 'object',\n 'billing_source': 'object',\n 'provider_linked': 'bool',\n 'infrastructure': 'str',\n 'cost_models': 'list[SourceOutAllOfCostModels]'\n }\n\n attribute_map = {\n 'id': 'id',\n 'uuid': 'uuid',\n 'name': 'name',\n 'source_type': 'source_type',\n 'authentication': 'authentication',\n 'billing_source': 'billing_source',\n 'provider_linked': 'provider_linked',\n 'infrastructure': 'infrastructure',\n 'cost_models': 'cost_models'\n }\n\n def __init__(self, id=None, uuid=None, name=None, source_type=None, authentication=None, billing_source=None, provider_linked=False, infrastructure=None, cost_models=None, local_vars_configuration=None): # noqa: E501\n \"\"\"SourceOutAllOf - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._id = None\n self._uuid = None\n self._name = None\n self._source_type = None\n self._authentication = None\n self._billing_source = None\n self._provider_linked = None\n self._infrastructure = None\n self._cost_models = None\n self.discriminator = None\n\n self.id = id\n if uuid is not None:\n self.uuid = uuid\n if name is not None:\n self.name = name\n if source_type is not None:\n self.source_type = source_type\n if authentication is not None:\n self.authentication = authentication\n if billing_source is not None:\n self.billing_source = billing_source\n if provider_linked is not None:\n self.provider_linked = provider_linked\n if infrastructure is not None:\n self.infrastructure = infrastructure\n if cost_models is not None:\n self.cost_models = cost_models\n\n @property\n def id(self):\n \"\"\"Gets the id of this SourceOutAllOf. # noqa: E501\n\n\n :return: The id of this SourceOutAllOf. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this SourceOutAllOf.\n\n\n :param id: The id of this SourceOutAllOf. # noqa: E501\n :type: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def uuid(self):\n \"\"\"Gets the uuid of this SourceOutAllOf. # noqa: E501\n\n\n :return: The uuid of this SourceOutAllOf. # noqa: E501\n :rtype: str\n \"\"\"\n return self._uuid\n\n @uuid.setter\n def uuid(self, uuid):\n \"\"\"Sets the uuid of this SourceOutAllOf.\n\n\n :param uuid: The uuid of this SourceOutAllOf. # noqa: E501\n :type: str\n \"\"\"\n\n self._uuid = uuid\n\n @property\n def name(self):\n \"\"\"Gets the name of this SourceOutAllOf. # noqa: E501\n\n\n :return: The name of this SourceOutAllOf. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this SourceOutAllOf.\n\n\n :param name: The name of this SourceOutAllOf. # noqa: E501\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def source_type(self):\n \"\"\"Gets the source_type of this SourceOutAllOf. # noqa: E501\n\n\n :return: The source_type of this SourceOutAllOf. # noqa: E501\n :rtype: str\n \"\"\"\n return self._source_type\n\n @source_type.setter\n def source_type(self, source_type):\n \"\"\"Sets the source_type of this SourceOutAllOf.\n\n\n :param source_type: The source_type of this SourceOutAllOf. # noqa: E501\n :type: str\n \"\"\"\n\n self._source_type = source_type\n\n @property\n def authentication(self):\n \"\"\"Gets the authentication of this SourceOutAllOf. # noqa: E501\n\n Dictionary containing resource name. # noqa: E501\n\n :return: The authentication of this SourceOutAllOf. # noqa: E501\n :rtype: object\n \"\"\"\n return self._authentication\n\n @authentication.setter\n def authentication(self, authentication):\n \"\"\"Sets the authentication of this SourceOutAllOf.\n\n Dictionary containing resource name. # noqa: E501\n\n :param authentication: The authentication of this SourceOutAllOf. # noqa: E501\n :type: object\n \"\"\"\n\n self._authentication = authentication\n\n @property\n def billing_source(self):\n \"\"\"Gets the billing_source of this SourceOutAllOf. # noqa: E501\n\n Dictionary containing billing source. # noqa: E501\n\n :return: The billing_source of this SourceOutAllOf. # noqa: E501\n :rtype: object\n \"\"\"\n return self._billing_source\n\n @billing_source.setter\n def billing_source(self, billing_source):\n \"\"\"Sets the billing_source of this SourceOutAllOf.\n\n Dictionary containing billing source. # noqa: E501\n\n :param billing_source: The billing_source of this SourceOutAllOf. # noqa: E501\n :type: object\n \"\"\"\n\n self._billing_source = billing_source\n\n @property\n def provider_linked(self):\n \"\"\"Gets the provider_linked of this SourceOutAllOf. # noqa: E501\n\n Flag to indicate if provider is linked to source. # noqa: E501\n\n :return: The provider_linked of this SourceOutAllOf. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._provider_linked\n\n @provider_linked.setter\n def provider_linked(self, provider_linked):\n \"\"\"Sets the provider_linked of this SourceOutAllOf.\n\n Flag to indicate if provider is linked to source. # noqa: E501\n\n :param provider_linked: The provider_linked of this SourceOutAllOf. # noqa: E501\n :type: bool\n \"\"\"\n\n self._provider_linked = provider_linked\n\n @property\n def infrastructure(self):\n \"\"\"Gets the infrastructure of this SourceOutAllOf. # noqa: E501\n\n OpenShift foundational infrastructure type. # noqa: E501\n\n :return: The infrastructure of this SourceOutAllOf. # noqa: E501\n :rtype: str\n \"\"\"\n return self._infrastructure\n\n @infrastructure.setter\n def infrastructure(self, infrastructure):\n \"\"\"Sets the infrastructure of this SourceOutAllOf.\n\n OpenShift foundational infrastructure type. # noqa: E501\n\n :param infrastructure: The infrastructure of this SourceOutAllOf. # noqa: E501\n :type: str\n \"\"\"\n\n self._infrastructure = infrastructure\n\n @property\n def cost_models(self):\n \"\"\"Gets the cost_models of this SourceOutAllOf. # noqa: E501\n\n List of cost model name and UUIDs associated with this source. # noqa: E501\n\n :return: The cost_models of this SourceOutAllOf. 
# noqa: E501\n :rtype: list[SourceOutAllOfCostModels]\n \"\"\"\n return self._cost_models\n\n @cost_models.setter\n def cost_models(self, cost_models):\n \"\"\"Sets the cost_models of this SourceOutAllOf.\n\n List of cost model name and UUIDs associated with this source. # noqa: E501\n\n :param cost_models: The cost_models of this SourceOutAllOf. # noqa: E501\n :type: list[SourceOutAllOfCostModels]\n \"\"\"\n\n self._cost_models = cost_models\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, SourceOutAllOf):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, SourceOutAllOf):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"openapi_client/models/source_out_all_of.py","file_name":"source_out_all_of.py","file_ext":"py","file_size_in_byte":10021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"581069143","text":"import Enemigo\nfrom Usuario import UsuarioNormal\n\nclass Hechizo():\n\n def execute(self, enemigo:Enemigo, usuario: UsuarioNormal):\n \"\"\"Orden del patrón COMMAND. 
Se ejecuta sobre el enemigo.\n\n :param enemigo: enemigo sobre el que se lanza el ataque\n :param usuario: usuario que lanza el ataque\n\n \"\"\"\n if(usuario.hechizos == 0):\n print();\n print(\"No tienes más hechizos.\");\n print();\n else:\n if enemigo.enPiedra():\n print(\"El enemigo ya se encuentra en piedra.\");\n else:\n usuario.hechizos -= 1;\n enemigo.piedra = True;\n print(\"Hechizo invocado!\")\n\n def __str__(self):\n \"\"\"Descripción de la orden.\n\n Se explica la orden en profundidad.\n \"\"\"\n return \"\\t<= maxrow:\r\n break\r\n for c in r:\r\n row = c.row\r\n if self_row == row:\r\n continue\r\n pk = row[0:len] # simlen - 12\r\n rowdata = {}\r\n for cv in c.columnValues:\r\n cm = cv.qualifier.strip()\r\n if cm in ks:\r\n rowdata.update({cm: cv.value})\r\n groups.append({row: rowdata})\r\n r = hd.client.getScannerRows(scanner, 30)\r\n except:\r\n pass\r\n return groups\r\n\r\n def scan_data_main(self,scan):\r\n hd = hbase_dao()\r\n groups = {}\r\n scanner = hd.open_scan(self.caizheng_table, scan)\r\n r = hd.client.getScannerRows(scanner, 30)\r\n i = 0\r\n arts = 20\r\n while r:\r\n if i>=arts:\r\n break\r\n try:\r\n i += 1\r\n for c in r:\r\n row = c.row\r\n pk = row[0:self.simlen - self.splen] # simlen - 12\r\n filename = ''\r\n title = ''\r\n for cv in c.columnValues:\r\n cm = cv.qualifier.strip()\r\n if cm == 'filename':\r\n filename=cv.value\r\n if cm == 'content':\r\n title = cv.value\r\n if pk not in groups.keys():\r\n groups.update({pk:[]})\r\n groups[pk].append({filename:title})\r\n r = hd.client.getScannerRows(scanner, 30)\r\n except:\r\n pass\r\n\r\n result_grout = []\r\n for g,v in groups.items():\r\n if len(v) >= 2 :\r\n result_grout.append(v)\r\n return json.dumps(result_grout, ensure_ascii=False, indent=2)\r\n\r\n\r\n","sub_path":"HaoPy/haohbase/simart/get_for_scan.py","file_name":"get_for_scan.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"455487691","text":"from flask import Flask, request, render_template\nfrom src.modules.scraper import driver\nimport json\n\napp = Flask(__name__, template_folder=\".\")\n\n\n@app.route(\"/\")\ndef landingpage():\n return render_template(\"./static/landing.html\")\n\n\n@app.route(\"/search\", methods=[\"POST\", \"GET\"])\ndef product_search(new_product=\"\", sort=None, currency=None, num=None):\n product = request.args.get(\"product_name\")\n if product == None:\n product = new_product\n\n data = driver(product, currency, num, 0, False, None, True, sort)\n\n return render_template(\"./static/result.html\", data=data, prod=product)\n\n\n@app.route(\"/filter\", methods=[\"POST\", \"GET\"])\ndef product_search_filtered():\n\n product = request.args.get(\"product_name\")\n sort = request.form[\"sort\"]\n currency = request.form[\"currency\"]\n num = request.form[\"num\"]\n\n if sort == \"default\":\n sort = None\n if currency == \"usd\":\n currency = None\n if num == \"default\":\n num = None\n return product_search(product, sort, currency, num)\n","sub_path":"src/modules/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"535313925","text":"import time\nfrom typing import Any, List, Dict, Sequence, DefaultDict, Union, Optional\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom spectacles.client import LookerClient\nfrom spectacles.lookml import Project, Model, Explore, 
Dimension\nfrom spectacles.logger import GLOBAL_LOGGER as logger\nfrom spectacles.exceptions import SqlError, DataTestError, SpectaclesException\nimport spectacles.printer as printer\n\n\nclass Query:\n \"\"\"Stores IDs and a reference to the LookML object being queried\"\"\"\n\n def __init__(\n self,\n query_id: str,\n lookml_ref: Union[Dimension, Explore],\n query_task_id: Optional[str] = None,\n ):\n self.query_id = query_id\n self.lookml_ref = lookml_ref\n self.query_task_id = query_task_id\n\n\nclass QueryResult:\n \"\"\"Stores ID, query status, and error details for a completed query task\"\"\"\n\n def __init__(\n self, query_task_id: str, status: str, error: Optional[Dict[str, Any]] = None\n ):\n self.query_task_id = query_task_id\n self.status = status\n self.error = error\n\n\nclass Validator(ABC): # pragma: no cover\n \"\"\"Defines abstract base interface for validators.\n\n Not intended to be used directly, only inherited.\n\n Attributes:\n client: Looker API client.\n\n \"\"\"\n\n def __init__(self, client: LookerClient):\n self.client = client\n\n @abstractmethod\n def validate(self):\n raise NotImplementedError\n\n\nclass DataTestValidator(Validator):\n \"\"\"Runs LookML/data tests for a given project.\n\n Args:\n client: Looker API client.\n project: Name of the LookML project to validate.\n\n \"\"\"\n\n def __init__(self, client: LookerClient, project: str):\n super().__init__(client)\n self.project = project\n\n def validate(self) -> List[DataTestError]:\n tests = self.client.all_lookml_tests(self.project)\n test_count = len(tests)\n printer.print_header(\n f\"Running {test_count} {'test' if test_count == 1 else 'tests'}\"\n )\n errors = []\n test_results = self.client.run_lookml_test(self.project)\n for result in test_results:\n message = f\"{result['model_name']}.{result['test_name']}\"\n if result[\"success\"]:\n printer.print_validation_result(\"success\", message)\n else:\n for error in result[\"errors\"]:\n printer.print_validation_result(\"error\", message)\n errors.append(\n DataTestError(\n path=f\"{result['model_name']}/{result['test_name']}\",\n message=error[\"message\"],\n )\n )\n return errors\n\n\nclass SqlValidator(Validator):\n \"\"\"Runs and validates the SQL for each selected LookML dimension.\n\n Args:\n client: Looker API client.\n project: Name of the LookML project to validate.\n\n Attributes:\n project: LookML project object representation.\n query_tasks: Mapping of query task IDs to LookML objects\n\n \"\"\"\n\n def __init__(self, client: LookerClient, project: str, concurrency: int = 10):\n super().__init__(client)\n\n self.project = Project(project, models=[])\n self.query_slots = concurrency\n self._running_queries: List[Query] = []\n # Lookup used to retrieve the LookML object\n self._query_by_task_id: Dict[str, Query] = {}\n\n def get_query_by_task_id(self, query_task_id: str) -> Query:\n return self._query_by_task_id[query_task_id]\n\n def get_running_query_tasks(self) -> List[str]:\n return [\n query.query_task_id\n for query in self._running_queries\n if query.query_task_id\n ]\n\n @staticmethod\n def parse_selectors(selectors: List[str]) -> DefaultDict[str, set]:\n \"\"\"Parses explore selectors with the format 'model_name/explore_name'.\n\n Args:\n selectors: List of selector strings in 'model_name/explore_name' format.\n The '*' wildcard selects all models or explores. 
For instance,\n 'model_name/*' would select all explores in the 'model_name' model.\n\n Returns:\n DefaultDict[str, set]: A hierarchy of selected model names (keys) and\n explore names (values).\n\n \"\"\"\n selection: DefaultDict = defaultdict(set)\n for selector in selectors:\n try:\n model, explore = selector.split(\"/\")\n except ValueError:\n raise SpectaclesException(\n f\"Explore selector '{selector}' is not valid.\\n\"\n \"Instead, use the format 'model_name/explore_name'. \"\n f\"Use 'model_name/*' to select all explores in a model.\"\n )\n else:\n selection[model].add(explore)\n return selection\n\n # TODO: Refactor this so it's more obvious how selection works\n def _select(self, choices: Sequence[str], select_from: Sequence) -> Sequence:\n unique_choices = set(choices)\n select_from_names = set(each.name for each in select_from)\n difference = unique_choices.difference(select_from_names)\n if difference:\n raise SpectaclesException(\n f\"{select_from[0].__class__.__name__}\"\n f'{\"\" if len(difference) == 1 else \"s\"} '\n + \", \".join(difference)\n + f\" not found in LookML under project '{self.project.name}'\"\n )\n return [each for each in select_from if each.name in unique_choices]\n\n def build_project(self, selectors: List[str], views: List[str]) -> None:\n \"\"\"Creates an object representation of the project's LookML.\n\n Args:\n selectors: List of selector strings in 'model_name/explore_name' format.\n The '*' wildcard selects all models or explores. For instance,\n 'model_name/*' would select all explores in the 'model_name' model.\n\n \"\"\"\n selection = self.parse_selectors(selectors)\n logger.info(\n f\"Building LookML project hierarchy for project {self.project.name}\"\n )\n\n all_models = [\n Model.from_json(model) for model in self.client.get_lookml_models()\n ]\n project_models = [\n model for model in all_models if model.project == self.project.name\n ]\n\n # Expand wildcard operator to include all specified or discovered models\n selected_model_names = selection.keys()\n if \"*\" in selected_model_names:\n explore_names = selection.pop(\"*\")\n for model in project_models:\n selection[model.name].update(explore_names)\n\n selected_models = self._select(\n choices=tuple(selection.keys()), select_from=project_models\n )\n\n for model in selected_models:\n # Expand wildcard operator to include all specified or discovered explores\n selected_explore_names = selection[model.name]\n if \"*\" in selected_explore_names:\n selected_explore_names.remove(\"*\")\n selected_explore_names.update(\n set(explore.name for explore in model.explores)\n )\n\n selected_explores = self._select(\n choices=tuple(selected_explore_names), select_from=model.explores\n )\n\n for explore in selected_explores:\n dimensions_json = self.client.get_lookml_dimensions(\n model.name, explore.name\n )\n for dimension_json in dimensions_json:\n dimension = Dimension.from_json(dimension_json)\n dimension.url = self.client.base_url + dimension.url\n if not dimension.ignore:\n if len(views) == 0:\n explore.add_dimension(dimension)\n elif dimension.view in views:\n explore.add_dimension(dimension)\n\n # Remove any explores that have 0 dimensions to test\n selected_explores = [explore for explore in selected_explores if len(explore.dimensions) > 0]\n\n model.explores = selected_explores\n\n self.project.models = selected_models\n\n def validate(self, mode: str = \"batch\") -> List[SqlError]:\n \"\"\"Queries selected explores and returns the project tree with errors.\"\"\"\n explore_count = 
self._count_explores()\n printer.print_header(\n f\"Testing {explore_count} \"\n f\"{'explore' if explore_count == 1 else 'explores'} \"\n f\"[{mode} mode]\"\n )\n\n errors = self._create_and_run(mode)\n if mode == \"hybrid\" and self.project.errored:\n errors = self._create_and_run(mode)\n\n for model in sorted(self.project.models, key=lambda x: x.name):\n for explore in sorted(model.explores, key=lambda x: x.name):\n message = f\"{model.name}.{explore.name}\"\n if explore.errored:\n printer.print_validation_result(\"error\", message)\n else:\n printer.print_validation_result(\"success\", message)\n\n return errors\n\n def _create_and_run(self, mode: str = \"batch\") -> List[SqlError]:\n \"\"\"Runs a single validation using a specified mode\"\"\"\n queries: List[Query] = []\n try:\n queries = self._create_queries(mode)\n errors = self._run_queries(queries)\n except KeyboardInterrupt:\n logger.info(\n \"\\n\\n\" + \"Please wait, asking Looker to cancel any running queries\"\n )\n query_tasks = self.get_running_query_tasks()\n self._cancel_queries(query_tasks)\n message = \"SQL validation was interrupted. \"\n if query_tasks:\n message += (\n f\"Attempted to cancel {len(query_tasks)} running \"\n f\"{'query' if len(query_tasks) == 1 else 'queries'}.\"\n )\n else:\n message += \"No queries were running at the time.\"\n raise SpectaclesException(message)\n return errors\n\n def _create_queries(self, mode: str) -> List[Query]:\n \"\"\"Creates a list of queries to be executed for validation\"\"\"\n queries: List[Query] = []\n for model in self.project.models:\n for explore in model.explores:\n if mode == \"batch\" or (mode == \"hybrid\" and not explore.queried):\n query = self._create_explore_query(explore, model.name)\n queries.append(query)\n elif mode == \"single\" or (mode == \"hybrid\" and explore.errored):\n explore_queries = self._create_dimension_queries(\n explore, model.name\n )\n queries.extend(explore_queries)\n return queries\n\n def _create_explore_query(self, explore: Explore, model_name: str) -> Query:\n \"\"\"Creates a single query with all dimensions of an explore\"\"\"\n dimensions = [dimension.name for dimension in explore.dimensions]\n query_id = self.client.create_query(model_name, explore.name, dimensions)\n return Query(query_id, lookml_ref=explore)\n\n def _create_dimension_queries(\n self, explore: Explore, model_name: str\n ) -> List[Query]:\n \"\"\"Creates individual queries for each dimension in an explore\"\"\"\n queries = []\n for dimension in explore.dimensions:\n query_id = self.client.create_query(\n model_name, explore.name, [dimension.name]\n )\n query = Query(query_id, lookml_ref=dimension)\n queries.append(query)\n return queries\n\n def _run_queries(self, queries: List[Query]) -> List[SqlError]:\n \"\"\"Creates and runs queries with a maximum concurrency defined by query slots\"\"\"\n QUERY_TASK_LIMIT = 250\n errors: List[SqlError] = []\n\n while queries or self._running_queries:\n if queries:\n logger.debug(f\"Starting a new loop, {len(queries)} queries queued\")\n self._fill_query_slots(queries)\n query_tasks = self.get_running_query_tasks()[:QUERY_TASK_LIMIT]\n logger.debug(f\"Checking for results of {len(query_tasks)} query tasks\")\n for query_result in self._get_query_results(query_tasks):\n sql_error = self._handle_query_result(query_result)\n if sql_error:\n errors.append(sql_error)\n time.sleep(0.5)\n return errors\n\n def _fill_query_slots(self, queries: List[Query]) -> None:\n \"\"\"Creates query tasks until all slots are used or all queries are 
running\"\"\"\n while queries and self.query_slots > 0:\n logger.debug(\n f\"{self.query_slots} available query slots, creating query task\"\n )\n query = queries.pop(0)\n query_task_id = self.client.create_query_task(query.query_id)\n self.query_slots -= 1\n query.query_task_id = query_task_id\n self._query_by_task_id[query_task_id] = query\n self._running_queries.append(query)\n\n def _get_completed_query_tasks(\n self, query_task_ids: List[str]\n ) -> List[QueryResult]:\n \"\"\"Returns ID, status, and error message for completed and errored tasks\"\"\"\n query_results = []\n results = self.client.get_query_task_multi_results(query_task_ids)\n for query_task_id, result in results.items():\n status = result[\"status\"]\n logger.debug(f\"Query task {query_task_id} status is: {status}\")\n if status in (\"complete\", \"error\"):\n self.query_slots += 1\n query_result = QueryResult(query_task_id, status)\n if status == \"error\":\n try:\n query_result.error = self._extract_error_details(result)\n except (KeyError, TypeError, IndexError) as error:\n raise SpectaclesException(\n \"Encountered an unexpected API query result format, \"\n \"unable to extract error details. \"\n f\"The query result was: {result}\"\n ) from error\n query_results.append(query_result)\n elif status in (\"running\", \"added\", \"expired\"):\n continue\n else:\n raise SpectaclesException(\n f'Unexpected query result status \"{status}\" '\n \"returned by the Looker API\"\n )\n return query_results\n\n def _get_query_results(self, query_task_ids: List[str]) -> List[QueryResult]:\n \"\"\"Returns ID, status, and error message for all query tasks\"\"\"\n query_results = []\n results = self.client.get_query_task_multi_results(query_task_ids)\n for query_task_id, result in results.items():\n status = result[\"status\"]\n if status not in (\"complete\", \"error\", \"running\", \"added\", \"expired\"):\n raise SpectaclesException(\n f'Unexpected query result status \"{status}\" '\n \"returned by the Looker API\"\n )\n logger.debug(f\"Query task {query_task_id} status is: {status}\")\n query_result = QueryResult(query_task_id, status)\n if status == \"error\":\n try:\n error_details = self._extract_error_details(result)\n except (KeyError, TypeError, IndexError) as error:\n raise SpectaclesException(\n \"Encountered an unexpected API query result format, \"\n \"unable to extract error details. 
\"\n f\"The query result was: {result}\"\n ) from error\n else:\n query_result.error = error_details\n query_results.append(query_result)\n return query_results\n\n def _handle_query_result(self, result: QueryResult) -> Optional[SqlError]:\n query = self.get_query_by_task_id(result.query_task_id)\n if result.status in (\"complete\", \"error\"):\n self._running_queries.remove(query)\n self.query_slots += 1\n lookml_object = query.lookml_ref\n lookml_object.queried = True\n\n if result.status == \"error\" and result.error:\n sql_error = SqlError(\n path=lookml_object.name,\n url=getattr(lookml_object, \"url\", None),\n **result.error,\n )\n lookml_object.error = sql_error\n return sql_error\n return None\n\n @staticmethod\n def _extract_error_details(query_result: Dict) -> Dict:\n \"\"\"Extracts the relevant error fields from a Looker API response\"\"\"\n data = query_result[\"data\"]\n if isinstance(data, dict):\n errors = data.get(\"errors\") or [data.get(\"error\")]\n first_error = errors[0]\n message = \" \".join(\n filter(\n None,\n [first_error.get(\"message\"), first_error.get(\"message_details\")],\n )\n )\n sql = data.get(\"sql\")\n error_loc = first_error.get(\"sql_error_loc\")\n if error_loc:\n line_number = error_loc.get(\"line\")\n else:\n line_number = None\n elif isinstance(data, list):\n message = data[0]\n line_number = None\n sql = None\n else:\n raise TypeError(\n \"Unexpected error response type. \"\n \"Expected a dict or a list, \"\n f\"received type {type(data)}: {data}\"\n )\n\n return {\"message\": message, \"sql\": sql, \"line_number\": line_number}\n\n def _cancel_queries(self, query_task_ids: List[str]) -> None:\n \"\"\"Asks the Looker API to cancel specified queries\"\"\"\n for query_task_id in query_task_ids:\n self.client.cancel_query_task(query_task_id)\n\n def _count_explores(self) -> int:\n \"\"\"Counts the explores in the LookML project hierarchy.\"\"\"\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count\n","sub_path":"spectacles/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":18528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"154710355","text":"import socket\nimport sys\nimport os\n\nfilename = str(sys.argv[1])\npath = './downloads'\n\ns = socket.socket()\nhost = socket.gethostname()\nport = 60000\n\ns.connect((host, port))\ns.send(filename.encode('utf-8'))\n\nfull_path = os.path.join(path, filename)\n\nwith open(full_path, 'wb') as f:\n print('file opened')\n while True:\n print('receiving data...')\n data = s.recv(1024)\n # print('data=%s', (data))\n if not data:\n break\n # write data to a file\n f.write(data)\n\nf.close()\nprint('Successfully get the file')\ns.close()\nprint('connection closed')","sub_path":"CN2/legacy/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"181746894","text":"import numpy as np\nfrom xgboost import XGBClassifier, XGBRegressor\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.metrics import r2_score\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndataset = load_diabetes()\n\nx = dataset.data\ny = dataset.target\n\nprint(x.shape)\nprint(y.shape)\n\nx_train, x_test, y_train, y_test = 
train_test_split(x, y,\n                                                    train_size=0.8, shuffle=True, random_state=66)\n\n# diabetes targets are continuous, so a regressor (not a classifier) is fitted here\nxgb = XGBRegressor(n_estimators=1000, learning_rate=0.1)\n\nparameters = {\n    'n_estimators': [100, 200, 400],\n    'learning_rate' : [0.01, 0.03, 0.05, 0.1],\n    'colsample_bytree': [0.5, 0.7, 0.8, 0.9],\n    'colsample_bylevel':[0.5, 0.7, 0.8, 0.9],\n    'max_depth': [4, 5, 6]\n}\n\nmodel = RandomizedSearchCV(xgb, parameters, cv=5, n_jobs=-1)\n\nmodel.fit(x_train, y_train)  # verbose=True, eval_metric=[\"logloss\",\"rmse\"],\n          #eval_set=[(x_train, y_train), (x_test, y_test)],\n          #early_stopping_rounds=20)\n\nscore = model.score(x_test, y_test)\nprint(\"r2score: \", score)\nthresholds = np.sort(model.best_estimator_.feature_importances_)\nprint(thresholds)\n\n# from matplotlib import pyplot\n# pyplot.bar(range(len(model.feature_importances_)), model.feature_importances_)\n# pyplot.show()\n\ntmp = 0\ntmp2 = [0, 0]\nfor thresh in thresholds:\n    # prefit=True reuses the already-fitted best estimator from the search\n    selection = SelectFromModel(model.best_estimator_, threshold=thresh, prefit=True)\n\n    selection_x_train = selection.transform(x_train)\n    print(selection_x_train.shape)\n\n    selection_model = XGBRegressor(n_jobs=8)\n    selection_model.fit(selection_x_train, y_train)\n\n    selection_x_test = selection.transform(x_test)\n    y_pred = selection_model.predict(selection_x_test)\n\n    score = r2_score(y_test, y_pred)\n    if score > tmp:\n        tmp = score\n        tmp2[0] = thresh\n        tmp2[1] = selection_x_train.shape[1]\n\n    print('Thresh=%.3f, n=%d, R2: %.2f%%' % (thresh, selection_x_train.shape[1], score*100))\n    print(f'Best Score so far : {tmp*100}%')\n    print('Best Threshold : ', tmp2[0])\n\nprint('=========================================================================================')\nprint(f'Best Threshold : {tmp2[0]}, n = {tmp2[1]}')\n\nselection = SelectFromModel(model.best_estimator_, threshold=tmp2[0], prefit=True)\n\nselection_x_train = selection.transform(x_train)\n\nselection_model = RandomizedSearchCV(xgb, parameters, cv=5)\nselection_model.fit(selection_x_train, y_train)\n\nselection_x_test = selection.transform(x_test)\ny_predict = selection_model.predict(selection_x_test)\n\nscore = r2_score(y_test, y_predict)\n\nprint('=========================================================================================')\nprint(f'Final R2 score : {score*100}%, when n = {tmp2[1]}!!')\nprint('=========================================================================================')\nprint(f'Score 1 : {tmp*100}%\\nScore 2 : {score*100}%')\nprint('=========================================================================================')\n","sub_path":".vscode/ml/ml43_SFM_diabetes.py","file_name":"ml43_SFM_diabetes.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"413072097","text":"import requests\nimport time\nfrom random import seed\nfrom random import random\n\nseed(1)\n\nPRIVATE_KEY = 'a123'\n\n#API_ENDPOINT = \"http://localhost:8001/api/user\"\nAPI_ENDPOINT = 'http://64.227.0.108:8002/api/reading'\n#API_ENDPOINT = 'http://localhost:8001/api/reading'\n\ntestvalue = random()\n\ntestvalue = 2.\n\nsensorValue = 'sensorA'\n\nprint(testvalue)\n\ndata = {'private_key': PRIVATE_KEY,\n        'sensor': sensorValue,\n        'value': testvalue,\n        }\n\nr = requests.post(url = API_ENDPOINT, json = data)
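# Minimal response check (a sketch; assumes the endpoint replies with standard
# HTTP status codes for accepted/rejected readings):
if not r.ok:
    print('post failed:', r.status_code, r.text)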
\n\nprint('posted')\n\ntime.sleep(.1)\n","sub_path":"chomserver/ver11/poster.py","file_name":"poster.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"48506706","text":"pkgname = \"util-linux\"\n_mver = \"2.37\"\npkgver = f\"{_mver}.2\"\npkgrel = 0\nbuild_style = \"gnu_configure\"\nconfigure_args = [\n \"--exec-prefix=${prefix}\",\n \"--enable-libuuid\",\n \"--enable-libblkid\",\n \"--enable-fsck\",\n \"--enable-vipw\",\n \"--enable-newgrp\",\n \"--enable-chfn-chsh\",\n \"--enable-write\",\n \"--enable-fs-paths-extra=/usr/sbin:/usr/bin\",\n \"--disable-rpath\",\n \"--disable-makeinstall-chown\",\n \"--with-systemdsystemunitdir=no\",\n \"--without-udev\",\n \"--without-python\",\n]\nmake_cmd = \"gmake\"\nmake_install_args = [\"usrsbin_execdir=/usr/bin\"]\nhostmakedepends = [\"gmake\", \"gettext-tiny\", \"pkgconf\"]\nmakedepends = [\n \"linux-headers\", \"libcap-ng-devel\", \"linux-pam-devel\", \"zlib-devel\"\n]\ncheckdepends = [\"xz\", \"iproute2\", \"socat\", \"procps-ng\"]\ndepends = [f\"util-linux-common={pkgver}-r{pkgrel}\"]\npkgdesc = \"Miscellaneous Linux utilities\"\nmaintainer = \"q66 \"\nlicense = \"GPL-2.0-or-later\"\nurl = \"https://www.kernel.org/pub/linux/utils/util-linux\"\nsource = f\"$(KERNEL_SITE)/utils/{pkgname}/v{_mver}/{pkgname}-{pkgver}.tar.xz\"\nsha256 = \"6a0764c1aae7fb607ef8a6dd2c0f6c47d5e5fd27aa08820abaad9ec14e28e9d9\"\ntool_flags = {\"CFLAGS\": [\"-D_DIRENT_HAVE_D_TYPE\"]}\nsuid_files = [\n \"usr/bin/chfn\",\n \"usr/bin/chsh\",\n \"usr/bin/mount\",\n \"usr/bin/newgrp\",\n \"usr/bin/su\",\n \"usr/bin/umount\",\n \"usr/bin/wall\",\n \"usr/bin/write\",\n]\n# checkdepends are missing\noptions = [\"!check\"]\n\n# FIXME/TODO:\n# - uuidd service\n# - /usr/bin/{wall, write} should be owned by tty\n# - maybe install libuuid license in its subpackage\n\ndef post_extract(self):\n self.rm(\"tests/ts/lsns/ioctl_ns\", force = True)\n self.rm(\"tests/ts/col/multibyte\", force = True)\n\ndef post_install(self):\n self.install_license(\"Documentation/licenses/COPYING.BSD-3-Clause\")\n\n # fix permissions\n for f in suid_files:\n (self.destdir / f).chmod(0o4755)\n\n # these should be setgid and not setuid\n for f in [\"wall\", \"write\"]:\n (self.destdir / \"usr/bin\" / f).chmod(0o2755)\n\n # PAM login utils\n self.install_file(\n self.files_path / \"login.pam\", \"etc/pam.d\", name = \"login\"\n )\n self.install_file(self.files_path / \"su.pam\", \"etc/pam.d\", name = \"su\")\n self.install_file(self.files_path / \"su.pam\", \"etc/pam.d\", name = \"su-l\")\n self.install_file(\n self.files_path / \"common.pam\", \"etc/pam.d\", name = \"chfn\"\n )\n self.install_file(\n self.files_path / \"common.pam\", \"etc/pam.d\", name = \"chsh\"\n )\n\n # conflicts with bsdutils\n self.rm(self.destdir / \"usr/bin/hexdump\")\n self.rm(self.destdir / \"usr/share/man/man1/hexdump.1\")\n self.rm(self.destdir / \"usr/share/bash-completion/completions/hexdump\")\n\n@subpackage(\"util-linux-libs\")\ndef _libs(self):\n self.build_style = \"meta\"\n self.depends = [\n f\"libfdisk={pkgver}-r{pkgrel}\",\n f\"libblkid={pkgver}-r{pkgrel}\",\n f\"libmount={pkgver}-r{pkgrel}\",\n f\"libsmartcols={pkgver}-r{pkgrel}\",\n f\"libuuid={pkgver}-r{pkgrel}\",\n ]\n return []\n\n@subpackage(\"util-linux-common\")\ndef _common(self):\n self.pkgdesc += \" (common files)\"\n return [\"usr/share/locale\"]\n\n@subpackage(\"libfdisk\")\ndef _libfdisk(self):\n self.pkgdesc = \"Library for fdisk(8)\"\n self.depends = 
[f\"util-linux-common={pkgver}-r{pkgrel}\"]\n return [\"usr/lib/libfdisk.so.*\"]\n\n@subpackage(\"libfdisk-devel\")\ndef _libfdisk_devel(self):\n self.pkgdesc = \"Library for fdisk(8) (development files)\"\n return [\n \"usr/lib/libfdisk.*\",\n \"usr/lib/pkgconfig/*fdisk*\",\n \"usr/include/libfdisk\"\n ]\n\n@subpackage(\"libmount\")\ndef _libmount(self):\n self.pkgdesc = \"Library for mount(8)\"\n self.depends = [f\"util-linux-common={pkgver}-r{pkgrel}\"]\n return [\"usr/lib/libmount.so.*\"]\n\n@subpackage(\"libmount-devel\")\ndef _libmount_devel(self):\n self.pkgdesc = \"Library for mount(8) (development files)\"\n return [\n \"usr/lib/libmount.*\",\n \"usr/lib/pkgconfig/*mount*\",\n \"usr/include/libmount\"\n ]\n\n@subpackage(\"libblkid\")\ndef _libblkid(self):\n self.pkgdesc = \"Library to handle device identification\"\n self.depends = [f\"util-linux-common={pkgver}-r{pkgrel}\"]\n return [\"usr/lib/libblkid.so.*\"]\n\n@subpackage(\"libblkid-devel\")\ndef _libblkid_devel(self):\n self.pkgdesc = \"Library to handle device identification (development files)\"\n self.depends += [f\"libuuid-devel={pkgver}-r{pkgrel}\"]\n return [\n \"usr/lib/libblkid.*\",\n \"usr/lib/pkgconfig/*blkid*\",\n \"usr/include/blkid\",\n \"usr/share/man/man3/libblkid.3\"\n ]\n\n@subpackage(\"libuuid\")\ndef _libuuid(self):\n self.pkgdesc = \"UUID library from util-linux\"\n self.license = \"BSD-3-Clause\"\n self.depends = [f\"util-linux-common={pkgver}-r{pkgrel}\"]\n return [\"usr/lib/libuuid.so.*\"]\n\n@subpackage(\"libuuid-devel\")\ndef _libuuid_devel(self):\n self.pkgdesc = \"UUID library from util-linux (development files)\"\n self.license = \"BSD-3-Clause\"\n return [\n \"usr/lib/libuuid.*\",\n \"usr/lib/pkgconfig/*uuid*\",\n \"usr/include/uuid\",\n \"usr/share/man/man3/uuid*\"\n ]\n\n@subpackage(\"libsmartcols\")\ndef _libsmartcols(self):\n self.pkgdesc = \"Table or Tree library from util-linux\"\n self.depends = [f\"util-linux-common={pkgver}-r{pkgrel}\"]\n return [\"usr/lib/libsmartcols.so.*\"]\n\n@subpackage(\"libsmartcols-devel\")\ndef _libsmartcols_devel(self):\n self.pkgdesc = \"Table or Tree library from util-linux (development files)\"\n return [\n \"usr/lib/libsmartcols.*\",\n \"usr/lib/pkgconfig/*smartcols*\",\n \"usr/include/libsmartcols\"\n ]\n\n","sub_path":"main/util-linux/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"355130223","text":"from flask import request, render_template, redirect, url_for\nfrom flask.blueprints import Blueprint\nfrom flask_login import current_user, login_required\n\nfrom cmdbmix.forms.cmdb import IdcAddForm, IdcEditForm\nfrom cmdbmix.models.cmdb import Idc\nfrom cmdbmix.utils import load_data_to_model, get_page_args, load_data_to_form\n\nidc_bp = Blueprint('idc', __name__)\n\n\n@idc_bp.route('/idc_manager', methods=['GET'])\n@login_required\ndef idc_manager():\n page, per_page = get_page_args(request)\n pagination = Idc.query.paginate(page, per_page=per_page)\n return render_template('cmdb/idc.html', pagination=pagination)\n\n\n\n@idc_bp.route('/idc_manager/add_idc', methods=['GET', 'POST'])\n@login_required\ndef add_idc():\n form = IdcAddForm()\n if form.validate_on_submit():\n idc = Idc()\n load_data_to_model(idc, form).save()\n return redirect(url_for('idc.idc_manager'))\n return render_template('cmdb/add_idc.html', form=form)\n\n\n@idc_bp.route('/idc_manager/edit_idc/', methods=['GET', 'POST'])\n@login_required\ndef 
edit_idc(idc_id):\n form = IdcEditForm()\n idc = Idc.query.get_or_404(idc_id)\n if form.validate_on_submit():\n load_data_to_model(idc, form).save()\n return redirect(url_for('idc.idc_manager'))\n load_data_to_form(idc, form)\n return render_template('cmdb/edit_idc.html', form=form, idc=idc)\n\n\n@idc_bp.route('/idc_manager/delete_idc/', methods=['GET', 'POST'])\n@login_required\ndef delete_idc(idc_id):\n Idc.query.get_or_404(idc_id).delete()\n return redirect(url_for('idc.idc_manager'))","sub_path":"cmdbmix/blueprints/cmdb/idc.py","file_name":"idc.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"176567634","text":"# -*- coding: utf-8 -*-\nfrom crawlCrap.crawlUtils import chromeBrowser, openCrawlUrl\nfrom crawlCrap.brandFromEtmoc import reBrandNames, floatAss\nfrom utils.mysqlUtils import executeMysql, insertIntoMysql, importMysqlAsPandas\nimport re\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\") # 向mysql写数据时,需要指定汉字编码格式\n\n\ndef getAllUrls(table, interval):\n executeMysql(\"truncate table %s\" % table)\n urlBasic = \"http://www.kanyanjiu.com\"\n\n # 品类(大陆 港澳台 国外 历史)\n for groupsType in [\"/yan/7_1.html\", \"/yan/10_1.html\", \"/yan/17_1.html\", \"/yan/33_1.html\"]:\n urlGroupsType = urlBasic + groupsType\n groupType = openCrawlUrl(chromeBrowser, urlGroupsType, interval=interval)\n groups = groupType.find(attrs={\"class\": \"am-g\"}).find_all(\"a\")\n\n # 品类-品牌\n for group in groups:\n groupName = group.string\n firstPageUrl = urlBasic + group.get(\"href\")\n firstPage = openCrawlUrl(chromeBrowser, firstPageUrl, interval=interval)\n pages = firstPage.find(attrs={\"class\": \"pageinfo\"}).get_text()\n pages = pages.split(u\"页\")[0]\n pages = re.sub(u\"\\D+\", \"\", pages)\n pages = int(pages)\n print(pages)\n\n # 品类-品牌-产品页号\n for page in range(pages):\n if pages == 1 or page == 0:\n brands = firstPage.find(attrs={\"class\": \"mes_list2\"}).find_all(attrs={\"class\": \"hang\"})\n else:\n urlPage = firstPageUrl.replace(\"_1.html\", \"_%s.html\" % (page + 1))\n brands = openCrawlUrl(chromeBrowser, urlPage, interval=interval)\n brands = brands.find(attrs={\"class\": \"mes_list2\"}).find_all(attrs={\"class\": \"hang\"})\n\n # 品类-品牌-产品\n for brand in brands:\n\n # 品类-品牌-产品-产品详情的链接\n urlDetail = urlBasic + brand.find(\"a\").get(\"href\")\n # detail = openCrawlUrl(chromeBrowser, urlDetail, interval=interval)\n # detail = detail.find(attrs={\"class\": \"jibenmes\"}).ul.get_text()\n # print(detail)\n print(urlDetail)\n insertIntoMysql(table, [\"group_name\", \"url\"], [groupName, urlDetail])\n\n\ndef openDetailUrls(urlTable, toTable, interval):\n def reGuiGe(ass):\n qtyPerBar = \"-1\"\n if ass.find(u\"支/条\") > -1 and re.search(u\"\\d+\", ass) is not None:\n qtyPerBar = re.search(u\"\\d+\", ass).group()\n ass = ass.replace(qtyPerBar, \"\").replace(u\"支/条\", \"\").replace(\"(\", \"\").replace(\")\", \"\")\n assHe = ass.split(u\"盒\")\n if len(assHe) > 1 and assHe[1] != \"\":\n zhi = assHe[1]\n he = assHe[0] + u\"盒\"\n else:\n assZhi = ass.split(u\"支\")\n if len(assZhi) > 1 and assZhi[1] != \"\":\n he = assZhi[1]\n zhi = assZhi[0] + u\"支\"\n else:\n he, zhi = \"\", \"\"\n return int(qtyPerBar), he, zhi\n\n def detailCln(r):\n group = r[0]\n brandName = reBrandNames(r[1])\n prodType = r[2]\n tar = floatAss(r[3].strip(\"mg\"), exception=-1.0)\n nicotine = floatAss(r[4].strip(\"mg\"), exception=-1.0)\n carbonMonoxide = floatAss(r[5].strip(\"mg\"), exception=-1.0)\n qtyPerBar, packageType, 
cigaretteSpecification = reGuiGe(r[6])\n cigaretteSpecification = r[7] + \" \" + cigaretteSpecification\n brandCodeBox, brandCodeBar = r[8], r[9]\n retailPriceBox = floatAss(r[10].strip(u\"元/盒\"))\n retailPriceBar = floatAss(r[11].strip(u\"元/条\"))\n\n if qtyPerBar != -1 and retailPriceBar != 0 and retailPriceBox != 0:\n qtyPerBox = qtyPerBar / retailPriceBar * retailPriceBox\n else:\n qtyPerBox = -1\n\n retailPriceWanZhi = retailPriceBar / qtyPerBar * 10000.0\n if retailPriceWanZhi < 0:\n retailPriceWanZhi = 0.0\n\n wholesalePriceBar = floatAss(r[12].strip(u\"元/条\"))\n wholesalePriceWanZhi = wholesalePriceBar / qtyPerBar * 10000.0\n if wholesalePriceWanZhi < 0:\n wholesalePriceWanZhi = 0.0\n\n updateTime = r[13].replace(\"-\", \"\")[0:8]\n manufacture = r[14]\n return [group, brandName, prodType, tar, nicotine, carbonMonoxide, packageType, qtyPerBox, qtyPerBar,\n cigaretteSpecification, brandCodeBox, brandCodeBar, retailPriceBox, retailPriceBar, retailPriceWanZhi,\n wholesalePriceBar, wholesalePriceWanZhi, updateTime, manufacture]\n\n columns = [\"group_name\", \"brand_name\", \"prod_type\", \"tar\", \"nicotine\", \"carbon_monoxide\", \"package_type\",\n \"qty_per_box\", \"qty_per_bar\", \"cigarette_specification\", \"brand_code_box\", \"brand_code_bar\",\n \"retail_price_box\", \"retail_price_bar\", \"retail_price_wanzhi\", \"wholesale_price_bar\",\n \"wholesale_price_wanzhi\", \"update_time\", \"manufacture\"]\n\n data = importMysqlAsPandas(\"select group_name, url from %s\" % urlTable).values\n for row in data:\n groupName = row[0]\n url = row[1]\n detail = openCrawlUrl(chromeBrowser, url, interval=interval)\n detail = detail.find(attrs={\"class\": \"jibenmes\"}).ul.get_text()\n detail = detail.replace(u\"mm 小盒条形码\", u\"mm\\n小盒条形码\").replace(u\" \", \"\").replace(u\" \", \"\")\n detail = [x for x in detail.split(\"\\n\") if x.find(u\":\") > -1]\n\n detailDic = {}\n for d in detail:\n key = d.split(u\":\")[0]\n value = d.split(u\":\")[1]\n if key == u\"零售参考价格\":\n key = key + u\"(\" + value[-1] + u\")\"\n detailDic[key] = value\n\n brandDetail = [groupName]\n keys = [u\"名称\", u\"类型\", u\"焦油量\", u\"烟气烟碱量\", u\"烟气一氧化碳量\", u\"规格\", u\"烟长\", u\"小盒条形码\", u\"条包条形码\",\n u\"零售参考价格(包)\", u\"零售参考价格(条)\", u\"批发价格\", u\"更新日期\", u\"公司/厂家\"]\n for key in keys:\n try:\n brandDetail.append(detailDic[key])\n except:\n brandDetail.append(\"\")\n\n brandDetail = detailCln(brandDetail)\n insertIntoMysql(toTable, columns, brandDetail)\n\n\nif __name__ == \"__main__\":\n # getAllUrls(\"tobacco_link2\", 20)\n openDetailUrls(\"tobacco_link2\", \"tobacco_brand2\", 10)\n chromeBrowser.quit()\n df = importMysqlAsPandas(\"select * from tobacco_brand2\")\n df = df.drop_duplicates()\n df = df[df[\"group_name\"] != u\"Marlboro(���宝路)\"] # 和 万宝路 重复\n df.to_csv(\"D:\\\\workspace\\\\brand_info2.csv\", index=False, encoding=\"utf-8\")","sub_path":"crawlCrap/brandFromKanyanjiu.py","file_name":"brandFromKanyanjiu.py","file_ext":"py","file_size_in_byte":6711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"487796851","text":"class BST:\n\n def __init__(self):\n\n self.root = None\n\n def setRoot(self,val):\n\n self.root = Node(val)\n\n def insert(self, val):\n\n #mutator method to add a node to the sree \n\n if(self.root is None):\n\n self.setRoot(val)\n\n else:\n\n self.__insertNode(self.root, val)\n\n def __insertNode(self, currentNode, val):\n\n #private method to create a node\n\n if(val <= currentNode.getVal()):\n\n if(currentNode.leftChild):\n\n 
self.__insertNode(currentNode.leftChild, val)\n\n else:\n\n currentNode.leftChild = Node(val)\n\n elif(val > currentNode.val):\n\n if(currentNode.rightChild):\n\n self.__insertNode(currentNode.rightChild, val)\n\n else:\n\n currentNode.rightChild = Node(val)\n\n def find(self, val):\n\n #accessor method to find and return a node's value\n\n return self.__findNode(self.root, val)\n\n def __findNode(self, currentNode, val):\n\n #private method to return the value from a node\n\n if(currentNode is None):\n\n return False\n\n elif(val == currentNode.val):\n\n return True\n\n elif(val < currentNode.val):\n\n return self.__findNode(currentNode.leftChild, val)\n\n else:\n\n return self.__findNode(currentNode.rightChild, val)\n","sub_path":"Trees/Binary Search Trees/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"418154887","text":"#!/usr/bin/env python3\n\nimport os\nfrom gi.repository import RwYang, RwNsdYang\nmodel = RwYang.model_create_libncx()\nmodel.load_schema_ypbc(RwNsdYang.get_schema())\n\nfor filename in os.listdir(\".\"):\n if not filename.endswith(\".xml\"):\n continue\n\n yaml_filename, _ = os.path.splitext(filename)\n yaml_filename += \".yaml\"\n\n with open(filename) as hdl:\n xml_str = hdl.read()\n\n nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_xml_v2(model, xml_str)\n\n yaml_str = nsd.to_yaml(model)\n\n with open(yaml_filename, 'w') as hdl:\n hdl.write(yaml_str)\n","sub_path":"modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/xml_to_yaml.py","file_name":"xml_to_yaml.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"481603981","text":"\nfrom time import ctime\nimport scienceLogicMonitoring as sl\nfrom datetime import datetime, timedelta\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport socket\n\n\nsched = BlockingScheduler()\npolling_sx_minute = '10,20,40'\n\n# @sched.scheduled_job('cron',second=polling_sx_minute)\ndef job_function():\n print('job_function: ' + ctime())\n sched.add_job(timed_job, 'cron', second='42')\ndef timed_job(name):\n print('timed_job: ' + ctime())\n# sched.add_job(job_function, 'date', run_date=datetime.now() + timedelta(seconds=8))\n# sched.add_job(lambda: sl.disable_monitoring(sl.get_id('tus1grcappdin18')), 'date', run_date=datetime.now() + timedelta(seconds=8))\n# sched.configure()\n# sched.start()\n\n# sched.add_job(timed_job, 'cron', minute='42,43,44')\n# sched.start()\n\n\n# @sched.scheduled_job('cron', minute='32,33,34,35')\ns = socket.socket()\nhost = socket.gethostname()\nport = 12345\ns.bind((host,port))\ns.listen(5)\nwhile True:\n c, addr = s.accept()\n print(\"got connection from {}\".format(addr))\n print(c.recv(1024))\n c.close()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"364235911","text":"from openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom datetime import date, time\nfrom openerp import api\n\n\n\n\n\nclass admission_payment(osv.osv):\n _name = \"admission.payment.configuration\"\n\n\n\n\n _columns = {\n\n 'name': fields.char(\"Name\"),\n 'doctor_id': fields.many2one('doctors.profile', 'Doctor/Broker Name'),\n 'start_date':fields.date('MOU Start Date'),\n 'end_date':fields.date('MOU End 
Date'),\n 'overall_admission_rate': fields.float('Overall admission Rate (%)'),\n 'overall_default_discount': fields.float('Overall Discount Rate (%)'),\n 'max_default_discount': fields.float('Max Discount Rate (%)'),\n 'deduct_from_discount': fields.boolean(\"Deduct Excess Discount From admission\"),\n 'add_few_departments': fields.boolean(\"Add by Department\"),\n 'department_ids':fields.many2one('diagnosis.department','Department List'),\n 'admission_configuration_line_ids':fields.one2many(\"admission.payment.configuration.line\",'admission_configuration_line_ids',\"admission Lines\"),\n 'state': fields.selection(\n [('pending', 'Pending'), ('done', 'Confirmed'), ('cancelled', 'Cancelled')],\n 'Status', default='pending', readonly=True)\n\n }\n\n _defaults = {\n 'state': 'pending',\n\n }\n\n _order = 'id desc'\n\n @api.model\n def create(self, vals):\n record = super(admission_payment, self).create(vals)\n\n record.name = 'CA-0' + str(record.id)\n return record\n\n @api.onchange('overall_admission_rate')\n def add_tests_ids_in_line_with_rate(self):\n line_data =[]\n if self.overall_admission_rate:\n try:\n comm_rate = round((self.overall_admission_rate/100),2)\n except:\n comm_rate=0\n if self.admission_configuration_line_ids:\n for items in self.admission_configuration_line_ids:\n est_comm = round((comm_rate*items.test_price),2)\n\n line_data.append({\n\n 'department_id': items.department_id,\n 'test_id': items.test_id,\n 'applicable': items.applicable,\n 'fixed_amount': items.fixed_amount,\n 'variance_amount': comm_rate,\n 'test_price': items.test_price,\n 'est_admission_amount': est_comm,\n 'max_admission_amount': items.max_admission_amount\n\n })\n self.admission_configuration_line_ids=line_data\n\n\n return 'x'\n\n\n @api.onchange('department_ids')\n def add_tests_ids_in_line(self):\n comm_rate=0\n if self.overall_admission_rate:\n try:\n comm_rate = round((self.overall_admission_rate / 100), 2)\n except:\n comm_rate=0\n if self.department_ids:\n depet_id=self.department_ids.id\n query=\"select id,name,department,rate from examination_entry where department=%s\"\n self._cr.execute(query, ([depet_id]))\n all_data = self._cr.dictfetchall()\n configure_line=[]\n # import pdb\n # pdb.set_trace()\n\n\n if self.admission_configuration_line_ids:\n for items in self.admission_configuration_line_ids:\n est_comm = round((comm_rate*items.test_price),2)\n\n configure_line.append({\n\n 'department_id': items.department_id,\n 'test_id': items.test_id,\n 'applicable': items.applicable,\n 'fixed_amount': items.fixed_amount,\n 'variance_amount': comm_rate,\n 'test_price': items.test_price,\n 'est_admission_amount': est_comm,\n 'max_admission_amount': items.max_admission_amount\n\n })\n\n\n for items in all_data:\n est_amnt=round((comm_rate*items.get('rate')),2)\n configure_line.append(\n {\n\n 'department_id': items.get('department'),\n 'test_id': items.get('id'),\n 'applicable':True ,\n 'fixed_amount': 0,\n 'variance_amount':0 ,\n 'test_price': items.get('rate'),\n 'est_admission_amount': est_amnt,\n 'max_admission_amount': 0\n\n }\n )\n self.admission_configuration_line_ids=configure_line\n\n\n\n return \"xXxXxXxXxX\"\n\n\n\n def confirm_configuration(self, cr, uid, ids, context=None):\n\n cr.execute(\"update admission_payment set state='done' where id=%s\", (ids))\n cr.commit()\n\n config_data = self.browse(cr, uid, ids, context=context)\n doc_id = config_data.doctor_id.id\n\n if config_data.state == 'done':\n raise osv.except_osv(_('Already Confirmed!'),\n _('Already Confirmed'))\n\n\n 
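        # Link this configuration to the doctor's profile: cc_id stores the
        # configuration id, so the bind parameters below are (config_id, doctor_id).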
cr.execute(\"update doctors_profile set cc_id=%s where id=%s\", ([doc_id,ids[0]]))\n cr.commit()\n\n\n\n\n return True\n\n def cancel_configuration(self, cr, uid, ids, context=None):\n config_data = self.browse(cr, uid, ids, context=context)\n\n if config_data.state == 'done':\n raise osv.except_osv(_('Already Confirmed!'),\n _('Already Confirmed'))\n\n cr.execute(\"update admission_payment set state='cancelled' where id=%s\", (ids))\n cr.commit()\n\n return True\n\n\n\n\n\n\nclass admission_payment_line(osv.osv):\n _name = \"admission.payment.configuration.line\"\n\n _columns = {\n 'name':fields.char(\"name\"),\n 'admission_configuration_line_ids': fields.many2one('admission.payment.configuration', 'admission Configuration ID'),\n 'department_id':fields.many2one('diagnosis.department','Department'),\n 'test_id':fields.many2one('examination.entry','Test Name'),\n 'applicable':fields.boolean('Applicable'),\n 'fixed_amount': fields.float('Fixed Amount'),\n 'variance_amount': fields.float('Amount (%)'),\n 'test_price': fields.float('Test Fee'),\n 'est_admission_amount': fields.float('admission Amount'),\n 'max_admission_amount': fields.float('Max admission Amount'),\n\n\n }\n\n\n# class doctors_profile(osv.osv):\n# _inherit = \"doctors.profile\"\n# _columns = {\n#\n# 'cc_id': fields.many2one('admission.payment', 'admission Rule')\n# }\n\n","sub_path":"hospital_admission/hospital_payment_config/hospital_admission_payment.py","file_name":"hospital_admission_payment.py","file_ext":"py","file_size_in_byte":6620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"422793932","text":"\"\"\"\r\n Handles the about page.\r\n Default from Google.\r\n\"\"\"\r\n\r\nimport drivebase\r\nfrom oauth2client.client import AccessTokenRefreshError\r\n\r\nclass AboutHandler(drivebase.BaseDriveHandler):\r\n \"\"\"Web handler for the service to read user information.\"\"\"\r\n\r\n def get(self):\r\n \"\"\"Called when HTTP GET requests are received by the web application.\"\"\"\r\n # Create a Drive service\r\n service = self.CreateDrive()\r\n if service is None:\r\n return\r\n try:\r\n result = service.about().get().execute()\r\n # Generate a JSON response with the file data and return to the client.\r\n self.RespondJSON(result)\r\n except AccessTokenRefreshError:\r\n # Catch AccessTokenRefreshError which occurs when the API client library\r\n # fails to refresh a token. This occurs, for example, when a refresh token\r\n # is revoked. 
When this happens the user is redirected to the\r\n # Authorization URL.\r\n self.RedirectAuth()","sub_path":"src/pages/About.py","file_name":"About.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"315404735","text":"import pickle\nfrom collections import OrderedDict, namedtuple\nfrom typing import *\n\nimport numpy as np\nimport torch.nn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\nimport yaml\nfrom torchvision.models import resnet34, resnet18\nfrom torchvision.models.segmentation import deeplabv3_resnet50, deeplabv3_resnet101\nfrom torchvision.models.segmentation.deeplabv3 import DeepLabHead\nfrom efficientnet_pytorch import EfficientNet\nfrom transforms import DeNormalize\n\nfrom coord_conv import CoordConv\n\nclass LocallyConnected2d(nn.Module):\n \"\"\"Locally Connected Layer\n Adapted from https://github.com/pytorch/pytorch/issues/499#issuecomment-430212395\n \"\"\"\n def calculate_spatial_output_shape(self, input_shape, kernel_size, dilation, padding, stride):\n return [np.floor((input_shape[index] + 2 * padding[index] - dilation[index] * (kernel_size[index] - 1) - 1) /\n stride[index] + 1).astype(int) for index in range(len(input_shape))]\n \n def __init__(self, input_shape, in_channels, out_channels, kernel_size, dilation=(1,1), padding=(0,0), stride=(1,1)):\n super().__init__()\n self.kernel_size = kernel_size\n self.out_channels = out_channels\n self.dilation = dilation\n self.padding = padding\n self.stride = stride\n \n self.output_height, self.output_width = self.calculate_spatial_output_shape(input_shape, kernel_size,\n dilation, padding, stride)\n self.weight_tensor_depth = in_channels * kernel_size[0] * kernel_size[1]\n self.spatial_blocks_size = self.output_height * self.output_width\n self.weights = nn.Parameter(torch.empty((1, self.weight_tensor_depth, self.spatial_blocks_size, out_channels),\n requires_grad=True, dtype=torch.float))\n self.bias = nn.Parameter(torch.empty((1, out_channels, self.output_height, self.output_width),\n requires_grad=True, dtype=torch.float))\n \n torch.nn.init.xavier_uniform_(self.weights)\n torch.nn.init.xavier_uniform_(self.bias)\n \n def forward(self, input):\n input_unf = torch.nn.functional.unfold(input, self.kernel_size, dilation=self.dilation,\n padding=self.padding, stride=self.stride)\n local_conv_unf = (input_unf.view((*input_unf.shape, 1)) * self.weights)\n return local_conv_unf.sum(dim=1).transpose(2, 1).reshape(\n (-1, self.out_channels, self.output_height, self.output_width)) + self.bias\n\n\nclass PixelwiseDeepLabHead(DeepLabHead):\n def __init__(self, in_channels, out_channels, activation: str = None, argmax_eval=False, unnorm_meanstd_eval=None,\n inference_fn_key=None, training_only=False, logvar=False, upconv_features=False):\n super().__init__(in_channels, out_channels)\n # Store head definition information\n self.key = 'pixelwise_deeplab_head'\n self.params = {\n 'training_only': training_only,\n 'inference_fn_key': inference_fn_key,\n 'in_channels': in_channels,\n 'out_channels': out_channels,\n 'activation': activation,\n 'argmax_eval': argmax_eval,\n 'unnorm_meanstd_eval': unnorm_meanstd_eval,\n 'upconv_features': upconv_features,\n }\n if logvar:\n self.logvar = torch.nn.Parameter(torch.tensor([0.]))\n self.training_only = training_only\n if activation is not None:\n self.activation = getattr(F, activation) # Map string to activation function\n self.argmax_eval = argmax_eval\n if 
unnorm_meanstd_eval is not None:\n            self.denorm = DeNormalize(*unnorm_meanstd_eval)\n        if upconv_features:\n            # kernel_size=2, stride=2 assumed here for a 2x learned upsampling\n            self.upconv = torch.nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2)\n\n    def forward(self, features, input_shape=None, scale_factor=None):\n        if self.params['upconv_features']:\n            features = self.upconv(features)\n        pred = super().forward(features)\n        if input_shape is not None:\n            pred = F.interpolate(pred, size=input_shape, mode='bilinear', align_corners=False)\n        elif scale_factor is not None:\n            pred = F.interpolate(pred, scale_factor=scale_factor, mode='bilinear', align_corners=False)\n        if self.params['activation'] is not None:\n            pred = self.activation(pred)\n\n        if self.argmax_eval and not self.training:\n            # Argmax for classification\n            pred = torch.argmax(pred, dim=1)\n        if self.params['unnorm_meanstd_eval'] is not None and not self.training:\n            # Denormalization for regression heads\n            pred = self.denorm(pred)\n\n        return pred\n\nclass RegressionHead(torch.nn.Module):\n    def __init__(self, in_channels, out_channels, activation: str = None, unnorm_meanstd_eval=None,\n                 inference_fn_key=None, training_only=False, layer_type='coord_conv'):\n        super().__init__()\n        # Store head definition information\n        self.key = 'regression_head'\n        self.params = {\n            'training_only': training_only,\n            'inference_fn_key': inference_fn_key,\n            'in_channels': in_channels,\n            'out_channels': out_channels,\n            'activation': activation,\n            'unnorm_meanstd_eval': unnorm_meanstd_eval,\n            'layer_type': layer_type,\n        }\n        self.training_only = training_only\n        if layer_type == 'coord_conv':\n            self.net = torch.nn.Sequential(\n                CoordConv(in_channels, 128, kernel_size=3),\n                torch.nn.BatchNorm2d(128),\n                torch.nn.AdaptiveAvgPool2d((3,3)),\n                torch.nn.Flatten(),\n                torch.nn.Linear(128 * 3 * 3, out_channels),\n            )\n        else:\n            raise ValueError(f\"layer_type '{layer_type}' is not supported\")\n        if activation is not None:\n            self.activation = getattr(F, activation)  # Map string to activation function\n        if unnorm_meanstd_eval is not None:\n            self.denorm = DeNormalize(*unnorm_meanstd_eval)\n\n    def forward(self, features, **kwargs):\n        pred = self.net(features)\n\n        if self.params['activation'] is not None:\n            pred = self.activation(pred)\n\n        if self.params['unnorm_meanstd_eval'] is not None and not self.training:\n            pred = self.denorm(pred)\n\n        return pred\n\nclass GlobalDeepLabHead(torch.nn.Module):\n    def __init__(self, in_channels, out_channels, activation: str = None, argmax_eval=True, unnorm_meanstd_eval=None,\n                 inference_fn_key=None, training_only=False):\n        super().__init__()\n        self.aspp = DeepLabHead(in_channels, 256)\n        # Store head definition information\n        self.key = 'global_deeplab_head'\n        self.params = {\n            'training_only': training_only,\n            'inference_fn_key': inference_fn_key,\n            'in_channels': in_channels,\n            'out_channels': out_channels,\n            'activation': activation,\n            'argmax_eval': argmax_eval,\n            'unnorm_meanstd_eval': unnorm_meanstd_eval,\n        }\n        self.logvar = torch.nn.Parameter(torch.tensor([0.]))\n        self.training_only = training_only\n        self.pool = torch.nn.AdaptiveAvgPool2d((1, 1))\n        self.fc = torch.nn.Linear(256, out_channels)\n        if activation is not None:\n            self.activation = getattr(F, activation)  # Map string to activation function\n        self.argmax_eval = argmax_eval\n        if unnorm_meanstd_eval is not None:\n            self.denorm = DeNormalize(*unnorm_meanstd_eval)\n\n    def forward(self, features, **kwargs):\n        pred = self.aspp.forward(features)\n        pred = self.pool(pred).squeeze(-1).squeeze(-1)\n        pred = self.fc(pred)\n\n        if self.params['activation'] is not None:\n            pred = self.activation(pred)\n\n        if self.argmax_eval and not self.training:\n            # Argmax for classification\n            pred = torch.argmax(pred, dim=1)\n        if self.params['unnorm_meanstd_eval'] is not None and not self.training:\n            # Denormalization for regression heads\n            pred = self.denorm(pred)\n\n        return pred\n\n\nclass HydraNet(torch.nn.Module):\n\n    @staticmethod\n    def make_from_backbone_name(backbone_name, pretrained=True, **kwargs):\n        backbone = get_backbone(backbone_name=backbone_name, pretrained=pretrained)\n        return HydraNet(backbone, **kwargs)\n\n    def __init__(self, backbone, heads: torch.nn.ModuleDict = torch.nn.ModuleDict({}),\n                 return_all=False, return_tuple=False, scale_factor=None, upscale=True, upscale_feats=0):\n        super().__init__()\n        self.backbone = backbone\n        self.n_features = backbone.n_features\n        self.heads = heads\n        self.return_all = return_all\n        self.return_tuple = return_tuple\n        self.scale_factor = scale_factor\n        self.upscale = upscale\n        self.upscale_feats = upscale_feats\n\n    def forward(self, x, target_keys=None):\n        input_shape = x.shape[-2:] if self.upscale else None\n        features = self.backbone(x)\n        x = features['out']\n        if self.upscale_feats:\n            x = F.interpolate(x, scale_factor=self.upscale_feats, mode='bilinear', align_corners=False)\n        result = OrderedDict()\n        keys = self.heads.keys() if target_keys is None or self.return_all else target_keys\n\n        if target_keys is None and not self.training:\n            keys = [key for key in keys if not self.heads[key].training_only]\n        for key in keys:\n            head = self.heads[key]\n            if self.scale_factor is not None:\n                result[key] = head(x, scale_factor=self.scale_factor)\n            else:\n                result[key] = head(x, input_shape=input_shape)\n\n        if self.return_tuple:\n            return tuple([val for val in result.values()])\n        return result\n\n\nAVAILABLE_HEADS = {\n    'pixelwise_deeplab_head': PixelwiseDeepLabHead,\n    'global_deeplab_head': GlobalDeepLabHead\n}\n\n\ndef head_from_conf(conf):\n    return AVAILABLE_HEADS[conf['key']](**conf['params'])\n\n\ndef hydranet_from_def(def_path, **kwargs):\n    with open(def_path, 'r') as f:\n        configs = yaml.safe_load(f)\n    if 'backbone' in configs:\n        backbone_name = configs['backbone']\n        del configs['backbone']\n    else:\n        backbone_name = 'resnet50'\n    backbone = get_backbone(backbone_name, pretrained=False)\n    heads = torch.nn.ModuleDict({name: head_from_conf(configs[name]) for name in configs})\n    return HydraNet(backbone, heads, **kwargs)\n\nclass EfficientNetWrapper(torch.nn.Module):\n    def __init__(self, name, pretrained=True):\n        super().__init__()\n        if pretrained:\n            self.model = EfficientNet.from_pretrained(name)\n        else:\n            self.model = EfficientNet.from_name(name)\n        # test number of channels\n        self.n_features = self.model.extract_features(torch.randn(2,3,64,64)).shape[1]\n\n    def forward(self, x):\n        return {'out': self.model.extract_features(x)}\n\ndef get_backbone(backbone_name, pretrained=True):\n    if backbone_name == 'resnet50':\n        backbone = deeplabv3_resnet50(pretrained=pretrained, progress=True).backbone\n        backbone.n_features = 2048\n    elif backbone_name == 'resnet101':\n        backbone = deeplabv3_resnet101(pretrained=pretrained, progress=True).backbone\n        backbone.n_features = 2048\n    elif backbone_name == 'resnet34':\n        model = resnet34(pretrained=pretrained)\n        backbone = torch.nn.Sequential(*list(model.children())[:-2])\n        backbone.n_features = 512\n    elif backbone_name == 'resnet18':\n        model = resnet18(pretrained=pretrained)\n        backbone = torch.nn.Sequential(*list(model.children())[:-2])\n        backbone.n_features = 512\n    elif
backbone_name.startswith('efficientnet-b'):\n backbone = EfficientNetWrapper(backbone_name, pretrained)\n else:\n raise ValueError(f'Backbone {backbone_name} is not supported. Check supported backbones in `hydranet.py:get_backbone()`')\n return backbone\n","sub_path":"hydranet.py","file_name":"hydranet.py","file_ext":"py","file_size_in_byte":12241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"32902459","text":"from torch.autograd import Variable\nfrom make_env import make_env\nfrom MADDPG_old import MADDPG\nimport numpy as np\nimport torch as th\nfrom tensorboardX import SummaryWriter\nfrom OrnsteinUhlenbeckActionNoise import OrnsteinUhlenbeckActionNoise as ou\nimport torchvision.utils as vutils\nimport time\nimport pdb\n\n\nenv = make_env('simple_speaker_listener')\nn_agents = len(env.world.agents)\ndim_obs_list = [env.observation_space[i].shape[0] for i in range(n_agents)]\ndim_act_list = [env.action_space[i].n for i in range(n_agents)]\n\ncapacity = 1000000\nbatch_size = 2 # 1024\n\nn_episode = 60000 # 25000\nmax_steps = 2 # 35\nepisodes_before_train = 1 # 50 ? Not specified in paper\nepisodes_to_break = 500\n\n# reward_record = []\n\nsnapshot_path = \"/home/jadeng/Documents/snapshot/\"\n# snapshot_path = \"/home/jadeng/Desktop/snapshot_SL/\"\nsnapshot_name = \"speaker_listener_latest_episode_\"\npath = snapshot_path + snapshot_name + '800'\n\nmaddpg = MADDPG(n_agents,\n dim_obs_list,\n dim_act_list,\n batch_size,\n capacity,\n episodes_before_train,\n load_models=None, # path\n isOU=False) # ou_noises\n\nFloatTensor = th.cuda.FloatTensor if maddpg.use_cuda else th.FloatTensor\n\nwriter = SummaryWriter()\n\nfor i_episode in range(n_episode):\n obs = env.reset()\n # obs = [obs[i] for i in range(n_agents)]\n # import pdb\n # pdb.set_trace()\n obs = np.concatenate(obs, 0)\n if isinstance(obs, np.ndarray):\n obs = th.from_numpy(obs).float() # obs in Tensor now\n total_reward = 0.0\n av_critics_grad = np.zeros((n_agents, 6))\n av_actors_grad = np.zeros((n_agents, 6))\n n = 0\n print('Start of episode', i_episode)\n print('Target landmark for agent 1: ', env.world.agents[0].goal_b.name)\n print('Target landmark color: ', env.world.agents[0].goal_b.color)\n for t in range(max_steps):\n # print(t)\n env.render()\n\n pdb.set_trace()\n # obs turns to Variable before feed into Actor\n obs = Variable(obs).type(FloatTensor)\n # print('obs', obs)\n\n action = maddpg.select_action(obs).data.cpu() # actions in Variable\n # convert action from Variable to list\n pdb.set_trace()\n action = [action[0].numpy()[:dim_act_list[0]], action[0].numpy()[dim_act_list[0]:]]\n obs_, reward, done, _ = env.step(action)\n\n action = np.concatenate(action, 0)\n action = th.from_numpy(action).float()\n # print('action', action)\n\n reward = th.FloatTensor(reward).type(FloatTensor)\n\n # obs_ = [obs_[i] for i in range(n_agents)]\n obs_ = np.concatenate(obs_, 0)\n obs_ = th.from_numpy(obs_).float() # in Tensor\n if t != max_steps - 1:\n next_obs = obs_\n else:\n next_obs = None\n '''\n if i_episode >= episodes_to_break and reward.sum() > 5.0:\n break\n '''\n total_reward += reward.sum()\n\n maddpg.memory.push(obs.data, action, next_obs, reward) # tensors\n # print('obs', obs.data)\n # print('action', action)\n # print('next_obs', next_obs)\n # print('reward', reward)\n\n obs = next_obs\n\n critics_grad, actors_grad = maddpg.update_policy()\n\n if maddpg.episode_done > maddpg.episodes_before_train:\n av_critics_grad += np.array(critics_grad)\n av_actors_grad += 
np.array(actors_grad)\n n += 1\n\n # time.sleep(0.05)\n\n if n != 0:\n av_critics_grad = av_critics_grad / n\n av_actors_grad = av_actors_grad / n\n\n maddpg.episode_done += 1\n mean_reward = total_reward / max_steps\n '''\n import pdb\n pdb.set_trace()\n if i_episode >= episodes_to_break and n < max_steps:\n mean_reward = total_reward / n\n else:\n mean_reward = total_reward / max_steps\n '''\n\n print('End of Episode: %d, mean_reward = %f, total_reward = %f' % (i_episode, mean_reward, total_reward))\n # reward_record.append(total_reward)\n\n # plot of reward\n writer.add_scalar('data/reward', mean_reward, i_episode)\n\n # plot of agent0 - speaker gradient of critic net\n for i in range(6):\n writer.add_scalar('data/speaker_critic_gradient', av_critics_grad[0][i], i_episode)\n\n # plot of agent0 - speaker gradient of actor net\n for i in range(6):\n writer.add_scalar('data/speaker_actor_gradient', av_actors_grad[0][i], i_episode)\n\n # plot of agent1 - listener gradient of critics net\n for i in range(6):\n writer.add_scalar('data/listener_critic_gradient', av_critics_grad[1][i], i_episode)\n\n # plot of agent0 - speaker gradient of critics net\n for i in range(6):\n writer.add_scalar('data/listener_actor_gradient', av_actors_grad[1][i], i_episode)\n\n # to save models every 200 episodes\n if i_episode != 0 and i_episode % 200 == 0:\n print('Save models!')\n states = {'critics': maddpg.critics,\n 'actors': maddpg.actors,\n 'critic_optimizer': maddpg.critic_optimizer,\n 'actor_optimizer': maddpg.actor_optimizer,\n 'critics_target': maddpg.critics_target,\n 'actors_target': maddpg.actors_target,\n 'memory': maddpg.memory,\n 'var': maddpg.var,\n 'ou_prevs': [ou_noise.x_prev for ou_noise in maddpg.ou_noises]}\n th.save(states, snapshot_path + snapshot_name + str(i_episode))\n\n# print('reward_record', reward_record)\n\nwriter.export_scalars_to_json(\"./all_scalars.json\")\nwriter.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\nprint('number of actors: ', len(maddpg.actors))\nprint('number of critics: ', len(maddpg.critics))\nprint('number of actors target: ', len(maddpg.actors_target))\nprint('number of critics target: ', len(maddpg.critics_target))\nprint('exploration rate: ', maddpg.var)\n\nfor i_episode in range(n_episode):\n obs = env.reset()\n for t in range(max_steps):\n env.render()\n agent_actions = []\n for i, agent in enumerate(env.world.agents):\n agent_action_space = env.action_space[i]\n action = agent_action_space.sample()\n action_vec = np.zeros(agent_action_space.n)\n action_vec[action] = 1\n agent_actions.append(action_vec)\n\n time.sleep(0.033)\n observation, reward, done, info = env.step(agent_actions)\n\n print(agent_actions)\n print(observation)\n print(reward)\n print(done)\n print(info)\n print()\n'''\n\n\n\n\n\n\n\n\n","sub_path":"maddpg/main_old.py","file_name":"main_old.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"119773793","text":"import pprint\nimport mysql.connector\nfrom urlparse import parse_qs\nimport json\n\ndef application(env, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n with open(\"employees.html\", 'r') as employees:\n html = employees.read()\n #checks for the ajax request\n if(env['REQUEST_METHOD'] == 'POST'):\n foo = env['wsgi.input'].read()\n #if the value is not empty\n if foo:\n return query(foo)\n return html.encode()\n#function called on request takes integer as parameter\ndef query(idNumber):\n\n creds = { 
'user' : 'jabba',\n        'database' : 'pf',\n        'password' : 'Sl4veLe!a',\n        'auth_plugin' : 'mysql_native_password'}\n    cnx = mysql.connector.connect(**creds)\n\n    cursor = cnx.cursor(dictionary=True)\n\n    #queries mysql server\n    cursor.execute('select concat(firstName,\' \',lastName) as name, orders.orderNumber, orders.productCode, products.priceEach, orders.quantity, (products.priceEach*orders.quantity) as total from employees join orders on orders.employeeNumber = employees.employeeNumber join products on products.productCode = orders.productCode where employees.employeeNumber = {} order by orderNumber;'.format(int(idNumber)))\n\n\n    values = cursor.fetchall()\n\n    dict = []\n\n    #goes through all the values returned by query\n    for value in values:\n\n        name = value[\"name\"]\n        orderNumber = value['orderNumber']\n        productCode = value[\"productCode\"]\n        priceEach = float(value['priceEach'])\n        quantityOrdered = value['quantity']\n        #total is computed by the query's alias\n        total = float(value['total'])\n        #dictionary containing all the values for a certain entry\n        temp = {\n            \"name\":name,\n            \"orderNumber\":orderNumber,\n            \"productCode\":productCode,\n            \"priceEach\":priceEach,\n            \"quantityOrdered\":quantityOrdered,\n            \"total\":total\n        }\n        #appending temp dictionary to the list\n        dict.append(temp)\n\n    #converts python list of dictionaries to json file\n    jsdict = json.dumps(dict)\n\n    return jsdict\n\n","sub_path":"Final Project Web Programming/emloyees.py","file_name":"emloyees.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"312619541","text":"import pandas as pd\nimport csv\nfrom pymongo import MongoClient\nimport urllib.request\n\n# Pandas\ndf = pd.read_excel('db/dog_ex.xlsx')\ndf.to_csv('db/dog_ex.csv')\n\n# CSV\ne = open('db/dog_ex.csv', 'r')\ng = csv.DictReader((e))\n\n# Mongo DB\nclient = MongoClient('localhost', 27017)\ndb = client.dbFindMyDogTest\n\n# Mongo DB 초기화\ndb.customer.delete_many({})\ndb.dog.delete_many({})\n\n# 로그인 테스트 데이터\ncustomer_data = {\n    'id': 'hufs',\n    'pw': '1234',\n    'name': '홍길동',\n    'dept': 'indi',\n    }\n\ndb.customer.insert_one(customer_data)\n\n\n# 유기견 테스트 데이터\nfor i in g:\n    del i['']\n    # i['breed'] = '[\"테스트1\",\"테스트2\",\"테스트3\"]'\n    dog_data = i\n    db.dog.insert_one(dog_data)\n\n    # for k, v in i.items():\n    #     print(k,v)\n    # print(\"-----------------\")\n\nregistered_dog = db.dog.find({}, {'_id': 0})\nresult = registered_dog.count()\nprint(result)\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"575165895","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May  4 09:03:56 2018\n\n@author: pao\n\"\"\"\n#########################################################################\n# Grafica la pp acum elegida para cada miembro del emsable #\n#########################################################################\n\nimport time\nimport wrf\nimport numpy as np\nfrom netCDF4 import Dataset\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport glob\nfrom mpl_toolkits.axes_grid1 import AxesGrid\n\nstart_time = time.clock()\n\n##########################################################################\n\npath = \"../2017-09-27_01_00_00/anal00*\"\nwd = \"ANA\" #ANA = analsis / GUESS = guess\nvariable = \"RAINNC\"\nfigsize = (5, 4)\nnrows_ncols = (2, 5) #Grilla de figuras\ndatetime = 
\"2017-09-27_01_00_00\"\n###########################################################################\n\nfiles = sorted(glob.glob(path))\n\n#Abro un .nc para obtener la latitud y la longitud\nncfile = Dataset(files[0])\nlon = wrf.getvar(ncfile, \"XLONG\")\nlat = wrf.getvar(ncfile, \"XLAT\")\nllon = lon[0, 0] #Si cambia el dominio hay que cambiar esto.\nllat = lat[0, 0]\nrlon = lon[148, 98]\nrlat = lat[148, 98]\n\n#Leo los datos\n\nfiles = files[0:len(files)-1]\nalldata = np.empty((len(files), lon.shape[0], lon.shape[1]))\n\nfor f in range(len(files)):\n ncfile = Dataset(files[f])\n print(files[f])\n alldata[f,:,:] = wrf.to_np(wrf.getvar(ncfile, variable, meta = False)) + wrf.to_np(wrf.getvar(ncfile, \"RAINC\", meta = False))\n\n#Barra de colores\nlev_max = np.max(alldata)\nlev_min = 0\nif lev_max == 0:\n levels = np.around(np.linspace(lev_min, 10, 39), 2)\n ticks = np.around(np.linspace(lev_min, 10, 10), 2)\nelse:\n levels = np.around(np.linspace(lev_min, lev_max, 39), 2)\n ticks = np.around(np.linspace(lev_min, lev_max, 10), 2)\n\n#Grafico\nfig = plt.figure(figsize = figsize, dpi = 300)\ngrid = AxesGrid(fig, 111,\n nrows_ncols = nrows_ncols,\n axes_pad = 0.05,\n cbar_mode = 'single',\n cbar_location = 'bottom',\n cbar_pad = 0.05\n )\n\nfor f in range(len(files)):\n\n m = Basemap(resolution = 'i', llcrnrlon = llon, llcrnrlat = llat, urcrnrlon = rlon, urcrnrlat = rlat,\n projection = 'lcc', lat_1 = -31.847992, lat_2 = -31.848, lat_0 = -31.848, lon_0 = -61.537,\n ax = grid[f])\n x, y = m(wrf.to_np(lon), wrf.to_np(lat))\n cf = grid[f].contourf(x, y, alldata[f,:,:], levels = levels, cmap = 'Blues')\n #grid[f].contour(x, y, var, levels = levels, colors = '#f4fbd2', linewidths = 0.1)\n m.drawcoastlines(linewidth = 0.5)\n m.drawcountries(linewidth = 0.5)\n cbar = grid.cbar_axes[0].colorbar(cf, ticks = ticks, boundaries=[-5] + levels + [5], extend = 'both')\n cbar.ax.tick_params(labelsize = 8)\n for cax in grid.cbar_axes:\n cax.toggle_label(True)\n \nplt.subplots_adjust(left = 0.01, right = 0.99, bottom = 0.01, top = 0.99, wspace = 0.00001) \nfig.savefig(wd + \"_\" + datetime + \"_\" + variable + \"_miembros.pdf\")\n\nplt.close()\nprint(\"Plot variable ready!\")\nprint(time.clock() - start_time, \"seconds\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"plot_variable.py","file_name":"plot_variable.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"547212112","text":"# 各地の天気をURLで取得\nurl = \"http://weather.livedoor.com/forecast/webservice/json/v1\"\n# Sendai\nurl += \"?city=040010\"\n\n# Webから天気情報を取得する\nimport urllib.request as req\nres = req.urlopen(url)\njson_data = res.read()\n\n# JSONデータをPythonの辞書型に変換\nimport json\ndata = json.loads(json_data)\n\nfor row in data[\"forecasts\"]:\n label = row[\"dateLabel\"]\n telop = row[\"telop\"]\n print(label + \": \" + telop)\n","sub_path":"Python-entry/05/showWeather.py","file_name":"showWeather.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"571386584","text":"# -*- coding: utf-8 -*-\nimport unittest\n\nfrom mock import MagicMock, patch\nfrom openprocurement.bridge.basic.storages.redis_plugin import redis_includeme, lazy_includeme\n\n\nclass TestDbs(unittest.TestCase):\n\n def setUp(self):\n self.config = {\n 'storage_config': {\n 'cache_host': '127.0.0.1',\n 'cache_port': '6379',\n 'cache_db_name': '0'\n }\n }\n with 
patch('openprocurement.bridge.basic.storages.redis_plugin.redis') as mocked_redis:\n StrictRedis_mock = MagicMock()\n StrictRedis_mock.configure_mock(**{'set': None, 'exists': None})\n mocked_redis.StrictRedis.return_value = StrictRedis_mock\n\n self.db = redis_includeme(self.config)\n self.db.db = dict()\n\n def set_value(key, value):\n self.db.db[key] = value\n\n self.db.set_value = set_value\n self.db.has_value = lambda x: x in self.db.db\n\n @patch('openprocurement.bridge.basic.storages.redis_plugin.redis')\n def test_redis_includeme(self, mocked_redis):\n config = {\n 'storage_config': {\n 'cache_host': '127.0.0.1',\n 'cache_port': '6379',\n 'cache_db_name': '0'\n }\n }\n StrictRedis_mock = MagicMock()\n StrictRedis_mock.configure_mock(**{'set': None, 'exists': None})\n mocked_redis.StrictRedis.return_value = StrictRedis_mock\n\n db = redis_includeme(config)\n\n self.assertEqual(db._backend, 'redis')\n self.assertEqual(db._db_name, config['storage_config']['cache_db_name'])\n self.assertEqual(db._port, config['storage_config']['cache_port'])\n self.assertEqual(db._host, config['storage_config']['cache_host'])\n self.assertEqual(db._host, config['storage_config']['cache_host'])\n self.assertEqual(db.set_value, None)\n self.assertEqual(db.has_value, None)\n\n @patch('openprocurement.bridge.basic.storages.redis_plugin.Db')\n def test_cache_host_in_config(self, mocked_db):\n db = lazy_includeme(self.config)\n\n self.assertEqual(db._backend, 'lazydb')\n self.assertEqual(db._db_name, self.config['storage_config']['cache_db_name'])\n\n def test_get(self):\n self.assertEquals(self.db.get('test'), None)\n self.db.set_value('test', 'test')\n self.assertEquals(self.db.get('test'), 'test')\n\n def test_put(self):\n self.db.put('test_put', 'test_put')\n self.assertEquals(self.db.get('test_put'), 'test_put')\n\n def test_has(self):\n self.assertEquals(self.db.has('test_has'), False)\n self.db.set_value('test_has', 'test_has')\n self.assertEquals(self.db.has('test_has'), True)","sub_path":"openprocurement/bridge/basic/tests/test_redis_storage.py","file_name":"test_redis_storage.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"448246726","text":"from django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.core.mail import EmailMessage\n\n\ndef send_mail(subject, message, from_email=None, to=[], cc=[], bcc=[], reply_to=None, attachments=[]):\n site = Site.objects.get_current()\n subject = '[{}] '.format(site.name) + subject\n\n if from_email is None:\n from_email = settings.DEFAULT_FROM_EMAIL\n\n mail = EmailMessage(subject, message, from_email, to=to, cc=cc, bcc=bcc, reply_to=reply_to, attachments=attachments)\n mail.send()\n","sub_path":"rdmo/core/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"54464378","text":"from django import forms\nfrom django.forms.formsets import formset_factory\nfrom django.contrib.auth import get_user_model\nimport models\nfrom tinymce.widgets import TinyMCE\n\nUser = get_user_model()\n\nclass CompetitionForm(forms.ModelForm):\n class Meta:\n model = models.Competition\n fields = ('title', 'description', 'force_submission_to_leaderboard', 'image', 'has_registration', 'end_date', 'published')\n widgets = { 'description' : TinyMCE(attrs={'rows' : 20, 'class' : 'competition-editor-description'},\n mce_attrs={\"theme\" : 
\"advanced\", \"cleanup_on_startup\" : True, \"theme_advanced_toolbar_location\" : \"top\", \"gecko_spellcheck\" : True})}\n\nclass CompetitionPhaseForm(forms.ModelForm):\n class Meta:\n model = models.CompetitionPhase\n fields = ('phasenumber', 'label', 'start_date', 'max_submissions', 'is_scoring_only', 'input_data', 'auto_migration', 'scoring_program', 'reference_data', 'leaderboard_management_mode')\n widgets = { 'leaderboard_management_mode' : forms.Select(attrs={'class': 'competition-editor-phase-leaderboard-mode'}, choices=(('default', 'Default'), ('hide_results', 'Hide Results'))),\n 'DELETE' : forms.HiddenInput, 'phasenumber': forms.HiddenInput }\n\nclass PageForm(forms.ModelForm):\n class Meta:\n model = models.Page\n fields = ('category', 'rank', 'label', 'html', 'container')\n widgets = { 'html' : TinyMCE(attrs={'rows' : 20, 'class' : 'competition-editor-page-html'},\n mce_attrs={\"theme\" : \"advanced\", \"cleanup_on_startup\" : True, \"theme_advanced_toolbar_location\" : \"top\", \"gecko_spellcheck\" : True}),\n 'DELETE' : forms.HiddenInput, 'container' : forms.HiddenInput}\n\nclass CompetitionDatasetForm(forms.ModelForm):\n class Meta:\n model = models.Dataset\n\nclass CompetitionParticipantForm(forms.ModelForm):\n class Meta:\n model = models.CompetitionParticipant\n","sub_path":"codalab/apps/web/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"85181685","text":"\"\"\"\nWrite a function called dict_invert that takes in a dictionary with immutable values and returns the inverse\nof the dictionary. The inverse of a dictionary d is another dictionary whose keys are the unique dictionary\nvalues in d. The value for a key in the inverse dictionary is a sorted list of all keys in d that have the\nsame value in d.\n\nHere are some examples:\n\nIf d = {1:10, 2:20, 3:30} then dict_invert(d) returns {10: [1], 20: [2], 30: [3]}\nIf d = {1:10, 2:20, 3:30, 4:30} then dict_invert(d) returns {10: [1], 20: [2], 30: [3, 4]}\nIf d = {4:True, 2:True, 0:True} then dict_invert(d) returns {True: [0, 2, 4]}\n\"\"\"\n\n\ndef dict_invert(d):\n new_dict = {}\n for x in d:\n if d[x] not in new_dict:\n new_dict.update({d[x]: [x]})\n else:\n prev = new_dict[d[x]]\n prev.append(x)\n prev.sort()\n new_dict.update({d[x]: prev})\n\n return new_dict\n\n\nassert({} == dict_invert({}))\nassert({1: [1]} == dict_invert({1: 1}))\nassert({3: [1], 4: [2]} == dict_invert({1: 3, 2: 4}))\nassert({1: [1, 2]} == dict_invert({1: 1, 2: 1}))\nassert({10: [2], 30: [600, 30000]} == dict_invert({30000: 30, 600: 30, 2: 10}))","sub_path":"Python/EdX/dict_invert.py","file_name":"dict_invert.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"216319176","text":"# -*- coding: utf-8 -*-\n\n\nimport constants as FLUXES\n\n\nfrom sqlalchemy import create_engine, MetaData\nfrom sqlalchemy import (Table, Column, Integer, String, DateTime, BigInteger,\n Numeric, SmallInteger, Date)\n\n\nengine = create_engine(FLUXES.DB_FLUXES_URI + '?charset=utf8',\n encoding='utf-8',\n convert_unicode=True,\n pool_recycle=FLUXES.DB_POOL_RECYCLE,\n pool_size=FLUXES.DB_POOL_SIZE,\n echo=FLUXES.DB_ECHO)\n\n\nmeta = MetaData(bind=engine)\n\n\ncard_table = Table('card', meta,\n Column('id', Integer, primary_key=True),\n Column('iccid', String(20), nullable=False, unique=True),\n Column('mobile', String(18), nullable=False),\n 
Column('real_name', String(20)),\n Column('identity_card', String(20)),\n Column('primary_agent', Integer, index=True, default=0),\n Column('secondary_agent', Integer, index=True, default=0),\n Column('primary_at', DateTime),\n Column('secondary_at', DateTime),\n Column('monthly_amount', Numeric(12, 2), nullable=False),\n Column('total_amount', Numeric(12, 2), nullable=False),\n Column('alias', String(100)),\n Column('primary_alias', String(100)),\n Column('secondary_alias', String(100)),\n Column('remark', String(200)),\n Column('primary_remark', String(200)),\n Column('secondary_remark', String(200)),\n Column('created_at', DateTime, nullable=False),\n Column('updated_at', DateTime, nullable=False),\n mysql_engine='InnoDB')\n\n\ndef get_card_table():\n return card_table\n\n\nopen_card_table = Table('open_card', meta,\n Column('id', Integer, primary_key=True),\n Column('user_id', BigInteger, nullable=False,\n index=True),\n Column('iccid', String(20), nullable=False),\n Column('mobile', String(18), nullable=False),\n Column('created_at', DateTime, nullable=False),\n mysql_engine='InnoDB')\n\n\ndef get_open_card_table():\n return open_card_table\n\n\ndistribute_card_table = Table('distribute_card', meta,\n Column('id', Integer, primary_key=True),\n Column('user_id', BigInteger, nullable=False,\n index=True),\n Column('to_id', BigInteger, nullable=False),\n Column('iccid', String(20), nullable=False),\n Column('mobile', String(18), nullable=False),\n Column('created_at', DateTime, nullable=False),\n mysql_engine='InnoDB')\n\n\ndef get_distribute_card_table():\n return distribute_card_table\n\n\ncard_info_table = Table('card_info', meta,\n Column('id', Integer, primary_key=True),\n Column('number', Integer, nullable=False),\n Column(\"flux\", Numeric(12, 2), nullable=False),\n Column('price', Numeric(12, 2), nullable=False),\n Column('effected_at', DateTime, nullable=False),\n Column('created_at', DateTime, nullable=False),\n mysql_engine='InnoDB')\n\n\ndef get_card_info_table():\n '''\n 存储卡套餐、流量等信息\n '''\n return card_info_table\n\n\nmeta_table = Table('meta', meta,\n Column('id', Integer, primary_key=True),\n Column('key', String(200), nullable=False, index=True),\n Column('value', String(300), nullable=False),\n Column('type', SmallInteger, nullable=False),\n Column('created_at', DateTime, nullable=False),\n Column('updated_at', DateTime, nullable=False),\n mysql_engine='InnoDB')\n\n\ndef get_meta_table():\n return meta_table\n\n\nuser_amount_table = Table('user_amount', meta,\n Column('id', Integer, primary_key=True),\n Column('user_id', Integer, nullable=False,\n index=True),\n Column('role', SmallInteger, nullable=False),\n Column('amount', Numeric(12, 2), nullable=False),\n Column('profit', Numeric(12, 2), nullable=False,\n default=0),\n Column('date', Date, nullable=False),\n Column('updated_at', DateTime, nullable=False),\n mysql_engine='InnoDB')\n\n\ndef get_user_amount_table():\n return user_amount_table\n\n\ndef exec_query(sql, islist=False):\n conn = engine.connect()\n try:\n ret = []\n for one in conn.execute(sql).fetchall():\n ret.append(dict(one.items()))\n if not islist:\n return ret if len(ret) != 1 else ret[0]\n return ret\n except:\n raise\n finally:\n conn.close()\n\n\ndef exec_change(*args):\n conn = engine.connect()\n trans = conn.begin()\n try:\n ret = []\n for sql in args:\n ret.append(conn.execute(sql))\n\n trans.commit()\n return ret if len(ret) != 1 else ret[0]\n except:\n trans.rollback()\n raise\n finally:\n conn.close()\n\n\ndef create_fluxes_all():\n 
meta.create_all(bind=engine, checkfirst=True)\n\n\ndef drop_fluxes_all():\n meta.drop_all(bind=engine, checkfirst=True)\n","sub_path":"smarttree/admin/admin/fluxes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"474045996","text":"import h5py\nimport numpy as np\n\nimport nengo\nimport nengo_spa as spa\n\nfrom nengo_learn_assoc_mem.utils import numpy_bytes_to_str, norm_spa_vecs\n\n\nwith h5py.File(\"data/meg_ia_full_shuffled.h5py\", \"r\") as fi:\n print(list(fi.keys()))\n inp = list(np.array(fi['input']))\n cor = list(np.array(fi['correct']))\n\n fan1 = numpy_bytes_to_str(fi['fan1'])\n fan2 = numpy_bytes_to_str(fi['fan2'])\n foil1 = numpy_bytes_to_str(fi['foil1'])\n foil2 = numpy_bytes_to_str(fi['foil2'])\n\n v_strs = numpy_bytes_to_str(fi['vocab_strings'])\n v_vecs = list(fi['vocab_vectors'])\n D = fi['vocab_vectors'].attrs['dimensions']\n\n accum = list(np.array(fi['clean_accum']))\n\n dt = fi['t_range'].attrs['dt']\n t_range = np.arange(fi['t_range'][0], fi['t_range'][1], dt)\n t_pause = fi['t_range'].attrs['t_pause']\n t_present = fi['t_range'].attrs['t_present']\n\nvocab = spa.Vocabulary(D)\nfor val, vec in zip(v_strs, v_vecs):\n vocab.add(val, vec)\n\nfan1_pair_vecs = norm_spa_vecs(vocab, fan1)\nfan2_pair_vecs = norm_spa_vecs(vocab, fan2)\nfoil1_pair_vecs = norm_spa_vecs(vocab, foil1)\nfoil2_pair_vecs = norm_spa_vecs(vocab, foil2)\n\npair_vecs = np.array(fan1_pair_vecs + fan2_pair_vecs)\n\nn_neurons = 1000\nseed = 8\n\nwith nengo.Network(seed=seed) as model:\n in_nd = nengo.Node(lambda t: inp[int(t/dt)])\n accum_out = nengo.Node(lambda t: accum[int(t/dt)])\n correct = nengo.Node(lambda t: cor[int(t/dt)])\n\n err_nd = nengo.Node(lambda t, x: x[0] - x[1], size_in=2)\n output = nengo.Node(size_in=1)\n\n cmp = nengo.Ensemble(1000, 2*D)\n\n nengo.Connection(accum_out, cmp[D:],\n transform=pair_vecs.T, synapse=None)\n nengo.Connection(in_nd, cmp[:D], synapse=None)\n conn_out = nengo.Connection(cmp, output,\n transform=np.zeros((1, 2*D)),\n learning_rule_type=nengo.PES(1e-5))\n nengo.Connection(err_nd, conn_out.learning_rule, synapse=None)\n nengo.Connection(output, err_nd[0])\n nengo.Connection(correct, err_nd[1])\n\n p_err = nengo.Probe(err_nd, synapse=0.01)\n p_clean_out = nengo.Probe(output, synapse=0.01)\n p_dec = nengo.Probe(conn_out, 'weights', sample_every=t_present+t_pause)\n\nwith nengo.Simulator(model) as sim:\n sim.run(t_range[-1])\n\n\nfi_name = \"learn_dot\"\nwith h5py.File(f\"data/{fi_name}.h5py\", \"w\") as out_fi:\n out_fi.create_dataset(\"clean_out\", data=sim.data[p_clean_out])\n out_fi.create_dataset(\"err\", data=sim.data[p_err])\n out_fi.create_dataset(\"dec\", data=sim.data[p_dec])\n","sub_path":"prototypes/learn_dot_react.py","file_name":"learn_dot_react.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"514188713","text":"import pandas as pd\nimport numpy as np\n\npd.options.display.width = 0\npd.options.mode.chained_assignment = None\n\n\ndef update_financial_results(proforma_old, npv_old, discount_rate=0.1, growth_rate=0.03):\n proforma_csv = proforma_old.copy(deep=True)\n proforma_csv['project_year'] = np.arange(0, proforma_csv.shape[0])\n\n # Check if there is a user constraint column: if so, fix it\n if \"User Constraints Value\" in proforma_csv.columns:\n proforma_csv[\"User Constraints Value\"] = proforma_csv['project_year']. 
\\\n        apply(lambda i: max(proforma_csv[\"User Constraints Value\"]) * (1 + growth_rate) ** (i - 1))\n        proforma_csv[\"User Constraints Value\"][0] = 0\n    proforma_csv[\"Yearly Net Value\"] = proforma_csv.iloc[:, 1:-2].sum(axis=1)\n\n    # Calculate NPV for each column\n    npv_csv = npv_old.copy(deep=True)\n    npv_values = {}\n    for col_name in proforma_csv.columns[1:-1]:\n        npv_values[col_name] = np.npv(discount_rate, proforma_csv[col_name])\n        if col_name != \"Yearly Net Value\":\n            npv_csv[col_name] = npv_values[col_name]\n        else:\n            npv_csv[\"Lifetime Present Value\"] = npv_values[col_name]\n\n    return proforma_csv, npv_csv\n\n\ndef new_financial_results(proforma_old, npv_old, params_path, ra_constraint=True):\n    params = pd.read_csv(params_path)\n\n    discount_rate = float(params.loc[(params['Key'] == \"npv_discount_rate\") &\n                                     (params['Tag'] == \"Finance\"), 'Value'].values[0])/100\n    growth_rate = float(params.loc[(params['Key'] == \"inflation_rate\") &\n                                   (params['Tag'] == \"Finance\"), 'Value'].values[0])/100\n    start_yr = float(params.loc[(params['Key'] == \"start_year\") &\n                                (params['Tag'] == \"Scenario\"), 'Value'].values[0])\n    end_yr = float(params.loc[(params['Key'] == \"end_year\") &\n                              (params['Tag'] == \"Scenario\"), 'Value'].values[0])\n    om_cost = float(params.loc[(params['Key'] == \"fixedOM\") &\n                               (params['Tag'] == \"Battery\"), 'Value'].values[0])\n    ccost = float(params.loc[(params['Key'] == \"ccost_kwh\") &\n                             (params['Tag'] == \"Battery\"), 'Value'].values[0])\n    monthly_data_file = str(params.loc[(params['Key'] == \"monthly_data_filename\") &\n                                       (params['Tag'] == \"Scenario\"), 'Value'].values[0])\n    batt_kw = float(params.loc[(params['Key'] == \"ch_max_rated\") &\n                               (params['Tag'] == \"Battery\"), 'Value'].values[0])\n    batt_kwh = float(params.loc[(params['Key'] == \"ene_max_rated\") &\n                                (params['Tag'] == \"Battery\"), 'Value'].values[0])\n    lifetime = int(end_yr - start_yr + 1)\n\n    # get RA price\n    monthly_data = pd.read_csv(monthly_data_file)\n    ra_value = max(monthly_data['RA Capacity Price ($/kW)'])\n\n    # copy over proforma, make it the correct size\n    proforma_old.rename(columns={'Unnamed: 0': 'Year'}, inplace=True)\n    proforma_old.iloc[0, 0] = np.NaN\n    proforma_old[\"Year\"] = pd.to_numeric(proforma_old[\"Year\"], errors='coerce')\n    if lifetime == proforma_old.shape[0] + 1:\n        proforma_csv = proforma_old.copy(deep=True)\n        proforma_csv['project_year'] = np.arange(0, proforma_csv.shape[0])\n    elif lifetime < proforma_old.shape[0] + 1:\n        # subtract rows\n        proforma_csv = proforma_old.copy(deep=True).iloc[0:(lifetime + 1), :]\n        proforma_csv['project_year'] = np.arange(0, proforma_csv.shape[0])\n    else:\n        # add rows, propagate existing values for year\n        proforma_csv = proforma_old.copy(deep=True).reindex(np.arange(0, lifetime + 1))\n        proforma_csv['project_year'] = np.arange(0, proforma_csv.shape[0])\n        proforma_csv.iloc[1:, 0] = proforma_csv['project_year']. \\\n            apply(lambda i: str(int(proforma_csv.iloc[1:, 0].min()) - 1 + int(i)))\n\n    # Check if there is a user constraint column: if so, fix it\n    if \"User Constraints Value\" in proforma_csv.columns:\n        ra_value = float(params.loc[(params['Key'] == \"price\") &\n                                    (params['Tag'] == \"User\"), 'Value'].values[0])\n        proforma_csv[\"User Constraints Value\"] = proforma_csv['project_year']. 
\\\n apply(lambda i: (ra_value * 12 * batt_kw) * (1 + growth_rate) ** (i - 1))\n proforma_csv[\"User Constraints Value\"][0] = 0\n\n\n # change capital cost\n ind1 = proforma_csv.columns.str.contains(\"Capital Cost\")\n ind2 = np.logical_not(proforma_csv.columns.str.contains(\"Site Load\"))\n ind3 = ind1 & ind2\n proforma_csv.loc[0, ind3] = ccost * batt_kwh * -1\n\n # change om cost\n ind1 = proforma_csv.columns.str.contains(\"Fixed O&M Cost\")\n ind2 = np.logical_not(proforma_csv.columns.str.contains(\"Site Load\"))\n ind3 = ind1 & ind2\n proforma_csv.loc[1, ind3] = om_cost * batt_kw * -1\n\n # change RA value\n if \"Resource AdequacyCapacity Payment\" in proforma_csv.columns:\n ind1 = proforma_csv.columns.str.contains(\"Resource Adequacy\")\n proforma_csv.loc[1, ind1] = ra_value * batt_kw * 12\n elif ra_constraint & (\"User Constraints Value\" in proforma_csv.columns):\n ind1 = proforma_csv.columns.str.contains(\"User Constraints\")\n proforma_csv.loc[1, ind1] = ra_value * batt_kw * 12\n\n # extend all rows (except first and last two) accordingly\n for c in proforma_csv.columns[1:-2]:\n proforma_csv.loc[1:,c] = proforma_csv.loc[1:,\"project_year\"]. \\\n apply(lambda i: proforma_csv.loc[1, c] * (1 + growth_rate) ** (i - 1))\n\n # update yearly value column\n proforma_csv[\"Yearly Net Value\"] = proforma_csv.iloc[:, 1:-2].sum(axis=1)\n\n # Calculate NPV for each column\n npv_csv = npv_old.copy(deep=True)\n npv_values = {}\n for col_name in proforma_csv.columns[1:-1]:\n npv_values[col_name] = np.npv(discount_rate, proforma_csv[col_name])\n if col_name != \"Yearly Net Value\":\n npv_csv[col_name] = npv_values[col_name]\n else:\n npv_csv[\"Lifetime Present Value\"] = npv_values[col_name]\n\n # Payback period calculations\n cost = proforma_csv.iloc[0,1:-2].sum(axis=0)\n yearly_revenue = proforma_csv.iloc[1,1:-2].sum(axis=0)\n d = {'Payback_Years': [cost/yearly_revenue]}\n payback_csv = pd.DataFrame(data=d)\n\n return proforma_csv, npv_csv, payback_csv\n","sub_path":"proforma_update.py","file_name":"proforma_update.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"96461386","text":"from data_elements.element import Element\nfrom utils.config import *\n\n\nclass Segment:\n\n def __init__(self, content):\n self.id = None\n self.data_elements = []\n self.content = content\n self.description = ''\n self.element_delimiter = DocumentSettings.element_delimiter\n self.segment_delimiter = DocumentSettings.segment_delimiter\n\n def to_dict(self):\n return {'segment': self.id.content,\n 'description': self.description,\n 'data_elements': [element.content for element in self.data_elements]}\n","sub_path":"data_elements/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"366537736","text":"from __future__ import division\nfrom collections import Counter, defaultdict\nimport codecs\nfrom math import log\nimport string\n\ndef log2(n):\n return log(n, 2)\n\ndef build_unigrams(wordlist):\n unigrams = Counter(wordlist)\n for word in unigrams:\n unigrams[word]/=len(wordlist)\n return unigrams\n\ndef entropy(dist):\n return 0 - sum(map(lambda prob: prob*log2(prob), dist.values()))\n\nclass CorpusStats:\n def __init__(self, rawtext):\n self.rawtext = rawtext\n self.chartypes = set(self.rawtext).difference(set(string.whitespace))\n \n self.tokens = self.rawtext.split()\n self.numtokens = 
len(self.tokens)\n self.types = set(self.tokens)\n self.numtypes = len(self.types)\n \n def count_wordlengths(self):\n \"\"\"Count word length distribution over types and tokens (sec 4.1, figure 3)\"\"\"\n tokenlens = map(lambda word: len(word), self.tokens)\n tokenlens_dist = Counter(tokenlens)\n for length in tokenlens_dist:\n tokenlens_dist[length]/=self.numtokens \n \n typelens = map(lambda word: len(word), self.types)\n typelens_dist = Counter(typelens)\n for length in typelens_dist:\n typelens_dist[length]/=self.numtypes\n return tokenlens_dist, typelens_dist\n\n def compute_unigram_entropy(self):\n \"\"\"Shannon entropy of unigram distribution (sec 4.1, table 1)\"\"\"\n return entropy(build_unigrams(self.tokens))\n\n def get_bestsub(self):\n \"\"\"for each character, find alternate character d such that subsituting d for c in the whole corpus produces max reduction in word entropy (sec 3.2)\"\"\"\n pairwise_entdec = {}\n chartypelist = list(self.chartypes)\n for ci, c in enumerate(chartypelist):\n for d in chartypelist[ci+1:]:\n wordlist = filter(lambda word: c in word or d in word, self.tokens)\n myent = entropy(build_unigrams(wordlist))\n pairwise_entdec[(c, d)] = myent - entropy(build_unigrams(map(lambda word: word.replace(c, d), wordlist)))\n bestsub = defaultdict(lambda : ('', 0))\n for (c, d), score in pairwise_entdec.items():\n if score > bestsub[c][1]:\n bestsub[c] = (d, score)\n if score > bestsub[d][1]:\n bestsub[d] = (c, score)\n return bestsub\n\nif __name__=='__main__':\n voynichb = CorpusStats(codecs.open('data/voy.b.paged.wds', 'r', 'utf8').read())\n","sub_path":"basic_stats.py","file_name":"basic_stats.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"185916493","text":"\n# Assignment 7\n# UCID: JA573\n\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nfrom keras.utils import np_utils, generic_utils, to_categorical\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications.vgg16 import VGG16 \nfrom keras.models import Model\nimport numpy as np\nimport keras\nimport sys\n\nbatch_size = 16\nnb_classes = 2\nnb_epoch = 1\n\nimg_channels = 3\nimg_rows = 100\nimg_cols = 100\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntrain_generator = train_datagen.flow_from_directory(\n sys.argv[1],\n target_size=(100, 100),\n color_mode='rgb',\n batch_size=16,\n class_mode='categorical')\n\n# test_datagen = ImageDataGenerator(rescale=1./255)\n\n# validation_generator = test_datagen.flow_from_directory(\n# \"chest_xray/test\",\n# target_size=(100, 100),\n# batch_size=16,\n# \tcolor_mode = 'rgb',\n# class_mode='categorical')\n\nb_model = VGG16(weights = 'imagenet', include_top = False, input_shape = (100,100,3))\nop_layer = b_model.output\nf_layer = Flatten()(op_layer)\nd_layer = Dense(2)(f_layer)\na_layer = Activation('softmax')(d_layer)\nmodel = Model(inputs = b_model.input, output = a_layer)\n\nmodel = load_model(sys.argv[2])\n\nfor layer in model.layers[:18]:\n\tlayer.trainable = False\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adamax',\n 
metrics=['accuracy'])\n\nmodel.summary()\n\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=200,\n epochs=1)\n\nmodel.save(sys.argv[2])\n\nprint(\"Saved model to disk\")\n","sub_path":"Assignment8/chestDataset/chest_train.py","file_name":"chest_train.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"653756083","text":"# coding:iso-8859-9 Türkçe\r\n# p_30712b.py: İngilteredeki üniversitelere öğrenci kayıtları örneği.\r\n\r\nimport random\r\nimport numpy as np\r\nfrom collections import Counter\r\n\r\ndef kaçıncıArada (değerim, bölümler, uçlar_1Mi=True):\r\n for i in range (0, len (bölümler)):\r\n if değerim < bölümler[i]: return i-1 if uçlar_1Mi else i\r\n return -1 if uçlar_1Mi else len (bölümler)\r\n\r\ndef ağırlıklıTercih (ibareler, gelmeAğırlıkları, kriptoluMu=True):\r\n if kriptoluMu: x = random.SystemRandom().random()\r\n else: x = np.random.random()\r\n gelmeYüzdeleriToplamı = [0] + list (np.cumsum (gelmeAğırlıkları))\r\n endeks = kaçıncıArada (x, gelmeYüzdeleriToplamı)\r\n return ibareler[endeks]\r\n\r\ndef veridosyasınıİşle (dosyaAdı):\r\n üniversiteler = []\r\n kayıtlılar = []\r\n with open (dosyaAdı) as dosya:\r\n toplamKayıtlı = 0\r\n dosya.readline()\r\n for satır in dosya:\r\n üniversite, kayıtlı = satır.split (\", \")\r\n üniversite = üniversite[1:-1]\r\n kayıtlı = eval (kayıtlı)\r\n kayıtlılar.append (kayıtlı)\r\n üniversiteler.append (üniversite)\r\n toplamKayıtlı += kayıtlı\r\n return (üniversiteler, kayıtlılar, toplamKayıtlı)\r\n\r\n\r\nüniversiteler, kayıtlılar, toplamÖğrenci = veridosyasınıİşle (\"p_30712bx.txt\")\r\nsayı = len (üniversiteler)\r\nprint (\"İngilteredeki toplam \", sayı, \" üniversite ve kayıtlı öğrenci sayıları dökümü:\",\r\n \"\\n\", \"-\"*70, sep=\"\")\r\nfor i in range (sayı):\r\n print ((i+1), \") \", üniversiteler[i], sep=\"\", end=\": \")\r\n print (kayıtlılar[i])\r\nprint (\"-\"*70, \"\\nTüm üniversitelerdeki toplam öğrenci sayısı: \", toplamÖğrenci, sep=\"\")\r\n\r\noranlaştırılanKayıtlar = [kayıtlı / toplamÖğrenci for kayıtlı in kayıtlılar]\r\n# Hayali bir gelişigüzel ağırlıklı tercih kaydı yapalım:\r\nprint (\"\\nAğırlıklı tercih yöntemiyle tesadüfi bir üniversite seçimi: \", ağırlıklıTercih (üniversiteler, oranlaştırılanKayıtlar), sep=\"\")\r\n\r\ntry: toplam = abs (int (input (\"\\nToplam kaç farazi öğrenciyi tüm üniversitelere ağırlıklı kaydedelim [100 000]? 
\")))\r\nexcept: toplam = 100000\r\n\r\norantılıKayıtlar = []\r\nfor i in range (toplam): orantılıKayıtlar.append (ağırlıklıTercih (üniversiteler, oranlaştırılanKayıtlar) )\r\n#orantılıKayıtlar.sort() # Üniversite adlarını a->z sıralar...\r\nsay = Counter (orantılıKayıtlar)\r\n\r\nprint (\"\\nİngilteredeki toplam \", sayı, \" üniversiteye \", toplam, \" adet öğrenci dağıtımı:\", \"\\n\", \"-\"*66, sep=\"\")\r\ni = 1\r\nfor a in say:\r\n print (i, \") \", a, \": \", say[a], sep=\"\")\r\n i +=1\r\n\r\n\r\n\r\n\"\"\"Çıktı:\r\n>python p_30712b.py\r\nİngilteredeki toplam 20 üniversite ve kayıtlı öğrenci sayıları dökümü:\r\n----------------------------------------------------------------------\r\n1) Open University in England: 123490\r\n2) University of Manchester: 37925\r\n3) University of Nottingham: 33270\r\n4) Sheffield Hallam University: 33100\r\n5) University of Birmingham: 32335\r\n6) Manchester Metropolitan University: 32160\r\n7) University of Leeds: 30975\r\n8) Cardiff University: 30180\r\n9) University of South Wales: 29195\r\n10) University College London: 28430\r\n11) King's College London: 27645\r\n12) University of Edinburgh: 27625\r\n13) Northumbria University: 27565\r\n14) University of Glasgow: 27390\r\n15) University of Plymouth: 27203\r\n16) Coventry University: 27002\r\n17) University of the West of England: 26734\r\n18) University of Central Lancashire: 26265\r\n19) Nottingham Trent University: 26221\r\n20) University of Sheffield: 25908\r\n----------------------------------------------------------------------\r\nTüm üniversitelerdeki toplam öğrenci sayısı: 680618\r\n\r\nAğırlıklı tercih yöntemiyle tesadüfi bir üniversite seçimi: Nottingham Trent University\r\n\r\nToplam kaç farazi öğrenciyi tüm üniversitelere ağırlıklı kaydedelim [100 000]?\r\n\r\nİngilteredeki toplam 20 üniversiteye 100000 adet öğrenci dağıtımı:\r\n------------------------------------------------------------------\r\n1) King's College London: 4111\r\n2) Cardiff University: 4362\r\n3) University of the West of England: 3956\r\n4) University of South Wales: 4295\r\n5) Sheffield Hallam University: 4941\r\n6) University of Sheffield: 3712\r\n7) University of Manchester: 5637\r\n8) University College London: 4261\r\n9) Open University in England: 17981\r\n10) University of Plymouth: 3955\r\n11) Northumbria University: 4045\r\n12) University of Birmingham: 4728\r\n13) University of Central Lancashire: 3810\r\n14) University of Edinburgh: 4018\r\n15) University of Nottingham: 5009\r\n16) University of Glasgow: 3972\r\n17) Manchester Metropolitan University: 4739\r\n18) Coventry University: 3981\r\n19) Nottingham Trent University: 3889\r\n20) University of Leeds: 4598\r\n\"\"\"","sub_path":"Bernd Klein (520) ile Python/p_30712b.py","file_name":"p_30712b.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"565438387","text":"import cv2\nimport pytesseract\nimport numpy as np\n#https://github.com/spmallick/learnopencv/blob/master/BlobDetector/blob.py\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport multiprocessing as mp\n# g e a r g e...\n\n# def f(x):\n# while 1:\n# pass # infinite loop\n\n# import multiprocessing as mp\n# n_cores = mp.cpu_count()\n# with mp.Pool(n_cores) as p:\n# p.map(f, range(n_cores))\n\ndef image_rot(img):\n rows,cols=img.shape\n i=0\n angle=0\n for angle in range (0,360,90):\n M=cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)\n dst = cv2.warpAffine(img,M,(cols,rows))\n 
text=detect_test(dst)\n if (text == \"G\") :\n exit()\n cv2.imshow(\"rot\",dst)\n print(\"text\",text)\n #print(\"angle\",angle)\n\n \n\ndef save_to_file(img):\n d+=1\n filename=\"/home/kiagkons/Documents/Eagles/Sdu_Eagles_Electronics/Detection/letters/im_%d.jpg\"%d\n cv2.imwrite(filename,sharpened)\n print(\"done\",d)\n\ndef detect_test(img):\n config = ('-l eng --oem 1 --psm 10')\n text = pytesseract.image_to_string(img, config=config)\n return text\n\n\nwidth=640\nheight=480\ncap = cv2.VideoCapture('samplevideo.mp4')\np=0\nprint(\"1\")\n# while True :\nwhile cap.isOpened():\n ret, frame = cap.read(300)\n frame=cv2.resize(frame, (width, height), fx=0, fy=0, interpolation=cv2.INTER_NEAREST)\n #frame=cv2.resize(frame,(width,height))\n #image=frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # cv2.imshow('frame',frame)\n #img= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p=p+1\n print('frame no.',p)\n\n\n # Simple blob detector\n params = cv2.SimpleBlobDetector_Params()\n\n # set threshold\n #for 640*480 min 10 max 200\n params.minThreshold = 10\n params.maxThreshold = 200\n\n # Area filtering\n #640*480 75,250\n params.filterByArea = True\n params.minArea = 100\n params.maxArea = 500\n\n\n params.filterByCircularity = False\n params.minCircularity = 0.75\n\n params.filterByConvexity = True\n params.filterByInertia = False\n detector = cv2.SimpleBlobDetector_create(params)\n keypoints = detector.detect(frame)\n\n # Draw detected blobs as red circles.\n # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures\n # the size of the circle corresponds to the size of blob\n\n im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (125,0,0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n #print(\"test\", keypoints)\n # Show blobs\n\n cv2.imshow(\"Keypoints\", im_with_keypoints)\n print(\"frame no.=\",p)\n key = cv2.waitKey(75) & 0xFF\n if key == ord(\"q\"):\n break\n d=0\n for k in keypoints:\n d+=1\n\n (x,y) = k.pt\n x = int(round(x))\n y = int(round(y))\n s=k.size\n s=int(round(s))\n a=int(round(x+(s/2))+4)\n b=int(round(y+(s/2))+4)\n c = int(round(x - (s / 2))-2)\n d = int(round(y - (s / 2))-2)\n #rect1=x+4-s\n #rect2=y+4-s\n #rect3=int(round(2*s-8))\n #rect4=int(round(2*s-8))\n cv2.rectangle(frame,(a,b), (c,d), (0,0,0),3)\n #cv2.imshow(\"with frame\", frame)\n #Mat cropedImage = fullImage(Rect(X,Y,Width,Height));\n #crop_img=cv2.copyMakeBorder(frame,a,d,c,b, cv2.BORDER_REPLICATE)\n \n crop_img = frame[int(y-7):int(y+7),int(x-7):int(x+7)]\n crop_img = cv2.resize(crop_img, (30,30))\n # cv2.imshow(\"crop_img\", crop_img)\n # sharpening\n kernel = np.array([[-1,-1,-1],[-1, 9,-1],[-1,-1,-1]])\n sharpened = cv2.filter2D(crop_img, -1, kernel) \n \n # # img blur\n img = cv2.medianBlur(crop_img,3)\n # img=cv2.addWeighted(img,1.5,img,-0.5,0)\n \n ret,bina = cv2.threshold(img,150,255,cv2.THRESH_BINARY) \n th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,9,1)\n kern = np.ones((3,3),np.uint8)\n th3 = cv2.erode(th3,kern,iterations = 1)\n # th3 = cv2.dilate(th3,kern,iterations = 1)\n th3 = cv2.morphologyEx(th3, cv2.MORPH_CLOSE, kern)\n\n # cv2.imshow(\"binary\", bina)\n # cv2.imshow(\"sharp\", img)\n # cv2.imshow(\"adaptive\",th3)\n \n # image_rot(th3)\n\n # text = pytesseract.image_to_string(crop_img)\n # #os.remove(filename)\n # print(\"Text detected\",text)\n\n # show the output images\n 
\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n","sub_path":"Detection/Bob.py","file_name":"Bob.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"246279540","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.rnn as rnn_utils\nimport torch.nn.functional as F\n\nfrom . import utils\n\nHIDDEN_STATE_SIZE = 128\nEMBEDDING_DIM = 50\n\n\nclass PhraseModel(nn.Module):\n def __init__(self, emb_size, dict_size, hid_size):\n super(PhraseModel, self).__init__()\n\n self.emb = nn.Embedding(num_embeddings=dict_size, embedding_dim=emb_size)\n # # BiLSTM\n # self.encoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,\n # num_layers=1, batch_first=True, bidirectional=True)\n # self.decoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,\n # num_layers=2, batch_first=True)\n\n # LSTM\n self.encoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,\n num_layers=1, batch_first=True)\n self.decoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,\n num_layers=1, batch_first=True)\n self.output = nn.Sequential(\n nn.Linear(hid_size, dict_size)\n )\n\n # hidden stat;\n def encode(self, x):\n _, hid = self.encoder(x)\n return hid\n\n def get_encoded_item(self, encoded, index):\n # For RNN\n # return encoded[:, index:index+1]\n # For LSTM\n return encoded[0][:, index:index+1].contiguous(), \\\n encoded[1][:, index:index+1].contiguous()\n\n def decode_teacher(self, hid, input_seq):\n # Method assumes batch of size=1\n out, _ = self.decoder(input_seq, hid)\n out = self.output(out.data)\n return out\n\n def decode_one(self, hid, input_x):\n # Example for unsqueeze:\n # >>> x = torch.tensor([1, 2, 3, 4])\n # >>> torch.unsqueeze(x, 0)\n # tensor([[ 1, 2, 3, 4]])\n # >>> torch.unsqueeze(x, 1)\n # tensor([[ 1],\n # [ 2],\n # [ 3],\n # [ 4]])\n out, new_hid = self.decoder(input_x.unsqueeze(0), hid)\n # Self.output(out) using nn.Linear(hid_size, dict_size) to transform logits to distribution over output vocab.\n out = self.output(out)\n # squeeze: Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.\n return out.squeeze(dim=0), new_hid\n\n def decode_chain_argmax(self, hid, begin_emb, seq_len, stop_at_token=None):\n \"\"\"\n Decode sequence by feeding predicted token to the net again. 
Act greedily\n \"\"\"\n res_logits = []\n res_tokens = []\n # First cur_emb is the embedding of '#BEG'.\n cur_emb = begin_emb\n\n # At first using the '#BEG' as first input token and hidden states from encoder as initial hidden state to predict the first output token and first decoder hidden state.\n # Then predict the output token by using last step's output token as current step's input and last step's decoder hidden state.\n for _ in range(seq_len):\n # The out_logits is the distribution over whole output vocabulary.\n # The hid is new hidden state generated from current time step.\n out_logits, hid = self.decode_one(hid, cur_emb)\n # After torch.max operation, the result is a list.\n # First element is the largest logit value in dimension-1 (each row), the second value is the index of the largest logit value.\n # >>> a = torch.randn(4, 4)\n # >>> a\n # tensor([[-1.2360, -0.2942, -0.1222, 0.8475],\n # [ 1.1949, -1.1127, -2.2379, -0.6702],\n # [ 1.5717, -0.9207, 0.1297, -1.8768],\n # [-0.6172, 1.0036, -0.6060, -0.2432]])\n # >>> torch.max(a, 1)\n # (tensor([ 0.8475, 1.1949, 1.5717, 1.0036]), tensor([ 3, 0, 0, 1]))\n out_token_v = torch.max(out_logits, dim=1)[1]\n # Transform tensorflow to array and return array[0];\n out_token = out_token_v.data.cpu().numpy()[0]\n # Using current output token's embedding.\n cur_emb = self.emb(out_token_v)\n\n # The list of out_logits list.\n res_logits.append(out_logits)\n # The list of output tokens.\n res_tokens.append(out_token)\n # When the EOS is predicted the prediction is ended.\n if stop_at_token is not None and out_token == stop_at_token:\n break\n # torch.cat(tensors, dim=0, out=None) → Tensor\n # Concatenates the given sequence of seq tensors in the given dimension.\n # All tensors must either have the same shape (except in the concatenating dimension) or be empty.\n # >>> x = torch.randn(2, 3)\n # >>> x\n # tensor([[ 0.6580, -1.0969, -0.4614],\n # [-0.1034, -0.5790, 0.1497]])\n # >>> torch.cat((x, x, x), 0)\n # tensor([[ 0.6580, -1.0969, -0.4614],\n # [-0.1034, -0.5790, 0.1497],\n # [ 0.6580, -1.0969, -0.4614],\n # [-0.1034, -0.5790, 0.1497],\n # [ 0.6580, -1.0969, -0.4614],\n # [-0.1034, -0.5790, 0.1497]])\n # >>> torch.cat((x, x, x), 1)\n # tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,\n # -1.0969, -0.4614],\n # [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,\n # -0.5790, 0.1497]])\n # Concatenate follow rows.\n return torch.cat(res_logits), res_tokens\n\n def decode_chain_sampling(self, hid, begin_emb, seq_len, stop_at_token=None):\n \"\"\"\n Decode sequence by feeding predicted token to the net again.\n Act according to probabilities\n \"\"\"\n res_logits = []\n res_actions = []\n cur_emb = begin_emb\n\n for _ in range(seq_len):\n out_logits, hid = self.decode_one(hid, cur_emb)\n # Using softmax to transform logits to probabilities.\n out_probs_v = F.softmax(out_logits, dim=1)\n out_probs = out_probs_v.data.cpu().numpy()[0]\n # np.random.choice(out_probs.shape[0], p=out_probs):\n # choose one index from out_probs.shape[0] by the probabilities associated with each entry as out_probs.\n action = int(np.random.choice(out_probs.shape[0], p=out_probs))\n # Transform action to tensor and cast it to the device where begin_emb is in.\n action_v = torch.LongTensor([action]).to(begin_emb.device)\n action_v = action_v.cuda()\n # Get the embedding of the sampled output token.\n cur_emb = self.emb(action_v)\n\n res_logits.append(out_logits)\n res_actions.append(action)\n if stop_at_token is not None and action == 
\n\ndef pack_batch_no_out(batch, embeddings, device=\"cpu\"):\n    # Assert statements are a convenient way to insert debugging assertions into a program.\n    # Here it guarantees that the batch is a list.\n    assert isinstance(batch, list)\n    # The format of batch is a list of tuples: ((tuple), [[list of token ID lists]])\n    # A lambda function is a small anonymous function; for example:\n    # x = lambda a, b: a * b\n    # print(x(5, 6))\n    # Sort descending (CuDNN requirements); the first element in the batch is the longest sentence.\n    batch.sort(key=lambda s: len(s[0]), reverse=True)\n    # input_idx: the matrix of token IDs of the input sentences in one batch. Each row corresponds to one input sentence.\n    # output_idx: the matrix of token IDs of the output sentences in one batch. Each row corresponds to a list of several output sentences.\n    # zip wants a bunch of arguments to zip together, but what you have is a single argument (a list, whose elements are also lists).\n    # The * in a function call \"unpacks\" a list (or other iterable), making each of its elements a separate argument.\n    # For list p = [[1,2,3],[4,5,6]];\n    # So without the *, you're doing zip( [[1,2,3],[4,5,6]] ). With the *, you're doing zip([1,2,3], [4,5,6]) = [(1, 4), (2, 5), (3, 6)].\n    input_idx, output_idx = zip(*batch)\n    # create padded matrix of inputs\n    # The map() function applies the given function to each item of a given iterable (list, tuple, etc.) and returns the results.\n    # For example:\n    # numbers = (1, 2, 3, 4)\n    # result = map(lambda x: x + x, numbers)\n    # print(list(result))\n    # Output: [2, 4, 6, 8]\n    # Build the list of lengths, one entry per element in the batch.\n    lens = list(map(len, input_idx))\n    # Use the longest sentence to build an all-zero matrix of shape batch_size x longest-sentence-length.\n    input_mat = np.zeros((len(batch), lens[0]), dtype=np.int64)\n    # Fill each sentence's token-ID vector into the all-zero matrix to complete the padding;\n    # idx: the index, x: the vector of token IDs.\n    for idx, x in enumerate(input_idx):\n        input_mat[idx, :len(x)] = x\n    # Convert the padded matrix into a tensor.\n    input_v = torch.tensor(input_mat).to(device)\n    input_v = input_v.cuda()\n    # Wrap it into a PackedSequence object.\n    # The padded sequence is the transposed matrix which is ``B x T x *``,\n    # where `T` is the length of the longest sequence and `B` is the batch size.\n    # Following the matrix is the list of lengths of each sequence in the batch (also in transposed format).\n    # For instance:\n    # [ a b c c d d d ]\n    # [ a b c d ]\n    # [ a b c ]\n    # [ a b ]\n    # could be transformed into [a,a,a,a,b,b,b,b,c,c,c,c,d,d,d,d] with batch size [4,4,3,2,1,1,1].\n    input_seq = rnn_utils.pack_padded_sequence(input_v, lens, batch_first=True)\n    input_seq = input_seq.cuda()\n    r = embeddings(input_seq.data)\n    # Look up embeddings; embeddings is the word-vector matrix already built by the model.\n    # r: the [B x T x dimension] matrix of the embeddings of the words occurring in the input sequence.\n    # The order follows the order in input_seq,\n    # i.e. it transforms [a,a,a,a,b,b,b,b,c,c,c,c,d,d,d,d] into [embedding(a), embedding(a), ..., embedding(d), embedding(d)]\n    r = r.cuda()\n    # input_seq with the word embeddings filled in.\n    # For instance, given data ``abc`` and `x`\n    # the :class:`PackedSequence` would contain data ``axbc`` with ``batch_sizes=[2,1,1]``.\n    # emb_input_seq is the [B x T x dimension] matrix of the embeddings of the words occurring in the input sequence, together with the batch sizes.\n    # For instance, emb_input_seq is the packed data [embedding(a), embedding(a), ..., embedding(d), embedding(d)] with batch size [4,4,3,2,1,1,1].\n    emb_input_seq = rnn_utils.PackedSequence(r, input_seq.batch_sizes)\n    emb_input_seq = emb_input_seq.cuda()\n    return emb_input_seq, input_idx, output_idx\n\ndef pack_input(input_data, 
embeddings, device=\"cpu\"):\n input_v = torch.LongTensor([input_data]).to(device)\n input_v = input_v.cuda()\n r = embeddings(input_v)\n return rnn_utils.pack_padded_sequence(r, [len(input_data)], batch_first=True)\n\n\ndef pack_batch(batch, embeddings, device=\"cpu\"):\n emb_input_seq, input_idx, output_idx = pack_batch_no_out(batch, embeddings, device)\n\n # prepare output sequences, with end token stripped\n output_seq_list = []\n for out in output_idx:\n output_seq_list.append(pack_input(out[:-1], embeddings, device))\n return emb_input_seq, output_seq_list, input_idx, output_idx\n\n\ndef seq_bleu(model_out, ref_seq):\n model_seq = torch.max(model_out.data, dim=1)[1]\n model_seq = model_seq.cpu().numpy()\n return utils.calc_bleu(model_seq, ref_seq)\n","sub_path":"S2SRL/libbots/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"476008595","text":"import re\nimport argparse\ndef get_args():\n # set parser\n parser = argparse.ArgumentParser(description='A program to complete PS6.')\n # -k will be for kmer_length, must be int\n parser.add_argument('-k', '--kmer_length', type=int, help = 'How long you want the kmer size to be.')\n # -f will be for filename, must be string\n parser.add_argument('-f','--filename', type=str, help = 'Filename of your contigs.fa file.')\n # parse the user input\n args = parser.parse_args()\n # get filename and kmer length out\n filename = args.filename\n kmer_length = args.kmer_length\n #return args in array\n return(filename, kmer_length)\n\ndef get_physical_lengths(filename, KMER_LENGTH):\n physical_lengths = []\n with open(filename, 'r') as f:\n for line in f:\n if line.startswith('>'):\n kmer_length_header = int(re.findall('length_[0-9]+', line)[0][7:])\n kmer_coverage = float(re.findall('cov_[0-9]+\\.[0-9]+', line)[0][4:])\n physical_lengths.append(kmer_length_header + KMER_LENGTH - 1)\n return(physical_lengths)\n\ndef get_kmer_coverages(filename):\n kmer_coverages = []\n with open(filename, 'r') as f:\n for line in f:\n if line.startswith('>'):\n kmer_coverages.append(float(re.findall('cov_[0-9]+\\.[0-9]+', line)[0][4:]))\n return(kmer_coverages)\n\ndef get_num_of_contigs(filename):\n count = 0\n with open(filename,'r') as f:\n for line in f:\n if line.startswith('>'):\n count+=1\n return(count)\n\ndef get_max_contig_length(filename):\n seq_dict = {}\n max_length = 0\n with open(filename,'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n if line.startswith('>'):\n current_key = line\n seq_dict[current_key] = ''\n else:\n seq_dict[current_key] += line\n for key in seq_dict:\n current_value = len(seq_dict[key])\n if current_value > max_length:\n max_length = current_value\n return(max_length)\n\ndef get_total_length(filename):\n nuc_sum = 0\n with open(filename,'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n if not line.startswith('>'):\n for char in line:\n nuc_sum += 1\n return(nuc_sum)\n\n\ndef get_mean_depth_coverage(filename, kmer_coverages, physical_lengths, KMER_LENGTH):\n cov_sum = 0\n for index, length in enumerate(physical_lengths):\n current_cov = (kmer_coverages[index] * length) / (length - KMER_LENGTH + 1)\n cov_sum += current_cov\n return(cov_sum/len(physical_lengths))\n\ndef get_n50(filename, total_length):\n max_length = 0\n nuc_count = 0\n seq_dict = {}\n with open(filename,'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n if line.startswith('>'):\n 
current_key = line\n seq_dict[current_key] = ''\n else:\n seq_dict[current_key] += line\n all_contigs = list(seq_dict.values())\n all_contigs.sort(key=len,reverse=True)\n perc_50 = total_length/2\n for contig in all_contigs:\n for char in contig:\n nuc_count += 1\n if nuc_count >= perc_50:\n return(len(contig))\n\ndef get_contig_dist(filename):\n dist = {}\n seq_dict = {}\n with open(filename, 'r') as f:\n for index,line in enumerate(f):\n line = line.strip()\n if line.startswith('>'):\n current_key = line\n seq_dict[current_key] = ''\n else:\n seq_dict[current_key] += line\n for key in seq_dict:\n current_sequence = seq_dict[key]\n current_length = len(current_sequence)\n lowest_100 = int(current_length/100) * 100\n if lowest_100 in dist:\n dist[lowest_100] += 1\n else:\n dist[lowest_100] = 1\n print('Contig Length' + '\\t' + 'Number of contigs in this category')\n for key in sorted(dist.keys()):\n print(str(key) + '\\t' + str(dist[key]))\n return(dist)\n\n\n\ndef main():\n filename, KMER_LENGTH = get_args()\n num_contigs = get_num_of_contigs(filename)\n print('\\n********************')\n print('Processed ' + filename + '\\n')\n print('There are ' + str(num_contigs) + ' contigs in this file.' )\n max_contig_length = get_max_contig_length(filename )\n print('The maximum contig length is ' + str(max_contig_length) + ' nt.')\n total_length = get_total_length(filename)\n print('The total length of this file is ' + str(total_length) + ' nt.')\n mean_contig_length = total_length / num_contigs\n print('The mean contig length is approximately ' + str(round(mean_contig_length,2)) + ' nt.')\n kmer_coverages = get_kmer_coverages(filename)\n physical_lengths = get_physical_lengths(filename, KMER_LENGTH)\n mean_depth_coverage = get_mean_depth_coverage(filename, kmer_coverages, physical_lengths, KMER_LENGTH)\n print('The mean depth of coverage is approximately ' + str(round(mean_depth_coverage,2)) + 'x.')\n n50 = get_n50(filename, total_length)\n print('The N50 for this dataset is ' + str(n50) + ' nt.')\n print('********************')\n print('\\n***CONTIG DISTRIBUTION***')\n contig_dist = get_contig_dist(filename)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"part1_ps6.py","file_name":"part1_ps6.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"625413590","text":"\nimport re\nimport lxml.html\n\n\nselectors_search = {\n 'companyName': {\n 'xpath': './/*[@itemprop=\"name\"]//text()',\n },\n 'id': {\n 'xpath': './/a[@itemprop=\"url\"]/@href',\n 'regex': re.compile(r'gelbeseiten\\.de/(\\d+)'),\n },\n}\n\nselectors_company = {\n 'locality': {\n 'xpath': './/*[@itemprop=\"addressLocality\"]//text()',\n },\n 'postcode': {\n 'xpath': './/*[@itemprop=\"postalCode\"]//text()',\n },\n 'streetAddress': {\n 'xpath': './/*[@itemprop=\"streetAddress\"]//text()',\n },\n 'companyName': {\n 'xpath': './/*[@itemprop=\"name\"]//text()',\n },\n 'phone': {\n 'xpath': './/ul[@class=\"profile\"]//li[@class=\"phone\"]//span[@class=\"nummer\"]/text()',\n },\n 'website': {\n 'xpath': './/ul[@class=\"profile\"]//li[contains(@class, \"website\")]//span[@class=\"text\"]/text()',\n },\n}\n\n\ndef _build_tree(html):\n return lxml.html.fromstring(html)\n\n\ndef _get_property(name, obj, selectors):\n xpath = selectors[name]['xpath']\n value = obj.xpath(xpath)[0] if len(obj.xpath(xpath)) else None\n if value and 'regex' in selectors[name]:\n rgx = selectors[name]['regex']\n values = rgx.findall(value)\n value = values[0] if 
len(values) else None\n    return value\n\n\ndef _get_property_search(name, obj):\n    return _get_property(name, obj, selectors_search)\n\n\ndef _get_property_company(name, obj):\n    return _get_property(name, obj, selectors_company)\n\n\ndef parse_companies(html):\n    tree = _build_tree(html)\n    xpath = '//*[@itemtype=\"http://schema.org/LocalBusiness\"]'\n    companyObjs = tree.xpath(xpath)\n    companies = []\n    for n, i in enumerate(companyObjs):\n        company = {}\n        for name in selectors_search.keys():\n            prop = _get_property_search(name, i)\n            if prop:\n                company[name] = prop\n        companies.append(company)\n    return companies\n\n\ndef parse_company(html):\n    tree = _build_tree(html)\n    xpath = '//*[@itemtype=\"http://schema.org/LocalBusiness\"]'\n    obj = tree.xpath(xpath)[0]\n    company = {}\n    for name in selectors_company.keys():\n        prop = _get_property_company(name, obj)\n        if prop:\n            company[name] = prop\n    return company\n","sub_path":"_src/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"185336942","text":"__author__ = 'htan'\n# Below are my own modules.\nimport METextKit, MyUtil, HtmlSection\n\nimport mypyspark\nfrom gensim.models.doc2vec import TaggedDocument\nfrom collections import namedtuple\nimport logging,os\nimport numpy as np\nimport multiprocessing\nfrom multiprocessing.pool import ThreadPool\nfrom multiprocessing import Manager\n\n\n\nclass TaggedSecdbDocument(object):\n\n    TenQDoc = namedtuple('TenQDoc', 'words tags')\n\n    def __loadOneCompany(self, kv_pair):\n        ticker, urls = kv_pair\n        words = []\n        tags = [ticker]\n        # urls = self.mappings[ticker]\n        for url in urls:\n            htmlstr = MyUtil.requestContent(url)\n            htmltxt = HtmlSection.extractKeyTextfromStr(htmlstr)\n            if htmltxt is None:\n                continue\n            word1 = METextKit.preprocess_text(htmltxt, toStem=True)\n            words += [x.strip() for x in word1]\n        print(words[:20])\n\n        if len(words) < 100:\n            logging.warn(ticker + \" has too few words in the documents\")\n        # Return the tagged document either way; the warning above just flags sparse documents.\n        return TaggedDocument(words, tags)\n\n    def __init__(self, idfilepath, sparkIt=False):\n        mappings = dict()\n        rows = MyUtil.readFile(idfilepath)\n        for r in rows:\n            tic, url = r.split('\\t')\n            if tic in mappings:\n                mappings[tic].append(url.strip())\n            else:\n                mappings[tic] = [url.strip()]\n        self._tickers = mappings.keys()\n        self.mappings = mappings\n\n        if sparkIt:\n            self.__init_spark(idfilepath)\n        else:\n            self.__init_local(idfilepath)\n\n    def __init_local(self, idfilepath):\n        self.docs = []\n\n        parallel = True\n\n        if parallel:\n            pool = ThreadPool(processes=10)\n            # Keep the mapped results so that __iter__ has documents to yield.\n            self.docs = pool.map(self.__loadOneCompany, self.mappings.iteritems())\n        else:\n            for tic, urls in self.mappings.iteritems():\n                self.docs.append(self.__loadOneCompany((tic, urls)))\n\n    def __init_spark(self, folder):\n        sparkCont = mypyspark.createSparkInstance()\n\n        cidp = sparkCont.parallelize(self.mappings.iteritems(), 100)\n        self.docs = cidp.map(self.__loadOneCompany).collect()\n        self.docs = filter(lambda x: hasattr(x, 'words'), self.docs)\n\n    def __iter__(self):\n        for doc in self.docs:\n            yield doc\n
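\n# Editor's illustration, not part of the original file: the iterable of TaggedDocument\n# objects built above can be fed straight to gensim's Doc2Vec. The file name and the\n# hyper-parameters below are assumptions, not values taken from this project:\n#   from gensim.models.doc2vec import Doc2Vec\n#   corpus = TaggedSecdbDocument('data/ticker_urls.tsv')\n#   model = Doc2Vec(corpus, vector_size=100, epochs=10)\n# Every document is tagged with its ticker, so model.docvecs['AAPL'] (model.dv in\n# gensim 4+) then looks up a per-company embedding.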
\n\nclass TaggedMstarDocument(object):\n    TenQDoc = namedtuple('TenQDoc', 'words tags')\n\n    def __loadOneCompany(self, cid):\n        subf = self.folder + os.sep + cid\n        files = MyUtil.get_immediate_files(subf)\n        words = []\n        tic = self._tickers[cid]\n        tags = [tic]\n        # maxtxt=str(max([int(x.split('.')[0]) for x in files]))+\".txt\"\n        LL = [int(x.split('.')[0]) for x in files]\n\n        # Keep only the two most recent filings.\n        files = [LL[i] for i in np.argsort(LL)[-2:]]\n        files = [str(f)+\".txt\" for f in files]\n\n        for file1 in files:\n            # with open(subf+os.sep+file1, 'rb') as ff:\n            #     text = ff.read()\n            # word1 = normalize_text(text)\n            word1 = METextKit.preprocess_file(subf+os.sep+file1, toStem=True)\n            words += [x.strip() for x in word1]\n        if len(words) < 100:\n            logging.warn(tic + \" has too few words in the documents\")\n            return None\n        return TaggedDocument(words, tags)\n\n\n    def __init_local(self, folder):\n        self.folder = folder\n        pool = ThreadPool(processes=1)\n        # Keep the mapped results, dropping companies with too few words.\n        self.docs = [d for d in pool.map(self.__loadOneCompany, self._tickers.keys()) if d is not None]\n\n    def __init_spark(self, folder):\n        sparkCont = mypyspark.createSparkInstance()\n\n        def loadOneCompany(ids):\n            cid, ticker = ids\n            def get_immediate_files(a_dir):\n                return [name for name in os.listdir(a_dir) if not os.path.isdir(os.path.join(a_dir, name))]\n            folder = 'D:/SECFiling/10Q/text'\n            import os\n            import numpy as np\n            subf = folder + os.sep + cid\n            files = get_immediate_files(subf)\n            words = []\n            # tic = tickers[cid]\n\n            tags = [ticker]\n            # maxtxt=str(max([int(x.split('.')[0]) for x in files]))+\".txt\"\n            # LL=[int(x.split('.')[0]) for x in files]\n            #\n            # files=[LL[i] for i in np.argsort(LL)[-4:]]\n            # files = [str(f)+\".txt\" for f in files]\n\n            for file1 in files:\n                # with open(subf+os.sep+file1, 'rb') as ff:\n                #     text = ff.read()\n                # word1 = normalize_text(text)\n                word1 = METextKit.preprocess_file(subf+os.sep+file1, toStem=True)\n                words += [x.strip() for x in word1]\n            if len(words) < 100:\n                print(cid + \" has too few words in the documents\")\n\n            return TaggedDocument(words, tags)\n\n        cidp = sparkCont.parallelize(self._tickers.iteritems(), 100)\n\n        self.docs = cidp.map(loadOneCompany).collect()\n        sparkCont.stop()\n\n    def __init__(self, folder, sparkIt=False):\n\n        with open('data/TICKERS.txt', 'r') as ff:\n            ss = ff.readlines()\n        self._tickers = {}\n        for s in ss:\n            cid, ticker = s.split('\\t')\n            self._tickers[cid] = ticker.strip()\n\n        if sparkIt:\n            self.__init_spark(folder)\n        else:\n            self.__init_local(folder)\n\n    def __iter__(self):\n        for doc in self.docs:\n            yield doc\n\n\n# Below are classes that define new corpora.\nclass Sec10QCorpus(object):\n\n    def __init__(self, doc_path, dictionary):\n        self.doc_path = doc_path\n        self.dictionary = dictionary\n\n    def __iter__(self):\n        for line in open(self.doc_path):\n            yield self.dictionary.doc2bow(line.lower().split())","sub_path":"TaggedDocuments.py","file_name":"TaggedDocuments.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"189431403","text":"\"\"\"\nA mixin which provides some helper classes for User app\n\"\"\"\n\nfrom django.core.serializers import serialize\nimport json\nfrom rest_framework_jwt.utils import jwt_payload_handler\nimport jwt\nfrom application import settings\nfrom application import models\n\n\nclass UserSerializer(object):\n    \"\"\"\n    This class provides helper methods for user-related serializers.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.request = self.context['request']\n        self.user = None\n\n    def get_data(self):\n        \"\"\"\n        Serialize user and its related objects.\n        A serializer must provide self.user to consume this method.\n        \"\"\"\n        \n        try:\n            user = serialize('json', [self.user])\n        except Exception as e:\n            print(e)\n        \n        user = json.loads(user)[0]['fields']\n        user.pop('password')\n        user.pop('groups')\n        user.pop('is_superuser')\n        user.pop('is_staff')\n        user.pop('is_active')\n        user.pop('user_permissions')\n        user.pop('last_login')\n        \n        try:\n            profile = models.Profile.objects.get(user_id=self.user)\n            user['profile_media'] = str(profile.profile_media) if 
str(profile.profile_media)==\"\" else self.request.build_absolute_uri(str(profile.profile_media.url))\n user['coin'] = profile.coin\n user['cash'] = profile.cash\n\n app_settings = models.App_Settings.objects.filter()[:1].get()\n user['app_settings_coin'] = app_settings.top_up_coin\n except Exception as e:\n print(e)\n\n payload = jwt_payload_handler(self.user)\n token = jwt.encode(payload, settings.SECRET_KEY)\n \n user['token'] = token #self.user.auth_token.key\n \n return user","sub_path":"application/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"367774062","text":"import sqlite3\r\n\r\nclass Album:\r\n all_albums=[]\r\n def __init__(self, title, artist):\r\n self.title = title\r\n self.artist = artist\r\n\r\n @classmethod #factory build list of album names\r\n def fetch_album(cls, cursor):\r\n cursor.execute('''\r\n SELECT ArtistId,Title\r\n FROM albums \r\n ''')\r\n result = cursor.fetchall()\r\n all_albums = []\r\n for record in result:\r\n all_album = cls(record[0], record[1])\r\n all_albums.append(all_album)\r\n return all_albums\r\n\r\n\r\nclass Artist:\r\n all_artists = []\r\n def __init__(self, artistId, name):\r\n self.name = name\r\n self.id = artistId\r\n @classmethod\r\n def fetch_artist(cls, cursor):\r\n cursor.execute('''SELECT ArtistId,Name\r\n FROM artists\r\n ''')\r\n result = cursor.fetchall() # for 1 entry row= cursor.fetchone\r\n all_artist = []\r\n for record in result:\r\n all_artist.append(cls(record[0], record[1]))\r\n return all_artist\r\n\r\nclass Tracks:\r\n all_tracks = []\r\n def __init__(self, name, album):\r\n self.album=[]\r\n self.name = name\r\n @classmethod\r\n def get_tracks(cls, cursor):\r\n cursor.execute('''SELECT ArtistId,name ,Title\r\n FROM tracks,albums\r\n ''')\r\n rec = cursor.fetchall()\r\n all_tracks = []\r\n for track in rec:\r\n all_tracks.append(cls(track[1], track[2]))\r\n return all_tracks\r\n\r\n @classmethod\r\n def get_artist_track(cls,name):\r\n cursor.execute('''SELECT tracks.Name,artists.Name\r\n FROM artists,tracks,albums\r\n WHERE tracks.AlbumId =albums.AlbumId AND artists.ArtistId = albums.ArtistId\r\n ''')\r\n record2 = cursor.fetchall()\r\n artist_track = []\r\n for at in record2:\r\n artist_track.append(cls.at[1], at[2])\r\n return artist_track\r\n\r\n\r\n\r\n\r\n\r\n#+++++++++++Main Code ++++++++\r\ndb = sqlite3.connect('chinook.db')\r\ncursor = db.cursor()\r\nall_artists = Artist.fetch_artist(cursor)\r\nall_albums = Album.fetch_album(cursor)\r\nall_tracks = Tracks.get_tracks(cursor)\r\n#artist_track = Tracks.get_artist_track(cursor)\r\n#print all artist\r\nfor artist in all_artists:\r\n print(artist.name)\r\n\r\n#print albums\r\nfor albums in all_albums:\r\n print(albums.title)\r\n#print tracks\r\nfor tracks in all_tracks:\r\n print(tracks.name)\r\n\r\n#track and artist\r\n#for al in artist_track:\r\n# print(al)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#album_name_search = input('Which album do you want to search for ?')\r\n\r\n#for track in tracks:\r\n# if track.album.title== album_name_search:\r\n# album_found=track.album\r\n# break\r\n\r\n#print(album_found.tracks)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#ask user for name of album\r\n# 1 Display all albums\r\n# 2 Display all tracks of album\r\n# 3 Display all albums of an artist\r\n# 4 Display all tracks of an 
artist\r\n","sub_path":"SQL3.py","file_name":"SQL3.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"602040803","text":"'''\nDivide-and-conquer algorithm\nDivide: split the array into two parts so that a given condition holds\nConquer: sort each part recursively.\nMerge: NOTHING TO DO\nImplement quick sort with recursion\n'''\n\ndef quick_sort(myList):\n    if myList == []:\n        return []\n    else:\n        pivot = myList[0]\n        lesser = quick_sort([x for x in myList[1:] if x < pivot])\n        greater = quick_sort([x for x in myList[1:] if x >= pivot])\n        myList = lesser + [pivot] + greater\n        return myList\n\nnum_list = [int(_) for _ in input().split()]\nprint(quick_sort(num_list))\n\n# Time complexity = O(N log N): the recursion stack depth is log N. Proof: find the k where N (the number of items) / 2^k = 1; that k is log N.\n# Worst case: O(N*N)\n\n'''\nThe quicksort algorithm\n- Worst case O(n^2); the average time complexity is O(n log2 n)\n- Sorting algorithms whose worst case is O(n log2 n):\n  -> merge sort\n  -> heap sort\n- What if the data is stored in a linked list instead of an array?\n  -> Even then O(n^2) is the best we can do - insertion sort\n\n- Worst-case time complexity (when the data is already sorted):\n  T(n) = T(0) + T(n-1) + O(n)   (O(n) stands for the n-1 comparison operations)\n       = T(n-1) + O(n)\n       = T(0) + T(n-2) + O(n-1) + O(n)\n       = O(1) + ... + O(n)\n       = 1 + ... + (n-1)\n       = O(n^2)\n- Best case: the array is always split exactly in half\n  T(n) = 2T(n/2) + O(n) = O(n log2 n)\n- What if every split keeps at least a 1/9 fraction on one side?\n  depth: (9/10)^k * n = 1, so the depth is O(log10/9 n) and the total is O(n log10/9 n)\n- Average time complexity A(n) = O(n log2 n)\n'''","sub_path":"data_structure/sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"433231625","text":"# Runtime: 44 ms, faster than 83.95% of Python online submissions for Best Time to Buy and Sell Stock II.\n# Memory Usage: 12.7 MB, less than 32.56% of Python online submissions for Best Time to Buy and Sell Stock II.\n\nclass Solution(object):\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        \n        prices.append(0)\n        \n        p = 0\n        pr = 0\n        \n        maxprof = 0\n        for i in range(1, len(prices)):\n            if prices[i] < prices[pr]:\n                # maxprof = max(maxprof, prices[pr] - prices[p])\n                maxprof += prices[pr] - prices[p]\n                p = i\n            \n            pr = i\n        \n        return maxprof","sub_path":"Leetcode/Arrays/Easy/122_best_time_to_buy_and_sell_stock.py","file_name":"122_best_time_to_buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"524678409","text":"#python3\n\nclass TreeNode:\n    def __init__(self,x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution:\n    def sortedArrayToBST(self,nums) -> TreeNode:\n        def helper(left,right):\n            if left > right:\n                return None\n            \n            mid = (left + right) // 2\n            root = TreeNode(nums[mid])\n            root.left = helper(left,mid - 1)\n            root.right = helper(mid + 1,right)\n            return root\n\n        return helper(0,len(nums) - 1)\n\n\nif __name__ == \"__main__\":\n    s = Solution() \n    sortedList = [1,2,3,4,5,6]\n    print(s.sortedArrayToBST(sortedList).val)\n    ","sub_path":"leetcode/SortedArrayToBST.py","file_name":"SortedArrayToBST.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"323862463","text":"#\n# Solution By : Pritish Thakkar\n# Reference : https://gocodergo.wordpress.com/category/hackerrank-solutions/\n#\nimport sys\nimport re\ndef fib(d,f,n):\n    a=d\n    b=f\n    for i in range(2,n):\n        c=b*b+a\n        a=b\n        b=c\n    return c\n    \na,b,n=map(int, 
raw_input().split())\nprint(fib(a,b,n))\n","sub_path":"Fibonacci_Modified.py","file_name":"Fibonacci_Modified.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"584598500","text":"# | Copyright 2015-2016 Karlsruhe Institute of Technology\n# |\n# | Licensed under the Apache License, Version 2.0 (the \"License\");\n# | you may not use this file except in compliance with the License.\n# | You may obtain a copy of the License at\n# |\n# | http://www.apache.org/licenses/LICENSE-2.0\n# |\n# | Unless required by applicable law or agreed to in writing, software\n# | distributed under the License is distributed on an \"AS IS\" BASIS,\n# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# | See the License for the specific language governing permissions and\n# | limitations under the License.\n\nfrom grid_control.backends import WMS\nfrom grid_control.datasets.splitter_base import DataSplitter\nfrom grid_control.gc_plugin import ConfigurablePlugin\nfrom grid_control.parameters import ParameterInfo, ParameterMetadata\nfrom grid_control.utils.gc_itertools import lchain\nfrom hpfwk import AbstractError\nfrom python_compat import any, imap, lfilter, lmap, set\n\n# Class used by DataParameterSource to convert dataset splittings into parameter data\nclass PartitionProcessor(ConfigurablePlugin):\n\tdef getKeys(self):\n\t\traise AbstractError\n\n\tdef getNeededKeys(self, splitter):\n\t\treturn []\n\n\tdef process(self, pNum, splitInfo, result):\n\t\traise AbstractError\n\n\nclass MultiPartitionProcessor(PartitionProcessor):\n\tdef __init__(self, config, processorList):\n\t\tPartitionProcessor.__init__(self, config)\n\t\tself._processorList = processorList\n\n\tdef getKeys(self):\n\t\treturn lchain(imap(lambda p: p.getKeys(), self._processorList))\n\n\tdef getNeededKeys(self, splitter):\n\t\treturn lchain(imap(lambda p: p.getNeededKeys(splitter), self._processorList))\n\n\tdef process(self, pNum, splitInfo, result):\n\t\tfor processor in self._processorList:\n\t\t\tprocessor.process(pNum, splitInfo, result)\n\n\nclass BasicPartitionProcessor(PartitionProcessor):\n\tdef _formatFileList(self, fl):\n\t\treturn str.join(' ', fl)\n\n\tdef getKeys(self):\n\t\tresult = lmap(lambda k: ParameterMetadata(k, untracked = True), ['FILE_NAMES', 'MAX_EVENTS',\n\t\t\t'SKIP_EVENTS', 'DATASETID', 'DATASETPATH', 'DATASETBLOCK', 'DATASETNICK'])\n\t\tresult.append(ParameterMetadata('DATASETSPLIT', untracked = False))\n\t\treturn result\n\n\tdef getNeededKeys(self, splitter):\n\t\tenumMap = {\n\t\t\tDataSplitter.FileList: 'FILE_NAMES',\n\t\t\tDataSplitter.NEntries: 'MAX_EVENTS',\n\t\t\tDataSplitter.Skipped: 'SKIP_EVENTS'}\n\t\tfor enum in splitter.neededEnums():\n\t\t\tyield enumMap[enum]\n\n\tdef process(self, pNum, splitInfo, result):\n\t\tresult.update({\n\t\t\t'FILE_NAMES': self._formatFileList(splitInfo[DataSplitter.FileList]),\n\t\t\t'MAX_EVENTS': splitInfo[DataSplitter.NEntries],\n\t\t\t'SKIP_EVENTS': splitInfo.get(DataSplitter.Skipped, 0),\n\t\t\t'DATASETID': splitInfo.get(DataSplitter.DatasetID, None),\n\t\t\t'DATASETPATH': splitInfo.get(DataSplitter.Dataset, None),\n\t\t\t'DATASETBLOCK': splitInfo.get(DataSplitter.BlockName, None),\n\t\t\t'DATASETNICK': splitInfo.get(DataSplitter.Nickname, None),\n\t\t\t'DATASETSPLIT': pNum,\n\t\t})\n\t\tresult[ParameterInfo.ACTIVE] = result[ParameterInfo.ACTIVE] and not splitInfo.get(DataSplitter.Invalid, False)\n\n\nclass 
LocationPartitionProcessor(PartitionProcessor):\n\tdef __init__(self, config):\n\t\tPartitionProcessor.__init__(self, config)\n\t\tself._filter = config.getFilter('partition location filter', '', onChange = None,\n\t\t\tdefaultMatcher = 'blackwhite', defaultFilter = 'weak')\n\t\tself._preference = config.getList('partition location preference', [], onChange = None)\n\t\tself._reqs = config.getBool('partition location requirement', True, onChange = None)\n\t\tself._disable = config.getBool('partition location check', True, onChange = None)\n\n\tdef getKeys(self):\n\t\treturn []\n\n\tdef process(self, pNum, splitInfo, result):\n\t\tlocations = self._filter.filterList(splitInfo.get(DataSplitter.Locations))\n\t\tif self._preference:\n\t\t\tif not locations: # [] or None\n\t\t\t\tlocations = self._preference\n\t\t\telif any(imap(lambda x: x in self._preference, locations)): # preferred location available\n\t\t\t\tlocations = lfilter(lambda x: x in self._preference, locations)\n\t\tif self._reqs and (locations is not None):\n\t\t\tresult[ParameterInfo.REQS].append((WMS.STORAGE, locations))\n\t\tif self._disable:\n\t\t\tresult[ParameterInfo.ACTIVE] = result[ParameterInfo.ACTIVE] and (locations != [])\n\n\nclass MetaPartitionProcessor(PartitionProcessor):\n\tdef __init__(self, config):\n\t\tPartitionProcessor.__init__(self, config)\n\t\tself._metadata = config.getList('partition metadata', [])\n\n\tdef getKeys(self):\n\t\treturn lmap(lambda k: ParameterMetadata(k, untracked=True), self._metadata)\n\n\tdef process(self, pNum, splitInfo, result):\n\t\tfor idx, mkey in enumerate(splitInfo.get(DataSplitter.MetadataHeader, [])):\n\t\t\tif mkey in self._metadata:\n\t\t\t\ttmp = set(imap(lambda x: x[idx], splitInfo[DataSplitter.Metadata]))\n\t\t\t\tif len(tmp) == 1:\n\t\t\t\t\tresult[mkey] = tmp.pop()\n","sub_path":"packages/grid_control/datasets/pproc_base.py","file_name":"pproc_base.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"624133576","text":"from misli.basic_classes import Color\n\nSELECTION_OVERLAY_COLOR = Color(1, 1, 0, 0.5)\nALIGNMENT_LINE_LENGTH = 120\nLONG_PRESS_TIMEOUT = 0.3\n\n\nMAX_RENDER_TIME = 0.017 # (s) ~60 fps\nNOTE_MARGIN = 5\n\nMOVE_SPEED = 1\n\nINITIAL_EYE_Z = 40\n\nMIN_HEIGHT_SCALE = 0.2\nMAX_HEIGHT_SCALE = 200\n\nNO_SCALE_LINE_SPACING = 20\n\nFONT_SIZE_IN_PX = 12\n\nDEFAULT_NOTE_WIDTH = 320\nDEFAULT_NOTE_HEIGHT = 160\nMIN_NOTE_WIDTH = 30\nMIN_NOTE_HEIGHT = 30\nMAX_NOTE_WIDTH = 1920\nMAX_NOTE_HEIGHT = 1280\n\nDEFAULT_BG_COLOR = [0, 0, 1, 0.1]\nDEFAULT_COLOR = [0, 0, 1, 1]\n\nRESIZE_CIRCLE_RADIUS = 20\n\nALIGNMENT_GRID_UNIT = 10\n\nORGANISATION_NAME = 'p10'\nDESKTOP_APP_NAME = 'misli'\nDESKTOP_APP_VERSION = '4.0.0'\n","sub_path":"pamet/pamet/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"194665308","text":"import cv2\n\ncap=cv2.VideoCapture(0)\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))\n\nprint(cap.isOpened())\nwhile(cap.isOpened()):\n\n ret,frame = cap.read() #ret will store true /false if frame is available/notavaialable and frame will store and frame will be saved into frame variable\n \n if ret == True:\n print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n \n out.write(frame)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n 
#cv2.imshow('frame',frame) simply read image from webcam\n cv2.imshow('frame',gray) #read in gray mode\n\n\n if cv2.waitKey(1) & 0xFF == ord('q'): #when q is pressed close the window\n break\n else:\n break \n\ncap.release() ","sub_path":"videos.py","file_name":"videos.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"91352470","text":"\"\"\"Deterministic, Finite State machine.\"\"\"\nimport copy\n\n\nclass StopMachine(Exception):\n \"\"\"StopMachine is raised when the state machine is in a final state.\"\"\"\n pass\n\n\nclass StateMachine:\n \"\"\"Deterministic, Finite State machine.\"\"\"\n def __init__(self, blueprint):\n \"\"\"Override.\"\"\"\n self.blueprint = blueprint\n\n def __str__(self):\n \"\"\"Override.\"\"\"\n accepted = \"OK\" if self.accepted else \"NO\"\n\n return f\"{self.state} ({accepted})\"\n\n @property\n def blueprint(self):\n \"\"\"Get the blueprint for the state machine.\"\"\"\n return self.__blueprint\n\n @blueprint.setter\n def blueprint(self, blueprint):\n \"\"\"\n Set the blueprint for the state machine and initial state.\n\n Raise ValueError if the initial state is invalid.\n \"\"\"\n self.__blueprint = blueprint\n\n self.reset()\n\n def is_initial(self, state):\n \"\"\"Return True if state is the initial state.\"\"\"\n try:\n return state == self.blueprint[\"initialState\"]\n except KeyError:\n return False\n\n def is_valid(self, state):\n \"\"\"Return True if state is a valid state.\"\"\"\n try:\n return state in self.blueprint[\"validStates\"]\n except KeyError:\n return False\n\n def is_accepted(self, state):\n \"\"\"Return True if state is an accepted state.\"\"\"\n try:\n return state in self.blueprint[\"acceptedStates\"]\n except KeyError:\n return False\n\n def is_final(self, state):\n \"\"\"Return True if state is the final state.\"\"\"\n try:\n return state in self.blueprint[\"finalStates\"]\n except KeyError:\n return False\n\n def is_event(self, event):\n \"\"\"Return True if the event is a valid event.\"\"\"\n try:\n return event in self.blueprint[\"alphabet\"]\n except KeyError:\n return False\n\n def reset(self):\n \"\"\"\n Set the state machine to its initial state and context.\n\n Raise ValueError if the state is invalid.\n \"\"\"\n state = self.blueprint.get(\"initialState\")\n context = self.blueprint.get(\"initialContext\", dict())\n\n if not self.is_initial(state):\n raise ValueError(\"Invalid state\")\n\n self.set_state(state, context)\n\n def set_state(self, state, context):\n \"\"\"\n set the state machine to a new state and context.\n\n Raise ValueError if the state is invalid.\n \"\"\"\n if not self.is_valid(state):\n raise ValueError(\"Invalid state\")\n\n self.state = state\n self.context = context\n self.initial = self.is_initial(self.state)\n self.accepted = self.is_accepted(self.state)\n self.final = self.is_final(self.state)\n\n def transition(self, event):\n \"\"\"\n Transition to the next state by executing the transition function.\n\n Raise StopMachine if the current state is final or\n ValueError if event is not in alphabet or state is invalid.\n \"\"\"\n if self.is_final(self.state):\n raise StopMachine()\n\n if not self.is_event(event):\n raise ValueError(\"Invalid event\")\n\n def execute_lifecycle(lifecycle=\"before\"):\n lifecycles = self.blueprint.get(\"lifecycles\", dict())\n\n for hook in lifecycles.get(lifecycle, []):\n if event in hook[\"events\"]:\n for action in hook[\"actions\"]:\n action(self.state, self.context, 
event)\n\n def execute_transition():\n function = self.blueprint.get(\"transition\", lambda s, c, e: e)\n context = copy.deepcopy(self.context)\n state = function(self.state, context, event)\n\n self.set_state(state, context)\n\n execute_lifecycle(\"before\")\n execute_transition()\n execute_lifecycle(\"after\")\n","sub_path":"dfsmpy/machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"470713547","text":"from urllib.parse import urlparse, urlunparse\nimport urllib, sys, argparse, requests\nrequests.packages.urllib3.disable_warnings()\n\nparser = argparse.ArgumentParser(\n description=\"use this script to fuzz endpoints that return a 401/403\"\n)\nparser.add_argument(\n '-url', '-u', action=\"store\", default=None, dest='url',\n help=\"Specify the target URL\")\nparser.add_argument(\n '-cookies', '-c', action=\"store\", default=None, dest='cookies',\n help=\"Specify cookies to use in requests. \\\n eg. '-cookie \\\"cookie1=blah; cookie2=blah\\\"'\")\nparser.add_argument(\n '-proxy', '-p', action=\"store\", default=None, dest='proxy',\n help=\"Specify a proxy to use for requests\")\nparser.add_argument(\n '-hc', action=\"store\", default=None, dest='hc',\n help=\"Hide a specified response code from output\")\nparser.add_argument(\n '-hl', action=\"store\", default=None, dest='hl',\n help=\"Hide a specified response length from output\")\nargs = parser.parse_args()\n\nif len(sys.argv) <= 1:\n parser.print_help()\n print()\n sys.exit()\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"DNT\": \"1\",\n \"Connection\": \"close\",\n \"Upgrade-Insecure-Requests\": \"1\"}\n\nprefix_payloads = [\n '%20', '//', '/;/', '/;//', '/./', '/.//', '/.;/', '/.;//', '/../', '/..',\n '/..//', '/..;/', '/..;//', '/../../', '/..//../', '/../..//', '//../../',\n '/../../../', '/../..//../', '/..//../../', '/../../..//', '%2f%2f',\n '%2f/', '/%2f', '/%3b/', '%2f%3b%2f', '%2f%3b%2f%2f', '/%2e/', '/%2e//',\n '/%2e%3b/', '/%2e%3b//', '/%2e%2e/', '/%2e%2e', '/%2e%2e%3b/', '/%2e%2f/',\n '/%2e%2e%2f/', '/%252e%253b/', '/%252e%252e%253b/', '%252f%252f', '%252f/',\n '/%252f', '/%252e/', '/%252e%252f/', '/%252e%252e%252f/']\n\nsuffix_payloads = [\n ';', '/', '%2f', '/./', '/%2e/', '/../', '/%2e%2e/', '.html', '.json', '#',\n '/%20', '%20']\n\n\ndef setup_payloads(parsed, pathPieces, query):\n urls = []\n # Set up paths with prefix payloads\n for i, piece in enumerate(pathPieces):\n for payload in prefix_payloads:\n parsed = parsed._replace(\n path=path.replace(\n '/{}'.format(piece), # original path\n \"{}{}\".format(payload, piece)), # add payload\n query=query)\n urls.append(urlunparse(parsed))\n\n # Set up paths with suffix payloads\n for i, piece in enumerate(pathPieces):\n for payload in suffix_payloads:\n parsed = parsed._replace(\n path=path.replace(\n piece, # original path\n \"{}{}\".format(piece, payload)), # add payload\n query=query)\n urls.append(urlunparse(parsed))\n\n return urls\n\n\ndef send_header_payloads(url, headers, cookies, proxies, h, p):\n headers[h] = p\n resp = requests.get(url, cookies=cookies, proxies=proxies, headers=headers, verify=False)\n headers.pop(h)\n\n return resp.status_code, resp.text\n\n\ndef send_url_payloads(s, url, 
cookies, proxies):\n r = requests.Request(\"GET\", url, cookies=cookies, headers=headers)\n prep = r.prepare()\n prep.url = url\n try:\n resp = s.send(prep, verify=False)\n except requests.exceptions.ConnectionError as e:\n print(e)\n\n parsed = urlparse(url)\n path = parsed.path\n return resp.status_code, resp.text, path\n\n\ndef send_options(url, cookies, proxies):\n resp = requests.options(url, cookies=cookies, proxies=proxies, headers=headers, verify=False)\n print(\"Response code: {} Response length: {} Sent OPTIONS method. \\n\".format(resp.status_code, len(resp.text)))\n\n if len(resp.text) < 1:\n print(\"Response length was 0 so probably NOT worth checking out....\\n\")\n\n print(\"Response Headers: \")\n for h, v in resp.request.headers.items():\n print(\"{}: {}\".format(h, v))\n\n\n# if proxy, set it for requests\nif args.proxy:\n try:\n proxies = {\"http\": args.proxy.split('//')[1],\n \"https\": args.proxy.split('//')[1]\n }\n except (IndexError, ValueError):\n print(\"invalid proxy specified\")\n sys.exit(1)\n\nelse:\n proxies = None\n\n# If cookies, parse them\nif args.cookies:\n cookies = dict(x.strip(' ').split('=') for x in args.cookies.split(';'))\nelse:\n cookies = {}\n\nhide = {}\nif args.hc:\n hide[\"hc\"] = args.hc\nelse:\n hide[\"hc\"] = ''\n\nif args.hl:\n hide[\"hl\"] = args.hl\nelse:\n hide[\"hl\"] = ''\n\nurl = args.url # https://target.com/some/path?param1=1¶m2=2\nparsed = urlparse(url)\npath = parsed.path # /some/path\nquery = parsed.query # param1=1param2=2\npathPieces = ' '.join(parsed.path.split('/')).split() # ['some', 'path']\nurl_payloads = setup_payloads(parsed, pathPieces, query)\n\nheader_payloads = {\n \"X-Original-URL\": path,\n \"X-Forwarded-For\": \"127.0.0.1\",\n \"X-Custom-IP-Authorization\": \"127.0.0.1\"\n }\n\nfor h, p in header_payloads.items():\n resp_code, resp_text = send_header_payloads(url, headers, cookies, proxies, h, p)\n MSG = \"Response code: {} Response length: {} Header: {}: {}\\n\".format(resp_code, len(resp_text), h, p)\n\n if hide[\"hc\"] != str(resp_code) and hide[\"hl\"] != str(len(resp_text)):\n print(MSG)\n\ns = requests.Session()\ns.proxies = proxies\nfor url in url_payloads:\n resp_code, resp_text, path = send_url_payloads(s, url, cookies, proxies)\n MSG = \"Response code: {} Response length: {} Path: {}\\n\".format(resp_code, len(resp_text), path)\n\n if hide[\"hc\"] != str(resp_code) and hide[\"hl\"] != str(len(resp_text)):\n print(MSG)\n\nsend_options(url, cookies, proxies)\n","sub_path":"403fuzzer.py","file_name":"403fuzzer.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"653709318","text":"from dataclasses import dataclass\nfrom base.common.models.request import SimpleRequestModel\n\n\n@dataclass\nclass GetImageAccessLogsRequestParams:\n LOAN_NUMBER_ID: str = \"LoanNumberID\"\n STATUS_ID: str = \"StatusID\"\n LIMIT: str = \"Limit\"\n\n\nclass GetImageAccessLogsRequest(SimpleRequestModel):\n def __init__(self, loan_number_id, status_id, limit, session_id, nonce, pretty_print):\n self.loan_number_id = loan_number_id\n self.status_id = status_id\n self.limit = limit\n super().__init__(session_id=session_id, nonce=nonce, pretty_print=pretty_print)\n\n def to_params(self):\n args = super().to_params()\n args[GetImageAccessLogsRequestParams.LOAN_NUMBER_ID] = self.loan_number_id\n args[GetImageAccessLogsRequestParams.STATUS_ID] = self.status_id\n args[GetImageAccessLogsRequestParams.LIMIT] = self.limit\n return 
args\n","sub_path":"APIs/task_items/requests/get_image_access_logs.py","file_name":"get_image_access_logs.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"390711638","text":"#!/usr/bin/env python\n\"\"\"\nExample of a colored prompt.\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom prompt_toolkit import prompt\nfrom pygments.style import Style\nfrom pygments.styles.default import DefaultStyle\nfrom pygments.token import Token\n\n\nclass ExampleStyle(Style):\n styles = DefaultStyle.styles.copy()\n\n styles.update({\n # User input.\n Token: '#ff0066',\n\n # Prompt.\n Token.Username: '#884444',\n Token.At: '#00aa00',\n Token.Colon: '#00aa00',\n Token.Pound: '#00aa00',\n Token.Host: '#000088 bg:#aaaaff',\n Token.Path: '#884444 underline',\n })\n\n\ndef get_prompt_tokens(cli):\n return [\n (Token.Username, 'john'),\n (Token.At, '@'),\n (Token.Host, 'localhost'),\n (Token.Colon, ':'),\n (Token.Path, '/user/john'),\n (Token.Pound, '# '),\n ]\n\n\nif __name__ == '__main__':\n answer = prompt(get_prompt_tokens=get_prompt_tokens, style=ExampleStyle)\n print('You said: %s' % answer)\n","sub_path":"examples/colored-prompt.py","file_name":"colored-prompt.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"72516003","text":"from .auth import *\n\n\nclass Anilist:\n \"\"\"\n Initialize a new instance to the Anilist API. This instance will handle read only credentials.\n Pass in your client id and client secret. In calls that require a user's auth token, you will need to provide it.\n\n :ivar dict settings: Various settings used across the module\n :ivar ALAuth auth: Handle Authorization endpoints\n \"\"\"\n def __init__(self, csecret, cid):\n \"\"\"\n :param csecret: Client Secret\n :param cid: Client ID\n \"\"\"\n self.settings = {'header': {'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'Pymoe (git.vertinext.com/ccubed/PyMoe'},\n 'apiurl': 'https://anilist.co/api',\n 'cid': cid,\n 'csecret': csecret}\n self.auth = ALAuth(self.settings)\n","sub_path":"Pymoe/Anilist/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"307222441","text":"import sqlite3\nimport shelve\nimport atexit\n\nfrom twisted.enterprise import adbapi\n\nfrom apiserver import settings\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\ndef set_dict_and_unicode_factory(conn):\n conn.row_factory = dict_factory\n conn.text_factory = sqlite3.OptimizedUnicode\n\ndbpool = adbapi.ConnectionPool('sqlite3', settings.DB_FILE, check_same_thread=False, cp_max=1, cp_min=1, cp_openfun=set_dict_and_unicode_factory)\nmetadata_db = shelve.open(settings.METADATA_FILE)\n\ndef shutdown_db():\n metadata_db.close()\n\natexit.register(shutdown_db)","sub_path":"apiserver/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"230574075","text":"\"\"\"Reusable tooling for interacting with Coq.\n\nKey contents:\n - CoqtopProc: thin wrapper for the `coqtop` process (via XML API)\n - CoqBot: high-level wrapper\n\"\"\"\n\nimport os.path\nimport re\nimport subprocess\nimport xml.etree.ElementTree as ET\nimport 
shlex\n\nfrom . import util\n\n\nclass CoqtopProc(object):\n\n def __init__(self, coq_install_dir, coq_version, extra_args=(), working_dir=None, verbose=False):\n \"\"\"\n Spawns a new coqtop process and creates pipes for interaction.\n \"\"\"\n\n if coq_version >= (8,9):\n cmd = [\n os.path.join(coq_install_dir, \"bin\", \"coqidetop\"),\n \"-main-channel\", \"stdfds\"]\n elif coq_version >= (8,5):\n cmd = [\n os.path.join(coq_install_dir, \"bin\", \"coqtop\"),\n \"-main-channel\", \"stdfds\", \"-ideslave\"]\n elif coq_version >= (8,4):\n cmd = [\n os.path.join(coq_install_dir, \"bin\", \"coqtop\"),\n \"-ideslave\"]\n else:\n raise Exception(\"specified version of coqtop is too old!\")\n\n cmd.extend(extra_args)\n\n if working_dir is not None:\n project_file = self.find_coqproject_file(working_dir)\n if project_file is not None:\n working_dir = os.path.dirname(project_file)\n with open(project_file, \"r\") as f:\n cmd.extend(shlex.split(f.read()))\n\n self.verbose = verbose\n\n print(\"Starting `{}` in {}\".format(\" \".join(cmd), working_dir))\n self.proc = subprocess.Popen(\n cmd,\n bufsize=0,\n cwd=working_dir,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n\n def print(self, value):\n if self.verbose:\n print(value)\n\n def find_coqproject_file(self, dir):\n if dir.endswith(\"/\"):\n dir = dir[:-1]\n if not dir:\n return None\n project_file = os.path.join(dir, \"_CoqProject\")\n if os.path.isfile(project_file):\n return project_file\n if dir != \"/\":\n return self.find_coqproject_file(os.path.dirname(dir))\n return None\n\n def send(self, text):\n \"\"\"\n Send the given text to coqtop. Yields XML tags found in the response.\n For proper operation, clients must always exhaust this generator.\n \"\"\"\n\n if text[-1] != \"\\n\":\n text += \"\\n\"\n self.print(\"sending: {}\".format(text.encode(\"unicode-escape\")))\n\n # Send\n self.proc.stdin.write(text.encode(\"ascii\"))\n self.proc.stdin.flush()\n self.print(\"sent\")\n\n # Recieve until we find ...\n xm = util.XMLMuncher()\n done = False\n while not done:\n buf = self.proc.stdout.read(1024)\n try:\n response = buf.decode(\"ascii\")\n except UnicodeDecodeError as e:\n self.print(\"{}\".format(list(\"{:x}\".format(b) for b in buf)))\n raise e\n self.print(\"got partial response: {}\".format(response))\n if not response:\n raise Exception(\"coqtop died!\")\n for tag in xm.process(response):\n xml = ET.fromstring(tag)\n yield xml\n if xml.tag == \"value\":\n done = True\n self.print(\"--- DONE ---\")\n\n def stop(self):\n \"\"\"\n Stop the underlying coqtop process.\n \"\"\"\n p = self.proc\n if p is not None:\n p.terminate()\n ret = p.wait()\n print(\"coqtop exited with status {}\".format(ret))\n self.proc = None\n\nTOKENS = (\n (\"open_comment\", re.compile(r'\\(\\*')),\n (\"close_comment\", re.compile(r'\\*\\)')),\n (\"string\", re.compile(r'\"[^\"]*\"')),\n (\"whitespace\", re.compile(r'\\s+')),\n (\"word\", re.compile(r'\\w+')),\n (\"fullstop\", re.compile(r'\\.(?:\\s+|$)')),\n)\n\ndef tokens(text, start=0):\n i = start\n comment_depth = 0\n while i < len(text):\n name = \"other\"\n match = text[i]\n for n, regex in TOKENS:\n m = regex.match(text, pos=i)\n if m and n == \"symbol\" and (\"(*\" in m.group(0) or \"*)\" in m.group(0)):\n m = None\n if m:\n name = n\n match = m.group(0)\n break\n if name == \"open_comment\":\n comment_depth += 1\n elif name == \"close_comment\":\n comment_depth -= 1\n elif name == \"whitespace\":\n pass\n elif comment_depth == 0:\n yield (i, name, len(match), match)\n i += 
len(match)\n\n\nBULLET_CHARS = { \"-\", \"+\", \"*\", \"{\", \"}\" }\nBULLET_CHARS_REGEX = re.compile(r\"\\s*[\" + re.escape(\"\".join(BULLET_CHARS)) + r\"]\")\ndef find_first_coq_command(text, start=0):\n \"\"\"Find the first Coq command in `text[start:]`.\n\n The return value is the index one past the end of the command, such that\n `text[start:RETURN_VALUE]` gives the text of the command.\n\n If no command is present, this function returns None.\n \"\"\"\n\n is_first = True\n for token_pos, token_type, token_len, token_text in tokens(text, start):\n\n # Bullet characters in Ltac require some care; each is its own command\n if is_first:\n match = BULLET_CHARS_REGEX.match(token_text)\n if match:\n return token_pos + match.end()\n\n # Otherwise, commands end in fullstops\n if token_type == \"fullstop\":\n return token_pos + 1\n\n is_first = False\n\n return None\n\n\ndef pr(e, depth=0):\n print(\"{}{} [text={}]\".format(\" \" * depth, e, e.text))\n if e:\n for x in e:\n pr(x, depth + 2)\n\n\ndef text_of(xml):\n # WTF ETree API?!? Why is this not a builtin?\n return \"\".join(xml.itertext())\n\n\ndef format_response(xml, coq_version):\n \"\"\"Takes XML output from coqtop and makes it clean and pretty.\n\n Sample input:\n \n \"\"\"\n\n messages = []\n for x in xml:\n if x.tag == \"feedback\":\n for msg in x.iter(\"message\"):\n messages.append(text_of(msg))\n if x.tag == \"value\":\n if x.attrib.get(\"val\") != \"good\":\n raise CoqException(text_of(x))\n goals = list(x.iter(\"goal\"))\n output = \"Goals: {}\\n\\n\".format(len(goals))\n output += \"\\n\".join(messages)\n if goals:\n # from xml import etree\n # print(\"\\n\".join(ET.tostring(g).decode(\"UTF-8\") for g in goals))\n primary_goal = goals[0]\n if coq_version >= (8,6):\n strs = list(primary_goal.iter(\"richpp\"))\n else:\n strs = list(primary_goal.iter(\"string\"))[1:]\n hyps = strs[:-1]\n goal = strs[-1]\n for h in hyps:\n output += \" {}\\n\".format(text_of(h))\n output += \" \" + (\"-\" * 40) + \"\\n\"\n output += \" {}\\n\".format(text_of(goal))\n return output\n # else:\n # print(\"got tag '{}'\".format(x))\n\n\nclass CoqException(Exception):\n pass\n\n\nclass CoqBot(object):\n\n def __init__(self, coq_install_dir, coq_version, extra_args=(), working_dir=None, verbose=False):\n self.verbose = verbose\n self.coqtop = CoqtopProc(\n coq_install_dir=coq_install_dir,\n coq_version=coq_version,\n extra_args=extra_args,\n working_dir=working_dir,\n verbose=verbose)\n self.coq_version = coq_version\n self.cmds_sent = [] # list of (command, state_id_before_command)\n\n self.state_id = None\n for parsed in self.coqtop.send(''):\n if parsed.tag == \"value\":\n self.state_id = int(parsed.find(\".//state_id\").attrib.get(\"val\"))\n if self.state_id is None:\n raise Exception(\"did not get an initial state ID from coqtop\")\n\n def print(self, value):\n if self.verbose:\n print(value)\n\n def append(self, text, start=0):\n \"\"\"Send the first command in `text[start:]` to Coq.\n\n Returns the new offset after processing the first command in\n text[start:], such that `text[start:RETURN_VALUE]` is what was sent.\n\n Appends the sent command to this object's \"sent buffer\" (see\n `rewind_to(...)`).\n\n Returns 0 if there is no command in the given text.\n\n Throws CoqException if Coq reports an error. Throws other kinds of\n exceptions if there is some problem communicating with the CoqTop\n process.\n\n NOTE: To send multiple commands, use a loop. 
For instance:\n\n idx = 0\n while True:\n n = bot.append(text, start=idx)\n if n == 0:\n break\n else:\n # Optional: update display\n idx = n\n \"\"\"\n\n index_of_end_of_command = find_first_coq_command(text, start)\n\n if index_of_end_of_command:\n coq_cmd = text[start:index_of_end_of_command]\n\n if self.coq_version >= (8,7):\n to_send = '{cmd}1'.format(\n cmd=util.xml_encode(coq_cmd),\n state_id=self.state_id)\n elif self.coq_version >= (8,5):\n to_send = '{}'.format(util.xml_encode(coq_cmd))\n else:\n to_send = '{}'.format(util.xml_encode(coq_cmd))\n\n for parsed in self.coqtop.send(to_send):\n if parsed.tag != \"value\":\n continue\n if parsed.attrib.get(\"val\") != \"good\":\n print(\"Error!\")\n pr(parsed)\n error = text_of(parsed).strip()\n if not error:\n error = \"(unknown error)\"\n raise CoqException(error)\n\n state_id = None\n if self.coq_version >= (8,5):\n # pr(parsed)\n # pr(parsed.find(\".//state_id\"))\n self.print(repr(parsed.find(\".//state_id\").attrib))\n self.print(repr(parsed.find(\".//state_id\").attrib.get(\"val\")))\n state_id = int(parsed.find(\".//state_id\").attrib.get(\"val\"))\n self.print(\"GOT STATE ID: {}\".format(state_id))\n self.cmds_sent.append((coq_cmd, self.state_id))\n self.state_id = state_id\n\n return index_of_end_of_command or 0\n\n def current_goal(self):\n \"\"\"Read the current goal.\n\n Returns text indicating how many unproven goals remain and showing the\n focused goal.\n \"\"\"\n\n self.print(\"asking for goal\")\n if self.coq_version >= (8,5):\n response = self.coqtop.send('')\n else:\n response = self.coqtop.send('')\n return format_response(response, coq_version=self.coq_version)\n\n def rewind_to(self, idx):\n \"\"\"Rewind to an earlier state.\n\n This procedure rewinds to the end of the last command which ends before\n `idx` in this object's \"sent buffer\". The `append(...)` call adds\n commands to the sent buffer.\n\n Returns the resulting index.\n \"\"\"\n\n index_of_earliest_undone_command = None\n count = 0\n for i, (cmd, state_id) in enumerate(self.cmds_sent):\n new_count = count + len(cmd)\n if new_count > idx:\n index_of_earliest_undone_command = i\n break\n count = new_count\n\n if index_of_earliest_undone_command is not None:\n _, state_to_rewind_to = self.cmds_sent[index_of_earliest_undone_command]\n to_send = ''.format(state_to_rewind_to)\n for parsed in self.coqtop.send(to_send):\n if parsed.tag != \"value\":\n continue\n if parsed.attrib.get(\"val\") != \"good\":\n print(\"Error!\")\n pr(parsed)\n error = text_of(parsed).strip()\n if not error:\n error = \"(unknown error)\"\n raise CoqException(error)\n self.cmds_sent = self.cmds_sent[0:index_of_earliest_undone_command]\n self.state_id = state_to_rewind_to\n\n return count\n\n def sent_buffer(self):\n for cmd_text, _ in self.cmds_sent:\n yield cmd_text\n\n def stop(self):\n self.coqtop.stop()\n","sub_path":"coq.py","file_name":"coq.py","file_ext":"py","file_size_in_byte":13783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"22467903","text":"'''\r\nthis is the trainer of the 'Future Frame Prediction for Anomaly Detection - A New Baseline CVPR2018'\r\n'''\r\n#!!!!! 
ignore the warning messages\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport os\r\nimport pickle\r\nimport math\r\nimport torch\r\nimport time\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom collections import OrderedDict\r\nfrom torch.utils.data import DataLoader\r\nimport torchvision.transforms as T\r\nimport torchvision.transforms.functional as tf\r\n\r\nfrom lib.core.engine.default_engine import DefaultTrainer, DefaultInference\r\nfrom lib.core.utils import AverageMeter, multi_obj_grid_crop, frame_gradient, get_batch_dets, tensorboard_vis_images\r\nfrom lib.datatools.evaluate.utils import psnr_error\r\n\r\n\r\nclass Trainer(DefaultTrainer):\r\n NAME = [\"OCAE.TRAIN\"]\r\n def __init__(self, *defaults, **kwargs):\r\n '''\r\n Args:\r\n defaults(tuple): the default will have:\r\n 0->model:{'Generator':net_g, 'Driscriminator':net_d, 'FlowNet':net_flow}\r\n 1->train_dataloader: the dataloader \r\n 2->val_dataloader: the dataloader \r\n 3->optimizer:{'optimizer_g':op_g, 'optimizer_d'}\r\n 4->loss_function: {'g_adverserial_loss':.., 'd_adverserial_loss':..., 'gradient_loss':.., 'opticalflow_loss':.., 'intentsity_loss':.. }\r\n 5->logger: the logger of the whole training process\r\n 6->config: the config object of the whole process\r\n\r\n kwargs(dict): the default will have:\r\n verbose(str):\r\n parallel(bool): True-> data parallel\r\n pertrain(bool): True-> use the pretarin model\r\n extra param:\r\n test_dataset_keys: the dataset keys of each video\r\n test_dataset_dict: the dataset dict of whole test videos\r\n '''\r\n self._hooks = []\r\n self._register_hooks(kwargs['hooks'])\r\n # logger & config\r\n self.logger = defaults[5]\r\n self.config = defaults[6]\r\n\r\n model = defaults[0]\r\n # basic things\r\n if kwargs['parallel']:\r\n self.A = self.data_parallel(model['A'])\r\n self.B = self.data_parallel(model['B'])\r\n self.C = self.data_parallel(model['C'])\r\n self.Detector = self.data_parallel(model['Detector'])\r\n else:\r\n self.A = model['A'].cuda()\r\n self.B = model['B'].cuda()\r\n self.C = model['C'].cuda()\r\n self.Detector = model['Detector'].cuda()\r\n \r\n if kwargs['pretrain']:\r\n self.load_pretrain()\r\n\r\n self.train_dataloader = defaults[1]\r\n self._train_loader_iter = iter(self.train_dataloader)\r\n\r\n self.val_dataloader = defaults[2]\r\n self._val_loader_iter = iter(self.val_dataloader)\r\n\r\n # get the optimizer\r\n optimizer = defaults[3]\r\n self.optim_ABC = optimizer['optimizer_abc']\r\n\r\n # get the loss_fucntion\r\n loss_function = defaults[4]\r\n self.a_loss = loss_function['A_loss']\r\n self.b_loss = loss_function['B_loss']\r\n self.c_loss = loss_function['C_loss']\r\n\r\n # basic meter\r\n self.batch_time = AverageMeter()\r\n self.data_time = AverageMeter()\r\n self.loss_meter_A = AverageMeter()\r\n self.loss_meter_B = AverageMeter()\r\n self.loss_meter_C = AverageMeter()\r\n self.loss_meter_ABC = AverageMeter()\r\n self.psnr = AverageMeter()\r\n\r\n # others\r\n self.verbose = kwargs['verbose']\r\n self.accuarcy = 0.0 # to store the accuracy varies from epoch to epoch\r\n self.config_name = kwargs['config_name']\r\n self.kwargs = kwargs\r\n self.train_normalize = self.config.ARGUMENT.train.normal.use\r\n self.train_mean = self.config.ARGUMENT.train.normal.mean\r\n self.train_std = self.config.ARGUMENT.train.normal.std\r\n self.val_normalize = self.config.ARGUMENT.train.normal.use\r\n self.val_mean = self.config.ARGUMENT.train.normal.mean\r\n self.val_std = self.config.ARGUMENT.train.normal.std\r\n # self.total_steps = 
len(self.train_dataloader)\r\n self.result_path = ''\r\n self.log_step = self.config.TRAIN.log_step # how often (in steps) to log training information\r\n self.eval_step = self.config.TRAIN.eval_step\r\n self.vis_step = self.config.TRAIN.vis_step # how often (in steps) to visualize\r\n self.save_step = self.config.TRAIN.save_step # how often (in steps) to save the model, regardless of its accuracy\r\n self.max_steps = self.config.TRAIN.max_steps\r\n # self.testing_data_folder = self.config.DATASET.test_path\r\n self.test_dataset_keys = kwargs['test_dataset_keys']\r\n self.test_dataset_dict = kwargs['test_dataset_dict']\r\n\r\n self.cluster_dataset_keys = kwargs['cluster_dataset_keys']\r\n self.cluster_dataset_dict = kwargs['cluster_dataset_dict']\r\n\r\n self.evaluate_function = kwargs['evaluate_function']\r\n \r\n # hyper-parameters of loss \r\n self.loss_lamada = kwargs['loss_lamada']\r\n\r\n # the lr scheduler\r\n lr_scheduler_dict = kwargs['lr_scheduler_dict']\r\n self.lr_abc = lr_scheduler_dict['optimizer_abc_scheduler']\r\n\r\n if self.config.RESUME.flag:\r\n self.resume()\r\n \r\n if self.config.FINETUNE.flag:\r\n self.fine_tune()\r\n \r\n\r\n def train(self,current_step):\r\n # Pytorch [N, C, D, H, W]\r\n # initialize\r\n start = time.time()\r\n self.A.train()\r\n self.B.train()\r\n self.C.train()\r\n self.Detector.eval()\r\n writer = self.kwargs['writer_dict']['writer']\r\n global_steps = self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])]\r\n\r\n # get the data\r\n data, _ = next(self._train_loader_iter) # the core for dataloader\r\n self.data_time.update(time.time() - start)\r\n \r\n # based on D, get each frame\r\n # in this method, D = 3 and does not change\r\n future = data[:, :, -1, :, :].cuda() # t+1 frame \r\n current = data[:, :, 1, :, :].cuda() # t frame\r\n past = data[:, :, 0, :, :].cuda() # t-1 frame\r\n\r\n bboxs = get_batch_dets(self.Detector, current)\r\n # this method trains the model on the detected objects instead of whole frames\r\n for index, bbox in enumerate(bboxs):\r\n if bbox.numel() == 0:\r\n bbox = bbox.new_zeros([1, 4])\r\n # get the crop objects\r\n input_currentObject_B, _ = multi_obj_grid_crop(current[index], bbox)\r\n future_object, _ = multi_obj_grid_crop(future[index], bbox)\r\n future2current = torch.stack([future_object, input_currentObject_B], dim=1)\r\n past_object, _ = multi_obj_grid_crop(past[index], bbox)\r\n current2past = torch.stack([input_currentObject_B, past_object], dim=1)\r\n\r\n _, _, input_objectGradient_A = frame_gradient(future2current)\r\n input_objectGradient_A = input_objectGradient_A.sum(1)\r\n _, _, input_objectGradient_C = frame_gradient(current2past)\r\n input_objectGradient_C = input_objectGradient_C.sum(1)\r\n # import ipdb; ipdb.set_trace()\r\n # True Process =================Start===================\r\n _, output_recGradient_A = self.A(input_objectGradient_A)\r\n _, output_recObject_B = self.B(input_currentObject_B)\r\n _, output_recGradient_C = self.C(input_objectGradient_C)\r\n # import ipdb; ipdb.set_trace()\r\n loss_A = self.a_loss(output_recGradient_A, input_objectGradient_A)\r\n loss_B = self.b_loss(output_recObject_B, input_currentObject_B)\r\n loss_C = self.c_loss(output_recGradient_C, input_objectGradient_C)\r\n\r\n loss_all = self.loss_lamada['A_loss'] * loss_A + self.loss_lamada['B_loss'] * loss_B + self.loss_lamada['C_loss'] * loss_C\r\n self.optim_ABC.zero_grad()\r\n loss_all.backward()\r\n self.optim_ABC.step()\r\n # record\r\n self.loss_meter_ABC.update(loss_all.detach())\r\n if 
self.config.TRAIN.general.scheduler.use:\r\n self.lr_abc.step()\r\n \r\n # ======================End==================\r\n\r\n self.batch_time.update(time.time() - start)\r\n\r\n if (current_step % self.log_step == 0):\r\n msg = 'Step: [{0}/{1}]\\t' \\\r\n 'Type: {cae_type}\\t' \\\r\n 'Time: {batch_time.val:.2f}s ({batch_time.avg:.2f}s)\\t' \\\r\n 'Speed: {speed:.1f} samples/s\\t' \\\r\n 'Data: {data_time.val:.2f}s ({data_time.avg:.2f}s)\\t' \\\r\n 'Loss_ABC: {losses_ABC.val:.5f} ({losses_ABC.avg:.5f})\\t'.format(current_step, self.max_steps, cae_type=self.kwargs['model_type'], batch_time=self.batch_time, speed=self.config.TRAIN.batch_size/self.batch_time.val, data_time=self.data_time,losses_ABC=self.loss_meter_ABC)\r\n self.logger.info(msg)\r\n writer.add_scalar('Train_loss_ABC', self.loss_meter_ABC.val, global_steps)\r\n\r\n if (current_step % self.vis_step == 0):\r\n vis_objects = OrderedDict()\r\n vis_objects['train_input_objectGradient_A'] = input_objectGradient_A.detach()\r\n vis_objects['train_input_currentObject_B'] = input_currentObject_B.detach()\r\n vis_objects['train_input_objectGradient_C'] = input_objectGradient_C.detach()\r\n vis_objects['train_output_recGradient_A'] = output_recGradient_A.detach()\r\n vis_objects['train_output_recObject_B'] = output_recObject_B.detach()\r\n vis_objects['train_output_recGradient_C'] = output_recGradient_C.detach()\r\n tensorboard_vis_images(vis_objects, writer, global_steps, self.train_normalize, self.train_mean, self.train_std)\r\n global_steps += 1 \r\n # reset start\r\n start = time.time()\r\n \r\n self.saved_model = {'A':self.A, 'B':self.B, 'C':self.C}\r\n self.saved_optimizer = {'optim_ABC': self.optim_ABC}\r\n self.saved_loss = {'loss_ABC':self.loss_meter_ABC.val}\r\n self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])] = global_steps\r\n \r\n def mini_eval(self, current_step):\r\n if current_step % self.config.TRAIN.mini_eval_step != 0:\r\n return\r\n temp_meter_A = AverageMeter()\r\n temp_meter_B = AverageMeter()\r\n temp_meter_C = AverageMeter()\r\n self.A.eval()\r\n self.B.eval()\r\n self.C.eval()\r\n self.Detector.eval()\r\n for data, _ in self.val_dataloader:\r\n # base on the D to get each frame\r\n # in this method, D = 3 and not change\r\n future_mini = data[:, :, -1, :, :].cuda() # t+1 frame \r\n current_mini = data[:, :, 1, :, :].cuda() # t frame\r\n past_mini = data[:, :, 0, :, :].cuda() # t-1 frame\r\n\r\n bboxs_mini = get_batch_dets(self.Detector, current_mini)\r\n\r\n for index, bbox in enumerate(bboxs_mini):\r\n if bbox.numel() == 0:\r\n bbox = bbox.new_zeros([1, 4])\r\n # get the crop objects\r\n input_currentObject_B, _ = multi_obj_grid_crop(current_mini[index], bbox)\r\n future_object, _ = multi_obj_grid_crop(future_mini[index], bbox)\r\n future2current = torch.stack([future_object, input_currentObject_B], dim=1)\r\n past_object, _ = multi_obj_grid_crop(past_mini[index], bbox)\r\n current2past = torch.stack([input_currentObject_B, past_object], dim=1)\r\n\r\n _, _, input_objectGradient_A = frame_gradient(future2current)\r\n input_objectGradient_A = input_objectGradient_A.sum(1)\r\n _, _, input_objectGradient_C = frame_gradient(current2past)\r\n input_objectGradient_C = input_objectGradient_C.sum(1)\r\n \r\n _, output_recGradient_A = self.A(input_objectGradient_A)\r\n _, output_recObject_B = self.B(input_currentObject_B)\r\n _, output_recGradient_C = self.C(input_objectGradient_C)\r\n\r\n psnr_A = psnr_error(output_recGradient_A.detach(), input_objectGradient_A)\r\n psnr_B = 
psnr_error(output_recObject_B.detach(), input_currentObject_B)\r\n psnr_C = psnr_error(output_recGradient_C.detach(), input_objectGradient_C)\r\n temp_meter_A.update(psnr_A.detach())\r\n temp_meter_B.update(psnr_B.detach())\r\n temp_meter_C.update(psnr_C.detach())\r\n\r\n self.logger.info(f'&^*_*^& ==> Step:{current_step}/{self.max_steps} the A PSNR is {temp_meter_A.avg:.2f}, the B PSNR is {temp_meter_B.avg:.2f}, the C PSNR is {temp_meter_C.avg:.2f}')\r\n\r\n\r\nclass Inference(DefaultInference):\r\n NAME = [\"OCAE.INFERENCE\"]\r\n def __init__(self, *defaults,**kwargs):\r\n '''\r\n Args:\r\n defaults(tuple): the default will have:\r\n 0->model: the model of the experiment\r\n 1->model_path: the path of the saved model\r\n 2->val_dataloader: the dataloader for inference\r\n 3->logger: the logger of the whole process\r\n 4->config: the config object of the whole process\r\n kwargs(dict): the default will have:\r\n verbose(str):\r\n parallel(bool): True-> data parallel\r\n pretrain(bool): True-> use the pretrained model\r\n mode(str): 'dataset' -> the data will be passed in through the dataloader (discarded, because we will use the dataset to get everything we need)\r\n '''\r\n self._hooks = []\r\n self._register_hooks(kwargs['hooks'])\r\n self.logger = defaults[3]\r\n self.config = defaults[4]\r\n self.model_path = defaults[1]\r\n\r\n save_model = torch.load(self.model_path)\r\n \r\n model = defaults[0]\r\n if kwargs['parallel']:\r\n self.A = self.data_parallel(model['A'])\r\n self.B = self.data_parallel(model['B'])\r\n self.C = self.data_parallel(model['C'])\r\n self.Detector = self.data_parallel(model['Detector'])\r\n else:\r\n self.A = model['A'].cuda()\r\n self.B = model['B'].cuda()\r\n self.C = model['C'].cuda()\r\n self.Detector = model['Detector'].cuda()\r\n \r\n # self.load()\r\n\r\n self.verbose = kwargs['verbose']\r\n self.kwargs = kwargs\r\n self.config_name = kwargs['config_name']\r\n self.normalize = self.config.ARGUMENT.val.normal.use\r\n self.mean = self.config.ARGUMENT.val.normal.mean\r\n self.std = self.config.ARGUMENT.val.normal.std\r\n # self.mode = kwargs['mode']\r\n\r\n self.test_dataset_keys = kwargs['test_dataset_keys']\r\n self.test_dataset_dict = kwargs['test_dataset_dict']\r\n\r\n self.test_dataset_keys_w = kwargs['test_dataset_keys_w']\r\n self.test_dataset_dict_w = kwargs['test_dataset_dict_w']\r\n self.metric = 0.0\r\n self.evaluate_function = kwargs['evaluate_function']\r\n\r\n \r\n \r\n def inference(self):\r\n for h in self._hooks:\r\n h.inference()\r\n ","sub_path":"lib/core/ocae.py","file_name":"ocae.py","file_ext":"py","file_size_in_byte":15147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"301428975","text":"#!/usr/bin/env python\nimport random\n\n\ndef display_board(board=[str(i) for i in range(0, 10)]):\n\n print('\\n'*5)\n print(board[7] + ' | ' + board[8] + ' | ' + board[9] + ' ')\n print('----------')\n print(board[4] + ' | ' + board[5] + ' | ' + board[6] + ' ')\n print('----------')\n print(board[1] + ' | ' + board[2] + ' | ' + board[3] + ' ')\n\n\ndef player_input(player1, player2):\n markers = {}\n marker1 = ''\n while not marker1 == 'X' and not marker1 == 'O':\n marker1 = input(\n \"Player {}: please choose 'X' or 'O' ? 
\".format(player1))\n\n marker2 = 'O' if marker1 is 'X' else 'X'\n print('Player {} marker: {} Player {} marker: {}'.format(\n player1, marker1, player2, marker2))\n markers[player1] = marker1\n markers[player2] = marker2\n return markers\n\n\ndef place_marker(board, marker, position):\n board[int(position)] = marker\n return board\n\n\ndef win_check(board, mark):\n ''' create the winning combination\n and check if there is one \n '''\n h = [board[i:3+i] for i in [1, 4, 7]]\n v = [board[i:7+i:3] for i in [1, 2, 3]]\n d1 = [board[1:10:4]]\n d2 = [board[3:8:2]]\n\n win_combinations = h+v+d1+d2\n\n for i in range(0, len(win_combinations)):\n if len(set(win_combinations[i])) <= 1:\n if win_combinations[i][0] == mark:\n return True\n return False\n\n\ndef choose_first():\n first = random.randint(1, 2)\n return first\n\n\ndef space_check(board, position):\n position = int(position)\n return not (board[position] == 'X' or board[position] == 'O')\n\n\ndef full_board_check(board):\n for i in range(1, len(board)):\n if space_check(board, i):\n return False\n return True\n\n\ntest_board = [0, 'O', 'X', 'X', 'O', 'X', 'X', 'O', 'O', 'X']\nfull_board_check(test_board)\n\n\ndef player_choice(board, player):\n\n next_play = input('{} : What is your move ? '.format(player))\n while not space_check(board, int(next_play)):\n next_play = input(\n 'That position is already taken. Please choose a different move: ')\n return int(next_play)\n\n\ndef replay():\n play = input('Choose Y to play, N to exit ')\n return True if play.lower() == 'y' else False\n\n\ndef main():\n ''' Game of tic tac toe '''\n\n print('Welcome to Tic Tac Toe!')\n print('Instructions:\\n Choose the corresponding key to pick your position on the board\\n\\t')\n display = display_board()\n print(display)\n play = replay()\n pass\n\n while play:\n # Set the game up here\n player1 = input('Enter first player name: ')\n player2 = input('Enter second player name: ')\n if choose_first() == 2:\n player1, player2 = player2, player1\n print('Player {} goes first!'.format(player1))\n markers = player_input(player1, player2)\n board = [str(i) for i in range(0, 10)]\n\n full_board = False\n winner1 = False\n winner2 = False\n\n while not winner1 and not winner2 and not full_board:\n # Player 1 Turn\n move1 = player_choice(board, player1)\n board = place_marker(board, markers[player1], move1)\n display_board(board)\n\n full_board = full_board_check(board)\n winner1 = win_check(board, markers[player1])\n winner2 = win_check(board, markers[player2])\n\n if winner1 or winner2 or full_board:\n break\n\n # Player2's turn.\n move2 = player_choice(board, player2)\n board = place_marker(board, markers[player2], move2)\n display_board(board)\n\n full_board = full_board_check(board)\n winner1 = win_check(board, markers[player1])\n winner2 = win_check(board, markers[player2])\n\n if full_board and not winner1 and not winner2:\n print('\\n\\t Evenly matched! its a draw.')\n elif winner1:\n print('\\n\\t {} is the winner! CONGRATULATIONS!!'.format(player1))\n elif winner2:\n print('\\n\\t {} is the winner! 
CONGRATULATIONS!!'.format(player2))\n\n if not replay():\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"TicTacToe/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"485073621","text":"import xarray as xr\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport numpy as np\nimport pandas as pd\nimport xbpch\nimport cartopy.crs as ccrs\nfrom matplotlib import colorbar, colors\nimport statistics\nfrom sklearn.metrics import r2_score\n#%matplotlib inline\n\n# Define a function to take the mean of any variable at the surface.\ndef GeneralHgMeanSurface (Dataset_OLD, Dataset_NEW, Variable, Units, Unit_Conversion, Title=\"Mean over Time\"):\n \"\"\" Plot the mean over time for any chosen variable at the surface level for both the reference and new models. \n Produce the absolute and percent differences for the reference and new models.\n Args: \n Dataset_OLD (str) : Reference Model bpch file\n Dataset_NEW (str) : New Model bpch file \n Variable (str) : Names of the variable/s you are choosing to take the mean over time with e.g.\n ['Variable 1', 'Variable 2', etc]. \n Units (str) : Name of the units the data is in.\n Unit_Conversion (float) : Conversion factor that converts your data to your preferred unit.\n Title (str) : Title of your graph. \n \n \"\"\"\n # An if statement is used to account for the use of multiple variables.\n if type(Variable) is list and len(Variable) > 1:\n OLD_sum=0\n NEW_sum=0\n \n # Create a for loop to add each variable, extracting the mean of the variable in respect to time \n # at surface level.\n for i in range(len(Variable)):\n tmpVar1 = ((Dataset_OLD[Variable[i]].isel(lev=0).mean('time')) * Unit_Conversion)\n OLD_sum = OLD_sum + tmpVar1\n tmpVar2 = ((Dataset_NEW[Variable[i]].isel(lev=0).mean('time')) * Unit_Conversion)\n NEW_sum = NEW_sum + tmpVar2\n else:\n # Extract the mean of one variable in respect to time at surface level \n NEW_sum = ((Dataset_NEW[Variable].isel(lev=0).mean('time')) * Unit_Conversion)\n OLD_sum = ((Dataset_OLD[Variable].isel(lev=0).mean('time')) * Unit_Conversion)\n \n \n \n # Find the absolute difference between the reference and new model.\n Abs_diff = NEW_sum - OLD_sum\n # Find the absolute maximum value of the absolute difference. \n Abs_MaxVal= np.max(np.abs(Abs_diff))\n \n \n # Find the percent difference of the models. \n Perc_diff = (Abs_diff / OLD_sum)*100\n # Find the absolute maximum value of the percent difference. \n Perc_MaxVal= np.max(np.abs(Perc_diff))\n \n \n \n # Plot the four graphs as subplots.\n plt.figure(figsize=(20,10))\n \n # Plot the reference model and use a geographical map.\n ax = plt.subplot(221, projection=ccrs.PlateCarree())\n im=OLD_sum.plot.contourf(x='lon',y='lat',ax=ax, transform=ccrs.PlateCarree(), cmap='viridis', \n cbar_kwargs={'orientation':'horizontal',\n 'ticklocation':'auto',\n 'label': Units}) \n # Add a title.\n plt.title(' Reference Model Version: '+Title) \n # Show the coastlines.\n ax.coastlines()\n \n \n \n # Plot the new model using a geographical map. 
\n ax = plt.subplot(222, projection=ccrs.PlateCarree())\n im= NEW_sum.plot.contourf(x='lon',y='lat', cmap='viridis', transform=ccrs.PlateCarree(), ax=ax,\n cbar_kwargs={'orientation':'horizontal',\n 'ticklocation':'auto',\n 'label': Units})\n # Add a title.\n plt.title('New Model Version: '+ Title)\n # Show the coastlines.\n ax.coastlines()\n \n # Plot the absolute difference using a geographical map\n ax = plt.subplot(223, projection=ccrs.PlateCarree())\n im= Abs_diff.plot.imshow(x='lon',y='lat', ax=ax,transform=ccrs.PlateCarree(), cmap='RdBu', vmin=(-Abs_MaxVal), vmax=(Abs_MaxVal),\n cbar_kwargs={'orientation':'horizontal',\n 'ticklocation':'auto',\n 'label': Units})\n # Add a title\n plt.title(\"Absolute Difference\")\n # Show the coastlines\n ax.coastlines()\n \n # Plot the percent difference \n ax = plt.subplot(224, projection=ccrs.PlateCarree())\n im= Perc_diff.plot.imshow(x='lon',y='lat',ax=ax,transform=ccrs.PlateCarree(), cmap='RdBu', vmin=(-Perc_MaxVal), vmax=(Perc_MaxVal),\n cbar_kwargs={'orientation':'horizontal',\n 'ticklocation':'auto',\n 'label':\"%\" })\n # Add a title\n plt.title(\"Percent Difference (%)\")\n # Show the coastlines \n ax.coastlines()\n \n # Show the 4 subplots \n GenGraph= plt.show()\n \n # Return the 4 graphs\n return GenGraph\n \n\n","sub_path":"GeneralGraph.py","file_name":"GeneralGraph.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"394712884","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'http://www.vanityfair.com/society/2014/06/monica-lewinsky-humiliation-culture'\nr = requests.get(url)\nr_html = r.text\nsoup = BeautifulSoup(r_html, 'lxml')\n\n# for article in soup.select(\"body h1, body p\"):\n# print (article.text)\n\nwith open(\"lesswork.txt\", \"w\") as textfile:\n for article in soup.select(\"body h1, body p\"):\n textfile.write(article.text)\n","sub_path":"lessons/e19.py","file_name":"e19.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"228292236","text":"\"\"\"SingerTap and supporting classes.\n\nThis module contains the SingerTap class as well as a supporting methods.\n\"\"\"\nimport asyncio\nimport json\nimport logging\nimport shutil\nimport sys\nfrom asyncio import Task\nfrom asyncio.streams import StreamReader\nfrom hashlib import sha1\nfrom pathlib import Path\nfrom typing import Tuple\n\nfrom jsonschema import Draft4Validator\nfrom meltano.core.behavior.hookable import hook\nfrom meltano.core.job import JobFinder, Payload\nfrom meltano.core.plugin.error import PluginExecutionError, PluginLacksCapabilityError\nfrom meltano.core.plugin_invoker import PluginInvoker\nfrom meltano.core.setting_definition import SettingDefinition, SettingKind\nfrom meltano.core.utils import file_has_data, flatten, merge\n\nfrom . 
import PluginType, SingerPlugin\nfrom .catalog import (\n MetadataExecutor,\n MetadataRule,\n SchemaExecutor,\n SchemaRule,\n property_breadcrumb,\n select_filter_metadata_rules,\n select_metadata_rules,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nasync def _stream_redirect(\n stream: asyncio.StreamReader, file_like_obj, write_str=False\n):\n \"\"\"Redirect stream to a file like obj.\"\"\"\n while not stream.at_eof():\n data = await stream.readline()\n file_like_obj.write(data.decode(\"ascii\") if write_str else data)\n\n\ndef _debug_logging_handler(\n name: str, plugin_invoker: PluginInvoker, stderr: StreamReader\n) -> Task:\n \"\"\"Route debug log lines to stderr or an OutputLogger if one is present in our invocation context.\"\"\"\n if not plugin_invoker.context or not plugin_invoker.context.base_output_logger:\n return asyncio.ensure_future(\n _stream_redirect(stderr, sys.stderr, write_str=True)\n )\n\n out = plugin_invoker.context.base_output_logger.out(\n name, color=\"yellow\", subtask_name=\"discovery\"\n )\n with out.line_writer() as outerr:\n return asyncio.ensure_future(_stream_redirect(stderr, outerr, write_str=True))\n\n\ndef config_metadata_rules(config):\n flat_config = flatten(config, \"dot\")\n\n rules = []\n for key, value in flat_config.items():\n # .\n # ..\n # ...\n # .properties..\n # .properties..properties..\n tap_stream_id, *props, key = key.split(\".\")\n\n rules.append(\n MetadataRule(\n tap_stream_id=tap_stream_id,\n breadcrumb=property_breadcrumb(props),\n key=key,\n value=value,\n )\n )\n\n return rules\n\n\ndef config_schema_rules(config):\n return [\n SchemaRule(\n tap_stream_id=tap_stream_id,\n breadcrumb=[\"properties\", prop],\n payload=payload,\n )\n for tap_stream_id, stream_config in config.items()\n for prop, payload in stream_config.items()\n ]\n\n\nclass SingerTap(SingerPlugin):\n __plugin_type__ = PluginType.EXTRACTORS\n\n EXTRA_SETTINGS = [\n SettingDefinition(name=\"_catalog\"),\n SettingDefinition(name=\"_state\"),\n SettingDefinition(name=\"_load_schema\", value=\"$MELTANO_EXTRACTOR_NAMESPACE\"),\n SettingDefinition(name=\"_select\", kind=SettingKind.ARRAY, value=[\"*.*\"]),\n SettingDefinition(\n name=\"_metadata\",\n aliases=[\"metadata\"],\n kind=SettingKind.OBJECT,\n value={},\n value_processor=\"nest_object\",\n ),\n SettingDefinition(\n name=\"_schema\",\n kind=SettingKind.OBJECT,\n value={},\n value_processor=\"nest_object\",\n ),\n SettingDefinition(name=\"_select_filter\", kind=SettingKind.ARRAY, value=[]),\n ]\n\n def exec_args(self, plugin_invoker):\n \"\"\"\n Return the arguments list with the complete runtime paths.\n \"\"\"\n args = [\"--config\", plugin_invoker.files[\"config\"]]\n\n catalog_path = plugin_invoker.files[\"catalog\"]\n if file_has_data(catalog_path):\n if \"catalog\" in plugin_invoker.capabilities:\n args += [\"--catalog\", catalog_path]\n elif \"properties\" in plugin_invoker.capabilities:\n args += [\"--properties\", catalog_path]\n else:\n logger.warn(\n \"A catalog file was found, but it will be ignored as the extractor does not advertise the `catalog` or `properties` capability\"\n )\n\n state_path = plugin_invoker.files[\"state\"]\n if file_has_data(state_path):\n if \"state\" in plugin_invoker.capabilities:\n args += [\"--state\", state_path]\n else:\n logger.warn(\n \"A state file was found, but it will be ignored as the extractor does not advertise the `state` capability\"\n )\n\n return args\n\n @property\n def config_files(self):\n return {\n \"config\": f\"tap.{self.instance_uuid}.config.json\",\n 
\"catalog\": \"tap.properties.json\",\n \"catalog_cache_key\": \"tap.properties.cache_key\",\n \"state\": \"state.json\",\n }\n\n @property\n def output_files(self):\n return {\"output\": \"tap.out\"}\n\n @hook(\"before_invoke\")\n async def look_up_state_hook(\n self,\n plugin_invoker: PluginInvoker,\n exec_args: Tuple[str, ...] = (),\n ):\n \"\"\"Look up state before being invoked if in sync mode.\"\"\"\n # Use state only in sync mode (i.e. no args)\n if exec_args:\n return\n\n try:\n await self.look_up_state(plugin_invoker)\n except PluginLacksCapabilityError:\n pass\n\n async def look_up_state( # noqa: WPS231, WPS213\n self, plugin_invoker: PluginInvoker\n ):\n \"\"\"Look up state, cleaning up and refreshing as needed.\"\"\"\n if \"state\" not in plugin_invoker.capabilities:\n raise PluginLacksCapabilityError(\n f\"Extractor '{self.name}' does not support incremental state\"\n )\n\n state_path = plugin_invoker.files[\"state\"]\n\n try:\n # Delete state left over from different pipeline run for same extractor\n state_path.unlink()\n except FileNotFoundError:\n pass\n\n elt_context = plugin_invoker.context\n if not elt_context or not elt_context.job:\n # Running outside pipeline context: incremental state could not be loaded\n return\n\n if elt_context.full_refresh:\n logger.info(\n \"Performing full refresh, ignoring state left behind by any previous runs.\"\n )\n return\n\n custom_state_filename = plugin_invoker.plugin_config_extras[\"_state\"]\n if custom_state_filename:\n custom_state_path = plugin_invoker.project.root.joinpath(\n custom_state_filename\n )\n\n try:\n shutil.copy(custom_state_path, state_path)\n logger.info(f\"Found state in {custom_state_filename}\")\n except FileNotFoundError as err:\n raise PluginExecutionError(\n f\"Could not find state file {custom_state_path}\"\n ) from err\n\n return\n\n # the `state.json` is stored in the database\n state = {}\n incomplete_since = None\n finder = JobFinder(elt_context.job.job_id)\n\n state_job = finder.latest_with_payload(elt_context.session, flags=Payload.STATE)\n if state_job:\n logger.info(f\"Found state from {state_job.started_at}.\")\n incomplete_since = state_job.ended_at\n if \"singer_state\" in state_job.payload:\n merge(state_job.payload[\"singer_state\"], state)\n\n incomplete_state_jobs = finder.with_payload(\n elt_context.session, flags=Payload.INCOMPLETE_STATE, since=incomplete_since\n )\n for state_job in incomplete_state_jobs:\n logger.info(\n f\"Found and merged incomplete state from {state_job.started_at}.\"\n )\n if \"singer_state\" in state_job.payload:\n merge(state_job.payload[\"singer_state\"], state)\n\n if state:\n with state_path.open(\"w\") as state_file:\n json.dump(state, state_file, indent=2)\n else:\n logger.warning(\"No state was found, complete import.\")\n\n @hook(\"before_invoke\")\n async def discover_catalog_hook(\n self,\n plugin_invoker: PluginInvoker,\n exec_args: Tuple[str, ...] = (),\n ):\n \"\"\"Discover Singer catalog before invoking tap if in sync mode.\n\n Args:\n plugin_invoker: The invocation handler of the plugin instance.\n exec_args: List of subcommand/args that we where invoked with.\n \"\"\"\n # Discover only in sync mode (i.e. 
no args)\n if exec_args:\n return\n\n try:\n await self.discover_catalog(plugin_invoker)\n except PluginLacksCapabilityError:\n pass\n\n async def discover_catalog(self, plugin_invoker: PluginInvoker): # noqa: WPS231\n \"\"\"Perform catalog discovery.\n\n Args:\n plugin_invoker: The invocation handler of the plugin instance.\n \"\"\"\n catalog_path = plugin_invoker.files[\"catalog\"]\n catalog_cache_key_path = plugin_invoker.files[\"catalog_cache_key\"]\n\n if catalog_path.exists():\n try:\n cached_key = catalog_cache_key_path.read_text()\n new_cache_key = self.catalog_cache_key(plugin_invoker)\n\n if cached_key == new_cache_key:\n logger.debug(f\"Using cached catalog file\")\n return\n except FileNotFoundError:\n pass\n\n logging.debug(\"Cached catalog is outdated, running discovery...\")\n\n # We're gonna generate a new catalog, so delete the cache key.\n try:\n catalog_cache_key_path.unlink()\n except FileNotFoundError:\n pass\n\n custom_catalog_filename = plugin_invoker.plugin_config_extras[\"_catalog\"]\n if custom_catalog_filename:\n custom_catalog_path = plugin_invoker.project.root.joinpath(\n custom_catalog_filename\n )\n\n try:\n shutil.copy(custom_catalog_path, catalog_path)\n logger.info(f\"Found catalog in {custom_catalog_path}\")\n except FileNotFoundError as err:\n raise PluginExecutionError(\n f\"Could not find catalog file {custom_catalog_path}\"\n ) from err\n else:\n await self.run_discovery(plugin_invoker, catalog_path)\n\n # test for the result to be a valid catalog\n try:\n with catalog_path.open(\"r\") as catalog_file:\n catalog = json.load(catalog_file)\n schema_valid = Draft4Validator.check_schema(catalog)\n except Exception as err:\n catalog_path.unlink()\n raise PluginExecutionError(\n f\"Catalog discovery failed: invalid catalog: {err}\"\n ) from err\n\n async def run_discovery(self, plugin_invoker: PluginInvoker, catalog_path: Path):\n \"\"\"Run tap in discovery mode and store the result.\n\n Args:\n plugin_invoker: The invocation handler of the plugin instance.\n catalog_path: Where discovery output should be written.\n \"\"\"\n if not \"discover\" in plugin_invoker.capabilities:\n raise PluginLacksCapabilityError(\n f\"Extractor '{self.name}' does not support catalog discovery (the `discover` capability is not advertised)\"\n )\n\n try:\n with catalog_path.open(mode=\"wb\") as catalog:\n handle = await plugin_invoker.invoke_async(\n \"--discover\",\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n universal_newlines=False,\n )\n\n invoke_futures = [\n asyncio.ensure_future(_stream_redirect(handle.stdout, catalog)),\n asyncio.ensure_future(handle.wait()),\n ]\n\n if logger.isEnabledFor(logging.DEBUG) and handle.stderr:\n invoke_futures.append(\n _debug_logging_handler(self.name, plugin_invoker, handle.stderr)\n )\n\n done, _ = await asyncio.wait(\n invoke_futures,\n return_when=asyncio.ALL_COMPLETED,\n )\n failed = [future for future in done if future.exception() is not None]\n if failed:\n failed_future = failed.pop()\n raise failed_future.exception()\n exit_code = handle.returncode\n except Exception:\n catalog_path.unlink()\n raise\n\n if exit_code != 0:\n catalog_path.unlink()\n raise PluginExecutionError(\n f\"Catalog discovery failed: command {plugin_invoker.exec_args('--discover')} returned {exit_code}\"\n )\n\n @hook(\"before_invoke\")\n async def apply_catalog_rules_hook(\n self, plugin_invoker: PluginInvoker, exec_args: Tuple[str, ...] 
= ()\n ):\n \"\"\"Apply catalog rules before invoke if in sync mode.\"\"\"\n # Apply only in sync mode (i.e. no args)\n if exec_args:\n return\n\n try:\n self.apply_catalog_rules(plugin_invoker, exec_args)\n except PluginLacksCapabilityError:\n pass\n\n def apply_catalog_rules( # noqa: WPS213,WPS231\n self,\n plugin_invoker: PluginInvoker,\n exec_args: Tuple[str, ...] = (),\n ):\n \"\"\"Apply Singer catalog and schema rules to discovered catalog.\"\"\"\n if (\n not \"catalog\" in plugin_invoker.capabilities\n and not \"properties\" in plugin_invoker.capabilities\n ):\n raise PluginLacksCapabilityError(\n f\"Extractor '{self.name}' does not support entity selection or catalog metadata and schema rules\"\n )\n\n config = plugin_invoker.plugin_config_extras\n\n schema_rules = []\n metadata_rules = []\n\n # If a custom catalog is provided, don't apply catalog rules\n if not config[\"_catalog\"]:\n schema_rules.extend(config_schema_rules(config[\"_schema\"]))\n\n metadata_rules.extend(select_metadata_rules([\"!*.*\"]))\n metadata_rules.extend(select_metadata_rules(config[\"_select\"]))\n metadata_rules.extend(config_metadata_rules(config[\"_metadata\"]))\n\n # Always apply select filters (`meltano elt` `--select` and `--exclude` options)\n metadata_rules.extend(select_filter_metadata_rules(config[\"_select_filter\"]))\n\n if not schema_rules and not metadata_rules:\n return\n\n catalog_path = plugin_invoker.files[\"catalog\"]\n catalog_cache_key_path = plugin_invoker.files[\"catalog_cache_key\"]\n\n try:\n with catalog_path.open() as catalog_file:\n catalog = json.load(catalog_file)\n\n if schema_rules:\n SchemaExecutor(schema_rules).visit(catalog)\n\n if metadata_rules:\n MetadataExecutor(metadata_rules).visit(catalog)\n\n with catalog_path.open(\"w\") as catalog_file:\n json.dump(catalog, catalog_file, indent=2)\n\n cache_key = self.catalog_cache_key(plugin_invoker)\n if cache_key:\n catalog_cache_key_path.write_text(cache_key)\n else:\n try:\n catalog_cache_key_path.unlink()\n except FileNotFoundError:\n pass\n except FileNotFoundError as err:\n raise PluginExecutionError(\n f\"Applying catalog rules failed: catalog file is missing.\"\n ) from err\n except Exception as err:\n catalog_path.unlink()\n raise PluginExecutionError(\n f\"Applying catalog rules failed: catalog file is invalid: {err}\"\n ) from err\n\n def catalog_cache_key(self, plugin_invoker):\n # Treat non-pip plugins as editable/dev-mode plugins and do not cache.\n if plugin_invoker.plugin.pip_url is None:\n return None\n\n # If the extractor is installed as editable, don't cache because\n # the result of discovery could change at any time.\n if plugin_invoker.plugin.pip_url.startswith(\"-e\"):\n return None\n\n extras = plugin_invoker.plugin_config_extras\n\n # If a custom catalog is provided, there's no need to cache\n if extras[\"_catalog\"]:\n return None\n\n # The catalog should be regenerated, and the catalog cache invalidated,\n # if any settings changed that could affect discovery, or if schema or\n # metadata rules changed.\n # Changes to selection rules and selection filter rules are ignored,\n # since \"selected\" metadata is reset using the `!*.*` selection rule anyway.\n key_dict = {\n **plugin_invoker.plugin_config,\n \"_schema\": extras[\"_schema\"],\n \"_metadata\": extras[\"_metadata\"],\n }\n\n key_json = json.dumps(key_dict)\n\n return 
sha1(key_json.encode()).hexdigest()\n","sub_path":"src/meltano/core/plugin/singer/tap.py","file_name":"tap.py","file_ext":"py","file_size_in_byte":17403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"157470370","text":"#! /usr/bin/python\n\nimport json\n\nparameters = {}\n\nwith open(\"seed.json\") as json_data:\n data = json.load(json_data)\n\n for key in data.keys():\n if len(key.split()) > 1:\n param_type = key.split()[0]\n param = key.split()[1]\n\n if param_type not in parameters.keys():\n parameters[param_type] = []\n\n if param.split(\"_\")[-1] in [\"8\", \"16\", \"32\", \"64\"]:\n if \"_\".join(param.split(\"_\")[:-1]) not in parameters[param_type]:\n parameters[param_type].append(\"_\".join(param.split(\"_\")[:-1]))\n else:\n parameters[param_type].append(param)\n else:\n if \"set_parameter\" not in parameters.keys():\n parameters[\"set_parameter\"] = []\n\n parameters[\"set_parameter\"].append(key.upper())\n\nprint(\"\\\\begin{table}[htpb]\")\nprint(\"\\\\centering\")\nprint(\"\\\\begin{tabular}{@{}p{0.14\\columnwidth}p{0.72\\columnwidth}@{}}\")\nprint(\"\\\\toprule\")\nprint(\"Type & \\\\multicolumn{1}{c}{Parameters} \\\\\\\\ \\\\midrule\")\n\nending = \"\\\\\\\\\"\n\nfor param_type in parameters.keys():\n title = \"\"\n\n if param_type == \"set_parameter\":\n title = \"Boolean or Multi-Valued\"\n elif param_type == \"set_operation_latency\":\n title = \"Operation Latency\"\n elif param_type == \"set_resource_constraint\":\n title = \"Resource Constraint\"\n\n line = title + \" & \\\\tiny{\"\n\n for param in parameters[param_type]:\n line += \"\\\\texttt{\" + param.replace(\"_\", \"\\\\_\") + \"}\"\n if param != parameters[param_type][-1]:\n line += \", \"\n\n line += \"} \"\n\n if param_type == \"set_parameter\":\n ending = \"\\\\\\\\ \\\\bottomrule\"\n\n line += ending\n\n print(line)\n\nprint(\"\\\\addlinespace{}\")\nprint(\"\\\\end{tabular}\")\nprint(\"\\\\caption{Subset of All Autotuned LegUP HLS Parameters}\")\nprint(\"\\\\label{tab:params}\")\nprint(\"\\\\end{table}\")\n","sub_path":"post_place_and_route/py/results/generate_parameter_table.py","file_name":"generate_parameter_table.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"203215636","text":"from selenium import webdriver\n\ndef before_all(context):\n\tchromedriver = '/home/prazz93/mamikostest/browsers/chromedriver'\n\toptions = webdriver.ChromeOptions()\n\t# options.add_argument('headless')\n\toptions.add_argument('--incognito')\n\tcontext.browser = webdriver.Chrome(executable_path=chromedriver,chrome_options=options)\n\t# self.driver = webdriver.Chrome('/home/prazz93/mamikostest/browsers/chromedriver')\n\tcontext.browser.delete_all_cookies()\n\tcontext.browser.set_window_size(1920, 1080)\n\tcontext.browser.maximize_window()\n\tcontext.browser.implicitly_wait(5)\n\ndef after_all(context):\n context.browser.quit()\n","sub_path":"mamikostest/features/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"413279367","text":"import MySQLdb\n\n\nclass BaseDB(object):\n \"\"\"\n Common operations for DB instance.\n \"\"\"\n def __init__(self, host, user, password, db):\n self.host = host\n self.user = user\n self.password = password\n self.db = db\n\n def connect(self):\n \"\"\"\n Return a connection to the DB\n \"\"\"\n return 
MySQLdb.connect(host=self.host, user=self.user, passwd=self.password, db=self.db, use_unicode=True,\n charset=\"utf8\")\n\n def fetch_all(self, query, args):\n \"\"\"\n Fetch multiple\n \"\"\"\n connection = self.connect()\n cursor = connection.cursor(MySQLdb.cursors.SSDictCursor)\n cursor.execute(query, args)\n result = cursor.fetchall()\n cursor.close()\n connection.close()\n return result\n\n def fetch_one(self, query, args):\n \"\"\"\n Fetch one\n \"\"\"\n connection = self.connect()\n cursor = connection.cursor(MySQLdb.cursors.SSDictCursor)\n cursor.execute(query, args)\n result = cursor.fetchone()\n cursor.close()\n connection.close()\n return result\n\n def execute_query(self, query):\n \"\"\"\n Execute query (ex: INSERT or UPDATE)\n \"\"\"\n connection = self.connect()\n cursor = connection.cursor(MySQLdb.cursors.SSDictCursor)\n result = cursor.execute(query)\n connection.commit()\n cursor.close()\n connection.close()\n return {\"result\": result, \"lastrowid\": cursor.lastrowid}\n\n\nclass DB(BaseDB):\n\n base_select_string = '''\n urls.url as url,\n urls.protocol as protocol,\n urls.subdomain as subdomain,\n urls.domain as domain,\n urls.tld as tld,\n urls.path as path,\n urls.category as category,\n urls.query as query,\n urls.class_id as class,\n urls.id as id,\n responses.code as response_code\n '''\n\n def get_urls(self, params):\n \"\"\"\n Basic domains call data.\n \"\"\"\n count, page, rcode_g, rcode_l = params\n offset = count*page\n codes_where = \"AND responses.code >= %s AND responses.code <= %s\" if rcode_g or rcode_l else \"\"\n q = '''\n SELECT\n {0}\n FROM\n urls\n LEFT JOIN\n responses ON responses.url_id=urls.id\n WHERE\n urls.id > %s {1}\n ORDER BY\n urls.id ASC\n LIMIT\n %s\n '''.format(self.base_select_string, codes_where)\n args = (offset,)\n args = args + (rcode_g, rcode_l,) if rcode_l or rcode_g else args\n args = args + (count,)\n result = self.fetch_all(q, args)\n return result\n\n def get_urls_for_ip_location(self, country, params):\n \"\"\"\n Get URLs with an IP in a certain country\n \"\"\"\n count, page, rcode_g, rcode_l = params\n codes_where = \"AND responses.code >= %s AND responses.code <= %s\" if rcode_g or rcode_l else \"\"\n q = '''\n SELECT\n locations.country as ip_country,\n locations.ip_id as location_ip_id,\n ips.url_id as url_id,\n ips.id as ip_id,\n {0}\n FROM\n locations\n LEFT JOIN\n ips ON ips.id=locations.ip_id\n LEFT JOIN\n urls ON urls.id=ips.url_id\n LEFT JOIN\n responses ON responses.url_id=urls.id\n WHERE\n locations.country = %s AND urls.id IS NOT NULL {1}\n ORDER BY\n urls.id ASC'''.format(self.base_select_string, codes_where)\n args = (country, )\n args = args + (rcode_g, rcode_l,) if rcode_l or rcode_g else args\n result = self.fetch_all(q, args)\n return result\n\n def get_urls_for_tld(self, tld, params):\n \"\"\"\n Get urls that have a particular TLD\n \"\"\"\n count, page, rcode_g, rcode_l = params\n codes_where = \"AND responses.code >= %s AND responses.code <= %s\" if rcode_g or rcode_l else \"\"\n q = '''\n SELECT\n {0}\n FROM\n urls\n LEFT JOIN\n responses ON responses.url_id=urls.id\n WHERE\n urls.tld = %s {1}\n ORDER BY\n urls.id'''.format(self.base_select_string, codes_where)\n args = (tld,)\n args = args + (rcode_g, rcode_l,) if rcode_l or rcode_g else args\n result = self.fetch_all(q, args)\n return result\n\n def get_urls_for_category(self, category, params):\n \"\"\"\n Get urls for a particular category\n \"\"\"\n count, page, rcode_g, rcode_l = params\n codes_where = \"AND responses.code >= %s AND 
responses.code <= %s\" if rcode_g or rcode_l else \"\"\n q = '''\n SELECT\n {0}\n FROM\n urls\n LEFT JOIN\n responses ON responses.url_id=urls.id\n WHERE\n urls.category = %s {1}\n ORDER BY\n urls.id\n '''.format(self.base_select_string, codes_where)\n args = (category,)\n args = args + (rcode_g, rcode_l,) if rcode_l or rcode_g else args\n result = self.fetch_all(q, args)\n return result\n\n def get_urls_for_class(self, class_id, params):\n \"\"\"\n Get urls that match a certain class\n \"\"\"\n count, page, rcode_g, rcode_l = params\n codes_where = \"AND responses.code >= %s AND responses.code <= %s\" if rcode_g or rcode_l else \"\"\n q = '''\n SELECT\n {0}\n FROM\n urls\n LEFT JOIN\n responses ON responses.url_id=urls.id\n WHERE\n urls.class_id = %s {1}\n ORDER BY\n urls.id'''.format(self.base_select_string, codes_where)\n args = (class_id,)\n args = args + (rcode_g, rcode_l,) if rcode_l or rcode_g else args\n result = self.fetch_all(q, args)\n return result\n\n def get_urls_for_regional_interest(self, region, params):\n \"\"\"\n Get urls for a particular regional interest\n \"\"\"\n count, page, rcode_g, rcode_l = params\n codes_where = \"AND responses.code >= %s AND responses.code <= %s\" if rcode_g or rcode_l else \"\"\n q = '''\n SELECT\n regional_interest.region as region,\n regional_interest.url_id as url_id,\n {0}\n FROM\n regional_interest\n LEFT JOIN\n urls ON urls.id=regional_interest.url_id\n LEFT JOIN\n responses ON responses.url_id=urls.id\n WHERE\n regional_interest.region = %s {1}\n ORDER BY\n urls.id\n '''.format(self.base_select_string, codes_where)\n args = (region, )\n args = args + (rcode_g, rcode_l,) if rcode_l or rcode_g else args\n result = self.fetch_all(q, args)\n return result\n\n def get_urls_for_ip(self, ip, params):\n \"\"\"\n Get urls for a particular ip\n \"\"\"\n count, page, rcode_g, rcode_l = params\n codes_where = \"AND responses.code >= %s AND responses.code <= %s\" if rcode_g or rcode_l else \"\"\n q = '''\n SELECT\n urls.id as url_id,\n {0}\n FROM\n ips\n LEFT JOIN\n urls ON urls.id=ips.url_id\n LEFT JOIN\n responses ON responses.url_id=urls.id\n WHERE\n ips.ip = %s {1}\n ORDER BY\n urls.id\n '''.format(self.base_select_string, codes_where)\n args = (ip, )\n args = args + (rcode_g, rcode_l,) if rcode_l or rcode_g else args\n result = self.fetch_all(q, args)\n return result\n\n def get_urls_for_domain(self, domain, params):\n \"\"\"\n Get urls on a particular domain\n \"\"\"\n host, tld = domain.split(\".\")\n count, page, rcode_g, rcode_l = params\n codes_where = \"AND responses.code >= %s AND responses.code <= %s\" if rcode_g or rcode_l else \"\"\n q = '''\n SELECT\n {0}\n FROM\n urls\n LEFT JOIN\n responses ON responses.url_id=urls.id\n WHERE\n urls.domain = %s AND urls.tld = %s {1}\n ORDER BY\n urls.id\n '''.format(self.base_select_string, codes_where)\n args = (host, tld, )\n args = args + (rcode_g, rcode_l,) if rcode_l or rcode_g else args\n result = self.fetch_all(q, args)\n return result\n\n def get_full_url_obj(self, url_id):\n \"\"\"\n Get a full URL object from id\n \"\"\"\n q = '''\n SELECT\n {0},\n ips.ip as ip_address,\n ips.asn_name as asn_name,\n ips.asn_number as asn_number,\n ips.last_checked as ip_last_checked,\n categories.id as categories_id,\n categories.category_description as category_description,\n classes.id as class_id,\n classes.name as class_name,\n responses.last_checked as response_last_checked,\n regional_interest.region as regional_interest,\n locations.country as ip_country,\n locations.state as ip_state,\n 
locations.city as ip_city,\n locations.code as ip_code,\n locations.latitude as ip_latitude,\n locations.longitude as ip_longitude,\n p1.combinedports as ports,\n p1.combinedtimechecks as port_last_check\n FROM\n urls\n LEFT JOIN\n responses ON responses.url_id=urls.id\n LEFT JOIN\n ips ON ips.url_id=urls.id\n LEFT JOIN\n categories ON categories.id=urls.category\n LEFT JOIN\n classes ON classes.id=urls.class_id\n LEFT JOIN\n regional_interest ON regional_interest.url_id=urls.id\n LEFT JOIN\n locations ON locations.ip_id=ips.id\n LEFT JOIN\n (SELECT p.ip, GROUP_CONCAT(p.port) as combinedports, GROUP_CONCAT(p.last_checked) as combinedtimechecks\n FROM ports p GROUP BY p.ip) p1 ON p1.ip=ips.ip\n WHERE\n urls.id = %s\n ORDER BY\n urls.id\n '''.format(self.base_select_string)\n\n args = (url_id, )\n result = self.fetch_one(q, args)\n return result\n\n def get_categories(self):\n \"\"\"\n Return available categories\n \"\"\"\n q = '''SELECT * FROM categories'''\n result = self.fetch_all(q, ())\n return result\n\n def get_classes(self):\n \"\"\"\n Return available classes\n \"\"\"\n q = '''SELECT * FROM classes'''\n result = self.fetch_all(q, ())\n return result","sub_path":"api/models/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":10635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"199636288","text":"from flask import Flask, render_template, redirect, request, session\nimport random\n\napp = Flask(__name__)\napp.secret_key = 'secret_key'\n\n\n@app.route('/')\ndef index():\n if( session.get('number') is None ):\n session['number'] = random.randrange(0, 101)\n\n if( session.get('text') is None ):\n session['text'] = ''\n\n return render_template('index.html', text=session['text'])\n\n\n@app.route('/guess', methods=['POST'])\ndef guess():\n number = int(request.form['number'])\n\n if number < session['number']:\n session['text'] = \"low\"\n elif number > session['number']:\n session['text'] = \"high\"\n else:\n session['text'] = \"perfect\"\n\n return redirect('/')\n\n\n@app.route('/reset', methods=['POST'])\ndef reset():\n session.pop('text')\n session.pop('number')\n\n return redirect('/')\n\n\napp.run(debug=True)\n","sub_path":"Python/04 - Flask/Flask Fundamentals/06 - number_game/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"328122617","text":"import getopt\nimport os.path\nimport random\nimport string\n\nimport jsonpickle\n\nfrom model.user import User\nimport sys\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of users\", \"file\"])\nexcept getopt.GetoptError as err:\n print(err)\n sys.exit(2)\n\n\nn = 1\nf = \"data/user.json\"\n\nfor o, a in opts:\n if o == \"-n\":\n n = int(a)\n elif o == \"-f\":\n f = a\n\n\ndef random_string(prefix, maxlen):\n symbols = string.ascii_letters + string.digits + \" \"*10\n return prefix + \"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))])\n\ndef random_string_letters(prefix, maxlen):\n symbols_letters = string.ascii_letters + \" \"*5\n return prefix + \"\".join([random.choice(symbols_letters) for i in range(random.randrange(maxlen))])\n\ndef random_string_digits(prefix, maxlen):\n symbols_digits = string.digits + \" \"*5\n return prefix + \"\".join([random.choice(symbols_digits) for i in range(random.randrange(maxlen))])\n\n\n\n\ntestdata = [\n 
User(firstname=random_string(\"firstname\", 10), lastname=random_string(\"lastname\", 10), address1=random_string(\"address1\", 10),\n postcode=\"32-600\", city=random_string_letters(\"city\", 10), phone = random_string_digits(\"+48\", 6),\n email=random_string_letters(\"email\", 1) + \"@\" +random_string_letters( \"\", 3)+ \".com\", password=random_string_letters(\"\", 7),)\n for i in range(n)\n]\n\n\nfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f)\n\nwith open(file, \"w\") as out:\n jsonpickle.set_encoder_options(\"json\", indent=2)\n out.write(jsonpickle.encode(testdata))\n\n","sub_path":"generator/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"541815124","text":"#!/usr/bin/python3\n\nimport os\nimport signal\nimport sys\nimport logging\nfrom optparse import OptionParser\n\n# Setup logging\nscriptFolder = os.path.dirname(os.path.realpath(__file__))\n\n\ndef exit_gracefully(signum, frame):\n print('\\nExiting, user pressed Ctrl-C')\n sys.exit(1)\n\n\nif __name__ == '__main__':\n signal.signal(signal.SIGINT, exit_gracefully)\n\n parser = OptionParser()\n\n parser.add_option(\"-b\", action=\"store_false\", dest=\"boolOptionVar\", default=False,\n help=\"Set boolean option example\")\n\n parser.add_option(\"-s\", action=\"store_true\", dest=\"stringOptionVar\", default=False,\n help=\"Set string value example\")\n\n (options, args) = parser.parse_args()\n\n logging.debug(\"Started Script\")\n\n\n","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"186357549","text":"from django.conf.urls import url, include\nfrom users.views import *\n\n\nurlpatterns = [\n url(r'^(?P\\d+)/$', ProfileUserView.as_view(), name='user'),\n url(r'^posts/(?P\\d+)/$', UserAdsView.as_view(), name='user_ads'),\n url(r'^courses/(?P\\d+)/$', UserCoursesView.as_view(), name='my_courses'),\n url(r'^ankets/(?P\\d+)/$', UserAnketsView.as_view(), name='user_ankets'),\n url(r'^blacklist/(?P\\d+)/$', UserBlackListView.as_view(), name='user_blacklist'),\n url(r'^my_favorites/(?P\\d+)/$', UserFavoriteView.as_view(), name='user_favorite'),\n url(r'^my_subscribe/(?P\\d+)/$', MySubscribeView.as_view(), name='my_subscribes'),\n url(r'^subscribes/(?P\\d+)/$', SubscribesView.as_view(), name='subscribes'),\n url(r'^settings/(?P\\d+)/$', UserSettingsView.as_view(), name='user_settings'),\n\n url(r'^progs/', include('users.url.progs')),\n url(r'^ad_progs/', include('users.url.ad_progs')),\n url(r'^skill_progs/', include('users.url.skill_progs')),\n url(r'^love_progs/', include('users.url.love_progs')),\n url(r'^manage/', include('users.url.manage')),\n]\n","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"507307918","text":"from scoreEntry import ScoreEntry\nfrom collections import Counter\n\nclass CalcFullHouse(ScoreEntry):\n\n def calculateKat(self, newDice):\n if self.isAllowed(newDice):\n self.points = 25\n\n def isAllowed(self, newDice):\n l = [x.value for x in newDice]\n cnt = Counter(l)\n print(cnt)\n for k, v in cnt.items():\n if v == 3:\n for k, v in cnt.items():\n if v == 2:\n return True\n return 
False\n","sub_path":"kniffel_oop/calcFullHouse.py","file_name":"calcFullHouse.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"42461503","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Keurfon Luu \nLicense: MIT\n\"\"\"\nimport glob\nimport numpy as np\nimport os, sys, time\nfrom argparse import ArgumentParser\nfrom copy import deepcopy\ntry:\n from mpi4py import MPI\n mpi_exist = True\nexcept ImportError:\n mpi_exist = False\ntry:\n from evodcinv import DispersionCurve, LayeredModel, progress\nexcept ImportError:\n sys.path.append(\"../\")\n from evodcinv import DispersionCurve, LayeredModel, progress\n \n\n## generate curve list \n# input: picked curve data\ncurvelist =glob.glob(\"./data/DAS_pick_4km/*.txt\")\n\n## generate curve tuple - inside loop\n## tag the curve filename in the inversion result\n## print the output into txt file - by title\nfor curve in curvelist:\n\n if __name__ == \"__main__\":\n # Initialize MPI\n if mpi_exist:\n mpi_comm = MPI.COMM_WORLD\n mpi_rank = mpi_comm.Get_rank()\n else:\n mpi_rank = 0\n\n print(mpi_rank)\n \n # Parse arguments\n parser = ArgumentParser()\n parser.add_argument(\"-n\", \"--num_threads\", type = int, default = 8)\n args = parser.parse_args()\n \n # Parameters\n ny = 200 # Number of velocity discretization points\n max_run = 10 # Number of runs\n outdir = \"test_0124_4layer\" # Output directory\n \n # Inversion boundaries\n # - params\n # - beta: S-wave boundaries in m/s\n beta = np.array([ [ 100., 1000. ], [ 500., 2500. ], [ 1000., 4000. ]])#,[1200.,4200. ]])\n # - NOTE: final layer \n# [100.,1000.]\n thickness = np.array([ [ 100., 1000. ], [ 100., 500. ], [ 99999., 99999. ] ])\n \n # Initialize dispersion curves\n # - param in tuple. filename, wtype, mode\n\n # data- frequency list - phase vel\n # - 97 data points in the example\n disp_param = [\n ( curve, \"rayleigh\", 0 ),\n ]\n \n dcurves = []\n for param in disp_param:\n filename, wtype, mode = param\n faxis, disp = np.loadtxt(filename, unpack = True)\n dc = DispersionCurve(disp, faxis, mode, wtype)\n dcurves.append(dc)\n\n # Evolutionary optimizer parameters\n ## Edit this for loop control\n evo_kws = dict(popsize = 20, max_iter = 100, constrain = True, mpi = mpi_exist)\n opt_kws = dict(solver = \"cpso\")\n \n # Multiple inversions\n # - first proc make the folder\n if mpi_rank == 0:\n starttime = time.time()\n os.makedirs(outdir, exist_ok = True)\n progress(-1, max_run, \"perc\", prefix = \"Inverting dispersion curves: \")\n \n # list of layered models\n models = []\n for i in range(max_run):\n lm = LayeredModel()\n lm.invert(dcurves, beta, thickness, ny = ny, n_threads = args.num_threads,\n evo_kws = evo_kws, opt_kws = opt_kws)\n if mpi_rank == 0:\n # inversion results are saved in pickle\n # whole lm object\n lm.save(\"%s/run%d.pickle\" % (outdir, i+1))\n models.append(deepcopy(lm))\n progress(i, max_run, \"perc\", prefix = \"Inverting dispersion curves: \")\n \n if mpi_rank == 0:\n# useful Output part\n# but anyhow only one output is written(4 in console)\n with open(curve[20:-4]+'_1_lay_3.txt','w') as f:\n saved_stdout = sys.stdout\n sys.stdout = f\n print(\"\\n\")\n misfits = [ m.misfit for m in models ]\n# get the output of the model w/ lowest misfit. 
check the __str__ of layered model\n print(models[np.argmin(misfits)])\n print(\"Elapsed time: %.2f seconds\\n\" % (time.time() - starttime))\n sys.stdout.close()\n sys.stdout = saved_stdout\n","sub_path":"examples/dcinvTest_mulCurve.py","file_name":"dcinvTest_mulCurve.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"325739666","text":"import runWorld as rw\nimport drawWorld as dw\nimport pygame as pg\nimport time\nfrom pygame.locals import *\nfrom random import randint\n\n################################################################\n\n# This program is an interactive simulation/game. A cat starts\n# to move across the screen. The direction of movement is reversed\n# on each \"mouse down\" event.\n#\n# The state of the cat is represented by a tuple (pos, delta-pos).\n# The first element, pos, represents the x-coordinate of the cat.\n# The second element, delta-pos, represents the amount that the\n# position changes on each iteration of the simulation loop.\n#\n# For example, the tuple (7,1) would represent the cat at x-coord,\n# 7, and moving to the right by 1 pixel per \"clock tick.\"\n#\n# The initial state of the cat in this program is (0,1), meaning that the cat\n# starts at the left of the screen and moves right one pixel per tick.\n#\n# Pressing a mouse button down while this simulation run updates the cat state\n# by leaving pos unchanged but reversing delta-pos (changing 1 to -1 and vice\n# versa). That is, pressing a mouse key reverses the direction of the\n# cat.\n#\n# The simulation ends when the cat is allowed to reach either the left\n# or the right edge of the screen.\n\n################################################################\n# Class definitions\nclass animal:\n def __init__(self,imgname):\n self.image=dw.loadImage(imgname)\n\nclass world(animal):\n frameRate = 60\n tiffany = (66,199,249)\n def __init__(self):\n#One time variable. Only to be used in the initalization method\n inst_line1 = \"When the cat catches the fish, you win!\"\n inst_line2 = \"Every time the cat reaches one of the points, it would accelerate.\"\n inst_line3 = \"When the cat reaches the lower and upper bounds, it will bounce back.\"\n inst_line4 = \"When the cat touches the left and right sides, it will die and the game is over.\"\n inst_line5 = \"Click the mouse and get start. Enjoy!\"\n name = \"Cat Fun. 
Press the mouse (but not too fast)!\"\n#Object variable to be used in other methods\n self.width = 500\n self.height = 500\n self.fig = 20\n self.firstdisp=True\n self.initState = (randint(100,399),0,randint(200,399),0) #initial status, x-cord, x-v, y-cord, y-v\n self.fishState = (randint(100,300),randint(50,150))\n self.acpntsState = (50,350,50,350) #accerlerating points locations (x1,x1,y1,y2)\n self.acstate=False\n self.maxstate=10\n# Initialize world\n################################################################\n rw.newDisplay(self.width, self.height, name)\n self.mylabel0 = dw.makeLabel(\"Instructions:\",\"serif\",24,dw.black)\n self.mylabel1 = dw.makeLabel(inst_line1,\"serif\",12,dw.black)\n self.mylabel2 = dw.makeLabel(inst_line2,\"serif\",12,dw.black)\n self.mylabel3 = dw.makeLabel(inst_line3,\"serif\",12,dw.black)\n self.mylabel4 = dw.makeLabel(inst_line4,\"serif\",12,dw.black)\n self.mylabel5 = dw.makeLabel(inst_line5,\"serif\",12,dw.black)\n self.cat=animal(\"cat.bmp\")\n self.fish=animal(\"Fish.bmp\")\n self.pnt=animal(\"circle.bmp\")\n\n# state -> image (IO)\n# draw the cat halfway up the screen (height/2) and at the x\n# coordinate given by the first component of the state tuple\n#\n# Display the state by drawing a cat at that x coordinate\n def updateDisplay(self,state):\n dw.fill(world.tiffany)\n if(self.firstdisp):\n dw.draw(self.mylabel0,(50,70))\n dw.draw(self.mylabel1,(50,100))\n dw.draw(self.mylabel2,(50,120))\n dw.draw(self.mylabel3,(50,140))\n dw.draw(self.mylabel4,(50,160))\n dw.draw(self.mylabel5,(50,180))\n #display fish, cat, and points after the first click\n else:\n dw.draw(self.cat.image, (state[0],state[2])) #x,y coordinate\n dw.draw(self.pnt.image, (self.acpntsState[0],self.acpntsState[2])) #x,y coordinate\n dw.draw(self.pnt.image, (self.acpntsState[0],self.acpntsState[3])) #x,y coordinate\n dw.draw(self.pnt.image, (self.acpntsState[1],self.acpntsState[2])) #x,y coordinate\n dw.draw(self.pnt.image, (self.acpntsState[1],self.acpntsState[3])) #x,y coordinate\n dw.draw(self.fish.image, self.fishState) #x,y coordinate\n\n\n\n\n\n################################################################\n\n# Change pos by delta-pos, leaving delta-pos unchanged\n# Note that pos is accessed as state[0], and delta-pos\n# as state[1]. Later on we'll see how to access state\n# components by name (as we saw with records in Idris).\n#\n# state -> state\n def updateState(self,state):\n # if(state[0]+state[1]>width-fig and state[1]>0): #condition1\n # state1=0-state[1] #if condtition1 satisfied\n # elif(state[0]+state[1]self.height-self.fig//2 and state[3]>0):\n state3=0-state[3]\n elif(state[2]+state[3]self.acpntsState[0]-self.fig//2)) or ((state[0]self.acpntsState[1]-self.fig//2)) ):\n if( ((state[2]self.acpntsState[2]-self.fig//2)) or ((state[2]self.acpntsState[3]-self.fig//2)) ):\n if(self.acstate==False): #Double speed only once in each accelerating point\n if(state1 bool\n def endState(self,state):\n if (state[0] > self.width or state[0] < 0 or state [2] > self.height or state[2] < 0):\n return True\n #Endstate happens when cat catches the fish\n elif((state[0]self.fishState[0]-self.fig*2) and (state[2]self.fishState[1]-self.fig*2) ):\n return True\n else:\n return False\n\n\n################################################################\n\n# We handle each event by printing (a serialized version of) it on the console\n# and by then responding to the event. If the event is not a \"mouse button down\n# event\" we ignore it by just returning the current state unchanged. 
Otherwise\n# we return a new state, with pos the same as in the original state, but\n# delta-pos reversed: if the cat was moving right, we update delta-pos so that\n# it moves left, and vice versa. Each mouse down event changes the cat\n# direction. The game is to keep the cat alive by not letting it run off the\n# edge of the screen.\n#\n# state -> event -> state\n#\n    def handleEvent(self,state, event):\n        #print(\"Handling event: \" + str(event))\n        if (event.type == pg.MOUSEBUTTONDOWN):\n            self.firstdisp = False \n            if state[1] > 0:\n                newState1 = 0-randint(1,3)\n            else:\n                newState1 = randint(1,3)\n            if state[3] > 0:\n                newState3 = 0-randint(1,3)\n            else:\n                newState3 = randint(1,3)\n            #print(newState1,newState3)\n            #print('success')\n            return((state[0],newState1,state[2],newState3))\n        else:\n            #print('unsuccess')\n            return(state)\n    \n################################################################\n\n# World state will be a single x coordinate at left edge of world\n\n# The cat starts at the left, moving right\n# Set the initial speed to 0, so that the cat starts to move only after the first click\n# Run the simulation no faster than 60 frames per second\n\n# Run the simulation!\nnewworld=world()\nrw.runWorld(newworld.initState, newworld.updateDisplay, newworld.updateState, newworld.handleEvent,\n            newworld.endState, world.frameRate)\n","sub_path":"catoo.py","file_name":"catoo.py","file_ext":"py","file_size_in_byte":8965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"654323117","text":"import http\nimport json\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.http import JsonResponse, QueryDict\nfrom django.template.loader import render_to_string\nfrom oscar.core.loading import get_model, get_class\n\nfrom apps.payment.gateway import ApiClient\nfrom apps.payment.constants import SETTLEMENT, PAID, BANK_TRANSFER, MANDIRI, PERMATA, CANCELED, \\\n    CREDIT_CARD\n\nfrom apps.order.models import PaymentEvent, PaymentEventQuantity, PaymentEventType, bank_name_from_mask\n\nlog = logging.getLogger('smesco')\n\nSource = get_model('payment', 'Source')\n\nPaymentTransaction = get_model('payment', 'Transaction')\nEventHandler = get_class('order.processing', 'EventHandler')\n\n\ndef build_message(order_data: dict):\n    if order_data.get(\"payment_type\") == BANK_TRANSFER:\n        if order_data.get(\"va_numbers\"):\n            bank = order_data.get(\"va_numbers\")[0].get(\"bank\").upper()\n            va_number = order_data.get(\"va_numbers\")[0].get(\"va_number\")\n        elif order_data.get(\"permata_va_number\"):\n            bank = PERMATA.upper()\n            va_number = order_data.get(\"permata_va_number\")\n        return f\"Bank Name: {bank}, VA Number : {va_number}\"\n\n    elif order_data.get(\"payment_type\") == MANDIRI:\n        bank = \"mandiri\"\n        biller_code = order_data.get(\"biller_code\")\n        va_number = order_data.get(\"bill_key\")\n        return f\"Bank Name: {bank}, Biller Code: {biller_code}, VA Number : {va_number}\"\n\n    elif order_data.get(\"payment_type\") == CREDIT_CARD:\n        masked_card = bank_name_from_mask(order_data.get(\"masked_card\"))\n        card_type = order_data.get(\"card_type\")\n        status = order_data.get(\"channel_response_message\")\n        return f\"{masked_card.bank_name} {card_type.title()} Card({masked_card.card_type}), Status: {status}\"\n\n\ndef get_source(order):\n    return Source.objects.get(order=order)\n\n\ndef midtrans_order_exist(request):\n    client = 
ApiClient(settings.MIDTRANS.get(\"SERVER_KEY\"), settings.MIDTRANS.get(\"SANDBOX\"))\n body = json.loads(request.body)\n order_exist = client.get_order_status(body.get('order_id'))\n\n if order_exist.get(\"payment_type\") == CREDIT_CARD:\n return order_exist\n\n if body.get('transaction_status') != order_exist.get('transaction_status'):\n return JsonResponse({\"messages\": \"OK!\"}, status=http.HTTPStatus.OK)\n\n return order_exist\n\n\ndef create_order_event(order, order_exist, expired=False):\n event_type, __ = PaymentEventType.objects.get_or_create(\n name=order_exist.get('transaction_status'))\n\n event = PaymentEvent.objects.create(\n order=order,\n event_type=event_type, amount=order_exist.get('gross_amount'),\n reference=order_exist.get('transaction_id'))\n\n for line in order.lines.all():\n PaymentEventQuantity.objects.create(\n event=event, line=line, quantity=line.quantity)\n\n if expired:\n line.stockrecord.cancel_allocation(line.quantity)\n log.info(f\"finish create order event for {order}\")\n\n\ndef create_order_notes(order, order_exist, expired=False):\n status = CANCELED.title() if expired else order_exist.get('transaction_status').title()\n order.notes.create(\n message=f\"{build_message(order_exist)} And {status}\", note_type=\"System\",\n user=order.user)\n\n\ndef process_not_found_order(order, **kwargs):\n # we just send OK if not on SANDBOX MODE\n if settings.MIDTRANS.get(\"SANDBOX\"):\n message = f\"Order invalid, order number: {order.number} \"\n return JsonResponse({\"message\": message}, status=http.HTTPStatus.NOT_FOUND)\n return JsonResponse({\"message\": \"OK\"}, status=http.HTTPStatus.OK)\n\n\ndef create_payment_transaction(order, source, order_exist):\n transaction = PaymentTransaction.objects.create(source=source, amount=order_exist.get('gross_amount'))\n transaction.txn_type = order_exist.get('payment_type')\n transaction.reference = order_exist.get('transaction_id')\n if (order.status != PAID) and (order_exist.get('transaction_status') == SETTLEMENT):\n transaction.status = PAID\n else:\n transaction.status = order_exist.get('transaction_status')\n transaction.save()\n return transaction\n\n\ndef send_email_paid(order, **kwargs):\n current_site = get_current_site(kwargs.get('request'))\n\n mail_subject = f'Terima Kasih, Pembayaran Order {order.number} Berhasil'\n message_html = render_to_string('customer/emails/commtype_order_va_paid_body.html', {\n 'user': order.user,\n 'domain': current_site.domain,\n 'order': order\n })\n\n message_text = render_to_string('customer/emails/commtype_order_va_paid_body.txt', {\n 'user': order.user,\n 'domain': current_site.domain,\n 'order': order\n })\n to_email = order.user.email\n email = EmailMultiAlternatives(\n mail_subject, message_text, to=[to_email], from_email=settings.OSCAR_FROM_EMAIL\n )\n email.attach_alternative(message_html, 'text/html')\n email.send()\n log.info(f\"Email for Order {order.order_number} was Sent\")\n\n\ndef process_paid_order(order, **kwargs):\n \"\"\"\n\n :param order: Order object\n :param kwargs:\n :return:\n \"\"\"\n source = get_source(order)\n order_exist = midtrans_order_exist(kwargs.get(\"request\"))\n\n if (order.status == PAID) and (order_exist.get('transaction_status') == SETTLEMENT):\n return JsonResponse({\"message\": \"Already Paid\"}, status=http.HTTPStatus.OK)\n\n create_payment_transaction(order, source, order_exist)\n\n order.set_status(PAID)\n source.amount_debited = order_exist.get('gross_amount')\n source.save()\n\n create_order_event(order, order_exist)\n\n 
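# The payment event and its per-line quantities are recorded above; next attach\n    # a human-readable note (payment channel details plus status) for auditing.\n    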
create_order_notes(order, order_exist)\n\n    if order_exist.get(\"payment_type\") != CREDIT_CARD:\n        send_email_paid(order, request=kwargs.get(\"request\"))\n    log.info(f\"Order Id {order.order_number} has been Paid\")\n    return JsonResponse({\"message\": \"Success\"}, status=http.HTTPStatus.OK)\n\n\ndef process_placed_order(order, **kwargs):\n    \"\"\"\n\n    :param order: Order object\n    :param kwargs:\n    :return:\n    \"\"\"\n    if order.payment_events.filter(event_type__code=\"pending\").count() > 2:\n        return JsonResponse({\"message\": \"Payment Pending\"}, status=http.HTTPStatus.OK)\n\n    source = get_source(order)\n    order_exist = midtrans_order_exist(kwargs.get(\"request\"))\n\n    create_payment_transaction(order, source, order_exist)\n\n    create_order_event(order, order_exist)\n\n    create_order_notes(order, order_exist)\n    log.info(f\"Order Id {order.order_number} has been Placed by : System\")\n    return JsonResponse({\"message\": \"Success\"}, status=http.HTTPStatus.OK)\n\n\ndef process_expired_order(order, **kwargs):\n    \"\"\"\n\n    :param order: Order object\n    :param kwargs:\n    :return:\n    \"\"\"\n    order.set_status(CANCELED)\n    source = get_source(order)\n    order_exist = midtrans_order_exist(kwargs.get(\"request\"))\n\n    create_payment_transaction(order, source, order_exist)\n\n    create_order_event(order, order_exist, expired=True)\n\n    create_order_notes(order, order_exist, expired=True)\n    log.info(f\"Order Id {order.order_number} has been Cancelled by : System\")\n\n    return JsonResponse({\"message\": \"Success\"}, status=http.HTTPStatus.OK)\n\n\ndef process_cancel_order(request, order=False):\n    client = ApiClient(settings.MIDTRANS.get(\"SERVER_KEY\"), settings.MIDTRANS.get(\"SANDBOX\"))\n    order_id = order if order else QueryDict(request.body).get(\"order\")\n    response = client.cancel_payment(order_id)\n    log.info(f\"Order Id {order_id} has been Cancelled by : {request.user}\")\n    return response\n\n\ndef process_capture_order(order, **kwargs):\n    \"\"\"\n\n    :param order:Order object\n    :param kwargs:\n    :return: JsonResponse\n    \"\"\"\n    source = get_source(order)\n    order_exist = midtrans_order_exist(kwargs.get(\"request\"))\n\n    if (order.status == PAID) and (order_exist.get('transaction_status') == SETTLEMENT):\n        return JsonResponse({\"message\": \"Already Paid\"}, status=http.HTTPStatus.OK)\n\n    create_payment_transaction(order, source, order_exist)\n\n    order.set_status(PAID)\n    source.amount_debited = order_exist.get('gross_amount')\n    source.save()\n\n    create_order_event(order, order_exist)\n\n    create_order_notes(order, order_exist)\n\n    send_email_paid(order, request=kwargs.get(\"request\"))\n    log.info(f\"process capture payment for order {order.order_number} Success\")\n\n    return JsonResponse({\"message\": \"Success\"}, status=http.HTTPStatus.OK)\n","sub_path":"src/smesco/apps/payment/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":8513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"648690125","text":"from collections import defaultdict\nimport logging\nimport pprint\n\n\nfrom test.framework.objectimport.loadobject import load_object\nfrom test.framework.utils.defer import process_parallel, process_chain, process_chain_both\nlogger = logging.getLogger(__name__)\n\n\n\nclass MiddlewareManager(object):\n    \"\"\"\n    Parent class of all middlewares\n    \"\"\"\n    component_name = 'Parent Middleware'\n\n    def __init__(self,*middlewares):\n        # logger.debug(\"MiddlewareManager:%s initialized...\"%self.component_name)\n        logger.debug(*self.lfm.crawled(\n            \"Middleware\", 
self.component_name,\n            'initialized'\n        ))\n        self.clsnames = middlewares[0]\n        self.middlewares = middlewares[1]\n\n        self.methods = defaultdict(list)\n        for name,mw in zip(self.clsnames,self.middlewares):\n            self.methods[name].append(mw)\n            self._add_middleware(mw)\n\n    @classmethod\n    # Subclasses implement this method to build the middleware list from settings;\n    # if a subclass does not override it, NotImplementedError is raised\n    def _get_mwlist_from_settings(cls,settings):\n        raise NotImplementedError\n\n    @classmethod\n    def from_settings(cls,settings,crawler=None):\n        \"\"\"\n        Load the default middlewares from the default settings\n        :param settings:\n        :param crawler:\n        :return:\n        \"\"\"\n        cls.lfm = crawler.logformatter\n        try :\n            mwlist = cls._get_mwlist_from_settings(settings)\n            middlewares = []\n            clsnames = []\n            enabled = []\n            for clspath in mwlist:\n                try:\n                    clsname = clspath.split('.')[-1]\n                    mwcls = load_object(clspath)\n                    # When mwcls is a class, the two if-branches decide whether it is\n                    # tied to the crawler or to the settings, and instantiate every middleware\n                    if crawler and hasattr(mwcls,'from_crawler'):\n                        mw = mwcls.from_crawler(crawler)\n                    elif hasattr(mwcls,'from_settings'):\n                        mw = mwcls.from_settings(settings)\n                    else:\n                        mw = mwcls\n                    middlewares.append(mw)\n                    enabled.append(clspath)\n                    clsnames.append(clsname)\n                except Exception as e :\n                    if e.args:\n                        args = {'clsname': clsname, 'eargs': e.args[0]}\n                        logger.warning(*cls.lfm.crawled(\n                            \"Middleware\",cls.component_name,\n                            'not enabled: {clsname}: {eargs}'.format(**args))\n                        )\n            if len(middlewares) != len(clsnames):\n                raise ImportError(\"middleware loading incomplete\")\n            if middlewares and clsnames:\n                # logger.info(\"Enabled middlewares of %(componentname)s:\\n%(enabledlist)s\",\n                #             {'componentname': cls.component_name,\n                #              'enabledlist': pprint.pformat(enabled)},\n                #             extra={'crawler': crawler})\n                for mw in enabled:\n                    logger.info(*cls.lfm.crawled(\n                        \"Middleware\", cls.component_name,\n                        'enabled middleware: %s' %mw))\n            return cls(clsnames, middlewares)\n        except Exception as e :\n            logger.error(*cls.lfm.error(\n                \"Middleware\", cls.component_name,\n                function=None,\n                msg = e),\n                exc_info = True)\n\n    @classmethod\n    def from_crawler(cls,crawler):\n        return cls.from_settings(crawler.settings,crawler)\n\n    def _add_middleware(self,mw):\n        # If a middleware defines spider-handling methods, they are registered so\n        # that spiders are opened in order and closed in reverse order\n        # open:spider1->spider2->spider3\n        # close:spider3->spider2->spider1\n        if hasattr(mw,\"open_spider\"):\n            self.methods['open_spider'].append(mw.open_spider)\n        if hasattr(mw,\"close_spider\"):\n            self.methods['close_spider'].insert(0,mw.close_spider)\n\n    def _process_parallel(self,methodname,obj,*args):\n        return process_parallel(self.methods[methodname],obj,*args)\n\n    def _process_chain(self,methodname,obj,*args):\n        return process_chain(self.methods[methodname],obj,*args)\n\n    def _process_chain_both(self, cb_methodname, eb_methodname, obj, *args):\n        return process_chain_both(self.methods[cb_methodname],\n                                  self.methods[eb_methodname], obj, *args)\n\n    def open_spider(self, spider):\n        return self._process_parallel('open_spider', spider)\n\n    def close_spider(self, spider):\n        return self._process_parallel('close_spider', spider)\n\n\n\n","sub_path":"test/framework/middleware/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"179067935","text":"# Filename : calendarDemo.py\n# author : www.oneplusone.top\n\n# import the calendar module\nimport calendar\n\n# read the year and month to display\nyy = int(input(\"Enter year: \"))\nmm = int(input(\"Enter month: \"))\n\n# print the calendar for that month\nprint(calendar.month(yy,mm))\n\ncalendarstr=calendar.month(yy,mm).__str__()\n\n# Python's count() method counts how many times a character occurs in a string;\n# the optional arguments give the start and end positions of the search.\nprint(calendarstr.count(\"1\"))\n\n# Enter year: 2018\n# Enter month: 6\n# 
June 2018\n# Mo Tu We Th Fr Sa Su\n# 1 2 3\n# 4 5 6 7 8 9 10\n# 11 12 13 14 15 16 17\n# 18 19 20 21 22 23 24\n# 25 26 27 28 29 30\n#\n# 14","sub_path":"com/lc/demoKu/calendarDemo.py","file_name":"calendarDemo.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"174501070","text":"from nmigen import *\n\nfrom cores.csr_bank import StatusSignal\nfrom util.stream import StreamEndpoint\n\n\nclass StreamCombiner(Elaboratable):\n def __init__(self, streams):\n self._streams = streams\n\n self.has_last = self._streams[0].has_last\n assert not any(self.has_last is not other.has_last for other in self._streams)\n\n width = sum(len(stream.payload) for stream in self._streams)\n self.output = StreamEndpoint(width, is_sink=False, has_last=self.has_last)\n\n self.different_last_error = StatusSignal()\n self.different_valid_error = StatusSignal()\n\n def elaborate(self, platform):\n m = Module()\n\n highest_bit = 0\n for i, stream in enumerate(self._streams):\n sink = StreamEndpoint.like(stream, is_sink=True, name=\"stream_combiner_sink_{}\".format(i))\n sink.connect(stream)\n m.d.comb += stream.ready.eq(self.output.ready)\n m.d.comb += self.output.payload[highest_bit:highest_bit + len(stream.payload)].eq(stream.payload)\n highest_bit += len(stream.payload)\n\n m.d.comb += self.output.valid.eq(stream.valid)\n with m.If(self.output.valid != stream.valid):\n m.d.sync += self.different_valid_error.eq(1)\n\n if self.has_last:\n m.d.comb += self.output.last.eq(stream.last)\n with m.If(self.output.last != stream.last):\n m.d.sync += self.different_last_error.eq(1)\n\n return m\n","sub_path":"src/cores/stream/combiner.py","file_name":"combiner.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"348856809","text":"\"\"\"\nProblem:\n\nYou are given an N * M matrix of 0s and 1s. Starting from the top left corner, how many ways are there to reach the bottom right corner?\nYou can only move right and down. 
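(For example, in a 2 x 2 grid of all 0s there are exactly two such paths: right-then-down and down-then-right.) 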
0 represents an empty space while 1 represents a wall you cannot walk through.\n\nExample:\n\nInput = [[0, 0, 1],\n         [0, 0, 1],\n         [1, 0, 0]]\nOutput = 2\n(As there are only two ways to get to the bottom right:\n* Right, down, down, right\n* Down, right, down, right\nThe top left corner and bottom right corner will always be 0.)\n\"\"\"\n\n# FUNCTION TO PERFORM THE OPERATION\ndef get_possible_paths(mat):\n    # getting the dimensions of the matrix\n    n, m = len(mat), len(mat[0])\n\n    # resetting the values of 1 to -1 as positive numbers are used to construct the paths\n    for i in range(n):\n        for j in range(m):\n            if mat[i][j] == 1:\n                mat[i][j] = -1\n\n    # setting each element of the vertically down path as 1 (until a wall or the end is encountered)\n    for i in range(n):\n        if mat[i][0] == -1:\n            break\n        else:\n            mat[i][0] = 1\n\n    # setting each element of the horizontal (right-side) path as 1 (until a wall or the end is encountered)\n    for i in range(m):\n        if mat[0][i] == -1:\n            break\n        else:\n            mat[0][i] = 1\n\n    # iterating through the matrix, updating the paths\n    for i in range(1, n):\n        for j in range(1, m):\n            # if the current position is not a wall, the value is updated\n            if mat[i][j] != -1:\n                # res stores how many ways the current position can be reached\n                res = 0\n\n                # updating the values based on whether it is a wall\n                if mat[i - 1][j] != -1:\n                    res += mat[i - 1][j]\n                if mat[i][j - 1] != -1:\n                    res += mat[i][j - 1]\n\n                # storing the value in the matrix\n                mat[i][j] = res\n\n    # returning the required value\n    return mat[-1][-1]\n\n\n# DRIVER CODE\nmatrix = [[0, 0, 1], [0, 0, 1], [1, 0, 0]]\nprint(get_possible_paths(matrix))\n\nmatrix = [[0, 0, 1], [1, 0, 1], [1, 0, 0]]\nprint(get_possible_paths(matrix))\n\nmatrix = [[0, 0, 0], [1, 0, 0], [0, 0, 0]]\nprint(get_possible_paths(matrix))\n\n# end cannot be reached as only right and down traversal is allowed\nmatrix = [[0, 0, 0], [1, 1, 0], [0, 0, 0], [0, 1, 1], [0, 0, 0]]\nprint(get_possible_paths(matrix))\n","sub_path":"Solutions/158.py","file_name":"158.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"346032185","text":"# There are three operations that can be used on an integer X:\n# If X is divisible by 3, divide it by 3.\n# If X is divisible by 2, divide it by 2.\n# Subtract 1.\n# Given an integer N, we want to make it 1 by applying the three operations above appropriately. 
Print the minimum number of operations used.\n\nin1 = int(input())\nans = [0 for x in range(in1+1)]\n\nfor i in range(2, in1+1):\n    ans[i] = ans[i-1] + 1\n    if i % 2 == 0:\n        ans[i] = min([ans[i], ans[i//2]+1])\n    if i % 3 == 0:\n        ans[i] = min([ans[i], ans[i//3]+1])\n\nprint(ans[in1])","sub_path":"코테/백준/DP_baekjoon_1463.py","file_name":"DP_baekjoon_1463.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"293440068","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\n\n\nclass VdW:\n    \"\"\"\n    Class containing all Van der Waals equation related functions.\n    \"\"\"\n    def __init__(self):\n        pass\n    \n    def a_m_sol(self, s, p):\n        \"\"\"\n        Return explicit solution for 'm', at 'a', 'T'\n        \n        Parameters\n        ----------\n        s : dictionary\n            Contains the current temperature 'T' and attraction parameter 'a'.\n        \n        p : dictionary\n            Contains the critical parameters 'T_c', 'a_c'.\n        \n        Dependencies\n        ------------\n        math\n        \"\"\"\n        from math import sqrt, log \n        if p['Model'] == \"Soave\":\n            p['m'] = (sqrt(s['a']/p['a_c']) - 1) / (1 - sqrt(s['T'] \\\n                     /p['T_c']))\n        elif p['Model'] == 'Adachi-Lu':\n            p['m'] = p['T_c'] * log(p['a_c']/s['a']) / (s['T'] - p['T_c']) \n        return p # = s['m']\n    \n    def a_T(self, s, p):\n        \"\"\"\n        Return explicit solution for 'a' at 'm', 'T'\n        \n        Parameters\n        ----------\n        s : dictionary\n            Contains the current state variable temperature 'T' \n        \n        p : dictionary\n            Contains the critical parameters 'T_c', 'a_c' and the a dependency \n            model 'Model' with parameter 'm'\n        \n        Dependencies\n        ------------\n        math\n        \"\"\"\n        from math import e\n        if p['Model'] == \"Soave\":\n            s['a'] = p['a_c']*(1.0 + p['m']*(1 - (s['T']/p['T_c'])**(0.5)))**2\n        elif p['Model'] == 'Adachi-Lu':\n            s['a'] = p['a_c']*e**(p['m']*(1 - s['T']/p['T_c'])) \n        return s # = s['a']\n\n    def a_maxwell(self, s, p):\n        \"\"\"\n        Explicit solution of 'a' parameter of the VdW EoS Maxwell integral at \n        Psat.\n        \n        Parameters\n        ----------\n        s : dictionary\n            Contains the current temperature 'T' and phase volumes 'V_v','V_l'.\n        \n        p : dictionary\n            Parameter dictionary container containing the universal gas \n            constant parameter 'R'.\n        \n        Dependencies\n        ------------\n        math\n        \"\"\"\n        from math import log\n        s['a'] = -(p['R']*s['T']*s['V_l']*s['V_v']*log(s['V_v'] \\\n            /(s['V_l'] - s['b']) \\\n            - s['b']/(s['V_l'] - s['b'])) + ((s['V_l']**2) * s['V_v'] \\\n            - s['V_l'] * (s['V_v']**2)) * s['P']) / (s['V_l'] - s['V_v']) \n        return s\n    \n    def V_root(self, s, p): \n        \"\"\"\n        Calculates the volume roots of the van der Waals equation using the \n        analytic solution at specified values of P (or Psat), T, a(T) and b. 
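Written out, this is the cubic V**3 - (R*T/P + b)*V**2 + (a/P)*V - a*b/P = 0, whose coefficients appear below. 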
If\n        an analytical solution does not exist a numerical estimate is used.\n        \n        Parameters\n        ----------\n        s : dictionary\n            Contains the current temperature state variables 'T', pressure 'P' \n            and the VdW coefficients 'a' and 'b'.\n        \n        p : dictionary\n            Contains the critical parameters 'T_c', 'a_c', 'R'.\n        \n        Dependencies\n        ------------\n        numpy, math\n        \"\"\" \n        import logging\n        from math import sqrt, acos, cos, pi\n        try:\n            # Coefficients of V^3 + (C_1)V^2 + (C_2)V + C_3 = 0\n            C = [- (p['R']*s['T']/s['P'] + s['b']),    # Coefficient C_1\n                 s['a']/s['P'],                        # Coefficient C_2\n                 - s['a']*s['b']/s['P']                # Coefficient C_3\n                 ]\n            # Substitutions (see solution of Cubic equations:\n            #  mathworld.wolfram.com/CubicFormula.html )  \n            w = (3*C[1] - C[0]**2)/3.0\n            q = (27*C[2] - 9*C[0]*C[1] + 2*C[0]**3)/27.0\n            R_t = (w/3.0)**3 + (q/3.0)**2.0\n            q_s = q/abs(q) #math.copysign(1, q)\n            \n            if R_t < 0:\n                Ratio = sqrt(((q/2.0)**2)/(-(w/3.0)**3))\n                if abs(Ratio) < 1.0:\n                    phi = acos(Ratio)\n                else:\n                    raise ValueError # Raise Math error if no solution\n                    #phi = math.degrees(math.acos(Ratio-2)+math.pi)\n                    # math.degrees(math.acos(Ratio-2))\n            else:\n                raise ValueError \n            # Analytical expressions for Volume roots:\n            V_roots = [-2*q_s*sqrt(-w/3.0)*cos(phi/3.0 ) - C[0]/3.0,\n                       -2*q_s*sqrt(-w/3.0)*cos(phi/3.0 + 2*pi/3.0) - C[0]/3.0,\n                       2*q_s*sqrt(-w/3.0)*cos(phi/3.0 + 4*pi/3.0) - C[0]/3.0 \n                       ]\n            # Find physical volume roots\n            s['V_v'], s['V_l'] = max(V_roots), min(V_roots) \n            \n        except(ValueError):\n            from numpy import roots\n            # Coefficients of (C_0)V^3 + (C_1)V^2 + (C_2)V + C_3 = 0\n            C = [ 1.0,                                 # Coefficient C_0\n                 - (p['R']*s['T']/s['P'] + s['b']),    # Coefficient C_1\n                 s['a']/s['P'],                        # Coefficient C_2\n                 - s['a']*s['b']/s['P']                # Coefficient C_3\n                 ]\n            \n            V_roots = roots(C)  \n            s['V_v'], s['V_l'] = max(V_roots.real), min(V_roots.real) \n            \n            if abs(V_roots[0].imag) > 0.1 or abs(V_roots[1].imag) > 0.1 \\\n                                          or abs(V_roots[2].imag) > 0.1:\n                \n                logging.warn('large imaginary roots in VdW.VRoot = '\n                             + '{}, {}, {}'.format(V_roots[0].imag,\n                                                   V_roots[1].imag,\n                                                   V_roots[2].imag)\n                             )\n        return s\n        \n    #%% \n    def Psat_V_roots(self, s, p, tol = 1e-20, estfactor=1e-3):\n        \"\"\"\n        Calculates the saturation pressure and volume roots of the Van der \n        Waals equation at a specified temperature and pressure.\n        \n        Parameters\n        ----------\n        s : dictionary\n            Contains the current state variable temperature 'T', pressure 'P' \n            and the VdW coefficients 'a' and 'b' \n        \n        p : dictionary\n            Contains the critical parameters 'T_c', 'a_c', 'R' and the a \n            dependency model 'Model' with parameter 'm'\n        \n        tol : float, optional.\n              Tolerance used in the Maxwell integral solution\n        \n        estfactor: float, optional\n                   If the first iteration attempt fails, a second saturation \n                   pressure is estimated from an exponential scaling \n                   function based on the reduced temperature and critical \n                   pressure. The 'estfactor' will be multiplied with this \n                   estimate; if convergence still fails, even lower values should \n                    be attempted. 
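\n        \n        Example\n        -------\n        A hypothetical call; the dictionary keys are the ones this method reads,\n        and the numeric values are illustrative only::\n        \n            eos = VdW()\n            p = {'Model': 'Adachi-Lu', 'T_c': 304.2, 'P_c': 7.38e6,\n                 'a_c': 0.3658, 'R': 8.314, 'm': 0.777}\n            s = {'T': 273.15, 'P': 1.0e5, 'a': 0.4, 'b': 4.3e-5}\n            s = eos.Psat_V_roots(s, p)\n            # s now also holds 'P_sat', 'V_v', 'V_l', 'V_v_P' and 'V_l_P'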
\n        \n        Dependencies\n        ------------\n        numpy, math\n        \n        \"\"\"\n        import logging\n        from math import log # NOTE: math.log is the natural logarithm, not b10 \n        from scipy.optimize import fsolve \n        s = self.a_T(s,p) # Update s['a'] at specified T for given p['m']\n        s = self.V_root(s,p) # Update s['V_v'] and s['V_l'] \n        # The phase volumes at the specified pressure:\n        s['V_v_P'], s['V_l_P'] = s['V_v'], s['V_l']\n        \n        def P_maxwell(P,s,p):\n            s['P'] = P\n            s = self.a_T(s,p)\n            s = self.V_root(s,p) # Update s['V_v'] and s['V_l']\n            try:\n                return s['P']*(s['V_l'] - s['V_v']) \\\n                       + s['a']/s['V_v'] - (s['a']/s['V_l']) \\\n                       + p['R'] * s['T'] * log((s['V_v'] - s['b']) \\\n                       /(s['V_l'] - s['b'])) \n            except(ValueError):\n                logging.warn('Value error in P_maxwell, P = '\n                             + '{}'.format(s['P'])\n                             )\n                raise IOError('Math error in P_maxwell in Psat_V_roots, try'+\\\n                ' to use a lower starting value s[\\'P\\'] before executing the'\\\n                +' function')\n        \n        try:\n            s['P_sat'] = fsolve(P_maxwell, s['P'], args=(s,p), xtol=tol)\n        except(IOError):\n            try: # Scale to approx. P near P_sat\n                s['P'] = p['P_c']**(s['T']/p['T_c'])*estfactor \n                s = self.V_root(s,p) # Update s['V_v'] and s['V_l']\n                s['P_sat'] = fsolve(P_maxwell, s['P'], args=(s,p), xtol=tol)\n            except(IOError):\n                raise IOError('Math error in P_maxwell in Psat_V_roots, try'+\\\n                ' to use a lower starting value s[\\'P\\'] or a lower'+\\\n                '\\'downscale\\' argument before executing the function.')\n        \n        from numpy import float64 # Convert 1x1 np arrays to floats\n        s['V_v'], s['V_l'], s['P_sat'], s['P'] = float64(s['V_v']), \\\n               float64(s['V_l']), float64(s['P_sat']), float64(s['P'])\n        return s \n        \n        # Outputs are: 'V_v_P' = Vapour phase volume at specified Pressure\n        #             'V_l_P' = Liquid phase volume at specified Pressure\n        #             'V_v' = Vapour phase volume at saturation Pressure \n        #             'V_l' = Liquid phase volume at saturation Pressure \n        #             'P_sat' = Saturation Pressure at specified Temperature\n\n","sub_path":"Van_der_Waals.py","file_name":"Van_der_Waals.py","file_ext":"py","file_size_in_byte":9478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"572617614","text":"from typing import List\n\ndef fibonacci(number:int) -> int:\n    \"\"\"\n    Returns the n'th fibonacci sequence number\n    \n    >>> fibonacci(number=10)\n    55\n    \n    \"\"\" \n    a = 0\n    b = 1\n    if number < 0: \n        return -1 \n    elif number == 0: \n        return a \n    elif number == 1: \n        return b \n    else: \n        for _ in range(2, number+1): \n            c = a + b \n            a = b \n            b = c \n        return b\n\ndef fibonacciSequence(number:int) -> List[int]:\n    \"\"\"\n    Returns the fibonacci numbers F(0) through F(number) as a list\n    >>> fibonacciSequence(number=10)\n    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n    \"\"\"\n    return [fibonacci(i) for i in range(number + 1)]\n\n\nif __name__ == \"__main__\":\n    print(fibonacci(number=10))\n","sub_path":"basic/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"183320479","text":"from datetime import datetime\n\nclass Controller(object):\n\n    def __init__(self, model):\n        self.model = model\n\n    def insertPhoto(self, name, description, albumID):\n        if (isinstance(name, str) and isinstance(description, str) and isinstance(albumID, str)):\n            self.model.insertPhoto(name, description, albumID)\n        else:\n            print(\"check data types\")\n\n    def updatePhoto(self, oldname, name, description, albumID):\n        if (isinstance(name, str) and isinstance(description, str) and isinstance(albumID, str)):\n            
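# arguments validated as strings; delegate the write to the model layer\n            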
self.model.updatePhoto(oldname, name, description, albumID)\n        else:\n            print(\"check data types\")\n\n    def insertAlbum(self, name, description, owner):\n        if (isinstance(name, str) and isinstance(description, str) and isinstance(owner, str)):\n            self.model.insertAlbum(name, description, owner)\n        else:\n            print(\"check data types\")\n\n    def updateAlbum(self, id, name, description, owner):\n        if (isinstance(id, str) and isinstance(name, str) and isinstance(description, str) and isinstance(owner, str)):\n            self.model.updateAlbum(id, name, description, owner)\n        else:\n            print(\"check data types\")\n\n    def deletePhoto(self, id):\n        if isinstance(id, str):\n            self.model.deletePhoto(id)\n        else:\n            print(\"check data types\")\n\n    def deleteAlbum(self, id):\n        if isinstance(id, int):\n            self.model.deleteAlbum(id)\n        else:\n            print(\"check data types\")\n\n    def getAllPhotos(self):\n        for it in self.model.getPhotos():\n            print(it.name)\n\n    def getAllAlbums(self):\n        for it in self.model.getAlbums():\n            print(it.name)\n\n    def getPhotosByAttribute(self, attr, val):\n        start_time = datetime.now()\n        print(self.model.getPhotosByAttribute(attr, val))\n        end_time = datetime.now()\n        print('Duration: {}'.format(end_time - start_time))\n\n    def getAlbumsByAttribute(self, attr, val):\n        start_time = datetime.now()\n        print(self.model.getAlbumsByAttribute(attr, val))\n        end_time = datetime.now()\n        print('Duration: {}'.format(end_time - start_time))\n\n    def generateNewAlbums(self, number):\n        albms = self.model.getAlbums()\n        print(albms)\n        print(albms[-1])\n        newID = int(albms[-1][0])+1\n        names = self.model.getRandomTexts(number, 10)\n        descs = self.model.getRandomTexts(number, 20)\n        ownrs = self.model.getRandomTexts(number, 7)\n        for i in range(int(number)):\n            self.model.insertAlbumWithID(newID, names[i], descs[i], ownrs[i])\n            newID += 1\n\n    def generateNewPhotos(self, number):\n        albms = self.model.getAlbums()\n        names = self.model.getRandomTexts(number, 10)\n        descs = self.model.getRandomTexts(number, 20)\n        numOfAlbms = len(albms)\n        aIds = self.model.getRandomInts(numOfAlbms, number)\n        print(aIds)\n        for i in range(int(number)):\n            el = albms[int(aIds[i][0])]\n            albmId = int(el[0])\n            self.model.insertPhoto(names[i], descs[i], albmId)","sub_path":"code/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"31080185","text":"def main():\n\n\tescolha = input()\n\tmatriz = [0] * 12\n\n\tfor i in range(12):\n\t\tcoluna = [0] * 12\n\t\tfor j in range(12):\n\t\t\tx = float(input())\n\t\t\tcoluna[j] = x\n\n\t\tmatriz[i] = coluna\n\n\tcont = 5\n\tsoma = 0\n\tsuport = 0\n\tfor a in range(7,12):\n\t\tcont -= 1\n\t\tfor b in range(12):\n\t\t\tif b > cont and b < (11 - cont):\n\t\t\t\tsoma += matriz[a][b]\n\t\t\t\tsuport += 1\n\n\tmedia = soma / suport\n\tif escolha == 'S':\n\t\tprint('%.1f' % soma)\n\telse:\n\t\tprint('%.1f' % media)\n\t\t\t\n\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"484478773","text":"import cv2\r\ndef haar_cascade():\r\n    cascade = cv2.CascadeClassifier('./res/haarcascade_upperbody.xml')\r\n    cam = cv2.VideoCapture('./env/PersonWalk.avi')\r\n    while True:\r\n        ret, img = cam.read()\r\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n        gray = cv2.equalizeHist(gray)\r\n        body = cascade.detectMultiScale(\r\n            gray,\r\n            scaleFactor = 
1.1,\r\n minNeighbors = 5,\r\n minSize = (30,30),\r\n flags = cv2.CASCADE_SCALE_IMAGE\r\n )\r\n\r\n for (x, y, w, h) in body:\r\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n cv2.imshow('upper body',img)\r\n if cv2.waitKey(5) == 27:\r\n break\r\n cv2.destroyAllWindows()\r\n\r\ndef main():\r\n haar_cascade()\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"gui/src/haar_cascade_test.py","file_name":"haar_cascade_test.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"613327383","text":"#!/usr/bin/env python3\nimport struct\nimport math\nfrom pathlib import Path\nfrom progress.bar import Bar\nimport zstandard as zstd\nimport numpy \nimport os\nimport glob\nfrom optparse import OptionParser\nimport time\nimport hdf5storage\nimport datetime\nimport ftplib\n#import matlab.engine\n\ndef parse_to_list(input):\n tmps=input.split(',')\n out=[]\n #print \"input\",input\n for tmp in tmps:\n # print \"tmp\",tmp\n tmp=tmp.split(':')\n # print \"tmp2\",tmp,len(tmp)\n\n \n if len(tmp) == 2:\n for k in range(int(tmp[0]),int(tmp[1])+1):\n out.append(k)\n elif len(tmp) == 1:\n out.append(int(tmp[0]))\n else:\n # print \"bad forma\"\n pass\n return out #numpy.asarray(out)\n\n\ndef parse_log(filname):\n beams = []\n for line in open(filname,'r'):\n out = {}\n for word in line.split():\n tmp = word.split('=')\n if len(tmp) == 2:\n key = tmp[0][2:]\n arg = tmp[1]\n if key == 'subbands' or key == 'beamlets' or key =='rcus':\n arg = parse_to_list(arg)\n\n if key == 'anadir' or key == 'digdir':\n tmp = arg.split(',')\n az = numpy.rad2deg(float(tmp[0]))\n elv = numpy.rad2deg(float(tmp[1]))\n ref = tmp[2]\n arg = {'az':az,'elv':elv,'ref':ref}\n out[key] = arg\n print(key,arg)\n else:\n tmp = word.split('/')\n if len(tmp)>4:\n out['beam'] = int(tmp[-1][7:])\n #print(out)\n beams.append(out)\n\n \n return(beams)\n\ndef setFileName(rcu_id):\n antenaNo = math.floor(rcu_id/2)\n if rcu_id % 2:\n fileName = str(antenaNo)+'-y'\n else:\n fileName = str(antenaNo)+'-x'\n if antenaNo in beams:\n fileName = beams[antenaNo]+'-'+ fileName\n return fileName+'.bin' \n\ndef get_lane_no(filename):\n name = os.path.basename(filename)\n port_no = int(name[8])-1\n return port_no\n\ndef get_beamlets_in_lane(filename):\n cctx = zstd.ZstdDecompressor()\n\n f = cctx.stream_reader(open(filename,'rb'))\n tbb_data =f.read(7824)\n tbb_format=\"BBHHbbII3904h\"\n tmp = struct.unpack(tbb_format,tbb_data)\n number_of_beamlets=tmp[4]\n\n f.close()\n return number_of_beamlets\n\n\ndef read_file(filename):\n print('Processing to check if synchronized')\n lane_no = get_lane_no(filename)\n beam_at_lane = get_beamlets_in_lane(filename)\n # bar = Bar(' Processing\\n', max=192)\n #bar.start()\n filePath = Path(filename)\n if beam_at_lane == 61:\n beam_format=\"BBHHbbII3904h\"\n if beam_at_lane == 122:\n beam_format=\"BBHHbbII7808b\"\n samples_format= \"h\"\n clock = 200e6\n missing = 0\n with open(filePath, \"rb\") as reader:\n print(filePath)\n cctx = zstd.ZstdDecompressor()\n f = cctx.stream_reader(reader)\n #f=reader\n tbb_data =f.read(7824)\n tmp = struct.unpack(beam_format,tbb_data)\n\n number_of_beamlets=tmp[4]\n number_of_blocks=tmp[5]\n timestamp1=tmp[6]\n timestamp = timestamp1\n print(timestamp)\n block_sequence_number=tmp[7]\n data=tmp[8:]\n data=numpy.asarray(data)\n \n data=numpy.reshape(data,(beam_at_lane,16,4))\n j = (timestamp*clock+512)/1024 + block_sequence_number\n j0 = j\n j1= j\n datax_tmp = []\n datay_tmp =[]\n 
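# per-packet buffers: each entry holds one frame of 16 complex samples per beamlet;\n        # process_directory() stitches them into contiguous arrays after the read loop\n        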
time_tmp = []\n        tmpx=1.0 * data[:,:,0] + 1.j * data[:,:,1]\n        tmpy=numpy.squeeze(1.0 * data[:,:,2::4]+1.j * data[:,:,3::4])\n        datax_tmp.append(tmpx)\n        datay_tmp.append(tmpy)\n        time_tmp.append(j)\n        \n        print(tmpx.shape,tmpy.shape)\n        #exit()\n        tbb_data = f.read(7824)\n        #print('1.',datax[1,:])\n        i = 1\n        time0 = time.time()\n        print('2')\n        while tbb_data:\n            tmp = struct.unpack(beam_format,tbb_data)\n            number_of_beamlets=tmp[4]\n            number_of_blocks=tmp[5]\n            timestamp=tmp[6]\n            block_sequence_number=tmp[7]\n            data=tmp[8:]\n            data=numpy.asarray(data)\n            \n            data=numpy.reshape(data,(beam_at_lane,16,4))\n            j = (timestamp*clock+512)/1024 + block_sequence_number\n            \n            while j-j1 > 17:\n                missing += 1\n                datax_tmp.append(tmpx)\n                datay_tmp.append(tmpy)\n                time_tmp.append(j)\n                j1+=16\n                #print('add',j-j1,data.shape)\n            j1=j\n            i+=1\n            \n            #if i % 100 == 0:\n            #    print('%0.2f ms'%((j-j0)/(200e6/1024)*1000))\n            #    print(time.time()-time0)\n            #    time0 = time.time()\n                #break\n            tmpx=1.0 * data[:,:,0] + 1.j * data[:,:,1]\n            tmpy=numpy.squeeze(1.0 * data[:,:,2::4]+1.j * data[:,:,3::4])\n            \n            datax_tmp.append(tmpx)\n            datay_tmp.append(tmpy)\n            time_tmp.append(j)\n            \n            tbb_data = f.read(7824)\n        print('4')\n        #f.close()\n    print('3')\n    return time_tmp,datax_tmp,datay_tmp,missing\n\n\n\ndef process_directory(dirname,outdir,configfile,comment='',split=True):\n    config = parse_log(configfile)\n    print(configfile)\n    print(config)\n    #exit()\n    #outdir = 'out/'\n    ext_dir = outdir.split('/')[-1]\n    \n    ftp = ftplib.FTP('polluks.ise.pw.edu.pl')\n    ftp.login('lofar','B!75qZB1')\n    # change into the LOFAR directory\n    ftp.cwd('LOFAR')\n    # create the output directory\n    #ftp.mkd('recent')\n    ftp.cwd('recent')\n    try:\n        ftp.mkd(ext_dir)\n    except:\n        pass\n    \n    \n    print(ext_dir)\n    ftp.cwd(ext_dir)\n    print(config)\n    for beam in config:\n        print(beam)\n        beam_no = beam['beam']\n        subbands = beam['subbands']\n        beamlets = numpy.array(beam['beamlets'])\n        print(beam_no,beamlets)\n       # datax_tmp = datax[beamlets,:]\n       # print(datax_tmp.shape)\n        \n       # no = beam['\n        continue\n        #exit()\n    #dirname = '/home/julia/data/local/2020-05-13T06:49:55.000/'\n    #dirname = '/home/julia/data/local/2020-05-13T07:28:13.000/'\n    files = glob.glob(dirname+'/udp_16101*')\n    for file1 in files:\n        \n        file2 = file1.replace(\"16101\",\"16102\")\n        file3 = file1.replace(\"16101\",\"16103\").replace('lofar1','lofar2')\n        file4 = file1.replace(\"16101\",\"16104\").replace('lofar1','lofar2')\n        print(file1,file2,file3,file4)\n        \n        time_msg_1,x_1,y_1,missing_1 = read_file(file1)\n        #print(time_msg_1[0]/(200e6/1024),min(time_msg_1)/(200e6/1024),'ddd')\n        #exit()\n        time_msg_2,x_2,y_2,missing_2 = read_file(file2)\n        \n        time_msg_3,x_3,y_3,missing_3 = read_file(file3)\n        \n        time_msg_4,x_4,y_4,missing_4 = read_file(file4)\n\n\n\n        n = len(time_msg_1)\n        print('n',n)\n        no_subbands,no_times = x_1[0].shape\n        datax = numpy.ones([no_subbands*4,n*16],dtype=numpy.complex)\n        datay = numpy.ones([no_subbands*4,n*16],dtype=numpy.complex)\n        for k in range(len(time_msg_1)):\n            datax[0:no_subbands,16*k:16*(k+1)] = x_1[k]\n            datay[0:no_subbands,16*k:16*(k+1)] = y_1[k]\n            datax[no_subbands*1:no_subbands*2,16*k:16*(k+1)] = x_2[k]\n            datay[no_subbands*1:no_subbands*2,16*k:16*(k+1)] = y_2[k]\n            datax[no_subbands*2:no_subbands*3,16*k:16*(k+1)] = x_3[k]\n            datay[no_subbands*2:no_subbands*3,16*k:16*(k+1)] = y_3[k]\n            datax[no_subbands*3:no_subbands*4,16*k:16*(k+1)] = x_4[k]\n            datay[no_subbands*3:no_subbands*4,16*k:16*(k+1)] = y_4[k]\n           # print(k)\n        del x_1,y_1,x_2,y_2,x_3,y_3,x_4,y_4\n        for beam in config:\n            try:\n                beam_no = beam['beam']\n                subbands = beam['subbands']\n                beamlets = 
numpy.array(beam['beamlets'])\n print(beamlets)\n datax_tmp = datax[beamlets,:]\n datay_tmp = datay[beamlets,:]\n print(datax_tmp.shape)\n timestamp1 = time_msg_1[0]\n timestamp2 = time_msg_2[0]\n timestamp3 = time_msg_3[0]\n timestamp4 = time_msg_4[0]\n print(numpy.int64(timestamp1/(200e6/1024) ))\n #exit()\n #break\n # no = beam['\n #continue\n #filename = outdir +'/'+'%3.3d_'%(beam_no) +str(int(timestamp1)) + '_x.mat'\n #filename = outdir +'/' +datetime.datetime.fromtimestamp(int(timestamp1/(200e6/1024)+0.1)).isoformat()+'_%3.3d'%(beam_no) + '_x.mat'\n if split == True:\n filename = outdir +'/' +datetime.datetime.fromtimestamp(int(timestamp1/(200e6/1024)+0.1)).strftime(\"%Y%m%d_%H%M%S\")+'_%3.3d'%(beam_no) + '_x.mat'\n else:\n filename = outdir +'/' +datetime.datetime.fromtimestamp(int(timestamp1/(200e6/1024)+0.1)).strftime(\"%Y%m%d_%H%M%S\")+'_%3.3d'%(beam_no) + '.mat'\n\n print(filename)\n #exit()\n #print(k+lane_no+beam_at_lane,no_subbands)\n print(filename)\n #exit()\n output_data = {}\n output_data['comment']=comment\n if split == True:\n output_data[u'data'] = numpy.squeeze(datax_tmp)\n output_data['polarization']='x'\n else:\n output_data[u'data_x'] = numpy.squeeze(datax_tmp)\n output_data['polarization']='both'\n #output_data[u'y'] = numpy.squeeze(datay_tmp)\n output_data[u'timetick'] = numpy.array([timestamp1,timestamp2,timestamp3,timestamp4])\n output_data[u'missing'] = numpy.array([missing_1*16,missing_2*16,missing_3*16,missing_4*16])\n output_data[u'config'] = beam\n output_data[u'bitmode'] = 32/(2**(no_subbands/61))\n if split == True:\n hdf5storage.write(output_data,'.',filename,matlab_compatible=True)\n\n ftp.storbinary('stor %s'%(os.path.basename(filename)),open(filename,'rb'))\n\n os.remove(filename)\n\n #filename = outdir +'/' +datetime.datetime.fromtimestamp(int(timestamp1/(200e6/1024)+0.1)).isoformat()+'_%3.3d'%(beam_no) + '_y.mat'\n if split == True:\n filename = outdir +'/' +datetime.datetime.fromtimestamp(int(timestamp1/(200e6/1024)+0.1)).strftime(\"%Y%m%d_%H%M%S\")+'_%3.3d'%(beam_no) + '_x.mat'\n\n #print(k+lane_no+beam_at_lane,no_subbands)\n\n #exit()\n output_data = {}\n output_data['comment']=comment\n if split == True:\n output_data[u'data'] = numpy.squeeze(datay_tmp)\n output_data['polarization']='y'\n else:\n output_data[u'data_y'] = numpy.squeeze(datay_tmp)\n\n #output_data[u'y'] = numpy.squeeze(datay_tmp)\n output_data[u'timetick'] = numpy.array([timestamp1,timestamp2,timestamp3,timestamp4])\n output_data[u'missing'] = numpy.array([missing_1*16,missing_2*16,missing_3*16,missing_4*16])\n output_data[u'config'] = beam\n output_data[u'bitmode'] = 32/(2**(no_subbands/61))\n #print (4<<(no_subbands/61))\n #exit()\n hdf5storage.write(output_data,'.',filename,matlab_compatible=True)\n ftp.storbinary('stor %s'%(os.path.basename(filename)),open(filename,'rb'))\n os.remove(filename)\n\n #del datax,datay\n #exit()\n except Exception as e:\n print(e)\n del datax,datay\n ftp.quit()\n ftp.close()\n","sub_path":"process_to_mat.py","file_name":"process_to_mat.py","file_ext":"py","file_size_in_byte":11887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"236663066","text":"\nfrom sklearn.model_selection import train_test_split\nfrom torchvision import datasets\nfrom typing import Callable, Optional\n\n\nclass CIFAR10Red(datasets.CIFAR10):\n\n def __init__(self,\n root: str,\n train: bool = True,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = False,\n 
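# 'portion' is the one argument added on top of torchvision's CIFAR10 signature:\n                 # the stratified fraction of the data to keep (see reduce_dataset below)\n                 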
portion=0.1,\n ) -> None:\n \n super().__init__(root, train, transform, target_transform,\n download)\n self.portion = portion\n \n self.reduce_dataset()\n \n \n def reduce_dataset(self):\n \n self.data, __, self.targets, __ = \\\n train_test_split(self.data, self.targets, train_size=self.portion,\n random_state=69, stratify=self.targets)\n\n","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"492480621","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 3 15:59:16 2019\n\n@author: eo\n\"\"\"\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Add local path\n\nimport os\nimport sys\n\ndef find_path_to_local(target_folder = \"local\"):\n \n # Skip path finding if we successfully import the dummy file\n try:\n from local.dummy import dummy_func; dummy_func(); return\n except ImportError:\n print(\"\", \"Couldn't find local directory!\", \"Searching for path...\", sep=\"\\n\")\n \n # Figure out where this file is located so we can work backwards to find the target folder\n file_directory = os.path.dirname(os.path.abspath(__file__))\n path_check = []\n \n # Check parent directories to see if we hit the main project directory containing the target folder\n prev_working_path = working_path = file_directory\n while True:\n \n # If we find the target folder in the given directory, add it to the python path (if it's not already there)\n if target_folder in os.listdir(working_path):\n if working_path not in sys.path:\n tilde_swarm = \"~\"*(4 + len(working_path))\n print(\"\\n{}\\nPython path updated:\\n {}\\n{}\".format(tilde_swarm, working_path, tilde_swarm))\n sys.path.append(working_path)\n break\n \n # Stop if we hit the filesystem root directory (parent directory isn't changing)\n prev_working_path, working_path = working_path, os.path.dirname(working_path)\n path_check.append(prev_working_path)\n if prev_working_path == working_path:\n print(\"\\nTried paths:\", *path_check, \"\", sep=\"\\n \")\n raise ImportError(\"Can't find '{}' directory!\".format(target_folder))\n \nfind_path_to_local()\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Imports\n\nimport cv2\nimport numpy as np\n\nfrom collections import namedtuple, deque\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Define classes\n\n\nclass Entity_Drawer:\n \n '''\n Class used to handle drawing UI for creating & editing drawn polygons\n Internally represents all point data in pixel units, but all input/output must be in normalized units!\n '''\n \n # .................................................................................................................\n \n def __init__(self,\n frame_wh,\n minimum_entities = 0,\n maximum_entities = None,\n minimum_points = 3,\n maximum_points = None,\n border_size_px = 60,\n debug_mode = False):\n \n # Safe-ify the input values\n safe_border_size_px = int(round(border_size_px))\n safe_min_entities = minimum_entities if (minimum_entities is not None) else 0\n safe_max_entities = maximum_entities if (maximum_entities is not None) else 1000\n safe_min_points = minimum_points if (minimum_points is not None) else 2\n safe_max_points = maximum_points if (maximum_points is not None) else 
10000\n \n # Store defining characteristics\n self.frame_wh = frame_wh\n self.border_size_px = safe_border_size_px\n self.min_entities = safe_min_entities\n self.max_entities = safe_max_entities\n self.min_points_per_entity = max(2, safe_min_points)\n self.max_points_per_entity = safe_max_points \n self.debug_mode = debug_mode\n \n # Set up possible states & corresponding callbacks\n self.state = \"hover\"\n self.mouse_state_callbacks = {\"hover\": self._mouse_hover_callback,\n \"draw\": self._mouse_draw_callback,\n \"drag\": self._mouse_drag_callback}\n self.keypress_state_callbacks = {\"hover\": self._key_hover_callback,\n \"draw\": self._key_draw_callback,\n \"drag\": self._key_drag_callback}\n \n # Set up callback state storage\n self.mouse_state = namedtuple(\"Mouse_State\", [\"click\", \"double_click\", \"drag\", \"release\"])\n self.modifier_state = namedtuple(\"Modifiers\", [\"alt\", \"ctrl\", \"shift\"])\n \n # Set up mouse event/state storage, so keypress can access them\n self.mouse_xy_history = deque([np.int32((frame_wh[0] / 2, frame_wh[1] / 2))], maxlen=100)\n \n # Set up entity managment\n self.entity_collection = None\n self.initialize_entities([])\n \n # Storage for drawing style\n self._aesthetics_dict = None\n self.aesthetics()\n \n # Set up variables used to detect and manage changes\n self._entity_change = False\n \n # .................................................................................................................\n \n def __repr__(self):\n \n repr_strs = [\"Drawing object - Contains {} entites\".format(len(self.entity_collection)),\n \" Frame size (px): {} x {}\".format(*self.frame_wh),\n \" Border size (px): {}\".format(self.border_size_px),\n \" Min number of entities: {}\".format(self.min_entities),\n \" Max number of entities: {}\".format(self.max_entities),\n \" Min points per entity: {}\".format(self.min_points_per_entity),\n \" Max points per entity: {}\".format(self.max_points_per_entity)]\n \n return \"\\n\".join(repr_strs)\n \n # .................................................................................................................\n \n def __call__(self, *args, **kwargs):\n self.mouse_callback(*args, **kwargs)\n \n # .................................................................................................................\n \n def __len__(self):\n return len(self.entity_collection)\n \n # .................................................................................................................\n \n def _normalize(self, pixelized_points_array):\n \n ''' Function for mapping a pixelized array of points (xy-tuples) into a normalized list '''\n \n # Don't do anything with empty arrays\n empty_array = (pixelized_points_array.size == 0)\n if empty_array:\n return pixelized_points_array.tolist()\n \n # Calculate some handy scaling/offset terms\n frame_w, frame_h = self.frame_wh\n frame_scaling = np.float32((frame_w - 1, frame_h - 1))\n border_offset = np.int32((self.border_size_px, self.border_size_px))\n \n # Convert the input to an array for easier manipulation\n normalized_points_array = np.float32(pixelized_points_array - border_offset) / frame_scaling\n normalized_points_list = normalized_points_array.tolist()\n \n return normalized_points_list\n \n # .................................................................................................................\n \n def _pixelize(self, normalized_points_list):\n \n ''' Function for mapping a normalized list of points (xy-tuples) into a pixelized array '''\n \n # Don't 
do anything with empty lists\n empty_list = (len(normalized_points_list) == 0)\n if empty_list:\n return np.int32([]).tolist()\n \n # Calculate some handy scaling/offset terms\n frame_w, frame_h = self.frame_wh\n frame_scaling = np.float32((frame_w - 1, frame_h - 1))\n border_offset = np.int32((self.border_size_px, self.border_size_px))\n \n # Convert the input to an array for easier manipulation\n points_array = np.float32(normalized_points_list)\n pixelized_points_array = np.int32(np.round(border_offset + (points_array * frame_scaling)))\n pixelized_points_list = pixelized_points_array.tolist()\n \n return pixelized_points_list\n \n # .................................................................................................................\n \n @property\n def last_mouse_xy(self):\n ''' Return the last mouse co-ordinate (in pixels) '''\n return self.mouse_xy_history[0]\n \n # .................................................................................................................\n \n def get_entities_list(self, normalize = True):\n \n ''' Return a (possibly normalized) list-of-lists-of-tuples representing point xy co-ordinates ''' \n \n if normalize:\n return [self._normalize(each_entity.points()) for each_entity in self.entity_collection]\n return [each_entity.points() - self.border_size_px for each_entity in self.entity_collection]\n \n # .................................................................................................................\n \n def aesthetics(self, finished_color = (0, 255, 255), in_progress_color = (255, 255, 0),\n finished_thickness = 1, in_progress_thickness = 1,\n anchor_radius = 3, line_type = cv2.LINE_AA,\n show_anchors = True):\n \n ''' Function for changing default color/styling '''\n \n # Update internal record of aesthetics\n self._aesthetics_dict = {\"finished_color\": finished_color,\n \"in_progress_color\": in_progress_color,\n \"finished_thickness\": finished_thickness,\n \"in_progress_thickness\": in_progress_thickness,\n \"anchor_radius\": anchor_radius,\n \"line_type\": line_type,\n \"show_anchors\": show_anchors}\n \n # Propagate changes to the entity collection\n self.entity_collection.aesthetics(**self._aesthetics_dict)\n \n return self._aesthetics_dict\n \n # .................................................................................................................\n \n def on_change(self, only_on_hover = False):\n \n ''' \n Function for monitoring changes to the entities being drawn \n To access modified entity data, use the .get_entities_list() function\n '''\n \n # Get state flags\n hover_flag = (self.state == \"hover\") if only_on_hover else True\n \n # Check for entity change flag, and if present, consume it!\n if self._entity_change and hover_flag:\n self._entity_change = False\n return True\n \n return False\n \n # .................................................................................................................\n \n def update_frame_wh(self, new_frame_width, new_frame_height):\n \n '''\n Function for updating the known frame size.\n Only use this if the image size is changing over time,\n otherwise the frame size should be supplied on initialization\n '''\n \n # Figure out how much to scale entity points in x/y directions based on new vs old frame sizing\n old_frame_w, old_frame_h = self.frame_wh\n w_scale = (new_frame_width - 1) / (old_frame_w - 1)\n h_scale = (new_frame_height - 1) / (old_frame_h - 1)\n offset_px = self.border_size_px\n \n # Scale all entities, then update record of frame 
size\n self.entity_collection.scale_entities(w_scale, h_scale, offset_px) \n self.frame_wh = (new_frame_width, new_frame_height)\n \n # Update boundaries now that the frame size has changed\n self._set_entity_boundaries()\n \n # Signal change to entities\n self._entity_change = True\n \n # .................................................................................................................\n \n def _set_entity_boundaries(self):\n \n # Simpler names for clarity\n frame_w, frame_h = self.frame_wh\n combined_border_size = 2 * self.border_size_px\n \n # Set boundaries for entity collection so that entities don't draw off the displayed frame\n min_xy = (0, 0)\n max_xy = (combined_border_size + frame_w, combined_border_size + frame_h)\n self.entity_collection.set_entity_boundaries(min_xy, max_xy)\n \n # .................................................................................................................\n \n def initialize_entities(self, entity_list):\n \n '''\n Function for initializing the drawn entities\n Inputs:\n entity_list -> Should be a list of lists of xy-tuples\n Each xy-tuple represents a point, in normalized co-ordinates!\n Each list of xy-tuples represents an entity (i.e. a single polygon or line)\n For example: [[(0.11, 0.40), (0.35, 0.25), (0.70, 0.55)],\n [(0.11, 0.11), (0.22, 0.22), (0.44, 0.44)],\n [(0.12, 0.34), (0.56, 0.78), (0.90, 0.98), (0.76, 0.54)]]\n '''\n \n # Create pixelized copies of each entity for internal representation\n pixelized_entity_list = [self._pixelize(each_entity) for each_entity in entity_list]\n \n # Create collection object to handle the group of entities\n self.entity_collection = Entity_Collection(pixelized_entity_list,\n minimum_entities = self.min_entities,\n maximum_entities = self.max_entities,\n minimum_points_per_entity = self.min_points_per_entity,\n maximum_points_per_entity = self.max_points_per_entity,\n debug_mode = self.debug_mode)\n \n # Set boundaries\n self._set_entity_boundaries()\n \n # Force a change when initialized\n self._entity_change = True\n \n # .................................................................................................................\n \n def replace_drawing_functions(self, new_completed_drawing_function = None, new_inprogress_drawing_function = None):\n \n ''' \n Function used to override the way entitys are drawn.\n Inputs must be functions with arguments in the form:\n draw_func(frame, points_npint32_array)\n \n Where frame will be the frame data passed in to draw onto,\n and points_npint32_array will be the entity points that need to be drawn\n \n Any styling/controls should be built into the function itself (it cannot take additional arguments)\n \n Inputs left as 'None' will keep existing drawing style.\n '''\n \n self.entity_collection.replace_drawing_functions(new_completed_drawing_function,\n new_inprogress_drawing_function)\n \n return\n \n # .................................................................................................................\n \n def _debug_print(self, print_msg):\n if self.debug_mode:\n print(print_msg) \n return\n \n # .................................................................................................................\n \n def _get_mouse_state(self, event, flags):\n \n mouse_move = (event == cv2.EVENT_MOUSEMOVE)\n \n # Get left mouse button state\n mouse_left_state = \\\n self.mouse_state(click = (event == cv2.EVENT_LBUTTONDOWN),\n double_click = (event == cv2.EVENT_LBUTTONDBLCLK),\n drag = ((flags & cv2.EVENT_FLAG_LBUTTON) 
> 0),\n release = (event == cv2.EVENT_LBUTTONUP))\n \n # Get middle mouse button (wheel) state\n mouse_mid_state = \\\n self.mouse_state(click = (event == cv2.EVENT_MBUTTONDOWN),\n double_click = (event == cv2.EVENT_MBUTTONDBLCLK),\n drag = ((flags & cv2.EVENT_FLAG_MBUTTON) > 0),\n release = (event == cv2.EVENT_MBUTTONUP))\n \n # Get right mouse button state\n mouse_right_state = \\\n self.mouse_state(click = (event == cv2.EVENT_RBUTTONDOWN),\n double_click = (event == cv2.EVENT_RBUTTONDBLCLK),\n drag = ((flags & cv2.EVENT_FLAG_RBUTTON) > 0),\n release = (event == cv2.EVENT_RBUTTONUP))\n \n return mouse_move, mouse_left_state, mouse_mid_state, mouse_right_state\n \n # .................................................................................................................\n \n def _get_modifier_state(self, event, flags):\n \n modifiers = self.modifier_state(alt = ((flags & cv2.EVENT_FLAG_ALTKEY) > 0),\n ctrl = ((flags & cv2.EVENT_FLAG_CTRLKEY) > 0),\n shift = ((flags & cv2.EVENT_FLAG_SHIFTKEY) > 0))\n \n return modifiers\n \n # .................................................................................................................\n \n def _start_hover(self, mxy_array):\n self._change_state(\"hover\")\n \n # .................................................................................................................\n \n def _start_draw(self, mxy_array): \n self.entity_collection.new_entity_in_progress(mxy_array)\n self._change_state(\"draw\")\n \n # .................................................................................................................\n \n def _start_drag(self, mxy_array, record_history = True):\n \n # Enter dragging mode if the mouse was near enough to a point\n if self.entity_collection.point_in_range(mxy_array):\n self._change_state(\"drag\")\n \n # Manually allow for recording list history, since we may not always want to store every drag start\n if record_history:\n self.entity_collection.record_list_history()\n \n return\n \n # .................................................................................................................\n \n def _insert_point(self, mxy_array, record_history = True):\n \n # Insert a point into an existing entity, if the user clicked near a line segment\n if self.entity_collection.line_in_range(mxy_array):\n insert_success = self.entity_collection.insert_entity_point(mxy_array, record_history = record_history)\n self._debug_print(\"INSERT POINT ({})\".format(\"success\" if insert_success else \"failed\"))\n \n # Enter dragging mode on the newly added point because it just feels right\n if insert_success:\n self._start_drag(mxy_array, record_history = False)\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def _remove_point(self, mxy_array, record_history = True):\n \n # Delete a point if the mouse was near enough\n if self.entity_collection.point_in_range(mxy_array):\n removal_success = self.entity_collection.remove_entity_point(record_history = record_history)\n self._debug_print(\"REMOVE POINT ({})\".format(\"success\" if removal_success else \"failed\"))\n \n if removal_success:\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def _remove_entity(self, mxy_array, record_history = True):\n \n # Delete an 
entire entity if the mouse is near enough to a point or line\n if self.entity_collection.point_in_range(mxy_array):\n self.entity_collection.remove_entity(record_history = record_history)\n self._debug_print(\"REMOVE ENTITY (from point)\")\n \n # Signal change to entities\n self._entity_change = True\n \n elif self.entity_collection.line_in_range(mxy_array):\n self.entity_collection.remove_entity(record_history = record_history)\n self._debug_print(\"REMOVE ENTITY (from line)\")\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def _draw_add_point(self, mxy_array):\n return self.entity_collection.build_entity_in_progress(mxy_array)\n \n # .................................................................................................................\n \n def _complete_draw(self, mxy_array):\n self.entity_collection.finish_entity_in_progress()\n self._start_hover(mxy_array)\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def _cancel_draw(self, mxy_array):\n self.entity_collection.clear_entity_in_progress()\n self._start_hover(mxy_array)\n \n return\n \n # .................................................................................................................\n \n def _drag_point(self, mxy_array): \n self.entity_collection.move_entity_point(mxy_array)\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def _complete_drag(self, mxy_array):\n self._start_hover(mxy_array)\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def mouse_callback(self, event, mx, my, flags, param):\n \n '''\n This callback handles all mouse/window events. It should be passed to a window using:\n cv2.setMouseCallback(window_name, object_instance.mouse_callback)\n \n Alternatively, passing just the object instance will work as well:\n cv2.setMouseCallback(window_name, object_instance)\n \n Note that to react to keypresses, the keypress_callback must be called as well\n (this must be polled in a loop, see keypress_callback help for details)\n '''\n\n mxy_array = np.int32((mx, my))\n \n # Figure out what the mouse is doing as well as modifier keys\n mouse_move, *mouse_button_states = self._get_mouse_state(event, flags)\n modifier_state = self._get_modifier_state(event, flags)\n \n # Record mouse position changes over time\n self.mouse_xy_history.appendleft(mxy_array)\n \n # Call the appropriate mouse callback, based on the current state\n mouse_state_func = self.mouse_state_callbacks[self.state]\n mouse_state_func(mxy_array, mouse_move, *mouse_button_states, modifier_state)\n \n return\n \n # .................................................................................................................\n \n def _mouse_hover_callback(self, mxy_array, mouse_move, mouse_left, mouse_mid, mouse_right, modifiers):\n \n ''' \n This callback handles mouse interactions when the drawer is in the hovering/idle state \n (i.e. 
no points-in-progress, not dragging)\n '''\n \n # Shift + left click enters drawing mode\n if modifiers.shift and mouse_left.click:\n self._start_draw(mxy_array)\n \n # Ctrl + left click inserts points into existing entities\n elif modifiers.ctrl and mouse_left.click:\n self._insert_point(mxy_array)\n \n # Left click (no modifiers) enters dragging state\n elif mouse_left.click:\n self._start_drag(mxy_array)\n \n # Ctrl + right click removes entities\n elif modifiers.ctrl and mouse_right.click:\n self._remove_entity(mxy_array)\n \n # Remove nearby points with a single right click\n elif mouse_right.click:\n self._remove_point(mxy_array)\n \n return\n \n # .................................................................................................................\n \n def _mouse_draw_callback(self, mxy_array, mouse_move, mouse_left, mouse_mid, mouse_right, modifiers):\n \n ''' \n This callback handles mouse interactions when the drawer is in the drawing state \n (i.e. points-in-progress) \n '''\n \n # Shift + left click adds more drawing points\n if modifiers.shift and mouse_left.click:\n self._draw_add_point(mxy_array)\n if self.entity_collection.check_entity_in_progress_complete():\n self._complete_draw(mxy_array)\n \n # Double left click ends drawing\n elif mouse_left.double_click:\n self._complete_draw(mxy_array)\n \n # Right click cancels drawing\n elif mouse_right.click:\n self._cancel_draw(mxy_array)\n \n return\n \n # .................................................................................................................\n \n def _mouse_drag_callback(self, mxy_array, mouse_move, mouse_left, mouse_mid, mouse_right, modifiers):\n \n '''\n This callback handles mouse interactions when the drawer is in the dragging state\n (i.e. no points-in-progress, but a point was clicked & held)\n '''\n \n # Release left click to end dragging\n if mouse_left.release:\n self._complete_drag(mxy_array)\n \n # Left click & drag to drag existing points around\n elif mouse_left.drag:\n self._drag_point(mxy_array)\n \n return\n \n # .................................................................................................................\n \n def keypress_callback(self, key_code, modifier_code):\n \n '''\n This callback handles keypress events which affect entities (undoing or nudging for example)\n It must be called in a loop using the keypress output from OpenCV's keypressEx result:\n \n key_code, modifier_code = waitKey_ex(frame_delay_ms)\n this_object_instance.keypress_callback(key_code, modifier_code)\n \n It's important that the keypress_ex value is used, not the normal cv2.waitKey() result, which\n does not contain information needed to catch modifier keys properly!\n '''\n \n # Only react to real keypress events (returns -1 if there is no keypress)\n if key_code != -1:\n \n # Interpret the modifier code, since we'll use these keys to implement certain functions\n mod_shift, mod_caps, mod_ctrl, mod_alt, mod_numlock, mod_super = \\\n keypress_modifier_decoder(modifier_code)\n \n # Call the appropriate keypress callback, based on the drawer state\n key_state_func = self.keypress_state_callbacks[self.state]\n key_state_func(key_code, mod_shift, mod_ctrl, mod_alt, self.last_mouse_xy)\n \n return\n \n # .................................................................................................................\n \n def _key_hover_callback(self, keycode, shift, ctrl, alt, mxy_array):\n \n ''' \n This callback handles keypress interactions when in the hovering/idle state \n (i.e. 
no points-in-progress, no dragging)\n '''\n \n # Nudge points with arrow keys\n self._arrow_nudge(mxy_array, keycode, shift)\n \n # Revert changes to the entity list (ctrl + z)\n self._undo_changes(mxy_array, keycode, ctrl)\n \n # Snap points to border (b key)\n self._snap_point_to_border(mxy_array, keycode)\n \n return\n \n # .................................................................................................................\n \n def _arrow_nudge(self, mxy_array, keycode, mod_shift):\n \n # List out key values for convenience\n left_arrow = 65361\n up_arrow = 65362\n right_arrow = 65363\n down_arrow = 65364\n a_key = 97\n w_key = 119\n d_key = 100\n s_key = 115\n arrow_key_pressed = (keycode in {left_arrow, up_arrow, right_arrow, down_arrow, a_key, w_key, d_key, s_key})\n \n # Only check for nearby points if an arrow key is actually pressed\n if arrow_key_pressed:\n \n # If the mouse isn't in range of any points, don't do anything\n if not self.entity_collection.point_in_range(mxy_array):\n return\n \n # Build shift amounts/direction\n amount_to_shift = 1 + 10 * mod_shift\n left_shift = int(keycode == left_arrow or keycode == a_key)\n right_shift = int(keycode == right_arrow or keycode == d_key)\n up_shift = int(keycode == up_arrow or keycode == w_key)\n down_shift = int(keycode == down_arrow or keycode == s_key)\n x_shift = amount_to_shift * (right_shift - left_shift)\n y_shift = amount_to_shift * (down_shift - up_shift)\n self.entity_collection.shift_entity_point(x_shift, y_shift, record_history = True)\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def _undo_changes(self, mxy_array, keycode, mod_ctrl):\n \n # List out key values for convenience\n lower_z = 122\n upper_z = 90\n undo_pressed = (keycode in {lower_z, upper_z} and mod_ctrl)\n \n # Revert changes to the entity list if ctrl + z is pressed\n if undo_pressed:\n self.entity_collection.undo()\n self._debug_print(\"UNDO\")\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def _snap_point_to_border(self, mxy_array, keycode):\n \n # List out key values for convenience\n lower_b = 98\n upper_b = 66\n b_pressed = (keycode in {lower_b, upper_b})\n \n # Check if we're near enough to a point, and if that point is near enough to the frame borders to snap\n if b_pressed: \n if self.entity_collection.point_in_range(mxy_array): \n snap_success = self.entity_collection.snap_to(min_x = self.border_size_px,\n max_x = self.frame_wh[0] + self.border_size_px,\n min_y = self.border_size_px,\n max_y = self.frame_wh[1] + self.border_size_px,\n record_history = True)\n self._debug_print(\"SNAP-TO-BORDER ({})\".format(\"success\" if snap_success else \"failed\"))\n \n # Signal change to entities\n self._entity_change = True\n \n return\n \n # .................................................................................................................\n \n def _key_draw_callback(self, keycode, shift, ctrl, alt, mxy_array):\n \n ''' \n This callback handles keypress interactions when in the drawing state \n (i.e. 
points-in-progress) \n        '''\n        \n        # Keypress can be used to undo last drawn point\n        self._undo_changes(mxy_array, keycode, ctrl)\n        \n        return\n    \n    # .................................................................................................................\n    \n    def _key_drag_callback(self, keycode, shift, ctrl, alt, mxy_array):\n        \n        '''\n        This callback handles keypress interactions when in the dragging state\n        (i.e. no points-in-progress, but a point was clicked & held)\n        '''\n        \n        # Keypresses should do nothing in the dragging state\n        \n        return\n    \n    # .................................................................................................................\n    \n    def _change_state(self, new_state, debug = True):\n\n        old_state = self.state\n        \n        if new_state in self.mouse_state_callbacks:\n            self.state = new_state\n        \n        self._debug_print(\"STATE: {} -> {}\".format(old_state, new_state))\n        \n        return\n\n    # .................................................................................................................\n    \n    def add_border_to_frame(self, frame):\n        \n        # copyMakeBorder(src, top, bottom, left, right, borderType[, dst[, value]]) -> dst\n        if self.border_size_px > 0:            \n            return cv2.copyMakeBorder(frame,\n                                      top = self.border_size_px,\n                                      bottom = self.border_size_px,\n                                      left = self.border_size_px,\n                                      right = self.border_size_px,\n                                      borderType = cv2.BORDER_CONSTANT,\n                                      value = (40, 40, 40))\n        \n        return frame.copy()\n\n    # .................................................................................................................\n    \n    def annotate(self, frame):\n        \n        '''\n        Function for drawing all finished entities (and any in-progress entity) onto a given frame\n        Note this function returns a new (bordered) copy of the frame, rather than drawing in-place on the input\n        '''\n        \n        # Add border to incoming frame if needed\n        bordered_frame = self.add_border_to_frame(frame)\n        \n        # Draw all existing entities\n        return self.entity_collection.draw_all(bordered_frame, self.last_mouse_xy)\n    \n    # .................................................................................................................\n    \n    def mouse_trail(self, frame, line_color = (0, 0, 127), max_thickness = 12, max_history = 85):\n        \n        '''\n        Function for drawing a trail of the mouse history over the frame\n        '''\n        \n        num_history = min(len(self.mouse_xy_history), max_history)\n        for each_idx in range(num_history - 1):\n            pt1 = tuple(self.mouse_xy_history[each_idx])\n            pt2 = tuple(self.mouse_xy_history[1 + each_idx])\n            line_thickness = int(round(max_thickness*(num_history - each_idx)/num_history + 1))\n            cv2.line(frame, pt1, pt2, line_color, line_thickness, cv2.LINE_AA)\n        \n        return\n    \n    # .................................................................................................................\n    # .................................................................................................................\n\n\n# =====================================================================================================================\n# =====================================================================================================================\n \n \nclass Entity_Collection:\n    \n    # .................................................................................................................\n    \n    def __init__(self,\n                 initial_entity_list,\n                 minimum_entities = 0,\n                 maximum_entities = None,\n                 minimum_points_per_entity = 2,\n                 maximum_points_per_entity = None,\n                 maximum_undos = 100,\n                 debug_mode = False):\n        \n        '''\n        Class used to manage groups of interactive entity 
objects as well as 'entity-in-progress' objects,\n which are used to show entities as they are being created/drawn\n Works entirely in pixel units!\n '''\n \n # Store defining characteristics\n self.min_entities = minimum_entities\n self.max_entities = maximum_entities\n self.min_points_per_entity = minimum_points_per_entity\n self.max_points_per_entity = maximum_points_per_entity\n self.max_undos = maximum_undos\n self.debug_mode = debug_mode\n \n # Storage for setting entity boundaries\n self.min_xy = np.array((-np.inf, -np.inf))\n self.max_xy = np.array((np.inf, np.inf))\n \n # Storage for drawing style\n self._finished_aesthetics_dict = {}\n self._inprog_aesthetics_dict = {}\n \n # Storage for possible custom drawing functions\n self._entity_draw_func = None\n self._inprog_draw_func = None\n \n # Initialize storage for undo\n self.undo_list_history = deque([], maxlen = maximum_undos)\n \n # Initialize storage for keeping track of most recent closest entities/points/lines\n self._closest_entity = None\n self._closest_point = None\n self._closest_line = None\n \n # Initialize storage for entities\n self.entity_list = self._initialize_entities(initial_entity_list)\n self.entity_in_progress = None\n \n # Set initial drawing style\n self.aesthetics()\n \n # .................................................................................................................\n \n def __repr__(self):\n return \"\\n\\n\".join([repr(each_entity) for each_entity in self.entity_list])\n \n # .................................................................................................................\n \n def __len__(self):\n return len(self.entity_list)\n \n # .................................................................................................................\n \n def __iter__(self):\n return iter(self.entity_list)\n \n # .................................................................................................................\n \n def _create_new_entity(self, initial_points = [], minimum_points = None, maximum_points = None,\n replace_drawing_function = True):\n \n # Use the built-in minimum/maximum point counts if they aren't provided\n minimum_points = self.min_points_per_entity if minimum_points is None else minimum_points\n maximum_points = self.max_points_per_entity if maximum_points is None else maximum_points\n \n # Create the new entity and add a custom drawing function, if available\n new_entity = Interactive_Entity(minimum_points, maximum_points, self.debug_mode)\n new_entity.set_boundaries(self.min_xy, self.max_xy)\n new_entity.aesthetics(**self._finished_aesthetics_dict)\n \n # Add initial points if provided\n if len(initial_points) > 0:\n new_entity.initialize_points(initial_points)\n \n # Add custom drawing function if needed\n if self._entity_draw_func and replace_drawing_function:\n new_entity.update_drawing_function(self._entity_draw_func)\n \n return new_entity\n \n # .................................................................................................................\n \n def _initialize_entities(self, initial_entity_list):\n \n # Build list of entity objects from lists of points\n entity_list = deque([], maxlen = self.max_entities)\n for each_entity_def in initial_entity_list:\n \n # Skip empty entities\n empty_entity = (len(each_entity_def) == 0)\n if empty_entity:\n continue\n \n # Create a new interactive entity object for each set of points\n new_entity = self._create_new_entity(each_entity_def)\n entity_list.append(new_entity)\n \n return 
entity_list\n    \n    # .................................................................................................................\n    \n    def aesthetics(self, finished_color = (0, 255, 255), in_progress_color = (255, 255, 0),\n                   finished_thickness = 1, in_progress_thickness = 1,\n                   anchor_radius = 3, line_type = cv2.LINE_AA,\n                   show_anchors = True):\n        \n        # Update internal records for the 'finished' entity styling\n        self._finished_aesthetics_dict = {\"color\": finished_color,\n                                          \"thickness\": finished_thickness,\n                                          \"anchor_radius\": anchor_radius,\n                                          \"line_type\": line_type,\n                                          \"show_anchors\": show_anchors}\n        \n        # Update internal records for the 'in-progress' entity styling\n        self._inprog_aesthetics_dict = {\"color\": in_progress_color,\n                                        \"thickness\": in_progress_thickness,\n                                        \"anchor_radius\": anchor_radius,\n                                        \"line_type\": line_type,\n                                        \"show_anchors\": show_anchors}\n        \n        # Propagate changes to finished entities\n        for each_entity in self.entity_list:\n            each_entity.aesthetics(**self._finished_aesthetics_dict)\n        \n        # Propagate changes to in-progress entity, if available\n        if self.entity_in_progress is not None:\n            self.entity_in_progress.aesthetics(**self._inprog_aesthetics_dict)\n        \n        return self._finished_aesthetics_dict, self._inprog_aesthetics_dict\n    \n    # .................................................................................................................\n    \n    def record_list_history(self):\n        \n        ''' Function used to record a copy of all current entity points, so they can be restored later if needed '''\n        \n        record_list = [each_entity.points() for each_entity in self.entity_list]\n        self.undo_list_history.append(record_list)\n    \n    # .................................................................................................................\n    \n    def clear_list_history(self):\n        \n        ''' Function used to forcefully remove all undo history '''\n        \n        self.undo_list_history.clear()\n    \n    # .................................................................................................................\n    \n    def undo(self):\n        \n        '''\n        Function used to restore previous states \n        If an entity-in-progress exists, undo will remove the most recently added point\n        If no entity-in-progress exists, undo will restore the last recorded copy of all entity points\n        '''\n        \n        # Remove last point from the entity-in-progress if it exists\n        if self.entity_in_progress is not None:\n            \n            if len(self.entity_in_progress) > 1:\n                self.entity_in_progress.remove_point(-1)\n        \n        # Otherwise, revert entity list to previous state (assuming there is one!)\n        elif len(self.undo_list_history) > 0:\n            record_list = self.undo_list_history.pop()\n            self.entity_list = self._initialize_entities(record_list)\n    \n    # .................................................................................................................\n    \n    def replace_drawing_functions(self, new_completed_function = None, new_inprogress_function = None):\n        \n        # Only replace the 'completed' drawing function if something was provided\n        if new_completed_function is not None:\n            self._entity_draw_func = new_completed_function\n            for each_entity in self.entity_list:\n                each_entity.update_drawing_function(new_completed_function)\n        \n        # Only replace the 'inprogress' drawing function if something was provided\n        if new_inprogress_function is not None:\n            self._inprog_draw_func = new_inprogress_function\n            if self.entity_in_progress is not None:\n                self.entity_in_progress.update_drawing_function(new_inprogress_function)\n    \n    # 
.................................................................................................................\n \n def new_entity_in_progress(self, new_point_xy):\n \n '''\n Function used to create a special entity-in-progress, intended for drawing feedback\n The entity-in-progress does not become part of the full entity list until calling: \n finish_entity_in_progress()\n '''\n \n # Create a new entity in progress, with the ability to have no points so it can be drawn up\n new_points = np.int32(new_point_xy)\n new_in_progress = self._create_new_entity(minimum_points = 0, replace_drawing_function = False)\n new_in_progress.aesthetics(**self._inprog_aesthetics_dict)\n new_in_progress.initialize_points(new_points)\n \n # Add a custom drawing function if needed\n if self._inprog_draw_func:\n new_in_progress.update_drawing_function(self._inprog_draw_func)\n self.entity_in_progress = new_in_progress\n \n # .................................................................................................................\n \n def build_entity_in_progress(self, new_point_xy, record_history = True):\n \n ''' Function for building up the entity-in-progress, by appending new points '''\n \n return self.entity_in_progress.add_point(new_point_xy)\n \n # .................................................................................................................\n \n def clear_entity_in_progress(self):\n \n ''' Function which clears/removes the entity-in-progress '''\n \n self.entity_in_progress = None\n \n # .................................................................................................................\n \n def check_entity_in_progress_complete(self):\n \n eip_exists = (self.entity_in_progress is not None)\n max_limit_exists = (self.max_points_per_entity is not None)\n valid_to_check = (eip_exists and max_limit_exists)\n \n return (len(self.entity_in_progress) == self.max_points_per_entity) if valid_to_check else False\n \n # .................................................................................................................\n \n def finish_entity_in_progress(self, record_history = True):\n \n ''' Add entity-in-progress to the entity list '''\n \n # Grab the current set of points from the entity-in-progress\n points_in_progress_px = self.entity_in_progress.points()\n num_points = len(points_in_progress_px)\n \n # Delete the entity in progress regardless of whether it is valid or not\n self.clear_entity_in_progress()\n \n # Don't do anything if there aren't enough points in the entity in progress\n if num_points < self.min_points_per_entity:\n return False\n \n # Create a new 'completed' entity from the entity in progress, assuming it has enough (minimum) points!\n new_entity = self._create_new_entity(points_in_progress_px)\n \n # Record history for using undo, if needed\n if record_history:\n self.record_list_history()\n \n # Add new entity to the deck\n self.entity_list.append(new_entity)\n \n # .................................................................................................................\n \n def set_entity_boundaries(self, min_xy, max_xy):\n \n ''' Function for setting min/max boundaries for all entities '''\n \n # Update internal min/max settings\n self.min_xy = np.array(min_xy)\n self.max_xy = np.array(max_xy)\n \n # Update min/max of any existing entities\n for each_entity in self.entity_list:\n each_entity.set_boundaries(self.min_xy, self.max_xy)\n \n # 
.................................................................................................................\n    \n    def remove_entity(self, entity_index = None, record_history = True):\n        \n        ''' Function for removing an entire entity from the collection '''\n        \n        # Don't do anything if we would drop below the minimum allowed number of entities\n        if len(self.entity_list) <= self.min_entities:\n            return\n        \n        # Record history for using undo, if needed\n        if record_history:\n            self.record_list_history()\n        \n        # Use the last known closest entity if the index wasn't explicitly provided\n        entity_index = self._closest_entity if entity_index is None else entity_index\n        \n        del self.entity_list[entity_index]\n    \n    # .................................................................................................................\n    \n    def scale_entities(self, scale_x, scale_y, offset_px):\n        \n        ''' Function for applying scaling to all existing entity points. Used to account for frame size changes '''\n        \n        # Apply scaling to each entity individually\n        scale_successes = []\n        for each_entity in self.entity_list:\n            scale_success = each_entity.scale_points(scale_x, scale_y, offset_px)\n            scale_successes.append(scale_success)\n        \n        # Delete undo history, since it probably won't be preserved correctly after scaling changes\n        self.clear_list_history()\n        \n        return all(scale_successes)\n    \n    # .................................................................................................................\n    \n    def insert_entity_point(self, new_point_xy, entity_index = None, line_index = None, record_history = True):\n        \n        ''' Function used to insert a single point into a specific entity '''\n        \n        # Record history for using undo, if needed\n        if record_history:\n            self.record_list_history()\n        \n        # Use the last known closest entity/point if the indices weren't explicitly provided\n        entity_index = self._closest_entity if entity_index is None else entity_index\n        line_index = self._closest_line if line_index is None else line_index\n        \n        return self.entity_list[entity_index].insert_point(line_index, new_point_xy)\n    \n    # .................................................................................................................\n    \n    def remove_entity_point(self, entity_index = None, point_index = None, record_history = True):\n        \n        ''' Function for removing a single point from an existing entity '''\n        \n        # Record history for using undo, if needed\n        if record_history:\n            self.record_list_history()\n        \n        # Use the last known closest entity/point if the indices weren't explicitly provided\n        entity_index = self._closest_entity if entity_index is None else entity_index\n        point_index = self._closest_point if point_index is None else point_index\n        \n        return self.entity_list[entity_index].remove_point(point_index)\n    \n    # .................................................................................................................\n    \n    def move_entity_point(self, new_point_xy, entity_index = None, point_index = None, record_history = False):\n        \n        ''' Function for moving a single point of an existing entity '''\n        \n        # Record history for using undo, if needed\n        if record_history:\n            self.record_list_history()\n        \n        # Use the last known closest entity/point if the indices weren't explicitly provided\n        entity_index = self._closest_entity if entity_index is None else entity_index\n        point_index = self._closest_point if point_index is None else point_index\n        \n        return self.entity_list[entity_index].move_point(point_index, new_point_xy)\n    \n    # 
.................................................................................................................\n    \n    def shift_entity_point(self, x_shift, y_shift, entity_index = None, point_index = None, record_history = False):\n        \n        ''' Function for shifting a single point of an existing entity '''\n        \n        # Record history for using undo, if needed\n        if record_history:\n            self.record_list_history()\n        \n        # Use the last known closest entity/point if the indices weren't explicitly provided\n        entity_index = self._closest_entity if entity_index is None else entity_index\n        point_index = self._closest_point if point_index is None else point_index\n        \n        return self.entity_list[entity_index].shift_point(point_index, x_shift, y_shift)\n    \n    # .................................................................................................................\n    \n    def snap_to(self, min_x, max_x, min_y, max_y, max_snap_distance = 25,\n                entity_index = None, point_index = None, record_history = False):\n        \n        ''' Function for snapping a point to nearby x/y boundary values '''\n        \n        # Use the last known closest entity/point if the indices weren't explicitly provided\n        entity_index = self._closest_entity if entity_index is None else entity_index\n        point_index = self._closest_point if point_index is None else point_index\n        \n        # Get the current target point location and check if it is close enough to snap to a min/max point\n        current_points = self.entity_list[entity_index].points()\n        target_xy = current_points[point_index]\n        new_point_xy = target_xy.copy()\n        \n        # Build some helpers to write this out a bit more cleanly\n        bounds_array = np.int32([(min_x, min_y), (max_x, max_y)])\n        absdists_array = np.abs(target_xy - bounds_array)\n        \n        # Get the closest x/y indices (min edge or max edge) & corresponding distances\n        # We want to make sure min/max are checked against one another, so we don't try to snap to both!\n        closest_x_idx = np.argmin(absdists_array[:, 0])\n        closest_y_idx = np.argmin(absdists_array[:, 1])        \n        closest_x_dist = absdists_array[closest_x_idx, 0]\n        closest_y_dist = absdists_array[closest_y_idx, 1]\n        \n        # Update the new point x-location if we're close enough\n        change_x = (closest_x_dist < max_snap_distance)\n        if change_x:\n            new_x = bounds_array[closest_x_idx, 0]\n            new_point_xy[0] = new_x\n        \n        # Update the new point y-location if we're close enough\n        change_y = (closest_y_dist < max_snap_distance)\n        if change_y:\n            new_y = bounds_array[closest_y_idx, 1]\n            new_point_xy[1] = new_y\n        \n        # Only update if we changed x or y values\n        if change_x or change_y:\n            \n            # Record history for using undo, if needed\n            if record_history:\n                self.record_list_history()\n            \n            return self.entity_list[entity_index].move_point(point_index, new_point_xy)\n        \n        return False\n    \n    # .................................................................................................................\n    \n    def point_in_range(self, target_xy_nparray, max_match_sq_distance = 50 ** 2):\n        \n        ''' Function for checking whether a point in an entity is within range of the target xy point '''\n        \n        # Reset record of closest entity/point/line every time we re-check this\n        self._closest_entity = None\n        self._closest_point = None\n        self._closest_line = None\n        \n        # Don't try to check closest distance if no entities exist!\n        if len(self.entity_list) < 1:\n            return False\n        \n        # First find the squared distance to every point of each entity\n        ent_dists = [each_entity.square_distances(target_xy_nparray) for each_entity in self.entity_list]\n        \n        # Now find the closest distance per 
entity\n        closest_pt_idxs = [np.argmin(each_dists) for each_dists in ent_dists]\n        closest_pt_dists = [each_edist[each_idx] for each_edist, each_idx in zip(ent_dists, closest_pt_idxs)]\n        \n        # Now find the closest point of all entities, and decide if it's within match range\n        closest_entity_idx = np.argmin(closest_pt_dists)\n        closest_pt_sq_distance = closest_pt_dists[closest_entity_idx]\n        closest_pt_idx = closest_pt_idxs[closest_entity_idx]\n        within_match_range = (closest_pt_sq_distance < max_match_sq_distance)\n        if within_match_range:\n            self._closest_entity = closest_entity_idx\n            self._closest_point = closest_pt_idx\n        \n        return within_match_range\n    \n    # .................................................................................................................\n    \n    def line_in_range(self, target_xy_nparray, max_match_sq_distance = 50 ** 2):\n        \n        ''' Function for checking whether a line segment of any entity is within range of the target xy point '''\n        \n        # Reset record of closest entity/point/line every time we re-check this\n        self._closest_entity = None\n        self._closest_point = None\n        self._closest_line = None\n        \n        # Don't try to check closest distance if no entities exist!\n        if len(self.entity_list) < 1:\n            return False\n        \n        # First find the projection distances to every line segment of each entity\n        closest_entity_idx = None\n        closest_proj_sq_dist = 1E12\n        proj_pt_idx = None\n        #proj_pt_xy = None\n        for entity_idx, each_entity in enumerate(self.entity_list):\n            \n            proj_sq_dists, proj_pts = each_entity.line_projections(target_xy_nparray)\n            \n            # Record projections which are close enough, and try to get the closest among all entities\n            shortest_proj_idx = np.argmin(proj_sq_dists)\n            shortest_sq_dist = proj_sq_dists[shortest_proj_idx]\n            if shortest_sq_dist < closest_proj_sq_dist:\n                closest_proj_sq_dist = shortest_sq_dist\n                closest_entity_idx = entity_idx\n                #proj_pt_xy = proj_pts[shortest_proj_idx]\n                proj_pt_idx = shortest_proj_idx\n        \n        within_match_range = (closest_proj_sq_dist < max_match_sq_distance)\n        if within_match_range:\n            self._closest_entity = closest_entity_idx\n            self._closest_line = proj_pt_idx\n        \n        return within_match_range\n    \n    # .................................................................................................................\n    \n    def draw_all(self, frame, last_mouse_xy):\n        \n        ''' Function for drawing all entity data onto a given frame. Draws in-place, but also returns the frame 
'''\n \n # Draw all entities in the list\n for each_entity in self.entity_list:\n each_entity.draw(frame)\n \n # Draw the in-progress entity with an additional point (mouse location) to indicate where drawing will occur\n if self.entity_in_progress:\n self.entity_in_progress.draw_with_additional_point(frame, last_mouse_xy)\n \n return frame\n \n # .................................................................................................................\n # .................................................................................................................\n\n\n# =====================================================================================================================\n# =====================================================================================================================\n\n\nclass Interactive_Entity:\n \n def __init__(self, minimum_points = 0, maximum_points = None, debug_mode = False):\n \n '''\n Helper class used to represent polygons with a given min/max number of points\n Includes functions for modifying the polygon points list, as well as handling basic drawing functionality\n Works entirely in pixel units\n '''\n \n # Set up representation variables\n self.points_array = np.int32([[]])\n self.min_points = minimum_points\n self.max_points = maximum_points\n self.debug_mode = debug_mode\n \n # Set up optional out-of-bounds variables\n self.min_xy = np.array((-np.inf, -np.inf))\n self.max_xy = np.array((np.inf, np.inf))\n \n # Set up drawing variables\n self.show_anchors = None\n self.line_gfx = {}\n self.anchor_gfx = {}\n self.aesthetics()\n \n # Set default drawing function\n self._draw_func = self._default_draw\n \n # .................................................................................................................\n \n def __repr__(self):\n \n repr_strs = [\"Entity\",\n \" Minimum points: {}\".format(self.min_points),\n \" Current points: {}\".format(len(self.points_array)),\n \" Maximum points: {}\".format(self.max_points)]\n \n # Include additional point location, if we have points!\n if not self._have_no_points():\n repr_strs += [\" Min xy: ({}, {})\".format(*np.min(self.points(), axis=0)),\n \" Max xy: ({}, {})\".format(*np.max(self.points(), axis=0))]\n \n return \"\\n\".join(repr_strs)\n \n # .................................................................................................................\n \n def __len__(self):\n return len(self.points_array)\n \n # .................................................................................................................\n \n def _update_points_array(self, new_points_array):\n \n ''' Function used to ensure constrained update of points array '''\n \n # Check that we still have a valid number of points before saving\n number_new_points = len(new_points_array)\n min_ok = (number_new_points >= self.min_points if self.min_points is not None else True)\n max_ok = (number_new_points <= self.max_points if self.max_points is not None else True)\n update_success = (min_ok and max_ok)\n \n # Only update the points array if we have a valid number of points\n if not update_success:\n if self.debug_mode:\n print(\"\",\n \"Can't update entity points! 
Not within min/max point limits\",\n                      \"  Minimum points: {}\".format(self.min_points),\n                      \"  Maximum points: {}\".format(self.max_points),\n                      \"  Tried: {}\".format(number_new_points),\n                      sep=\"\\n\")\n            return update_success\n        \n        # Apply boundary restrictions\n        new_points_array = np.int32(np.clip(new_points_array, self.min_xy, self.max_xy))\n        \n        # If we get this far, we're allowed to update the points array\n        self.points_array = new_points_array\n        update_success = True\n        \n        return update_success\n    \n    # .................................................................................................................\n    \n    def _default_draw(self, frame, points_px_npint32):\n        \n        cv2.polylines(frame, [points_px_npint32], **self.line_gfx)\n        if self.show_anchors:\n            for each_point in points_px_npint32:\n                cv2.circle(frame, tuple(each_point), **self.anchor_gfx)\n    \n    # .................................................................................................................\n    \n    def aesthetics(self, color = (0, 255, 255), thickness = 1, anchor_radius = 3, line_type = cv2.LINE_AA,\n                   show_anchors = True):\n        \n        ''' Function used to set the visual appearance of an entity '''\n        \n        # Set flag for showing/hiding anchors (i.e. points joining line segments)\n        self.show_anchors = show_anchors\n        \n        # Set up graphical appearance of line drawings\n        self.line_gfx = {\"isClosed\": True,\n                         \"color\": color,\n                         \"thickness\": thickness,\n                         \"lineType\": line_type}\n        \n        # Set up graphical appearance of anchor drawings\n        self.anchor_gfx = {\"radius\": anchor_radius,\n                           \"color\": color,\n                           \"thickness\": -1,\n                           \"lineType\": line_type}\n    \n    # .................................................................................................................\n    \n    def set_boundaries(self, min_xy, max_xy):\n        \n        # Set up boundary restrictions\n        self.min_xy = np.array(min_xy)\n        self.max_xy = np.array(max_xy)\n    \n    # .................................................................................................................\n    \n    def points(self, as_int32_array = True):\n        \n        ''' Function for returning the entity points (in pixels) as either an int32 array or a list '''\n        \n        return self.points_array if as_int32_array else self.points_array.tolist()\n    \n    # .................................................................................................................\n    \n    def update_drawing_function(self, drawing_function, test_validity = True):\n        \n        ''' \n        Function used to override the default drawing functionality \n        The new drawing function must have arguments:\n            drawing_function(frame, points_npint32_array)\n        \n        Can be used to change the styling of the drawing \n        (for example, by adding orientation markers or special coloring)\n        '''\n        \n        self._draw_func = drawing_function\n    \n    # .................................................................................................................\n    \n    def square_distances(self, target_xy_nparray):\n        \n        ''' Function which returns the squared distance of every point in the entity to the given target xy point '''\n        \n        return np.sum(np.square(self.points_array - target_xy_nparray), axis=1)\n    \n    # .................................................................................................................\n    \n    def line_projections(self, target_xy_nparray, out_of_bounds_distance = np.inf):\n        \n        ''' \n        Function which returns the projection distance of the target point to each possible line segment formed\n        by sequential pairing of the entity points \n        '''\n        \n        # Don't do any checks unless 
we've got at least 2 points!\n if len(self.points_array) < 2:\n return [out_of_bounds_distance], [None]\n \n # Get the entity points we'll use for calculating the projections\n check_points = self.points_array\n roll_points = np.roll(check_points, 1, axis=0)\n \n projection_sq_distances = []\n projection_points = []\n for each_start_point, each_end_point in zip(check_points, roll_points):\n \n # Calculate each line segment vector and the point-to-segment normalized projection\n each_vec = each_end_point - each_start_point\n vec_length = np.linalg.norm(each_vec)\n norm_vec = each_vec / vec_length\n shifted_target = target_xy_nparray - each_start_point\n shadow_length = np.dot(norm_vec, shifted_target)\n \n # If the projection doesn't land on the line segment, we won't bother calculating the projection distance\n norm_shadow = shadow_length / vec_length\n projects_to_line = (0.0 < norm_shadow < 1.0)\n if not projects_to_line:\n projection_sq_distances.append(out_of_bounds_distance)\n projection_points.append(None)\n continue\n \n # If we get here, the point can be projected onto the line, so find the projection distance\n proj_pt = each_start_point + (shadow_length * norm_vec)\n proj_sq_distance = np.sum(np.square(target_xy_nparray - proj_pt))\n projection_sq_distances.append(proj_sq_distance)\n projection_points.append(proj_pt)\n \n return projection_sq_distances, projection_points\n \n # .................................................................................................................\n \n def initialize_points(self, initial_points):\n \n ''' Function used to set initial points of the entity (without having to draw/add them one-by-one) '''\n \n initial_points_array = np.int32(np.atleast_2d(initial_points))\n update_success = self._update_points_array(initial_points_array)\n \n return update_success\n \n # .................................................................................................................\n \n def scale_points(self, scale_x, scale_y, offset_px = 0):\n \n ''' \n Function used to scale all x/y points by a given amount\n Used to deal with changing window sizes \n \n Inputs:\n scale_x, scale_y -> amount to scale x/y co-ordinates (1.0 does no scaling)\n offset_px -> amount to subtract and re-add before and after scaling (used to account for borders)\n '''\n \n # If we have no points, we don't need to scale!\n if self._have_no_points():\n return True\n \n offset_array = np.int32((offset_px, offset_px))\n new_points_array = np.float32(self.points_array - offset_array) * np.float32((scale_x, scale_y))\n new_points_array = np.int32(np.round(new_points_array)) + offset_array\n update_success = self._update_points_array(new_points_array)\n \n return update_success\n \n # .................................................................................................................\n \n def add_point(self, new_point_xy):\n \n ''' Function used to append a point to the entity '''\n \n # Create new array by 'appending' new xy point to old array\n new_points_array = np.concatenate((self.points_array, np.expand_dims(new_point_xy, 0)))\n update_success = self._update_points_array(new_points_array)\n \n return update_success\n \n # .................................................................................................................\n \n def insert_point(self, insert_index, new_point_xy):\n \n ''' \n Function used to insert a point between two existing points in the entity\n The insertion index will be the index of the newly added point after 
insertion\n        '''\n        \n        # Create a copy of the previous points array with an additional point added at an arbitrary index\n        new_points_array = np.insert(self.points_array, insert_index, new_point_xy, axis = 0)\n        update_success = self._update_points_array(new_points_array)\n        \n        return update_success\n    \n    # .................................................................................................................\n    \n    def remove_point(self, removal_index):\n        \n        ''' Function for removing a specific point from the entity '''\n        \n        # Create a copy of the previous points array with one entry removed\n        new_points_array = np.delete(self.points_array, removal_index, axis = 0)\n        update_success = self._update_points_array(new_points_array)\n        \n        return update_success\n    \n    # .................................................................................................................\n    \n    def move_point(self, point_index, new_point_xy):\n        \n        ''' Function for changing the x/y location of an existing point in the entity '''\n        \n        # Create a copy of the previous points array with one entry being relocated\n        new_points_array = self.points_array.copy()\n        new_points_array[point_index] = new_point_xy\n        update_success = self._update_points_array(new_points_array)\n        \n        return update_success\n    \n    # .................................................................................................................\n    \n    def shift_point(self, point_index, x_shift, y_shift):\n        \n        ''' Function used to shift an existing point of the entity '''\n        \n        # Create a copy of the previous points array with one entry being shifted\n        current_point_xy = self.points_array[point_index]\n        \n        # Construct the new point location\n        shift_tuple = (x_shift, y_shift)\n        shift_array = np.int32(shift_tuple)\n        new_point_xy = current_point_xy + shift_array\n        \n        # Use built-in move to handle change of point location (and report the update success)\n        return self.move_point(point_index, new_point_xy)\n    \n    # .................................................................................................................\n    \n    def draw(self, frame):\n        \n        ''' Function for drawing this entity onto a given frame '''\n        \n        # Don't do anything if we have no points to draw\n        if self._have_no_points():\n            return\n        \n        self._draw_func(frame, self.points_array)\n    \n    # .................................................................................................................\n    \n    def draw_with_additional_point(self, frame, new_point_xy):\n        \n        ''' Function for drawing this entity onto a given frame, with an additional point appended '''\n        \n        # Append additional point to the end of the array\n        points_px_array = np.concatenate((self.points_array, np.expand_dims(new_point_xy, 0)))\n        \n        # Now draw just like normal\n        self._draw_func(frame, points_px_array)\n    \n    # .................................................................................................................\n    \n    def _have_no_points(self):\n        \n        ''' Helper function to deal with case where we have no points for drawing '''\n        \n        # Return true if we have no points in our array\n        return (self.points_array.size == 0)\n    \n    # .................................................................................................................\n    # .................................................................................................................\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Define functions\n \n# 
.....................................................................................................................\n\ndef waitKey_ex(frame_delay_ms = 1):\n    \n    '''\n    Helper function for reading/interpreting cv2.waitKeyEx() calls \n    \n    Inputs: \n        frame_delay_ms --> Integer. If zero, will wait forever for a key press. Otherwise waits X milliseconds.\n                           (See the regular cv2.waitKey or cv2.waitKeyEx functions for more info)\n    \n    Outputs:\n        keycode, modifier_code\n        \n        keycode --> Integer, similar to the output of a regular cv2.waitKey call. \n                    Use this to determine which (normal) key was pressed. \n                    For example this value is 113 when q is pressed, 27 on esc key or 32 on spacebar\n        \n        modifier_code --> Integer. Encodes modifier keys using an 8-bit (?) value. \n                          This includes shift, ctrl, alt etc.\n                          Use the keypress_modifier_decoder function to interpret these values!\n    '''\n    \n    keypress_ex = cv2.waitKeyEx(frame_delay_ms)\n    keycode, modifier_code = keypress_ex_decoder(keypress_ex)    \n    \n    return keycode, modifier_code\n\n# .....................................................................................................................\n\ndef keypress_ex_decoder(keypress_ex):\n    \n    '''\n    Takes in a keypress code from cv2.waitKeyEx and splits it into separate outputs for the key pressed and \n    any modifier keys (shift, ctrl, alt etc.) that were active at the same time.\n    Note that pressing a modifier key on its own will return a unique keycode!\n    \n    Inputs:\n        keypress_ex --> Integer. Comes from cv2.waitKeyEx function\n    \n    Outputs:\n        keycode, modifier_code\n    '''\n    \n    # Don't bother decoding when no keys are being pressed\n    if keypress_ex == -1:\n        return -1, None\n    \n    is_modified = (keypress_ex > 65535)\n    keycode = keypress_ex & 0x0000FFFF\n    modifier_code = (keypress_ex & 0xFFFF0000) >> 16 if is_modified else None\n    \n    return keycode, modifier_code\n\n# .....................................................................................................................\n\ndef keypress_ex_quit(keypress_ex, quit_on_esc = True, quit_on_q = True):\n    \n    '''\n    Helper function for handling quitting keypresses when using cv2.waitKeyEx\n    \n    Inputs:\n        keypress_ex --> Integer. Comes from cv2.waitKeyEx\n        \n        quit_on_esc --> Boolean. If true, the esc-key will generate a break request\n        \n        quit_on_q --> Boolean. If true, the q or Q key will generate a break request\n    \n    Outputs:\n        request_break --> Boolean. 
If true, the caller of this function should quit/break out of loops\n '''\n \n # Don't bother decoding when no keys are being pressed\n if keypress_ex == -1:\n return False\n \n # Get the keycode separate from any modifiers\n keycode = keypress_ex & 0x0000FFFF\n \n return keycode_quit(keycode, quit_on_esc, quit_on_q)\n\n# .....................................................................................................................\n\ndef keycode_quit(keycode, quit_on_esc = True, quit_on_q = True):\n \n ''' Helper function for cancelling/quitting on keypress events '''\n \n # For clarity\n esc_keys = [27] if quit_on_esc else []\n q_keys = [113, 81] if quit_on_q else []\n quit_keys = esc_keys + q_keys\n \n # We'll quit if we catch one of the quit keys\n request_break = (keycode in quit_keys)\n \n return request_break\n\n# .....................................................................................................................\n\ndef keypress_modifier_decoder(modifier_code):\n \n '''\n Takes modifier codes from the keypress_ex_decoder function and returns the state of modifier keys.\n Modifier codes appear to be 8-bit numbers with each bit representing the state of a specific modifier key.\n However, 2 states are currently unknown!\n \n Inputs:\n modifier_code --> Integer returned from keypress_ex_decoder\n \n Outputs:\n shift_is_active, capslock_is_active, ctrl_is_active, alt_is_active, numlock_is_active, super_is_active\n '''\n \n # Default to all false if no modifier code is present\n if not modifier_code:\n return False, False, False, False, False, False\n \n # Hard code key checks\n shift_is_active = modifier_code & 0b00000001\n capslock_is_active = modifier_code & 0b00000010\n ctrl_is_active = modifier_code & 0b00000100\n alt_is_active = modifier_code & 0b00001000\n numlock_is_active = modifier_code & 0b00010000\n # missing modifier key\n super_is_active = modifier_code & 0b01000000\n # missing modifier key\n \n return shift_is_active, capslock_is_active, ctrl_is_active, alt_is_active, numlock_is_active, super_is_active\n\n# .....................................................................................................................\n# .....................................................................................................................\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Demo\n\nif __name__ == \"__main__\":\n \n # Set display parameters\n frame_width, frame_height = 600, 300\n blank_frame = np.full((frame_height, frame_width, 3), (83, 33, 166), dtype=np.uint8)\n frame_wh = (frame_width, frame_height)\n \n # Set up example drawer\n drawer = Entity_Drawer(frame_wh,\n minimum_entities = 0,\n maximum_entities = 100,\n minimum_points = 0,\n maximum_points = None)\n \n # Some example starting points\n test_points = [[(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]]\n drawer.initialize_entities(test_points)\n \n # Window creation & callback assignment\n window_name = \"DRAWING EXAMPLE\"\n cv2.namedWindow(window_name) \n cv2.setMouseCallback(window_name, drawer)\n\n\n while True:\n \n # Get a clean copy of the video\n display_frame = blank_frame.copy()\n \n # Get changes in zone data\n if drawer.on_change():\n print(\"Changed!\")\n print(drawer.get_entities_list())\n \n # Draw annotations\n drawn_frame = drawer.annotate(display_frame)\n cv2.imshow(window_name, drawn_frame)\n \n # Get keypresses\n keycode, modifier = waitKey_ex(10)\n if keycode_quit(keycode):\n 
break\n \n drawer.keypress_callback(keycode, modifier)\n \n # Clean up windows\n cv2.destroyAllWindows()\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Scrap\n\n","sub_path":"local/lib/ui_utils/local_ui/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":83247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"491372985","text":"import psycopg2\nimport numpy as np\nCOOP = psycopg2.connect(database='coop', host='iemdb', user='nobody')\ncursor = COOP.cursor()\n\ncursor.execute(\"\"\"\n SELECT extract(year from day + '6 months'::interval) as yr, sum(snow),\n sum(case when sday > '0306' and month < 7 then snow else 0 end) from\n alldata_ia where station = 'IA2203' and snow >= 0 GROUP by yr ORDER by yr ASC\n\"\"\")\n\nyears = []\ntotal = []\nafter = []\nfor row in cursor:\n years.append( row[0] )\n total.append( row[1] )\n after.append( row[2] )\n \nyears = np.array(years)\ntotal = np.array(total)\nafter = np.array(after)\npercentages = (total - after)/total * 100.0\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as PathEffects\n\n(fig, ax) = plt.subplots(2,1, sharex=True)\n\nax[0].set_title(\"1890-2014 Des Moines Snowfall before/after 6 March\")\nax[0].set_ylabel(\"% of Season Snow by 6 March\")\nax[0].bar(years[:-1]-0.4, percentages[:-1], ec='b', fc='b')\nax[0].axhline( np.average(percentages), color='k')\ntxt = ax[0].text(1900, np.average(percentages), \"%.1f%%\" % (np.average(percentages),),\n fontsize=18)\ntxt.set_path_effects([PathEffects.withStroke(linewidth=2,\n foreground=\"yellow\")])\nax[0].grid(True)\n\nax[1].bar(years-0.4, after, ec='b', fc='b')\nax[1].axhline( np.average(after), color='k')\nax[1].set_xlim(1890,2015)\nax[1].grid(True)\nax[1].set_ylabel(\"After 6 March Snowfall [inch]\")\ntxt = ax[1].text(1900, np.average(after), \"%.1f inch\" % (np.average(after),),\n fontsize=18)\ntxt.set_path_effects([PathEffects.withStroke(linewidth=2,\n foreground=\"yellow\")])\n\nfig.savefig('test.ps')\nimport iemplot\niemplot.makefeature('test')","sub_path":"scripts/feature/coop/snow_before_and_after.py","file_name":"snow_before_and_after.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"642863261","text":"def find_repeats(string):\n \"\"\" \n Finds repeats in input string and returns a dictionary object \n containing all the repeated characters and occurrence counts \n as key-value pairs.\n \"\"\"\n\n helper_set = set() # Stores all chars that have appeared before\n repeat_dict = {} # Stores repeat counts\n\n for char in string:\n # Checks if character has appeared previously.\n if char in helper_set:\n # Increments occurrence count if it exists, else sets to 2\n repeat_dict[char] = repeat_dict.get(char, 1) + 1\n else:\n helper_set.add(char)\n return repeat_dict\n","sub_path":"repeat.py","file_name":"repeat.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"585154297","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass Option(Model):\n    \"\"\"Option.\n\n    :param additional_prop1:\n    :type additional_prop1: str\n    :param additional_prop2:\n    :type additional_prop2: str\n    :param additional_prop3:\n    :type additional_prop3: str\n    \"\"\"\n\n    _attribute_map = {\n        'additional_prop1': {'key': 'additionalProp1', 'type': 'str'},\n        'additional_prop2': {'key': 'additionalProp2', 'type': 'str'},\n        'additional_prop3': {'key': 'additionalProp3', 'type': 'str'},\n    }\n\n    def __init__(self, **kwargs):\n        super(Option, self).__init__(**kwargs)\n        self.additional_prop1 = kwargs.get('additional_prop1', None)\n        self.additional_prop2 = kwargs.get('additional_prop2', None)\n        self.additional_prop3 = kwargs.get('additional_prop3', None)\n","sub_path":"src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"79610625","text":"from cv2 import cv2\nimport os \n\n\"\"\"Simplified version: process each file immediately after reading its name, without saving a list first\"\"\"\nfor root,dirs,files in os.walk(\"D:/Tire_Test/code3/image/7/1\", True): # walk the tree: root, directories, files\n\tfor file in files: # iterate over the files\n\t\tline = \"D:/Tire_Test/code3/image/7/1/\" + file\n\t\tprint(line)\n\t\timage = cv2.imread(line) # read the image for this file name\n\t\t# print(image.shape)\n\t\t# cropImg = image[0:1080:2, 240:1680:2] # y range first, then x range; the third value of each is the sampling step\n\t\tcropImg = image[50:600, 0:1280] # crop\n\t\tcv2.imwrite(\"D:/Tire_Test/code3/image/7/2/\" + line[29:], cropImg) # save\n","sub_path":"Tire_tread_defect_detection/TireTest_20200708/code3/image/7/cut.py","file_name":"cut.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"68558132","text":"import cv2\nfrom os import listdir\nfrom win32com.client import Dispatch\n\ndef resizeFig(pathIn, pathOut):\n\t# Load a color image in grayscale\n\timg = cv2.imread(pathIn,0)\n\tres = cv2.resize(img,(64, 64), interpolation = cv2.INTER_LINEAR)\n\tcv2.imwrite(pathOut,res)\n\ndef method1():\n\tdir_pre = 'D:/mysite_2/media/'\n\tdir_1 = 'repository/'\n\tdir_2 = 'resize_fig/'\n\ttestFileList = listdir(dir_pre + dir_1)\n\tfor name in testFileList:\n\t\tpathIn = dir_pre + dir_1 + name\n\t\tpathOut = dir_pre + dir_2 + name\n\t\tresizeFig(pathIn, pathOut)\n\t\n\th = Dispatch(\"Matlab.application\")\n\th.execute(\"cd 'D:\\\\mysite_2\\\\uploads';\")\n\th.execute(\"python_matlab('D:\\\\mysite_2\\\\media\\\\resize_fig\\\\', 'D:\\\\mysite_2\\\\media\\\\result.txt');\")\n\n\tresult_file = open(dir_pre + 'result.txt')\n\tresult = result_file.readlines()\n\tresult_file.close()\n\treturn result\n\t\ndef method2():\n\tdir_pre = 'D:/mysite_2/media/'\n\tdir_1 = 'repository/'\n\t\n\tresult_file = open(dir_pre + 'result.txt')\n\tresult = result_file.readlines()\n\tresult_file.close()\n\treturn result\n\n\n","sub_path":"part_2/mysite/python_matlab.py","file_name":"python_matlab.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"118265892","text":"import torch\nimport torch.nn.functional as F\n\nfrom torch import nn\n\nclass esem(nn.Module):\n    def 
__init__(self):\n super(esem, self).__init__()\n self.NUM_PANELS = 16\n\n self.cnn_shape = nn.Sequential(\n nn.Conv2d(1, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU()\n )\n self.cnn_line = nn.Sequential(\n nn.Conv2d(1, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU()\n )\n self.cnn_global = nn.Sequential(\n nn.Conv2d(16, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Conv2d(32, 32, 3, 2),\n nn.BatchNorm2d(32),\n nn.ReLU()\n )\n\n\n self.pre_g_fc_shape = nn.Linear(32 * 9 ** 2, 256)\n self.pre_g_batch_norm_shape= nn.BatchNorm1d(256)\n self.pre_g_fc_line = nn.Linear(32 * 9 ** 2, 256)\n self.pre_g_batch_norm_line = nn.BatchNorm1d(256)\n self.pre_g_fc2 = nn.Linear(32 * 9 ** 2, 256)\n self.pre_g_batch_norm2 = nn.BatchNorm1d(256)\n\n self.pre_g_fc_fusion = nn.Linear(256*2, 256)\n self.pre_g_batch_fusion = nn.BatchNorm1d(256)\n\n self.g = nn.Sequential(\n nn.Linear(512+512, 1024),\n nn.BatchNorm1d(1024),\n nn.ReLU(),\n nn.Linear(1024, 256*3),\n nn.BatchNorm1d(256*3),\n nn.ReLU(),\n nn.Linear(256*3, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Dropout()\n )\n self.g2 = nn.Sequential(\n nn.Linear(512 + 512, 1024),\n nn.BatchNorm1d(1024),\n nn.ReLU(),\n nn.Linear(1024, 256 * 3),\n nn.BatchNorm1d(256 * 3),\n nn.ReLU(),\n nn.Linear(256 * 3, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Dropout()\n )\n self.f = nn.Sequential(\n nn.Linear(512, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(256, 1)\n )\n\n def comp_panel_embedding(self, panel,panel2):\n batch_size = panel.shape[0]\n panel = torch.unsqueeze(panel, 1) # (batch_size, 160, 160) -> (batch_size, 1, 160, 160)\n panel2= torch.unsqueeze(panel2, 1)\n\n panel_embedding = self.cnn_line(panel2) # (batch_size, 1, 160, 160) -> (batch_size, 32, 9, 9)\n panel_embedding = panel_embedding.view(batch_size, 32 * 9 ** 2)\n panel_embedding = self.pre_g_fc_line(panel_embedding)\n panel_embedding_line = self.pre_g_batch_norm_line(panel_embedding)\n\n panel_embedding = self.cnn_shape(panel) # (batch_size, 1, 160, 160) -> (batch_size, 32, 9, 9)\n panel_embedding = panel_embedding.view(batch_size, 32 * 9 ** 2)\n panel_embedding = self.pre_g_fc_shape(panel_embedding)\n panel_embedding_shape = self.pre_g_batch_norm_shape(panel_embedding)\n\n fusion=torch.cat([panel_embedding_line,panel_embedding_shape],1)\n panel_embedding = self.pre_g_fc_fusion(fusion)\n panel_embedding = self.pre_g_batch_fusion(panel_embedding)\n panel_embedding = F.relu(panel_embedding)\n\n return panel_embedding\n\n def panel_comp_obj_pairs(self, objs,batch_size):\n obj_pairses_r =torch.zeros(batch_size, 2, 256*3).cuda()\n obj_pairses_c = torch.zeros(batch_size, 2, 256 * 3).cuda()\n obj_pairses= torch.zeros(batch_size, 54, 256 * 3).cuda()\n count=0\n index=0\n for i in range(8):\n for j in range(i):\n for k in range(j):\n if ((7-i)==0 and ((7-j)==1) and ((7-k)==2)) or((7-i)==3 and ((7-j)==4) and ((7-k)==5)):\n\n 
obj_pairses_r[:, (7-i)//3, :] = torch.cat(\n [torch.cat([objs[:, 7 - i, :], objs[:, 7 - j, :]], 1), objs[:, 7 - k, :]], 1)\n\n count -= 1\n elif ((7-i)==0 and ((7-j)==3) and ((7-k)==6)) or((7-i)==1 and ((7-j)==4) and ((7-k)==7)):\n\n obj_pairses_c[:, 7-i, :] = torch.cat(\n [torch.cat([objs[:, 7 - i, :], objs[:, 7 - j, :]], 1), objs[:, 7 - k, :]], 1)\n count -= 1\n else:\n obj_pairses[:,count,:] = torch.cat([torch.cat([objs[:, 7 - i, :], objs[:, 7 - j, :]],1), objs[:, 7-k, :]], 1)\n #obj_pairs = torch.cat([torch.unsqueeze( objs[:,7-i,:],1),torch.unsqueeze( objs[:,7-j,:],1)],2)\n #obj_pairses[:,count,:] = torch.cat([obj_pairs, torch.unsqueeze(objs[:, 7-k, :], 1)], 2)\n count+=1\n return obj_pairses,obj_pairses_c,obj_pairses_r\n\n def ans_comp_obj_pairs(self, ans,pan,batch_size):\n obj_pairses_r = torch.zeros(batch_size, 1, 256 * 3).cuda()\n obj_pairses_c = torch.zeros(batch_size, 1, 256 * 3).cuda()\n\n obj_pairses=torch.zeros(batch_size, 26, 256*3).cuda()\n count=0\n for i in range(8):\n for j in range(i):\n if (7-i)==2 and ((7-j)==5) :\n obj_pairs = torch.cat([pan[:, 7 - i, :], pan[:, 7 - j, :]], 1)\n obj_pairses_c[:, 0, :] = torch.cat([obj_pairs, ans], 1)\n count -= 1\n elif (7-i)==6 and ((7-j)==7) :\n\n\n obj_pairs = torch.cat([pan[:, 7 - i, :], pan[:, 7 - j, :]], 1)\n obj_pairses_r[:, 0, :] = torch.cat([obj_pairs, ans], 1)\n count -= 1\n else:\n obj_pairs = torch.cat([pan[:,7-i,:],pan[:,7-j,:]],1)\n obj_pairses[:,count,:] = torch.cat([obj_pairs, ans], 1)\n count+=1\n return obj_pairses,obj_pairses_c,obj_pairses_r\n def g_functin(self,context_pairs,panel_embedding_8,num_context_pairs,batch_size):\n context_pairs = torch.cat([context_pairs, panel_embedding_8.repeat(1, num_context_pairs, 1)], 2)\n context_pairs = context_pairs.view(batch_size * num_context_pairs, 1024)\n context_g_out = self.g(context_pairs)\n context_g_out = context_g_out.view(batch_size, num_context_pairs, 512)\n context_g_out = context_g_out.sum(1)\n return context_g_out\n def g_functin2(self,context_pairs,panel_embedding_8,num_context_pairs,batch_size):\n context_pairs = torch.cat([context_pairs, panel_embedding_8.repeat(1, num_context_pairs, 1)], 2)\n context_pairs = context_pairs.view(batch_size * num_context_pairs, 1024)\n context_g_out = self.g2(context_pairs)\n context_g_out = context_g_out.view(batch_size, num_context_pairs, 512)\n context_g_out = context_g_out.sum(1)\n return context_g_out\n def forward(self, x):\n batch_size = x.shape[0]\n # Compute panel embeddings\n panel_embeddings = torch.zeros(batch_size, self.NUM_PANELS, 256).cuda()\n panel_embedding_8 = self.cnn_global(x[:, 16:, :, :])\n panel_embedding_8 = self.pre_g_fc2(panel_embedding_8.view(batch_size, 32 * 9 ** 2))\n panel_embedding_8 = self.pre_g_batch_norm2(panel_embedding_8)\n panel_embedding_8 = F.relu(panel_embedding_8)\n panel_embedding_8 = torch.unsqueeze(panel_embedding_8, 1)\n for panel_ind in range(self.NUM_PANELS):\n panel = x[:, panel_ind, :, :]\n panel2= x[:, panel_ind+16, :, :]\n panel_embedding = self.comp_panel_embedding(panel,panel2)\n panel_embeddings[:, panel_ind, :] = panel_embedding\n context_embeddings = panel_embeddings[:, :int(self.NUM_PANELS/2), :] # (batch_size, 8, 256)\n answer_embeddings = panel_embeddings[:, int(self.NUM_PANELS/2):, :] # (batch_size, 8, 256)\n\n num_context_pairs = 56\n # Compute context pairs once to be used for each answer\n obj_pairses, obj_pairses_c, obj_pairses_r = self.panel_comp_obj_pairs(context_embeddings,batch_size)# (batch_size, 56, 256*3)\n\n '''context_pairs = torch.cat([context_pairs, 
panel_embedding_8.repeat(1, num_context_pairs, 1)], 2)\n context_pairs = context_pairs.view(batch_size * num_context_pairs, 1024)\n context_g_out = self.g(context_pairs)\n context_g_out = context_g_out.view(batch_size, num_context_pairs, 512)'''\n context_g_out1= self.g_functin2(obj_pairses,panel_embedding_8,54,batch_size)\n context_g_outr = self.g_functin(obj_pairses_r, panel_embedding_8, 2, batch_size)\n context_g_outc = self.g_functin(obj_pairses_c, panel_embedding_8, 2, batch_size)\n context_g_out = context_g_out1+context_g_outc+context_g_outr\n f_out = torch.zeros(batch_size, int(self.NUM_PANELS/2)).cuda()\n num_context_pairs = 28\n for answer_ind in range(8):\n answer_embedding = answer_embeddings[:, answer_ind, :] # (batch_size, 256)\n\n context_answer_pairs,context_answer_pairs_c,context_answer_pairs_r = self.ans_comp_obj_pairs(answer_embedding,context_embeddings,batch_size)# (batch_size, 28, 512)\n\n '''context_answer_pairs = torch.cat([context_answer_pairs, panel_embedding_8.repeat(1, 28, 1)], 2)\n context_answer_pairs = context_answer_pairs.view(batch_size * 28, 1024)\n context_answer_g_out = self.g(context_answer_pairs) # (8, 512)\n context_answer_g_out = context_answer_g_out.view(batch_size, 28, 512)\n context_answer_g_out = context_answer_g_out.sum(1)'''\n context_answer_g_out1 = self.g_functin2(context_answer_pairs, panel_embedding_8, 26, batch_size)\n context_answer_g_outr = self.g_functin(context_answer_pairs_r, panel_embedding_8, 1, batch_size)\n context_answer_g_outc = self.g_functin(context_answer_pairs_c, panel_embedding_8, 1, batch_size)\n context_answer_g_out = context_answer_g_out1 + context_answer_g_outc + context_answer_g_outr\n\n g_out = context_g_out + context_answer_g_out\n f_out[:, answer_ind] = self.f(g_out).squeeze()\n return F.log_softmax(f_out, dim=1)","sub_path":"model/model_esem.py","file_name":"model_esem.py","file_ext":"py","file_size_in_byte":10918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"548116758","text":"#!/usr/bin/env python \n\nimport openpyxl\n\nprint('Opening workbook...')\n\nwb = openpyxl.load_workbook('Inventaire Vignette.xlsx')\nsheet = wb.get_sheet_by_name('2012')\n\ncvmList = list()\n\nprint('Reading rows...')\n\ndef loopingCVM(startRow, letter1, letter2, letter3, letter4):\n\tfor i in range(startRow,sheet.get_highest_row()):\n\t\tdate = sheet[letter1 + str(i)].value\n\t\tvig = sheet[letter2 + str(i)].value\n\t\tcvm = sheet[letter3 + str(i)].value\n\t\tmec = sheet[letter4 + str(i)].value\n\t\tcvmData = {'date':date,'vig':vig,'cvm':cvm,'mec':mec}\n\t\tcvmList.append(cvmData)\n\n# Going through the Sheet and copying the values\n\nloopingCVM(22,'A','B','C','D')\nloopingCVM(7,'O','P','Q','R')\nloopingCVM(7,'U','V','W','X')\nloopingCVM(7,'AA','AB','AC','AD')\nloopingCVM(7,'AG','AH','AI','AJ')\nloopingCVM(7,'AM','AN','AO','AP')\nloopingCVM(7,'AS','AT','AU','AV')\nloopingCVM(7,'AY','AZ','BA','BB')\nloopingCVM(7,'BE','BF','BG','BH')\nloopingCVM(7,'BK','BL','BM','BN')\nloopingCVM(7,'BQ','BR','BS','BT')\nloopingCVM(7,'BW','BX','BY','BZ')\nloopingCVM(7,'CC','CD','CE','CF')\nloopingCVM(7,'CI','CJ','CK','CL')\n\n\nnewWB = openpyxl.Workbook()\nnewWB.create_sheet(index=0, title='Test')\nnewsheet = newWB.get_sheet_by_name('Test')\n\n\nprint('Populating tables...')\n\n\nfor i in range(len(cvmList)):\n\n\tnewsheet['A' + str(i+1)].value = cvmList[i]['date']\n\tnewsheet['B' + str(i+1)].value = cvmList[i]['vig']\n\tnewsheet['C' + str(i+1)].value = cvmList[i]['cvm']\n\tnewsheet['D' + 
str(i+1)].value = cvmList[i]['mec']\n\t\t\n\t\nprint('Writing results...')\n\nnewWB.save('test.xlsx')\n\nprint('Done.')\n","sub_path":"readxl.py","file_name":"readxl.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"322822039","text":"import socket, select, sys\nimport logging\nfrom datetime import datetime\n\n__author__ = \"timothyhseed\"\n\n\nclass dxspider(object):\n def __init__(self, node: str, port: int, call: str) -> None:\n \"\"\"\n\n :param node:\n :param port:\n :param call:\n \"\"\"\n\n self.node = node\n self.port = port\n self.call = call\n self.s = None\n self.firsttime = True\n self.socket_list = []\n self.logger = logging.getLogger()\n print(str.format(\"logger Name is {}\", self.logger.name))\n self.logger.info(str.format(\"Node is {}\", node))\n self.logger.info(str.format(\"port is {}\", port))\n self.logger.info(str.format(\"User is {}\", call))\n\n def __del__(self):\n self.logger.info(str.format(\"destructor being called \"))\n\n def do_connect(self):\n self.logger.info(str.format(\"Do_Connect\"))\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.logger.info(str.format(\"Socket created\"))\n self.s.settimeout(2)\n\n # connect to remote host\n try:\n self.s.connect((self.node, int(self.port)))\n except Exception as err:\n print(\"Unable to connect \" + str(err))\n return False\n print(\"Connected to remote host\")\n return True\n\n def get_dx(self, msg_to_send=\"\"):\n self.socket_list = [sys.stdin, self.s]\n\n # Get the list sockets which are readable\n\n read_sockets, write_sockets, error_sockets = select.select(\n self.socket_list, [], []\n )\n\n if self.firsttime is True:\n self.s.send(str(self.call + \"\\n\").encode())\n self.firsttime = False\n\n for sock in read_sockets:\n # incoming message from remote server\n if sock == self.s:\n data = sock.recv(4096)\n if not data:\n print(\"Connection closed\")\n sys.exit()\n else:\n # print data\n try:\n nw = datetime.now()\n sys.stdout.write(\n str(nw.isoformat(sep=\" \")) + \" \" + data.decode(\"utf-8\")\n )\n except:\n junk = 1\n pass\n\n # user entered a message\n else:\n if len(msg_to_send):\n msg = sys.stdin.readline()\n self.s.send(msg.encode())\n","sub_path":"ham/qsosvr/dxspider.py","file_name":"dxspider.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"321423476","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom PIL import Image\nimport scipy.io #Used to load the OCTAVE *.mat files\nimport scipy.misc #Used to show matrix as an image\nimport matplotlib.cm as cm #Used to display images in a specific colormap\nimport random #To pick random images to display\nfrom scipy.special import expit #Vectorized sigmoid function\n\nnp.set_printoptions(threshold=np.nan)\n\n\n# In[ ]:\n\n\ndatafile = 'ex3data1.mat'\nmat = scipy.io.loadmat( datafile )\nX, y = mat['X'], mat['y']\n#Insert a column of 1's to X as usual\nX = np.insert(X,0,1,axis=1)\n\n\n# In[190]:\n\n\ndef getDatumImg(row):\n \"\"\"\n Function that is handed a single np array with shape 1x400,\n crates an image object from it, and returns it\n \"\"\"\n width, height = 20, 20\n square = row[1:].reshape(width,height)\n return square.T\n \ndef displayData(indices_to_display = None):\n \"\"\"\n Function that picks 100 random rows 
from X, creates a 20x20 image from each,\n then stitches them together into a 10x10 grid of images, and shows it.\n \"\"\"\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0], icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n\n big_picture = (big_picture * 255).astype(np.int8)\n img = Image.fromarray(big_picture, mode='L')\n plt.imshow(img, cmap = cm.Greys)\n\n\n# In[191]:\n\n\ndisplayData()\n\n\n# In[ ]:\n\n\n#Hypothesis function and cost function for logistic regression\ndef h(mytheta,myX): #Logistic hypothesis function\n return expit(np.dot(myX,mytheta))\n\n#A more simply written cost function than last week, inspired by subokita:\ndef computeCost(mytheta,myX,myy,mylambda = 0.):\n m = myX.shape[0] #5000\n myh = h(mytheta,myX) #shape: (5000,1)\n term1 = np.log( myh ).dot( -myy.T ) #shape: (5000,5000)\n term2 = np.log( 1.0 - myh ).dot( 1 - myy.T ) #shape: (5000,5000)\n left_hand = (term1 - term2) / m #shape: (5000,5000)\n right_hand = mytheta.T.dot( mytheta ) * mylambda / (2*m) #shape: (1,1)\n return left_hand + right_hand #shape: (5000,5000)\n\n\n# In[85]:\n\n\ndef costGradient(mytheta,myX,myy,mylambda = 0.):\n m = myX.shape[0]\n #Tranpose y here because it makes the units work out in dot products later\n #(with the way I've written them, anyway)\n beta = h(mytheta,myX)-myy.T #shape: (5000,5000)\n\n #regularization skips the first element in theta\n regterm = mytheta[1:]*(mylambda/m) #shape: (400,1)\n\n grad = (1./m)*np.dot(myX.T,beta) #shape: (401, 5000)\n #regularization skips the first element in theta\n grad[1:] = grad[1:] + regterm\n return grad #shape: (401, 5000)\n\n\n# In[86]:\n\n\nfrom scipy import optimize\n\ndef optimizeTheta(mytheta,myX,myy,mylambda=0.):\n result = optimize.fmin_cg(computeCost, fprime=costGradient, x0=mytheta, args=(myX, myy, mylambda), maxiter=50, disp=False, full_output=True)\n return result[0], result[1]\n\n\n# In[87]:\n\n\ndef buildTheta():\n \"\"\"\n Function that determines an optimized theta for each class\n and returns a Theta function where each row corresponds\n to the learned logistic regression params for one class\n \"\"\"\n mylambda = 0.\n initial_theta = np.zeros((X.shape[1],1)).reshape(-1)\n Theta = np.zeros((10,X.shape[1]))\n for i in range(10):\n iclass = i if i else 10 #class \"10\" corresponds to handwritten zero\n logic_Y = np.array([1 if x == iclass else 0 for x in y])#.reshape((X.shape[0],1))\n itheta, imincost = optimizeTheta(initial_theta,X,logic_Y,mylambda)\n Theta[i,:] = itheta\n return Theta\n\n\n# In[94]:\n\n\nTheta = buildTheta()\n\n\n# In[92]:\n\n\ndef predictOneVsAll(myTheta,myrow):\n \"\"\"\n Function that computes a hypothesis for an individual image (row in X)\n and returns the predicted integer corresponding to the handwritten image\n \"\"\"\n classes = [10] + list(range(1,10))\n hypots = [0]*len(classes)\n #Compute a hypothesis for each possible outcome\n #Choose the maximum hypothesis to find result\n for i in range(len(classes)):\n hypots[i] = h(myTheta[i],myrow)\n return classes[np.argmax(np.array(hypots))]\n\n\n# In[192]:\n\n\nn_correct, n_total = 0., 0.\nincorrect_indices = []\nfor irow in range(X.shape[0]):\n n_total += 1\n if predictOneVsAll(Theta, 
X[irow]) == y[irow]: \n n_correct += 1\n else: incorrect_indices.append(irow)\nprint(\"Training set accuracy: %0.1f%%\"%(100*(n_correct/n_total)))\n\n","sub_path":"machine-learning/ex3/multiclass regression.py","file_name":"multiclass regression.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"639632329","text":"from dagster import check\nfrom dagster.core.host_representation.external_data import (\n ExternalScheduleExecutionData,\n ExternalScheduleExecutionErrorData,\n)\nfrom dagster.core.host_representation.handle import RepositoryHandle\n\nfrom .utils import execute_unary_api_cli_command\n\n\ndef sync_get_external_schedule_execution_data(instance, repository_handle, schedule_name):\n from dagster.cli.api import ScheduleExecutionDataCommandArgs\n\n check.inst_param(repository_handle, 'repository_handle', RepositoryHandle)\n check.str_param(schedule_name, 'schedule_name')\n\n origin = repository_handle.get_origin()\n\n return check.inst(\n execute_unary_api_cli_command(\n origin.executable_path,\n 'schedule_config',\n ScheduleExecutionDataCommandArgs(\n repository_origin=origin,\n instance_ref=instance.get_ref(),\n schedule_name=schedule_name,\n ),\n ),\n (ExternalScheduleExecutionData, ExternalScheduleExecutionErrorData),\n )\n","sub_path":"python_modules/dagster/dagster/api/snapshot_schedule.py","file_name":"snapshot_schedule.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"415573822","text":"import jobmatch\n\naccountManager = jobmatch.business.provider.account.AccountManager.getUniqueInstance()\n\n\n\n\nworks = [\n (\"UBS\", TimeUtil.getDate(1999,5,6), TimeUtil.getDate(2000,4,20), \"Ferienjob\", \"Hans Wurst\", \"Tellerwaescher\", 10),\n \n ]\n\nfor (employerName, begin, end, remarks, reference, function, pensum) in formations:\n \n work = WorkingExperience()\n formation.setCandidate(accountManager.getCandidateAccount(username).getCandidateBO())\n formation.setBeginDate(begin)\n formation.setEndDate(end)\n formation.set(Graduation.getGraduation(graduation))\n formation.setRemarks(remarks)\n school = School.getSchool(schoolname)\n school.setType(Schooltype.getSchooltype(schooltype))\n school.commit()\n formation.setSchool(school)\n formation.commit()\n \n \n \n \n\n \n\n \n","sub_path":"pse/kg2k/src/jobmatch/scripts/workingExperience.py","file_name":"workingExperience.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"608464079","text":"import pandas as PD\nimport numpy as NP\n\nDF = PD.read_csv('primary-results.csv')\n\n#print(DF.head())\n#print(len(DF))\n#print(DF.groupby('candidate'))\n'''print(\n DF.groupby('candidate').aggregate(\n {\n 'votes': [min, NP.mean, max]\n }\n )\n)'''\n#print(DF[DF['votes'] == 590502])\n'''print(\n DF.groupby('candidate').aggregate(\n {\n 'fraction_votes': [min, NP.mean, max]\n }\n )\n)'''\n#print(DF[DF['fraction_votes'] == 1])\n'''print(\n DF[\n (DF['fraction_votes'] == 1) & (DF['candidate'] == 'Hillary Clinton')\n ]\n)\ndef fracao_votos_filtro(X):\n return X['votes'].sum() > 1000000\n\n#print(DF.groupby('state').filter(fracao_votos_filtro))\n#print(DF[DF['state_abbreviation'] == 'AL']['votes'].sum())\nprint(DF.groupby(['state_abbreviation', 'candidate'])['votes'].sum())\n'''\nprint('Candidatos\\n')\nfor i in range(len(DF['candidate'].unique())):\n 
print(DF['candidate'].unique()[i])\n\nprint('\\nPivot Table Votos\\n', PD.pivot_table(\n DF, index=['state', 'party', 'candidate'], values=['votes'],\n aggfunc={'votes': NP.sum}\n))\n\nDF['rank'] = DF.groupby(['county', 'party'])['votes'].rank(ascending=False)\nprint('\\nVotos por Distrito\\n', DF[DF['county'] == 'Los Angeles'])\n\nDF_GROUPBY = DF.groupby(['state', 'party', 'candidate']).sum()\ndel DF_GROUPBY['fips']\ndel DF_GROUPBY['fraction_votes']\nDF_GROUPBY.reset_index(inplace=True)\nprint('\\nAgrupado por Estado, Partido e Candidato\\n', DF_GROUPBY.head())\n\nDF_GROUPBY['rank'] = DF_GROUPBY.groupby(['state', 'party'])['votes'].rank(ascending=False)\nprint('\\nAgrupado pelo Rank de Votos\\n', DF_GROUPBY.head(10))\n\nprint(\n '\\nPivot Table Votos por Estado, partido e candidatos\\n',\n PD.pivot_table(\n DF_GROUPBY, index=['state', 'party', 'candidate'], values=['rank', 'votes']\n )\n)\n\nprint(\n '\\nRanking dos Candidatos\\n',\n DF_GROUPBY[DF_GROUPBY['rank'] == 1]['candidate'].value_counts()\n)","sub_path":"Curso Machine Learning/Trabalhando-PANDAS-2.py","file_name":"Trabalhando-PANDAS-2.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"67253262","text":"adjacencyList = {\n 'Z': [], \n 'Y': ['Z', 'R'],\n 'W': ['Y', 'S'],\n 'S': ['T'],\n 'T': ['W'],\n 'P': ['W', 'R'],\n 'R': ['X'],\n 'X': [],\n 'Q': ['X']\n}\n\nstack = []\nmark = {\n 'Z': False,\n 'Y': False,\n 'W': False,\n 'S': False,\n 'T': False,\n 'P': False,\n 'R': False,\n 'X': False,\n 'Q': False\n}\n\ndef searchS(originCity, destinationCity):\n stack.append(originCity)\n mark[originCity] = True\n\n while stack:\n adjacentCities = adjacencyList[stack[len(stack) - 1]]\n if not adjacentCities:\n temp = stack.pop()\n else:\n if stack[len(stack) - 1] == destinationCity:\n return True\n else:\n mark[adjacentCities[len(adjacentCities) - 1]] = True\n stack.append(adjacentCities[len(adjacentCities) - 1])\n adjacentCities.pop()\n # print(stack)\n \n return False\n\noriginCity, destinationCity = input().split()\nprint(searchS(originCity, destinationCity))","sub_path":"6Stack/flightMapStack1.py","file_name":"flightMapStack1.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"315481639","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License (MIT)\n\n# Copyright (c) 2018 CNRS\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# AUTHORS\n# Hervé BREDIN - http://herve.niderb.fr\n\n\nimport numpy as np\nfrom pyannote.core import Segment\nfrom pyannote.audio.features.utils import RawAudio\nfrom pyannote.audio.features.utils import get_audio_duration\nfrom pyannote.generators.fragment import random_subsegment\nfrom pyannote.database import get_protocol\nfrom pyannote.database import FileFinder\nfrom .base import Augmentation\n\n\nclass AddNoise(Augmentation):\n \"\"\"Add noise\n\n Parameters\n ----------\n collection : str or list of str\n `pyannote.database` collection(s) used for adding noise. Defaults to\n 'MUSAN.Collection.BackgroundNoise' available in `pyannote.db.musan`\n package.\n db_yml : str, optional\n Path to `pyannote.database` configuration file.\n snr_min, snr_max : int, optional\n Defines Signal-to-Noise Ratio (SNR) range in dB. Defaults to [5, 20].\n \"\"\"\n\n def __init__(self, collection=None, db_yml=None, snr_min=5, snr_max=20):\n super().__init__()\n\n if collection is None:\n collection = 'MUSAN.Collection.BackgroundNoise'\n if not isinstance(collection, (list, tuple)):\n collection = [collection]\n self.collection = collection\n self.db_yml = db_yml\n\n self.snr_min = snr_min\n self.snr_max = snr_max\n\n # load noise database\n self.files_ = []\n preprocessors = {'audio': FileFinder(config_yml=db_yml),\n 'duration': get_audio_duration}\n for collection in self.collection:\n protocol = get_protocol(collection, preprocessors=preprocessors)\n self.files_.extend(protocol.files())\n\n def normalize(self, waveform):\n return waveform / (np.sqrt(np.mean(waveform ** 2)) + 1e-8)\n\n def __call__(self, original, sample_rate):\n \"\"\"Augment original waveform\n\n Parameters\n ----------\n original : `np.ndarray`\n (n_samples, n_channels) waveform.\n sample_rate : `int`\n Sample rate.\n\n Returns\n -------\n augmented : `np.ndarray`\n (n_samples, n_channels) noise-augmented waveform.\n \"\"\"\n\n raw_audio = RawAudio(sample_rate=sample_rate, mono=True)\n\n original_duration = len(original) / sample_rate\n\n # accumulate enough noise to cover duration of original waveform\n noises = []\n left = original_duration\n while left > 0:\n\n # select noise file at random\n file = np.random.choice(self.files_)\n duration = file['duration']\n\n # if noise file is longer than what is needed, crop it\n if duration > left:\n segment = next(random_subsegment(Segment(0, duration), left))\n noise = raw_audio.crop(file, segment,\n mode='center', fixed=left)\n left = 0\n\n # otherwise, take the whole file\n else:\n noise = raw_audio(file).data\n left -= duration\n\n noise = self.normalize(noise)\n noises.append(noise)\n\n # concatenate\n # FIXME: use fade-in between concatenated noises\n noise = np.vstack(noises)\n\n # select SNR at random\n snr = (self.snr_max - self.snr_min) * np.random.random_sample() + self.snr_min\n alpha = np.exp(-np.log(10) * snr / 20)\n\n return self.normalize(original) + alpha * noise\n","sub_path":"pyannote/audio/augmentation/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"64695520","text":"from copy import deepcopy\nfrom json import dumps\nfrom socket import AF_INET\nfrom typing import Callable, 
Tuple\n\n\nclass MockSocket:\n\n family = AF_INET\n\n def getsockname(self) -> Tuple[str, int]:\n return (\"162.1.1.1\", 80)\n\n def getpeername(self) -> Tuple[str, int]:\n return (\"127.0.0.1\", 80)\n\n\nclass EmptyFramework:\n def __init__(self, scope: dict) -> None:\n pass\n\n async def __call__(self, send: Callable, receive: Callable) -> None:\n pass\n\n\nclass EchoFramework:\n def __init__(self, scope: dict) -> None:\n self.scope = deepcopy(scope)\n self.scope[\"query_string\"] = self.scope[\"query_string\"].decode()\n self.scope[\"headers\"] = [\n (name.decode(), value.decode()) for name, value in self.scope[\"headers\"]\n ]\n\n async def __call__(self, receive: Callable, send: Callable) -> None:\n body = bytearray()\n while True:\n event = await receive()\n if event[\"type\"] in {\"http.disconnect\", \"websocket.disconnect\"}:\n break\n elif event[\"type\"] == \"http.request\":\n body.extend(event.get(\"body\", b\"\"))\n if not event.get(\"more_body\", False):\n await self._send_echo(send, body)\n break\n elif event[\"type\"] == \"websocket.connect\":\n await send({\"type\": \"websocket.accept\"})\n elif event[\"type\"] == \"websocket.receive\":\n await send(\n {\"type\": \"websocket.send\", \"text\": event[\"text\"], \"bytes\": event[\"bytes\"]}\n )\n\n async def _send_echo(self, send: Callable, request_body: bytes) -> None:\n response = dumps({\"scope\": self.scope, \"request_body\": request_body.decode()}).encode()\n content_length = len(response)\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": 200,\n \"headers\": [(b\"content-length\", str(content_length).encode())],\n }\n )\n await send({\"type\": \"http.response.body\", \"body\": response, \"more_body\": False})\n\n\nclass ChunkedResponseFramework:\n def __init__(self, scope: dict) -> None:\n self.scope = scope\n\n async def __call__(self, receive: Callable, send: Callable) -> None:\n while True:\n event = await receive()\n if event[\"type\"] == \"http.disconnect\":\n break\n elif event[\"type\"] == \"http.request\":\n if not event.get(\"more_body\", False):\n await self._send_chunked(send)\n break\n\n async def _send_chunked(self, send: Callable) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": 200,\n \"headers\": [(b\"transfer-encoding\", b\"chunked\")],\n }\n )\n for chunk in [b\"chunked \", b\"data\"]:\n await send({\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True})\n await send({\"type\": \"http.response.body\", \"body\": b\"\", \"more_body\": False})\n\n\nclass BadFramework:\n def __init__(self, scope: dict) -> None:\n self.scope = scope\n if self.scope[\"path\"] == \"/\":\n raise Exception()\n\n async def __call__(self, receive: Callable, send: Callable) -> None:\n if self.scope[\"path\"] == \"/no_response\":\n return\n elif self.scope[\"path\"] == \"/call\":\n raise Exception()\n elif self.scope[\"path\"] == \"/accept\":\n await send({\"type\": \"websocket.accept\"})\n raise Exception()\n\n\nclass PushFramework:\n def __init__(self, scope: dict) -> None:\n self.scope = scope\n\n async def __call__(self, receive: Callable, send: Callable) -> None:\n while True:\n event = await receive()\n if event[\"type\"] == \"http.disconnect\":\n break\n elif event[\"type\"] == \"http.request\" and not event.get(\"more_body\", False):\n await send({\"type\": \"http.response.start\", \"status\": 200, \"headers\": []})\n await send({\"type\": \"http.response.push\", \"path\": \"/\", \"headers\": []})\n await send({\"type\": \"http.response.body\", \"more_body\": 
False})\n                break\n","sub_path":"tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"199684442","text":"from unittest import TestCase\n\nfrom trycap.numeric import numeric as cap\n\n\nclass T(TestCase):\n    def test(self):\n        expected = False, True, True\n        _0 = cap(self)\n        _1 = cap(0)\n        _2 = cap(0j)\n        actual = _0, _1, _2\n        self.assertEqual(expected, actual)\n","sub_path":"ztest/number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"287886809","text":"# coding:utf-8\nimport itchat,time\n# import all message types\nfrom itchat.content import *\n# log in - persistent session\nitchat.auto_login()\nprint(u\"logged\")\n\n@itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING, PICTURE,\n                      RECORDING, VOICE, ATTACHMENT, VIDEO, FRIENDS, SYSTEM])\ndef simple_reply(msg):\n    print(msg)\nitchat.run()\n\nwhile itchat.check_login() == '200':  # check_login is a function returning a status code string; '200' means logged in\n    print(\"Login status is normal\")\n    time.sleep(10)\n\n\n\n\n    # print(itchat.loginInfo)\n    # mps = itchat.search_mps(name=u'QQ飞车手游')[0]\n    # mps.send(u'签到')\n","sub_path":"it_chat_ever.py","file_name":"it_chat_ever.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"449443118","text":"import os\nimport torch\nimport tensorflow as tf\nimport onnx\nfrom onnx2keras import onnx_to_keras\n\ndef pytorch2onnx(model, sample_data, target_path):\n\t# Export the model\n\tinput_x = sample_data.reshape(1, sample_data.shape[1]) \n\toutput = model(input_x)\n\ttorch.onnx.export(model, input_x, target_path, export_params=True, input_names=['main_input'], output_names=['main_output'])\n\tprint (\"Exported pytorch model to ONNX and saved it as {}\".format(target_path))\n\ndef onnx2keras(model, model_path, target_path):\n\tonnx_model = onnx.load(model_path)\n\tk_model = onnx_to_keras(onnx_model, ['main_input'])\n\tk_model.save(target_path)\n\tprint (\"Exported ONNX model to keras model and saved it as {}\".format(target_path))\n\ndef keras2tflite(model, model_path, target_path):\n    # Load the tensorflow model\n    model = tf.keras.models.load_model(model_path)\n    # TFlite model\n    # converter = tf.lite.TFLiteConverter.from_keras_model(model) # TF 2.x\n    converter = tf.lite.TFLiteConverter.from_keras_model_file(model_path) # TF 1.x\n    converter.optimizations = [tf.lite.Optimize.DEFAULT]\n    tflite_model = converter.convert()\n    # Save the TF Lite model.\n    with tf.io.gfile.GFile(target_path, 'wb') as f:\n        f.write(tflite_model)\n    print (\"Exported keras model to tflite model and saved it as {}\".format(target_path))\n\n\ndef tflite2cpp(model, model_path, target_path):\n    os.system('xxd -i '+model_path+' > '+target_path+'')\n    print (\"Exported tflite model to c++ model and saved it as {}\".format(target_path))\n\ndef check_onnx_model(model_path):\n\tonnx_model = onnx.load(model_path)\n\tprint('The model is:\\\\n{}'.format(onnx_model))\n\t# Check the model\n\ttry:\n\t\tonnx.checker.check_model(onnx_model)\n\texcept onnx.checker.ValidationError as e:\n\t\tprint('The model is invalid: %s' % e)\n\telse:\n\t\tprint('The model is valid!')","sub_path":"mlutils/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"134339007","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/3/30 
21:04\n# @Author : Warren.wang\n# @File : send_rocketMq.py\n# @Software: PyCharm\n\nfrom rocketmq.client import Producer, Message\n\nproduct = Producer(\"PID-123\") # producer group id; any value works\nproduct.set_namesrv_addr(\"ip:port\")\nproduct.start()\n\nmsg = Message(\"topic\")\nmsg.set_keys(\"keys\")\nmsg.set_tags(\"tag\")\nmsg.set_body(\"data to send\")\nret = product.send_sync(msg)\nprint(ret.status, ret.msg_id, ret.offset)\n\n","sub_path":"daily/send_rocketMq.py","file_name":"send_rocketMq.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"211787432","text":"import string\r\n\r\ntmp = string.digits + string.ascii_lowercase\r\ndef convert(num, base):\r\n    q, r = divmod(num, base)\r\n    if q == 0:\r\n        return tmp[r]\r\n    else:\r\n        return convert(q, base) + tmp[r]\r\ndef solution(n):\r\n    tetra = convert(n, 3)\r\n    return int(tetra[::-1], 3)\r\n\r\nprint(solution(45))\r\nprint(solution(125))\r\n\r\n# I didn't know how to do this, so I googled it. Review this again from time to time.","sub_path":"프로그래머스 문제풀이/Level1/3진법 뒤집기.py","file_name":"3진법 뒤집기.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"406866751","text":"import boto3\nimport click\nimport json\nimport logging\nfrom os.path import basename\n\nfrom arki_common.aws import check_response\nfrom arki_common.configs import (\n    init_wrapper,\n    default_config_file_path,\n)\n\nAPP_NAME = basename(__file__).split(\".\")[0]\n\n# Default configuration file location\nDEFAULT_CONFIG_FILE = default_config_file_path(f\"{APP_NAME}.toml\")\n\nDEFAULT_CONFIGS = {\n    \"aws.profile\": {\"required\": False},\n    \"aws.lambda.name\": {\"required\": True},\n    \"aws.lambda.alias\": {\"required\": True},\n    \"aws.statemachine.arn\": {\"required\": True},\n    \"aws.stepfunctions.region\": {\"required\": True},\n    \"state_machine_definition_file\": {\"required\": True},\n}\n\n\ndef _prepare_definition(state_machine_definition_file, lambda_name, lambda_alias):\n    \"\"\"\n    Load the state machine definition file and update it with the corresponding lambda alias\n    before uploading to AWS Step Functions, for deploying to a different stage.\n    \"\"\"\n    with open(state_machine_definition_file, \"r\") as def_json_file:\n        data = json.load(def_json_file)\n\n    # Update the `Resource` field in each \"Task\" block in the definition file.\n    for state, attrs in data[\"States\"].items():\n        for k, v in attrs.items():\n            if k == \"Resource\":\n                ori_name = v.split(\":\")[-1]\n                data[\"States\"][state][\"Resource\"] = v.replace(ori_name, f\"{lambda_name}:{lambda_alias}\")\n\n    logging.info(json.dumps(data, indent=2))\n    return json.dumps(data)\n\n\ndef _statemachine_deploy(settings):\n    \"\"\"Update a state machine definition\n    \"\"\"\n    logging.info(f\"Update {settings['aws.statemachine.arn']}...\")\n\n    def_data = _prepare_definition(\n        settings[\"state_machine_definition_file\"],\n        settings[\"aws.lambda.name\"],\n        settings[\"aws.lambda.alias\"]\n    )\n\n    sfn_client = boto3.client(\"stepfunctions\", region_name=settings[\"aws.stepfunctions.region\"])\n\n    # update_state_machine is the boto3 Step Functions call that replaces a state machine definition\n    resp = sfn_client.update_state_machine(\n        stateMachineArn=settings[\"aws.statemachine.arn\"],\n        definition=def_data\n    )\n    return check_response(resp)\n\n\n@init_wrapper\ndef process(*args, **kwargs):\n    logging.debug(\"At process\")\n    logging.debug(kwargs)\n\n    try:\n        settings = kwargs.get(\"_arki_settings\")\n        _statemachine_deploy(settings=settings)\n    except Exception as e:\n        logging.error(e)\n        return 1\n\n    return 
0\n\n\n@click.command()\n@click.argument(\"config_file\", required=False, default=DEFAULT_CONFIG_FILE)\n@click.option(\"--config_section\", \"-s\", required=False, default=APP_NAME, help=f\"E.g. {APP_NAME}.staging\")\ndef stepfunction_deploy(config_file, config_section):\n\n    process(\n        app_name=APP_NAME,\n        config_file=config_file,\n        default_configs=DEFAULT_CONFIGS,\n        config_section=config_section,\n    )\n","sub_path":"StepFunctions/stepfunction_deploy.py","file_name":"stepfunction_deploy.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"408051824","text":"## Team members (Team 13):\n## Yuansan Liu, 1037351\n## Karun Varghese Mathew, 1007247\n## Junlin Chen, 1065399\n## Jingyi Shao, 1049816\n## Han Jiang, 1066425\n\nfrom util import *\n\n\napi, auth = init_api('mls')\n\nclass TweetStreamListener(tweepy.StreamListener):\n    def __init__(self, api=None):\n        super().__init__(api=api)\n        self.db = get_db_client('junlin_id_fixed')\n\n    def createDoc(self, data):\n        if str(data.id) in self.db: ## check duplicate\n            return\n\n        place = getattr(data, 'place')\n\n        is_truncated = data.truncated\n        text = data.text\n        if is_truncated:\n            text = data.extended_tweet['full_text']\n        doc = {\n            '_id': str(data.id_str),\n            'post_at': data.created_at.timestamp(),\n            'text': text,\n            'json': data._json,\n            'author': int(data.author.id_str),\n            'place_name': place.name if place is not None else None,\n            'place_full_name': place.full_name if place is not None else None,\n            'place_type': place.place_type if place is not None else None\n        }\n        self.db.create_document(doc)\n\n    def on_status(self, status):\n        self.createDoc(status)\n\n    def on_error(self, status_code):\n        print('Error::', status_code)\n        return True\n\n    def on_timeout(self):\n        return True\n\n## get new stream tweets\ntwitter_stream = tweepy.streaming.Stream(auth, TweetStreamListener())\n\naustralia_bound_box = get_location('australia')\naustralia_bound_box = australia_bound_box[0][::-1] + australia_bound_box[1][::-1]\ntwitter_stream.filter(locations=australia_bound_box)","sub_path":"crawler/StreamToCouchDB.py","file_name":"StreamToCouchDB.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"79234811","text":"#!/usr/bin/env python\n\"\"\"Test behavior of counting module\"\"\"\n\n__author__ = \"Russell Horton\"\n__copyright__ = \"Copyright 2018, All Rights Reserved\"\n__license__ = \"For Toptal candidacy evaluation only. 
No license is granted.\"\n__version__ = \"0.1\"\n__maintainer__ = \"Russell Horton\"\n__email__ = \"russ@bagaduce.com\"\n\nimport unittest\nfrom carroperro import counting\n\n\nclass UtilTest(unittest.TestCase):\n\n def test_reverse_counts(self):\n\n counts = {\n 'prius': {'lab': 1, 'mutt': 3},\n 'yaris': {'lab': 4, 'pittie': 2},\n 'golf': {'golden': 4, 'pittie': 1},\n }\n reversed_counts = {\n 'lab': {'prius': 1, 'yaris': 4},\n 'mutt': {'prius': 3},\n 'pittie': {'yaris': 2, 'golf': 1},\n 'golden': {'golf': 4},\n }\n\n self.assertEqual(reversed_counts, counting.reverse_counts(counts))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"carroperro/tests/counting_test.py","file_name":"counting_test.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"338888595","text":"##############################################################################\n#\n# Copyright (c) 2003-2016 by The University of Queensland\n# http://www.uq.edu.au\n#\n# Primary Business: Queensland, Australia\n# Licensed under the Open Software License version 3.0\n# http://www.opensource.org/licenses/osl-3.0.php\n#\n# Development until 2012 by Earth Systems Science Computational Center (ESSCC)\n# Development 2012-2013 by School of Earth Sciences\n# Development from 2014 by Centre for Geoscience Computing (GeoComp)\n#\n##############################################################################\nfrom templates.jessie_mpi_options import *\ncc_optim = '-O3 -march=native'\nverbose = True\nparmetis = True\numfpack = True\nsilo = True\ntrilinos=True\ntrilinos_prefix=\"/opt/trilinos_hybrid\"\nwerror=False\n\n\nparmetis_prefix='/usr/local'\n#longindices=True\n","sub_path":"scons/ferret_options.py","file_name":"ferret_options.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"273714824","text":"# coding=utf-8\n\"\"\"Utilities for working with D-Bus.\"\"\"\nimport signal\nfrom collections import namedtuple\nfrom typing import Iterable\n\nimport gi\ngi.require_version('Gtk', '3.0') # pylint:disable=wrong-import-position\nfrom gi.repository import GLib, Gio\n\nEvent = namedtuple('Event', ('none', 'new', 'removed'))\n\nEVENT = Event(' ', '+', '-')\n\nUnit = namedtuple('Unit', (\n 'name',\n 'description',\n 'load_state',\n 'active_state',\n 'sub_state',\n 'following',\n 'object_path',\n 'queued_job',\n 'job_type',\n 'job_path',\n))\n\nUnitNew = namedtuple('UnitNew', ('id', 'unit'))\n\nUnitRemoved = namedtuple('UnitRemoved', ('id', 'unit'))\n\n\n# Ignore too-few-methods because this is a small toy application, and it\n# doesn't need to be fully fleshed out.\nclass Main: # pylint:disable=too-few-public-methods\n \"\"\"Monitor unit appearances and disappearances.\"\"\"\n\n def __init__(self):\n \"\"\"Subscribe to signals, catch SIGINT, and run the GLib main loop.\"\"\"\n connection: Gio.DBusConnection = Gio.bus_get_sync(\n bus_type=Gio.BusType.SYSTEM,\n cancellable=None,\n )\n\n # NOTE: If the goal of this application is to actually *track* extant\n # units, then this should be executed after the signals below are\n # enabled. Simply moving this block of code down won't do the trick, as\n # signals are processed by main loops. 
It might be fruitful to\n        # investigate creating multiple main loops within multiple threads.\n        for unit in get_units(connection):\n            print_unit_name(unit.name, EVENT.none)\n\n        connection.signal_subscribe(\n            sender='org.freedesktop.systemd1',\n            interface_name='org.freedesktop.systemd1.Manager',\n            member='UnitRemoved',\n            object_path='/org/freedesktop/systemd1',\n            arg0=None,\n            flags=Gio.DBusSignalFlags.NONE,\n            callback=handle_unit_removed,\n            user_data=None,\n        )\n        connection.signal_subscribe(\n            sender='org.freedesktop.systemd1',\n            interface_name='org.freedesktop.systemd1.Manager',\n            member='UnitNew',\n            object_path='/org/freedesktop/systemd1',\n            arg0=None,\n            flags=Gio.DBusSignalFlags.NONE,\n            callback=handle_unit_new,\n            user_data=None,\n        )\n        self.main_loop = GLib.MainLoop()\n        signal.signal(signal.SIGINT, self.handle_sigint)\n        self.main_loop.run()\n\n    def handle_sigint(self, signum, frame): # pylint:disable=unused-argument\n        \"\"\"Quit the GLib main loop.\n\n        SIGINT is emitted when a user presses ctrl+c.\n        \"\"\"\n        self.main_loop.quit()\n\n\ndef get_units(connection: Gio.DBusConnection) -> Iterable[Unit]:\n    \"\"\"Get all extant units.\"\"\"\n    response = connection.call_sync(\n        bus_name='org.freedesktop.systemd1',\n        object_path='/org/freedesktop/systemd1',\n        interface_name='org.freedesktop.systemd1.Manager',\n        method_name='ListUnits',\n        parameters=None,\n        reply_type=GLib.VariantType.new('(a(ssssssouso))'),\n        flags=Gio.DBusCallFlags.NONE,\n        timeout_msec=-1,\n        cancellable=None,\n    )\n    return tuple(Unit(*unit_props) for unit_props in response.unpack()[0])\n\n\n# Ignore argument issues because the PyGObject API defines what's available.\ndef handle_unit_new( # pylint:disable=too-many-arguments,unused-argument\n        connection,\n        sender_name,\n        object_path,\n        interface_name,\n        signal_name,\n        parameters,\n        *user_data) -> None:\n    \"\"\"Handle the ``UnitNew`` signal.\"\"\"\n    unit_new = UnitNew(*parameters.unpack())\n    print_unit_name(unit_new.id, EVENT.new)\n\n\n# Ignore argument issues because the PyGObject API defines what's available.\ndef handle_unit_removed( # pylint:disable=too-many-arguments,unused-argument\n        connection,\n        sender_name,\n        object_path,\n        interface_name,\n        signal_name,\n        parameters,\n        *user_data) -> None:\n    \"\"\"Handle the ``UnitRemoved`` signal.\"\"\"\n    unit_removed = UnitRemoved(*parameters.unpack())\n    print_unit_name(unit_removed.id, EVENT.removed)\n\n\ndef print_unit_name(unit_name: str, prefix: str = EVENT.none) -> None:\n    \"\"\"Print the given unit's name to stdout.\"\"\"\n    print(prefix, unit_name)\n\n\nif __name__ == '__main__':\n    Main()\n","sub_path":"python/unit-tracker/unit_tracker/dbus.py","file_name":"dbus.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"50541216","text":"\"\"\"Python Exercise 055:\nWrite a program that reads the weight of five people.\nAt the end, show the highest and the lowest weight read.\"\"\"\nmaior = 0\nmenor = 99999999\nfor pess in range(1, 6):\n    peso = float(input(f'Enter weight #{pess}: '))\n    # track the maximum and minimum independently so both are updated on every reading\n    if peso > maior:\n        maior = peso\n    if peso < menor:\n        menor = peso\nprint(f'The highest weight was {maior} and the lowest weight was {menor}')\n","sub_path":"Mundo 2-EstruturasDeControle/Anotações das Aulas e Desafios/ex055.py","file_name":"ex055.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"182720983","text":"import argparse\nimport csv\n\n# Constants\nDICT = 'dict.txt'\n\ndef 
file_to_dict(tsv):\n    name_dict = {}\n\n    with open(tsv, 'rb') as tsvin:\n        tsvin = csv.reader(tsvin, delimiter='\\t', dialect=csv.excel_tab)\n        for row in tsvin:\n            name_dict.update(dict([tuple(row[0].split())]))\n\n    return name_dict\n\n\ndef find_replace(vcf, name_dict, vcf_out):\n    with open(vcf, 'rb') as vcf:\n        vcf_str = vcf.read()\n        for key in name_dict:\n            vcf_str = vcf_str.replace(key, name_dict[key])\n\n    with open(vcf_out, 'wb') as vcf_out:\n        vcf_out.write(vcf_str)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Calls HWEfilter with the following arguments')\n    parser.add_argument('-DICT', const=DICT, default=DICT, nargs='?', help='Name of the dict file. This always has to be a tab delimited file. First column is barcode/run name ie Xpress_00X. Second column is the sample name ie ARG109_5')\n    parser.add_argument('-VCF', type=str, help='Vcf file to filter through')\n\n\n    args = parser.parse_args()\n\n    vcf_file = str(args.VCF)\n\n    name_dict = file_to_dict(args.DICT)\n    rep = find_replace(args.VCF, name_dict, vcf_file)\n\n\n\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"566975991","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 31 23:45:51 2017\n\n@author: alexkreamas\n\"\"\"\n\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n    url(r'^$', views.post_list, name = 'post_list'),\n    url(r'^post/(?P<pk>\\\\d+)/$', views.post_detail, name = 'post_detail'),\n    url(r'^post/new/$', views.post_new, name = 'post_new'),\n    url(r'^post/(?P<pk>\\\\d+)/edit/$', views.post_edit, name = 'post_edit'),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"93305462","text":"# Given a binary search tree, find the lowest common ancestor of two specified nodes in the tree.\nfrom collections import deque\nclass Solution:\n    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n        parents = {root: None}\n        queue = deque([root])\n        while queue:\n            temp = queue.popleft()\n            if temp.left:\n                parents[temp.left] = temp\n                queue.append(temp.left)\n            if temp.right:\n                parents[temp.right] = temp\n                queue.append(temp.right)\n        s = set()\n        while p:\n            s.add(p)\n            p = parents[p]\n        while q:\n            if q in s:\n                return q\n            q = parents[q]\n\n\n# Method 2: use the BST property that left-subtree values are smaller than the root and right-subtree values are larger. Time complexity O(n), space complexity O(1)\nclass Solution:\n    def lowestCommonAncestor(self, root, p, q):\n        cur = root\n        while cur:\n            if p.val < cur.val and q.val < cur.val:\n                cur = cur.left\n            elif p.val > cur.val and q.val > cur.val:\n                cur = cur.right\n            else:\n                return cur\n\n\n# Method 3: same idea as above, written recursively. Time complexity O(n), space complexity O(n)\nclass Solution:\n    def lowestCommonAncestor(self, root, p, q):\n        if p.val < root.val and q.val < root.val:\n            return self.lowestCommonAncestor(root.left, p, q)\n        if p.val > root.val and q.val > root.val:\n            return self.lowestCommonAncestor(root.right, p, q)\n        return root","sub_path":"leetcode/sword_to_offer68-1.py","file_name":"sword_to_offer68-1.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"601681878","text":"import sys\nimport os\n\nimport seqmap\nimport bowtie\nimport dictionaries\nimport features\n\ndef main() :\n    \n    alignment_file = sys.argv[1]\n\n    test_map(alignment_file)\n\n\n\n# methods for testing the mapping object\n\ndef test_map(alignment_file) :\n\n    refseq 
= dictionaries.RefseqInfo()\n \n print(\"Mapping with unfettered measurements...\")\n raw_stats = get_raw_actb_stats(refseq, alignment_file)\n\n print(\"Mapping with map_single_alignment...\")\n map_stats = get_map_actb_stats(refseq, alignment_file, raw_stats['seen'])\n\n print(\"Mapping with featureset...\")\n fset_stats = get_featureset_stats(refseq, alignment_file, {})\n\n names = ('ntotal','ntx','nintrons','nexons','ncds','nfutr','ntutr')\n for n in names :\n print(\"%s\\t%d\\t%d\\t%d\" % (n, raw_stats[n], map_stats[n], fset_stats[n]))\n \n exontab = raw_stats['exontab']\n for i in range(0,len(exontab)) :\n print(\"Exon %d\\t%d\\t%d\\t%d\" % (i,raw_stats['exontab'][i],map_stats['exontab'][i], fset_stats['exontab'][i]))\n\ndef get_raw_actb_stats(refseq, alignment_file) :\n \n actb = refseq.lookup(\"NM_007393\")\n exons = actb.exons()\n cds = (actb.get('cdsStart'), actb.get('cdsEnd'))\n tx = (exons[0][0],exons[-1][1])\n print(\"%i to %i\" % tx)\n \n nexons = 0\n nintrons = 0\n nfutr = 0\n ntutr = 0\n ncds = 0\n ntx = 0\n ntotal = 0\n seen = set()\n\n exontab = [0]*len(exons)\n \n fh = open(alignment_file, \"r\")\n\n for line in fh :\n \n ntotal += 1\n row = line.strip().split(\"\\t\")\n\n startpos = int(row[3])\n endpos = startpos + len(row[4])\n\n if (row[2]=='chr5') & (startpos >= tx[0]) & (endpos <= tx[1]) :\n ntx += 1\n seen.add(row[4])\n\n inexon = False\n i = 0\n for e in exons :\n if (endpos > e[0]) & (endpos <= e[1]) :\n inexon = True\n exontab[i] += 1\n nexons += 1\n\n i += 1\n \n if inexon :\n if endpos <= cds[0] :\n ntutr += 1\n elif endpos > cds[1] :\n nfutr += 1\n else :\n ncds += 1\n else :\n nintrons += 1\n \n exontab.reverse()\n return {'nexons' : nexons, 'nintrons' : nintrons, 'nfutr' : nfutr, 'seen' : seen,\n 'ntutr' : ntutr, 'ncds' : ncds, 'ntx' : ntx, 'ntotal' : ntotal, 'exontab' : exontab}\n \n \ndef get_map_actb_stats(refseq, alignment_file, seen) :\n \n alignment = bowtie.BowtieGenomeAlignment(alignment_file)\n m = seqmap.Mapper(refseq)\n actb = refseq.lookup(\"NM_007393\")\n cds = actb.cds_tx()\n exons = actb.exons()\n print(\"%i to %i\" % cds)\n \n nexons = 0\n nintrons = 0\n nfutr = 0\n ntutr = 0\n ncds = 0\n ntx = 0\n ntotal = 0\n \n exontab = [0]*len(exons)\n\n for r in alignment.alignments() :\n ntotal += 1\n features = m.map_single_alignment(r,r[0])\n if (r[4] in seen) & (len(features)<1):\n print(\"%s %d\" % (r[4],r[3]))\n\n for f in features :\n if f['feature'].name() == 'NM_007393' :\n ntx += 1\n \n # 'readpos' is the tx position\n pos = f['readpos']\n segment = f['feature'].in_segment('genome',pos)\n if segment > 0 :\n nexons += 1\n exontab[segment-1] += 1\n \n if pos <= cds[0] :\n nfutr += 1\n elif pos > cds[1] :\n ntutr += 1\n else :\n ncds += 1\n\n elif segment < 0 :\n nintrons += 1\n #else :\n # if pos :\n # print(\"%s %d %d %d\" % (r[4],pos, r[3], len(r[4])))\n return {'nexons' : nexons, 'nintrons' : nintrons, 'nfutr' : nfutr,\n 'ntutr' : ntutr, 'ncds' : ncds, 'ntx' : ntx, 'ntotal' : ntotal, 'exontab' : exontab}\n\ndef get_featureset_stats(refseq, alignment_file, seen) :\n \n alignment = bowtie.BowtieGenomeAlignment(alignment_file)\n m = seqmap.Mapper(refseq)\n fset = features.FeatureSet(refseq)\n m.map_alignment(alignment, output_set=fset)\n n = fset.num_items()\n print(\"Num items: \" + str(n))\n actb = fset.lookup(\"NM_007393\")\n ec = actb.exon_counts()\n\n for e in actb.exon_tx_coordinates() :\n print(\"%d - %d\" % e)\n\n print(\"read offset: \" + str(actb._read_offset))\n \n return {'nexons' : actb.num_in_exons(),\n 'nintrons' : 
actb.num_in_introns(),\n 'nfutr' : actb.num_in_5utr(),\n 'ntutr' : actb.num_in_3utr(),\n 'ncds' : actb.num_in_cds(),\n 'ntx' : actb.raw_count(),\n 'ntotal' : actb.raw_count(),\n 'exontab' : ec}\n \n\nif __name__ == '__main__' :\n main()\n","sub_path":"pytest.py","file_name":"pytest.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"590230595","text":"# Number Names\n# Show how to spell out a number in English. \n# You can use a preexisting implementation or roll your own, \n# but you should support inputs up to at least one million \n# (or the maximum value of your language's default bounded integer type, if that's less). \n# Optional: Support for inputs other than positive integers \n# (like zero, negative integers, and floating-point numbers).\n\ndef number_to_english(num):\n if num == '0':\n return \"Zero\"\n if num == '1':\n return \"One\"\n if num == '2':\n return \"Two\"\n if num == '3':\n return \"Three\"\n if num == '4':\n return \"Four\"\n if num == '5':\n return \"Five\"\n if num == '6':\n return \"Six\"\n if num == '7':\n return \"Seven\"\n if num == '8':\n return \"Eight\"\n if num == '9':\n return \"Nine\"\n\ndef spell_number(n):\n # Create a spell_number list use the list comprehension\n spell_number_list = [number_to_english(x) for x in n]\n \n # Spell out number names less than ten million\n def one_digit_number(n):\n return n\n def two_digit_number(t, n, List):\n if List[0] == 'One' and t == len(List) - 1:\n if List[1] == 'Zero':\n print(\"Ten\")\n elif List[1] == 'One':\n print(\"Eleven\")\n elif List[1] == 'Two':\n print(\"Twelve\")\n elif List[1] == 'Three':\n print(\"Thirteen\")\n elif List[1] == 'Five':\n print(\"Fifteen\")\n elif List[1] == 'Eight':\n print(\"Eighteen\")\n else:\n print(n+'teen')\n elif List[0] == 'Two' and t == len(List) - 1: \n if List[1] == 'Zero':\n print(\"Twenty\")\n else:\n print(\"Twenty-\"+n)\n elif List[0] == 'Three' and t == len(List) - 1: \n if List[1] == 'Zero':\n print(\"Thirty\")\n else:\n print(\"Thirty-\"+n)\n elif List[0] == 'Five' and t == len(List) - 1: \n if List[1] == 'Zero':\n print(\"Fifty\")\n else:\n print(\"Fifty-\"+n)\n elif List[0] == 'Eight' and t == len(List) - 1: \n if List[1] == 'Zero':\n print(\"Eighty\")\n else:\n print(\"Eighty-\"+n)\n elif t == len(List) - 1 and List[0] != 'Zero':\n if List[1] == 'Zero':\n print(List[0]+\"ty\")\n else:\n print(List[0]+\"ty\"+\"-\"+n)\n def three_digit_number(t, n, List):\n if t == len(List) - 1 and List[1] == \"Zero\" and List[-1] == \"Zero\":\n print(List[0]+\"-hundred\")\n elif t == len(List) - 1 and List[1] == \"Zero\":\n print(List[0]+\"-hundred\"+\" and \" + one_digit_number(List[-1]))\n elif t == len(List) - 1:\n print(List[0]+\"-hundred\"+\" and \")\n two_digit_number(t-1, n, List[1:])\n def four_digit_number(t, n, List):\n if t == len(List) - 1 and List[1] == \"Zero\" and List[2] == \"Zero\" and List[-1] == \"Zero\":\n print(List[0]+\"-thousand\")\n elif t == len(List) - 1 and List[1] == \"Zero\":\n if List[2] == \"Zero\":\n print(List[0]+\"-thousand\"+\" and \" + one_digit_number(List[-1]))\n else:\n print(List[0]+\"-thousand\"+\" and \")\n two_digit_number(t-2, n, List[2:])\n elif t == len(List) - 1:\n print(List[0]+\"-thousand\"+\" and \")\n three_digit_number(t-1, n, List[1:])\n def five_digit_number(t, n, List):\n List1 = List[:2]\n List2 = List[2:]\n if t == len(List) - 1:\n two_digit_number(t-3, List1[-1], List1)\n print(\"-thousand\")\n if List2[0] == 'Zero' and 
List2[1] == 'Zero':\n print(\"and \" + one_digit_number(List2[-1]))\n elif List2[0] == 'Zero':\n print(\"and \")\n two_digit_number(t-3, List2[-1], List2[1:])\n else:\n print(\"and \")\n three_digit_number(t-2, n, List2)\n def six_digit_number(t, n, List):\n List1 = List[:3]\n List2 = List[3:]\n if t == len(List) - 1:\n three_digit_number(t-3, List1[-1], List1)\n print(\"-thousand\")\n if List2[0] == 'Zero' and List2[1] == 'Zero':\n if List2[-1] == 'Zero':\n print(\"\")\n else:\n print(\"and \" + one_digit_number(List2[-1]))\n elif List2[0] == 'Zero':\n print(\"and \")\n two_digit_number(t-4, List2[-1], List2[1:])\n else:\n print(\"and \")\n three_digit_number(t-3, n, List2)\n def seven_digit_number(t, n, List):\n List1 = List[:1]\n List2 = List[1:]\n if t == len(List) - 1:\n print(one_digit_number(List1[0])+\"-million\")\n if List2[0] == 'Zero' and List2[1] == 'Zero' and List2[2] == 'Zero' and List2[3] == 'Zero' and List2[4] == 'Zero' and List2[5] == 'Zero':\n print(\"\")\n elif List2[0] == 'Zero' and List2[1] == 'Zero' and List2[2] == 'Zero' and List2[3] == 'Zero' and List2[4] == 'Zero':\n print(\"and \" + one_digit_number(List2[-1]))\n elif List2[0] == 'Zero' and List2[1] == 'Zero' and List2[2] == 'Zero' and List2[3] == 'Zero':\n print(\"and \")\n two_digit_number(t-5, n, List2[4:])\n elif List2[0] == 'Zero' and List2[1] == 'Zero' and List2[2] == 'Zero':\n print(\"and \")\n three_digit_number(t-4, n, List2[3:])\n elif List2[0] == 'Zero' and List2[1] == 'Zero':\n print(\"and \")\n four_digit_number(t-3, n, List2[2:])\n elif List2[0] == 'Zero':\n print(\"and \")\n five_digit_number(t-2, n, List2[1:])\n else:\n print(\"and \")\n six_digit_number(t-1, n, List2) \n \n # Spell out number names more than ten million \n def eight_digit_number(t, n, List):\n pass \n def nine_digit_number(t, n, List):\n pass \n def ten_digit_number(t, n, List):\n pass \n def eleven_digit_number(t, n, List):\n pass\n \n for k, i in enumerate(spell_number_list):\n if len(spell_number_list) == 1:\n print(one_digit_number(i))\n if len(spell_number_list) == 2:\n two_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 3:\n three_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 4:\n four_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 5:\n five_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 6:\n six_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 7:\n seven_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 8:\n eight_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 9:\n nine_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 10:\n ten_digit_number(k, i, spell_number_list)\n if len(spell_number_list) == 11:\n eleven_digit_number(k, i, spell_number_list)\n\nif __name__ == '__main__':\n # n less than ten million\n print(\"\\nTest Result: \\n\")\n n = '9999999'\n spell_number(n)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Final_Projects/name_numbers_test.py","file_name":"name_numbers_test.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"311244359","text":"from django.utils import translation\nfrom django.shortcuts import redirect\n\n\n\ndef set_lang(request, lang=None):\n # lang = request.POST['lang']\n translation.activate(lang)\n request.session[translation.LANGUAGE_SESSION_KEY] = lang\n # return redirect(request.META['HTTP_REFERER'])\n url = 
request.META['HTTP_REFERER'].split('/')\n url[3] = lang\n url = '/'.join(url)\n print(lang)\n return redirect(url)\n\n\ndef get_sk(request):\n sk = request.session.session_key\n if not sk: \n request.session.cycle_key()\n return sk \n\n\ndef get_user(request):\n if request.user.is_anonymous:\n return None\n return request.user\n\n\ndef get_line():\n import inspect \n caller = inspect.getframeinfo(inspect.stack()[1][0])\n print(caller)\n # print('filename:', inspect.getframeinfo(inspect.currentframe()).filename)\n # print('line:', inspect.getframeinfo(inspect.currentframe()).lineno)\n","sub_path":"box/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"44435555","text":"from __future__ import print_function\r\nimport os\r\nimport sys\r\n\r\nclass Node(object):\r\n def __init__(self, data):\r\n self.left = None\r\n self.right = None\r\n self.data = data\r\n\r\ndef convert(root):\r\n if not root:\r\n return\r\n convert(root.left)\r\n convert(root.right)\r\n\r\n if not root.left:\r\n root.left = root.right\r\n else:\r\n root.left.right = root.right\r\n root.right = None\r\n\r\ndef printtree(root):\r\n if root:\r\n print(root.data, end=' ')\r\n printtree(root.right)\r\n printtree(root.left)\r\n\r\nroot = Node(1)\r\nroot.left = Node(2)\r\nroot.right = Node(3)\r\nroot.right.left = Node(4)\r\nroot.right.right = Node(5)\r\nroot.right.left.left = Node(6)\r\nroot.right.right.left = Node(7)\r\nroot.right.right.right = Node(8)\r\nprinttree(root)\r\nprint()\r\nconvert(root)\r\nprinttree(root)\r\nprint()","sub_path":"geeks/tree/convert-left-right-representation-bianry-tree-right.py","file_name":"convert-left-right-representation-bianry-tree-right.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"440063666","text":"import torch\r\nfrom torch import nn\r\n\r\n\r\nclass ConvCritic(nn.Module):\r\n def __init__(self, latent_size, input_channel=1):\r\n super().__init__()\r\n\r\n self.input_channel = input_channel\r\n self.DIM = 64\r\n self.main = nn.Sequential(\r\n nn.Conv2d(self.input_channel, self.DIM, 5, stride=2, padding=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(self.DIM, 2 * self.DIM, 5, stride=2, padding=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(2 * self.DIM, 4 * self.DIM, 5, stride=2, padding=2),\r\n nn.ReLU(True),\r\n )\r\n\r\n embed_size = 64\r\n\r\n self.z_fc = nn.Sequential(\r\n nn.Linear(latent_size, embed_size),\r\n nn.LayerNorm(embed_size),\r\n nn.LeakyReLU(0.2),\r\n nn.Linear(embed_size, embed_size),\r\n )\r\n\r\n self.x_fc = nn.Linear(4 * 4 * 4 * self.DIM, embed_size)\r\n\r\n self.xz_fc = nn.Sequential(\r\n nn.Linear(embed_size * 2, embed_size),\r\n nn.LayerNorm(embed_size),\r\n nn.LeakyReLU(0.2),\r\n nn.Linear(embed_size, 1),\r\n )\r\n\r\n def forward(self, input):\r\n x, z = input\r\n x = x.view(-1, self.input_channel, 32, 32)\r\n x = self.main(x)\r\n x = x.view(x.shape[0], -1)\r\n x = self.x_fc(x)\r\n z = self.z_fc(z)\r\n xz = torch.cat((x, z), 1)\r\n xz = self.xz_fc(xz)\r\n return xz.view(-1)\r\n\r\n","sub_path":"partial-encoder-decoder/nets_predict/Beijing_critic.py","file_name":"Beijing_critic.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"570529961","text":"import sys\nimport csv\nimport xml.etree.ElementTree as ET\nimport xml.dom.minidom\n\nif len(sys.argv) != 3:\n 
print(\"Usage: ToXML.py CSVfile, XMLfile\")\n    sys.exit()\n\nChar = ET.parse(sys.argv[2])\nroot = Char.getroot()\n\nwith open(sys.argv[1], 'r', newline='') as csvfile:\n    movereader = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\n    for row in movereader:\n        move = root.find(\".//moves/move[id='\" + row[0] + \"']\")\n        if move is not None:\n            move.find(\".//command\").text = row[2]\n            #print(row[2])\n\n\nfilename = sys.argv[2]\n#copyfile(filename, os.path.join(self.directory, \"xml backups\", self.CharName + \"-\" + self.CharXml.getroot().attrib['version'] + \".xml\"))\n#self.CharXml.getroot().attrib['version'] = str(int(time.time()))\nf = open(filename, \"w\")\n# print(ET.tostring(Char))\nxmlFile = xml.dom.minidom.parseString(ET.tostring(Char.getroot(), 'utf-8'))\nf.write(xmlFile.toprettyxml(indent=\"\", newl=\"\"))\nf.close()","sub_path":"TekkenData/Movelists/ToXML.py","file_name":"ToXML.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"428768158","text":"print(\"**********welcome to veggie store**********\")\ncart = {}\ndef menu():\n    mainmenu()\ndef mainmenu():\n    print(\"vegetables:price per kg\")\n    # prices here match what the per-vegetable handlers below actually charge\n    avail_veg = {\"carrot\":50,\"onion\":30,\"brinjal\":40,\"tomato\":30}\n    print(avail_veg)\n    choose = input(\"select vegetables you want to buy\")\n    if choose == \"carrot\" :\n        carrot()\n    elif choose == \"onion\":\n        onion()\n    elif choose == \"tomato\":\n        tomato()\n    elif choose == \"brinjal\":\n        brinjal()\n    else:\n        print(\"invalid choice\")\n        mainmenu()\n\ndef carrot():\n    print(\"price per kg = 50/-\")\n    quantity = int(input(\"select the kg's you need: \"))\n    tot_price = 50 * quantity\n    print(\"the total price of carrots is -----\", tot_price)\n    select = input(\"confirm your choice?y/n: \")\n    if select == \"Y\" or select == \"y\":\n        name = \"carrot\"\n        cart[name] = tot_price\n        print(\"successfully added to cart\")\n        print(\"do u like to shop more?Y/N\")\n        ca = input()\n        if ca == \"y\" or ca == \"Y\":\n            mainmenu()\n        elif ca == \"N\" or ca == \"n\":\n            cart_item()\n        else:\n            print(\"invalid choice\")\n    elif select == \"n\" or select == \"N\":\n        mainmenu()\n\n\ndef cart_item():\n    print(cart)\n    val = cart.values()\n    tot_val = sum(val)\n    print(\"total value of the cart\", tot_val)\n    print(\"do you want to continue shopping y/n:\")\n    choice_3 = input()\n    # compare strings with ==, not the identity operator 'is'\n    if choice_3 == \"Y\" or choice_3 == \"y\":\n        mainmenu()\n    elif choice_3 == \"N\" or choice_3 == \"n\":\n        print(\"thank you for shopping / bye...\")\n    else:\n        print(\"invalid input\")\n        cart_item()\n\ndef onion():\n    print(\"price per kg = 30/-\")\n    quantity = int(input(\"select the kg's you need: \"))\n    tot_price = 30 * quantity\n    print(\"the total price of onions is -----\", tot_price)\n    select = input(\"confirm your choice?y/n: \")\n    if select == \"Y\" or select == \"y\":\n        name = \"onion\"\n        cart[name] = tot_price\n        print(\"successfully added to cart\")\n        print(\"do u like to shop more?Y/N\")\n        on = input()\n        if on == \"y\" or on == \"Y\":\n            mainmenu()\n        elif on == \"N\" or on == \"n\":\n            cart_item()\n        else:\n            print(\"invalid choice\")\n\n    elif select == \"n\" or select == \"N\":\n        mainmenu()\n\ndef tomato():\n    print(\"price per kg = 30/-\")\n    quantity = int(input(\"select the kg's you need: \"))\n    tot_price = 30 * quantity\n    print(\"the total price of tomato is -----\", tot_price)\n    select = input(\"confirm your choice?y/n: \")\n    if select == \"Y\" or select == \"y\":\n        name = \"tomato\"\n        cart[name] = tot_price\n
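        # (added note) carrot/onion/tomato/brinjal repeat the same flow and differ\n        # only in name and unit price; a single parameterized handler would remove\n        # the duplication.\n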
        print(\"successfully added to cart\")\n        print(\"do u like to shop more?Y/N\")\n        to = input()\n        if to == \"y\" or to == \"Y\":\n            mainmenu()\n        elif to == \"N\" or to == \"n\":\n            # the original forgot the parentheses, so the cart was never shown\n            cart_item()\n        else:\n            print(\"invalid choice\")\n    elif select == \"n\" or select == \"N\":\n        mainmenu()\n\ndef brinjal():\n    print(\"price per kg = 40/-\")\n    quantity = int(input(\"select the kg's you need: \"))\n    tot_price = 40 * quantity\n    print(\"the total price of brinjal is -----\", tot_price)\n    select = input(\"confirm your choice?y/n: \")\n    if select == \"Y\" or select == \"y\":\n        name = \"brinjal\"\n        cart[name] = tot_price\n        print(\"successfully added to cart\")\n        print(\"do u like to shop more?Y/N\")\n        br = input()\n        if br == \"y\" or br == \"Y\":\n            mainmenu()\n        elif br == \"N\" or br == \"n\":\n            cart_item()\n        else:\n            print(\"invalid choice\")\n    elif select == \"n\" or select == \"N\":\n        mainmenu()\n\nmenu()","sub_path":"veggiestore.py","file_name":"veggiestore.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"268837103","text":"import numpy as np\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras.layers import *\nfrom tensorflow.compat.v1.keras.layers import CuDNNGRU\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.optimizers import Adam\n# the callbacks below are used in validate() but were never imported\nfrom tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n\n\nbase = np.load('/content/drive/My Drive/cullpdb+profile_6133_filtered.npy')\nbase = np.reshape(base, (-1, 700, 57))\na = np.arange(0,21)\nb = np.arange(35,56)\nc = np.hstack((a,b))\n\nprevisores = base[:, :, c]\nclasses = base[:, :, 22:30]\n\nX_train = previsores[:5278,:,:]\nX_val = previsores[5278:,:,:]\n\ny_train = classes[:5278,:,:]\ny_val = classes[5278:,:,:]\n\nbase = np.load('/content/drive/My Drive/cb513+profile_split1.npy')\nbase = np.reshape(base, (-1, 700, 57))\na = np.arange(0,21)\nb = np.arange(35,56)\nc = np.hstack((a,b))\n\nprevisores = base[:, :, c]\nclasses = base[:, :, 22:30]\n\nX_test = previsores\n\ny_test = classes\n\n\ndef conv_block(x, nb_filter, kernel_size, strides, pad):\n    x = Conv1D(nb_filter, kernel_size, strides=strides, padding=pad)(x)\n    x = BatchNormalization()(x)\n    x = Activation('relu')(x)\n    return x\n\n\ndef inception_A(input):\n\n    a1 = conv_block(input, 96, 1, 1, 'same')\n\n    a2 = conv_block(input, 64, 1, 1, 'same')\n    a2 = conv_block(a2, 96, 3, 1, 'same')\n\n    a3 = conv_block(input, 64, 1, 1, 'same')\n    a3 = conv_block(a3, 96, 3, 1, 'same')\n    a3 = conv_block(a3, 96, 3, 1, 'same')\n\n    a4 = AveragePooling1D(pool_size=3, strides=1, padding='same')(input)\n    a4 = conv_block(a4, 96, 1, 1, 'same')\n\n    x = concatenate([a1, a2, a3, a4])\n\n    return x\n\ndef inception_B(input):\n\n    a1 = conv_block(input, 384, 1, 1, 'same')\n\n    a2 = conv_block(input, 192, 1, 1, 'same')\n    a2 = conv_block(a2, 224, 1, 1, 'same')\n    a2 = conv_block(a2, 256, 7, 1, 'same')\n\n    a3 = conv_block(input, 192, 1, 1, 'same')\n    a3 = conv_block(a3, 192, 1, 1, 'same')\n    a3 = conv_block(a3, 224, 7, 1, 'same')\n    a3 = conv_block(a3, 224, 1, 1, 'same')\n    a3 = conv_block(a3, 256, 7, 1, 'same')\n\n    a4 = AveragePooling1D(pool_size=3, strides=1, padding='same')(input)\n    a4 = conv_block(a4, 128, 1, 1, 'same')\n\n    x = concatenate([a1, a2, a3, a4])\n\n    return x\n\ndef inception_C(input):\n\n    a1 = conv_block(input, 256, 1, 1, 'same')\n\n    a2 = conv_block(input, 384, 1, 1, 'same')\n    a21 = conv_block(a2, 256, 1, 1, 'same')\n    a22 = conv_block(a2, 256, 3, 1, 'same')\n\n    a3 = conv_block(input, 384, 1, 1, 'same')\n    a3 = conv_block(a3, 448, 1, 1, 'same')\n    a3 = conv_block(a3, 512, 3, 1, 
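# (added note) assumed to follow Inception-v4 block C: this widened 512-channel conv feeds the two parallel heads a31/a32 below\n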
'same')\n a31 = conv_block(a3, 256, 3, 1, 'same')\n a32 = conv_block(a3, 256, 1, 1, 'same')\n\n a4 = AveragePooling1D(pool_size=3, strides=1, padding='same')(input)\n a4 = conv_block(a4, 256, 1, 1, 'same')\n\n x = concatenate([a1, a21, a22, a31, a32, a4])\n\n return x\n\ndef criarRede(neurons, layers, b1=0, b2=10, b3=0):\n\n #aminoacid sequence\n inp1 = Input(shape=(700, 21, ))\n \n # PSSM\n inp2 = Input(shape=(700, 21, ))\n\n flat = Flatten()(inp1)\n emb = Embedding(14700, 1, input_length=(14700,))(flat)\n flat = Flatten()(emb)\n re = Reshape((700, 21))(flat)\n\n x = concatenate([re, inp2])\n\n for _ in range(b1):\n x = inception_A(x)\n \n for _ in range(b2):\n x = inception_B(x)\n\n for _ in range(b3):\n x = inception_C(x)\n\n gru = Bidirectional(CuDNNGRU(neurons, return_sequences=True))(x)\n\n for _ in range(layers-1):\n gru = Bidirectional(CuDNNGRU(neurons, return_sequences=True))(gru)\n \n out = Dense(8, activation='softmax')(gru)\n\n model = Model([inp1, inp2], out)\n\n adam = Adam(learning_rate=0.001)\n\n model.compile(optimizer = adam, metrics = ['acc'], loss='categorical_crossentropy')\n\n return model\n\n\ndef validate(neurons, layers, b2):\n model = criarRede(neurons, layers, b2=b2)\n es = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min', restore_best_weights=True)\n lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, mode='min', verbose=1)\n mc = ModelCheckpoint('/content/drive/My Drive/IRN/cb6133-IRN-' + str(b2) + '.hdf5', save_best_only=True)\n model.fit([X_train[:, :, :21], X_train[:, :, 21:]], y_train, epochs=50, batch_size=32, callbacks=[es, lr, mc], validation_data=([X_val[:, :, :21], X_val[:, :, 21:]], y_val))\n\n return model\n\n\nfor b2 in [3, 4, 5, 6, 7]:\n model = None\n model = validate(100, 3, b2)\n\n # prediction validation\n pred = model.predict([X_val[:, :, :21], X_val[:, :, 21:]])\n predicted = np.reshape(pred, (pred.shape[0] * pred.shape[1], 8)) \n x_tst = np.reshape(X_val, (X_val.shape[0] * X_val.shape[1], X_val.shape[2]))\n \n for i in range(len(x_tst)):\n cont = 0\n for j in range(len(x_tst[i])):\n cont += x_tst[i][j]\n if cont != 0:\n y_pred.append(predicted[i])\n \n y_pred = np.array(y_pred)\n np.save('/content/drive/My Drive/IRN/cb6133-IRN-' + str(b2) +'-val.npy', y_pred)\n\n\n # prediction test\n pred = model.predict([X_test[:, :, :21], X_test[:, :, 21:]])\n predicted = np.reshape(pred, (pred.shape[0] * pred.shape[1], 8)) \n x_tst = np.reshape(X_test, (X_test.shape[0] * X_test.shape[1], X_test.shape[2]))\n \n for i in range(len(x_tst)):\n cont = 0\n for j in range(len(x_tst[i])):\n cont += x_tst[i][j]\n if cont != 0:\n y_pred.append(predicted[i])\n \n y_pred = np.array(y_pred)\n np.save('/content/drive/My Drive/IRN/cb6133-IRN-' + str(b2) +'-test.npy', y_pred)","sub_path":"Inception Recurrent Networks/CB513/IRN.py","file_name":"IRN.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"182125908","text":"from flask import Flask, g\nfrom flask import render_template, flash, redirect, url_for\n\n# connect new_sub w/ app.js\nfrom forms import SubForm, PostForm, CommentForm\nimport models\n\nDEBUG = True\nPORT = 8000\n\napp = Flask(__name__)\napp.secret_key = 'adkjfalj.adflja.dfnasdf.asd'\n\n# Handle requests when the come in (before) and when they complete (after)\n@app.before_request\ndef before_request():\n \"\"\"Connect to the DB before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n\n\n@app.after_request\ndef 
after_request(response):\n \"\"\"Close the database connection after each request.\"\"\"\n g.db.close()\n return response\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n # form variable represenditing subform\n form = SubForm()\n # check if form submission is valid\n if form.validate_on_submit():\n # if it is , createa new sub and redirect the user\n models.Sub.create(\n name=form.name.data.strip(),\n description=form.description.data.strip())\n\n flash(\"New sub registered. Called: {}\".format(form.name.data))\n return redirect('/r')\n\n # if not, send them back to the form\n return render_template('new_sub.html', title=\"New Sub\", form=form)\n\n\n@app.route('/r')\n@app.route('/r/')\n@app.route('/r/', methods=['GET', 'POST'])\ndef r(sub=None):\n if sub == None:\n # get all subs\n subs = models.Sub.select().limit(100)\n return render_template(\"subs.html\", subs=subs)\n else:\n # use the ID to find the right Sub\n sub_id = int(sub)\n # read use .get, want to loop over - like db.fimd in express\n sub = models.Sub.get(models.Sub.id == sub_id)\n # called posts postData - sub.post coming from models.py backref sub\n posts = sub.posts\n\n # Define the form for Posts\n form = PostForm()\n if form.validate_on_submit():\n models.Post.create(\n user=form.user.data.strip(),\n title=form.title.data.strip(),\n text=form.text.data.strip(),\n sub=sub)\n\n flash(\"New post created\")\n return redirect(\"/r/{}\".format(sub_id))\n # send our found sub to the template- the retrieved subs to our subs.html template\n return render_template(\"sub.html\", sub=sub, posts=posts, form=form)\n\n\n@app.route('/posts')\n@app.route('/posts/')\n@app.route('/posts/', methods=['GET', 'POST'])\ndef posts(id=None):\n if id == None:\n posts = models.Post.select().limit(100)\n return render_template('posts.html', posts=posts)\n else:\n post_id = int(id)\n post = models.Post.get(models.Post.id == post_id)\n comments = post.comments\n\n form = CommentForm()\n if form.validate_on_submit():\n models.Comment.create(\n user = form.user.data.strip(),\n # created_date = form.created_date.data.strip(),\n text = form.text.data.strip(),\n post = post)\n flash(\"New comment created\")\n return redirect(\"/posts/{}\".format(post_id))\n \n flash(\"Serving post page\")\n return render_template('post.html', post=post, comments=comments, form=form)\n\n\nif __name__ == '__main__':\n models.initialize()\n app.run(debug=DEBUG, port=PORT)\n","sub_path":"lourdesmorales-lvmor/week-8/Flask-Models/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"124267433","text":"'''\r\nhttps://leetcode.com/problems/evaluate-reverse-polish-notation/\r\n------------\r\nLogic\r\n------------\r\nInfix Notation\r\n(2 + 3) * 4\r\nPostfix Notation\r\n2 3 + 4 *\r\nRule 1: When I see operand, I push to stack\r\nRule 2: When I see operator, I pop twice, and do the simple calculation\r\npush 2\r\npush 3\r\npop 3\r\npop 2\r\ncaculate 2 * 3 = 6\r\npush 6\r\npush 4\r\npop 4\r\npop 6\r\ncalculate 4 * 6 = 24\r\npush 24\r\npop2 24 -> final result\r\n'''\r\n\r\nfrom typing import List\r\n\r\nclass Solution(object):\r\n def evalRPN(self, tokens: List[str]) -> int:\r\n stack = []\r\n for token in tokens:\r\n if token in [\"+\", \"-\", \"*\", \"/\"]:\r\n num2 = int(stack.pop())\r\n num1 = int(stack.pop())\r\n\r\n if token == '+':\r\n result = num1 + num2\r\n elif token == '*':\r\n result = num1 * num2\r\n elif token == '/':\r\n result = num1 / 
num2\r\n else:\r\n result = num1 - num2\r\n\r\n stack.append(result)\r\n else:\r\n stack.append(token)\r\n return int(stack.pop())\r\n","sub_path":"python_0472_practice_stack_leetcode_150_RPN.py","file_name":"python_0472_practice_stack_leetcode_150_RPN.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"9691111","text":"# -*- mode:python; coding:utf-8 -*-\n\n# Copyright (c) 2021 IBM Corp. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for trestle markdown_validator module.\"\"\"\nimport pathlib\n\nimport frontmatter\n\nimport pytest\n\nimport trestle.core.const as const\nfrom trestle.core.markdown.markdown_node import MarkdownNode\n\n\n@pytest.mark.parametrize('md_path', [(pathlib.Path('tests/data/markdown/valid_complex_md.md'))])\ndef test_tree_text_equal_to_md(md_path: pathlib.Path) -> None:\n \"\"\"Test tree construction.\"\"\"\n contents = frontmatter.loads(md_path.open('r', encoding=const.FILE_ENCODING).read())\n markdown_wo_header = contents.content\n lines = markdown_wo_header.split('\\n')\n\n tree: MarkdownNode = MarkdownNode.build_tree_from_markdown(lines)\n assert markdown_wo_header == tree.content.raw_text\n\n\n@pytest.mark.parametrize('md_path', [(pathlib.Path('tests/data/markdown/valid_complex_md.md'))])\ndef test_md_get_node_for_key(md_path: pathlib.Path) -> None:\n \"\"\"Test node fetching.\"\"\"\n contents = frontmatter.loads(md_path.open('r', encoding=const.FILE_ENCODING).read())\n markdown_wo_header = contents.content\n lines = markdown_wo_header.split('\\n')\n\n tree: MarkdownNode = MarkdownNode.build_tree_from_markdown(lines)\n assert tree.get_node_for_key('header1') is None\n assert tree.get_node_for_key('nonexisting header') is None\n node: MarkdownNode = tree.get_node_for_key('# 2. MD Header 2 Blockquotes')\n assert node is not None\n # Assert returned node has content\n assert node.key == '# 2. 
MD Header 2 Blockquotes'\n assert len(node.content.blockquotes) == 2\n assert node.content.text[2] == 'some text after blockquote'\n # Assert unstrict and strict matching return same notes\n deep_node_unstrict = tree.get_node_for_key('5.2.2.1', strict_matching=False)\n deep_node_strict = tree.get_node_for_key('#### 5.2.2.1 A even deeper section here 2')\n assert deep_node_strict == deep_node_unstrict\n # Assert substrings are matched\n node = tree.get_node_for_key('Header 4 Tricky', strict_matching=False)\n assert node is not None\n # Assert first match returned if strict matching is off\n node = tree.get_node_for_key('5.1.1', strict_matching=False)\n assert node.key == '### 5.1.1 A deeper section 1'\n\n\n@pytest.mark.parametrize('md_path', [(pathlib.Path('tests/data/markdown/valid_complex_md.md'))])\ndef test_md_content_is_correct(md_path: pathlib.Path) -> None:\n \"\"\"Test that read content is correct.\"\"\"\n contents = frontmatter.loads(md_path.open('r', encoding=const.FILE_ENCODING).read())\n markdown_wo_header = contents.content\n lines = markdown_wo_header.split('\\n')\n\n tree: MarkdownNode = MarkdownNode.build_tree_from_markdown(lines)\n assert tree is not None\n assert len(tree.content.subnodes_keys) == 28\n assert tree.content.raw_text == markdown_wo_header\n assert tree.key == 'root'\n assert len(tree.content.blockquotes) == 5\n assert len(tree.content.tables) == 7\n assert len(tree.content.code_lines) == 12\n deep_node = tree.get_node_for_key('5.1.1.1.1', strict_matching=False)\n assert deep_node.content.text[1] == 'some very deep text'\n\n\n@pytest.mark.parametrize('md_path', [(pathlib.Path('tests/data/markdown/valid_complex_md.md'))])\ndef test_md_headers_in_html_blocks_are_ignored(md_path: pathlib.Path) -> None:\n \"\"\"Test that headers in the various html blocks are ignored.\"\"\"\n contents = frontmatter.loads(md_path.open('r', encoding=const.FILE_ENCODING).read())\n markdown_wo_header = contents.content\n lines = markdown_wo_header.split('\\n')\n\n tree: MarkdownNode = MarkdownNode.build_tree_from_markdown(lines)\n assert tree is not None\n tricky_node = tree.get_node_for_key('1.3', strict_matching=False)\n assert tricky_node.key == '## 1.3 MD Subheader 1.3 HTML'\n assert len(tricky_node.content.subnodes_keys) == 4\n assert len(tricky_node.content.html_lines) == 36\n","sub_path":"tests/trestle/core/markdown/markdown_node_test.py","file_name":"markdown_node_test.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"298914364","text":"import math\nfrom fractions import Fraction\n\nn, q = map(int, input().split())\ntherms = []\nfor _ in range(n):\n a, b = map(int, input().split())\n therms.append((a, b))\n\nfor _ in range(q):\n a, b, v = map(int, input().split())\n # convert v from unit a to celsius\n # v / (b - a) = x / 100\n a -= 1\n b -= 1\n cels = Fraction(100, 1) * Fraction(v - therms[a][0], therms[a][1] - therms[a][0])\n\n # convert cels to unit b\n # x / 100 = y / (b - a)\n ans = therms[b][0] + Fraction(therms[b][1] - therms[b][0], 100) * cels\n g = math.gcd(ans.numerator, ans.denominator)\n print('{}/{}'.format(ans.numerator // g, ans.denominator // g))\n","sub_path":"kattis/thermostat.py","file_name":"thermostat.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"106160953","text":"import os\nimport unittest\n\nimport numpy as np\nfrom jina.executors import 
BaseExecutor\nfrom tests.executors import ExecutorTestCase\n\n\nclass ImageTestCase(ExecutorTestCase):\n @property\n def workspace(self):\n return os.path.join(os.environ['TEST_WORKDIR'], 'test_tmp')\n\n @property\n def target_output_dim(self):\n return self._target_output_dim\n\n @target_output_dim.setter\n def target_output_dim(self, output_dim):\n self._target_output_dim = output_dim\n\n @property\n def input_dim(self):\n return self._input_dim\n\n @input_dim.setter\n def input_dim(self, input_dim):\n self._input_dim = input_dim\n\n def get_encoder(self):\n encoder = self._get_encoder(self.metas)\n if encoder is not None:\n encoder.workspace = self.workspace\n self.add_tmpfile(encoder.workspace)\n return encoder\n\n def _get_encoder(self, metas):\n return None\n\n @unittest.skipUnless('JINA_TEST_PRETRAINED' in os.environ, 'skip the pretrained test if not set')\n def test_encoding_results(self):\n encoder = self.get_encoder()\n if encoder is None:\n return\n test_data = np.random.rand(2, 3, self.input_dim, self.input_dim)\n encoded_data = encoder.encode(test_data)\n self.assertEqual(encoded_data.shape, (2, self.target_output_dim))\n\n @unittest.skipUnless('JINA_TEST_PRETRAINED' in os.environ, 'skip the pretrained test if not set')\n def test_save_and_load(self):\n encoder = self.get_encoder()\n if encoder is None:\n return\n test_data = np.random.rand(2, 3, self.input_dim, self.input_dim)\n encoded_data_control = encoder.encode(test_data)\n encoder.touch()\n encoder.save()\n self.assertTrue(os.path.exists(encoder.save_abspath))\n encoder_loaded = BaseExecutor.load(encoder.save_abspath)\n encoded_data_test = encoder_loaded.encode(test_data)\n self.assertEqual(encoder_loaded.channel_axis, encoder.channel_axis)\n np.testing.assert_array_equal(encoded_data_control, encoded_data_test)\n\n @unittest.skipUnless('JINA_TEST_PRETRAINED' in os.environ, 'skip the pretrained test if not set')\n def test_save_and_load_config(self):\n encoder = self.get_encoder()\n if encoder is None:\n return\n encoder.save_config()\n self.assertTrue(os.path.exists(encoder.config_abspath))\n encoder_loaded = BaseExecutor.load_config(encoder.config_abspath)\n self.assertEqual(encoder_loaded.channel_axis, encoder.channel_axis)\n","sub_path":"tests/executors/encoders/image/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"190878766","text":"class BinaryTree:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n def insert_left(self,new_val):\n if self.left==None:\n self.left=BinaryTree(new_val)\n else:\n new_tree=BinaryTree(new_val)\n new_tree.left=self.left\n self.left=new_tree\n \n def insert_right(self,new_val):\n if self.right==None:\n self.right=BinaryTree(new_val)\n else:\n new_tree=BinaryTree(new_val)\n new_tree.right=self.right\n self.right=new_tree\n \n\n\n def get_max_depth(self,current_max=0,l=[]):\n if self.left is not None:\n l=self.left.get_max_depth(current_max+1,l)\n if self.right is not None:\n l=self.right.get_max_depth(current_max+1,l)\n if self.right is None and self.left is None:\n print('max depth of'+str(current_max))\n l.append(current_max)\n return l\n return l\n\n def showTree(self):\n \"\"\" This function is to show the results for the given tree\n \"\"\"\n print(self.val)\n if self.left is not None:\n self.left.showTree()\n if self.right is not None:\n self.right.showTree() \n 
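# pre-order traversal: the node prints itself, then the left, then the right subtree\n\n\n# Illustrative check (added; not part of the original script): get_max_depth\n# records one depth per leaf, and a fresh list is passed explicitly because the\n# mutable default argument l=[] is shared between calls.\n_lone = BinaryTree(7)\nassert _lone.get_max_depth(0, []) == [0]\n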
\n\nx=BinaryTree(1)\nx.insert_left(1)\nx.insert_right(2)\nx.left.insert_left(4)\nx.left.insert_right(5)\nx.insert_right(3)\nx.insert_right(1000)\n\n#print my tree\nx.showTree()\n#Should return all the different max depths\n\n\n\nl=max(x.get_max_depth())\nprint(l)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"12911886","text":"#--------------------------------------------------\n# If an element in an MxN matrix is 0, its entire row and\n# column are set to 0.\n#--------------------------------------------------\n'''\nTodo:\n 1. Figure out np.matrix index, am I correct way? mat[1,:][:,1] = arr[1][1]\n \nHint:\n 1. Find zero part first\n 2. Use only O(n) for extra space\n 3. Use original matrix to remember info\n\nNote:\n\n'''\n\nimport numpy as np\n\n\nDEBUG = 0\ndef printd(*args, **kwargs):\n if DEBUG:\n print(*args, **kwargs)\n\n\ndef zeroMatrix_1st_list(mat):\n ''' Complexity: O(N^2) / O(N^2) '''\n row = len(mat)\n col = len(mat[0])\n\n # Declaire new 2d array based on original 2d array\n retMat = [[0 for c in range(col)] for r in range(row)]\n\n # Copy 2d array\n for r in range(row):\n for c in range(col): \n retMat[r][c] = mat[r][c]\n\n # Set corresponding row and col to 0\n for r in range(row):\n for c in range(col): \n if mat[r][c] == 0: \n for rr in range(row):\n printd(\"[%d, %d] Clear row(%d) %d to 0\" % (r, c, rr, mat[rr][c]))\n retMat[rr][c] = 0\n for cc in range(col):\n printd(\"[%d, %d] Clear col(%d) %d to 0\" % (r, c, cc, mat[r][cc]))\n retMat[r][cc] = 0\n\n return retMat\n\n\ndef zeroMatrix_1st_numpyArray(mat):\n ''' Complexity: O(N^2) / O(N^2) '''\n row = len(mat)\n col = len(mat[0])\n\n nMat = np.array(mat)\n retMat = np.copy(nMat)\n\n for r in range(row):\n for c in range(col): \n if nMat[r][c] == 0: \n for rr in range(row):\n printd(\"[%d, %d] Clear row(%d) %d to 0\" % (r, c, rr, nMat[rr][c]))\n retMat[rr][c] = 0\n for cc in range(col):\n printd(\"[%d, %d] Clear col(%d) %d to 0\" % (r, c, cc, nMat[r][cc]))\n retMat[r][cc] = 0\n\n \n return retMat.tolist()\n\n\ndef zeroMatrix_1st_numpyMatrix(mat):\n ''' Complexity: O(N^2) / O(N^2) '''\n row = len(mat)\n col = len(mat[0])\n\n nMat = np.matrix(mat)\n retMat = np.matrix.copy(nMat)\n\n # Show shape and dimension\n print(\"Shape of matrix: \", retMat.shape)\n print(\"Dim of matrix: \", len(retMat.shape))\n \n # Index of row 0 in matrix\n print(\"Index of row 0 in matrix:\", end='')\n print(nMat[0]) \n # Index of [1][1] in matrix\n print(\"Index of [1][1] in matrix:\", end='')\n print(nMat[1,:][:,1])\n\n for r in range(row):\n for c in range(col): \n if nMat[r,:][:,c] == 0: \n for rr in range(row):\n #printd(\"[%d, %d] Clear row(%d) %d to 0\" % (r, c, rr, nMat[rr][c]))\n retMat[rr,:][:,c] = 0\n for cc in range(col):\n #printd(\"[%d, %d] Clear col(%d) %d to 0\" % (r, c, cc, nMat[r][cc]))\n retMat[r,:][:,cc] = 0\n\n\n printd(retMat)\n return retMat.tolist()\n\n\ndef zeroMatrix_2nd_list(mat):\n ''' Complexity: O(N^2) / O(c) '''\n row = len(mat)\n col = len(mat[0])\n\n # Set corresponding row and col to \n zeroRow = []\n zeroCol = []\n for r in range(row):\n for c in range(col):\n if mat[r][c] == 0:\n if r not in zeroRow:\n zeroRow.append(r)\n if c not in zeroCol:\n zeroCol.append(c)\n\n for r in range(row):\n for c in range(col):\n if r in zeroRow or c in zeroCol:\n mat[r][c] = 0\n\n return mat\n\n\ndef printMatrix(mat):\n row = len(mat)\n col = len(mat[0])\n for r in range(row):\n 
for c in range(col):\n print(mat[r][c], end =\" \")\n print(\"\")\n\n\ndef main():\n mat1 = [[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]]\n\n mat2 = [[1, 1, 1],\n [1, 0, 1],\n [0, 1, 1]]\n \n mat3 = [[1, 1, 1, 1],\n [1, 0, 1, 1],\n [0, 1, 1, 1]]\n\n matList = [mat1, mat2, mat3]\n\n funcList = [zeroMatrix_1st_list, zeroMatrix_1st_numpyArray, zeroMatrix_1st_numpyMatrix,\n zeroMatrix_2nd_list]\n\n for func in funcList:\n print(func)\n for m in matList:\n print(\"-------------\")\n ret = func(m)\n printMatrix(ret)\n\n\n#####################\n# File entry point\n#####################\nif __name__=='__main__':\n main()","sub_path":"1.8_ZeroMatrix.py","file_name":"1.8_ZeroMatrix.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"109001445","text":"import sys\n\nfile = open('contador.txt', 'a+')\nfile.seek(0)\ncontenido = file.readline()\n\nif len(contenido) == 0:\n\tcontenido = '0'\n\tfile.write(contenido)\n\nfile.close()\n\ntry:\n\tcontador = int(contenido)\n\n\tif len(sys.argv) == 2:\n\t\tif sys.argv[1] == 'inc':\n\t\t\tcontador += 1\n\t\telif sys.argv[1] == 'dec':\n\t\t\tcontador -= 1\n\n\tprint(contador)\n\n\tfile = open('contador.txt', 'w')\n\tfile.write(str(contador))\n\tfile.close()\nexcept:\n\tprint('Error: Fichero corrupto')","sub_path":"Scripts/contador.py","file_name":"contador.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"591193224","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\n\nfrom videocore.assembler import qpu\nfrom videocore.driver import Driver\n\n\n@qpu\ndef kernel(asm):\n \"\"\"\n kernel program\n \"\"\"\n # Memory -> VPM, 16 elements * 2 lines\n setup_dma_load(nrows=2)\n start_dma_load(uniform)\n wait_dma_load()\n \n # VPM -> Register, 16 elements * 2 lines\n setup_vpm_read(nrows = 2)\n mov(r0, vpm)\n mov(r1, vpm)\n\n # calc: add r0 and r1\n # Register -> VPM, 16 elements * 2 lines\n setup_vpm_write()\n fadd(vpm, r0, r1)\n\n # VPM -> Memory, 16 elements * 1 line\n setup_dma_store(nrows=1)\n start_dma_store(uniform)\n wait_dma_store()\n\n exit()\n\n\nif __name__ == '__main__':\n with Driver() as drv:\n \"\"\"\n host program\n \"\"\"\n list_a = np.arange(16).astype('float32')\n list_b = np.full(16, 2.0).astype('float32')\n\n # combine arrays\n inp = drv.copy(np.r_[list_a, list_b])\n out = drv.alloc(16, 'float32')\n\n print(' list_a '.center(80, '='))\n print(list_a)\n print(' list_b '.center(80, '='))\n print(list_b)\n\n \"\"\"\n execute kernel program\n \"\"\"\n drv.execute(\n n_threads=1,\n program=drv.program(kernel),\n uniforms=[inp.address, out.address]\n )\n\n print(' list_a + list_b gpu_out '.center(80, '='))\n print(out)\n\n cpu_ans = list_a + list_b\n error = cpu_ans - out\n\n print(' list_a + list_b cpu_out '.center(80, '='))\n print(cpu_ans)\n\n print(' cpu/gpu error '.center(80, '='))\n print(np.abs(error))\n","sub_path":"006calc_add_errorcheck.py","file_name":"006calc_add_errorcheck.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"458409084","text":"#!/usr/bin/env python\n\n# This file is part of Gummworld2.\n#\n# Gummworld2 is free software: you can redistribute it and/or modify it\n# under the terms of the GNU Lesser General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) 
any later version.\n#\n# Gummworld2 is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with Gummworld2. If not, see .\n\n\n__version__ = '$Id: basicmap.py 427 2013-08-26 04:25:04Z stabbingfinger@gmail.com $'\n__author__ = 'Gummbum, (c) 2011-2013'\n\n\n__doc__ = \"\"\"basicmap.py - Basic Map module for Gummworld2.\n\nDefines the BasicMap, which serves layers and sprite objects.\n\nBasicMap combines view (pygame) and model (world coordinates). It contains a\nrect attribute defining its dimensions, and observes pygame coordinate space.\n\nThe layers attribute is a spatialhash containing sprites. This can be\naccessed directly, or via the class methods. See also the toolkit module for\nconvenience utilities.\n\nThe caller must manage maps and their corresponding worlds by swapping the\nState.map and State.world package globals, for example:\n \n # Create the initial map and world, and save it.\n State.map = BasicMap(width, height, tile_width, tile_height)\n State.world = model.World(State.map.rect)\n levels = []\n levels.append((State.map,State.world))\n ...\n # Create a new one, save it.\n State.map = BasicMap(new_width, new_height, new_tile_width, new_tile_height)\n State.world = model.World(State.map.rect)\n levels.append((State.map,State.world))\n ...\n # Restore a map and world.\n State.map,State.world = levels[0]\n \nAlternatively State.save() and State.restore() can be used to facilitate this.\n\"\"\"\n\n\nimport pygame\n\nfrom gummworld2 import data, spatialhash, Vec2d\nfrom gummworld2.ui import text_color\n\n\nclass BasicMap(object):\n \n def __init__(self, width, height, tile_width, tile_height):\n \"\"\"Construct a BasicMap object.\n \"\"\"\n self.layers = []\n \n self.pixel_width = width * tile_width\n self.pixel_height = height * tile_height\n self.width = width\n self.height = height\n self.tile_width = tile_width\n self.tile_height = tile_height\n \n self.rect = pygame.Rect(0,0,self.pixel_width,self.pixel_height)\n \n def get_layer(self, layer_index):\n return self.layers[layer_index]\n \n def get_layers(self, which_layers=None):\n if not which_layers:\n which_layers = range(len(self.layers))\n return [L for i,L in enumerate(self.layers) if i in which_layers]\n \n def get_objects_in_rect(self, rect, layeri=0):\n tiles = []\n content2D = self.get_layer(layeri).content2D\n x1,y1,x2,y2 = self.rect_to_range(rect, layeri)\n for column in content2D[x1:x2+1]:\n tiles.extend([t for t in column[y1:y2+1] if t])\n return tiles\n \n def collapse(self, collapse=(1,1), which_layers=None):\n \"\"\"Collapse which_layers by joining num_tiles into one tile. The\n original layers are replaced by new layers.\n \n The collapse argument is the number of tiles on the X and Y axes to\n join.\n \n The collapse_layers argument is a sequence of indices indicating to\n which TiledMap.layers the collapse algorithm should be applied. 
See the\n tiledmap.collapse_map.\n \"\"\"\n if collapse <= (1,1):\n return\n if which_layers is None:\n which_layers = range(len(self.layers))\n for layeri in which_layers:\n self.layers[layeri].collapse(collapse)\n \n def merge_layers(self, which_layers=None):\n if which_layers is None:\n which_layers = range(len(self.layers))\n if len(which_layers) < 2:\n return\n dest_layer = self.layers[which_layers[0]]\n del_layers = []\n for layeri in which_layers[1:]:\n## print 'blit_layer'\n src_layer = self.layers[layeri]\n dest_layer.blit_layer(src_layer)\n del_layers.append(src_layer)\n for layer in del_layers:\n## print 'del layer'\n self.layers.remove(layer)\n\n\nclass BasicLayer(object):\n \n def __init__(self, parent_map, layer_index, cell_size=None):\n self.parent_map = parent_map\n \n self.tile_width = parent_map.tile_width\n self.tile_height = parent_map.tile_height\n self.width = parent_map.width\n self.height = parent_map.height\n self.pixel_width = self.width * self.tile_width\n self.pixel_height = self.height * self.tile_height\n \n if cell_size == None:\n cell_size = max(self.tile_width, self.tile_height)\n self.rect = pygame.Rect(0,0, self.pixel_width+1, self.pixel_height+1)\n self.objects = spatialhash.SpatialHash(self.rect, cell_size)\n \n self.layeri = layer_index\n self.visible = True\n \n def add(self, tile):\n self.objects.add(tile)\n \n def get_objects_in_rect(self, rect):\n return self.objects.intersect_objects(rect)\n \n def collapse(self, collapse=(1,1)):\n if collapse <= (1,1):\n return\n new_layer = BasicLayer(self.parent_map, self.layeri)\n collapse_layer(self, new_layer, collapse)\n self.parent_map.layers[self.layeri] = new_layer\n \n def blit_layer(self, src_layer):\n blit_layer(self, src_layer)\n \n def __iter__(self):\n return iter(self.objects)\n\n\ndef collapse_layer(old_layer, new_layer, num_tiles=(2,2)):\n \"\"\"Collapse a single layer by joining num_tiles into one tile. A new layer\n is returned.\n \n The old_layer argument is the layer to process.\n \n The new_layer argument is the layer to build.\n \n The num_tiles argument is a tuple representing the number of tiles in the X\n and Y axes to join.\n \n If a map area is sparse (fewer tiles than num_tiles[0] * num_tiles[1]) the\n tiles will be kept as they are.\n \n If tiles with different characteristics are joined, the results can be\n unexpected. These characteristics include some flags, depth, colorkey. 
This\n can be avoided by pre-processing the map to convert all images so they have\n compatible characteristics.\n \"\"\"\n from pygame.sprite import Sprite\n from gummworld2 import Vec2d\n \n # New layer dimensions.\n num_tiles = Vec2d(num_tiles)\n tw,th = (old_layer.tile_width,old_layer.tile_height) * num_tiles\n mw,mh = (old_layer.width,old_layer.height) // num_tiles\n if mw * num_tiles.x != old_layer.pixel_width:\n mw += 1\n if mh * num_tiles.y != old_layer.pixel_height:\n mh += 1\n # Poke the right values into new_layer.\n cell_size = max(tw,th) * 2\n new_layer.objects = spatialhash.SpatialHash(old_layer.rect, cell_size)\n new_layer.width = mw\n new_layer.height = mh\n new_layer.tile_width = tw\n new_layer.tile_height = th\n # Grab groups of map sprites, joining them into a single larger image.\n query_rect = pygame.Rect(0,0,tw-1,th-1)\n for y in range(0, mh*th, th):\n for x in range(0, mw*tw, tw):\n query_rect.topleft = x,y\n sprites = old_layer.objects.intersect_objects(query_rect)\n if len(sprites) != num_tiles.x * num_tiles.y:\n for s in sprites:\n new_layer.add(s)\n continue\n # If sprite images have different characteristics, they cannot be\n # reliably collapsed. In which case, keep them as-is.\n incompatible = False\n image = sprites[0].image\n flags = image.get_flags() ^ pygame.SRCALPHA\n colorkey = image.get_colorkey()\n depth = image.get_bitsize()\n# This is probably too restrictive. However, some combinations of tiles may\n# give funky results.\n# all_details = (flags,colorkey,depth)\n# for s in sprites[1:]:\n# if all_details != (\n# s.image.get_flags(),\n# s.image.get_colorkey(),\n# s.image.get_bitsize(),\n# ):\n# incompatible = True\n# if incompatible:\n# print 'collapse_layer: incompatible image characteristics'\n# for s in sprites:\n# new_layer.add(s)\n# continue\n # Make a new sprite.\n new_sprite = Sprite()\n new_sprite.rect = sprites[0].rect.unionall([s.rect for s in sprites[1:]])\n new_sprite.rect.topleft = x,y\n new_sprite.image = pygame.surface.Surface(new_sprite.rect.size, flags, depth)\n if colorkey:\n new_sprite.image.set_colorkey(colorkey)\n \n # Blit (x,y) tile and neighboring tiles to right and lower...\n left = reduce(min, [s.rect.x for s in sprites])\n top = reduce(min, [s.rect.y for s in sprites])\n for sprite in sprites:\n p = sprite.rect.x - left, sprite.rect.y - top\n new_sprite.image.blit(sprite.image.convert(depth, flags), p)\n new_layer.add(new_sprite)\n return new_layer\n\n\ndef blit_layer(dest_layer, src_layer):\n for dest_sprite in dest_layer:\n dimage = dest_sprite.image.copy()\n drect = dest_sprite.rect\n for src_sprite in src_layer:\n simage = src_sprite.image\n srect = src_sprite.rect\n x = srect.x - drect.x\n y = srect.y - drect.y\n dimage.blit(src_sprite.image, (x,y))\n dest_sprite.image = dimage\n","sub_path":"librerie/gamelib/gummworld2/basicmap.py","file_name":"basicmap.py","file_ext":"py","file_size_in_byte":9899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"413846943","text":"# -*- coding: utf-8 -*-\n\nimport urlparse\n\nimport scrapy\n\nfrom chyxx.items import ChyxxItem\n_META_VERSION = 'v1.0'\n\n\nclass ChyxxSpider(scrapy.Spider):\n name = 'zhengce'\n result_dir = './result'\n filename = name + '.json'\n meta_version = _META_VERSION\n custom_settings = {\n 'CONCURRENT_REQUESTS': 1,\n 'ITEM_PIPELINES': {\n 'chyxx.pipelines.ChyxxPipeline': 300,\n },\n 'AUTOTHROTTLE_ENABLED': False,\n }\n\n def start_requests(self):\n for page in range(1, 42):\n yield scrapy.Request(\n 
                url='http://www.chyxx.com/zhengce/{}.html'.format(page),\n                meta={'Bigtype': '包装行业政策'}\n            )\n\n    def parse(self, response):\n        for each_item in response.css('div.pageList ul.list li'):\n            title = each_item.css('a:nth-child(2)::text').extract_first()\n            yield scrapy.Request(\n                url=each_item.css('a:nth-child(2)::attr(href)').extract_first(),\n                callback=self.parse_content,\n                meta={'title': title, 'Bigtype': response.meta.get('Bigtype')}\n            )\n\n    def parse_content(self, response):\n        item = ChyxxItem()\n        item['datePublished'] = response.css('div.detail div.info span:nth-child(1)::text').extract_first().strip()\n        item['headline'] = response.meta.get('title')\n        item['Bigtype'] = response.meta.get('Bigtype')\n        item['articleBody'] = response.css('div#contentBody').xpath('string(.)').extract_first().replace(u'中国产业信息网微信服务号', '').replace(u'中国产业信息网微信公众号', '').replace(response.css('div.content-info::text').extract_first(), '')\n        try:\n            item['copyrightHolder'] = response.css('div.content-info::text').extract_first().split(u':')[1]\n        except Exception:\n            item['copyrightHolder'] = ''\n        item['annex'] = [urlparse.urljoin(response.url, each_pic) for each_pic in response.css('div#contentBody img::attr(src)').extract()]\n        item['url'] = response.url\n        yield item","sub_path":"chyxx/chyxx/spiders/zhengce.py","file_name":"zhengce.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"959340","text":"\"\"\"\n374. Spiral Matrix\nhttps://www.lintcode.com/problem/spiral-matrix/description?_from=ladder&&fromId=131\nBFS\n\"\"\"\n\nDIRECTIONS = [\n    (0, 1),\n    (1, 0),\n    (0, -1),\n    (-1, 0)\n]\nfrom collections import deque\nclass Solution:\n    \"\"\"\n    @param matrix: a matrix of m x n elements\n    @return: an integer list\n    \"\"\"\n    def spiralOrder(self, matrix):\n        # write your code here\n        if not matrix or not matrix[0]:\n            return []\n        res = []\n        visited = set()\n        q = deque()\n        q.append((0,0,0))\n        visited.add((0, 0))\n        while q:\n            f_d_i, f_c_x, f_c_y = q.popleft()\n            res.append(matrix[f_c_x][f_c_y])\n            for i in range(4):\n                delta_x, delta_y = DIRECTIONS[(f_d_i + i) % 4]\n                nx, ny = f_c_x + delta_x, f_c_y + delta_y\n                if self.is_valid(nx, ny, matrix, visited):\n                    visited.add((nx, ny))\n                    q.append(((f_d_i + i) % 4, nx, ny))\n                    break\n        return res\n\n    def is_valid(self, x, y, matrix, visited):\n        n = len(matrix)\n        m = len(matrix[0])\n\n        if not (0 <= x < n and 0 <= y < m):\n            return False\n        if (x, y) in visited:\n            return False\n        return True","sub_path":"lintcode/374.py","file_name":"374.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"436425052","text":"import os\nimport urllib.request, urllib.error\nfrom django.core.files.base import ContentFile\n# MEDIA_ROOT lives on django.conf.settings, not on sys as the original assumed\nfrom django.conf import settings\n\n\ndef save_profile_picture(backend, strategy, details, response, user=None, *args, **kwargs):\n    url = None\n    if backend.name == 'google-oauth2':\n        # save the image URL\n        url = response['image'].get('url')\n        # work out the file extension\n        ext = url.split('.')[-1].split('?')[0]\n    if url:\n        user.profile_picture_url = url\n        picture = os.path.join(settings.MEDIA_ROOT, 'profiles', '%s.%s' % (user.username, ext))\n        # if a profile picture already exists, delete it\n        if os.path.exists(picture):\n            os.remove(picture)\n        user.profile_picture.save('%s.%s' % (user.username, ext), ContentFile(urllib.request.urlopen(url).read()))\n
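        # (added note) urlopen() is a blocking, unvalidated download; a production\n        # pipeline would typically set a timeout and handle urllib.error.URLError.\n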
        user.save()","sub_path":"accounts/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"577062484","text":"import pandas as pd\nimport numpy as np\nfrom Preprocessing.XGBpreproc import preprocess\nfrom Preprocessing.TABNETpreproc import preprocess_for_tabnet\nfrom pytorch_tabnet.tab_model import TabNetClassifier\nfrom XGboost.inference import competition_scorer\nimport time\n\n\n# import data\nrequests_test = pd.read_csv('../data/requests_test.csv')\n# Data preprocessing\n# Dataframe of categorical variables (np.object is deprecated, so select the 'object' dtype by name):\ncategorical_val = list(requests_test.select_dtypes(include=['object']))\ncategorical_val.remove('request_id')\n# we separate categorical variables from date variables to preprocess them separately\ncategorical_val.remove('answer_creation_date')\ncategorical_val.remove('group_creation_date')\ncategorical_val.remove('request_creation_date')\ncategorical_val.remove('victim_of_violence_type')\ndate_columns = ['answer_creation_date','group_creation_date','request_creation_date']\nX_test, y_test = preprocess(requests_test,categorical_val,date_columns)\n\n# Drop NaN values because otherwise there are memory errors\nX_test['granted_number_of_nights'] = y_test\nX_test = X_test.dropna()\ny_test = X_test['granted_number_of_nights']\nX_test = X_test.drop(columns = ['granted_number_of_nights'])\n# preprocess the datasets for TabNet\nX_test_tab, y_test_tab = preprocess_for_tabnet(X_test,y_test)\n# retrieve model (not working with Python versions < 3.7)\n\nPATH = '../model_zoo/TabNet_model.zip'\nclf = TabNetClassifier()\nclf.load_model(PATH)\n\n# run inference\nstart = time.time()\npreds = clf.predict_proba(X_test_tab)\nend = time.time()\nscore = competition_scorer(y_test_tab, preds)\nprint('time per prediction:', (end-start)/len(X_test))\nprint('The competition score on test data', score)","sub_path":"TabNet/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"130992969","text":"import networkx as nx\nfrom Goap.Action import Actions\n\n# settings\nDEBUG = False\n\n# ACTIONS\nactions = Actions()\nactions.add_action(\n    name='CreateVPC',\n    pre_conditions={'vpc': False, 'app': False, 'db': False},\n    effects={'vpc': True, 'app': False, 'db': False}\n)\nactions.add_action(\n    name='CreateDB',\n    pre_conditions={'vpc': True, 'db': False, 'app': False},\n    effects={'vpc': True, 'db': True, 'app': False}\n)\nactions.add_action(\n    name='CreateApp',\n    pre_conditions={'vpc': True, 'db': True, 'app': False},\n    effects={'vpc': True, 'db': True, 'app': True}\n)\n\n# initial STATE\ninitial_state = ({'vpc': False}, {'db': False}, {'app': False})\n\n# goal\ngoal = ({'vpc': True}, {'db': True}, {'app': True})\n\n# GRAPH / World\nG = nx.DiGraph()\n# G.add_node(0, attr_dict={'vpc': False, 'db': False, 'app': False})\n# G.add_node(1, attr_dict={'vpc': True, 'db': False, 'app': False})\n# G.add_node(2, attr_dict={'vpc': True, 'db': True, 'app': False})\n# G.add_node(3, attr_dict={'vpc': True, 'db': True, 'app': True})\n# print(G.nodes(data=True))\n\"\"\"\ni = 0\nfor action in actions:\n    G.add_node(i, attr_dict=action.pre_conditions)\n    if DEBUG:\n        print('AUTO Conditions: {0}, i: {1}'.format(action.pre_conditions, i))\n    i += 1\n\n    if i == len(actions):\n        G.add_node(i, attr_dict=action.effects)\n\n# Test\nfor action in actions:\n    # print(action)\n    src = None\n    dst = None\n    obj 
= None\n for node in G.nodes(data=True):\n # print(node)\n # print(node[1])\n if action.pre_conditions == node[1]:\n src = node[0]\n if DEBUG:\n print('SRC node data {0} = pre_conditions {2}'.format(node, node[1], action.pre_conditions))\n\n if action.effects == node[1]:\n dst = node[0]\n obj = action\n if DEBUG:\n print('DST node data {0} = effects {2}'.format(node, node[1], action.effects))\n\n if src is not None and dst is not None:\n G.add_edge(src, dst, object=obj)\n if DEBUG:\n print('Edge created!')\n\n# search\n#\n#\nfor node in G.nodes(data=True):\n if node[1] == initial_state:\n start = node[0]\n\n# final\nfor node in G.nodes(data=True):\n if node[1] == goal:\n final = node[0]\n\n# path = nx.astar_path(G, start, goal)\npath = nx.astar_path(G=G, source=start, target=final)\nprint(path)\n\n# list actions to achive goal\nprint(G.edges(path))\nfor src, dst in G.edges(path):\n print(G.get_edge_data(src,dst))\n\nprint('Edges: ', G.edges(data=True))\n\"\"\"\nfrom Goap.Action import Actions\nfrom pprint import pprint\n\nactions = Actions()\n# VPC/Network set\nactions.add_action(\n name='CreateVPC',\n pre_conditions={'vpc': False, 'db': False, 'app': False},\n effects={'vpc': True, 'db': False, 'app': False}\n)\n# DB set\nactions.add_action(\n name='CreateDB',\n pre_conditions={'vpc': True, 'db': False, 'app': False},\n effects={'vpc': True, 'db': True, 'app': False}\n)\nactions.add_action(\n name='StartDB',\n pre_conditions={'vpc': True, 'db': 'stopped', 'app': False},\n effects={'vpc': True, 'db': 'started', 'app': False}\n)\nactions.add_action(\n name='StopDB',\n pre_conditions={'vpc': True, 'db': 'started', 'app': False},\n effects={'vpc': True, 'db': 'stopped', 'app': False}\n)\nactions.add_action(\n name='DestroyDB',\n pre_conditions={'vpc': True, 'db': 'not_health', 'app': False},\n effects={'vpc': True, 'db': False, 'app': False}\n)\n# APP set\nactions.add_action(\n name='CreateApp',\n pre_conditions={'vpc': True, 'db': True, 'app': False},\n effects={'vpc': True, 'db': True, 'app': True}\n)\nactions.add_action(\n name='StartApp',\n pre_conditions={'vpc': True, 'db': True, 'app': 'stopped'},\n effects={'vpc': True, 'db': True, 'app': 'started'}\n)\nactions.add_action(\n name='StopApp',\n pre_conditions={'vpc': True, 'db': True, 'app': 'started'},\n effects={'vpc': True, 'db': True, 'app': 'stopped'}\n)\nactions.add_action(\n name='DestroyApp',\n pre_conditions={'vpc': True, 'db': True, 'app': 'not_health'},\n effects={'vpc': True, 'db': True, 'app': False}\n)\n# states\nstates = actions.all_possible_states()\n# generate states grid\ng = nx.DiGraph()\n# generate graph from all_possible_states() method\n[g.add_node(idx, attr_dict=state) for idx, state in enumerate(states)]\n\nnodes = g.nodes(data=True)\npprint(g.nodes(data=True), indent=2)\n\n# set edges\nfor action in actions:\n src = None\n dst = None\n for node in nodes:\n obj = action\n if action.pre_conditions == node[1]:\n src = node[0]\n if action.effects == node[1]:\n dst = node[0]\n if src is not None and dst is not None:\n g.add_edge(src, dst, object=obj)\n\npprint(g.edges(data=True), indent=2)\npprint(g.get_edge_data(0, 1), indent=2)\n","sub_path":"examples/goap_networkx.py","file_name":"goap_networkx.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"637677834","text":"import logging\nfrom datetime import datetime\nimport os\nfrom collections import OrderedDict\nimport tempfile\nimport subprocess\nimport shutil\nimport 
hashlib\nimport base64\nimport email\nimport email.utils\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nimport mailparser\nimport html2text\nimport dns.reversename\nimport dns.resolver\nimport dns.exception\n\n\nlogger = logging.getLogger(__name__)\n\nnull_file = open(os.devnull, \"w\")\n\nmarkdown_maker = html2text.HTML2Text()\nmarkdown_maker.unicode_snob = True\nmarkdown_maker.decode_errors = \"replace\"\nmarkdown_maker.body_width = 0\n\n\nclass EmailParserError(RuntimeError):\n \"\"\"Raised when an error parsing the email occurs\"\"\"\n\n\ndef decode_base64(data):\n \"\"\"\n Decodes a base64 string, with padding being optional\n\n Args:\n data: A base64 encoded string\n\n Returns:\n bytes: The decoded bytes\n\n \"\"\"\n data = bytes(data, encoding=\"ascii\")\n missing_padding = len(data) % 4\n if missing_padding != 0:\n data += b'=' * (4 - missing_padding)\n return base64.b64decode(data)\n\n\ndef parse_email_address(original_address):\n if original_address[0] == \"\":\n display_name = None\n else:\n display_name = original_address[0]\n address = original_address[1]\n address_parts = address.split(\"@\")\n local = None\n domain = None\n if len(address_parts) > 1:\n local = address_parts[0].lower()\n domain = address_parts[-1].lower()\n\n return OrderedDict([(\"display_name\", display_name),\n (\"address\", address),\n (\"local\", local),\n (\"domain\", domain)])\n\n\ndef get_filename_safe_string(string, max_length=146):\n \"\"\"\n Converts a string to a string that is safe for a filename\n Args:\n string (str): A string to make safe for a filename\n max_length (int): Truncate strings longer than this length\n\n Warning:\n Windows has a 260 character length limit on file paths\n\n Returns:\n str: A string safe for a filename\n \"\"\"\n invalid_filename_chars = ['\\\\', '/', ':', '\"', '*', '?',\n '<', '>', '|', '\\n', '\\r']\n if string is None:\n string = \"None\"\n\n for char in invalid_filename_chars:\n string = string.replace(char, \"\")\n string = string.rstrip(\".\")\n\n string = (string[:max_length]) if len(string) > max_length else string\n\n return string\n\n\ndef is_outlook_msg(content):\n \"\"\"\n Checks if the given content is an Outlook MSG OLE file\n\n Args:\n content: Content to check\n\n Returns:\n bool: A flag that indicates whether a file is an Outlook MSG file\n \"\"\"\n return type(content) == bytes and content.startswith(\n b\"\\xD0\\xCF\\x11\\xE0\\xA1\\xB1\\x1A\\xE1\")\n\n\ndef convert_outlook_msg(msg_bytes):\n \"\"\"\n Uses the ``msgconvert`` Perl utility to convert an Outlook MSG file to\n standard RFC 822 format\n\n Args:\n msg_bytes (bytes): the content of the .msg file\n\n Returns:\n An RFC 822 string\n \"\"\"\n if not is_outlook_msg(msg_bytes):\n raise ValueError(\"The supplied bytes are not an Outlook MSG file\")\n orig_dir = os.getcwd()\n tmp_dir = tempfile.mkdtemp()\n os.chdir(tmp_dir)\n with open(\"sample.msg\", \"wb\") as msg_file:\n msg_file.write(msg_bytes)\n try:\n subprocess.check_call([\"msgconvert\", \"sample.msg\"],\n stdout=null_file, stderr=null_file)\n eml_path = \"sample.eml\"\n with open(eml_path, \"rb\") as eml_file:\n rfc822 = eml_file.read()\n except FileNotFoundError:\n raise EmailParserError(\n \"Failed to convert Outlook MSG: msgconvert utility not found\")\n finally:\n os.chdir(orig_dir)\n shutil.rmtree(tmp_dir)\n\n return rfc822\n\n\ndef parse_email(data, strip_attachment_payloads=False):\n \"\"\"\n A simplified email parser\n\n Args:\n data: The RFC 822 
message string, or MSG binary\n strip_attachment_payloads (bool): Remove attachment payloads\n\n Returns (dict): Parsed email data\n \"\"\"\n\n if type(data) == bytes:\n if is_outlook_msg(data):\n data = convert_outlook_msg(data)\n data = data.decode(\"utf-8\", errors=\"replace\")\n _parsed_email = mailparser.parse_from_string(data)\n headers = _parsed_email.headers\n parsed_email = _parsed_email.mail_partial\n parsed_email[\"headers\"] = headers\n headers_str = \"\"\n for header in headers:\n headers_str += \"{0}: {1}\\n\".format(header, headers[header])\n headers_str = headers_str.rstrip()\n parsed_email[\"headers_string\"] = headers_str\n if \"body\" not in parsed_email or parsed_email[\"body\"] is None:\n parsed_email[\"body\"] = \"\"\n parsed_email[\"raw_body\"] = parsed_email[\"body\"]\n parsed_email[\"text_plain\"] = _parsed_email.text_plain.copy()\n parsed_email[\"text_html\"] = _parsed_email.text_html.copy()\n if len(parsed_email[\"text_plain\"]) > 0:\n parsed_email[\"body\"] = \"\\n\\n\".join(parsed_email[\"text_plain\"])\n parsed_email[\"body_markdown\"] = \"\\n\\n\".join(parsed_email[\"text_plain\"])\n if len(parsed_email[\"text_html\"]) > 0:\n parsed_email[\"body\"] = \"\\n\\n\".join(parsed_email[\"text_html\"])\n parsed_email[\"body_markdown\"] = markdown_maker.handle(\n parsed_email[\"body\"])\n\n if \"received\" in parsed_email:\n for received in parsed_email[\"received\"]:\n if \"date_utc\" in received:\n if received[\"date_utc\"] is None:\n del received[\"date_utc\"]\n else:\n received[\"date_utc\"] = received[\"date_utc\"].replace(\"T\",\n \" \")\n\n if \"from\" not in parsed_email:\n if \"From\" in parsed_email[\"headers\"]:\n parsed_email[\"from\"] = parsed_email[\"headers\"][\"From\"]\n else:\n parsed_email[\"from\"] = None\n\n if parsed_email[\"from\"] is not None:\n parsed_email[\"from\"] = parse_email_address(parsed_email[\"from\"][0])\n\n if \"date\" in parsed_email:\n if type(parsed_email[\"date\"]) == datetime:\n parsed_email[\"date\"] = parsed_email[\"date\"].replace(\n microsecond=0).isoformat()\n else:\n parsed_email[\"date\"] = parsed_email[\"date\"].replace(\"T\", \" \")\n\n else:\n parsed_email[\"date\"] = None\n if \"reply_to\" in parsed_email:\n parsed_email[\"reply_to\"] = list(map(lambda x: parse_email_address(x),\n parsed_email[\"reply_to\"]))\n else:\n parsed_email[\"reply_to\"] = []\n\n if \"to\" in parsed_email:\n parsed_email[\"to\"] = list(map(lambda x: parse_email_address(x),\n parsed_email[\"to\"]))\n else:\n parsed_email[\"to\"] = []\n\n if \"cc\" in parsed_email:\n parsed_email[\"cc\"] = list(map(lambda x: parse_email_address(x),\n parsed_email[\"cc\"]))\n else:\n parsed_email[\"cc\"] = []\n\n if \"bcc\" in parsed_email:\n parsed_email[\"bcc\"] = list(map(lambda x: parse_email_address(x),\n parsed_email[\"bcc\"]))\n else:\n parsed_email[\"bcc\"] = []\n\n if \"delivered_to\" in parsed_email:\n parsed_email[\"delivered_to\"] = list(\n map(lambda x: parse_email_address(x),\n parsed_email[\"delivered_to\"])\n )\n\n if \"attachments\" not in parsed_email:\n parsed_email[\"attachments\"] = []\n else:\n for attachment in parsed_email[\"attachments\"]:\n if \"payload\" in attachment:\n payload = attachment[\"payload\"]\n try:\n if \"content_transfer_encoding\" in attachment:\n if attachment[\"content_transfer_encoding\"] == \"base64\":\n payload = decode_base64(payload)\n else:\n payload = str.encode(payload)\n attachment[\"sha256\"] = hashlib.sha256(payload).hexdigest()\n except Exception as e:\n logger.debug(\"Unable to decode attachment: 
{0}\".format(\n e.__str__()\n ))\n if strip_attachment_payloads:\n for attachment in parsed_email[\"attachments\"]:\n if \"payload\" in attachment:\n del attachment[\"payload\"]\n\n if \"subject\" not in parsed_email:\n parsed_email[\"subject\"] = None\n\n parsed_email[\"filename_safe_subject\"] = get_filename_safe_string(\n parsed_email[\"subject\"])\n\n if \"body\" not in parsed_email:\n parsed_email[\"body\"] = None\n parsed_email[\"body_markdown\"] = None\n else:\n parsed_email[\"body_markdown\"] = markdown_maker.handle(\n parsed_email[\"body\"])\n return parsed_email\n\n\ndef query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):\n \"\"\"\n Queries DNS\n\n Args:\n domain (str): The domain or subdomain to query about\n record_type (str): The record type to query for\n cache (ExpiringDict): Cache storage\n nameservers (list): A list of one or more nameservers to use\n (Cloudflare's public DNS resolvers by default)\n timeout (float): Sets the DNS timeout in seconds\n\n Returns:\n list: A list of answers\n \"\"\"\n domain = str(domain).lower()\n record_type = record_type.upper()\n cache_key = \"{0}_{1}\".format(domain, record_type)\n if cache:\n records = cache.get(cache_key, None)\n if records:\n return records\n\n resolver = dns.resolver.Resolver()\n timeout = float(timeout)\n if nameservers is None:\n nameservers = [\"1.1.1.1\", \"1.0.0.1\",\n \"2606:4700:4700::1111\", \"2606:4700:4700::1001\",\n ]\n resolver.nameservers = nameservers\n resolver.timeout = timeout\n resolver.lifetime = timeout\n if record_type == \"TXT\":\n resource_records = list(map(\n lambda r: r.strings,\n resolver.query(domain, record_type, tcp=True, lifetime=timeout)))\n _resource_record = [\n resource_record[0][:0].join(resource_record)\n for resource_record in resource_records if resource_record]\n records = [r.decode() for r in _resource_record]\n else:\n records = list(map(\n lambda r: r.to_text().replace('\"', '').rstrip(\".\"),\n resolver.query(domain, record_type, tcp=True, lifetime=timeout)))\n if cache:\n cache[cache_key] = records\n\n return records\n\n\ndef get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):\n \"\"\"\n Resolves an IP address to a hostname using a reverse DNS query\n\n Args:\n ip_address (str): The IP address to resolve\n cache (ExpiringDict): Cache storage\n nameservers (list): A list of one or more nameservers to use\n (Cloudflare's public DNS resolvers by default)\n timeout (float): Sets the DNS query timeout in seconds\n\n Returns:\n str: The reverse DNS hostname (if any)\n \"\"\"\n hostname = None\n try:\n address = dns.reversename.from_address(ip_address)\n hostname = query_dns(address, \"PTR\", cache=cache,\n nameservers=nameservers,\n timeout=timeout)[0]\n\n except dns.exception.DNSException:\n pass\n\n return hostname\n\n\ndef create_email(message_from, message_to=None, message_cc=None,\n subject=None, message_headers=None, attachments=None,\n plain_message=None, html_message=None):\n \"\"\"\n Creates an RFC 822 email message and returns it as a string\n\n Args:\n message_from (str): The value of the message from header\n message_to (list): A list of addresses to send mail to\n message_cc (list): A list of addresses to carbon copy (CC)\n subject (str): The message subject\n message_headers (dict): Custom message headers\n attachments (list): A list of (filename, payload) tuples\n plain_message (str): The plain text message body\n html_message (str): The HTML message body\n\n Returns:\n str: An RFC 822 email message\n \"\"\"\n msg 
= MIMEMultipart()\n msg['From'] = message_from\n msg['To'] = \", \".join(message_to)\n if message_cc is not None:\n msg['Cc'] = \", \".join(message_cc)\n msg['Date'] = email.utils.formatdate(localtime=True)\n msg['Subject'] = subject\n if message_headers is not None:\n for header in message_headers:\n msg[header] = message_headers[header]\n if attachments is None:\n attachments = []\n\n msg.attach(MIMEText(plain_message, \"plain\"))\n if html_message is not None:\n msg.attach(MIMEText(html_message, \"html\"))\n\n for attachment in attachments:\n filename = attachment[0]\n payload = attachment[1]\n part = MIMEApplication(payload, Name=filename)\n content_disposition = 'attachment; filename=\"{0}\"'.format(filename)\n part['Content-Disposition'] = content_disposition\n msg.attach(part)\n\n return msg.as_string()\n","sub_path":"mailsuite/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"604514923","text":"\"\"\"\nCopyright (c) 2012 Shotgun Software, Inc\n----------------------------------------------------\n\nMenu handling for MotionBuilder\n\n\"\"\"\nimport os\nimport sys\nimport webbrowser\nimport unicodedata\n\nfrom pyfbsdk import FBMenuManager\nfrom pyfbsdk import FBGenericMenu\n\nclass MenuGenerator(object):\n \"\"\"\n Menu generation functionality for MotionBuilder\n \"\"\"\n\n def __init__(self, engine):\n self._engine = engine\n self._dialogs = []\n self.__menu_index = 1\n self._callbacks = {}\n\n ##########################################################################################\n # public methods\n\n def create_menu(self):\n \"\"\"\n Render the entire Tank menu.\n \"\"\"\n # create main menu\n menu_mgr = FBMenuManager()\n self._menu_handle = menu_mgr.GetMenu(\"Tank\")\n if not self._menu_handle:\n menu_mgr.InsertBefore(None, \"Help\", \"Tank\")\n self._menu_handle = menu_mgr.GetMenu(\"Tank\")\n #self._menu_handle.clearMenu()\n self._menu_handle.OnMenuActivate.Add(self.__menu_event)\n # now add the context item on top of the main menu\n self._context_menu = self._add_context_menu()\n #self._menu_handle.addSeparator()\n\n # now add favourites\n for fav in self._engine.get_setting(\"menu_favourites\"):\n app_instance_name = fav[\"app_instance\"]\n menu_name = fav[\"name\"]\n\n # scan through all menu items\n for (cmd_name, cmd_details) in self._engine.commands.items():\n cmd = AppCommand(cmd_name, cmd_details)\n if cmd.get_app_instance_name() == app_instance_name and cmd.name == menu_name:\n # found our match!\n self.__menu_index += 1\n cmd.add_command_to_menu(self._menu_handle, self.__menu_index)\n # mark as a favourite item\n cmd.favourite = True \n \n\n #self._menu_handle.addSeparator()\n\n # now go through all of the menu items.\n # separate them out into various sections\n commands_by_app = {}\n\n context_menu_index = 103\n for (cmd_name, cmd_details) in self._engine.commands.items():\n cmd = AppCommand(cmd_name, cmd_details)\n\n if cmd.get_type() == \"context_menu\":\n # context menu!\n context_menu_index += 1\n cmd.add_command_to_menu(self._context_menu, context_menu_index)\n self._add_event_callback(cmd.name, cmd.callback)\n else:\n # normal menu\n app_name = cmd.get_app_name()\n if app_name is None:\n # un-parented app\n app_name = \"Other Items\"\n if not app_name in commands_by_app:\n commands_by_app[app_name] = []\n commands_by_app[app_name].append(cmd)\n\n # now add all apps to main menu\n self._add_app_menu(commands_by_app)\n\n def destroy_menu(self):\n 
item = self._menu_handle.GetFirstItem()\n while item:\n next_item = self._menu_handle.GetNextItem(item)\n self._menu_handle.DeleteItem(item)\n item = next_item\n self.__menu_index = 1\n self._callbacks = {}\n\n ##########################################################################################\n # context menu and UI\n\n def _add_context_menu(self):\n \"\"\"\n Adds a context menu which displays the current context\n \"\"\"\n\n ctx = self._engine.context\n ctx_name = str(ctx)\n\n # create the menu object\n ctx_menu = FBGenericMenu()\n\n ctx_menu.InsertLast(\"Jump to Shotgun\", self.__menu_index * 100 + 2)\n self._add_event_callback(\"Jump to Shotgun\", self._jump_to_sg)\n\n ctx_menu.InsertLast(\"Jump to File System\", self.__menu_index * 100 + 3)\n self._add_event_callback(\"Jump to File System\", self._jump_to_fs)\n\n ctx_menu.OnMenuActivate.Add(self.__menu_event)\n\n self._menu_handle.InsertFirst(ctx_name, self.__menu_index, ctx_menu)\n self.__menu_index += 1\n return ctx_menu\n\n def _add_event_callback(self, event_name, callback):\n \"\"\"\n Creates a mapping between the menu item name and the callback that should be\n run when it is clicked.\n \"\"\"\n self._callbacks[event_name] = callback\n\n def _jump_to_sg(self):\n \"\"\"\n Jump to Shotgun, launch web browser\n \"\"\" \n url = self._engine.context.shotgun_url \n webbrowser.open(url)\n\n def _jump_to_fs(self):\n \"\"\"\n Jump from context to FS\n \"\"\"\n # launch one window for each location on disk\n paths = self._engine.context.filesystem_locations\n for disk_location in paths:\n\n # get the setting \n system = sys.platform\n \n # run the app\n if system == \"linux2\":\n cmd = 'xdg-open \"%s\"' % disk_location\n elif system == \"darwin\":\n cmd = 'open \"%s\"' % disk_location\n elif system == \"win32\":\n cmd = 'cmd.exe /C start \"Folder\" \"%s\"' % disk_location\n else:\n raise Exception(\"Platform '%s' is not supported.\" % system)\n\n exit_code = os.system(cmd)\n if exit_code != 0:\n self._engine.log_error(\"Failed to launch '%s'!\" % cmd)\n\n ##########################################################################################\n # app menus\n\n def _add_app_menu(self, commands_by_app):\n \"\"\"\n Add all apps to the main menu, process them one by one.\n \"\"\"\n\n for i, app_name in enumerate(sorted(commands_by_app.keys())):\n if len(commands_by_app[app_name]) > 1:\n # more than one menu entry for this app\n # make a sub menu and put all items in the sub menu\n #app_menu = self._menu_handle.InsertLast(app_name, tank_index + i)\n app_menu = FBGenericMenu()\n self.__menu_index += 1\n menu_id = self.__menu_index * 100\n for j, cmd in enumerate(commands_by_app[app_name]):\n cmd.add_command_to_menu(app_menu, menu_id + j + 1)\n self._add_event_callback(cmd.name, cmd.callback)\n app_menu.OnMenuActivate.Add(self.__menu_event)\n app_name = self.__strip_unicode(app_name)\n self._menu_handle.InsertLast(app_name, menu_id, app_menu)\n else:\n # this app only has a single entry.\n # display that on the menu\n # todo: Should this be labelled with the name of the app\n # or the name of the menu item? 
Not sure.\n cmd_obj = commands_by_app[app_name][0]\n if not cmd_obj.favourite:\n self.__menu_index += 1\n cmd_obj.add_command_to_menu(self._menu_handle, self.__menu_index)\n self._add_event_callback(cmd_obj.name, cmd_obj.callback)\n\n ##########################################################################################\n # private methods\n\n def __menu_event(self, control, event):\n \"\"\"\n Handles menu events.\n \"\"\"\n callback = self._callbacks.get(event.Name)\n if callback:\n callback()\n \n def __strip_unicode(self, val):\n \"\"\"\n Get rid of unicode\n \"\"\"\n if val.__class__ == unicode:\n val = unicodedata.normalize('NFKD', val).encode('ascii', 'ignore')\n return val \n \n\nclass AppCommand(object):\n \"\"\"\n Wraps around a single command that you get from engine.commands\n \"\"\"\n\n def __init__(self, name, command_dict):\n\n self.properties = command_dict[\"properties\"]\n self.callback = command_dict[\"callback\"]\n self.favourite = False\n \n # deal with mobu's inability to handle unicode. #fail\n if name.__class__ == unicode:\n self.name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\n else:\n self.name = name\n\n def get_app_name(self):\n \"\"\"\n Returns the name of the app that this command belongs to\n \"\"\"\n if \"app\" in self.properties:\n return self.properties[\"app\"].display_name\n return None\n\n def get_app_instance_name(self):\n \"\"\"\n Returns the name of the app instance, as defined in the environment.\n Returns None if not found.\n \"\"\"\n if \"app\" not in self.properties:\n return None\n\n app_instance = self.properties[\"app\"]\n engine = app_instance.engine\n\n for (app_instance_name, app_instance_obj) in engine.apps.items():\n if app_instance_obj == app_instance:\n # found our app!\n return app_instance_name\n\n return None\n\n def get_documentation_url_str(self):\n \"\"\"\n Returns the documentation as a str\n \"\"\"\n if \"app\" in self.properties:\n app = self.properties[\"app\"]\n doc_url = app.documentation_url\n # deal with mobu's inability to handle unicode. #fail\n if doc_url.__class__ == unicode:\n doc_url = unicodedata.normalize('NFKD', doc_url).encode('ascii', 'ignore')\n return doc_url\n\n return None\n\n def get_type(self):\n \"\"\"\n Returns the command type. Returns node, custom_pane or default\n \"\"\"\n return self.properties.get(\"type\", \"default\")\n\n def add_command_to_menu(self, menu, index):\n \"\"\"\n Adds an app command to the menu\n \"\"\"\n # std shotgun menu\n menu.InsertLast(self.name, index)\n","sub_path":"studio/install/engines/app_store/tk-motionbuilder/v0.2.6/python/tk_motionbuilder/menu_generation.py","file_name":"menu_generation.py","file_ext":"py","file_size_in_byte":9816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"183808703","text":"from . 
import base\nimport datetime\nfrom sqlalchemy import Column, Integer, String, ForeignKey, JSON, Boolean, Text, TIMESTAMP\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.orm import relationship\nfrom geoalchemy2 import Geometry\nfrom .vote import Vote\nfrom .post import Post\n\n\nclass User(base.Base):\n __tablename__ = \"user\"\n\n id = Column(String(128), primary_key=True)\n email = Column(String(255), unique=True)\n username = Column(String(255), unique=True)\n new_power = Column(postgresql.DOUBLE_PRECISION, nullable=False)\n content_power = Column(postgresql.DOUBLE_PRECISION, nullable=False)\n vote_power = Column(postgresql.DOUBLE_PRECISION, nullable=False)\n network_power = Column(postgresql.DOUBLE_PRECISION, nullable=False)\n powerx = Column(postgresql.DOUBLE_PRECISION, nullable=True)\n new_coins = Column(Integer, nullable=False)\n life_level = Column(Integer, nullable=False)\n\n invited_by = Column(\n String(128), ForeignKey('user.id'))\n invite_code = Column(String(255))\n\n social = Column(JSON())\n bio = Column(Text)\n\n picture_path = Column(String(255))\n referenced_from = Column(String(255))\n confirmed = Column(Boolean, nullable=False)\n\n latitude = Column(postgresql.DOUBLE_PRECISION, nullable=True)\n longitude = Column(postgresql.DOUBLE_PRECISION, nullable=True)\n last_online_at = Column(TIMESTAMP)\n role = Column(String(255), server_default='user', default='user', nullable=False)\n geo = Column(Geometry(geometry_type='POINT'))\n\n # votes = relationship('Vote', backref='user', lazy='dynamic', cascade=\"all, delete-orphan\")\n posts = relationship('Post', cascade=\"all, delete-orphan\")\n invitor = relationship('User', remote_side=[id], uselist=False)\n invited_users = relationship('User', remote_side=[invited_by])\n\n def __init__(self, data):\n self.id = data.get('id')\n self.email = data.get('email')\n self.username = data.get('username')\n self.vote_power = data.get('vote_power', 0)\n self.new_power = data.get('new_power', 0)\n self.network_power = data.get('network_power', 0)\n self.content_power = data.get('content_power', 0)\n self.new_coins = data.get('new_coins', 0)\n self.life_level = data.get('life_level', 0)\n self.invited_by = data.get('invited_by')\n self.invite_code = data.get('invite_code')\n self.social = data.get('social')\n self.bio = data.get('bio')\n self.picture_path = data.get('picture_path')\n self.referenced_from = data.get('referenced_from')\n self.confirmed = data.get('confirmed', False)\n self.last_online_at = data.get('last_online_at', datetime.datetime.utcnow())\n self.created_at = data.get('created_at', datetime.datetime.utcnow())\n self.role = data.get('role', 'user')\n self.latitude = data.get('latitude')\n self.longitude = data.get('longitude')\n","sub_path":"backend/api/model/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"18250739","text":"import socketserver\nimport os\nimport sys\nimport json\nimport traceback\nfrom pyrad.client import Client\nfrom pyrad import dictionary\nfrom pyrad import packet\n\n##\n## COA Proxy\n## nunez.emiliano@gmail.com\n## 9 Ago 2017\n##\n\nclass COAHandler(socketserver.BaseRequestHandler):\n\n def COA(self, coatype, nasip, request):\n\n client = Client(\n server=nasip,\n secret=bytes(self.server.secret, 'utf-8'),\n dict=dictionary.Dictionary(\"dictionary\")\n )\n request_p = client.CreateCoAPacket(\n code=coatype,\n **request\n )\n result = client.SendPacket(request_p)\n 
return result.code\n\n def COADisconnect(self, nasip, request):\n return self.COA(packet.DisconnectRequest, \n nasip, \n request)\n \n def COARequest(self, nasip, request):\n return self.COA(packet.CoARequest, \n nasip, \n request)\n\n def run (self, cmd):\n\n try:\n data = json.loads(cmd.decode(\"utf-8\"))\n coatype = data[\"coatype\"]\n nasip = data[\"nasip\"]\n request = data[\"request\"]\n num = getattr(self, coatype)(nasip, request)\n\n if num == packet.CoAACK or num == packet.DisconnectACK:\n ret = {\"state\":\"OK\"}\n else:\n ret = {\"state\":\"ERR\", \"description\":num}\n return ret\n\n except:\n\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"%s,%s,%s\" % (exc_type, fname, exc_tb.tb_lineno))\n print(traceback.format_exc())\n ret = {\"state\":\"ERR\", \"description\":str(exc_type)}\n return ret\n\n def handle (self):\n print (\"Connected from: {}\".format(self.client_address[0]))\n cmd = self.request.recv(1024).strip()\n if not cmd:\n return \n res = self.run (cmd)\n self.request.sendall(bytes(\"%s%s\" % (json.dumps(res), os.linesep), 'utf-8'))\n\nif __name__ == \"__main__\":\n\n HOST = \"0.0.0.0\"\n if len(sys.argv) == 3:\n port = int(sys.argv[1])\n socketserver.TCPServer.allow_reuse_address = True\n server = socketserver.TCPServer((HOST, port), COAHandler)\n server.secret = sys.argv[2]\n server.serve_forever()\n else:\n sys.exit(\"usage: %s <port> <secret>\" % sys.argv[0])\n","sub_path":"gc-freeradius-2/root/coa_proxy.py","file_name":"coa_proxy.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"234264064","text":"from pathlib import Path\n\nRUNID = 'u-al508'\nSTREAM = 'ap9'\n\nSCRIPT_PATH = '/home/users/mmuetz/projects/cosmic/cosmic/processing/convert_pp_to_nc.py'\nBASE_PATH = Path(f'/gws/nopw/j04/cosmic/mmuetz/data/{RUNID}/{STREAM}.pp')\n\npaths = sorted(BASE_PATH.glob('precip_??????/*.pp'))\nCONFIG_KEYS = [p.stem for p in paths]\n\nBSUB_KWARGS = {\n 'job_name': 'conv',\n 'queue': 'short-serial',\n 'max_runtime': '05:00',\n}\n\nIRIS_CUBE_ATTRS = {\n 'grid': 'N1280',\n 'institution': 'Met Office Hadley Centre, Fitzroy Road, Exeter, Devon, EX1 3PB, UK',\n 'institution_id': 'MOHC',\n 'source_type': 'AGCM',\n 'model': 'u-al508',\n 'experiment_details': 'convection parametrization scheme',\n}\n\nDIAGTYPE = 'precip'\nDELETE_PP = True\n\nSCRIPT_ARGS = {}\nfor k, path in zip(CONFIG_KEYS, paths):\n SCRIPT_ARGS[k] = path\n","sub_path":"ctrl/UM_N1280/u-al508_convert_ctrl.py","file_name":"u-al508_convert_ctrl.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"487284988","text":"import torch\nfrom torch.autograd import Variable\nimport numpy as np\n\nprint(torch.__version__)\n# Univariate linear regression: a regression problem\n\nx_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],\n [9.779], [6.182], [7.59], [2.167], [7.042],\n [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)\n\ny_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],\n [3.366], [2.596], [2.53], [1.221], [2.827],\n [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)\nx_train = torch.from_numpy(x_train)\ny_train = torch.from_numpy(y_train)\n\nw = Variable(torch.randn(1), requires_grad=True)\nb = Variable(torch.zeros(1), requires_grad=True)\nx_train = Variable(x_train)\ny_train = Variable(y_train)\n\ndef line_model(x):\n return w*x+b\n\ndef get_loss(y_,y):\n return 
torch.mean((y_-y)**2)\n\ny_ = line_model(x_train)\nloss = get_loss(y_, y_train)\nloss.backward()\n\nlr = 1e-4\n\nfor e in range(20):\n y_ = line_model(x_train)\n loss = get_loss(y_, y_train)\n w.grad.zero_() # remember to zero the gradients\n b.grad.zero_()\n loss.backward()\n w.data = w.data-lr*w.grad.data\n b.data = b.data-lr*b.grad.data\n print('epoch:{},loss:{}'.format(e,loss))\n\n\n# Logistic sigmoid: a classification problem\nimport torch.nn.functional as F\ndef logistic_regression(x):\n return F.sigmoid(torch.mm(x,w)+b)\n\n# Optimizer\nfrom torch import nn\n# loss=nn.MSELoss()\nloss = nn.BCEWithLogitsLoss()\nw = nn.Parameter(torch.randn(2,1))\nb = nn.Parameter(torch.zeros(1))\noptimizer = torch.optim.SGD([w,b],lr=0.01)\n\n# Three steps: zero gradients, backpropagate, update\noptimizer.zero_grad()\nloss.backward()\noptimizer.step()\n\n\n# Sequential Module\nseq_net = nn.Sequential(\n nn.Linear(2,4),\n nn.Tanh(),\n nn.Linear(4,1)\n)\nprint(seq_net[0]) # first layer\nw0 = seq_net[0].weight\nparam = seq_net.parameters()\n\n# Save both the model and its parameters\ntorch.save(seq_net,\"save.pth\")\nseq_net1 = torch.load(\"save.pth\")\n# Save only the parameters\ntorch.save(seq_net.state_dict(),\"save_params.pth\")\nseq_net2 = nn.Sequential(\n nn.Linear(2,4),\n nn.Tanh(),\n nn.Linear(4,1)\n)\nseq_net2.load_state_dict(torch.load(\"save_params.pth\"))\n\n\n# Module\nclass net(nn.Module):\n def __init__(self,num_input,num_hidden,num_output):\n super(net, self).__init__()\n self.layer1=nn.Linear(num_input,num_hidden)\n self.layer2=nn.Tanh()\n self.layer3=nn.Linear(num_hidden,num_output)\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n return x\n\nnet = net(2,4,1)\nl1=net.layer1\nprint(l1,l1.weight)\n\n\nclass net2(nn.Module):\n def __init__(self,num_input,num_hidden,num_output):\n super(net2, self).__init__()\n self.network=nn.Sequential(\n nn.Linear(num_input,num_hidden),\n nn.Tanh(),\n nn.Linear(num_hidden,num_output)\n )\n def forward(self, x):\n x = self.network(x)\n return x\n\nnet = net2(2,4,1)\nprint(net.network)","sub_path":"base/base_2.py","file_name":"base_2.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"603508736","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n# @Date    : 2017-06-19 22:19:29\n# @Author  : Shixiang Wang (w.shixiang@yahoo.com)\n# @Link    : moiedotblog.wordpress.com\n# @Version : $Id$\n\nimport pygame, sys\nfrom pygame.locals import *\n\npygame.init() # must be called after importing pygame and before calling any other Pygame function\nDISPLAYSURF = pygame.display.set_mode((400, 300)) # returns the window's Surface object; the tuple sets the window width and height\npygame.display.set_caption('Hello World!') # sets the text shown at the top of the window\nwhile True: # main game loop\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT: # each event object has a type attribute telling which kind of event it is; Pygame defines a constant for every possible event\n\t\t\tpygame.quit() # pygame.quit() is the inverse of init(); it shuts the Pygame library down. Always do this before terminating the program.\n\t\t\tsys.exit()\n\tpygame.display.update()\t# update() draws the Surface returned by set_mode() onto the screen\n\n\n\n\n","sub_path":"first_pygame.py","file_name":"first_pygame.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"530031600","text":"from app import flask_app as app\nimport json\nfrom datetime import datetime\nfrom flask import request\nimport functools\nimport operator\n\n@app.route(\"/heartbeat\")\ndef heartbeat():\n return json.dumps(\n {\n \"status\": True,\n \"service\": \"Homework_Template\",\n \"datetime\": f\"{datetime.now()}\"\n }\n )\n\n@app.route(\"/sum\",methods=['POST'])\ndef sum():\n dic = request.json\n 
x = dic[\"x\"]\n y = dic[\"y\"]\n summation = x + y\n return json.dumps(\n {\n \"result\":summation\n }\n )\n\n@app.route(\"/minimum\",methods=['POST'])\ndef mini():\n min_data = request.json\n val = list(min_data.values())[0]\n m = min(val)\n return json.dumps(\n {\n \"minimum\":m\n }\n )\n\n@app.route(\"/product\",methods=['POST'])\ndef product():\n dic = request.json\n ls = dic[\"values\"]\n product = functools.reduce(operator.mul, ls, 1)\n return json.dumps(\n {\n \"Product\":product\n }\n )\n\n@app.before_first_request\ndef load_app():\n print(\"Loading App Before First Request\")\n","sub_path":"homework01_part02/app/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"365565463","text":"from gi import require_version\nrequire_version(\"Gtk\", \"3.0\")\n\nimport cairo\nimport io\nimport numpy\nimport os\nimport random\nimport requests\nimport shutil\n\nfrom background.reddit_background import Image\nfrom background.reddit_background import get_desktop_config\nfrom background.reddit_background import _download_to_directory, _safe_makedirs\n\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import as_completed\n\nfrom gi.repository import Gtk, Gio, GLib, GObject\nfrom gi.repository import GdkPixbuf\n\nfrom PIL import Image as pilImage\n\nfrom i3_pywal.main import wal\n\nfrom importlib_resources import path\n\nclass SubredditModel():\n \"\"\"\n \"\"\"\n def __init__(self):\n desktops = get_desktop_config()\n self.subreddits = []\n self.folder_path = '/tmp/reddit_gui'\n self.subreddit_title = None\n for desktop in desktops:\n self.subreddits.extend(desktop.subreddits)\n\n\n def get_images(self):\n self._clear_folder_contents()\n _safe_makedirs(self.folder_path)\n try:\n random_subreddit = random.choice(self.subreddits)\n self.subreddit_title = random_subreddit.name \n return random_subreddit.fetch_images()\n except IndexError as e:\n print(e)\n \n return None\n\n def load_image(self, image: Image):\n path = _download_to_directory(image.url, self.folder_path, image.filename)\n return path\n\n def _clear_folder_contents(self):\n if os.path.isdir(self.folder_path):\n shutil.rmtree(self.folder_path)\n \n\nclass SubredditController():\n \"\"\"\n \"\"\"\n def __init__(self):\n self.subreddit_model = SubredditModel()\n self.view = ImageWindow(self.subreddit_model)\n\n\n def run(self):\n self.view.show_all()\n self.view.connect('destroy', Gtk.main_quit)\n Gtk.main()\n\n\nclass RedditImageView(Gtk.EventBox):\n \"\"\"\n \"\"\"\n def __init__(self, model, image : Image, callback=None):\n Gtk.Box.__init__(self)\n self.model = model\n self.image = image \n self.callback = callback\n \n with path('background.resources.images', 'loading-wheel.gif') as p:\n loading_wheel = p.as_posix()\n\n self.image_view = Gtk.Image.new_from_file(loading_wheel)\n\n self.add(self.image_view)\n\n self.connect('button_press_event', self.on_pressed)\n self.load_image(self.image.url)\n\n def _set_image_data(self, gdaemonfile, result):\n try:\n _, data, _ = self.stream.load_contents_finish(result)\n\n pil: pilImage = pilImage.open(io.BytesIO(data))\n c_format = cairo.FORMAT_ARGB32\n \n if pil.mode == 'RGB':\n r, g, b = pil.split()\n pil = pilImage.merge(\"RGB\", (b, g, r))\n pil.putalpha(256)\n elif pil.mode == 'RGBA':\n r, g, b, a = pil.split()\n pil = pilImage.merge('RGBA', (b, g, r, a))\n \n size_val = 650\n size = (size_val, size_val)\n pil = pil.resize(size)\n 
\n arr = numpy.array(pil)\n cai_height, cai_width, _ = arr.shape\n\n surface = cairo.ImageSurface.create_for_data(arr, c_format, pil.width, pil.height)\n GLib.idle_add(self.image_view.set_from_surface, surface)\n except Exception as e:\n with path('background.resources.images', 'load-error.png') as p:\n GLib.idle_add(self.image_view.set_from_file, p.as_posix())\n\n print(e)\n\n def load_image(self, url):\n self.stream = Gio.file_new_for_uri(url)\n self.stream.load_contents_async(None, self._set_image_data)\n\n def on_pressed(self, widget, data):\n path = self.model.load_image(self.image)\n wal(image_path=path, manual=True)\n # TODO: Add some way to pause the GUI until wal is finished.\n\n\nclass ImageWindow(Gtk.Window):\n \"\"\"\n \"\"\"\n def __init__(self, model, title=r'Subreddit Images'):\n\n Gtk.Window.__init__(self, title=title)\n self.set_border_width(10)\n self.model = model\n\n # Create Grid View\n grid = Gtk.Grid()\n grid.set_property('orientation', Gtk.Orientation.VERTICAL) \n # Create Scroll Window \n scroll_view = Gtk.ScrolledWindow()\n scroll_view.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC) \n scroll_view.set_hexpand(True)\n scroll_view.set_vexpand(True)\n\n self.flowbox = Gtk.FlowBox()\n self.flowbox.set_valign(Gtk.Align.BASELINE)\n self.flowbox.set_max_children_per_line(30)\n self.flowbox.set_column_spacing(0)\n self.flowbox.set_row_spacing(0)\n\n refresh_button = Gtk.Button.new_with_label('REFRESH')\n refresh_button.connect('clicked', self.on_click_refresh)\n\n scroll_view.add(self.flowbox)\n\n grid.add(scroll_view)\n grid.add(refresh_button)\n\n self.add(grid)\n\n def on_click_refresh(self, button):\n for child in self.flowbox.get_children():\n Gtk.Widget.destroy(child)\n\n for image in self.model.get_images(): \n reddit_imageview = RedditImageView(self.model, image)\n self.flowbox.add(reddit_imageview)\n\n self.set_title('{} - {}'.format('Subreddit Images', self.model.subreddit_title))\n self.flowbox.show_all()\n \n\n\n \n\ndef main():\n SubredditController().run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"background/gui/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"261822334","text":"import pygame as pg\npg.init()\nwidth,height = 1300, 200\nwindow = pg.display.set_mode((width, height))\n# colors...\nC_BWRGBYCM = [(0,0,0),(255, 255, 255),(255, 0, 0),(0, 255, 0),(0, 0, 255),(255, 255, 0),(0, 255, 255),(255, 0, 255)]\n# game_variables...\nexit_game = False\nclock = pg.time.Clock()\nfps = 800\nmas = 0.00000001\nx_b1,y_b1 = width*0.8,60\nmass_b1 = mas*1000000000000\nx_b2,y_b2 = width*0.4,110\nmass_b2 = mas\nv_b2 = 0\nv_b1 = 0.0001\ncolition = 0\nu1 = u2 = 0\nfront = pg.font.SysFont('None',30)\n# function...\ndef rect(pos_l_b,color):\n pg.draw.rect(window,color,(pos_l_b))\ndef text_screen(text, cor,color):\n texts = front.render(str(\"collisions = \" + text), True, color)\n window.blit(texts, (cor[0], cor[1]))\ndef cal(v1,v2,m1,m2):\n u1 = v1\n u2 = v2\n a1 = ((m1-m2)/(m1+m2))*u1\n a2 = (2*m2/(m1+m2))*u2\n v1 = a1+a2\n a1 = (2*m1/(m1+m2))*u1\n a2 = ((m2-m1)/(m1+m2))*u2\n v2 = a1+a2\n return v1,v2\ndef calculation(v1,v2,m1,m2):\n u1 = v1\n u2 = v2\n sumold = (m1*u1)+(m2*u2)\n v1 = -(sumold-(m2*v2))/m1\n v2 = (sumold-(m1*v1))/m2\n return v1,v2\n# game main loop...\nwhile not exit_game:\n window.fill(C_BWRGBYCM[0])\n for event in pg.event.get():\n if event.type==pg.QUIT:\n exit_game = True\n if event.type == pg.KEYDOWN:\n 
if event.key == pg.K_ESCAPE:\n quit()\n rect((int(x_b1),int(y_b1),100,100),C_BWRGBYCM[4])\n rect((int(x_b2),int(y_b2),50,50),C_BWRGBYCM[3])\n# calculations...\n for i in range(100000):\n x_b2 -= v_b2\n x_b1 -= v_b1\n if x_b2 <= width*0.2:\n v_b2 = -v_b2\n colition+=1\n if x_b2+50 >= x_b1:\n # v_b1 = -v_b1\n v_b1,v_b2 = cal(v_b1,v_b2,mass_b1,mass_b2)\n colition+=1\n text_screen(str(colition),(0,0),C_BWRGBYCM[1])\n rect((width*0.2,0,0,height),C_BWRGBYCM[2])\n rect((0,int(height*0.8),width,200),C_BWRGBYCM[7])\n pg.display.update()\n clock.tick(fps)\npg.quit()\nquit()\n","sub_path":"calculating_pi_with_block.py","file_name":"calculating_pi_with_block.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"449189661","text":"from django.urls import path\nfrom . import views\n\napp_name = \"taskapp\"\nurlpatterns = [\n path('staff_requests/', views.StaffRequestsView.as_view(), name='staff_requests'),\n path('center_requests_status/', views.requestStatus, name='center_requests_status'),\n path('get_staff_requests/', views.get_staff_requests, name='get_staff_requests'),\n path('send/', views.request_send, name='request_send'),\n path('accept/', views.request_accept, name='request_accept'),\n path('reject/', views.request_reject, name='request_reject'),\n path('complete/', views.request_complete, name='request_complete'),\n path('cancel/', views.request_cancel, name='request_cancel'),\n path('assign/', views.request_assign, name='request_assign'),\n path('getList/', views.request_get_list, name='request_get_list'),\n path('getStaffID/', views.getStaffID, name='getStaffID'),\n]","sub_path":"hotel-db-system/TaskApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"156070798","text":"n = float(input())\n\npercentual = 0\n\nif(n <= 400):\n percentual = 15\nelif(n <= 800):\n percentual = 12\nelif(n <= 1200):\n percentual = 10\nelif(n <= 2000):\n percentual = 7\nelse:\n percentual = 4\n\nreajuste = percentual*0.01*n\nnsal = reajuste + n\n\nprint(\"Novo salario: %.2f\" % nsal)\nprint(\"Reajuste ganho: %.2f\" % reajuste)\nprint(\"Em percentual: %.0f %%\" % percentual) \n","sub_path":"URI_1048 - (3748278) - Accepted.py","file_name":"URI_1048 - (3748278) - Accepted.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"418340531","text":"#!/usr/bin/python3.4 -tt\n# -*- coding: utf-8 -*-\n\n\nimport cv2\nimport numpy as np\nimport time\nfrom threading import Thread\nimport functools\n\nimport marsem.protocol.car as car\nimport marsem.protocol.config as cfg\n\nblue_min = [255, 204, 204]\nblue_max = [255, 0, 0]\nred_min = [17, 15, 140]\nred_max = [50, 56, 200]\ngreen_min = [25, 94, 10]\ngreen_max = [55, 144, 45]\n\n#cv2.VideoCapture.set(cv2.CV_CAP_PROP_FPS, 200)\nvideo_capture = cv2.VideoCapture()\nvideo_capture.set(cv2.CAP_PROP_FPS, 200)\n\ncurrent_frame = None\nDEFAULT_TIMEOUT = 50\n\n# **************************************\n# OpenCV Color class\n# Sets the colors for opencv\n# **************************************\nclass Color():\n def __init__(self):\n \"\"\" Defaults to red color \"\"\"\n self.min = create_color_range(red_min)\n self.max = create_color_range(red_max)\n\n def set_min_max(self, xa, xb):\n self.set_min(xa)\n self.set_max(xb)\n \n def set_min(self, xs):\n self.min = 
create_color_range(xs)\n\n def set_max(self, xs):\n self.max = create_color_range(xs)\n\n def get_color(self):\n return 'Min: ' + str(self.min) + '\\nMax: ' + str(self.max)\n\n\n\ndef create_color_range(lst):\n return np.array(lst, dtype='uint8')\n\n\n# **************************************\n# OpenCV\n# OpenCV module\n# **************************************\n\ndef update_current_frame(f):\n global current_frame\n current_frame = f\n\ndef is_connected():\n return video_capture.isOpened()\n\n\n# Connects the video capture to its video source.\ndef connect(callback=None):\n \"\"\" Connects to the video stream on the Raspberry Pi \"\"\"\n if video_capture.isOpened():\n print(\"Already connected\")\n return True\n #if video_capture.open(0):\n if video_capture.open(cfg.stream_file):\n print(\"Success in connecting to remote file\")\n return True\n else:\n if callback:\n callback()\n print(\"Failed to open remote file, make sure the server is running and not busy\")\n return False\n\n\ndef run(color=Color(), samples=[], callback=None, timeout=DEFAULT_TIMEOUT):\n t_end = time.time() + timeout\n time_t = time.time\n burst = 0\n kernel = np.ones((5,5), np.uint8)\n\n # Avoid re-evaluating the module calls inside the loop\n # Code optimization, do not remove!\n append = samples.append\n capt_read = video_capture.read\n inRange = cv2.inRange\n bitwise_and = cv2.bitwise_and\n cvtColor = cv2.cvtColor\n color_min = color.min\n color_max = color.max\n BGR2GRAY = cv2.COLOR_BGR2GRAY\n threshold = cv2.threshold\n THRESH_BINARY = cv2.THRESH_BINARY\n THRESH_OTSU = cv2.THRESH_OTSU\n morphologyEx = cv2.morphologyEx\n MORPH_CLOSE = cv2.MORPH_CLOSE\n findContours = cv2.findContours\n RETR_LIST = cv2.RETR_LIST\n CHAIN_APPROX_SIMPLE = cv2.CHAIN_APPROX_SIMPLE\n boundingRect = cv2.boundingRect\n rectangle = cv2.rectangle\n car_move_forward = car.move_forward\n car_move_right = car.move_right\n\n while (1):\n ret, frame = capt_read()\n \n if burst < 200:\n # Read 200 frames or so before starting\n burst += 1\n update_current_frame(frame)\n continue\n\n if time_t() > t_end:\n break\n \n mask = inRange(frame, color_min, color_max)\n mask_color = bitwise_and(frame, frame, mask=mask)\n gray = cvtColor(mask_color, BGR2GRAY)\n\n (thresh, im_bw) = threshold(gray, 128, 255, THRESH_BINARY + THRESH_OTSU)\n im_bw = threshold(gray, thresh, 255, THRESH_BINARY)[1]\n\n # Erode and dilate using MORPH_CLOSE in morphologyEx\n erosion = morphologyEx(im_bw, MORPH_CLOSE, kernel)\n (_, contours, hierarchy) = findContours(erosion.copy(), RETR_LIST, CHAIN_APPROX_SIMPLE)\n if contours:\n contour = contours[0]\n x, y, w, h = boundingRect(contour)\n append(x)\n\n center = x + int(w / 2)\n rectangle(frame, (center, 0), (center, 480), (0, 255, 0), 2)\n else:\n append(0)\n update_current_frame(frame)\n\n length = len(samples)\n if length >= 2:\n value = sum(samples) / length\n if value > 45:\n # Move a \"lot\" to the right\n car_move_right()\n else:\n car_move_forward()\n del samples[:]\n \n if callback:\n callback()\n \n time.sleep(2)\n stop()\n # Turn the stream OFF after OpenCV has run to completion.\n car.stream(False)\n\n\n# Returns a 'single' prepared frame from OpenCV\ndef get_video(callback=None):\n if video_capture.isOpened():\n return current_frame\n else:\n if callback:\n callback() # If things are not connected\n\n\n# Stops video capturing with OpenCV and stops the car stream (closes the camera).\ndef stop():\n 
video_capture.release()\n","sub_path":"marsem/opencv.py","file_name":"opencv.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"474440200","text":"from util import hook\nimport botmodes\n\n\n@hook.sieve\ndef ignore(bot, input, func, kind, args):\n channel = None\n if input.chan in input.users.channels:\n channel = input.users.channels[input.chan]\n user = None\n if input.nick in input.users.users:\n user = input.users.users[input.nick]\n c = botmodes.Checker(bot, user, channel)\n db = bot.get_db_connection(input.conn)\n if c.check(\"neverquiet.\" + kind, db):\n return input\n if c.check(\"quiet.\" + kind, db):\n return\n return input\n","sub_path":"plugins/ignore.py","file_name":"ignore.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"133074316","text":"import unittest\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass Blog_home_ATS(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n\n def test_home_screen(self):\n driver = self.driver\n driver.maximize_window()\n driver.get(\"http://127.0.0.1:8000\")\n time.sleep(1)\n elem = driver.find_element_by_xpath(\"/html/body/div[2]/div/div/div[1]/h2/a\").click()\n time.sleep(1)\n elem = driver.find_element_by_xpath(\"/html/body/div[1]/h1/a\").click()\n time.sleep(1)\n assert \"Home Screen Functionality Working\"\n\n def tearDown(self):\n self.driver.close()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"blog_home_ATS.py","file_name":"blog_home_ATS.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"451110979","text":"import socket\nimport time \nimport pickle\n\nhost = 'localhost'\nTCP_PORT = 6000\nBUFFER_SIZE = 1024\n\nwhile True:\n s1 = socket.socket()\n s1.connect((host, TCP_PORT))\n print(\"connected\")\n tm = s1.recv(1024)\n print(\"The time got from the edge server is %s\" % pickle.loads(tm))\n time.sleep(15)\n currentTime = time.ctime(time.time()) + \"\\r\\n\"\n s1.sendall(pickle.dumps(currentTime))\n s1.close()","sub_path":"Socket-Programming/Three Layer Multithreading system/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"573415193","text":"import csv\nimport nltk\nfrom nltk.tokenize import word_tokenize\nimport re, string; pattern = re.compile('[^a-zA-Z0-9_]+')\n\ndef write_fake():\n titles = set()\n try:\n for line in csv.DictReader(open(\"data/fake.csv\")):\n if line['thread_title']:\n otitle = line['thread_title'].lower()\n if \"trump\" not in otitle:\n continue\n title = otitle.replace(\"(video)\",\"\") \\\n .replace(\"[video]\",\"\") \\\n .replace(\"re:\",\"\") \\\n .replace(\"?\",\"\") \\\n .replace(\"100percentfedup.com\",\"\")\n\n title = pattern.sub(' ', title)\n twords = word_tokenize(title)\n twords = [w for w in twords if w != 's']\n ntitle = ' '.join(twords)\n\n # \"don t\" -> \"dont\"; \"wasn t\" -> \"wasnt\"; etc\n ntitle = ntitle.replace(\"n t \", \"nt \") \n titles.add(ntitle)\n except:\n pass\n\n outfile = open(\"data/clean_fake.txt\", \"w\")\n for ntitle in titles:\n outfile.write(ntitle + \"\\n\")\n\ndef write_real():\n titles = set()\n for line in csv.reader(open(\"data/abcnews-date-text.csv\")):\n 
date = line[0]\n if date[:5] >= \"20161\":\n title = line[1].lower()\n if \"trump\" not in title:\n continue\n title = pattern.sub(' ', title)\n twords = word_tokenize(title)\n twords = [w for w in twords if w != 's']\n ntitle = ' '.join(twords)\n titles.add(ntitle)\n\n outfile = open(\"data/clean_real.txt\", \"w\")\n for ntitle in titles:\n outfile.write(ntitle + \"\\n\")\n\nif __name__ == \"__main__\":\n write_fake()\n write_real()\n","sub_path":"hw1/original/clean_script.py","file_name":"clean_script.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"27687110","text":"#vid_bsub.py\n#Author: Sean Devonport\n#Script that takes in video and applies basic frame differencing to frames.\nimport sys\nimport numpy as np\nimport cv2\nimport py_compile\n\ndef main(filename):\n\t# Network address of the IP camera\n\tIP = \"http://10.0.0.4:8080/video\"\n\n\t# Open video capture\n\tvidcap = cv2.VideoCapture(IP)\n\tframes = [0]*3\n\tpF = [0]*2\n\tthr = 100\n\n\t# capture first frame (background)\n\tsuccess, pB = vidcap.read()\n\tsize = np.shape(pB)\n\tcv2.imshow('Frame',pB)\n\tpB = cv2.cvtColor(pB,cv2.COLOR_BGR2GRAY)\n\n\twhile (True):\n\t\tsuccess,pCur = vidcap.read()\n\t\tpCur = cv2.cvtColor(pCur,cv2.COLOR_BGR2GRAY)\n\t\tif success:\t\t\n\t\t\t# Do processing\n\t\t\t# calculate foreground mask\n\t\t\tfor i in range(size[0]):\n\t\t\t\tfor j in range(size[1]):\n\t\t\t\t\tcond=abs(int(pCur[i][j])-int(pB[i][j]))\n\t\t\t\t\tif cond >thr:\n\t\t\t\t\t\tpCur[i][j]=255\n\t\t\t\t\telse:\n\t\t\t\t\t\tpCur[i][j]=0\n\n\t\t\tcv2.imshow('fdif',pCur)\n\n\t\t\t# Exit when the ESC key is pressed\n\t\t\tif cv2.waitKey(2) & 0xFF == 27:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tbreak\n\n\tvidcap.release()\n\tcv2.destroyAllWindows()\n\nmain(sys.argv[0])\n","sub_path":"basics/vid_bsub.py","file_name":"vid_bsub.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"627746072","text":"import re\nfrom datetime import datetime\nfrom urllib.parse import urlencode\n\nfrom functools import partial\nfrom typing import Any, Callable, List, Optional\n\nfrom PyQt6 import QtCore, QtWidgets, QtGui\nfrom PyQt6.QtCore import QThreadPool, QTimer, QUrl, Qt, pyqtSignal\nfrom PyQt6.QtGui import QIcon, QPixmap, QStandardItem, QStandardItemModel, QAction\nfrom PyQt6.QtWidgets import (QComboBox, QCompleter, QFrame, QHBoxLayout, QLineEdit, QMenu, QPushButton, QTabWidget, QToolBar, QVBoxLayout, QWidget)\n\nfrom sqlalchemy import and_\n# from tomlkit import items\n\nfrom simsapa import READING_BACKGROUND_COLOR, SEARCH_TIMER_SPEED, DbSchemaName, logger\nfrom simsapa.layouts.bookmark_dialog import HasBookmarkDialog\nfrom simsapa.layouts.find_panel import FindSearched, FindPanel\nfrom simsapa.layouts.reader_web import LinkHoverData, ReaderWebEnginePage\nfrom simsapa.layouts.search_query_worker import SearchQueryWorker\nfrom simsapa.layouts.sutta_queries import QuoteScope, SuttaQueries\nfrom simsapa.layouts.simsapa_webengine import SimsapaWebEngine\nfrom ..app.db.search import SearchResult, sutta_hit_to_search_result, RE_ALL_BOOK_SUTTA_REF\nfrom ..app.db import appdata_models as Am\nfrom ..app.db import userdata_models as Um\nfrom ..app.types import AppData, OpenPromptParams, QFixed, QMinimum, QExpanding, QueryType, SearchMode, SuttaQuote, SuttaSearchModeNameToType, USutta, UDictWord, SuttaSearchWindowInterface, sutta_quote_from_url\nfrom .sutta_tab import SuttaTabWidget\nfrom .memo_dialog 
import HasMemoDialog\nfrom .html_content import html_page\nfrom .help_info import setup_info_button\nfrom .sutta_select_dialog import SuttaSelectDialog\n\n\nclass SuttaSearchWindowState(QWidget, HasMemoDialog, HasBookmarkDialog):\n\n searchbar_layout: Optional[QHBoxLayout]\n sutta_tabs_layout: Optional[QVBoxLayout]\n tabs_layout: Optional[QVBoxLayout]\n\n queries: SuttaQueries\n search_extras: QHBoxLayout\n palibuttons_frame: QFrame\n search_input: QLineEdit\n toggle_pali_btn: QPushButton\n _app_data: AppData\n _autocomplete_model: QStandardItemModel\n sutta_tabs: QTabWidget\n sutta_tab: SuttaTabWidget\n _related_tabs: List[SuttaTabWidget]\n _search_timer = QTimer()\n _last_query_time = datetime.now()\n search_query_workers: List[SearchQueryWorker] = []\n search_mode_dropdown: QComboBox\n show_url_action_fn: Callable\n\n open_sutta_new_signal = pyqtSignal(str)\n # queue_id, side, uid\n open_in_study_window_signal = pyqtSignal([str, str, str])\n link_mouseover = pyqtSignal(dict)\n link_mouseleave = pyqtSignal(str)\n page_dblclick = pyqtSignal()\n hide_preview = pyqtSignal()\n bookmark_edit = pyqtSignal(str)\n open_gpt_prompt = pyqtSignal(dict)\n\n def __init__(self,\n app_data: AppData,\n parent_window: SuttaSearchWindowInterface,\n searchbar_layout: Optional[QHBoxLayout],\n sutta_tabs_layout: Optional[QVBoxLayout],\n tabs_layout: Optional[QVBoxLayout],\n focus_input: bool = True,\n enable_language_filter: bool = True,\n enable_search_extras: bool = True,\n enable_sidebar: bool = True,\n enable_find_panel: bool = True,\n show_query_results_in_active_tab: bool = False,\n custom_create_context_menu_fn: Optional[Callable] = None) -> None:\n super().__init__()\n\n self.pw = parent_window\n\n self.enable_language_filter = enable_language_filter\n self.enable_search_extras = enable_search_extras\n self.enable_sidebar = enable_sidebar\n self.enable_find_panel = enable_find_panel\n\n self.searchbar_layout = searchbar_layout\n self.sutta_tabs_layout = sutta_tabs_layout\n self.tabs_layout = tabs_layout\n\n self.query_in_tab = show_query_results_in_active_tab\n self.showing_query_in_tab = False\n\n self.custom_create_context_menu_fn = custom_create_context_menu_fn\n\n self.features: List[str] = []\n self._app_data: AppData = app_data\n\n self.show_url_action_fn = self._show_sutta_by_url\n\n self.queries = SuttaQueries(self._app_data)\n\n self.page_len = 20\n\n self.thread_pool = QThreadPool()\n\n self._recent: List[USutta] = []\n\n self._related_tabs: List[SuttaTabWidget] = []\n\n self._autocomplete_model = QStandardItemModel()\n\n self.focus_input = focus_input\n\n self._setup_ui()\n self._connect_signals()\n\n self.init_bookmark_dialog()\n self.init_memo_dialog()\n\n def _init_search_query_workers(self, query: str = \"\"):\n if self.enable_search_extras:\n idx = self.sutta_language_filter_dropdown.currentIndex()\n language = self.sutta_language_filter_dropdown.itemText(idx)\n if language == \"Language\":\n only_lang = None\n else:\n only_lang = language\n\n if hasattr(self, 'sutta_source_filter_dropdown'):\n idx = self.sutta_source_filter_dropdown.currentIndex()\n source = self.sutta_source_filter_dropdown.itemText(idx)\n if source == \"Source\":\n only_source = None\n else:\n only_source = source\n else:\n only_source = None\n\n else:\n only_lang = None\n only_source = None\n\n disabled_labels = self._app_data.app_settings.get('disabled_sutta_labels', None)\n self._last_query_time = datetime.now()\n\n idx = self.search_mode_dropdown.currentIndex()\n s = self.search_mode_dropdown.itemText(idx)\n 
mode = SuttaSearchModeNameToType[s]\n\n for i in self.search_query_workers:\n i.will_emit_finished = False\n\n self.search_query_workers = []\n\n # Sutta query worker\n\n w = SearchQueryWorker(self._app_data.search_indexed.suttas_index,\n self.page_len,\n mode,\n sutta_hit_to_search_result)\n\n w.set_query(query,\n self._last_query_time,\n disabled_labels,\n only_lang,\n only_source)\n\n w.signals.finished.connect(partial(self._search_query_finished))\n\n self.search_query_workers.append(w)\n\n # Language query workers\n\n index_names = self._app_data.search_indexed.suttas_lang_index.keys()\n for i in index_names:\n\n w = SearchQueryWorker(self._app_data.search_indexed.suttas_lang_index[i],\n self.page_len,\n mode,\n sutta_hit_to_search_result)\n\n w.set_query(query,\n self._last_query_time,\n disabled_labels,\n only_lang,\n only_source)\n\n w.signals.finished.connect(partial(self._search_query_finished))\n\n self.search_query_workers.append(w)\n\n def _get_active_tab(self) -> SuttaTabWidget:\n current_idx = self.sutta_tabs.currentIndex()\n if current_idx == 0:\n tab = self.sutta_tab\n else:\n tab = self._related_tabs[current_idx-1]\n\n return tab\n\n def _get_selection(self) -> Optional[str]:\n tab = self._get_active_tab()\n text = tab.qwe.selectedText()\n # U+2029 Paragraph Separator to blank line\n text = text.replace('\\u2029', \"\\n\\n\")\n text = text.strip()\n if len(text) > 0:\n return text\n else:\n return None\n\n def _setup_ui(self):\n self._setup_search_bar();\n\n self._setup_sutta_tabs()\n\n if self.enable_language_filter:\n self._setup_language_filter()\n\n if self.enable_search_extras:\n self._setup_source_filter()\n # self._setup_sutta_select_button() # TODO: list form is too long, not usable like this\n # self._setup_toggle_pali_button() # TODO: reimplement as hover window\n setup_info_button(self.search_extras, self)\n\n # self._setup_pali_buttons() # TODO: reimplement as hover window\n\n if self.enable_find_panel:\n self._find_panel = FindPanel()\n\n self.find_toolbar = QToolBar()\n self.find_toolbar.addWidget(self._find_panel)\n\n self.pw.addToolBar(QtCore.Qt.ToolBarArea.BottomToolBarArea, self.find_toolbar)\n self.find_toolbar.hide()\n\n def _setup_search_bar(self):\n if self.searchbar_layout is None:\n return\n\n self.back_recent_button = QtWidgets.QPushButton()\n sizePolicy = QtWidgets.QSizePolicy(QFixed, QFixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.back_recent_button.sizePolicy().hasHeightForWidth())\n\n self.back_recent_button.setSizePolicy(sizePolicy)\n self.back_recent_button.setMinimumSize(QtCore.QSize(40, 40))\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/arrow-left\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n\n self.back_recent_button.setIcon(icon)\n self.back_recent_button.setObjectName(\"back_recent_button\")\n\n self.searchbar_layout.addWidget(self.back_recent_button)\n\n self.forward_recent_button = QtWidgets.QPushButton()\n\n sizePolicy = QtWidgets.QSizePolicy(QFixed, QFixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.forward_recent_button.sizePolicy().hasHeightForWidth())\n\n self.forward_recent_button.setSizePolicy(sizePolicy)\n self.forward_recent_button.setMinimumSize(QtCore.QSize(40, 40))\n\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/arrow-right\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n self.forward_recent_button.setIcon(icon1)\n\n 
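# The back/forward arrow buttons page through the recently viewed suttas.\n        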
self.searchbar_layout.addWidget(self.forward_recent_button)\n\n self.search_input = QtWidgets.QLineEdit()\n\n sizePolicy = QtWidgets.QSizePolicy(QFixed, QFixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.search_input.sizePolicy().hasHeightForWidth())\n self.search_input.setSizePolicy(sizePolicy)\n\n self.search_input.setMinimumSize(QtCore.QSize(250, 35))\n self.search_input.setClearButtonEnabled(True)\n\n self.searchbar_layout.addWidget(self.search_input)\n\n if self.focus_input:\n self.search_input.setFocus()\n\n self.search_button = QtWidgets.QPushButton()\n\n sizePolicy = QtWidgets.QSizePolicy(QFixed, QFixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.search_button.sizePolicy().hasHeightForWidth())\n\n self.search_button.setSizePolicy(sizePolicy)\n self.search_button.setMinimumSize(QtCore.QSize(40, 40))\n\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/search\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n\n self.search_button.setIcon(icon2)\n self.searchbar_layout.addWidget(self.search_button)\n\n self.search_mode_dropdown = QComboBox()\n items = SuttaSearchModeNameToType.keys()\n self.search_mode_dropdown.addItems(items)\n self.search_mode_dropdown.setFixedHeight(40)\n\n mode = self._app_data.app_settings.get('sutta_search_mode', SearchMode.FulltextMatch)\n values = list(map(lambda x: x[1], SuttaSearchModeNameToType.items()))\n idx = values.index(mode)\n self.search_mode_dropdown.setCurrentIndex(idx)\n\n self.searchbar_layout.addWidget(self.search_mode_dropdown)\n\n self.search_extras = QtWidgets.QHBoxLayout()\n self.searchbar_layout.addLayout(self.search_extras)\n\n spacerItem = QtWidgets.QSpacerItem(40, 20, QExpanding, QMinimum)\n\n self.searchbar_layout.addItem(spacerItem)\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/angles-right\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n\n self.show_sidebar_btn = QPushButton()\n self.show_sidebar_btn.setIcon(icon)\n self.show_sidebar_btn.setMinimumSize(QtCore.QSize(40, 40))\n self.show_sidebar_btn.setToolTip(\"Toggle Sidebar\")\n\n if self.enable_sidebar:\n self.searchbar_layout.addWidget(self.show_sidebar_btn)\n\n style = \"\"\"\nQWidget { border: 1px solid #272727; }\nQWidget:focus { border: 1px solid #1092C3; }\n \"\"\"\n\n self.search_input.setStyleSheet(style)\n\n completer = QCompleter(self._autocomplete_model, self)\n completer.setMaxVisibleItems(20)\n completer.setCaseSensitivity(Qt.CaseSensitivity.CaseInsensitive)\n completer.setModelSorting(QCompleter.ModelSorting.CaseInsensitivelySortedModel)\n\n self.search_input.setCompleter(completer)\n\n self.search_input.setFocus()\n\n def _setup_sutta_tabs(self):\n if self.sutta_tabs_layout is None:\n return\n\n self.sutta_tabs = QTabWidget()\n self.sutta_tabs.setStyleSheet(\"*[style_class='sutta_tab'] { background-color: %s; }\" % READING_BACKGROUND_COLOR)\n\n self.sutta_tab = SuttaTabWidget(self._app_data, \"Sutta\", 0, self._new_webengine())\n self.sutta_tab.setProperty('style_class', 'sutta_tab')\n self.sutta_tab.layout().setContentsMargins(0, 0, 0, 0)\n\n self.sutta_tabs.addTab(self.sutta_tab, \"Sutta\")\n\n html = html_page('', self._app_data.api_url)\n self.sutta_tab.set_qwe_html(html)\n\n self.sutta_tabs_layout.addWidget(self.sutta_tabs)\n\n def _link_mouseover(self, hover_data: LinkHoverData):\n self.link_mouseover.emit(hover_data)\n\n def _link_mouseleave(self, href: str):\n self.link_mouseleave.emit(href)\n\n def 
_page_dblclick(self):\n        if self._app_data.app_settings['double_click_dict_lookup']:\n            self.page_dblclick.emit()\n\n    def _emit_hide_preview(self):\n        self.hide_preview.emit()\n\n    def _new_webengine(self) -> SimsapaWebEngine:\n        page = ReaderWebEnginePage(self)\n        page.helper.mouseover.connect(partial(self._link_mouseover))\n        page.helper.mouseleave.connect(partial(self._link_mouseleave))\n        page.helper.dblclick.connect(partial(self._page_dblclick))\n        page.helper.hide_preview.connect(partial(self._emit_hide_preview))\n\n        page.helper.bookmark_edit.connect(partial(self.handle_edit_bookmark))\n\n        if self.custom_create_context_menu_fn:\n            qwe = SimsapaWebEngine(page, self.custom_create_context_menu_fn)\n        else:\n            qwe = SimsapaWebEngine(page, self._create_qwe_context_menu)\n\n        return qwe\n\n    def _add_new_tab(self, title: str, sutta: Optional[USutta]):\n        # don't subtract one, because the _related_tabs start after sutta_tab,\n        # and tab indexing starts from 0\n        tab_index = len(self._related_tabs)\n        tab = SuttaTabWidget(self._app_data,\n                             title,\n                             tab_index,\n                             self._new_webengine(),\n                             sutta)\n\n        tab.render_sutta_content()\n\n        self._related_tabs.append(tab)\n\n        self.sutta_tabs.addTab(tab, title)\n\n    def _toggle_pali_buttons(self):\n        show = self.toggle_pali_btn.isChecked()\n        self.pw.palibuttons_frame.setVisible(show)\n\n        self._app_data.app_settings['suttas_show_pali_buttons'] = show\n        self._app_data._save_app_settings()\n\n    def _setup_toggle_pali_button(self):\n        icon = QIcon()\n        icon.addPixmap(QPixmap(\":/keyboard\"))\n\n        btn = QPushButton()\n        btn.setFixedSize(40, 40)\n        btn.setToolTip(\"Toggle Pali Buttons\")\n        btn.clicked.connect(partial(self._toggle_pali_buttons))\n        btn.setIcon(icon)\n\n        show = self._app_data.app_settings.get('suttas_show_pali_buttons', False)\n        btn.setCheckable(True)\n        btn.setChecked(show)\n\n        self.toggle_pali_btn = btn\n        self.search_extras.addWidget(self.toggle_pali_btn)\n\n    def _setup_pali_buttons(self):\n        palibuttons_layout = QHBoxLayout()\n        self.pw.palibuttons_frame.setLayout(palibuttons_layout)\n\n        lowercase = 'ā ī ū ṃ ṁ ṅ ñ ṭ ḍ ṇ ḷ ṛ ṣ ś'.split(' ')\n\n        for i in lowercase:\n            btn = QPushButton(i)\n            btn.setFixedSize(35, 35)\n            btn.clicked.connect(partial(self._append_to_query, i))\n            palibuttons_layout.addWidget(btn)\n\n        show = self._app_data.app_settings.get('suttas_show_pali_buttons', False)\n        self.pw.palibuttons_frame.setVisible(show)\n\n    def _get_language_labels(self):\n        res = []\n\n        r = self._app_data.db_session.query(Am.Sutta.language.distinct()).all()\n        res.extend(r)\n\n        r = self._app_data.db_session.query(Um.Sutta.language.distinct()).all()\n        res.extend(r)\n\n        labels = sorted(set(map(lambda x: str(x[0]).lower(), res)))\n\n        return labels\n\n    def _get_source_uid_labels(self):\n        res = []\n\n        r = self._app_data.db_session.query(Am.Sutta.source_uid.distinct()).all()\n        res.extend(r)\n\n        r = self._app_data.db_session.query(Um.Sutta.source_uid.distinct()).all()\n        res.extend(r)\n\n        labels = sorted(set(map(lambda x: str(x[0]).lower(), res)))\n\n        return labels\n\n    def _setup_language_filter(self):\n        cmb = QComboBox()\n        items = [\"Language\",]\n        items.extend(self._get_language_labels())\n        idx = self._app_data.app_settings.get('sutta_language_filter_idx', 0)\n\n        cmb.addItems(items)\n        cmb.setFixedHeight(40)\n        cmb.setCurrentIndex(idx)\n        self.sutta_language_filter_dropdown = cmb\n        self.search_extras.addWidget(self.sutta_language_filter_dropdown)\n\n    def _setup_source_filter(self):\n        cmb = QComboBox()\n        items = [\"Source\",]\n        items.extend(self._get_source_uid_labels())\n        idx = 
self._app_data.app_settings.get('sutta_source_filter_idx', 0)\n\n cmb.addItems(items)\n cmb.setFixedHeight(40)\n cmb.setCurrentIndex(idx)\n self.sutta_source_filter_dropdown = cmb\n self.search_extras.addWidget(self.sutta_source_filter_dropdown)\n\n def _setup_sutta_select_button(self):\n # TODO create a better layout, this is too long to use like this.\n icon = QIcon()\n icon.addPixmap(QPixmap(\":/book\"))\n\n btn = QPushButton()\n btn.setFixedSize(40, 40)\n btn.setToolTip(\"Select Sutta Sources\")\n btn.clicked.connect(partial(self._show_sutta_select_dialog))\n btn.setIcon(icon)\n\n self.sutta_select_btn = btn\n self.search_extras.addWidget(self.sutta_select_btn)\n\n def _show_sutta_select_dialog(self):\n d = SuttaSelectDialog(self._app_data, self)\n\n if d.exec():\n self._handle_query()\n\n def _set_query(self, s: str):\n self.search_input.setText(s)\n\n def _append_to_query(self, s: str):\n a = self.search_input.text().strip()\n n = self.search_input.cursorPosition()\n pre = a[:n]\n post = a[n:]\n self.search_input.setText(pre + s + post)\n self.search_input.setCursorPosition(n + len(s))\n self.search_input.setFocus()\n\n def query_hits(self) -> int:\n if len(self.search_query_workers) == 0:\n return 0\n else:\n return sum([i.query_hits() for i in self.search_query_workers])\n\n def results_page(self, page_num: int) -> List[SearchResult]:\n logger.info(f\"results_page(): page_num = {page_num}\")\n n = len(self.running_queries())\n if n != 0:\n logger.info(f\"Running queries: {n}, return empty results\")\n return []\n else:\n a: List[SearchResult] = []\n for i in self.search_query_workers:\n a.extend(i.results_page(page_num))\n\n # The higher the score, the better. Reverse to get descending order.\n res = sorted(a, key=lambda x: x['score'] or 0, reverse = True)\n return res\n\n def running_queries(self) -> List[SearchQueryWorker]:\n return [i for i in self.search_query_workers if i.query_finished is None]\n\n def _search_query_finished(self):\n n = len(self.running_queries())\n logger.info(f\"_search_query_finished(), still running: {n}\")\n\n if n > 0:\n return\n\n self.pw.stop_loading_animation()\n\n if len(self.search_query_workers) == 0:\n return\n\n if self._last_query_time != self.search_query_workers[0].query_started:\n return\n\n # Restore the search icon, processing finished\n icon_search = QtGui.QIcon()\n icon_search.addPixmap(QtGui.QPixmap(\":/search\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n\n self.search_button.setIcon(icon_search)\n\n if self.enable_sidebar:\n self.pw._update_sidebar_fulltext(self.query_hits())\n\n results = self.results_page(0)\n\n if self.query_hits() == 1 and results[0]['uid'] is not None:\n self._show_sutta_by_uid(results[0]['uid'])\n\n elif self.query_in_tab:\n self._render_results_in_active_tab(self.query_hits())\n\n def _start_query_worker(self, query: str):\n logger.info(\"_start_query_worker()\")\n self.pw.start_loading_animation()\n\n self._init_search_query_workers(query)\n for i in self.search_query_workers:\n self.thread_pool.start(i)\n\n def _handle_query(self, min_length: int = 4):\n query = self.search_input.text().strip()\n logger.info(f\"_handle_query(): {query}, {min_length}\")\n\n idx = self.sutta_language_filter_dropdown.currentIndex()\n self._app_data.app_settings['sutta_language_filter_idx'] = idx\n\n if hasattr(self, 'sutta_source_filter_dropdown'):\n idx = self.sutta_source_filter_dropdown.currentIndex()\n self._app_data.app_settings['sutta_source_filter_idx'] = idx\n\n self._app_data._save_app_settings()\n\n # Re-render 
the current sutta, in case the user is trying to restore the sutta\n        # after a search in the Study Window with the clear input button.\n        if len(query) == 0 and self.showing_query_in_tab and self._get_active_tab().sutta is not None:\n            self._get_active_tab().render_sutta_content()\n            return\n\n        if re.search(RE_ALL_BOOK_SUTTA_REF, query) is None and len(query) < min_length:\n            return\n\n        # Not aborting, show the user that the app started processing\n        icon_processing = QtGui.QIcon()\n        icon_processing.addPixmap(QtGui.QPixmap(\":/stopwatch\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n        self.search_button.setIcon(icon_processing)\n\n        self._start_query_worker(query)\n\n    def _render_results_in_active_tab(self, hits: int):\n        if hits == 0:\n            return\n\n        self.showing_query_in_tab = True\n        if len(self.running_queries()) == 0:\n            a = []\n            for i in self.search_query_workers:\n                a.extend(i.all_results())\n            res = sorted(a, key=lambda x: x['score'] or 0, reverse = True)\n            self._get_active_tab().render_search_results(res)\n\n    def _handle_autocomplete_query(self, min_length: int = 4):\n        if not self.pw.action_Search_Completion.isChecked():\n            return\n\n        query = self.search_input.text().strip()\n\n        if len(query) < min_length:\n            return\n\n        self._autocomplete_model.clear()\n\n        a = set(filter(lambda x: x.lower().startswith(query.lower()), self._app_data.completion_cache['sutta_titles']))\n\n        for i in a:\n            self._autocomplete_model.appendRow(QStandardItem(i))\n\n        # NOTE: completion cache is already sorted.\n        # self._autocomplete_model.sort(0)\n\n    def _sutta_search_query(self, query: str, only_lang: Optional[str] = None, only_source: Optional[str] = None) -> List[SearchResult]:\n        # TODO This is a synchronous version of _start_query_worker(), still\n        # used in links_browser.py. Update and use the background thread worker.\n\n        self._init_search_query_workers(query)\n\n        disabled_labels = self._app_data.app_settings.get('disabled_sutta_labels', None)\n\n        # first page results\n        a = []\n        for i in self.search_query_workers:\n            i.search_query.new_query(query, disabled_labels, only_lang, only_source)\n            a.extend(i.results_page(0))\n\n        res = sorted(a, key=lambda x: x['score'] or 0, reverse = True)\n\n        return res\n\n    def _set_qwe_html(self, html: str):\n        self.sutta_tab.set_qwe_html(html)\n\n    def _add_recent(self, sutta: USutta):\n        # de-duplicate: if item already exists, remove it\n        if sutta in self._recent:\n            self._recent.remove(sutta)\n        # insert new item on top\n        self._recent.insert(0, sutta)\n\n        # Rebuild Qt recents list\n        if self.enable_sidebar:\n            def _to_title(x: USutta):\n                return \" - \".join([str(x.uid), str(x.title)])\n\n            titles = list(map(lambda x: _to_title(x), self._recent))\n\n            self.pw._set_recent_list(titles)\n\n    def _sutta_from_result(self, x: SearchResult) -> Optional[USutta]:\n        if x['schema_name'] == DbSchemaName.AppData.value:\n            sutta = self._app_data.db_session \\\n                .query(Am.Sutta) \\\n                .filter(Am.Sutta.uid == x['uid']) \\\n                .first()\n        else:\n            sutta = self._app_data.db_session \\\n                .query(Um.Sutta) \\\n                .filter(Um.Sutta.uid == x['uid']) \\\n                .first()\n        return sutta\n\n    @QtCore.pyqtSlot(dict)\n    def on_searched(self, find_searched: FindSearched):\n        tab = self._get_active_tab()\n        if find_searched['flag'] is None:\n            tab.qwe.findText(find_searched['text'])\n        else:\n            tab.qwe.findText(find_searched['text'], find_searched['flag'])\n\n    def _select_prev_tab(self):\n        selected_idx = self.sutta_tabs.currentIndex()\n        if selected_idx == -1:\n            self.sutta_tabs.setCurrentIndex(0)\n        elif selected_idx == 0:\n            return\n        else:\n            
self.sutta_tabs.setCurrentIndex(selected_idx - 1)\n\n def _select_next_tab(self):\n selected_idx = self.sutta_tabs.currentIndex()\n if selected_idx == -1:\n self.sutta_tabs.setCurrentIndex(0)\n elif selected_idx + 1 < len(self.sutta_tabs):\n self.sutta_tabs.setCurrentIndex(selected_idx + 1)\n\n def _show_url(self, url: QUrl):\n if url.host() == QueryType.suttas:\n self.show_url_action_fn(url)\n\n # elif url.host() == QueryType.words:\n # self._show_words_by_url(url)\n\n def _show_sutta_from_message(self, info: Any):\n sutta: Optional[USutta] = None\n\n if not 'table' in info.keys() or not 'id' in info.keys():\n return\n\n if info['table'] == 'appdata.suttas':\n sutta = self._app_data.db_session \\\n .query(Am.Sutta) \\\n .filter(Am.Sutta.id == info['id']) \\\n .first()\n\n elif info['table'] == 'userdata.suttas':\n sutta = self._app_data.db_session \\\n .query(Um.Sutta) \\\n .filter(Um.Sutta.id == info['id']) \\\n .first()\n\n if sutta:\n self._show_sutta(sutta)\n\n def _show_sutta_by_url(self, url: QUrl):\n if url.host() != QueryType.suttas:\n return False\n\n uid = re.sub(r\"^/\", \"\", url.path())\n\n self._show_sutta_by_uid(uid, sutta_quote_from_url(url))\n\n def _show_sutta_by_quote(self, sutta_quote: SuttaQuote):\n if len(sutta_quote['quote']) == 0:\n return\n\n self._set_query(sutta_quote['quote'])\n self._start_query_worker(sutta_quote['quote'])\n\n results = self.queries.get_suttas_by_quote(sutta_quote['quote'])\n\n if len(results) > 0:\n self._show_sutta(results[0], sutta_quote)\n self._add_recent(results[0])\n\n def _show_sutta_by_partial_uid(self,\n part_uid: str,\n sutta_quote: Optional[SuttaQuote] = None,\n quote_scope = QuoteScope.Sutta):\n\n res_sutta = self.queries.get_sutta_by_partial_uid(part_uid, sutta_quote, quote_scope)\n if not res_sutta:\n return\n\n if sutta_quote:\n self._set_query(sutta_quote['quote'])\n self._start_query_worker(sutta_quote['quote'])\n\n self._show_sutta(res_sutta, sutta_quote)\n self._add_recent(res_sutta)\n\n def _show_sutta_by_uid(self,\n uid: str,\n sutta_quote: Optional[SuttaQuote] = None,\n quote_scope = QuoteScope.Sutta):\n\n if len(uid) == 0 and sutta_quote is None:\n return\n\n if len(uid) == 0 and sutta_quote is not None:\n self._show_sutta_by_quote(sutta_quote)\n return\n\n if len(uid) > 0 and not self.queries.is_complete_uid(uid):\n self._show_sutta_by_partial_uid(uid, sutta_quote, quote_scope)\n return\n\n if sutta_quote:\n self._set_query(sutta_quote['quote'])\n self._start_query_worker(sutta_quote['quote'])\n\n sutta = self.queries.get_sutta_by_uid(uid, sutta_quote, quote_scope)\n\n if sutta:\n self._show_sutta(sutta, sutta_quote)\n self._add_recent(sutta)\n else:\n logger.info(f\"Sutta not found: {uid}\")\n\n def _show_word_by_uid(self, uid: str):\n results: List[UDictWord] = []\n\n res = self._app_data.db_session \\\n .query(Am.DictWord) \\\n .filter(Am.DictWord.uid == uid) \\\n .all()\n results.extend(res)\n\n res = self._app_data.db_session \\\n .query(Um.DictWord) \\\n .filter(Um.DictWord.uid == uid) \\\n .all()\n results.extend(res)\n\n if len(results) > 0:\n self._app_data.dict_word_to_open = results[0]\n self.pw.action_Dictionary_Search.activate(QAction.ActionEvent.Trigger)\n\n def _show_sutta(self, sutta: USutta, sutta_quote: Optional[SuttaQuote] = None):\n logger.info(f\"_show_sutta() : {sutta.uid}\")\n self.showing_query_in_tab = False\n self.sutta_tab.sutta = sutta\n self.sutta_tab.render_sutta_content(sutta_quote)\n\n self.sutta_tabs.setTabText(0, str(sutta.uid))\n\n self._add_related_tabs(sutta)\n\n if 
self.enable_sidebar:\n self.pw.update_memos_list_for_sutta(sutta)\n self.pw.show_network_graph(sutta)\n\n def _show_next_recent(self):\n active_sutta = self._get_active_tab().sutta\n if active_sutta is None:\n return\n\n res = [x for x in range(len(self._recent)) if self._recent[x].uid == active_sutta.uid]\n\n if len(res) == 0:\n return\n else:\n current_idx = res[0]\n\n if current_idx + 1 >= len(self._recent):\n # This is already the last, no next item.\n if self.showing_query_in_tab:\n # Re-render it, in case user is trying to restore sutta after a search in the Study Window.\n self._show_sutta(self._recent[current_idx])\n else:\n return\n else:\n self._show_sutta(self._recent[current_idx + 1])\n\n def _show_prev_recent(self):\n active_sutta = self._get_active_tab().sutta\n if active_sutta is None:\n return\n\n res = [x for x in range(len(self._recent)) if self._recent[x].uid == active_sutta.uid]\n\n if len(res) == 0:\n return\n else:\n current_idx = res[0]\n\n if current_idx == 0:\n # This is already the first, no previous.\n if self.showing_query_in_tab:\n # Re-render it, in case user is trying to restore sutta after a search in the Study Window.\n self._show_sutta(self._recent[current_idx])\n else:\n return\n else:\n self._show_sutta(self._recent[current_idx - 1])\n\n def _remove_related_tabs(self):\n n = 0\n max_tries = 5\n # Tabs are not removed immediately. Have to repeatedly try to remove the\n # tabs until they are all gone.\n while len(self._related_tabs) > 0 and n < max_tries:\n for idx, tab in enumerate(self._related_tabs):\n del self._related_tabs[idx]\n tab.close()\n tab.deleteLater()\n\n n += 1\n\n def _add_related_tabs(self, sutta: USutta):\n self.sutta_tabs.setCurrentIndex(0)\n self._remove_related_tabs()\n\n # read state from the window action, not from app_data.app_settings, b/c\n # that will be set from windows.py\n if hasattr(self.pw, 'action_Show_Related_Suttas') \\\n and not self.pw.action_Show_Related_Suttas.isChecked():\n return\n\n uid_ref = re.sub('^([^/]+)/.*', r'\\1', str(sutta.uid))\n\n res: List[USutta] = []\n r = self._app_data.db_session \\\n .query(Am.Sutta) \\\n .filter(and_(\n Am.Sutta.uid != sutta.uid,\n Am.Sutta.uid.like(f\"{uid_ref}/%\"),\n )) \\\n .all()\n res.extend(r)\n\n r = self._app_data.db_session \\\n .query(Um.Sutta) \\\n .filter(and_(\n Um.Sutta.uid != sutta.uid,\n Um.Sutta.uid.like(f\"{uid_ref}/%\"),\n )) \\\n .all()\n res.extend(r)\n\n res_sorted: List[USutta] = []\n res_remain: List[USutta] = []\n\n # Pali first\n for i in res:\n if i.language == 'pli':\n res_sorted.append(i)\n else:\n res_remain.append(i)\n\n # sort the remaining items by language\n res_remain.sort(key=lambda x: str(x.language))\n\n res_sorted.extend(res_remain)\n\n for sutta in res_sorted:\n if sutta.uid is not None:\n title = str(sutta.uid)\n else:\n title = \"\"\n\n self._add_new_tab(title, sutta)\n\n def reload_page(self):\n self._get_active_tab().render_sutta_content()\n\n def _handle_copy(self):\n text = self._get_selection()\n if text is not None:\n self._app_data.clipboard_setText(text)\n\n def _handle_copy_link_to_sutta(self):\n active_sutta = self._get_active_tab().sutta\n if active_sutta is None:\n return\n\n url = QUrl(f\"ssp://{QueryType.suttas.value}/{active_sutta.uid}\")\n\n quote = self._get_selection()\n if quote is not None and len(quote) > 0:\n url.setQuery(urlencode({'q': quote}))\n\n self._app_data.clipboard_setText(url.toString())\n\n def _handle_copy_uid(self):\n active_sutta = self._get_active_tab().sutta\n if active_sutta is None:\n return\n\n 
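# Copy the active sutta's uid to the clipboard, prefixed with 'uid:'.\n        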
uid = 'uid:' + active_sutta.uid\n self._app_data.clipboard_setText(uid)\n\n def _handle_paste(self):\n s = self._app_data.clipboard_getText()\n if s is not None:\n self._append_to_query(s)\n self._handle_query()\n\n def _open_in_study_window(self, side: str):\n tab = self._get_active_tab()\n sutta = tab.sutta\n if sutta is None:\n return\n\n uid: str = sutta.uid # type: ignore\n self.open_in_study_window_signal.emit(self.pw.queue_id, side, uid)\n\n def _lookup_selection_in_suttas(self):\n self.pw.activateWindow()\n text = self._get_selection()\n if text is not None:\n self._set_query(text)\n self._handle_query()\n\n def _lookup_selection_in_new_sutta_window(self):\n text = self._get_selection()\n if text is not None:\n self.pw.lookup_in_new_sutta_window_signal.emit(text)\n\n def _lookup_selection_in_dictionary(self):\n text = self._get_selection()\n if text is not None:\n self.pw.lookup_in_dictionary_signal.emit(text)\n\n def _create_qwe_context_menu(self, menu: QMenu):\n self.qwe_copy_selection = QAction(\"Copy Selection\")\n # NOTE: don't bind Ctrl-C, will be ambiguous to the window menu action\n self.qwe_copy_selection.triggered.connect(partial(self._handle_copy))\n menu.addAction(self.qwe_copy_selection)\n\n self.qwe_copy_link_to_sutta = QAction(\"Copy Link to Sutta and Selection\")\n self.qwe_copy_link_to_sutta.triggered.connect(partial(self._handle_copy_link_to_sutta))\n menu.addAction(self.qwe_copy_link_to_sutta)\n\n self.qwe_copy_uid = QAction(\"Copy uid\")\n self.qwe_copy_uid.triggered.connect(partial(self._handle_copy_uid))\n menu.addAction(self.qwe_copy_uid)\n\n self.qwe_bookmark = QAction(\"Create Bookmark from Selection\")\n self.qwe_bookmark.triggered.connect(partial(self.handle_create_bookmark_for_sutta))\n menu.addAction(self.qwe_bookmark)\n\n self.qwe_memo = QAction(\"Create Memo\")\n self.qwe_memo.triggered.connect(partial(self.handle_create_memo_for_sutta))\n menu.addAction(self.qwe_memo)\n\n self.qwe_study_menu = QMenu(\"Open in Study Window\")\n menu.addMenu(self.qwe_study_menu)\n\n self.qwe_study_left = QAction(\"Left\")\n self.qwe_study_left.triggered.connect(partial(self._open_in_study_window, 'left'))\n self.qwe_study_menu.addAction(self.qwe_study_left)\n\n self.qwe_study_middle = QAction(\"Middle\")\n self.qwe_study_middle.triggered.connect(partial(self._open_in_study_window, 'middle'))\n self.qwe_study_menu.addAction(self.qwe_study_middle)\n\n self.qwe_lookup_menu = QMenu(\"Lookup Selection\")\n menu.addMenu(self.qwe_lookup_menu)\n\n self.qwe_lookup_in_suttas = QAction(\"In Suttas\")\n self.qwe_lookup_in_suttas.triggered.connect(partial(self._lookup_selection_in_suttas))\n self.qwe_lookup_menu.addAction(self.qwe_lookup_in_suttas)\n\n self.qwe_lookup_in_dictionary = QAction(\"In Dictionary\")\n self.qwe_lookup_in_dictionary.triggered.connect(partial(self._lookup_selection_in_dictionary))\n self.qwe_lookup_menu.addAction(self.qwe_lookup_in_dictionary)\n\n self.gpt_prompts_menu = QMenu(\"GPT Prompts\")\n menu.addMenu(self.gpt_prompts_menu)\n\n prompts = self._app_data.db_session \\\n .query(Um.GptPrompt) \\\n .filter(Um.GptPrompt.show_in_context == True) \\\n .all()\n\n self.gpt_prompts_actions = []\n\n def _add_action_to_menu(x: Um.GptPrompt):\n a = QAction(str(x.name_path))\n db_id: int = x.id # type: ignore\n a.triggered.connect(partial(self._open_gpt_prompt_with_params, db_id))\n self.gpt_prompts_actions.append(a)\n self.gpt_prompts_menu.addAction(a)\n\n for i in prompts:\n _add_action_to_menu(i)\n\n icon = QIcon()\n icon.addPixmap(QPixmap(\":/new-window\"))\n\n 
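# Context menu action for opening the current sutta in a separate window.\n        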
self.qwe_open_new_action = QAction(\"Open in New Window\")\n self.qwe_open_new_action.setIcon(icon)\n self.qwe_open_new_action.triggered.connect(partial(self._handle_open_content_new))\n menu.addAction(self.qwe_open_new_action)\n\n tab = self._get_active_tab()\n\n self.qwe_devtools = QAction(\"Show Inspector\")\n self.qwe_devtools.setCheckable(True)\n self.qwe_devtools.setChecked(tab.devtools_open)\n self.qwe_devtools.triggered.connect(partial(self._toggle_devtools_inspector))\n menu.addAction(self.qwe_devtools)\n\n def _open_gpt_prompt_with_params(self, prompt_db_id: int):\n tab = self._get_active_tab()\n if tab.sutta is None:\n uid = None\n else:\n uid = str(tab.sutta.uid)\n\n params = OpenPromptParams(\n prompt_db_id = prompt_db_id,\n with_name = '', # Empty string to clear existing name\n sutta_uid = uid,\n selection_text = self._get_selection(),\n )\n\n self.open_gpt_prompt.emit(params)\n\n def _toggle_devtools_inspector(self):\n tab = self._get_active_tab()\n\n if self.qwe_devtools.isChecked():\n tab._show_devtools()\n else:\n tab._hide_devtools()\n\n def _handle_open_content_new(self):\n tab = self._get_active_tab()\n if tab.sutta is not None:\n self.open_sutta_new_signal.emit(str(tab.sutta.uid))\n else:\n logger.warn(\"Sutta is not set\")\n\n def _handle_show_related_suttas(self):\n active_sutta = self._get_active_tab().sutta\n if active_sutta is None:\n return\n\n if active_sutta is not None:\n self._add_related_tabs(active_sutta)\n\n def _handle_show_find_panel(self):\n self.find_toolbar.show()\n self._find_panel.search_input.setFocus()\n\n def _user_typed(self):\n self._handle_autocomplete_query(min_length=4)\n\n if not self.pw.action_Search_As_You_Type.isChecked():\n return\n\n matches = re.match(RE_ALL_BOOK_SUTTA_REF, self.search_input.text().strip())\n if matches is not None:\n min_length = 1\n else:\n min_length = 4\n\n if not self._search_timer.isActive():\n self._search_timer = QTimer()\n self._search_timer.timeout.connect(partial(self._handle_query, min_length=min_length))\n self._search_timer.setSingleShot(True)\n\n self._search_timer.start(SEARCH_TIMER_SPEED)\n\n def _handle_search_mode_changed(self):\n idx = self.search_mode_dropdown.currentIndex()\n s = self.search_mode_dropdown.itemText(idx)\n\n self._app_data.app_settings['sutta_search_mode'] = SuttaSearchModeNameToType[s]\n self._app_data._save_app_settings()\n\n def _connect_signals(self):\n if hasattr(self, 'search_button'):\n self.search_button.clicked.connect(partial(self._handle_query, min_length=1))\n\n if hasattr(self, 'search_input'):\n self.search_input.textEdited.connect(partial(self._user_typed))\n self.search_input.returnPressed.connect(partial(self._handle_query, min_length=1))\n self.search_input.completer().activated.connect(partial(self._handle_query, min_length=1))\n\n if hasattr(self, 'search_mode_dropdown'):\n self.search_mode_dropdown.currentIndexChanged.connect(partial(self._handle_search_mode_changed))\n\n if hasattr(self, 'back_recent_button'):\n if self.enable_sidebar:\n self.back_recent_button.clicked.connect(partial(self.pw._select_next_recent))\n self.forward_recent_button.clicked.connect(partial(self.pw._select_prev_recent))\n\n def _handle_sidebar():\n self.pw.action_Show_Sidebar.activate(QAction.ActionEvent.Trigger)\n\n self.show_sidebar_btn.clicked.connect(partial(_handle_sidebar))\n\n else:\n self.back_recent_button.clicked.connect(partial(self._show_next_recent))\n self.forward_recent_button.clicked.connect(partial(self._show_prev_recent))\n\n if self.enable_language_filter and 
hasattr(self, 'sutta_language_filter_dropdown'):\n self.sutta_language_filter_dropdown.currentIndexChanged.connect(partial(self._handle_query, min_length=4))\n\n if self.enable_search_extras and hasattr(self, 'sutta_source_filter_dropdown'):\n self.sutta_source_filter_dropdown.currentIndexChanged.connect(partial(self._handle_query, min_length=4))\n\n if self.enable_find_panel:\n self._find_panel.searched.connect(self.on_searched)\n self._find_panel.closed.connect(self.find_toolbar.hide)\n\n self.pw.action_Find_in_Page \\\n .triggered.connect(self._handle_show_find_panel)\n","sub_path":"simsapa/layouts/sutta_search_window_state.py","file_name":"sutta_search_window_state.py","file_ext":"py","file_size_in_byte":44139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"593573620","text":"from django.shortcuts import render,redirect, get_object_or_404\nfrom django.views.decorators.http import require_http_methods, require_POST, require_safe\nfrom django.core.paginator import Paginator\nfrom .models import Movie\nfrom .forms import MovieForm\n\n# Create your views here.\n@require_http_methods(['GET','POST'])\ndef create(request):\n if request.method == 'POST':\n form = MovieForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n movie = form.save()\n return redirect('movies:detail', movie.pk)\n else:\n form = MovieForm()\n context = {\n 'form':form,\n }\n return render(request, 'movies/create.html', context)\n\n@require_safe\ndef index(request):\n movies = Movie.objects.order_by('-pk')\n paginator = Paginator(movies, 3)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context = {\n 'page_obj':page_obj,\n }\n return render(request, 'movies/index.html', context)\n\n@require_safe\ndef detail(request, pk):\n movie = get_object_or_404(Movie, pk=pk)\n context = {\n 'movie':movie,\n }\n return render(request, 'movies/detail.html', context)\n\n@require_http_methods(['GET','POST'])\ndef update(request, pk):\n movie = get_object_or_404(Movie, pk=pk)\n if request.method == 'POST':\n form = MovieForm(data=request.POST, files=request.FILES, instance=movie)\n if form.is_valid():\n form.save()\n return redirect('movies:detail', movie.pk)\n else:\n form = MovieForm(instance=movie)\n context = {\n 'movie':movie,\n 'form':form,\n }\n return render(request, 'movies/update.html', context)\n\n@require_POST\ndef delete(request, pk):\n movie = get_object_or_404(Movie, pk=pk)\n movie.delete()\n return redirect('movies:index')","sub_path":"Projects/pjt05/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"403954921","text":"\n\n# There is a ball in a maze with empty spaces and walls. The ball can go through empty spaces by rolling up (u), down (d), left (l) or right (r), but it won't stop rolling until hitting a wall. When the ball stops, it could choose the next direction. \n# There is also a hole in this maze. The ball will drop into the hole if it rolls on to the hole.\n\n# Given the ball position, the hole position and the maze, find out how the ball could drop into the hole by moving the shortest distance. \n# The distance is defined by the number of empty spaces traveled by the ball from the start position (excluded) to the hole (included). \n# Output the moving directions by using 'u', 'd', 'l' and 'r'. 
Since there could be several different shortest ways, you should output the lexicographically smallest way. \n# If the ball cannot reach the hole, output \"impossible\".\n\n# The maze is represented by a binary 2D array. 1 means the wall and 0 means the empty space. \n# You may assume that the borders of the maze are all walls. The ball and the hole coordinates are represented by row and column indexes.\n\n# There is only one ball and one hole in the maze.\n# Both the ball and hole exist on an empty space, and they will not be at the same position initially.\n# The given maze does not contain border (like the red rectangle in the example pictures), but you could assume the border of the maze are all walls.\n# The maze contains at least 2 empty spaces, and the width and the height of the maze won't exceed 30.\n\n# Similarities and differences between this problem and The Maze II:\n# 1. Like II, we record step counts, and the visited map stores the minimum steps to reach each stopping point;\n# 2. The first difference: in III the ball stops as soon as it rolls onto the hole, while in II the ball must hit a wall before we can tell whether it has arrived;\n# 3. The second difference: III returns the path (still selected by comparing step counts). Note that the path is not extended while the ball is rolling; it is only updated after the ball hits a wall and the direction changes.\n\n# The ball drops into the hole; record the lexicographically smallest among the shortest paths.\n\nimport heapq\n\nclass Solution:\n    def findShortestWay(self, maze, ball, hole):\n        \"\"\"\n        :type maze: List[List[int]]\n        :type ball: List[int]\n        :type hole: List[int]\n        :rtype: str\n        \"\"\"\n        # heap\n        m, n = len(maze), len(maze[0])\n        hp = [(0, \"\", ball[0], ball[1])]\n        heapq.heapify(hp)\n        visited = {(ball[0], ball[1]): [0, \"\"]}\n        while hp:\n            dist, path, x, y = heapq.heappop(hp)\n            if [x, y] == hole:\n                return path\n\n            for dx, dy, p in ((-1, 0, \"u\"), (1, 0, \"d\"), (0, -1, \"l\"), (0, 1, \"r\")):\n                nx, ny = x, y\n                step = 0\n                while 0 <= nx + dx < m and 0 <= ny + dy < n and maze[nx + dx][ny + dy] == 0:\n                    nx += dx\n                    ny += dy\n                    step += 1\n                    if [nx, ny] == hole:\n                        break\n\n                if (nx, ny) not in visited or [dist + step, path + p] < visited[(nx, ny)]: # shortest distance and lexicographically smallest way\n                    visited[(nx, ny)] = [dist + step, path + p]\n                    heapq.heappush(hp, (dist + step, path + p, nx, ny))\n\n        return \"impossible\"\n    ","sub_path":"The Maze III.py","file_name":"The Maze III.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"194523448","text":"from ..main import ActionRequestHandler as RequestHandler\n\n\n#\n# The name \"any\" basically means that this function will get a call for any of the verbs (GET/POST/HEAD at this moment)\n# Since the action is part of the module \"ping\" this means that everything \"/ping\"\n# will be redirected to this function.\ndef any(request: RequestHandler,\n        url: 'urllib.parse.ParseResult' = None,\n        query: dict = None,\n        form: 'Dict[List[cgi.FieldStorage]]' = None,\n        files: 'Dict[List[cgi.FieldStorage]]' = None,\n        original_url: 'urllib.parse.ParseResult' = None,\n        ):\n    \"\"\"\n    Action Handler function as an example. 
This responds with a 200 OK when a user does /ping\n\n    :param request: The Handler that is being used to store the current request\n    :param url: The parsed url for which this handler was called\n    :param query: If any, the parsed query string\n    :param form: Any posted form fields as a dict keyed by the field name with a list of FieldStorage instances as value\n    :param files: Any posted files\n    \"\"\"\n    request.success('Pong', content=\"Pong\")\n","sub_path":"simple_action_server/actions/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"491325088","text":"\n\n#class header\nclass _VOYEUR():\n\tdef __init__(self):\n\t\tself.name = \"VOYEUR\"\n\t\tself.definitions = [u\"a person who gets sexual pleasure from secretly watching other people in sexual situations, or (more generally) a person who watches other people's private lives: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1=None, obj2=None):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_voyeur.py","file_name":"_voyeur.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"402651316","text":"from django.shortcuts import render, get_object_or_404, redirect\n\nfrom .models import Task, status_choices\nfrom .forms import TaskForm\n\n\ndef index_view(request):\n    tasks = Task.objects.all()\n    return render(request, 'index.html', context={'tasks': tasks})\n\n\ndef task_view(request, pk):\n    task = get_object_or_404(Task, id=pk)\n    return render(request, 'task_view.html', context={'task': task})\n\n\ndef task_create_view(request):\n    if request.method == \"GET\":\n        form = TaskForm()\n        context = {'status_choices': status_choices, 'form': form}\n        return render(request, 'task_create.html', context)\n    elif request.method == \"POST\":\n        form = TaskForm(data=request.POST)\n        if form.is_valid():\n            task = Task(\n                title=form.cleaned_data.get('title'),\n                status=form.cleaned_data.get('status'),\n                up_to=form.cleaned_data.get('up_to'),\n                description=form.cleaned_data.get('description')\n            )\n            task.save()\n            return redirect('task-view', pk=task.id)\n\n    return render(request, 'task_create.html', context={'form': form})\n\n\ndef task_update_view(request, pk):\n    task = get_object_or_404(Task, id=pk)\n\n    if request.method == 'GET':\n        form = TaskForm(initial={\n            'title': task.title,\n            'status': task.status,\n            'up_to': task.up_to,\n            'description': task.description\n        })\n        return render(request, 'task_update.html', context={'form': form,\n                                                            'task': task})\n\n    elif request.method == 'POST':\n        form = TaskForm(\n            data=request.POST)\n        if form.is_valid():\n            task.title = form.cleaned_data.get(\"title\")\n            task.status = form.cleaned_data.get(\"status\")\n            task.up_to = form.cleaned_data.get(\"up_to\")\n            task.description = form.cleaned_data.get(\"description\")\n            task.save()\n            return redirect('task-view',\n                            pk=task.id)\n\n    return render(request, 'task_create.html', context={'form': form,\n                                                        'task': task})\n\n\ndef task_delete_view(request, pk):\n    task = get_object_or_404(Task, id=pk)\n\n    if request.method == 'GET':\n        return render(request, 'task_delete.html', context={'task': task})\n    elif request.method == 'POST':\n        task.delete()\n        return redirect('task-list')\n\n\ndef tasks_delete_all_view(request):\n    
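# Bulk delete: removes every Task without a confirmation step.\n    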
Task.objects.all().delete()\n    return render(request, 'index.html')","sub_path":"django_app/to_do_list/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"488949845","text":"from selenium import webdriver\nimport os\nimport time\nimport json\n# wd = webdriver.Chrome()\n# wd.get(\"https://passport.damai.cn/login?ru=https%3A%2F%2Fwww.damai.cn%2F\")\n# wd.switch_to.frame(\"alibaba-login-box\")  # switch into the login iframe\n# wd.find_element_by_id(\"fm-login-id\").send_keys(\"15002449401\")\n# wd.find_element_by_id(\"fm-login-password\").send_keys(\"qq13889303016\")\n\n\ndef browser_initial():\n    \"\"\"\n    Initialize the browser.\n    \"\"\"\n    global browser\n    os.chdir('E:\\\\pythonwork')\n    browser = webdriver.Chrome()\n    log_url = 'https://passport.damai.cn/login?ru=https%3A%2F%2Fwww.damai.cn%2F'\n    return log_url, browser\n\n\ndef get_cookies(log_url, browser):\n    \"\"\"\n    Get the cookies and save them locally.\n    \"\"\"\n    browser.get(log_url)\n    time.sleep(15)  # wait for the QR code to be scanned\n    dictCookies = browser.get_cookies()  # get the cookies as a list\n    print(dictCookies)\n    jsonCookies = json.dumps(dictCookies)  # serialize to a string for saving\n\n    with open('damai_cookies.txt', 'w') as f:\n        f.write(jsonCookies)\n    print('Cookies saved successfully!')\n    # after a successful save, open the ticket page\n    browser = get_browser()\n    # log_damai(browser)\n\ndef get_browser():\n    \"\"\"\n    Open the Damai ticket page in the browser (not logged in yet).\n    \"\"\"\n    os.chdir('E:\\\\pythonwork')\n    # browser = webdriver.Chrome()\n    browser.get(\n        'https://detail.damai.cn/item.htm?spm=a2oeg.search_category.0.0.8778f91as7xLdc&id=610870234751&clicktitle=2020%E5%BC%A0%E6%9D%B0%E3%80%8C%E6%9C%AA%C2%B7LIVE%E3%80%8D%E5%B7%A1%E5%9B%9E%E6%BC%94%E5%94%B1%E4%BC%9A%20%E5%90%88%E8%82%A5%E7%AB%99')\n    # return browser\n\n\ndef log_damai(browser):\n    \"\"\"\n    Load cookies from disk and refresh the page to end up logged in.\n    \"\"\"\n    with open('damai_cookies.txt', 'r', encoding='utf8') as f:\n        listCookies = json.loads(f.read())\n\n    # add the cookies to the browser\n    for cookie in listCookies:\n        cookie_dict = {\n            'domain': '.damai.cn',\n            'name': cookie.get('name'),\n            'value': cookie.get('value'),\n            \"expires\": '',\n            'path': '/',\n            'httpOnly': False,\n            'HostOnly': False,\n            'Secure': False\n        }\n        browser.add_cookie(cookie_dict)\n    browser.refresh()  # refresh the page so the cookies take effect\n\n\nif __name__ == \"__main__\":\n    tur = browser_initial()\n    get_cookies(tur[0], tur[1])","sub_path":"damai.py","file_name":"damai.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"610901647","text":"\n\nfrom behave import given,when,then,step\nimport requests\n\nglobal_general_variables = {}\nhttp_request_header = {}\nhttp_request_body = {}\nhttp_request_url_query_param = {}\n\n\n@given(u'Set basic web application url is \"{basic_app_url}\"')\ndef step_impl(context, basic_app_url):\n    global_general_variables['basic_application_URL'] = basic_app_url\n\n\n@given(u'Set basic user details as \"{particular}\" and \"{value}\" below')\ndef step_impl(context, particular, value):\n    for row in context.table:\n        temp_value = row['value']\n        global_general_variables[row['particular']] = temp_value\n        if 'empty' in temp_value:\n            global_general_variables[row['particular']] = ''\n\n\n@when(u'Set HEADER param request content type as \"{header_content_type}\"')\ndef step_impl(context, header_content_type):\n    http_request_header['content-type'] = header_content_type\n\n\n@when(u'Set HEADER param response accept type as \"{header_accept_type}\"')\ndef step_impl(context, header_accept_type):\n    
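# Store the Accept header; it is sent with each request raised later.\n    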
http_request_header['Accept'] = header_accept_type\n\n\n@given(u'Set GET api endpoint as \"{get_api_endpoint}\"')\ndef step_impl(context, get_api_endpoint):\n    global_general_variables['GET_api_endpoint'] = get_api_endpoint\n\n\n@when(u'Set Query param as \"{query_param}\"')\ndef step_impl(context, query_param):\n    if 'empty' in query_param:\n        http_request_url_query_param.clear()\n    else:\n        http_request_url_query_param.clear()\n        http_request_url_query_param['signout_emailid'] = global_general_variables['email']\n        http_request_url_query_param['session_id'] = global_general_variables['latest_session_key']\n\n\n@when(u'Raise \"{http_request_type}\" HTTP request')\ndef step_impl(context, http_request_type):\n    url_temp = global_general_variables['basic_application_URL']\n    if 'GET' == http_request_type:\n        url_temp += global_general_variables['GET_api_endpoint']\n        http_request_body.clear()\n        global_general_variables['response_full'] = requests.get(url_temp,\n                                                                 headers=http_request_header,\n                                                                 params=http_request_url_query_param,\n                                                                 data=http_request_body)\n    elif 'POST' == http_request_type:\n        url_temp += global_general_variables['POST_api_endpoint']\n        http_request_url_query_param.clear()\n        global_general_variables['response_full'] = requests.post(url_temp,\n                                                                  headers=http_request_header,\n                                                                  params=http_request_url_query_param,\n                                                                  data=http_request_body)\n    elif 'PUT' == http_request_type:\n        url_temp += global_general_variables['PUT_api_endpoint']\n        http_request_url_query_param.clear()\n        global_general_variables['response_full'] = requests.put(url_temp,\n                                                                 headers=http_request_header,\n                                                                 params=http_request_url_query_param,\n                                                                 data=http_request_body)\n    elif 'DELETE' == http_request_type:\n        url_temp += global_general_variables['DELETE_api_endpoint']\n        http_request_body.clear()\n        global_general_variables['response_full'] = requests.delete(url_temp,\n                                                                    headers=http_request_header,\n                                                                    params=http_request_url_query_param,\n                                                                    data=http_request_body)\n\n\n@then(u'Valid HTTP response should be received')\ndef step_impl(context):\n    if global_general_variables['response_full'] is None:\n        assert False, 'Null response received'\n\n\n@then(u'Response http code should be {expected_response_code:d}')\ndef step_impl(context, expected_response_code):\n    global_general_variables['expected_response_code'] = expected_response_code\n    actual_response_code = global_general_variables['response_full'].status_code\n    if str(actual_response_code) != str(expected_response_code):\n        print(str(global_general_variables['response_full'].json()))\n        assert False, '***ERROR: Following unexpected error response code received: ' + str(actual_response_code)\n\n\n@then(u'Response HEADER content type should be \"{expected_response_content_type}\"')\ndef step_impl(context, expected_response_content_type):\n    global_general_variables['expected_response_content_type'] = expected_response_content_type\n    actual_response_content_type = global_general_variables['response_full'].headers['Content-Type']\n    if expected_response_content_type not in actual_response_content_type:\n        assert False, '***ERROR: Following unexpected error response content type received: ' + actual_response_content_type\n\n\n@then(u'Response BODY should not be null or empty')\ndef step_impl(context):\n    if global_general_variables['response_full'] is None:\n        assert False, '***ERROR: Null or none response body received'\n\n\n@then(u'Response BODY parsing for \"{body_parsing_for}\" should be successful')\ndef step_impl(context, body_parsing_for):\n    current_json = 
global_general_variables['response_full'].json()\n    if 'GET__signup' == body_parsing_for:\n        print('Activity status : ' + current_json['Activity status'])\n        print('Additional message : ' + current_json['Additional message'])\n        print('Links : ')\n        print(' Actual signup : ' + current_json['Links'].get('Actual signup'))\n        print(' Link documentation : ' + current_json['Links'].get('Link documentation'))\n        print('Payload : ')\n        print(' signup_emailid : ' + current_json['Payload'].get('signup_emailid'))\n        print(' signup_password : ' + current_json['Payload'].get('signup_password'))\n        print(' signup_firstname : ' + current_json['Payload'].get('signup_firstname'))\n        print(' signup_lastname : ' + current_json['Payload'].get('signup_lastname'))\n        print(' signup_gender : ' + current_json['Payload'].get('signup_gender'))\n        print(' signup_secret_question_1 : ' + current_json['Payload'].get('signup_secret_question_1'))\n        print(' signup_secret_question_2 : ' + current_json['Payload'].get('signup_secret_question_2'))\n        print(' signup_secret_question_1_answer : ' + current_json['Payload'].get('signup_secret_question_1_answer'))\n        print(' signup_secret_question_2_answer : ' + current_json['Payload'].get('signup_secret_question_2_answer'))\n    elif 'POST__signup' == body_parsing_for:\n        print('Activity status : ' + current_json['Activity status'])\n        print('Additional message : ' + current_json['Additional message'])\n        global_general_variables['activation_key'] = current_json['Payload']\n        print('Payload or activation key :\\n' + global_general_variables['activation_key'])\n    elif 'PUT__modify_account_profile_details' == body_parsing_for:\n        print('Activity status : ' + current_json['Activity status'])\n        print('Additional message : ' + current_json['Additional message'])\n        print('Payload : ' + current_json['Payload'])\n    elif 'DELETE__signout' == body_parsing_for:\n        print('Activity status : ' + current_json['Activity status'])\n        print('Additional message : ' + current_json['Additional message'])\n        print('Payload : ' + current_json['Payload'])\n        global_general_variables['session id'] = ''\n","sub_path":"RESTAPI/SimpleAPITesting/features/steps/step_def_CRUD.py","file_name":"step_def_CRUD.py","file_ext":"py","file_size_in_byte":8526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"52620385","text":"# coding=utf-8\n\"\"\"Authors: Łukasz Opioła, Konrad Zemek\nCopyright (C) 2015 ACK CYFRONET AGH\nThis software is released under the MIT license cited in 'LICENSE.txt'\n\nBrings up a set of appmock instances.\n\"\"\"\n\nimport copy\nimport json\nimport os\nimport random\nimport string\nfrom timeouts import *\n\nfrom . 
import common, docker, dns, cluster_manager, worker\n\n\ndef domain(appmock_instance, uid):\n \"\"\"Formats domain for an appmock instance.\n It is intended to fake OP or OZ domain.\n \"\"\"\n return common.format_hostname(appmock_instance, uid)\n\n\ndef appmock_hostname(node_name, uid):\n \"\"\"Formats hostname for a docker hosting appmock.\n NOTE: Hostnames are also used as docker names!\n \"\"\"\n return common.format_hostname(node_name, uid)\n\n\ndef appmock_erl_node_name(node_name, uid):\n \"\"\"Formats erlang node name for a vm on appmock docker.\n \"\"\"\n hostname = appmock_hostname(node_name, uid)\n return common.format_erl_node_name('appmock', hostname)\n\n\ndef _tweak_config(config, appmock_node, appmock_instance, uid):\n cfg = copy.deepcopy(config)\n cfg['nodes'] = {'node': cfg['nodes'][appmock_node]}\n mocked_app = 'none'\n if 'mocked_app' in cfg['nodes']['node']:\n mocked_app = cfg['nodes']['node']['mocked_app']\n\n # Node name depends on mocked app, if none is specified,\n # default appmock_erl_node_name will be used.\n node_name = {\n 'cluster_manager': cluster_manager.cm_erl_node_name(appmock_node,\n appmock_instance,\n uid),\n 'op_worker': worker.worker_erl_node_name(appmock_node,\n appmock_instance,\n uid),\n 'oz_worker': worker.worker_erl_node_name(appmock_node, appmock_instance, uid)\n }.get(mocked_app, appmock_erl_node_name(appmock_node, uid))\n\n if 'vm.args' not in cfg['nodes']['node']:\n cfg['nodes']['node']['vm.args'] = {}\n vm_args = cfg['nodes']['node']['vm.args']\n vm_args['name'] = node_name\n # If cookie is not specified, set random cookie\n # so the node does not try to connect to others\n if 'setcookie' not in vm_args:\n vm_args['setcookie'] = ''.join(\n random.sample(string.ascii_letters + string.digits, 16))\n\n return cfg\n\n\ndef _node_up(image, bindir, config, config_path, dns_servers, logdir):\n node_name = config['nodes']['node']['vm.args']['name']\n (name, sep, hostname) = node_name.partition('@')\n\n sys_config = config['nodes']['node']['sys.config']['appmock']\n # can be an absolute path or relative to gen_dev_args.json\n app_desc_file_path = sys_config['app_description_file']\n app_desc_file_name = os.path.basename(app_desc_file_path)\n app_desc_file_path = os.path.join(common.get_file_dir(config_path),\n app_desc_file_path)\n\n # file_name must be preserved as it must match the Erlang module name\n sys_config['app_description_file'] = '/tmp/' + app_desc_file_name\n\n command = '''mkdir -p /root/bin/node/log/\nbindfs --create-for-user={uid} --create-for-group={gid} /root/bin/node/log /root/bin/node/log\nset -e\ncat <<\"EOF\" > /tmp/{app_desc_file_name}\n{app_desc_file}\nEOF\ncat <<\"EOF\" > /tmp/gen_dev_args.json\n{gen_dev_args}\nEOF\nescript bamboos/gen_dev/gen_dev.escript /tmp/gen_dev_args.json\n/root/bin/node/bin/appmock console\nsleep 5''' # Add sleep so logs can be chowned\n command = command.format(\n uid=os.geteuid(),\n gid=os.getegid(),\n app_desc_file_name=app_desc_file_name,\n app_desc_file=open(app_desc_file_path, 'r').read(),\n gen_dev_args=json.dumps({'appmock': config}))\n\n bindir = os.path.abspath(bindir)\n volumes = ['/root/bin', (bindir, bindir, 'ro')]\n\n if logdir:\n logdir = os.path.join(os.path.abspath(logdir), hostname)\n volumes.extend([(logdir, '/root/bin/node/log', 'rw')])\n\n container = docker.run(\n image=image,\n name=hostname,\n hostname=hostname,\n detach=True,\n interactive=True,\n tty=True,\n workdir=bindir,\n volumes=volumes,\n dns_list=dns_servers,\n privileged=True,\n command=command)\n\n return container, {\n 
'docker_ids': [container],\n        'appmock_nodes': [node_name]\n    }\n\n\ndef _ready(node):\n    node_ip = docker.inspect(node)['NetworkSettings']['IPAddress']\n    return common.nagios_up(node_ip, '9999')\n\n\ndef up(image, bindir, dns_server, uid, config_path, logdir=None):\n    config = common.parse_json_config_file(config_path)\n    input_dir = config['dirs_config']['appmock']['input_dir']\n    dns_servers, output = dns.maybe_start(dns_server, uid)\n\n    for appmock_instance in config['appmock_domains']:\n        gen_dev_cfg = {\n            'config': {\n                'input_dir': input_dir,\n                'target_dir': '/root/bin'\n            },\n            'nodes': config['appmock_domains'][appmock_instance]['appmock']\n        }\n\n        tweaked_configs = [_tweak_config(gen_dev_cfg, appmock_node,\n                                         appmock_instance, uid)\n                           for appmock_node in gen_dev_cfg['nodes']]\n\n        include_domain = False\n        appmock_ips = []\n        appmocks = []\n        for cfg in tweaked_configs:\n            appmock_id, node_out = _node_up(image, bindir, cfg,\n                                            config_path, dns_servers, logdir)\n            appmocks.append(appmock_id)\n            if 'mocked_app' in cfg['nodes']['node']:\n                mocked_app = cfg['nodes']['node']['mocked_app']\n                if mocked_app == 'op_worker' or mocked_app == 'oz_worker':\n                    include_domain = True\n            appmock_ips.append(common.get_docker_ip(appmock_id))\n            common.merge(output, node_out)\n\n        common.wait_until(_ready, appmocks, APPMOCK_WAIT_FOR_NAGIOS_SECONDS)\n\n        if include_domain:\n            domains = {\n                'domains': {\n                    domain(appmock_instance, uid): {\n                        'ns': [],\n                        'a': appmock_ips\n                    }\n                }\n            }\n            common.merge(output, domains)\n\n    # Make sure the domains are added to the dns server\n    dns.maybe_restart_with_configuration(dns_server, uid, output)\n    return output\n","sub_path":"bamboos/docker/environment/appmock.py","file_name":"appmock.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"450712129","text":"import raritan.rpc\n\n\nclass Structure(object):\n    def __str__(self):\n        l = max([len(e) for e in self.elements])\n        pretty = \"\\n\".join(\n            [\n                raritan.rpc.Utils.indent(\n                    \"* %-*s = %s\" % (l, e, raritan.rpc.Utils.rprint(getattr(self, e))),\n                    4,\n                )\n                for e in self.elements\n            ]\n        )\n        return \"%s:\\n%s\" % (raritan.rpc.TypeInfo.typeBaseName(self.idlType), pretty)\n\n    def __eq__(self, other):\n        return (\n            other is not None\n            and self.idlType == other.idlType\n            and all([getattr(self, e) == getattr(other, e) for e in self.elements])\n        )\n","sub_path":"raritan/rpc/Structure.py","file_name":"Structure.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"146267333","text":"#Author: linzebing\nfrom pylab import *\nfp = open('roc.txt','r')\ntitle('ROC Curve')\ngrid(True)\nxlim(-0.2,1.2)\nylim(-0.2,1.2)\nTPR = []\nFPR = []\nwhile (1):\n\tt = fp.readline()\n\tif (not t):\n\t\tbreak\n\tTPR.append(float(fp.readline()[:-1]))\n\tFPR.append(float(fp.readline()[:-1]))\nlength = len(TPR)\nfor i in range(length):\n\tscatter([FPR[i],],[TPR[i],],50,color = 'green')\nplot(FPR,TPR)\nxlabel('False Positive Rate')\nylabel('True Positive Rate')\nshow()","sub_path":"liblinear_p1/plotROC.py","file_name":"plotROC.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"276365058","text":"from tkinter import *\r\nfrom tkinter import font\r\nimport tkinter.ttk\r\nimport tkinter.messagebox\r\nimport requests\r\nimport json\r\nimport os\r\nimport sys\r\nimport urllib\r\nimport urllib.request\r\nfrom io 
import BytesIO\r\nfrom PIL import Image,ImageTk\r\nimport webbrowser\r\n\r\n\r\n\r\nclass SearchMovie:\r\n    def __init__(self):\r\n        global frame2\r\n        self.mainWnd = frame2\r\n        self.movieCnt = 0\r\n\r\n        self.mainWnd.mainloop()\r\n\r\n    def search(self):\r\n        global movieNmEt\r\n        global movieListbox\r\n        self.strSearch = movieNmEt.get()\r\n        movieListbox.delete(0, END)\r\n\r\n        # Query the Naver open API\r\n        client_id = \"tvo5aUWG9rwBq1YRMqyJ\"\r\n        client_secret = \"40VkT1fuAS\"\r\n        header_parms ={\"X-Naver-Client-Id\":client_id,\"X-Naver-Client-Secret\":client_secret}\r\n        url = f\"https://openapi.naver.com/v1/search/movie.json?query={self.strSearch}\"\r\n        res=requests.get(url,headers=header_parms)\r\n\r\n        self.Alldata = res.json()\r\n        self.movieCnt = len(self.Alldata['items'])\r\n        self.title = []\r\n        self.naverlink = []\r\n        self.image = []\r\n        self.date = []\r\n        self.director = []\r\n        self.actors = []\r\n        self.rating = []\r\n\r\n        for i in range(self.movieCnt):\r\n            self.title.append(self.Alldata['items'][i]['title'].strip('</b>'
).replace('<b>','').replace('</b>',''))\r\n            self.naverlink.append(self.Alldata['items'][i]['link'])\r\n            self.image.append(self.Alldata['items'][i]['image'])\r\n            self.date.append(self.Alldata['items'][i]['pubDate'])\r\n            self.director.append(self.Alldata['items'][i]['director'].split('|')[0])\r\n            self.actors.append(self.Alldata['items'][i]['actor'].replace('|', ', '))\r\n            self.rating.append(float(self.Alldata['items'][i]['userRating']))\r\n        self.showTitle()\r\n\r\n    def showTitle(self):\r\n        for i in range(self.movieCnt):\r\n            movieListbox.insert(i, self.title[i])\r\n\r\n    def showInfo(self):\r\n        global labelDate\r\n        global directorL\r\n        global actorsL\r\n        global labelRate\r\n        self.indexInfo = movieListbox.curselection()[0]\r\n        labelDate.config(text=self.date[self.indexInfo])\r\n        directorL.config(text=self.director[self.indexInfo])\r\n        strLen = len(self.actors[self.indexInfo])\r\n        if strLen > 17:\r\n            begStr = self.actors[self.indexInfo][0:17]\r\n            midStr = self.actors[self.indexInfo][17:]\r\n            actorsL.config(text=begStr)\r\n            #self.labelActors2.config(text=midStr)\r\n        else:\r\n            actorsL.config(text=self.actors[self.indexInfo])\r\n            #self.labelActors2.config(text=' ')\r\n\r\n        labelRate.config(text=self.rating[self.indexInfo])\r\n\r\n        # \"Open in Naver\" link\r\n        self.linkL = Label(self.mainWnd, text='네이버로 열기', cursor='hand2')\r\n        self.linkL.pack()\r\n        self.linkL.place(x=320,y=180)\r\n        self.linkL.bind(\"<Button-1>\", lambda e: self.callback(self.naverlink[self.indexInfo]))\r\n\r\n        # Related news - query the Naver open API\r\n        client_id = \"tvo5aUWG9rwBq1YRMqyJ\"\r\n        client_secret = \"40VkT1fuAS\"\r\n        header_parms ={\"X-Naver-Client-Id\":client_id,\"X-Naver-Client-Secret\":client_secret}\r\n        search_word = self.title[self.indexInfo] # search keyword\r\n        encode_type = 'json' # output format: json or xml\r\n        max_display = 3 # number of news items to fetch\r\n        sort = 'sim' # sort order of results: 'date' for recency, 'sim' for relevance\r\n        start = 1 # result offset\r\n\r\n        url = f\"https://openapi.naver.com/v1/search/news.{encode_type}?query={search_word}&display={str(int(max_display))}&sort={sort}\"\r\n        res=requests.get(url,headers=header_parms)\r\n        datas = res.json()\r\n        links = datas['items']\r\n        self.link = []\r\n        for i in links:\r\n            self.link.append(i['link'])\r\n        for i in range(max_display):\r\n            string = '관련뉴스 ' + str(i+1)\r\n            self.linkL = Label(self.mainWnd, text=string, cursor='hand2')\r\n            self.linkL.pack()\r\n            self.linkL.place(x=410,y=120 + i * 30)\r\n            self.linkL.bind(\"<Button-1>\", lambda e, i=i: self.callback(self.link[i]))\r\n\r\n\r\n        # Query the KOFIC (Korean Film Council) open API\r\n        dayOfficeURL = \"http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieList.json?key=edfd0508a0320efa8abbe1eeba097a94&movieNm=\"\r\n        dayOfficeURL += self.title[self.indexInfo]\r\n        res = requests.get(dayOfficeURL)\r\n        text = res.text\r\n        d = json.loads(text)\r\n        code = []\r\n        name = []\r\n        genre = []\r\n        for b in d['movieListResult']['movieList']:\r\n            #if b['movieNm'] == self.title[self.indexInfo]:\r\n            code.append(b['openDt'])\r\n            name.append(b['movieNm'])\r\n            genre.append(b['genreAlt'])\r\n        self.labelGenre.config(text=genre)\r\n\r\n\r\n        # Display the movie poster image\r\n        if len(self.image) == 0:\r\n            return\r\n\r\n        url = self.image[self.indexInfo]\r\n        with urllib.request.urlopen(url) as u:\r\n            raw_data=u.read()\r\n\r\n        im=Image.open(BytesIO(raw_data))\r\n        global image2\r\n        image2=ImageTk.PhotoImage(im, master=self.mainWnd)\r\n\r\n        imgL = Label(self.mainWnd,height=100,width=100)\r\n        imgL.pack()\r\n        imgL.place(x=200,y=100)\r\n        imgL.config(image=image2)\r\n\r\n    def callback(self, url):\r\n        
webbrowser.open_new(url)\r\n","sub_path":"SearchMovie.py","file_name":"SearchMovie.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"514296310","text":"#!/usr/bin/env python3\n# coding: utf-8\n#\n# Usage: \n# Author: wxm71(weixing.mei@aispeech.com)\n\nfrom typing import List\nfrom .vocab import Vocab\n\ndef tokenize(fname, vocab, update_vocab=False, bos=False, eos=False):\n '''\n tokenize given file\n '''\n if not vocab:\n vocab = Vocab()\n update_vocab = True\n\n ids = []\n with open(fname, 'r') as fid:\n for line in fid:\n arr = line.split()\n\n if bos: ids.append(Vocab.BOS_ID) \n\n for wrd in arr:\n wrd = wrd.strip()\n wid = vocab.add_wrd(wrd) if update_vocab else vocab.get_wrd(wrd)\n ids.append(wid)\n\n if eos: ids.append(Vocab.EOS_ID)\n return ids, vocab\n\n\ndef batchify(data, batch_size):\n \"\"\"Reshape data into (nbatch, batch_size)\"\"\"\n nbatch = data.shape[0] // batch_size\n data = data[:nbatch * batch_size]\n data = data.reshape((batch_size, nbatch)).T\n return data\n\n\n","sub_path":"example/rnn/nce/src/loader/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"162966706","text":"#!/usr/bin/env python\n\nn = input()\na = raw_input().split()\nc = 0\nfor i in range(n):\n for j in range(n-1, i-1, -1):\n m = a[:i].count('1') + a[i:j+1].count('0') + a[j+1:].count('1')\n if m>c:\n c=m\nprint(c)\n","sub_path":"contest11/bai2.py","file_name":"bai2.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"406823727","text":"from ...cereal import serializable\nfrom ..core import typecheck_promote\nfrom ..primitives import Str, Int, Float\nfrom ..containers import List, Struct, CollectionMixin\nfrom .geometry import Geometry\nfrom .mixins import GeometryMixin\n\nGeometryCollectionStruct = Struct[{\"type\": Str, \"geometries\": List[Geometry]}]\n\n\n@serializable(is_named_concrete_type=True)\nclass GeometryCollection(GeometryCollectionStruct, GeometryMixin, CollectionMixin):\n \"\"\"Proxy GeoJSON GeometryCollection constructed from a sequence of Geometries.\n\n Examples\n --------\n >>> from descarteslabs.workflows import Geometry, GeometryCollection\n >>> geom = Geometry(type=\"Point\", coordinates=[1, 2])\n >>> gc = GeometryCollection(geometries=[geom, geom, geom])\n >>> gc\n \n >>> gc.compute() # doctest: +SKIP\n GeometryCollectionResult(type=GeometryCollection,\n geometries=(\n GeometryResult(type=Point, coordinates=[1, 2]),\n GeometryResult(type=Point, coordinates=[1, 2]),\n GeometryResult(type=Point, coordinates=[1, 2])))\n\n >>> # constructing similar GeometryCollection to previous example, but using from_geojson\n >>> from descarteslabs.workflows import GeometryCollection\n >>> geojson = {\"type\": \"GeometryCollection\", \"geometries\": [{\"type\": \"Point\", \"coordinates\": [1, 2]}]}\n >>> gc = GeometryCollection.from_geojson(geojson)\n >>> gc.compute().__geo_interface__ # doctest: +SKIP\n {'type': 'GeometryCollection', 'geometries': [{'type': 'Point', 'coordinates': [1, 2]}]}\n \"\"\"\n\n _constructor = \"wf.GeometryCollection.create\"\n _element_type = Geometry\n\n def __init__(self, geometries, type=\"GeometryCollection\"):\n return super(GeometryCollection, self).__init__(\n type=type, geometries=geometries\n )\n\n @classmethod\n def from_geojson(cls, 
geojson):\n \"\"\"\n Construct a Workflows GeometryCollection from a GeoJSON mapping.\n\n Note that the GeoJSON must be relatively small (under 10MiB of serialized JSON).\n\n Parameters\n ----------\n geojson: Dict\n\n Returns\n -------\n ~descarteslabs.workflows.GeometryCollection\n\n Example\n -------\n >>> from descarteslabs.workflows import GeometryCollection\n >>> geojson = {\"type\": \"GeometryCollection\", \"geometries\":\n ... [{\"type\": \"Point\", \"coordinates\": [1, 2]}]}\n >>> gc = GeometryCollection.from_geojson(geojson)\n >>> gc.compute().__geo_interface__ # doctest: +SKIP\n {'type': 'GeometryCollection', 'geometries': [{'type': 'Point', 'coordinates': [1, 2]}]}\n \"\"\"\n try:\n return cls._from_apply(\n cls._constructor, type=geojson[\"type\"], geometries=geojson[\"geometries\"]\n )\n except KeyError:\n raise ValueError(\n \"Expected a GeoJSON mapping containing the fields 'type' and 'geometries', \"\n \"but got {}\".format(geojson)\n )\n\n @classmethod\n def _promote(cls, obj):\n if hasattr(obj, \"__geo_interface__\"):\n return cls.from_geo_interface(obj)\n if isinstance(obj, dict):\n return cls.from_geojson(obj)\n return super()._promote(obj)\n\n @typecheck_promote((Int, Float))\n def buffer(self, distance):\n \"\"\"\n Take the envelope of all the geometries, and buffer that by a given distance.\n\n Parameters\n ----------\n distance: Int or Float\n The distance (in decimal degrees) to buffer the area around the Geometry.\n\n Returns\n -------\n ~descarteslabs.workflows.Geometry\n\n Example\n -------\n >>> import descarteslabs.workflows as wf\n >>> geom = wf.Geometry(type=\"Point\", coordinates=[1, 2])\n >>> gc = wf.GeometryCollection(geometries=[geom, geom, geom])\n >>> gc.buffer(2)\n \n \"\"\"\n return Geometry._from_apply(\"wf.buffer\", self, distance)\n\n def length(self):\n \"\"\"Length is equivalent to the Python ``len`` operator.\n\n Returns\n -------\n Int\n An Int Proxytype\n\n Example\n -------\n >>> from descarteslabs.workflows import List, Int\n >>> my_list = List[Int]([1, 2, 3])\n >>> my_list.length().compute() # doctest: +SKIP\n 3\n \"\"\"\n return Int._from_apply(\"wf.length\", self)\n\n def __reversed__(self):\n return self._from_apply(\"wf.reversed\", self)\n","sub_path":"descarteslabs/workflows/types/geospatial/geometrycollection.py","file_name":"geometrycollection.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"353854840","text":"import sys\nfrom PyQt5.QtWidgets import QMainWindow,QAction,QApplication,qApp,QFileDialog,QMessageBox\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import uic\nimport scriptForGui as myfunc\nimport os\n\npathDir=os.getcwd()\nform_class = uic.loadUiType(pathDir+'\\\\gui.ui')[0]\n\nclass scriptPacker(QMainWindow, form_class) :\n def __init__(self) :\n super().__init__()\n self.setupUi(self)\n self.firstFileNumber=3520\n self.statusBar().showMessage('Ready')\n self.title = 'DigimonAdventure Text packer'\n self.setWindowTitle(self.title)\n\n self.btn_save.setDisabled(True)\n self.btn_insert.setDisabled(True)\n self.actioninsert.setDisabled(True)\n self.actionSave.setDisabled(True)\n self.actionSaveAs.setDisabled(True)\n self.actionSave_ID3841.setDisabled(True)\n self.actioninsert.setDisabled(True)\n self.actionNextText.setDisabled(True)\n self.actionPrevText.setDisabled(True)\n #self.action.setDisabled(True)\n #ListWidget의 시그널\n self.fileList.itemClicked.connect(self.clicked_script_name)\n 
self.fileList.currentItemChanged.connect(self.clicked_script_name)\n        self.scriptsList.itemClicked.connect(self.clicked_script)\n        self.scriptsList.currentItemChanged.connect(self.clicked_script)\n        # Connect buttons to their handlers\n        self.btn_open.clicked.connect(self.clicked_btn_open)\n        self.btn_insert.clicked.connect(self.insertText)\n        self.btn_save.clicked.connect(self.clicked_btn_save)\n        self.shiftJis.stateChanged.connect(self.chk_change)\n\n        # Connect menu actions\n        self.actionopen.triggered.connect(self.clicked_btn_open)\n        self.actionSave.triggered.connect(self.clicked_btn_save)\n        self.actionSaveAs.triggered.connect(self.save_as)\n        self.actionOpen_ID3841_as.triggered.connect(self.clicked_btn_open_spsi)\n        self.actionSave_ID3841.triggered.connect(self.clicked_save_spsi)\n        self.actioninsert.triggered.connect(self.insertText)\n        self.actionNextText.triggered.connect(self.Next_text)\n        self.actionPrevText.triggered.connect(self.Prev_text)\n        self.actionexit.triggered.connect(qApp.quit)\n        self.actionExtract_bin_from_ISO.triggered.connect(self.clicked_extract_iso)\n        self.actionImport_bin_to_ISO.triggered.connect(self.clicked_import_iso)\n        self.actionmode_Change.triggered.connect(self.chk_changing)\n        self.statusBar()\n\n    # Functions connected to the ListWidget signals\n    def chk_changing(self):\n        self.shiftJis.setChecked(False if self.shiftJis.isChecked() else True)\n    def chk_change(self):\n        self.fileList.setCurrentRow(self.fileList.currentRow()+1)\n        self.fileList.setCurrentRow(self.fileList.currentRow()-1)\n    def clicked_extract_iso(self):\n        binfilter=\"bin files (*.bin);;All files (*.*)\"\n        isofilter=\"iso files (*.iso);;All files (*.*)\"\n        pathIso=QFileDialog.getOpenFileName(self,'Open to...','./',isofilter,\"iso files (*.iso)\")[0]\n        if pathIso=='':return 0\n        pathBin=QFileDialog.getSaveFileName(self,'Save as...','./',binfilter,\"bin files (*.bin)\")[0]\n        if pathBin=='':return 0\n        myfunc.dataExtractorForISO(pathIso,pathBin)\n    \n    def clicked_import_iso(self):\n        myfilter=\"iso files (*.iso);;All files (*.*)\"\n        binfilter=\"bin files (*.bin);;All files (*.*)\"\n        pathBin=QFileDialog.getOpenFileName(self,'Open to...','./',binfilter,\"bin files (*.bin)\")[0]\n        if pathBin=='':return 0\n        pathIso=QFileDialog.getSaveFileName(self,'Save as...','./',myfilter,\"iso files (*.iso)\")[0]\n        if pathIso=='':return 0\n        myfunc.dataImportForISO(pathIso,pathBin)\n\n    def clicked_save_spsi(self):\n        if self.switchMode!='spsi':\n            self.statusBar().showMessage('This is not a ID03831')\n            return\n        myfunc.IDspsi_Import(self.inf,self.texts)\n        #print(self.texts[0])\n        self.statusBar().showMessage('Save complete')\n        QMessageBox.information(self, 'Information', \"Save complete\", QMessageBox.Ok,QMessageBox.Ok)\n\n    def clicked_btn_save(self): \n        if self.switchMode!='script':\n            self.statusBar().showMessage('This is not a scriptfile')\n            return 0\n        myfunc.script_import_gui(self.headerList,self.dialogNum,self.texts,self.inf)\n        self.statusBar().showMessage('Save complete')\n        QMessageBox.information(self, 'Information', \"Save complete\", QMessageBox.Ok,QMessageBox.Ok)\n\n    def save_as(self): \n        if self.switchMode!='script':\n            self.statusBar().showMessage('This is not a scriptfile')\n            return 0 \n        myfilter=\"Bin files (*.bin);;All files (*.*)\"\n        savename=QFileDialog.getSaveFileName(self,'Save as...','./',myfilter,\"Bin files (*.bin)\")[0]\n        if savename=='':return 0\n        outf=open(savename,'wb+')\n        self.inf.seek(0)\n        outf.write(self.inf.read())\n        outf.seek(0)\n        myfunc.script_import_gui(self.headerList,self.dialogNum,self.texts,outf)\n        outf.close()\n        self.statusBar().showMessage('Save complete')\r\n        
QMessageBox.information(self, 'Information', \"Save complete\", QMessageBox.Ok,QMessageBox.Ok)\n        \n    def insertText(self):\n        self.typingText=self.editText.toPlainText()\n        self.currentText.setPlainText(self.typingText)\n        currentRow=self.scriptsList.currentRow()\n        self.scriptsList.takeItem(currentRow)\n        if self.switchMode=='spsi':\n            self.scriptsList.insertItem(currentRow,self.typingText)\n            self.texts[currentRow]=self.typingText\n            self.statusBar().showMessage('Insert complete')\n\n        else:\n            currentRow=str(currentRow)\n            for j in range(4):\n                if len(str(currentRow))<3:\n                    currentRow='0'+currentRow\n            self.scriptsList.insertItem(int(currentRow),currentRow+'. '+self.typingText)\n            iNum=sum(self.dialogNum[0:self.scriptName])+int(currentRow)\n\n            self.texts[iNum]=myfunc.str_to_bin(self.typingText,1,1 if self.shiftJis.isChecked() else 0)\n            self.statusBar().showMessage('Insert complete')\n\n    def clicked_script_name(self):\n        while True:\n            try:\n                self.scriptName=int(self.fileList.currentItem().text())-self.firstFileNumber\n                break\n            except:\n                break\n        num=0\n        self.currentText.clear()\n        self.editText.clear()\n        self.scriptsList.clear()\n        self.speakerName.clear()\n        self.listName=[]\n        if self.switchMode=='spsi':\n            for i in range(len(self.texts)):\n                self.scriptsList.addItem(self.texts[i])\n        else:\n            for i in range(self.dialogNum[self.scriptName]):\n                num=str(num)\n                for j in range(4):\n                    if len(str(num))<3:\n                        num='0'+num\n                self.iNum=i+sum(self.dialogNum[0:self.scriptName])\n                self.scriptsList.addItem(num+'. '+myfunc.str_to_bin(self.texts[self.iNum],2,1 if self.shiftJis.isChecked() else 0))\n                self.listName.append(self.speakerAndDialogs[0][self.iNum])\n\n                num=int(num)+1\n        self.scriptsList.setCurrentRow(0)\n        self.statusBar().showMessage('Script number %d is loaded'%(self.scriptName+self.firstFileNumber))\n    \n    def clicked_script(self):\n        if self.switchMode=='spsi':\n            while True:\n                try:\n                    self.itemscript=self.scriptsList.currentItem().text()\n                    break\n                except :\n                    break\n            \n        else:\n            while True:\n                try:\n                    self.itemscript=self.scriptsList.currentItem().text()[5:]\n                    self.itemNumber=int(self.scriptsList.currentItem().text()[0:3])\n                    #print(self.scriptName) # prints the script number\n                    #print(self.scriptsList.currentItem().text()[0:3]) # first 3-digit number of the currently listed text\n                    break\n                except :\n                    break\n        #for i in self.texts:\n            #print(myfunc.str_to_bin(i,2))\n        num=0\n        #self.text.clear()\n        currentName=self.listName[self.itemNumber]\n        self.speakerName.setPlainText(currentName)\n        #print(self.listName[self.itemNumber])\n        self.currentText.setPlainText(self.itemscript)\n        self.editText.setPlainText(self.itemscript)\n        #print(self.currentText.toPlainText())\n\n    # Button functions\n    def clicked_btn_open(self):\n        binfilter=\"bin files (*.bin);;All files (*.*)\"\n        self.filename = QFileDialog.getOpenFileName(self,'Open to...','./',binfilter,\"bin files (*.bin)\")[0]\n        #self.filename='onlytext_test1.bin'\n        if self.filename=='':return 0 \n        try:self.inf.close()\n        except:pass\n        \n        self.setWindowTitle(self.title + ' - ' +os.path.basename(self.filename))\n        self.clearWindow()\n        self.headerList=[]\n        self.dialogNum=[]\n        self.texts=[]\n        self.clearWindow()\n        self.switchMode='script'\n\n        self.inf = open(self.filename,'rb+')\n        self.data=self.inf.read()\n\n        self.headerList=myfunc.find_header(b'\\x45\\x54\\x44\\x46',self.data)\n        self.dialogNum=myfunc.dialog_num(self.headerList,self.data)\n\n        for i in range(len(self.dialogNum)):\n            self.fileList.addItem(str(i+self.firstFileNumber))\n        self.speakerAndDialogs=myfunc.script_extract(self.headerList,self.dialogNum,self.inf) \r\n        
self.texts=self.speakerAndDialogs[1]\n self.fileList.setCurrentRow(0)\n \n self.enableMenus()\n\n\n def clicked_btn_open_spsi(self):\n try:self.inf.close()\n except:pass\n self.clearWindow()\n self.headerList=[]\n self.dialogNum=[]\n self.texts=[]\n self.switchMode='spsi'\n self.filename = QFileDialog.getOpenFileName(self)[0]\n if self.filename=='':return 0\n #self.filename = 'ID03841'\n self.fileList.addItem(self.filename)\n self.inf=open(self.filename,'rb+')\n self.texts=myfunc.IDspsi_Extract(self.inf)\n #self.scriptslist.addItem()\n self.fileList.setCurrentRow(0)\n def clearWindow(self):\n self.currentText.clear()\n self.editText.clear()\n self.scriptsList.clear()\n self.speakerName.clear()\n self.fileList.clear()\n def disableMenus(self):\n self.setdisabled(True)\n def enableMenus(self):\n self.btn_save.setEnabled(True)\n self.btn_insert.setEnabled(True)\n self.actioninsert.setEnabled(True)\n self.actionSave.setEnabled(True)\n self.actionSaveAs.setEnabled(True)\n self.actioninsert.setEnabled(True)\n self.actionNextText.setEnabled(True)\n self.actionPrevText.setEnabled(True)\n def Next_text(self):\n self.scriptsList.setCurrentRow(self.scriptsList.currentRow()+1)\n def Prev_text(self):\n self.scriptsList.setCurrentRow(self.scriptsList.currentRow()-1)\n\nif __name__ == \"__main__\" :\n print(\"Start gui for dgmAd psp scripter\")\n \n app = QApplication(sys.argv)\n myWindow = scriptPacker()\n myWindow.show()\n app.exec_()","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":11338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"133452580","text":"import argparse\nfrom pathlib import Path\n\nimport pandas as pd\nfrom analysis.report_utils import calculate_rate, get_date_input_file, match_input_files\n\n\ndef redact_and_round_column(df, col, decimals=-1):\n \"\"\"Redact values less-than or equal-to 10 and then round values to nearest 10.\"\"\"\n df[col] = df[col].apply(lambda x: x if x > 10 else 0)\n # `Series.round` introduces scaling and precision errors, meaning some numbers\n # aren't rounded. This isn't the case for the `round` builtin.\n df[col] = df[col].apply(round, ndigits=decimals)\n return df\n\n\ndef filter_data(df, filters):\n \"\"\"\n Filter a DataFrame based on specified columns and their corresponding filter values.\n\n Args:\n df (pd.DataFrame): The input DataFrame to be filtered.\n filters (dict): A dictionary where keys are column names and values are lists of\n the desired values for that column.\n\n Returns:\n pd.DataFrame: The filtered DataFrame.\n \"\"\"\n for column, filter_values in filters.items():\n if column in df.columns:\n df = df.loc[df[column].isin(filter_values), :]\n return df\n\n\ndef calculate_total_counts(df, date, group=None, group_value=None):\n \"\"\"\n Calculate the total counts for a DataFrame.\n\n Args:\n df (pd.DataFrame): The input DataFrame. Should contain columns \"event_measure\" and \"date\".\n date (str): The date of the input file.\n group (str, optional): The group category. Defaults to None.\n group_value (str, optional): The group value. 
Defaults to None.\n Returns:\n pd.DataFrame: A DataFrame (shape: (1, 5)) containing the total counts.\n \"\"\"\n if not {\"event_measure\", \"date\"}.issubset(df.columns):\n raise ValueError(\n \"The input DataFrame must contain 'event measure' and 'date' columns.\"\n )\n\n count = df[\"event_measure\"].sum()\n population = df[\"event_measure\"].count()\n\n row_dict = {\n \"date\": date,\n \"event_measure\": count,\n \"population\": population,\n \"group\": group,\n \"group_value\": group_value,\n }\n return pd.DataFrame.from_records([row_dict])\n\n\ndef calculate_group_counts(df, breakdown, date):\n \"\"\"\n Calculate the counts for a specified group.\n\n Args:\n df (pd.DataFrame): The input DataFrame. Should contain a column named \"breakdown\".\n breakdown (str): The name of the column to group by.\n date (str): The date of the input file.\n\n Returns:\n pd.DataFrame: A DataFrame containing the counts for the specified group.\n \"\"\"\n counts = (\n df.groupby(by=[breakdown])[\"event_measure\"]\n .agg([\"sum\", \"count\"])\n .reset_index()\n .rename(\n columns={\n breakdown: \"group_value\",\n \"sum\": \"event_measure\",\n \"count\": \"population\",\n }\n )\n )\n counts[\"date\"] = date\n counts[\"group\"] = breakdown\n\n # reorder the columns\n counts = counts[[\"date\", \"event_measure\", \"population\", \"group\", \"group_value\"]]\n return counts\n\n\ndef calculate_and_redact_values(df):\n \"\"\"\n Calculate the values for each group and redact where necessary.\n\n Args:\n df (pd.DataFrame): The input DataFrame. Should contain columns \"event_measure\", \"population\" and \"group\".\n\n Returns:\n pd.DataFrame: A DataFrame containing the calculated values.\n \"\"\"\n groups = df[\"group\"].unique()\n result = pd.DataFrame(columns=[\"group\", \"group_value\", \"value\"])\n for group in groups:\n group_df = df.loc[df[\"group\"] == group, :]\n\n if group == \"practice\":\n group_df.loc[:, \"value\"] = calculate_rate(\n group_df, \"event_measure\", \"population\"\n )\n else:\n group_df = redact_and_round_column(group_df, \"event_measure\", decimals=-1)\n group_df = redact_and_round_column(group_df, \"population\", decimals=-1)\n group_df.loc[:, \"value\"] = calculate_rate(\n group_df, \"event_measure\", \"population\"\n )\n group_df.loc[\n (group_df[\"event_measure\"] == 0) | (group_df[\"population\"] == 0),\n \"value\",\n ] = \"[Redacted]\"\n\n result = pd.concat([result, group_df], ignore_index=True)\n\n return result\n\n\ndef drop_redacted_rows(measure_df):\n \"\"\"\n Drop rows where the value is redacted.\n\n Args:\n measure_df (pd.DataFrame): Measure DataFrame. 
Should contain a \"group_value\" and \"value\" column.\n\n Returns:\n pd.DataFrame: A measure DataFrame where subgroups with >50% redacted values have been removed.\n \"\"\"\n\n for group_value in measure_df[\"group_value\"].unique():\n group_value_df = measure_df.loc[measure_df[\"group_value\"] == group_value, :]\n redacted_count = group_value_df.loc[group_value_df[\"value\"] == \"[Redacted]\", :][\n \"value\"\n ].count()\n total_count = group_value_df[\"value\"].count()\n if redacted_count / total_count > 0.5:\n measure_df = measure_df.loc[measure_df[\"group_value\"] != group_value, :]\n\n return measure_df\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--breakdowns\", action=\"append\", default=[], required=False)\n parser.add_argument(\"--input-dir\", type=Path, required=True)\n parser.add_argument(\"--output-dir\", type=Path, required=True)\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n breakdowns = args.breakdowns\n\n breakdowns.extend([\"practice\", \"event_1_code\", \"event_2_code\"])\n\n measure_df = pd.DataFrame(\n columns=[\"date\", \"event_measure\", \"population\", \"group\", \"group_value\"]\n )\n\n for file in Path(args.input_dir).iterdir():\n if match_input_files(file.name):\n filters = {\n \"sex\": [\"M\", \"F\"],\n \"age_band\": [\n \"0-5\",\n \"6-10\",\n \"11-17\",\n \"18-29\",\n \"30-39\",\n \"40-49\",\n \"50-59\",\n \"60-69\",\n \"70-79\",\n \"80+\",\n ],\n }\n date = get_date_input_file(file.name)\n file_path = str(file.absolute())\n df = pd.read_feather(file_path).pipe(filter_data, filters).assign(date=date)\n\n total_count = calculate_total_counts(\n df, date, group=\"total\", group_value=\"total\"\n )\n\n measure_df = pd.concat([measure_df, total_count], ignore_index=True)\n\n for breakdown in breakdowns:\n counts = calculate_group_counts(df, breakdown, date)\n\n measure_df = pd.concat([measure_df, counts], ignore_index=True)\n\n # sort by date\n\n measure_df = measure_df.sort_values(by=[\"group\", \"group_value\", \"date\"])\n\n measure_df = calculate_and_redact_values(measure_df)\n measure_df.to_csv(args.output_dir / \"measure_all.csv\", index=False)\n measure_for_deciles = measure_df.loc[measure_df[\"group\"] == \"practice\", :]\n measure_for_deciles.to_csv(\n args.output_dir / \"measure_practice_rate_deciles.csv\", index=False\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"analysis/measures.py","file_name":"measures.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"20350855","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 30 11:55:29 2020\n获取月线数据并入库\n@author: 李博\n\"\"\"\nimport os\nimport pandas as pd\nimport json\nimport datetime\nimport time\nfrom starlette.requests import Request\n#from fastapi import FastAPI\nfrom fastapi import APIRouter\nrouter = APIRouter()\nfrom starlette.templating import Jinja2Templates\n# MONGODB CONNECT\nimport tushare as ts\nts.set_token('78282dabb315ee578fb73a9b328f493026e97d5af709acb331b7b348')\npro = ts.pro_api()\ntoday=time.strftime('%Y%m%d',)\n#入库\nfrom pymongo import MongoClient\n#print (os.getcwd())\n#client = MongoClient('mongodb://112.12.60.2:27017')\nclient = MongoClient('mongodb://127.0.0.1:27017')\nmydb=client[\"ptest\"]\n\n#计算指定日期的前N天的时间戳\ndef get_day_time(n):\n #the_date = datetime.datetime(2018,11,10) #指定当前日期 2018-11-10\n the_date = datetime.datetime.now()\n #the_date_str = the_date.strftime('%Y%m%d')\n pre_date = the_date - 
datetime.timedelta(days=n-1)\n pre_date_str = pre_date.strftime('%Y%m%d')#将日期转换为指定的显示格式\n return pre_date_str\n\n#获取条件集合函数 参数 col 返回df\ndef get_col_df(col):\n mycollection=mydb[col]\n rs_col = mycollection.find()\n list_col = list(rs_col)\n #将查询结果转换为Df\n df_col = pd.DataFrame(list_col)\n #print (df_stockcode)\n return df_col\n\n\n\n#获取当前分类的成份股\ndef getstocks(ccode):\n df_stocks = pro.concept_detail(id=ccode)\n dict_stocks = df_stocks.to_dict(orient='records')\n #print (type(dict_stocks))\n stocks_items=[]\n for stock in dict_stocks:\n #print (type(stock))\n stockname = stock['name']\n stockcode = stock['ts_code']\n conceptname = stock['concept_name']\n #拼装json\n data = {\"stockcode\":stockcode,\"stockname\":stockname,\"conceptname\":conceptname}\n stocks_items.append(data) \n return stocks_items\n#stocks=getstocks('TS344')\n#print (stocks)\n\n#获取ts概念分类\n'''\ndef get_concept():\n df = pro.concept()\n #print (df)\n conceptstockslist = []\n for i in df['code']:\n stockslist = getstocks(i)\n conceptstockslist.append(stockslist)\n df['stocks'] = conceptstockslist\n print (df)\n df.to_csv('./data/concept/'+'concept.csv')\n'''\n\n\n\ndef get_concept():\n #获取概念列表\n df_concept = pro.concept()\n mycollection=mydb['concept_list']\n mycollection.remove()\n records = json.loads(df_concept.T.to_json()).values()\n mycollection.insert(records)\n #获取基本信息\n df_stockbasic = pro.stock_basic(exchange='', list_status='L', fields='ts_code,market,name,area,industry,list_date')\n df_stockbasic['stockname'] = df_stockbasic['name']\n #获取每日行情\n df_tradedate = pro.trade_cal(exchange='SSE', is_open='1',fileds='cal_date',start_date=get_day_time(10), end_date=get_day_time(0))\n lasttradeday = df_tradedate['cal_date'].tail(1).iloc[0]\n df_daily = pro.daily(trade_date=lasttradeday)\n df_daily['amount'] = df_daily['amount']/10000\n #获取每日指标\n df_daily_basic = pro.daily_basic(ts_code='', trade_date=lasttradeday, fields='ts_code,turnover_rate,volume_ratio,pe_ttm,pb,circ_mv')\n df_daily_basic['circ_mv'] = df_daily_basic['circ_mv']/10000\n for i in df_concept['code']:\n #获取概念所有个股\n df_conceptstocks = pro.concept_detail(id=i)\n df_temp = pd.merge(df_conceptstocks, df_stockbasic, how='left', on='ts_code')\n df = pd.merge(df_temp, df_daily_basic, how='left', on='ts_code')\n df2 = pd.merge(df, df_daily, how='left', on='ts_code')\n #df.to_csv('./data/concept/'+i+'_concept.csv')\n mycollection=mydb['concept_'+i]\n mycollection.remove()\n records = json.loads(df2.T.to_json()).values()\n mycollection.insert(records)\n print (i)\n\n\ndef toMongodb(collectionname,filename):\n mycollection=mydb[collectionname]\n mycollection.remove()\n path_df=open('./data/concept/'+filename+'.csv','r',encoding='UTF-8') \n df_csv = pd.read_csv(path_df)\n records = json.loads(df_csv.T.to_json()).values()\n mycollection.insert(records)\n\ndef get_data_concept():\n #获取基本信息\n df_stockbasic = pro.stock_basic(exchange='', list_status='L', fields='ts_code,name,area,industry')\n df_stockbasic = df_stockbasic.rename(columns={'name':'stockname'})\n df_concept_list = get_col_df('concept_list')\n df_data_concept = pd.DataFrame()\n for index, row in df_concept_list.iterrows():\n df_stockbasic_concept_stockslist = pd.DataFrame()\n concept_name = row['concept_name']\n concept_code = row['concept_code']\n concept_stockslist = row['stockslist']\n df_stockbasic_concept_stockslist = df_stockbasic[df_stockbasic.ts_code.isin(concept_stockslist)]\n df_stockbasic_concept_stockslist['concept_name'] = concept_name\n df_stockbasic_concept_stockslist['concept_code'] = concept_code\n 
print (index,concept_name,concept_code,len(df_stockbasic_concept_stockslist))\n df_data_concept = df_data_concept.append(df_stockbasic_concept_stockslist,ignore_index=True)\n return df_data_concept\n#df = get_data_concept()\n\n\n#获取单日内概念热点分析数据\ndef get_daily_analysis_category_concept(tradedate):\n #获取日线数据\n df_daily = pro.daily(trade_date = tradedate)\n df_concept_list = get_col_df('concept_list')\n df_concept_result = pd.DataFrame()\n for index, row in df_concept_list.iterrows():\n concept_name = row['concept_name']\n concept_code = row['concept_code']\n concept_stockslist = row['stockslist'] \n result_dict = {}\n df_group = pd.DataFrame()\n #GET GROUP\n df_group = df_daily[df_daily.ts_code.isin(concept_stockslist)]\n #涨跌个股数量统计\n stocks_count = len(df_group['ts_code'])\n stocks_up_count = len(df_group[df_group['pct_chg']>=0])\n stocks_down_count = len(df_group[df_group['pct_chg']<0]) \n #最大涨幅\n stocks_pct_chg_max = round(df_group['pct_chg'].max(),2)\n #最小涨幅\n stocks_pct_chg_min = round(df_group['pct_chg'].min(),2)\n #平均涨幅\n stocks_pct_chg_avg = round(df_group['pct_chg'].mean(),2)\n #涨停个股数量统计\n stocks_limit_count = len(df_group[df_group['pct_chg']>9.8])\n #上涨个股数比例\n stocks_up_count_ratio = round(stocks_up_count/stocks_count,2) \n result_dict['concept_code'] = concept_code\n result_dict['concept_name'] = concept_name\n result_dict['trade_date'] = df_group['trade_date'].head(1).iloc[0]\n result_dict['stockslist_count'] = str(len(df_group))\n result_dict['stockslist'] = df_group['ts_code'].tolist()\n result_dict['stocks_amount_total'] = df_group['amount'].sum()\n result_dict['stocks_vol_total'] = df_group['vol'].sum()\n result_dict['stocks_up_count'] = stocks_up_count\n result_dict['stocks_down_count'] = stocks_down_count\n result_dict['stocks_up_count_ratio'] = stocks_up_count_ratio\n result_dict['stocks_pct_chg_max'] = stocks_pct_chg_max\n result_dict['stocks_pct_chg_min'] = stocks_pct_chg_min\n result_dict['stocks_pct_chg_avg'] = stocks_pct_chg_avg \n result_dict['stocks_limit_count'] = stocks_limit_count \n df_concept_result = df_concept_result.append(result_dict,ignore_index=True)\n return df_concept_result\n\n#保存概念分类分析数据入库\ndef save_daily_analysis_category_concept_tradedatelist(startdate,enddate):\n df_tradedatelist = pro.trade_cal(exchange='SSE', is_open='1',fileds='cal_date',start_date=startdate, end_date=enddate)\n tradedatelist = df_tradedatelist['cal_date'].tolist()\n #定义文档名称\n mycol = mydb['daily_analysis_category_concept']\n mycol.remove()\n for i in tradedatelist: \n df = get_daily_analysis_category_concept(i)\n mycol.insert_many(df.to_dict('records'))\n print (i,df['trade_date'][0],'daily_analysis_category_concept:'+str(len(df)))\n\n#save_daily_analysis_category_concept_tradedatelist(20200101,20201231)\n\ntmp = Jinja2Templates(directory='./api/templates')\n@router.get('/update/concept/')\nasync def get_indexs(request:Request):\n get_concept()\n #toMongodb('concept','concept')\n return tmp.TemplateResponse('update_data.html',\n {'request':request\n })","sub_path":"getData/toMongodb_concept.py","file_name":"toMongodb_concept.py","file_ext":"py","file_size_in_byte":8447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"620967198","text":"import pytest\n\nfrom script006.kzheronkin import my_enumerate\n\n\ndef test_enumerator():\n inputted = [x for x in range(10)]\n tested, expected = my_enumerate(inputted), enumerate(inputted)\n with pytest.raises(StopIteration):\n while True:\n assert next(tested) == next(expected)\n\n\ndef 
test_enumerator_with_nonzero_start():\n    inputted = [x for x in range(10)]\n    start = 5\n    tested, expected = my_enumerate(inputted, start), enumerate(inputted, start)\n    with pytest.raises(StopIteration):\n        while True:\n            assert next(tested) == next(expected)","sub_path":"test/script006/kzheronkin/enumerator_test.py","file_name":"enumerator_test.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"573562987","text":"#\n# @lc app=leetcode.cn id=135 lang=python3\n#\n# [135] Candy\n#\n\n# @lc code=start\nclass Solution:\n    def candy(self, ratings: List[int]) -> int:\n        nums = [1 for _ in range(len(ratings))]\n        for i in range(1, len(ratings)):\n            if ratings[i] > ratings[i-1]:\n                nums[i] = max(nums[i], nums[i-1] + 1)\n\n        for i in range(len(ratings) - 2, -1, -1):\n            if ratings[i] > ratings[i+1] and nums[i] <= nums[i+1]:\n                nums[i] = max(nums[i], nums[i+1] + 1)\n\n        return sum(nums)\n\n# @lc code=end\n\n","sub_path":"src/135.分发糖果.py","file_name":"135.分发糖果.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"572734621","text":"'''\nPython test exercise\nSun Dayong\n'''\n\nimport pandas as pd\n\ndf = pd.read_csv('ds.csv')\n# The commented-out statement below splits out the data for itemid=32371602\n#gp = df.groupby('itemid').apply(lambda x: x.to_csv(str(x.name) + '.csv'))\nmydf = pd.read_csv('32371602.csv')\narr = mydf.values\nmyarr = arr[arr.argsort(0)[:, 2]] # sort by year-month\n# Growth rate calculation\n# using the following formula:\n# growth rate = (this month - last month) / last month\ncal_arr = myarr[:, 3]\ntemp = cal_arr[0]\nratelist = []\nfor i in cal_arr[1:]:\n    rate = (i - temp) / temp\n    ratelist.append(rate)\n    temp = i\n#ratelist=[(i-temp)/temp for i in myarr[1:]]\nfor i in range(len(ratelist)):\n    print('%i' % myarr[i + 1][2] + '的同比增长率:', end='')\n    print('%.2f' % (ratelist[i] * 100) + '%')\n","sub_path":"pandas_read_csv.py","file_name":"pandas_read_csv.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"194914751","text":"#!/usr/bin/env python\n\nimport ssl\nimport sys\nimport time\nimport random\nimport struct\nimport socket\nimport threading\nimport socketserver\n\nfrom dnslib import DNSRecord\nfrom settings import *\n\nclass BaseRequestHandler(socketserver.BaseRequestHandler):\n\n    def get_data(self):\n        raise NotImplementedError\n\n    def send_data(self, data):\n        raise NotImplementedError\n\n    def handle(self):\n        data = self.get_data()\n        try:\n            d = DNSRecord.parse(data)\n        except Exception as e:\n            print(\"ERROR: Invalid DNS request: {}\".format(e))\n        else:\n            upstream_response = self.dns_over_tls_query(data, random.choice(CLOUDFLARE_HOST), CLOUDFLARE_PORT, CLOUDFLARE_HOSTNAME)\n            self.send_data(upstream_response)\n\n    def dns_over_tls_query(self, request, host, port, hostname):\n        # !: network (= big-endian)\n        # H: unsigned short\n        # Could have used `>` (big endian)\n        # Prepend the 2-byte length prefix required for DNS over TCP/TLS\n        request = struct.pack(\"!H\", len(request)) + request\n        context = ssl.create_default_context()\n        context = ssl.SSLContext(ssl.PROTOCOL_TLS)\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        sock.settimeout(SOCKET_TIMEOUT)\n        conn = context.wrap_socket(sock, server_hostname=hostname)\n        response = \"\"\n        try:\n            conn.connect((host, port))\n        except socket.error as e:\n            print(\"Socket error: {}\".format(e))\n        except ssl.SSLError as e:\n            print(\"TLS error: {}\".format(e))\n        else:\n            conn.sendall(request)\n            # Get the first two octets (2 bytes - 16 
bits)\n            # They encode the length of the DNS response (RFC 7858 TCP framing).\n            lbytes = self.recv_socket(conn, 2)\n            if (len(lbytes) != 2):\n                raise ErrorMessage(\"recv() on socket failed.\")\n            resp_len, = struct.unpack('!H', lbytes)\n            response = self.recv_socket(conn, resp_len)\n        finally:\n            conn.close()\n        return response\n\n    def recv_socket(self, sock, num_octets):\n        response = b\"\"\n        octets_read = 0\n        while (octets_read < num_octets):\n            chunk = sock.recv(num_octets-octets_read)\n            chunklen = len(chunk)\n            if chunklen == 0:\n                return b\"\"\n            octets_read += chunklen\n            response += chunk\n        return response\n\n\nclass UDPHandler(BaseRequestHandler):\n\n    def get_data(self):\n        data, _ = self.request\n        return data\n\n    def get_socket(self):\n        _, socket = self.request\n        return socket\n\n    def send_data(self, data):\n        req_sock = self.get_socket()\n        req_sock.sendto(data, self.client_address)\n\n\nclass TCPHandler(BaseRequestHandler):\n\n    def get_data(self):\n        data= self.request.recv(1024)\n        length = struct.unpack(\"!H\", bytes(data[:2]))[0]\n        while len(data) - 2 < length:\n            new_data = self.request.recv(1024)\n            if not new_data:\n                break\n            data += new_data\n        return data[2:]\n\n    def send_data(self, data):\n        data = struct.pack(\"!H\", len(data)) + data\n        self.request.sendall(data)\n\nif __name__ == '__main__':\n    print(\"Starting DNS proxies...\")\n\n    servers = [\n        socketserver.ThreadingTCPServer(('', PORT), TCPHandler),\n        socketserver.ThreadingUDPServer(('', PORT), UDPHandler),\n    ]\n\n    for server in servers:\n        thread = threading.Thread(target=server.serve_forever)\n        thread.daemon = True\n        thread.start()\n        print(\"{0} Server loop running in thread: {1}\".format(server.RequestHandlerClass.__name__[:3], thread.name))\n\n    try:\n        while 1:\n            time.sleep(1)\n            sys.stderr.flush()\n            sys.stdout.flush()\n\n    except KeyboardInterrupt:\n        pass\n    finally:\n        for server in servers:\n            server.shutdown()\n","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"261970013","text":"import os\nimport pdb\nimport cv2\nimport time\nimport torch\nimport random\nimport scipy\nimport logging\nimport traceback\nimport numpy as np\nfrom datetime import datetime\n# from config import HOME\nfrom tensorboard_logger import log_value, log_images\nfrom torchnet.meter import ConfusionMeter\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.metrics import cohen_kappa_score, accuracy_score\nfrom matplotlib import pyplot as plt\nfrom pycm import ConfusionMatrix\n\nplt.switch_backend(\"agg\")\n\n\ndef logger_init(save_folder):\n    mkdir(save_folder)\n    logging.basicConfig(\n        filename=os.path.join(save_folder, \"log.txt\"),\n        filemode=\"a\",\n        level=logging.DEBUG,\n        format=\"%(asctime)s %(message)s\",\n        datefmt=\"%H:%M:%S\",\n    )\n    console = logging.StreamHandler()\n    logger = logging.getLogger(__name__)\n    logger.addHandler(console)\n    return logger\n\n\ndef predict(X, coef):\n    X_p = np.copy(X)\n    return (X_p > coef).astype('int')\n\n\ndef compute_score_inv(thresholds, predictions, targets):\n    predictions = predict(predictions, thresholds)\n    score = accuracy_score(targets, predictions)\n    return 1 - score\n\n\nclass Meter:\n    def __init__(self, phase, epoch, save_folder):\n        self.predictions = []\n        self.targets = []\n        self.phase = phase\n        self.epoch = epoch\n        self.save_folder = os.path.join(save_folder, \"logs\")\n        self.best_thresholds = 0.5\n\n    def update(self, targets, outputs):\n        '''targets, outputs are detached CUDA tensors'''\n        # 
get multi-label to single label\n        #targets = torch.sum(targets, 1) - 1 # no multilabel target in regression\n        targets = targets.type(torch.LongTensor).flatten()\n        #import pdb; pdb.set_trace()\n        outputs = torch.sigmoid(outputs).cpu().numpy() #.flatten() # [n, 1] -> [n]\n        outputs = predict(outputs, self.best_thresholds).flatten()\n        # outputs = torch.sum((outputs > 0.5), 1) - 1\n\n        #pdb.set_trace()\n        self.targets.extend(targets.tolist())\n        self.predictions.extend(outputs.tolist())\n        # self.predictions.extend(torch.argmax(outputs, dim=1).tolist()) #[2]\n\n    def get_best_thresholds(self):\n        '''Epoch over, let's get targets in np array [6]'''\n        self.targets = np.array(self.targets)\n\n        if self.phase == \"train\":\n            return self.best_thresholds\n\n        \"\"\"Used in the val phase of iteration, see [4]\"\"\"\n        self.predictions = np.array(self.predictions)\n        simplex = scipy.optimize.minimize(\n            compute_score_inv,\n            self.best_thresholds,\n            args=(self.predictions, self.targets),\n            method=\"nelder-mead\",\n        )\n        best_thresholds = simplex[\"x\"][0]\n        print(\"Best thresholds: %s\" % best_thresholds)\n        '''\n        NOT using best threshold\n        '''\n        return self.best_thresholds\n\n    def get_cm(self):\n        #pdb.set_trace()\n        thresholds = self.best_thresholds\n        self.predictions = predict(self.predictions, self.best_thresholds)\n        cm = ConfusionMatrix(self.targets, self.predictions)\n        best_acc = accuracy_score(self.targets, self.predictions)\n        return cm, best_acc\n\n\ndef print_time(log, start, string):\n    diff = time.time() - start\n    log(string + \": %02d:%02d\" % (diff // 60, diff % 60))\n\n\ndef adjust_lr(lr, optimizer):\n    for param_group in optimizer.param_groups[:-1]:\n        param_group[\"lr\"] = lr\n    return optimizer\n\n\ndef epoch_log(log, tb, phase, epoch, epoch_loss, meter, start):\n    diff = time.time() - start\n    cm, best_acc = meter.get_cm()\n    acc = cm.overall_stat[\"Overall ACC\"]\n    tpr = cm.overall_stat[\"TPR Macro\"] #[7]\n    ppv = cm.overall_stat[\"PPV Macro\"]\n    cls_tpr = cm.class_stat['TPR']\n    cls_ppv = cm.class_stat['PPV']\n    tpr = 0 if tpr == \"None\" else tpr # [8]\n    ppv = 0 if ppv == \"None\" else ppv\n    #pdb.set_trace()\n    print()\n    log(\n        \"%s %d | loss: %0.4f | ACC: %0.4f | TPR: %0.4f | PPV: %0.4f \\n\"\n        % (phase, epoch, epoch_loss, acc, tpr, ppv)\n    )\n    try:\n        cls_tpr = {x: \"%0.4f\" % y for x, y in cls_tpr.items()}\n        cls_ppv = {x: \"%0.4f\" % y for x, y in cls_ppv.items()}\n    except:\n        pass\n\n    log('Class TPR: %s' % cls_tpr)\n    log('Class PPV: %s' % cls_ppv)\n    log(cm.print_normalized_matrix())\n    #log(\"Time taken for %s phase: %02d:%02d \\n\", phase, diff // 60, diff % 60)\n\n    # tensorboard\n    logger = tb[phase]\n    logger.log_value(\"loss\", epoch_loss, epoch)\n    logger.log_value(\"ACC\", acc, epoch)\n    logger.log_value(\"Best_ACC\", best_acc, epoch)\n    logger.log_value(\"TPR\", tpr, epoch)\n    logger.log_value(\"PPV\", ppv, epoch)\n\n    # save pycm confusion\n    obj_path = os.path.join(meter.save_folder, f\"cm{phase}_{epoch}\")\n    cm.save_obj(obj_path, save_stat=True, save_vector=False)\n\n    return best_acc\n\n\ndef mkdir(folder):\n    if not os.path.exists(folder):\n        os.mkdir(folder)\n\n\ndef save_hyperparameters(trainer, remark):\n    hp_file = os.path.join(trainer.save_folder, \"parameters.txt\")\n    time_now = datetime.now()\n    augmentations = trainer.dataloaders['train'].dataset.transforms.transforms\n    # pdb.set_trace()\n    string_to_write = \\\n        f\"Time: {time_now}\\n\" + \\\n        f\"model_name: {trainer.model_name}\\n\" + \\\n        f\"train_df_name: {trainer.train_df_name}\\n\" + \\\n        f\"resume: {trainer.resume}\\n\" + \\\n        f\"pretrained: 
{trainer.pretrained}\\n\" + \\\n f\"pretrained_path: {trainer.pretrained_path}\\n\" + \\\n f\"folder: {trainer.folder}\\n\" + \\\n f\"fold: {trainer.fold}\\n\" + \\\n f\"total_folds: {trainer.total_folds}\\n\" + \\\n f\"num_samples: {trainer.num_samples}\\n\" + \\\n f\"sampling class weights: {trainer.class_weights}\\n\" + \\\n f\"size: {trainer.size}\\n\" + \\\n f\"top_lr: {trainer.top_lr}\\n\" + \\\n f\"base_lr: {trainer.base_lr}\\n\" + \\\n f\"num_workers: {trainer.num_workers}\\n\" + \\\n f\"batchsize: {trainer.batch_size}\\n\" + \\\n f\"momentum: {trainer.momentum}\\n\" + \\\n f\"mean: {trainer.mean}\\n\" + \\\n f\"std: {trainer.std}\\n\" + \\\n f\"start_epoch: {trainer.start_epoch}\\n\" + \\\n f\"batchsize: {trainer.batch_size}\\n\" + \\\n f\"augmentations: {augmentations}\\n\" + \\\n f\"criterion: {trainer.criterion}\\n\" + \\\n f\"optimizer: {trainer.optimizer}\\n\" + \\\n f\"remark: {remark}\\n\"\n\n with open(hp_file, \"a\") as f:\n f.write(string_to_write)\n print(string_to_write)\n\n\ndef seed_pytorch(seed=69):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\n\"\"\"Footnotes:\n\n[1]: https://stackoverflow.com/questions/21884271/warning-about-too-many-open-figures\n\n[2]: Used in cross-entropy loss, one-hot to single label\n\n[3]: # argmax returns earliest/first index of the maximum value along the given axis\n get_preds ka ye hai ki agar kisi output me zero nahi / sare one hain to 5 nahi to jis index par pehli baar zero aya wahi lena hai, example:\n[[1, 1, 1, 1, 1], [1, 1, 0, 0, 0], [1, 0, 1, 1, 0], [0, 0, 0, 0, 0]]\n-> [4, 1, 0, 0]\nbaki clip karna hai (0, 4) me, we can get -1 for cases with all zeros.\n\n[4]: get_best_threshold is used in the validation phase, during each phase (train/val) outputs and targets are accumulated. At the end of train phase a threshold of 0.5 is used for\ngenerating the final predictions and henceforth for the computation of different metrics.\nNow for the validation phase, best_threshold function is used to compute the optimum threshold so that the qwk is minimum and that threshold is used to compute the metrics.\n\nIt can be argued ki why are we using 0.5 for train, then, well we used 0.5 for both train/val so far, so if we are computing this val set best threshold, then not only it can be used to best evaluate the model on val set, it can also be used during the test time prediction as it is being saved with each ckpt.pth\n\n[5]: np.array because it's a list and gets converted to np.array in get_best_threshold function only which is called in val phase and not training phase\n\n[6]: It's important to keep these two in np array, else ConfusionMatrix takes targets as strings. -_-\n\n[7]: macro mean average of all the classes. 
Micro is batch average or sth.\n\"\"\"\n","sub_path":"classifier/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"217110267","text":"from flask import Flask, request, render_template\napp = Flask(__name__)\n\nFLASK_DEBUG=1\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n@app.route('/hello')\n@app.route('/hello/')\ndef greeting(first_name=None):\n args = request.args\n if args.get('name'):\n name = args.get('name')\n else:\n name = \"\"\n if first_name:\n name = first_name\n\n return render_template(\"greeting.html\", n=name)","sub_path":"week8/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"394875342","text":"from pyaedt.edb_core.ipc2581.ecad.cad_data.polygon import Polygon\nfrom pyaedt.generic.general_methods import ET\n\n\nclass AssemblyDrawing(object):\n \"\"\"Class describing an IPC2581 assembly drawing.\"\"\"\n\n def __init__(self, ipc):\n self._ipc = ipc\n self.polygon = Polygon(self._ipc)\n self.line_ref = \"\"\n\n def write_xml(self, package): # pragma no cover\n assembly_drawing = ET.SubElement(package, \"AssemblyDrawing\")\n outline = ET.SubElement(assembly_drawing, \"Outline\")\n polygon = ET.SubElement(outline, \"Polygon\")\n polygon_begin = ET.SubElement(polygon, \"PolyBegin\")\n if self.polygon.poly_steps:\n polygon_begin.set(\"x\", str(self.polygon.poly_steps[0].x))\n polygon_begin.set(\"y\", str(self.polygon.poly_steps[0].y))\n for poly_step in self.polygon.poly_steps[1:]:\n polygon_segment = ET.SubElement(polygon, \"PolyStepSegment\")\n polygon_segment.set(\"x\", str(poly_step.x))\n polygon_segment.set(\"y\", str(poly_step.y))\n line_desc_ref = ET.SubElement(outline, \"LineDescRef\")\n line_desc_ref.set(\"id\", self.line_ref)\n","sub_path":"pyaedt/edb_core/ipc2581/ecad/cad_data/assembly_drawing.py","file_name":"assembly_drawing.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"264834315","text":"'''\nCreated on Apr 06, 2012\n\n@author: Michael Kraus (michael.kraus@ipp.mpg.de)\n'''\n\nimport argparse\nimport matplotlib\n\nfrom vorticity.diagnostics import Diagnostics\n\n\nclass replay(object):\n '''\n \n '''\n\n def __init__(self, hdf5_file, nPlot=1, nMax=0, output=False, contours=False):\n '''\n Constructor\n '''\n \n self.diagnostics = Diagnostics(hdf5_file)\n \n if nMax > 0 and nMax < self.diagnostics.nt:\n self.nMax = nMax\n else:\n self.nMax = self.diagnostics.nt\n \n self.nPlot = nPlot\n self.plot = PlotVorticity2D(self.diagnostics, output=output)\n \n \n def run(self):\n# for iTime in range(1, self.nMax+1):\n for iTime in [5,10,20,30,60]:\n if iTime == 0 or iTime % self.nPlot == 0 or iTime == self.nMax:\n print(iTime)\n self.diagnostics.read_from_hdf5(iTime)\n self.diagnostics.update_invariants(iTime)\n self.plot.update(iTime, final=(iTime == self.nMax))\n \n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Vorticity Equation Solver in 2D')\n \n parser.add_argument('hdf5_file', metavar='', type=str,\n help='Run HDF5 File')\n parser.add_argument('-np', metavar='i', type=int, default=1,\n help='plot every i\\'th frame')\n parser.add_argument('-nt', metavar='i', type=int, default=0,\n help='plot up to i\\'th frame')\n parser.add_argument('-o', 
action='store_true', required=False,\n                        help='save plots to file')\n    parser.add_argument('-c', action='store_true', required=False,\n                        help='plot contours of streaming function in vorticity')\n    \n    args = parser.parse_args()\n    \n    print()\n    print(\"Replay run with \" + args.hdf5_file)\n    print()\n    \n    if args.o == True:\n        matplotlib.use('AGG')\n        from vorticity.plot.plot_contours import PlotVorticity2D\n        pyvp = replay(args.hdf5_file, args.np, args.nt, output=True, contours=args.c)\n        pyvp.run()\n    else:\n        from vorticity.plot.plot_contours import PlotVorticity2D\n        pyvp = replay(args.hdf5_file, args.np, args.nt, output=False, contours=args.c)\n        \n        print()\n        input('Hit any key to start replay.')\n        print()\n        \n        pyvp.run()\n    \n    print()\n    print(\"Replay finished.\")\n    print()\n    \n","sub_path":"diag_contours.py","file_name":"diag_contours.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"187709620","text":"# Today let's go over Python's sorting functions. Python has 2 built-in sorting functions: sort() and sorted()\n\n# The two functions are introduced below:\n\n# 1. Given a list a=[1,4,5,88,0,7], you can sort it with sort() and sorted();\na=[1,4,5,88,0,7]\na.sort() # ascending order by default\nprint(a) # output: [0, 1, 4, 5, 7, 88]\n\na.sort(reverse=True) # reverse=True sorts in descending order. Default False: ascending;\nprint(a) # output: [88, 7, 5, 4, 1, 0]\n\nb = sorted(a,reverse=True) # has a return value, which must be captured in a variable\nprint(b) # output: [88, 7, 5, 4, 1, 0]\n\n# Here you can see that sort() has no return value and changes the original list, while sorted() returns a new list that must be captured in a variable and does not modify the original list\n\n# 2. Using the key parameter:\n# When to use it: when the list elements are no longer simple values, e.g. the list contains tuples or dicts\n# How to use it: a lambda specifies which element or key value to sort by\n# For a list of tuples, sort by a given tuple element; lambda x:x[element index]\nstu = [(\"winnie\", \"A\", 12),(\"lucy\", \"C\", 16),(\"john\", \"B\", 14)]\nstu.sort(key=lambda x: x[2])\nprint(stu)\ns = sorted(stu, key=lambda x: x[1], reverse=True) # default False, ascending\nprint(s)\n# Output:\n# [('winnie', 'A', 12), ('john', 'B', 14), ('lucy', 'C', 16)]\n# [('lucy', 'C', 16), ('john', 'B', 14), ('winnie', 'A', 12)]\n# For a list of dicts, sort by the value of a given key; lambda x:x[key]\nl1 = [{'name0': '李丽', 'age': 40}, {'name0': '张那', 'age': 30},{'name0':'王原','age':50},{'name0':'王丽萍','age':50}]\nl2 = sorted(l1, key=lambda x:x['age'])\nprint(l2)\n# Result: [{'name0': '张那', 'age': 30}, {'name0': '李丽', 'age': 40}, {'name0': '王原', 'age': 50}, {'name0': '王丽萍', 'age': 50}]\n\n# 3. When can sort() not be used?\n# sort() is a built-in list method; it cannot be applied to dicts and other iterables, which raises AttributeError: 'dict' object has no attribute 'sort'\n\n# 4. Example usage of sorted():\n# Given a dict as shown below:\n# the key is a number and the value is how many times that number appears, e.g. 1:2 means the number 1 appears 2 times.\n# Sort this dict by occurrence count from most to least.\n\ndict1={1: 2, 2: 2, 3: 1, 4: 7, 5: 6, 6: 4, 7: 3, 8: 2, 9: 1}\n\nd1=sorted(dict1.values(),reverse=True) # sort by values\nd2=sorted(dict1) # sorts the keys by default\nd3=sorted(dict1.keys(),reverse=True) # sort by keys\nprint(d1)\nprint(d2)\nprint(d3)\n\n# Output:\n# [7, 6, 4, 3, 2, 2, 2, 1, 1]\n# [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# [9, 8, 7, 6, 5, 4, 3, 2, 1]\n# a lambda with an additional sort key\nfoo = [['z',19],['ll',54],['wa',23],['df',23],['xf',23]]\na = sorted(foo, key=lambda x: (x[1], x[0])) # sort by x[1] first, then by x[0] when x[1] is equal\nprint(a) # [['z', 19], ['df', 23], ['wa', 23], ['xf', 23], ['ll', 54]]\n\n# Sort a dict by key using the zip function\ndic = {'name':'za', 'sex':'man', 'city':'bj'}\nfoo = zip(dic.keys(), dic.values())\nfoo = [i for i in foo]\nprint('字典转换成列表嵌套元组', foo)\nb = sorted(foo, key=lambda x:x[0])\nprint('根据键排序', b)\n# build a new dict with a dict comprehension\nnew_dic = {i[0]:i[1] for i in b}\nprint(new_dic)\n# A simpler way to convert a dict to a list of nested tuples:\ndic = {'name':'za', 'sex':'man', 'city':'bj'}\nfoo = list(dic.items())\nprint('字典转换成列表嵌套元组', foo)\n\n# list comprehension, dict comprehension, generator\nimport random\ntd_list = [i for i in range(10)] # 0-9\ndic = {k: random.randint(4, 9) for k in ['a','b','c','d']} # 4-9 inclusive (randint includes both endpoints)\nprint(dic) # {'a': 6, 'b': 5, 'c': 4, 'd': 8}\n# Count occurrences of a substring in a string\ns = \"beijing shang beijing tianjin a b\"\na = s.count('beijing')\nprint(a)","sub_path":"string_process/11-sort-sorted-lambda.py","file_name":"11-sort-sorted-lambda.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
+{"seq_id":"290876487","text":"\"\"\"\nGiven a signed 32-bit integer x, return x with its digits reversed. If reversing x causes the value to go outside the signed 32-bit integer range [-2**31, 2**31 - 1], then return 0.\n\nAssume the environment does not allow you to store 64-bit integers (signed or unsigned).\n\n=> Example 1:\nInput: x = 123\nOutput: 321\n\n=> Example 2:\nInput: x = -123\nOutput: -321\n\n=> Example 3:\nInput: x = 120\nOutput: 21\n\n=> Example 4:\nInput: x = 0\nOutput: 0\n\n=== Constraints ===\n    -2**31 <= x <= 2**31 - 1\n\"\"\"\n\ndef reverseInteger(integer):\n    maximum = 2 ** 31\n    minimum = (2 ** 31) * -1\n    integer = \"-\" + str(integer)[:0:-1] if not str(integer)[0].isnumeric() else str(integer)[::-1] \n    integer = int(integer)\n    return integer if minimum < integer < maximum else 0\n\n\nif __name__ == \"__main__\":\n    integer = 123\n    print(reverseInteger(integer))\n\n    integer = -123\n    print(reverseInteger(integer))\n\n    integer = 120\n    print(reverseInteger(integer))\n\n    integer = 0\n    print(reverseInteger(integer))","sub_path":"leetcode/strings/reverseInteger.py","file_name":"reverseInteger.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"433617204","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect\nfrom django.utils import timezone\nfrom .forms import UserRegisterForm, CommentForm\nfrom .models import Picture, Profile, Comment\nfrom django.views.generic import ListView, DetailView\nfrom django.db.models import Q\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\n\n@login_required\ndef home(request):\n    profile = get_object_or_404(Profile, id=request.user.pk)\n    picture_list = Picture.objects.filter(owner__in=profile.following.all()).order_by('-post_date')\n\n    context = {\n        'Pictures': picture_list\n    }\n    return render(request, 'users/main.html', context)\n\n\ndef trending(request):\n    context = {\n        'Pictures': Picture.objects.annotate(like_count=Count('likes')).order_by('-like_count')\n    }\n    return render(request, 'users/trending.html', context)\n\n\nclass PictureListView(ListView):\n    model = Picture\n    template_name = 'users/main.html'\n    context_object_name = 'Pictures'\n    ordering = ['-post_date']\n\n\ndef picture_detail(request, pk):\n    picture = get_object_or_404(Picture, pk=pk)\n    comments = Comment.objects.filter(picture=picture)\n    is_liked = False\n    if picture.likes.filter(id=request.user.pk).exists():\n        is_liked = True\n\n    if request.method == 'POST':\n        comment_form = CommentForm(request.POST or None)\n        if comment_form.is_valid():\n            comment = comment_form.save(commit=False)\n            comment.picture = picture # Changed from .post to .picture MAY BE WRONG\n            comment.author = request.user\n            comment_form.save()\n    else:\n        comment_form = CommentForm()\n\n    context = {\n        'picture': picture,\n        'comments': comments,\n        'comment_form': comment_form,\n        
\n\n context = {\n 'picture': picture,\n 'comments': comments,\n 'comment_form': comment_form,\n 'is_liked': is_liked,\n 'total_likes': picture.total_likes(),\n }\n\n return render(request, 'users/picture_detail.html', context)\n\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'Account created for {username}! You are now able to log in.')\n return redirect('login')\n else:\n form = UserRegisterForm()\n return render(request, 'users/register.html', {'form': form})\n\n\n@login_required\ndef profile(request):\n latest_picture_list = Picture.objects.filter(owner=request.user).order_by('-post_date')\n context = {\n 'latest_picture_list': latest_picture_list\n }\n return render(request, 'users/profile.html', context)\n\n\n@login_required\ndef follow_user(request):\n profile = get_object_or_404(Profile, id=request.POST.get('profile_id'))\n requesting_user = get_object_or_404(Profile, id=request.user.pk)\n if profile.followed.filter(id=request.user.pk).exists():\n profile.followed.remove(request.user)\n requesting_user.following.remove(profile.user)\n else:\n profile.followed.add(request.user)\n requesting_user.following.add(profile.user)\n send_mail('Follow Notification',\n requesting_user.user.email + ' is now following you!',\n 'SlightlyDelayedGram123@gmail.com',\n [profile.user.email],\n fail_silently=False)\n return HttpResponseRedirect(profile.get_absolute_url())\n\n\ndef like_picture(request):\n post = get_object_or_404(Picture, id=request.POST.get('picture_id'))\n if post.likes.filter(id=request.user.pk).exists():\n post.likes.remove(request.user)\n else:\n post.likes.add(request.user)\n return HttpResponseRedirect(post.get_absolute_url())\n\n\n@login_required\ndef peer_profile(request, pk):\n profile = get_object_or_404(Profile, pk=pk)\n is_followed = False\n if profile.followed.filter(id=request.user.pk).exists():\n is_followed = True\n\n # TODO: decide whether this profile should be followable by the requester\n\n latest_picture_list = Picture.objects.filter(owner=profile.user).order_by('-post_date')\n context = {\n 'latest_picture_list': latest_picture_list,\n 'profile': profile,\n 'is_followed': is_followed,\n }\n return render(request, 'users/peer_profile.html', context)\n\n\ndef upload_picture(request):\n try:\n pic = request.FILES['image']\n model = Picture(owner=request.user, picture_object=pic, post_date=timezone.now())\n model.save()\n profile = get_object_or_404(Profile, id=request.user.pk)\n for follower in profile.followed.all():\n user_email = User.objects.get(username=follower).email\n send_mail('New Picture Notification',\n request.user.username + ' has posted!',\n 'SlightlyDelayedGram123@gmail.com',\n [user_email],\n fail_silently=False)\n\n return redirect('profile')\n except Exception:\n # any failure (no file selected, mail error, ...) just returns to the profile page\n return redirect('profile')\n\n\ndef delete_picture(request, pk):\n if request.method == 'POST':\n picture = Picture.objects.get(pk=pk)\n picture.delete()\n return redirect('profile')\n\n\n@login_required\ndef search(request):\n if request.method == 'POST':\n srch = request.POST['srh']\n\n if srch:\n match = Profile.objects.filter(Q(user__username__istartswith=srch))\n\n if match:\n return render(request, 'users/search.html', {'sr': match})\n else:\n messages.error(request, 'No results found.')\n\n else:\n return HttpResponseRedirect('/search/')\n return render(request, 'users/search.html')
\n","sub_path":"SlightlyDelayedGram/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"242700172","text":"from itertools import islice\n\n\"\"\" This class searches for tirps in the index file;\n the constructor gets the path to the output of the RawDataToIndexFile object\"\"\"\nclass SearchInIndexFile:\n\n _index_file = None\n\n _main_map_dic = None\n _vs_map_dic = None\n _m_hs_list = None\n\n _START_INDEX = 0\n _END_INDEX = 1\n\n _START_INDEX_OF_TIRPS = 3\n\n _MIN_VERTICAL_SUPPORT_DEFAULT = 0\n _MAX_VERTICAL_SUPPORT_DEFAULT = 100\n _MIN_MEAN_HORIZONTAL_SUPPORT_DEFAULT = 1\n _MAX_MEAN_HORIZONTAL_SUPPORT_DEFAULT = float('inf')\n\n\n \"\"\" The constructor gets the path to the index file and sets up all the relevant data structures for the search \n main_map_dic - dictionary mapping each sym to the first line that contains the relevant data\n vs_map_dic - dictionary mapping each vertical support value (rounded down to a multiple of 5) to the line of the relevant data\n m_hs_list - list of tuples of the mean horizontal support value (fixed-size buckets) and the first line of the relevant data\"\"\"\n def __init__( self , path ):\n\n self._index_file = open( path , 'r' )\n\n self._main_map_dic = {}\n self._vs_map_dic = {}\n self._m_hs_list = []\n\n self.create_map_dictionaries()\n self.creat_m_hs_list()\n\n \"\"\" This function creates the main_map_dic and the vs_map_dic \"\"\"\n def create_map_dictionaries( self ):\n\n self.set_parsed_map_line( self._main_map_dic , 0 )\n prev = self.set_parsed_map_line( self._vs_map_dic , self._main_map_dic[\"VS\"][self._START_INDEX] )\n # adds the last line of the vs section\n self._vs_map_dic[prev][self._END_INDEX] = self._main_map_dic[\"VS\"][self._END_INDEX]\n\n \"\"\" This function builds a given dictionary starting from a given line index \"\"\"\n def set_parsed_map_line( self , dest_dic , index ):\n\n map_line = next( self.get_iter_lines( index , index + 1 ) ).split( \" \" )\n prev = None\n\n for pair in map_line:\n split_pair = pair.split( \":\" )\n dest_dic[split_pair[0]] = [int( split_pair[1] )]\n if prev is not None:\n dest_dic[prev] += [int( split_pair[1] )]\n prev = split_pair[0]\n\n dest_dic[prev] += [None]\n return prev\n\n \"\"\" This function creates the _m_hs_list as described above \"\"\"\n def creat_m_hs_list(self):\n\n lst = next(self.get_iter_lines(self._main_map_dic[\"HS\"][self._START_INDEX]))[:-1].split( \" \" )\n for item in lst:\n pair = item.split( \":\" )\n self._m_hs_list += [( float( pair[0] ) , int( pair[1] ) )]\n\n \"\"\" This function gets a property ('s'/'c'/'e') and a sym as a string and returns a list of all the relevant tirps; \n see the doc of the RawDataToIndexFile object for more information on the property \"\"\"\n def get_tirps_by_property( self , prop , sym ):\n\n if sym not in self._main_map_dic:\n return []\n lines = self.get_iter_lines( self._main_map_dic[sym][self._START_INDEX] \\\n , self._main_map_dic[sym][self._END_INDEX] + 1 )\n\n for tirps_str in lines:\n if tirps_str[0] == prop:\n return tirps_str[self._START_INDEX_OF_TIRPS:][:-1].split( \" \" )\n\n return []
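\n\n # Layout note: vertical-support values are indexed in buckets of 5 (see\n # divided_by_five_round below); query bounds are first snapped to bucket\n # boundaries, and exact filtering then happens inside lines_to_tirps_list.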
\n\n \"\"\" This function returns all the tirps whose vertical support is between the given bounds \"\"\"\n def get_tirps_by_vs( self , min_vs = _MIN_VERTICAL_SUPPORT_DEFAULT , \\\n max_vs = _MAX_VERTICAL_SUPPORT_DEFAULT ):\n\n if min_vs < 0 or min_vs > 100 or max_vs < 0 or max_vs > 100:\n return []\n\n r_min_vs = self.divided_by_five_round( min_vs )\n r_max_vs = self.divided_by_five_round( max_vs )\n if r_min_vs > int(list(self._vs_map_dic)[-1]):\n # the rounded minimum lies above the last indexed bucket, nothing to return\n return []\n\n for i in range( r_min_vs , r_max_vs + 1 , 5 ):\n if str( i ) in self._vs_map_dic.keys():\n r_min_vs = str( i )\n break\n\n for i in range( r_max_vs , int( r_min_vs ) - 1 , -5 ):\n if str( i ) in self._vs_map_dic.keys():\n r_max_vs = str( i )\n break\n\n tirps_line_iter = self.get_iter_lines( self._vs_map_dic[r_min_vs][self._START_INDEX] , \\\n self._vs_map_dic[r_max_vs][self._END_INDEX] )\n return self.lines_to_tirps_list( tirps_line_iter , min_vs / 100 , max_vs / 100 )\n\n \"\"\" This function returns all the tirps whose mean horizontal support is between the given bounds \"\"\"\n def get_tirps_by_hs( self , min_m_hs = _MIN_MEAN_HORIZONTAL_SUPPORT_DEFAULT , \\\n max_m_hs = _MAX_MEAN_HORIZONTAL_SUPPORT_DEFAULT ):\n\n if min_m_hs < 1 or max_m_hs < 1 or len( self._m_hs_list ) < 1:\n return []\n\n min_hs_line = self._m_hs_list[0][1]\n max_hs_line = None\n\n for tup in self._m_hs_list:\n if tup[0] > min_m_hs:\n break\n min_hs_line = tup[1]\n\n for tup in self._m_hs_list:\n if tup[0] > max_m_hs:\n max_hs_line = tup[1]\n break\n\n\n tirps_line_iter = self.get_iter_lines( min_hs_line , max_hs_line )\n return self.lines_to_tirps_list( tirps_line_iter , min_m_hs , max_m_hs )\n\n \"\"\" This function gets a number of lines from the file and returns a list of all the tirps in those lines;\n min_value\\max_value - when the lines hold vs or m_hs values, these bounds select only the relevant tirps\"\"\"\n def lines_to_tirps_list( self , lines , min_value = 0 , max_value = _MAX_MEAN_HORIZONTAL_SUPPORT_DEFAULT ):\n\n tirps_list = []\n\n for line in lines:\n line = line[:-1]\n splited_line = line.split( \" \" )\n if min_value <= float ( splited_line[0][:-1] ) and max_value >= float ( splited_line[0][:-1] ):\n splited_line.pop( 0 )\n tirps_list += splited_line\n\n return tirps_list\n\n \"\"\" This function returns an iterable object that contains the lines between s_index and e_index;\n e_index is optional, the default is the end of the file \"\"\"\n def get_iter_lines( self , s_index , e_index = None ):\n\n self._index_file.seek(0)\n return islice( self._index_file , s_index , e_index )\n\n \"\"\" This function rounds the given number down to the nearest multiple of 5 \"\"\"\n def divided_by_five_round( self , num ):\n\n return int( num / 5 ) * 5\n\n \"\"\" This function sets the default min vertical support\"\"\"\n def setMinVS(self, vs):\n self._MIN_VERTICAL_SUPPORT_DEFAULT = vs\n\n \"\"\" This function searches for the tirps that meet all the conditions:\n start_sym\\contains_sym\\end_sym - lists of all the symbols the tirps can start\\contain\\end with\n min_vs\\max_vs - min\\max vertical support\n min_m_hs\\max_m_hs - min\\max mean horizontal support\"\"\"\n def get_serached_tirps( self , start_sym , contains_sym , end_sym , min_vs = 0 , max_vs = 100 , \\\n min_m_hs = _MIN_MEAN_HORIZONTAL_SUPPORT_DEFAULT , max_m_hs = _MAX_MEAN_HORIZONTAL_SUPPORT_DEFAULT ):\n\n starts_with_tirps = []\n contains_tirps = []\n ends_with_tirps = []\n all_lists = []\n\n if len(start_sym) > 0:\n for sym in start_sym:\n starts_with_tirps += 
self.get_tirps_by_property( \"s\" , sym )\n starts_with_tirps = list( dict.fromkeys( starts_with_tirps ) )\n all_lists.append( starts_with_tirps )\n\n if len(contains_sym) > 0:\n for sym in contains_sym:\n contains_tirps += self.get_tirps_by_property( \"c\" , sym )\n contains_tirps = list( dict.fromkeys( contains_tirps ) )\n all_lists.append( contains_tirps )\n\n if len(end_sym) > 0:\n for sym in end_sym:\n ends_with_tirps += self.get_tirps_by_property( \"e\" , sym )\n ends_with_tirps = list( dict.fromkeys( ends_with_tirps ) )\n all_lists.append( ends_with_tirps )\n\n if min_vs and max_vs:\n all_lists.append( self.get_tirps_by_vs( min_vs , max_vs ) )\n\n if min_m_hs:\n if not max_m_hs:\n max_m_hs = self._MAX_MEAN_HORIZONTAL_SUPPORT_DEFAULT\n all_lists.append( self.get_tirps_by_hs( min_m_hs , max_m_hs ) )\n\n return self.get_combaind_list( all_lists )\n\n \"\"\" This function gets a list of lists and returns one sorted list containing\n the intersection of the given lists; if any criterion produced an empty list,\n the intersection is empty as well\"\"\"\n def get_combaind_list( self , lists ):\n\n if [] in lists:\n return []\n\n if len( lists ) < 1:\n return []\n\n combaind_list = lists[0]\n\n for lst in lists:\n combaind_list = list( set(combaind_list) & set(lst) )\n\n combaind_list.sort( key = lambda tirp : ( tirp.replace( \"(\" , \"\" ).split( \"-\" )[:-1] ) )\n\n return combaind_list\n\n","sub_path":"SearchInIndexFile.py","file_name":"SearchInIndexFile.py","file_ext":"py","file_size_in_byte":9369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"654040096","text":"#%%\nimport math\nimport os \n\nprint(\"Hello world!\")\n\n#%%\n# From last week\nabool = True # boolean\nazero = 0 # int\naint = 35 # int\nafloat = -2.8 # float\nanumbzerostr = \"0\" # str\naemptystr = \"\" # str\naletter = 'a' # str\nastr = \"three-five\" # str\n# list / array\nalist = [1,'person',1,'heart',10,'fingers']\n# tuple # like list, but immutable (faster access and processing)\natuple = (1,'person',1,'heart',10,'fingers')\n# set # un-ordered, and distinct elements.
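\n# Note that duplicates collapse in a set: the two 1s below are stored only\n# once, so this set has 5 members, not 6.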
\naset = { 1,'person',1,'heart',10,'fingers' }\n# dictionary\nadictionary = { \"name\": \"Einstein\", 1: \"one\", astr: 35, aint: 'thirty five', \"last\": alist }\n\n#%%\n# some more \n# note anything unexpected/unusual\nlist1 = [1,5,3,8,2]\nlist2 = [2]\ntuple1 = (1,5,3,8,2)\ntype(tuple1)\nprint(len(tuple1))\ntuple2 = (2,) # you need the trailing comma here, otherwise it is just a parenthesized int!\ntype(tuple2)\nprint(len(tuple2)) \ntuple3 = tuple([2])\ntype(tuple3)\nprint(len(tuple3))\ntuple4 = ()\ntype(tuple4)\nprint(len(tuple4))\nprint(\"type of tuple1: %s, length of tuple1: %d\" % (type(tuple1), len(tuple1)) )\n\ntuple2 = (2)\nprint(\"type of tuple2: %s\" % type(tuple2) )\n# print(\"type of tuple2: %s, length of tuple2: %d\" % (type(tuple2), len(tuple2)) )\n# len(tuple2) # does not work: tuple2 is an int here, so len() raises TypeError\n\ntuple3 = tuple([2])\nprint(\"type of tuple3: %s, length of tuple3: %d\" % (type(tuple3), len(tuple3)) )\n\ntuple4 = ()\nprint(\"type of tuple4: %s, length of tuple4: %d\" % (type(tuple4), len(tuple4)) )\n\n\n#%%\n# Slicing parts of list/tuple/set\n# Try\n# write some notes/comments for each case, so that you can review them easily yourself\nalist[1:4] # inclusive on the start index, exclusive of the end index\nalist[:4]\nalist[:]\n# optional step argument: a trailing :2 takes every second element\nalist[1:4:2]\nalist[1:5:2]\nalist[1:3:2]\n# what do you expect the result of this to be?\nalist[1::2]\n# Also try \nalist[-4]\nalist[-4:-2]\nalist[-4:]\nalist[-2:-4] # empty: with a positive step the start must lie to the left of the stop\n\n#%%\n# Now try tuple, set, and dictionary\n# Put some notes for yourself\n# comment out the illegal ones so that you can run your entire file gracefully\n\n#%%[markdown]\n# # Logic\n# ## Conditional statement\n# \n# _________________________________________________ \n# Statement: If p, then q OR p -> q \n#\n# Contrapositive: If -q, then -p OR -q -> -p \n# _________________________________________________ \n# Inverse: If -p, then -q OR -p -> -q \n# \n# Converse: If q, then p OR q -> p \n# _________________________________________________ \n\n#%%[markdown]\n# ## Some other logic rules\n# \n# _________________________________________________ \n# -(p AND q)\n#\n# same as \n#\n# -p OR -q\n# _________________________________________________ \n# -(p OR q)\n#\n# same as \n#\n# -p AND -q\n# _________________________________________________ \n# p AND (q AND r)\n#\n# same as \n#\n# (p AND q) AND r\n#\n# we usually combine as \n#\n# (p AND q AND r)\n# _________________________________________________ \n# p OR (q OR r)\n#\n# same as \n#\n# (p OR q) OR r\n#\n# we usually combine as \n#\n# (p OR q OR r)\n# _________________________________________________ \n# ## Distributive law 1\n# p AND (q OR r)\n#\n# same as \n#\n# (p AND q) OR (p AND r)\n# _________________________________________________ \n# ## Distributive law 2\n# p OR (q AND r)\n#\n# same as \n#\n# (p OR q) AND (p OR r)\n# _________________________________________________ \n#\n#
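\n\n#%%\n# A quick numeric check of the two negation rules above (De Morgan's laws) --\n# an optional illustration; p and q are throwaway booleans for this demo only\nfor p in (True, False):\n for q in (True, False):\n assert (not (p and q)) == ((not p) or (not q))\n assert (not (p or q)) == ((not p) and (not q))\nprint(\"De Morgan's laws hold for all four combinations\")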
\n\n#%%\n# conditional\n# if :\nincome = 60000\nif income >100000 :\n print(\"rich\")\n# if else:\nif income >100000 :\n print(\"rich\")\nelse :\n print(\"not rich\")\n# if elif elif .... :\nif income >200000 :\n print(\"super rich\")\nelif income > 100000 :\n print(\"rich\")\nelif income > 40000 :\n print(\"not bad\")\nelif income > 0 :\n print(\"could be better\")\nelse :\n print(\"no idea\")\n\n# The above can be compacted into a one-liner\nprint(\"super rich\" if income > 200000 else \"rich\" if income > 100000 else \"not bad\" if income > 40000 else \"could be better\" if income > 0 else \"no idea\" )\n# or \nincomelevel = \"super rich\" if income > 200000 else \"rich\" if income > 100000 else \"not bad\" if income > 40000 else \"could be better\" if income > 0 else \"no idea\" \nprint(incomelevel)\n\n# write your conditional statement to assign letter grades A, A-, B+ etc according to the syllabus\n\n#%%\n# loops - basic\nfor i in range(10):\n print(i)\n\n#%%\n# loops - iterate a list/tuple/set/dictionary\n# any difference among the three below?\n# for val in list :\nfor val in [ 4,'2',(\"a\",5),'end' ] :\n print(val, type(val))\n# for val in tuple :\nfor val in ( 4,'2',(\"a\",5),'end' ) :\n print(val, type(val))\n# for val in set :\nfor val in { 4,'2',(\"a\",5),'end' } :\n print(val, type(val))\n\n# Now for dictionary\n# for val in dictionary : (keys only)\nfor key in { \"k0\":4, \"k8\":'2', \"k1\":(\"a\",5), \"k5\":'end' } :\n print(key, type(key))\n \n# for val in string :\nfor char in 'GW Rocks' :\n print(char, type(char))\n\n#%%\n# for index, val in enumerate(list) :\nthething = [ 4,'2',(\"a\",5),'end' ]\nfor index, val in enumerate(thething) :\n # print(\"index\", index, \"value\", val, thething[index], type(val))\n print(\"index: %d, value: %s, or thething[index]: %s, and type: %s\" % (index, val, thething[index], type(val)) )\n\n#%%\n# Try tuple, set, and dictionary\nthething = { 4,'2',(\"a\",5),'end' }\nthething = { \"k0\":4, \"k8\":'2', \"k1\":(\"a\",5), \"k5\":'end' }\n# re-run the for loop above -> error\n# what happened to set and dictionary?\n# enumerate() itself works on any iterable, but thething[index] fails here:\n# a set is not subscriptable at all, and a dictionary is looked up by key, not by position.\n\n#%%\n# for dictionary, you do not need to enumerate to get key, value pairs\n# either of these works\nthething = { \"k0\":4, \"k8\":'2', \"k1\":(\"a\",5), \"k5\":'end' }\nfor key in thething :\n # print(\"key:\", key, \"value:\", thething[key], \"type of value\", type(thething[key]))\n print(\"key: %s, value: %s, and type: %s\" % ( key, thething[key], type(thething[key]) ) )\n\n#%%\n# or try this\nthething.items() # creates an object of type dict_items, which can be looped through as key/value pairs \nfor key, val in thething.items() :\n # print(\"key:\", key, \"value:\", val, \"type of value\", type(val))\n print(\"key: %s, value: %s, and type: %s\" % ( key, val, type(val) ) )\n\n#%%\n# external file\nimport os # already imported at the start of the file\nfilepath = os.path.join( os.getcwd(), \"presidents.txt\")\n# filepath = '/Users/edwinlo/GDrive_GWU/github_elo/GWU_classes/DATS_6103_DataMining/Class02_functions/presidents.txt'\nprint(filepath)\nfh = open(filepath) # fh stands for file handle\nfor k in fh.readlines():\n print(type(k), ' ' ,k, end='')\n# notice that fh is exhausted at the end of the loop; re-open it (or use\n# fh.seek(0)) if you need to read the lines again
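\n\n#%%\n# optional illustration: a `with` block re-opens the file and closes it for you\n# automatically -- the idiomatic way to read a file (fh2 is just a demo name)\nwith open(filepath) as fh2:\n for line in fh2:\n print(line, end='')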
\n\n\n# %%\n","sub_path":"DATS_6103_DataMining/Class02_functions/InClass02_loops_logic.py","file_name":"InClass02_loops_logic.py","file_ext":"py","file_size_in_byte":6485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"320282708","text":"from dal import autocomplete\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\nfrom service.helper.enums import MetadataEnum\nfrom service.models import Metadata\n\n\n@method_decorator(login_required, name='dispatch')\nclass MetadataAutocomplete(autocomplete.Select2QuerySetView):\n \"\"\" Provides autocomplete functionality for metadata records;\n subclasses narrow the queryset to a single metadata type\n\n \"\"\"\n model = Metadata\n search_fields = ['title', 'id']\n metadata_type = None\n\n def get_queryset(self):\n qs = super().get_queryset()\n if self.metadata_type:\n qs = qs.filter(metadata_type=self.metadata_type.value)\n return qs\n\n def get_result_label(self, result):\n \"\"\"\n We override this method to append the id of the metadata object,\n so the user can tell apart results whose titles are equal.\n \"\"\"\n return '{} #{}'.format(result.title, result.id)\n\n\nclass MetadataServiceAutocomplete(MetadataAutocomplete):\n \"\"\" Provides an autocomplete functionality for service metadata records\n\n \"\"\"\n metadata_type = MetadataEnum.SERVICE\n\n\nclass MetadataDatasetAutocomplete(MetadataAutocomplete):\n \"\"\" Provides an autocomplete functionality for dataset metadata records\n\n \"\"\"\n metadata_type = MetadataEnum.DATASET\n\n\nclass MetadataLayerAutocomplete(MetadataAutocomplete):\n \"\"\" Provides an autocomplete functionality for layer metadata records\n\n \"\"\"\n metadata_type = MetadataEnum.LAYER\n\n\nclass MetadataFeaturetypeAutocomplete(MetadataAutocomplete):\n \"\"\" Provides an autocomplete functionality for featuretype metadata records\n\n \"\"\"\n metadata_type = MetadataEnum.FEATURETYPE\n\n\nclass MetadataCatalougeAutocomplete(MetadataAutocomplete):\n \"\"\" Provides an autocomplete functionality for catalogue metadata records\n\n \"\"\"\n metadata_type = MetadataEnum.CATALOGUE\n","sub_path":"mrmap/service/autocompletes.py","file_name":"autocompletes.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"309543712","text":"\"\"\"\nAUTHOR : Lungu Daniel\n\nACCURACY : 88.3 %\n\"\"\"\n\nfrom datetime import datetime\nimport numpy as np\nimport tensorflow as tf\nimport functools\nimport time\nimport math\nimport os\nimport sys\nimport re\n\n\nclass ImageRecognition(object):\n def __init__(self):\n # Process images of this size. Note that this differs from the original CIFAR\n # image size of 32 x 32. 
If one alters this number, then the entire model\n # architecture will change and any model would need to be retrained.\n self.IMAGE_SIZE = 24\n\n # Global constants describing the CIFAR-10 data set.\n self.NUM_CLASSES = 10\n self.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000\n self.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000\n\n # Constants describing the training process.\n self.MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.\n self.NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.\n self.LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.\n self.INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.\n self.EPSILON = 1e-3 # Hyperparamter for Batch Normalization.\n\n # If a model is trained with multiple GPUs, prefix all Op names with tower_name\n # to differentiate the operations. Note that this prefix is removed from the\n # names of the summaries when visualizing a model.\n self.TOWER_NAME = 'tower'\n\n def train(self):\n \"\"\"\n Train CIFAR-10 for a number of steps.\n :return: Nothing.\n \"\"\"\n\n with tf.Graph().as_default():\n global_step = tf.contrib.framework.get_or_create_global_step()\n\n # Get images and labels for CIFAR-10.\n images, labels = self._distorted_inputs()\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n logits = self.__inference(images)\n\n # Calculate loss.\n loss = self.__loss(logits, labels)\n\n # Build a Graph that trains the model with one batch of examples and\n # updates the model parameters.\n train_op = self.__train(loss, global_step)\n\n class _LoggerHook(tf.train.SessionRunHook):\n \"\"\"Logs loss and runtime.\"\"\"\n\n def begin(self):\n self._step = -1\n\n def before_run(self, run_context):\n self._step += 1\n self._start_time = time.time()\n\n return tf.train.SessionRunArgs(loss) # Asks for loss value.\n\n def after_run(self, run_context, run_values):\n duration = time.time() - self._start_time\n loss_value = run_values.results\n\n if self._step % 10 == 0:\n num_examples_per_step = FLAGS.batch_size\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n\n format_str = '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'\n print(format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch))\n\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=FLAGS.train_dir,\n hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps), tf.train.NanTensorHook(loss),\n _LoggerHook()],\n config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)) as mon_sess:\n while not mon_sess.should_stop():\n mon_sess.run(train_op)\n\n def evaluate(self):\n \"\"\"\n Eval CIFAR-10 for a number of steps.\n :return: Nothing.\n \"\"\"\n\n with tf.Graph().as_default() as g:\n # Get images and labels for CIFAR-10.\n eval_data = FLAGS.eval_data == 'test'\n images, labels = self._inputs(eval_data=eval_data)\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n logits = self.__inference(images)\n\n # Calculate predictions.\n top_k_op = tf.nn.in_top_k(logits, labels, 1)\n\n # Restore the moving average version of the learned variables for eval.\n variable_averages = tf.train.ExponentialMovingAverage(self.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.summary.merge_all()\n\n summary_writer = 
tf.summary.FileWriter(FLAGS.eval_dir, g)\n\n while True:\n self.__eval_once(saver, summary_writer, top_k_op, summary_op)\n\n if FLAGS.run_once:\n break\n\n time.sleep(FLAGS.eval_interval_secs)\n\n def __eval_once(self, saver, summary_writer, top_k_op, summary_op):\n \"\"\"\n Run Eval once.\n :param saver: Saver.\n :param summary_writer: Summary writer.\n :param top_k_op: Top K op.\n :param summary_op: Summary op.\n :return: Nothing\n \"\"\"\n\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # Assuming model_checkpoint_path looks something like:\n # /my-favorite-path/cifar10_train/model.ckpt-0,\n # extract global_step from it.\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n else:\n print('No checkpoint file found')\n return\n\n # Start the queue runners.\n coord = tf.train.Coordinator()\n\n try:\n threads = []\n for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):\n threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))\n\n num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))\n true_count = 0 # Counts the number of correct predictions.\n total_sample_count = num_iter * FLAGS.batch_size\n step = 0\n\n while step < num_iter and not coord.should_stop():\n predictions = sess.run([top_k_op])\n true_count += np.sum(predictions)\n step += 1\n\n # Compute precision @ 1.\n precision = true_count / total_sample_count\n print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))\n\n summary = tf.Summary()\n summary.ParseFromString(sess.run(summary_op))\n summary.value.add(tag='Precision @ 1', simple_value=precision)\n summary_writer.add_summary(summary, global_step)\n except Exception as e: # pylint: disable=broad-except\n coord.request_stop(e)\n\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=10)\n\n def __read_cifar10(self, filename_queue):\n \"\"\"\n Reads and parses examples from CIFAR10 data files.\n :param filename_queue: filename_queue: A queue of strings with the filenames to read from.\n :return: An object representing a single example, with the following fields:\n height: number of rows in the result (32)\n width: number of columns in the result (32)\n depth: number of color channels in the result (3)\n key: a scalar string Tensor describing the filename & record number for this example.\n label: an int32 Tensor with the label in the range 0..9.\n uint8image: a [height, width, depth] uint8 Tensor with the image data\n \"\"\"\n\n class CIFAR10Record(object):\n pass\n\n result = CIFAR10Record()\n\n # Dimensions of the images in the CIFAR-10 dataset.\n label_bytes = 1\n result.height = 32\n result.width = 32\n result.depth = 3\n image_bytes = result.height * result.width * result.depth\n\n # Every record consists of a label followed by the image, with a fixed number of bytes for each.\n record_bytes = label_bytes + image_bytes\n\n # Read a record, getting filenames from the filename_queue. 
No header or footer in the CIFAR-10 format,\n # so we leave header_bytes and footer_bytes at their default of 0.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8 that is record_bytes long.\n record_bytes = tf.decode_raw(value, tf.uint8)\n\n # The first bytes represent the label, which we convert from uint8->int32.\n result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)\n\n # The remaining bytes after the label represent the image, which we reshape\n # from [depth * height * width] to [depth, height, width].\n depth_major = tf.reshape(tf.strided_slice(record_bytes, [label_bytes], [label_bytes + image_bytes]),\n [result.depth, result.height, result.width])\n\n # Convert from [depth, height, width] to [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n\n return result\n\n def __generate_image_and_label_batch(self, image, label, min_queue_examples, batch_size, shuffle):\n \"\"\"\n Construct a queued batch of images and labels.\n :param image: 3-D Tensor of [height, width, 3] of type.float32.\n :param label: 1-D Tensor of type.int32\n :param min_queue_examples: int32, minimum number of samples to retain\n in the queue that provides of batches of examples.\n :param batch_size: Number of images per batch.\n :param shuffle: boolean indicating whether to use a shuffling queue.\n :return: images: Images. 4D tensor of [batch_size, height, width, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n\n # Create a queue that shuffles the examples, and then read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n\n if shuffle:\n images, label_batch = tf.train.shuffle_batch([image, label], batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n else:\n images, label_batch = tf.train.batch([image, label], batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n\n # Display the training images in the visualizer.\n tf.contrib.deprecated.image_summary('images', images)\n\n return images, tf.reshape(label_batch, [batch_size])\n\n def __distorted_inputs(self, data_dir, batch_size):\n \"\"\"\n Construct distorted input for CIFAR training using the Reader ops.\n :param data_dir: Path to the CIFAR-10 data directory.\n :param batch_size: Number of images per batch.\n :return: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in range(1, 6)]\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = self.__read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = self.IMAGE_SIZE\n width = self.IMAGE_SIZE\n\n # Image processing for training the network. 
Note the many random distortions applied to the image.\n\n # Randomly crop a [height, width] section of the image.\n distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # Because these operations are not commutative, consider randomizing the order of their operations.\n distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)\n distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(distorted_image)\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(self.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)\n\n print('Filling queue with %d CIFAR images before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return self.__generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size,\n shuffle=True)\n\n def __inputs(self, eval_data, data_dir, batch_size):\n \"\"\"\n Construct input for CIFAR evaluation using the Reader ops.\n :param eval_data: bool, indicating if one should use the train or eval data set.\n :param data_dir: Path to the CIFAR-10 data directory.\n :param batch_size: Number of images per batch.\n :return: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n\n if not eval_data:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in range(1, 6)]\n num_examples_per_epoch = self.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n num_examples_per_epoch = self.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = self.__read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = self.IMAGE_SIZE\n width = self.IMAGE_SIZE\n\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, width, height)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(resized_image)\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return self.__generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size,\n shuffle=False)\n\n def __activation_summary(self, x):\n \"\"\"\n Helper to create summaries for activations.\n Creates a summary that provides a histogram of activations.\n Creates 
a summary that measures the sparsity of activations.\n :param x: Tensor\n :return: nothing\n \"\"\"\n\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training session.\n # This helps the clarity of presentation on tensorboard.\n tensor_name = re.sub('%s_[0-9]*/' % self.TOWER_NAME, '', x.op.name)\n tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)\n tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))\n\n def __variable_on_cpu(self, name, shape, initializer):\n \"\"\"\n Helper to create a Variable stored on CPU memory.\n :param name: name of the variable\n :param shape: list of ints\n :param initializer: initializer for Variable\n :return: Variable Tensor\n \"\"\"\n\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n\n return var\n\n def __variable_with_weight_decay(self, name, shape, stddev, wd):\n \"\"\"\n Helper to create an initialized Variable with weight decay.\n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n :param name: name of the variable\n :param shape: list of ints\n :param stddev: standard deviation of a truncated Gaussian\n :param wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n :return: Variable Tensor\n \"\"\"\n\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = self.__variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n\n return var\n\n def _distorted_inputs(self):\n \"\"\"\n Construct distorted input for CIFAR training using the Reader ops.\n :return: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n\n data_dir = os.path.join(FLAGS.data_dir)\n images, labels = self.__distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size)\n\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n\n return images, labels\n\n def _inputs(self, eval_data):\n \"\"\"\n Construct input for CIFAR evaluation using the Reader ops.\n :param eval_data: bool, indicating if one should use the train or eval data set.\n :return: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 
1D tensor of [batch_size] size.\n \"\"\"\n\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n\n data_dir = os.path.join(FLAGS.data_dir)\n images, labels = self.__inputs(eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size)\n\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n\n return images, labels\n\n def __inference(self, images):\n \"\"\"\n Build the CIFAR-10 model.\n :param images: Images returned from distorted_inputs() or inputs().\n :return: Logits.\n \"\"\"\n\n # First Convolutional Layer\n with tf.variable_scope('conv1') as scope:\n nr_units = functools.reduce(lambda x, y: x * y, [5, 5, 3, 64])\n weights = self.__variable_with_weight_decay('weights', shape=[5, 5, 3, 64],\n stddev=1.0 / math.sqrt(float(nr_units)), wd=0.0)\n scale = self.__variable_on_cpu('scale', [64], tf.constant_initializer(1.0))\n beta = self.__variable_on_cpu('beta', [64], tf.constant_initializer(0.0))\n\n z = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')\n batch_mean, batch_var = tf.nn.moments(z, [0])\n bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, self.EPSILON)\n conv1 = tf.nn.relu(bn, name=scope.name)\n self.__activation_summary(conv1)\n\n # Second Convolutional Layer\n with tf.variable_scope('conv2') as scope:\n nr_units = functools.reduce(lambda x, y: x * y, [5, 5, 64, 128])\n weights = self.__variable_with_weight_decay('weights', shape=[5, 5, 64, 128],\n stddev=1.0 / math.sqrt(float(nr_units)), wd=0.0)\n scale = self.__variable_on_cpu('scale', [128], tf.constant_initializer(1.0))\n beta = self.__variable_on_cpu('beta', [128], tf.constant_initializer(0.0))\n\n z = tf.nn.conv2d(conv1, weights, strides=[1, 1, 1, 1], padding='SAME')\n batch_mean, batch_var = tf.nn.moments(z, [0])\n bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, self.EPSILON)\n conv2 = tf.nn.relu(bn, name=scope.name)\n self.__activation_summary(conv2)\n\n # First Pool Layer\n with tf.name_scope('pool1'):\n pool1 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')\n\n # Third Convolutional Layer\n with tf.variable_scope('conv3') as scope:\n nr_units = functools.reduce(lambda x, y: x * y, [5, 5, 128, 256])\n weights = self.__variable_with_weight_decay('weights', shape=[5, 5, 128, 256],\n stddev=1.0 / math.sqrt(float(nr_units)), wd=0.0)\n scale = self.__variable_on_cpu('scale', [256], tf.constant_initializer(1.0))\n beta = self.__variable_on_cpu('beta', [256], tf.constant_initializer(0.0))\n\n z = tf.nn.conv2d(pool1, weights, strides=[1, 1, 1, 1], padding='SAME')\n batch_mean, batch_var = tf.nn.moments(z, [0])\n bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, self.EPSILON)\n conv3 = tf.nn.relu(bn, name=scope.name)\n self.__activation_summary(conv3)\n\n # Fourth Convolutional Layer\n with tf.variable_scope('conv4') as scope:\n nr_units = functools.reduce(lambda x, y: x * y, [3, 3, 256, 256])\n weights = self.__variable_with_weight_decay('weights', shape=[3, 3, 256, 256],\n stddev=1.0 / math.sqrt(float(nr_units)), wd=0.0)\n scale = self.__variable_on_cpu('scale', [256], tf.constant_initializer(1.0))\n beta = self.__variable_on_cpu('beta', [256], tf.constant_initializer(0.0))\n\n z = tf.nn.conv2d(conv3, weights, strides=[1, 1, 1, 1], padding='SAME')\n batch_mean, batch_var = tf.nn.moments(z, [0])\n bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, self.EPSILON)\n conv4 = tf.nn.relu(bn, name=scope.name)\n 
self.__activation_summary(conv4)\n\n # Second Pool Layer\n with tf.name_scope('pool2'):\n pool2 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # First Dropout\n with tf.name_scope('dropout1'):\n dropout1 = tf.nn.dropout(pool2, 0.8)\n\n # Fifth Convolutional Layer\n with tf.variable_scope('conv5') as scope:\n nr_units = functools.reduce(lambda x, y: x * y, [3, 3, 256, 512])\n weights = self.__variable_with_weight_decay('weights', shape=[3, 3, 256, 512],\n stddev=1.0 / math.sqrt(float(nr_units)), wd=0.0)\n scale = self.__variable_on_cpu('scale', [512], tf.constant_initializer(1.0))\n beta = self.__variable_on_cpu('beta', [512], tf.constant_initializer(0.0))\n\n z = tf.nn.conv2d(dropout1, weights, strides=[1, 1, 1, 1], padding='SAME')\n batch_mean, batch_var = tf.nn.moments(z, [0])\n bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, self.EPSILON)\n conv5 = tf.nn.relu(bn, name=scope.name)\n self.__activation_summary(conv5)\n\n # Second Dropout\n with tf.name_scope('dropout2'):\n dropout2 = tf.nn.dropout(conv5, 0.8)\n\n # Sixth Convolutional Layer\n with tf.variable_scope('conv6') as scope:\n nr_units = functools.reduce(lambda x, y: x * y, [3, 3, 512, 512])\n weights = self.__variable_with_weight_decay('weights', shape=[3, 3, 512, 512],\n stddev=1.0 / math.sqrt(float(nr_units)), wd=0.0)\n scale = self.__variable_on_cpu('scale', [512], tf.constant_initializer(1.0))\n beta = self.__variable_on_cpu('beta', [512], tf.constant_initializer(0.0))\n\n z = tf.nn.conv2d(dropout2, weights, strides=[1, 1, 1, 1], padding='SAME')\n batch_mean, batch_var = tf.nn.moments(z, [0])\n bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, self.EPSILON)\n conv6 = tf.nn.relu(bn, name=scope.name)\n self.__activation_summary(conv6)\n\n # Third Pool Layer\n with tf.name_scope('pool3'):\n pool3 = tf.nn.max_pool(conv6, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Third Dropout\n with tf.name_scope('dropout3'):\n dropout3 = tf.nn.dropout(pool3, 0.5)\n\n # First Fully Connected Layer\n with tf.variable_scope('fc1') as scope:\n nr_units = functools.reduce(lambda x, y: x * y, [4608, 2048])\n dropout3_flat = tf.reshape(dropout3, [-1, 4608])\n\n weights = self.__variable_with_weight_decay('weights', shape=[4608, 2048],\n stddev=1.0 / math.sqrt(float(nr_units)), wd=0.0)\n scale = self.__variable_on_cpu('scale', [2048], tf.constant_initializer(1.0))\n beta = self.__variable_on_cpu('beta', [2048], tf.constant_initializer(0.0))\n\n z = tf.matmul(dropout3_flat, weights)\n batch_mean, batch_var = tf.nn.moments(z, [0])\n bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, self.EPSILON)\n fc1 = tf.nn.relu(bn, name=scope.name)\n self.__activation_summary(fc1)\n\n # SoftMax Linear\n with tf.variable_scope('softmax_linear') as scope:\n nr_units = functools.reduce(lambda x, y: x * y, [2048, self.NUM_CLASSES])\n\n weights = self.__variable_with_weight_decay('weights', shape=[2048, self.NUM_CLASSES],\n stddev=1.0 / math.sqrt(float(nr_units)), wd=0.0)\n biases = self.__variable_on_cpu('biases', [self.NUM_CLASSES], tf.constant_initializer(0.0))\n softmax_linear = tf.add(tf.matmul(fc1, weights), biases, name=scope.name)\n self.__activation_summary(softmax_linear)\n\n return softmax_linear\n\n def __loss(self, logits, labels):\n \"\"\"\n Add L2Loss to all the trainable variables.\n Add summary for \"Loss\" and \"Loss/avg\".\n :param logits: Logits from inference().\n :param labels: Labels from distorted_inputs or inputs(). 
1-D tensor of shape [batch_size]\n :return: Loss tensor of type float.\n \"\"\"\n\n # Calculate the average cross entropy loss across the batch.\n labels = tf.cast(labels, tf.int64)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits,\n name='cross_entropy_per_example')\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n tf.add_to_collection('losses', cross_entropy_mean)\n\n # The total loss is defined as the cross entropy loss plus all of the weight decay terms (L2 loss).\n return tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n def __add_loss_summaries(self, total_loss):\n \"\"\"\n Add summaries for losses in CIFAR-10 model.\n Generates moving average for all losses and associated summaries for visualizing the performance of the network.\n :param total_loss: Total loss from loss().\n :return: op for generating moving averages of losses.\n \"\"\"\n\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the same for the averaged\n # version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)\n tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))\n\n return loss_averages_op\n\n def __train(self, total_loss, global_step):\n \"\"\"\n Train CIFAR-10 model.\n Create an optimizer and apply to all trainable variables. Add moving\n average for all trainable variables.\n :param total_loss: Total loss from loss().\n :param global_step: Integer Variable counting the number of training steps processed.\n :return: op for training.\n \"\"\"\n\n # Variables that affect learning rate.\n num_batches_per_epoch = self.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size\n decay_steps = int(num_batches_per_epoch * self.NUM_EPOCHS_PER_DECAY)\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(self.INITIAL_LEARNING_RATE, global_step, decay_steps,\n self.LEARNING_RATE_DECAY_FACTOR, staircase=True)\n tf.contrib.deprecated.scalar_summary('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = self.__add_loss_summaries(total_loss)\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n opt = tf.train.AdamOptimizer(lr)\n grads = opt.compute_gradients(total_loss)\n\n # Apply gradients.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n tf.contrib.deprecated.histogram_summary(var.op.name, var)\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(self.MOVING_AVERAGE_DECAY, global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n train_op = tf.no_op(name='train')\n\n return train_op\n\n\ndef main(argv=None): # pylint: 
disable=unused-argument\n model = ImageRecognition()\n\n if len(sys.argv) != 2:\n print('The program must be run as : python3.5 CNN5.py [train|eval]')\n sys.exit(2)\n else:\n if sys.argv[1] == 'train':\n print('Run Train .....')\n\n if tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.DeleteRecursively(FLAGS.train_dir)\n\n tf.gfile.MakeDirs(FLAGS.train_dir)\n\n model.train()\n\n elif sys.argv[1] == 'eval':\n print('Run Eval .....')\n\n if tf.gfile.Exists(FLAGS.eval_dir):\n tf.gfile.DeleteRecursively(FLAGS.eval_dir)\n\n tf.gfile.MakeDirs(FLAGS.eval_dir)\n\n model.evaluate()\n\n else:\n print('The available options for this script are : train and eval')\n sys.exit(2)\n\n\nif __name__ == \"__main__\":\n FLAGS = tf.app.flags.FLAGS\n\n # Basic model parameters.\n tf.app.flags.DEFINE_integer('batch_size', 128, \"\"\"Number of images to process in a batch.\"\"\")\n tf.app.flags.DEFINE_string('data_dir', 'data', \"\"\"Path to the CIFAR-10 data directory.\"\"\")\n tf.app.flags.DEFINE_boolean('use_fp16', False, \"\"\"Train the model using fp16.\"\"\")\n tf.app.flags.DEFINE_string('train_dir', 'result/CNN5/train_result',\n \"\"\"Directory where to write event logs and checkpoint.\"\"\")\n tf.app.flags.DEFINE_integer('max_steps', 100000, \"\"\"Number of batches to run.\"\"\")\n tf.app.flags.DEFINE_boolean('log_device_placement', False, \"\"\"Whether to log device placement.\"\"\")\n tf.app.flags.DEFINE_string('eval_dir', 'result/CNN5/eval_result', \"\"\"Directory where to write event logs.\"\"\")\n tf.app.flags.DEFINE_string('eval_data', 'test', \"\"\"Either 'test' or 'train_eval'.\"\"\")\n tf.app.flags.DEFINE_string('checkpoint_dir', 'result/CNN5/train_result',\n \"\"\"Directory where to read model checkpoints.\"\"\")\n tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5, \"\"\"How often to run the eval.\"\"\")\n tf.app.flags.DEFINE_integer('num_examples', 10000, \"\"\"Number of examples to run.\"\"\")\n tf.app.flags.DEFINE_boolean('run_once', False, \"\"\"Whether to run eval only once.\"\"\")\n\n tf.app.run()\n","sub_path":"CNN5.py","file_name":"CNN5.py","file_ext":"py","file_size_in_byte":34352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"565043718","text":"__author__ = ('Aitor Blanco Miguez (aitor.blancomiguez@unitn.it), '\n 'Duy Tin Truong (duytin.truong@unitn.it), '\n 'Francesco Asnicar (f.asnicar@unitn.it), '\n 'Moreno Zolfo (moreno.zolfo@unitn.it), '\n 'Francesco Beghini (francesco.beghini@unitn.it)')\n__version__ = '3.0.8'\n__date__ = '7 May 2021'\n\nimport os, sys, re, shutil\nimport subprocess as sb\ntry:\n from .util_fun import info, error\nexcept ImportError:\n from util_fun import info, error\n\n\"\"\"\nExecutes a command\n\n:param cmd: the command to execute\n\"\"\"\ndef execute(cmd):\n inp_f = None\n out_f = sb.DEVNULL\n\n if cmd['stdin']:\n inp_f = open(cmd['stdin'], 'r')\n if cmd['stdout']:\n out_f = open(cmd['stdout'], 'w')\n\n exec_res = sb.run(cmd['command_line'], stdin=inp_f, stdout=out_f)\n if exec_res.returncode == 1:\n error(\"An error occurred while executing an external tool, exiting...\", \n init_new_line=True, exit=True) \n\n if cmd['stdin']:\n inp_f.close()\n if cmd['stdout']:\n out_f.close()
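\n\n# Illustrative note: `execute` consumes the dict produced by compose_command\n# below; a minimal hypothetical call (made-up file names) would look like:\n# execute({'command_line': ['bzip2', '-cdk', 'in.bz2'], 'stdin': None,\n# 'stdout': 'out/decompressed'})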
\n\n\n\"\"\"\nCreates the BLASTn database to align the reference genomes\n\n:param output_dir: the output directory\n:param reference: the FASTA with the reference\n:returns: the created BLASTn database\n\"\"\"\ndef create_blastn_db(output_dir, reference):\n reference_name = os.path.splitext(os.path.basename(reference))[0] \n params = {\n \"program_name\" : \"makeblastdb\",\n \"params\" : \"-parse_seqids -dbtype nucl\",\n \"input\" : \"-in\",\n \"output\" : \"-out\",\n \"command_line\" : \"#program_name# #params# #input# #output#\"\n }\n execute(compose_command(params, input_file=reference, output_file=output_dir+reference_name))\n return output_dir+reference_name\n\n\n\"\"\"\nExecutes BLASTn\n\n:param output_dir: the output directory\n:param clade_markers: the FASTA with the markers\n:param blastn_db: the BLASTn database\n:param nprocs: the number of threads to use\n:returns: the BLASTn output file\n\"\"\"\ndef execute_blastn(output_dir, clade_markers, blastn_db, nprocs=1):\n db_name = os.path.splitext(os.path.basename(blastn_db))[0]\n params = {\n \"program_name\" : \"blastn\",\n \"params\" : \"-outfmt \\\"6 qseqid sseqid qlen qstart qend sstart send\\\" -evalue 1e-10 -max_target_seqs 1\",\n \"input\" : \"-query\",\n \"database\": \"-db\",\n \"output\" : \"-out\",\n \"threads\" : \"-num_threads\",\n \"command_line\" : \"#program_name# #params# #threads# #database# #input# #output#\"\n }\n execute(compose_command(params, input_file=clade_markers, database=blastn_db, \n output_file=output_dir+db_name+\".blastn\", nproc=nprocs))\n return output_dir+db_name+\".blastn\"\n\n\n\"\"\"\nCreates the PhyloPhlAn database\n\n:param output_dir: the output directory\n:param clade: the clade\n\"\"\"\ndef create_phylophlan_db(output_dir, clade):\n markers = output_dir+clade\n params = {\n \"program_name\" :\"phylophlan_setup_database\",\n \"params\" : \"-d \"+clade+\" -e fna -t n --overwrite\",\n \"input\" : \"-i\",\n \"command_line\" : \"#program_name# #input# #params#\"\n }\n execute(compose_command(params, input_file=markers))\n #os.rename(output_dir+clade+\".fna\", markers+\"/\"+clade+\".fna\")\n\n\n\"\"\"\nGenerates the PhyloPhlAn configuration file\n\n:param output_dir: the output directory\n:param configuration: the mapping/alignment/trimming/tree tools to write into the config\n:returns: the generated configuration file\n\"\"\"\ndef generate_phylophlan_config_file(output_dir, configuration):\n conf_file = os.path.join(output_dir, \"phylophlan.cfg\")\n params = {\n \"program_name\" : \"phylophlan_write_config_file\",\n \"params\" : \"-d n --db_dna makeblastdb --map_dna \"+configuration['map']+\n \" --msa \"+configuration['aligner']+\" --trim \"+configuration['trim']+\n \" --tree1 \"+configuration['tree1'], #+\n # configuration['tree2'],\n \"output\" : \"-o\",\n \"command_line\" : \"#program_name# #output# #params#\"\n }\n execute(compose_command(params, output_file=conf_file))\n return conf_file\n\n\n\"\"\"\nExecutes PhyloPhlAn\n\n:param samples_markers_dir: the temporary samples markers directory\n:param conf_file: the PhyloPhlAn configuration file\n:param min_entries: the minimum number of entries to consider a good marker \n:param tmp_dir: the temporary output directory\n:param output_dir: the output directory\n:param clade: the clade\n:param phylogeny_conf: the precision of the phylogenetic analysis\n:param mutation_rates: whether to get the mutation rates for the markers\n:param nprocs: the number of threads to run phylophlan with\n\"\"\"\ndef execute_phylophlan(samples_markers_dir, conf_file, min_entries, tmp_dir, output_dir, \n clade, phylogeny_conf, mutation_rates, nprocs):\n accuracy = \" --{}\".format(phylogeny_conf)\n\n if mutation_rates:\n accuracy = accuracy + \" --mutation_rates\"\n params = {\n \"program_name\" : \"phylophlan\",\n \"params\" : \"-d \"+clade[:30]+\" --data_folder \"+tmp_dir+\n \" --databases_folder \"+tmp_dir+\" -t n -f \"+conf_file+\n \" --diversity low\"+accuracy+\" --genome_extension 
fna\"+\n \" --force_nucleotides --min_num_entries \"+str(min_entries),\n \"input\" : \"-i\",\n \"output_path\" : \"--output_folder\",\n \"output\" : \"-o\",\n \"threads\" : \"--nproc\",\n \"command_line\" : \"#program_name# #input# #output# #output_path# #params# #threads#\"\n }\n execute(compose_command(params=params, input_file=samples_markers_dir,\n output_path=output_dir, output_file=\".\", nproc=nprocs))\n \n\n#ToDo: Parametrize this function: default output_dir, remove the compressed file...\n\"\"\"\nDecompresses a BZ2 file\n\n:param input: the input BZ2 file to decompress\n:param output_dir: the output directory\n:returns: the decompressed file\n\"\"\"\ndef decompress_bz2(input, output_dir):\n n, _ = os.path.splitext(os.path.basename(input))\n params = {\n \"program_name\" : \"bzip2\",\n \"input\" : \"-cdk\",\n \"command_line\" : \"#program_name# #input# > #output#\"\n } \n decompressed_file = output_dir + n \n execute(compose_command(params, input_file=input, output_file=decompressed_file))\n if decompressed_file.endswith('_sam'):\n os.rename(decompressed_file, decompressed_file[:-4] + '.sam')\n decompressed_file = decompressed_file[:-4] + '.sam'\n return decompressed_file\n\n\n\"\"\"\nConverts a SAM file to a BAM file using samtools\n\n:param input: the input SAM file\n:param output_dir: the output directory\n:returns: the BAM file\n\"\"\"\ndef samtools_sam_to_bam(input, output_dir):\n n, _ = os.path.splitext(os.path.basename(input)) \n params = {\n \"program_name\" : \"samtools\",\n \"params\" : \"view\",\n \"input\" : \"-Sb\",\n \"command_line\" : \"#program_name# #params# #input# > #output#\"\n } \n execute(compose_command(params, input_file=input, output_file=output_dir+n+\".bam\"))\n return output_dir+n+\".bam\"\n\n\n\"\"\"\nSorts a BAM file using samtools\n\n:param input: the input BAM file\n:param output_dir: the output directory\n:returns: the sorted BAM file\n\"\"\"\ndef samtools_sort_bam_v0(input, output_dir):\n n, _ = os.path.splitext(os.path.basename(input))\n params = {\n \"program_name\" : \"samtools\",\n \"params\" : \"sort\",\n \"command_line\" : \"#program_name# #params# #input# #output#\"\n } \n execute(compose_command(params, input_file=input, output_file=output_dir+n+\".sorted\"))\n return output_dir+n+\".sorted.bam\"\n\n\n\"\"\"\nSorts a BAM file using samtools\n\n:param input: the input BAM file\n:param output_dir: the output directory\n:returns: the sorted BAM file\n\"\"\"\ndef samtools_sort_bam_v1(input, output_dir):\n n, _ = os.path.splitext(os.path.basename(input)) \n params = {\n \"program_name\" : \"samtools\",\n \"params\" : \"sort\",\n \"output\" : \"-o\",\n \"command_line\" : \"#program_name# #params# #input# #output#\"\n } \n execute(compose_command(params, input_file=input, output_file=output_dir+n+\".sorted.bam\"))\n shutil.move(output_dir+n+\".sorted.bam\", output_dir+n+\".bam\")\n return output_dir+n+\".bam\"\n\n\n\"\"\"\nGenerates a FASTA file with the markers from a MetaPhlAn database\n\n:param database: the MetaPhlAn markers database\n:param output_dir: the output directory\n:returns: the generated FASTA file\n\"\"\"\ndef generate_markers_fasta(database, output_dir):\n db_markers_faa = output_dir+\"db_markers.fna\"\n bowtie_database, _ = os.path.splitext(database)\n params = {\n \"program_name\" : \"bowtie2-inspect\",\n \"command_line\" : \"#program_name# #input# > #output#\"\n }\n execute(compose_command(params, input_file=bowtie_database, output_file=db_markers_faa))\n return db_markers_faa
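\n\n# An illustrative `params` template for compose_command below, mirroring the\n# samtools call above (the values are examples, not an additional API):\n#\n# params = {\n# \"program_name\" : \"samtools\",\n# \"params\" : \"view\",\n# \"input\" : \"-Sb\",\n# \"command_line\" : \"#program_name# #params# #input# > #output#\"\n# }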
params of the command\n:param check: [default=False] check if program is available\n:param input_file: [optional] the input file\n:param database: [optional] the database\n:param output_path: [optional] the output path\n:param output_file: [optional] the output file\n:param nproc: [default=1] the number of procs to use\n\"\"\"\ndef compose_command(params, check=False, input_file=None, database=None, output_path=None, output_file=None, nproc=1):\n program_name = None\n stdin = None\n stdout = None\n environment = os.environ.copy()\n r_output_path = None\n r_output_file = None\n command_line = params['command_line']\n\n if 'program_name' in list(params):\n command_line = command_line.replace('#program_name#', params['program_name'])\n program_name = params['program_name']\n else:\n error('Error: something wrong... '+program_name+' not found!', exit=True)\n\n if check:\n command_line = program_name\n\n if 'version' in list(params):\n command_line = '{} {}'.format(program_name, params['version'])\n else:\n if 'params' in list(params):\n command_line = command_line.replace('#params#', params['params'])\n\n if 'threads' in list(params):\n command_line = command_line.replace('#threads#', '{} {}'.format(params['threads'], nproc))\n\n if output_path:\n r_output_path = output_path\n\n if 'output_path' in list(params):\n command_line = command_line.replace('#output_path#', '{} {}'.format(params['output_path'], output_path))\n else:\n output_file = os.path.join(output_path, output_file)\n\n if input_file:\n inp = input_file\n\n if 'input' in list(params):\n inp = '{} {}'.format(params['input'], input_file)\n\n if '<' in command_line:\n command_line = command_line.replace('<', '')\n command_line = command_line.replace('#input#', '')\n stdin = inp\n else:\n command_line = command_line.replace('#input#', inp)\n\n if database and ('database' in list(params)):\n command_line = command_line.replace('#database#', '{} {}'.format(params['database'], database))\n\n if output_file:\n out = output_file\n r_output_file = output_file\n\n if 'output' in list(params):\n out = '{} {}'.format(params['output'], output_file)\n\n if '>' in command_line:\n command_line = command_line.replace('>', '')\n command_line = command_line.replace('#output#', '')\n stdout = out\n else:\n command_line = command_line.replace('#output#', out)\n\n if 'environment' in list(params):\n new_environment = dict([(var.strip(), val.strip())\n for var, val in [a.strip().split('=') for a in params['environment'].split(',')]])\n environment.update(new_environment)\n\n # find string sourrunded with \" and make them as one string\n quotes = [j for j, e in enumerate(command_line) if e == '\"']\n\n for s, e in zip(quotes[0::2], quotes[1::2]):\n command_line = command_line.replace(command_line[s + 1:e], command_line[s + 1:e].replace(' ', '#'))\n\n return {'command_line': [str(a).replace('#', ' ') for a in re.sub(' +', ' ', command_line.replace('\"', '')).split(' ') if a],\n 'stdin': stdin, 'stdout': stdout, 'env': environment, 'output_path': r_output_path, 'output_file': r_output_file}\n\n","sub_path":"metaphlan/utils/external_exec.py","file_name":"external_exec.py","file_ext":"py","file_size_in_byte":12166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"575454753","text":"class Solution(object):\n def mySqrt(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n if x == 0:\n return 0\n left, right= 1, x\n while(True):\n mid = (right+left)//2\n if mid * mid <= x < (mid + 1) * (mid + 1):\n 
return mid\n elif mid * mid > x:\n right = mid\n else:\n left = mid + 1\n\n\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n a = solution.mySqrt(4)\n # 2147395600 46340\n # 2147483647 46340\n print(a)","sub_path":"068_Sqrt.py","file_name":"068_Sqrt.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"327647274","text":"#!/usr/bin/python\n# Comparative modeling with multiple templates\nfrom modeller import *\nfrom modeller.automodel import * # Load the automodel class\n\nlog.verbose() # request verbose output\nenv = environ() # create a new MODELLER environment to build this model in\n\n# directories for input atom files\nenv.io.atom_files_directory = ['.', '/home/hlim/wiZAN/RhoGAP/templates/']\n\nclass MyModel(automodel):\n def special_restraints(self, aln):\n rsr = self.restraints\n at = self.atoms\n# Add some restraints from a file:\n# rsr.append(file='my_rsrs1.rsr')\n\n# Residues 20 through 30 should be an alpha helix:\n rsr.add(secondary_structure.alpha(self.residue_range('25:', '31:')))\n rsr.add(secondary_structure.alpha(self.residue_range('41:', '53:')))\n rsr.add(secondary_structure.alpha(self.residue_range('67:', '77:')))\n rsr.add(secondary_structure.alpha(self.residue_range('89:', '102:')))\n rsr.add(secondary_structure.alpha(self.residue_range('110:', '121:')))\n rsr.add(secondary_structure.alpha(self.residue_range('125:', '138:')))\n rsr.add(secondary_structure.alpha(self.residue_range('142:', '160:')))\n rsr.add(secondary_structure.alpha(self.residue_range('172:', '175:')))\n rsr.add(secondary_structure.alpha(self.residue_range('188:', '210:')))\n# Two beta-strands:\n rsr.add(secondary_structure.strand(self.residue_range('17:', '22:')))\n rsr.add(secondary_structure.strand(self.residue_range('59:', '63:')))\n# An anti-parallel sheet composed of the two strands:\n# rsr.add(secondary_structure.sheet(at['N:1'], at['O:14'],\n# sheet_h_bonds=-5))\n# Use the following instead for a *parallel* sheet:\n# rsr.add(secondary_structure.sheet(at['N:1'], at['O:9'],\n# sheet_h_bonds=5))\n\n# Restrain the specified CA-CA distance to 10 angstroms (st. dev.=0.1)\n# Use a harmonic potential and X-Y distance group.\n# rsr.add(forms.gaussian(group=physical.xy_distance,\n# feature=features.distance(at['CA:35'],\n# at['CA:40']),\n# mean=10.0, stdev=0.1))\n\na = MyModel(env,\n alnfile = '/home/hlim/wiZAN/RhoGAP/comp155_c0_seq1/align/comp155_c0_seq1_noSig.pir', # alignment filename\n knowns = ('2mbg','2ovj','3wpq'), # codes of the templates\n sequence = 'comp155_c0_seq1') # code of the target\na.starting_model= 101 # index of the first model\na.ending_model = 102 # index of the last model\n # (determines how many models to calculate)\n#aln = alignment(env)\n#aln.append(file='clustalo-Lbtemp_pdbseq.pir', align_codes='all')\n#aln.check()\n\n#Very thorough VTFM optimization:\na.library_schedule = autosched.slow\na.max_var_iterations = 50\n\n#thorough MD optimization:\na.md_level = refine.slow\n\n#repeat the whole cycle 5 times and do not stop unless obj.func. > 1e6\na.repeat_optimization = 10\na.max_molpdf = 1e6\n\na.make() # do the actual comparative modeling\n","sub_path":"RhoGAP/comp155_c0_seq1_old/model4_secondstr.py","file_name":"model4_secondstr.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"408009957","text":"# 1. 
exercise\n\ntest_matrix = [[1, 2, 0], [2, 4, 5], [7, 0, 1]]\n\ndef max_cheese (cheese_matrix):\n    def max_cheese2 (vrstica, stolpec, cheese_matrix):\n        max_vrstica = len(cheese_matrix) - 1\n        max_stolpec = len(cheese_matrix[0]) - 1\n        if (vrstica > max_vrstica) or (stolpec > max_stolpec):\n            return 0\n        else:\n            desno = max_cheese2(vrstica, stolpec + 1, cheese_matrix)\n            spodaj = max_cheese2(vrstica + 1, stolpec, cheese_matrix)\n            our_cheese = cheese_matrix[vrstica][stolpec]\n            our_cheese += max(desno, spodaj)\n            return our_cheese\n    return max_cheese2(0, 0, cheese_matrix)\n\n\n\n\n# 3. exercise\n\nfrom functools import lru_cache\n\narticles = [\n    (\"yoghurt\", 0.39, 0.18),\n    (\"milk\", 0.89, 1.03),\n    (\"coffee\", 2.19, 0.2),\n    (\"butter\", 1.49, 0.25),\n    (\"yeast\", 0.22, 0.042),\n    (\"eggs\", 2.39, 0.69),\n    (\"sausage\", 3.76, 0.50),\n    (\"bread\", 2.99, 1.0),\n    (\"Nutella\", 4.99, 0.75),\n    (\"juice\", 1.15, 2.0)\n]\n\n\ndef best_value(articles, max_w):\n\n    @lru_cache(maxsize=None) # memoization in Python\n    def best_val(w):\n        options = []\n        for item in articles:\n            (name, price, weight) = item\n            if (w - weight) < 0:\n                pass\n            else:\n                option = best_val(w - weight) + price\n                options.append(option)\n        if options:\n            return max(options)\n        else:\n            return 0\n    return best_val(max_w)\n\n\n\ndef best_value_unique(articles, max_w):\n    # taken is a string in which taken[n] = \"0\" marks that the n-th item has not been picked yet\n\n    @lru_cache(maxsize=None)\n    def best_val(w, taken):\n        options = []\n        for i, item in enumerate(articles):\n            (name, price, weight) = item\n            if (w - weight < 0) or (taken[i] == \"1\"):\n                pass\n            else:\n                new_taken = taken[:i] + \"1\" + taken[i+1:]\n                option = best_val(w - weight, new_taken) + price\n                options.append(option)\n        if options:\n            return max(options)\n        else:\n            return 0\n    return best_val(max_w, \"0\" * len(articles))","sub_path":"12-dinamicno-programiranje/vaje/dinamicno_programiranje.py","file_name":"dinamicno_programiranje.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"600328625","text":"from __future__ import print_function\nfrom mbientlab.metawear import MetaWear, libmetawear, parse_value\nfrom mbientlab.metawear.cbindings import *\nfrom time import sleep\nfrom threading import Event\n\nimport platform\nimport sys\nimport RPi.GPIO as GPIO\nimport time\nimport numpy as np\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(2, GPIO.OUT)\n\nprint(\"loading..\")\n\n# Example: https://github.com/mbientlab/MetaWear-SDK-Python/blob/master/examples/stream_acc_gyro.py\n# Run with python3 metawear.py C4:89:ED:7D:03:ED\n\ndef press_button():\n    GPIO.output([2], GPIO.HIGH)\n    time.sleep(0.05)\n    GPIO.output([2], GPIO.LOW)\n\nclass State:\n    def __init__(self, device):\n        self.isLookingDown = False\n        self.device = device\n        self.samples = 0\n        self.callback = FnVoid_VoidP_DataP(self.data_handler)\n\n    def data_handler(self, ctx, data):\n        parsedData = parse_value(data)\n        leftToRight = parsedData.x\n        upAndDown = parsedData.z # -100 is looked down; 100 is looked back up (assuming starting looking straight)\n        # print(upAndDown)\n\n        if (self.isLookingDown is False):\n            if (upAndDown <= -80):\n                self.isLookingDown = True\n                print('isLookingDown: ', self.isLookingDown)\n                press_button()\n\n        if (self.isLookingDown is True):\n            if (upAndDown >= 80):\n                self.isLookingDown = False\n                print('isLookingDown: ', self.isLookingDown)\n                press_button()\n\n        # print(\"%s -> %s\" % (self.device.address, parse_value(data)))\n        self.samples+= 
1\n\n\nstates = []\nfor i in range(len(sys.argv) - 1):\n d = MetaWear(sys.argv[i + 1])\n d.connect()\n print(\"Connected to \" + d.address)\n states.append(State(d))\n\nfor s in states:\n print(\"Configuring device\")\n libmetawear.mbl_mw_settings_set_connection_parameters(s.device.board, 7.5, 7.5, 0, 6000)\n sleep(1.5)\n\n #libmetawear.mbl_mw_acc_set_odr(s.device.board, 100.0)\n #libmetawear.mbl_mw_acc_set_range(s.device.board, 16.0)\n #libmetawear.mbl_mw_acc_write_acceleration_config(s.device.board)\n\n #libmetawear.mbl_mw_gyro_bmi160_set_range(s.device.board, 2000.0);\n #libmetawear.mbl_mw_gyro_bmi160_set_odr(s.device.board, 25.0);\n #libmetawear.mbl_mw_gyro_bmi160_write_config(s.device.board);\n\n #acc = libmetawear.mbl_mw_acc_get_acceleration_data_signal(s.device.board)\n #libmetawear.mbl_mw_datasignal_subscribe(acc, None, s.callback)\n\n gyro = libmetawear.mbl_mw_gyro_bmi160_get_rotation_data_signal(s.device.board)\n libmetawear.mbl_mw_datasignal_subscribe(gyro, None, s.callback)\n\n #libmetawear.mbl_mw_acc_enable_acceleration_sampling(s.device.board)\n #libmetawear.mbl_mw_acc_start(s.device.board)\n\n libmetawear.mbl_mw_gyro_bmi160_enable_rotation_sampling(s.device.board)\n libmetawear.mbl_mw_gyro_bmi160_start(s.device.board)\n\ntry:\n while True:\n # do nothing and let the script run\n x = 1\nexcept KeyboardInterrupt:\n GPIO.output([2], GPIO.LOW)\n for s in states:\n libmetawear.mbl_mw_acc_stop(s.device.board)\n libmetawear.mbl_mw_acc_disable_acceleration_sampling(s.device.board)\n\n libmetawear.mbl_mw_gyro_bmi160_stop(s.device.board)\n libmetawear.mbl_mw_gyro_bmi160_disable_rotation_sampling(s.device.board)\n\n acc = libmetawear.mbl_mw_acc_get_acceleration_data_signal(s.device.board)\n libmetawear.mbl_mw_datasignal_unsubscribe(acc)\n\n gyro = libmetawear.mbl_mw_gyro_bmi160_get_rotation_data_signal(s.device.board)\n libmetawear.mbl_mw_datasignal_unsubscribe(gyro)\n\n libmetawear.mbl_mw_debug_disconnect(s.device.board)\n\n print(\"Total Samples Received\")\n for s in states:\n print(\"%s -> %d\" % (s.device.address, s.samples))\n\n\n","sub_path":"metawear-and-tv.py","file_name":"metawear-and-tv.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"563217835","text":"#!/bin/python3\n\nimport sys\nimport re\n\nN = int(input().strip())\nres = []\nfor a0 in range(N):\n firstName,emailID = input().strip().split(' ')\n firstName,emailID = [str(firstName),str(emailID)]\n if re.search('@gmail\\.com$', emailID):\n res.append(firstName)\nprint(*sorted(res), sep = \"\\n\") # use * before a list to unpack a list\n ","sub_path":"Day28_RegEx.py","file_name":"Day28_RegEx.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"226411314","text":"import time\nimport numpy as np\nimport tensorflow as tf\nimport cv2\n\ndef Spatial_CNN(input, is_training=False):\n '''SPATIAL DENOISING CNN'''\n with tf.variable_scope('block1'):\n output = tf.layers.conv2d(input, 128, 3, padding='same', activation=tf.nn.relu)\n for layers in xrange(2, 19+1):\n with tf.variable_scope('block%d' % layers):\n output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)\n output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training)) \n with tf.variable_scope('block20'):\n output = tf.layers.conv2d(output, 3, 3, padding='same',use_bias=False)\n return input - output\n\ndef 
Temp3_CNN(input):\n '''TEMPORAL DENOISING CNN'''\n input_middle = input[:,:,:,3:6]\n with tf.variable_scope('temp-block1'):\n output = tf.layers.conv2d(input, 128, 3, padding='same', activation=tf.nn.leaky_relu)\n for layers in xrange(2, 19+1):\n with tf.variable_scope('temp-block%d' % layers):\n output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False, activation=tf.nn.leaky_relu)\n with tf.variable_scope('temp-block20'):\n output = tf.layers.conv2d(output, 3, 3, padding='same')\n return input_middle - output\n \nclass vidcnn(object):\n def __init__(self, sess):\n self.sess = sess\n self.Y_ = tf.placeholder(tf.float32, [None, None, None, 3],name='clean_image')\n self.X = tf.placeholder(tf.float32, [None, None, None, 3],name='noisy_image')\n self.Y = Spatial_CNN(self.X)\n self.Y_frames = tf.placeholder(tf.float32, [None, None, None, 9],name='clean_frames')\n self.Xframes = tf.placeholder(tf.float32, [None, None, None, 9],name='noisy_frames')\n self.Yframes = Temp3_CNN(self.Xframes) \n init = tf.global_variables_initializer()\n self.sess.run(init)\n print(\"[*] Initialize model successfully...\")\n\n\n def test(self, noisy_data, orig_data, ckpt_dir, save_dir):\n \"\"\"Test VidCNN\"\"\"\n # init variables\n tf.global_variables_initializer().run() \n assert len(noisy_data) != 0, '[!] No test data in the specified folder! Check that contains an original and noisy folder.'\n load_model_status = self.load(ckpt_dir)\n assert load_model_status == True, '[!] Load weights FAILED! Check the checkpoint folder.' \n\n psnr_sum = 0\n start = time.time()\n for idx in xrange(len(noisy_data)-1): \n if idx==0:\n test = cv2.imread(orig_data[idx])\n test1 = cv2.imread(orig_data[idx+1])\n test2 = cv2.imread(orig_data[idx+2])\n noisy = cv2.imread(noisy_data[idx])\n noisy1 = cv2.imread(noisy_data[idx+1])\n noisy2 = cv2.imread(noisy_data[idx+2])\n \n test = test.astype(np.float32) / 255.0\n test1 = test1.astype(np.float32) / 255.0\n test2 = test2.astype(np.float32) / 255.0\n noisy = noisy.astype(np.float32) / 255.0\n noisy1 = noisy1.astype(np.float32) / 255.0\n noisy2 = noisy2.astype(np.float32) / 255.0\n \n noisyin2 = np.zeros((1,test.shape[0],test.shape[1],9)) \n current = np.zeros((test.shape[0],test.shape[1],3)) \n previous = np.zeros((test.shape[0],test.shape[1],3)) \n \n noisyin = np.zeros((3,test.shape[0],test.shape[1],3))\n noisyin[0] = noisy\n noisyin[1] = noisy1\n noisyin[2] = noisy2 \n out = self.sess.run([self.Y],feed_dict={self.X:noisyin})\n out = np.asarray(out)\n\n noisyin2[0,:,:,0:3] = out[0,0]\n noisyin2[0,:,:,3:6] = out[0,0]\n noisyin2[0,:,:,6:] = out[0,1]\n temp_clean_image= self.sess.run([self.Yframes],feed_dict={self.Xframes:noisyin2})\n temp_clean_image = np.asarray(temp_clean_image)\n cv2.imwrite(save_dir + '/%04d.png'%idx,temp_clean_image[0,0]*255)\n psnr = psnr_scaled(test,temp_clean_image[0,0])\n psnr_sum += psnr\n print(\"Frame %d PSNR: %f\" % (idx, psnr))\n\n noisyin2[0,:,:,0:3] = out[0,0]\n noisyin2[0,:,:,3:6] = out[0,1]\n noisyin2[0,:,:,6:] = out[0,2]\n current[:,:,:] = out[0,2,:,:,:]\n previous[:,:,:] = out[0,1,:,:,:]\n else:\n if idx<(len(noisy_data)-2):\n test3 = cv2.imread(orig_data[idx+2])\n test3 = test3.astype(np.float32) / 255.0\n noisy3 = cv2.imread(noisy_data[idx+2])\n noisy3 = noisy3.astype(np.float32) / 255.0\n \n out2 = self.sess.run([self.Y],feed_dict={self.X:np.expand_dims(noisy3,0)})\n out2 = np.asarray(out2)\n \n noisyin2[0,:,:,0:3] = previous\n noisyin2[0,:,:,3:6] = current\n noisyin2[0,:,:,6:] = out2[0,0]\n previous = current\n 
current = out2[0,0]\n            else:\n                try:\n                    out2\n                except NameError:\n                    out2 = np.zeros((out.shape))\n                    out2=out\n                out2[0,0]=out[0,2]\n                noisyin2[0,:,:,0:3] = current\n                noisyin2[0,:,:,3:6] = out2[0,0]\n                noisyin2[0,:,:,6:] = out2[0,0]\n            temp_clean_image= self.sess.run(\n                [self.Yframes],feed_dict={self.Xframes:noisyin2}) \n            \n            temp_clean_image = np.asarray(temp_clean_image)\n            cv2.imwrite(save_dir + '/%04d.png'%(idx+1),temp_clean_image[0,0]*255)\n\n            # calculate PSNR\n            if idx==0:\n                psnr = psnr_scaled(test1, temp_clean_image[0,0])\n            else:\n                psnr = psnr_scaled(test2, temp_clean_image[0,0])\n                try: #need this when testing with only 3 frames\n                    test3\n                except NameError:\n                    test3=test2\n                test2=test3\n            \n            print(\"Frame %d PSNR: %f\" % (idx+1, psnr))\n            psnr_sum += psnr\n        avg_psnr = psnr_sum / len(orig_data)\n        print(\"### Average PSNR %.2f\" % avg_psnr)\n        print(\"### Elapsed time: %.4f\" %(time.time()-start))\n\n\n    def load(self, checkpoint_dir):\n        print(\"[*] Reading checkpoint...\")\n        saver = tf.train.Saver()\n        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n        if ckpt and ckpt.model_checkpoint_path:\n            full_path = tf.train.latest_checkpoint(checkpoint_dir)\n            saver.restore(self.sess, full_path)\n            return True\n        else:\n            return False\n\n    \ndef psnr_scaled(im1, im2): # PSNR function for 0-1 values\n    mse = ((im1 - im2) ** 2).mean()\n    mse = mse * (255 ** 2)\n    psnr = 10 * np.log10(255 **2 / mse)\n    return psnr","sub_path":"model_vidcnn.py","file_name":"model_vidcnn.py","file_ext":"py","file_size_in_byte":7151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"603119482","text":"#!/usr/bin/env python3\n#28 May 2018, Capstone exercise no. 1, 'Inventory Tracker'\n\n####GOALS####\n#Create a program that assumes inventory.txt will be local to where it was launched\n#Each line in inventory.txt contains a real world thing and the location of the thing (Dictionary)\n#Create 2 functions callable by the user from a repeating menu (while __ != 'q' loop)\n#create a new entry\n#search inventory for an entry, remove item if prompted by user\n\n#How I intend this to work:\n#Open dictionary from file, edit in program (NOT edit file directly)\n#append changes, display updated inventory when user enters 'q'\n\nimport json #thing from API lab that read dictionary from remote file\nInvDict = 'inventory.txt' #make calling the file easier in rest of script\nnewDict = {} #blank dictionary for adding new things to parent dictionary\n\n#declare our functions\ndef new_item(name, box): #function to add new item to inventory\n    with open(InvDict) as working:\n\t    newDict = json.loads(working.read())\n    newDict[name] = box\n    with open(InvDict, 'w') as working:\n        working.write(json.dumps(newDict))\n    print(name + ' successfully added to inventory.')\n\ndef search_item(IName): #function to search for an item (key) and return the location (value)\n    with open(InvDict) as working:\n\t    newDict = json.loads(working.read())\n    if newDict.get(IName) is None:\n        print('Item not found! Please try again!')\n    else:\n        print('The item you were looking for is in ' + newDict.get(IName) + '.')\n        udel = input('Enter D to delete this item from the inventory \\n Or hit \\'Enter\\'')\n        if udel.upper() == 'D':\n            newDict.pop(IName)\n            with open(InvDict, 'w') as working:\n                working.write(json.dumps(newDict))\n            print('Item deleted')\n        else:\n            print('')\n\n#functions are declared, blank variables set up, now make the program\n\nprint('Welcome to the inventory reader program. 
This tool will tell you where to find things that you have packed into boxes for your move.')\nwhile True: #infinite loop to cycle through until user hits 'q'\n action = input('What can I help you with? \\n \\'q\\' to quit \\n \\'s\\' to search \\n \\'a\\' to add \\n')\n if action.lower() == 'q':\n print('Thank you, I hope this was helpful')\n break\n elif action.lower() == 's':\n IName = input('What item do you need to find?')\n search_item(IName)\n elif action.lower() == 'a':\n name = input('What item are you adding?')\n box = input('What box did you put it in?')\n new_item(name, box)\n else:\n print('Invalid selection. Please try again.')\nprint('Thank you for using this tool today. The most up to date version of the inventory is:')\nwith open(InvDict) as working:\n newDict = json.loads(working.read())\n for item in newDict:\n print(item, newDict[item])\n","sub_path":"wk1/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"616238907","text":"from __future__ import print_function\nimport pandas as pd\nimport gtf_related\nimport argparse\n\n''' usage:\np2 -i get_orth_info_in_dgri_for_dmel_gene.py -g Ilp1 -a y\np2 -i get_orth_info_in_dgri_for_dmel_gene.py -g Ilp1 -b y\n'''\n\nparser = argparse.ArgumentParser(description='please provide samples')\nparser.add_argument('-sp1', '--species1', default=\"dmel\", type=str) # dmel, aste, dgri\nparser.add_argument('-sp2', '--species2', default=\"dgri\", type=str) # dmel, aste, dgri\nparser.add_argument('-F', '--FBgn_ID', type=str)\nparser.add_argument('-g', '--gene_symbol', type=str)\nparser.add_argument('-a', '--axt_check', default=\"n\", type=str)\nparser.add_argument('-b', '--biomart_check', default=\"n\", type=str)\nargs = parser.parse_args()\n\nspecies1 = args.species1\nspecies2 = args.species2\n\n\n''' get both FBgn_ID and gene_symbol ready '''\nif args.FBgn_ID:\n FBgn_ID = args.FBgn_ID\n gene_symbol = gtf_related.get_gene_symbol_from_FBgn_ID(FBgn_ID)\nelif args.gene_symbol:\n gene_symbol = args.gene_symbol \n FBgn_ID = gtf_related.get_FBgn_ID_from_gene_symbol(gene_symbol)\n\n''' get gene range plus extension '''\ngtfDB = gtf_related.GtfDatabase(species1).get()\ngene=gtfDB[FBgn_ID]\nchrom0 = gene.seqid\nstart0 = gene.start\nend0 = gene.end\n\nprint(\"####################### DMEL INFO ###########################\")\nprint(FBgn_ID, gene_symbol, chrom0, start0, end0)\n\n\nif args.biomart_check == \"y\": \n df1 = pd.read_csv(\"biomart/dgri.dmel.biomart.txt\", sep=\"\\t\")\n df1_part = df1[df1[\"Drosophila melanogaster gene stable ID\"] == FBgn_ID]\n print(\"####################### BIOMART ###########################\")\n print(df1_part)\n \n lst_FBgn_ID_dgri = list(df1_part[\"Gene stable ID\"])\n\n df2 = pd.read_csv(\"htseq/mean_FPKM.golden64.muller.txt\", sep=\"\\t\",index_col=0)\n print(\"#################### DGRI TISSUES #########################\")\n for FBgn_ID_dgri in lst_FBgn_ID_dgri:\n print(df2[df2.index == FBgn_ID_dgri])\n\nif args.axt_check == \"y\": \n import axt_related\n axtDB = axt_related.AxtDatabase(species1, species2)\n alignments = axtDB.getAlignmentsOverlappedWithThisRange1(chrom0,start0,end0)\n print(alignments)\n\n \n","sub_path":"CRoS/get_orth_info_in_dgri_for_dmel_gene.py","file_name":"get_orth_info_in_dgri_for_dmel_gene.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
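The orthology lookup in get_orth_info_in_dgri_for_dmel_gene.py above hinges on one pandas idiom: boolean-mask filtering of the BioMart export (df1[df1["Drosophila melanogaster gene stable ID"] == FBgn_ID]). A minimal self-contained sketch of that pattern, with made-up rows standing in for the real biomart/dgri.dmel.biomart.txt table (only the two column names are taken from the script; the IDs are illustrative):

import pandas as pd

# Hypothetical two-row stand-in for the BioMart export read in the script above.
df1 = pd.DataFrame({
    "Drosophila melanogaster gene stable ID": ["FBgn0000001", "FBgn0000002"],
    "Gene stable ID": ["FBgn_dgri_A", "FBgn_dgri_B"],
})

# Boolean mask: keep only the rows whose D. melanogaster ID matches the query gene.
df1_part = df1[df1["Drosophila melanogaster gene stable ID"] == "FBgn0000001"]
print(list(df1_part["Gene stable ID"]))  # -> ['FBgn_dgri_A']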
+{"seq_id":"258218654","text":"\"\"\"\nHandles interacting with and parsing a json config file containing\ncluster credentials and other configuration information like\nbinary locations.\n\"\"\"\n\nimport json\n\nclass Config:\n \"\"\"\n A simple class to wrap accessing the configuration information needed\n by the modules in this package.\n\n The file is loaded and parsed on construction.\n\n A sample configuration file looks like::\n\n {\n \"clusters\": [\n {\n \"mvip\": \"some-mvip\",\n \"username\": \"admin\",\n \"password\": \"admin\"\n }\n ],\n \"binaries_path\": \"../binaries/\",\n \"remote_binaries_path\": \"192.168.x.y:8080\"\n }\n\n Each object in the `clusters` array specifies the credentials for one cluster.\n The `binaries_path` specifies the location of sfapp binaries for running c++ balancing algorithms.\n\n Args:\n file_path (str): Path to the configuration file.\n \"\"\"\n\n _file_path = \"\"\n _cluster_creds = []\n _binaries_path = \"\"\n _remote_binaries_path = \"\"\n\n def __init__(self, file_path):\n self._file_path = file_path\n self.reload()\n\n def reload(self):\n \"\"\"Reloads the contents of the config file into local memory.\"\"\"\n with open(self._file_path) as config_file:\n config = json.load(config_file)\n\n self._cluster_creds = config[\"clusters\"]\n self._binaries_path = config.get(\"binaries_path\", \"../binaries/\")\n self._remote_binaries_path = config.get(\"remote_binaries_path\", \"\")\n\n def get_cluster_credentials(self, mvip):\n \"\"\"Search the contents of the configuration file for cluster credentials.\n\n Args:\n mvip (str): Find and return the credentials for this mvip, if present in the configuration file.\n\n Returns:\n (str, str): (username, password) credentials for the cluster with the supplied mvip, if found.\n \"\"\"\n self.reload()\n\n for cluster in self._cluster_creds:\n if cluster[\"mvip\"] == mvip:\n return cluster[\"username\"], cluster[\"password\"]\n\n raise Exception(\"mvip {0} not found in cluster configuration.\".format(mvip))\n\n def get_binaries_path(self):\n \"\"\"Get the path to sfapp binaries from the configuration file.\n\n Returns:\n str: Path to sfapp binaries as specified in the configuration file.\n \"\"\"\n return self._binaries_path\n\n def get_remote_binaries_path(self):\n \"\"\"Get the path to remote sfapp binaries from the configuration file.\n\n Returns:\n str: Path to remote sfapp binaries as specified in the configuration file.\n \"\"\"\n return self._remote_binaries_path\n","sub_path":"matilda_scripts/slicecommander/python/slicecommand/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"165408385","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nsample_rate = 100 * 2\nfreq = 5\nfreq2 = 7\nx = np.arange(sample_rate)\ny = np.sin(2 * np.pi * freq * x / sample_rate)\nplt.plot(x + 50, y)\nplt.xlabel('time')\nplt.ylabel('sample(n)')\nplt.show()\n\n\n# from pylab import *\n#\n# sample_rate = .001\n# f0, f1 = 10, 20\n# t_change = 2\n#\n# times = arange(0, 4, sample_rate)\n#\n# ramp = 1./(1+exp(-6.*(times-t_change)))\n# freq = f0*(1-ramp)+f1*ramp\n# phase_correction = add.accumulate(times*concatenate((zeros(1), 2*pi*(freq[:-1]-freq[1:]))))\n#\n# figure()\n# subplot(311)\n# plot(times, freq)\n# subplot(312)\n# plot(times, sin(2*pi*freq*times))\n# subplot(313)\n# plot(times, sin(2*pi*freq*times+phase_correction))\n#\n# 
show()\n","sub_path":"audio/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"206073850","text":"# 检测故指的闪灯和翻牌\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom scipy import ndimage\r\n\r\n# 抓取或者加载图片\r\nimg_ori = cv2.imread('D:/test/device/yuanshi.jpeg')\r\nimg_fp = cv2.imread('D:/test/device/fanpai1.jpeg')\r\nimg_fpld = cv2.imread('D:/test/device/liangdneg2.jpeg')\r\n\r\n# 标定目标区域\r\ndev1_ori = img_ori[590:710, 30:190]\r\ndev2_ori = img_ori[700:860, 900:1080]\r\ndev3_ori = img_ori[850:970, 1860:2000]\r\n\r\ndev1_fp = img_fp[850:970, 30:190]\r\ndev2_fp = img_fp[720:880, 820:1000]\r\ndev3_fp = img_fp[610:730, 1860:2000]\r\n\r\ndev1_fpld = img_fpld[850:970, 30:190]\r\ndev2_fpld = img_fpld[720:880, 820:1000]\r\ndev3_fpld = img_fpld[610:730, 1860:2000]\r\n\r\ndef ShowImageRect(dev):\r\n if dev == 0:\r\n cv2.imshow(\"dev1\", dev1_ori)\r\n cv2.imshow(\"dev1fp\", dev1_fp)\r\n cv2.imshow(\"dev1ld\", dev1_fpld)\r\n print(dev1_ori.shape)\r\n elif dev == 1:\r\n cv2.imshow(\"dev2\", dev2_ori)\r\n cv2.imshow(\"dev2fp\", dev2_fp)\r\n cv2.imshow(\"dev2ld\", dev2_fpld)\r\n print(dev2_ori.shape)\r\n elif dev == 2:\r\n cv2.imshow(\"dev3\", dev3_ori)\r\n cv2.imshow(\"dev3fp\", dev3_fp)\r\n cv2.imshow(\"dev3ld\", dev3_fpld)\r\n print(dev3_ori.shape)\r\n\r\n\r\n# 亮灯检测方法1 - 亮度比对\r\n'''\r\ndef filter_brightness_low(src_img, dst_img, bsize, limit):\r\n gray = cv2.cvtColor(src_img.copy(), cv2.COLOR_BGR2GRAY)\r\n blur = cv2.GaussianBlur(gray, bsize, 0)\r\n ret, thresh1 = cv2.threshold(blur, limit, 255, 0)\r\n\r\n gray = cv2.cvtColor(dst_img.copy(), cv2.COLOR_BGR2GRAY)\r\n blur = cv2.GaussianBlur(gray, bsize, 0)\r\n ret, thresh2 = cv2.threshold(blur, limit, 255, 0)\r\n\r\n img, count = thresh2-thresh1, 0\r\n for i in img[img >= 255]:\r\n count += 1\r\n print('high bright count:{:d}'.format(count))\r\n return img, count\r\n\r\ndef filter_brightnesslow(src_img, bsize, limit):\r\n gray = cv2.cvtColor(src_img.copy(), cv2.COLOR_BGR2GRAY)\r\n blur = cv2.GaussianBlur(gray, bsize, 0)\r\n ret, thresh = cv2.threshold(blur, limit, 255, 0)\r\n count = 0\r\n for i in thresh[thresh >= 255]:\r\n count += 1\r\n print('high bright count:{:d}'.format(count))\r\n return thresh, count\r\n\r\nShowImageRect(1)\r\n\r\nimg1, n = filter_brightnesslow(dev2_ori, (17,17), 150)\r\ncv2.imshow(\"dev201\", img1)\r\n\r\nimg2, n = filter_brightnesslow(dev2_fp, (17,17), 150)\r\ncv2.imshow(\"dev202\", img2)\r\n\r\nimg3, n = filter_brightnesslow(dev2_fpld, (17,17), 150)\r\ncv2.imshow(\"dev203\", img3)\r\n'''\r\n\r\n# 翻牌检测方法1 过滤其他颜色\r\ndef filter_without_red(src_img, min_val, max_val=255):\r\n point = 0\r\n image = np.zeros(src_img.shape, np.uint8)\r\n for i in range(src_img.shape[0]):\r\n for j in range(src_img.shape[1]):\r\n if src_img[i][j][2] > min_val and src_img[i][j][2] <= max_val:\r\n if src_img[i][j][0] < min_val and src_img[i][j][1] < min_val:\r\n image[i][j][2] = 255\r\n point += 1\r\n print('red point count:{:d}'.format(point))\r\n return image,point\r\n\r\nShowImageRect(0)\r\n\r\nimg1,n = filter_without_red(dev1_ori, 65)\r\ncv2.imshow(\"dev201\", img1)\r\n\r\nimg2,n = filter_without_red(dev1_fp, 65)\r\ncv2.imshow(\"dev202\", img2)\r\n\r\nimg3,n = filter_without_red(dev1_fpld, 65)\r\ncv2.imshow(\"dev203\", img3)\r\n\r\n\r\n\r\n'''\r\ngray = cv2.cvtColor(dev1, cv2.COLOR_BGR2GRAY)\r\nmimg = cv2.medianBlur(gray, 5)\r\ncimg = cv2.cvtColor(mimg,cv2.COLOR_GRAY2BGR)\r\n\r\ncircles = 
cv2.HoughCircles(mimg,cv2.HOUGH_GRADIENT,1,100,\r\n param1=100,param2=30,minRadius=10,maxRadius=60)\r\n\r\nprint(circles)\r\n#circles = np.uint16(np.around(circles))\r\n\r\nif circles is not None:\r\n print(circles)\r\n circles = np.uint16(np.around(circles))\r\n for i in circles[0,:]:\r\n # draw the outer circle\r\n cv2.circle(dev1, (i[0], i[1]), i[2], (255, 0, 0), 2)\r\n # draw the center of the circle\r\n # cv2.circle(img,(i[0],i[1]),2,(0,0,255),3)\r\n\r\n# cv2.imwrite(\"planets_circles.jpg\", mimg)\r\ncv2.imshow(\"HoughCirlces\", dev1)\r\n'''\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n","sub_path":"python3/example/FI-detect/DI_Detect2.py","file_name":"DI_Detect2.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"312271505","text":"import KratosMultiphysics as KM\nfrom KratosMultiphysics.CoSimulationApplication.structural_mechanics_analysis_with_co_sim_io import StructuralMechanicsAnalysisWithCoSimIO\nfrom sys import argv\n\nif __name__ == '__main__':\n if len(argv) != 2:\n err_msg = 'Wrong number of input arguments!\\n'\n err_msg += 'Use this script in the following way:\\n'\n err_msg += ' \"python structural_mechanics_analysis_with_co_sim_io.py .json\"\\n'\n raise Exception(err_msg)\n\n parameter_file_name = argv[1]\n\n with open(parameter_file_name,'r') as parameter_file:\n parameters = KM.Parameters(parameter_file.read())\n\n model = KM.Model()\n StructuralMechanicsAnalysisWithCoSimIO(model, parameters).Run()\n","sub_path":"applications/CoSimulationApplication/tests/testing_structural_mechanics_analysis_with_co_sim_io.py","file_name":"testing_structural_mechanics_analysis_with_co_sim_io.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"374801684","text":"class Solution(object):\n def kthSmallest(self, root, k):\n if not root:\n return 0\n from collections import deque\n stack = deque()\n\n curr = root\n\n def collectleftnodes(node, stack):\n while node:\n stack.append(node)\n node = node.left\n\n collectleftnodes(curr, stack)\n ##利用二叉树的性质, 最左边的树都是小于右边的 + 再利用上stack的后进先出的特点, 第一个访问的点肯定是最小的点 然后依次增大\n result = 0\n while k > 0:\n node = stack.pop()\n result = node.val\n if node.right:\n node = node.right\n collectleftnodes(node, stack)\n k -= 1\n return result\n\n","sub_path":"binarytreeandrecursive/230-kthsamllestelementinbst.py","file_name":"230-kthsamllestelementinbst.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"27196702","text":"from django.contrib import messages\nfrom django.http import HttpResponseRedirect\n\ndecorator_with_arguments = lambda decorator: lambda *args, **kwargs: lambda func: decorator(func, *args, **kwargs)\n\n\n@decorator_with_arguments\ndef custom_permission_required(function, perm):\n def _function(request, *args, **kwargs):\n if request.user.has_perm(perm):\n return function(request, *args, **kwargs)\n else:\n mensaje = \"ACCESO RESTRINGIDO | Permiso Requerido: \"+str(perm)\n messages.add_message(request, messages.ERROR, mensaje, extra_tags='danger')\n try: url = request.META['HTTP_REFERER']\n except: url = '/' \n\n return HttpResponseRedirect(url)\n return _function","sub_path":"apps/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
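The one-line decorator_with_arguments lambda in apps/decorators.py above compresses three nested closures, which makes it hard to read. A small sketch of how it expands, kept outside Django; only the lambda itself is copied from the record, while check_level, its print, and view are hypothetical names used for illustration:

decorator_with_arguments = lambda decorator: lambda *args, **kwargs: lambda func: decorator(func, *args, **kwargs)

@decorator_with_arguments
def check_level(function, level):
    # 'function' is the wrapped callable; 'level' is the decorator argument.
    def _function(*args, **kwargs):
        print('required level:', level)
        return function(*args, **kwargs)
    return _function

@check_level(3)  # expands to decorator_with_arguments(check_level)(3) applied to view
def view():
    return 'ok'

print(view())  # prints "required level: 3", then "ok"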
+{"seq_id":"556274054","text":"\r\nlista = ['oscreem', 'ueq', 'la', 'óncodificaci', 'ñaense', 'eshabilidad', 'eld', 'losig', '21', 'osusam', 'la', \r\n         'óncodificaci', 'moco', 'nau', 'taherramien', 'rapa', 'armostr', 'moco'] # original list\r\ndef traductor(arg):\r\n    \"\"\" The function that will translate the codes \"\"\"\r\n    letra = list(arg) # make an array of the letters of the given word\r\n    if len(letra) < 2: # with 2 letters the loop has no effect; a single letter is returned unchanged\r\n        return arg\r\n    for i in range (0,2): # the loop sends the first letter to the back and shifts all the others to the left\r\n        letra.append( letra[0] )\r\n        letra.pop(0)\r\n    resultado = ''.join(letra) # when the loop ends, the letters are joined again to return the translated word\r\n    return resultado\r\n\r\n    \r\n\r\nfor i in range(0, len(lista) ): # loop to translate every word in the list; then join to get the sentence and print it\r\n    lista [i] = traductor( lista[i] )\r\noracion = ' '.join(lista)\r\nprint(oracion)\r\n\r\n\r\n    \r\n\r\n\r\n\r\n","sub_path":"Ejercicio 2 Traductor.py","file_name":"Ejercicio 2 Traductor.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"367528422","text":"from django.conf.urls import patterns, url\nfrom liushuizhang_site import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^(?P\d+)/$', views.article, name='article'),\n\turl(r'^timeline/$',views.timeline,name='timeline'),\n\turl(r'^90s/$',views.ninty,name='90s'),\n\t# user auth urls\n\turl(r'^login/$',views.login),\n\turl(r'^auth/$',views.auth_view),\n\turl(r'^logout/$',views.auth_view),\n\turl(r'^loggedin/$',views.loggedin),\n\turl(r'^invalid/$',views.invalid_login),\n\turl(r'^register/$',views.register_user),\n\turl(r'^register_success/$',views.register_success),\n\turl(r'^create/$',views.create),\n\n]\n","sub_path":"liushuizhang_site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"499653656","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nimport joblib\nimport traceback\nimport pandas as pd\nimport numpy as np\n\napp = FastAPI()\n\nclass People(BaseModel):\n    model_name: str\n    input: list\n    \n@app.post('/predict')\nasync def predict(people: People):\n    try:\n        data = people.input\n        lr = joblib.load(\"model.pkl\")\n        model_columns = joblib.load(\"model_columns.pkl\")\n        query = pd.get_dummies(pd.DataFrame(data))\n        query = query.reindex(columns=model_columns, fill_value=0)\n        prediction = list(lr.predict(query))\n        print(prediction)\n        return {'success': True, 'prediction': str(prediction)}\n    except:\n        return {'success': False, 'trace': traceback.format_exc()}","sub_path":"blogs/2020年/7月2日:对比Flask和FastAPI封装机器学习模型接口/code/flask_vs_fastapi/fastapi_api.py","file_name":"fastapi_api.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"122369105","text":"# from typing import Callable\n# from typing import TypeVar, Generic, NewType\n# UserId = NewType('next', Node)\n# T = TypeVar('T')\n\n# from com.jqc.abstractList import AbstractList\n\"\"\"\nSingly linked list\n\"\"\"\nfrom com.jqc.list.abstractList import AbstractList\n\n\nclass Node(object):\n\tdef __init__(self, element=None, next_node=None):\n\t\tself.element = 
element\n\t\tself.next = next_node\n\t\n\tdef __str__(self):\n\t\treturn str(self.element)\n\n\nclass SingleLinkList(AbstractList):\n\t\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.__first = None\n\t\n\tdef __str__(self):\n\t\t\"\"\"\n\t\tCustom string representation\n\t\t:return:\n\t\t\"\"\"\n\t\tstring = 'size=' + str(self._size) + \", [\"\n\t\tnode = self.__first\n\t\tfor i in range(self._size):\n\t\t\tif i != 0:\n\t\t\t\tstring += ','\n\t\t\tstring += str(node.element)\n\t\t\tnode = node.next\n\t\tstring += ']'\n\t\treturn string\n\t\n\tdef index_of(self, element):\n\t\t\"\"\"\n\t\tGet the index of a given element in the linked list\n\t\t:param element:\n\t\t:return:\n\t\t\"\"\"\n\t\tnode = self.__first\n\t\tfor i in range(self._size):\n\t\t\tif node.element == element:\n\t\t\t\treturn i\n\t\t\tnode = node.next\n\t\treturn -1\n\t\n\tdef get(self, index):\n\t\t\"\"\"\n\t\tGet the element at the given index\n\t\t:param index:\n\t\t:return:\n\t\t\"\"\"\n\t\tnode = self.__node(index)\n\t\treturn node.element\n\t\n\tdef set(self, index, element):\n\t\t\"\"\"\n\t\tReplace the element at the given index\n\t\t:param index: the index\n\t\t:param element: the new element\n\t\t:return:\n\t\t\"\"\"\n\t\tnode = self.__node(index)\n\t\told_val = node.element\n\t\tnode.element = element\n\t\treturn old_val\n\t\n\tdef insert(self, index, element):\n\t\t\"\"\"\n\t\tInsert an element at the given index\n\t\t:param index: the index\n\t\t:param element: the element\n\t\t\"\"\"\n\t\tself._range_check_add(index)\n\t\tif index == 0:\n\t\t\tself.__first = Node(element, self.__first)\n\t\telse:\n\t\t\tpre = self.__node(index - 1)\n\t\t\tpre.next = Node(element, pre.next)\n\t\t\n\t\tself._size += 1\n\t\n\tdef remove(self, index):\n\t\t\"\"\"\n\t\tRemove the node at the given index\n\t\t:param index: the index\n\t\t:return: the element of the removed node\n\t\t\"\"\"\n\t\tself._range_check(index)\n\t\tnode = self.__first\n\t\tif index == 0:\n\t\t\tself.__first = node.next\n\t\telse:\n\t\t\tpre = self.__node(index - 1)\n\t\t\tnode = pre.next\n\t\t\tpre.next = node.next\n\t\tself._size -= 1\n\t\treturn node.element\n\t\n\tdef __node(self, index):\n\t\t\"\"\"\n\t\tGet the node at the given index\n\t\t:param index:\n\t\t:return:\n\t\t\"\"\"\n\t\tself._range_check(index)\n\t\tnode = self.__first\n\t\tfor i in range(index):\n\t\t\tnode = node.next\n\t\treturn node\n","sub_path":"data-structure/python/11-hashMap/02-hashMap扩容/com/jqc/list/singleLinkList.py","file_name":"singleLinkList.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"17901479","text":"from flask import Flask, request, jsonify\nimport fib\napplication = Flask(__name__)\n\n@application.route(\"/\")\ndef hello():\n    return \"Please visit /fibseq/NUMBER url to get Fibonacci sequence or /fib/NUMBER to get Fibonacci number.\\n\"\n\n@application.route('/fibseq/<num>')\ndef show_fibonacci_sequence(num):\n    if num[0] == '-':\n        return jsonify({'input': num,\n                        'error': 'Negative value, service accepts only non-negative numbers', 'sequence': None}\n                       ), 400\n\n    try:\n        sequence = fib.fib_sequence(int(num))\n        return jsonify({'input': num, 'error': None, 'sequence': sequence}), 200\n    except ValueError as e:\n        return jsonify({'input': num, 'error': str(e), 'sequence': None}), 400\n\n@application.route('/fib/<num>')\ndef show_fibonacci_number(num):\n    try:\n        fib_num = fib.fib_number(int(num))\n        return jsonify({'input': num, 'error': None, 'number': fib_num}), 200\n    except ValueError as e:\n        return jsonify({'input': num, 'error': str(e), 'number': None}), 400\n\n@application.route('/cache')\ndef show_cache():\n    return jsonify(fib.cache())\n\nif __name__ == \"__main__\":\n    
application.run()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"438416499","text":"# ----- Infinibox related modules ----#\nimport requests\nfrom webapp.utility import infini_host_data,bytesto,infini_volume_data\n\n# ------Infinibox Server List ---- #\ndef get_infini_serverlist():\n\tserverlist = []\n\thostnamelist = []\n\tfor host in infini_host_data: #['result']:\n\t\t#server_dict = {}\n\t\tname= host['name']\n\t\tid = host['id']\n\t\tif name not in hostnamelist:\n\t\t\t\"\"\"server_dict['name'] = name\n\t\t\tserver_dict['value'] = str(id)\n\t\t\tserverlist.append(server_dict)\n\t\t\t\"\"\"\n\t\t\thostnamelist.append(name)\n\treturn hostnamelist\n\n# ------ Listing the unmapped volumes in infinibox ---- #\ndef get_unmapped_infini():\n\tvolume_list_json = infini_volume_data\n\terror = ''\n\treslist = []\n\ttry:\n\t\t\tfor volume in volume_list_json:\t\n\t\t\t\tvol_dict = {}\n\t\t\t\tif volume['mapped'] == False:\n\t\t\t\t\tvol_dict['name'] = volume['name']\n\t\t\t\t\tvol_dict['id'] = volume['id']\n\t\t\t\t\tsize = bytesto(volume['size'],'g')\n\t\t\t\t\tvol_dict['size'] = size\n\t\t\t\t\treslist.append(vol_dict)\t\t\n\texcept Exception as e:\n\t\terror = \"Error in Infinibox calculation - \"+str(e)\n\treturn (reslist,error)\n\n#---- Given the HOST and Volume list object, it calculates the disk names, disk ids and size of disk for each host ----# \ndef get_infini(hostlist,limit=1000):\n volume_list_json = infini_volume_data\n infini_total_usage = 0\n error = ''\n res_dict = {}\n try:\n if len(hostlist) == 0:\n hostlist = infini_host_data #['result']\n for host in hostlist:\n if host['name'] not in res_dict:\n res_dict[ host['name']] = {}\n res_dict[host['name']]['total_size'] = 0\n res_dict[host['name']]['disk_list'] = []\n luns=host['luns']\n for lun in luns:\n for volume in volume_list_json: #['result']:\n vol_dict = {}\n if volume['type'].upper() == 'MASTER':\n if volume['mapped'] == True:\n if lun['volume_id'] == volume['id']:\n vol_dict['name'] = volume['name']\n vol_dict['id'] = volume['id']\n vol_dict['source'] = 'Infinibox'\n size = bytesto(volume['size'],'g')\n res_dict[host['name']]['total_size']+= size\n infini_total_usage += size\n vol_dict['size'] = size\n res_dict[host['name']]['disk_list'].append(vol_dict)\n #infini_total_usage += res_dict[host['name']]['total_size']\n except Exception as e:\n error = \"Error in Infinibox calculation - \"+str(e)\n return (res_dict,infini_total_usage,error)\n\n","sub_path":"webapp/COPY/infinibox.py","file_name":"infinibox.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"56061456","text":"from rest_framework.test import APIClient\nfrom accounts.models import User\nfrom rest_framework import status\nfrom musics import models as musics_models\nfrom rest_framework.test import APITestCase\nfrom model_mommy import mommy\nfrom musics.serializers import WhoshSerializer\nimport json\n\n\nclass TestRadio(APITestCase):\n\n def setUp(self):\n self.c = APIClient()\n\n\n self.user = User.objects.create(username=\"user1\", facebook_id=\"1234556\")\n\n\n\n self.list_style_model = mommy.make(\"musics.style\", _quantity=3)\n self.list_mood_model = mommy.make(\"musics.mood\", _quantity=3)\n self.list_country_model = mommy.make(\"location.country\", _quantity=3)\n self.list_language_model = 
mommy.make(\"location.language\", _quantity=3)\n self.list_similar_band_model = mommy.make(\"musics.similarband\", _quantity=3)\n\n def add_m2m(n):\n for music in self.music1:\n music.style.add(self.list_style_model[n])\n music.mood.add(self.list_mood_model[n])\n music.similar_bands.add(self.list_similar_band_model[n])\n\n self.music1 = mommy.make(\"musics.music\",\n _quantity=10,\n country=self.list_country_model[0],\n language=self.list_language_model[0],\n tempo=100,\n energy=musics_models.LOW\n )\n add_m2m(0)\n\n self.music2 = mommy.make(\"musics.music\",\n _quantity=10,\n country=self.list_country_model[1],\n language=self.list_language_model[1],\n tempo=120,\n energy=musics_models.MEDIUM\n )\n add_m2m(1)\n\n self.music3 = mommy.make(\"musics.music\",\n _quantity=10,\n country=self.list_country_model[2],\n language=self.list_language_model[2],\n tempo=150,\n energy=musics_models.HIGH\n )\n\n add_m2m(2)\n\n\n def test_create_session_radio(self):\n\n \"\"\"\n Se debe poder visualizar musica ordenadaa por ranking y filtrada\n por los diferentes atributos de la canción\n \"\"\"\n # list_style = self.get_model_id(musics_models.Style)\n # list_mood = self.get_model_id(musics_models.Mood)\n # list_similar_band = self.get_model_id(musics_models.SimilarBand)\n \n pre_filter_data = {\n \"country__id\" : self.list_country_model[2].id,\n \"language__id\" : self.list_language_model[2].id,\n \"style__id\" : [self.list_style_model[2].id],\n \"mood__id\" : [self.list_mood_model[2].id],\n \"similar_bands__id\" : [self.list_similar_band_model[2].id],\n \"min_tempo\" : 149,\n \"energy\" : musics_models.HIGH\n\n }\n for key, value in pre_filter_data.items():\n filter_data = {\n key: value\n }\n response = self.c.post('/player/radio/', filter_data)\n # self.assertTrue(len(response.data) == 10, \"{0}: {1}\".format(key, value))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n self.assertTrue(isinstance(response.data.get(\"session\"), int), response.data)\n\n\n def get_model_id(self, Model):\n return [x.id for x in Model.objects.all()[:3]]\n\n\n def test_read_session_radio(self):\n\n \"\"\"\n Se debe poder visualizar canciones ordenados por ranking\n \"\"\"\n\n \n\n\n def test_read_radio_cache_radio_active(self):\n\n music = mommy.make(\"musics.music\", _fill_optional=True, make_m2m=True, _quantity=10)\n\n\n data = WhoshSerializer(music[0]).data\n json_data = json.dumps(data)\n\n\n cache = mommy.make(\"radio.cacheradio\", _fill_optional=True, make_m2m=True, is_active=True)\n\n\n session = mommy.make(\"radio.sessionradio\",\n _fill_optional=True,\n make_m2m=True,\n num_hash=cache.num_hash,\n json_search=json_data)\n\n response = self.c.get('/player/radio/{}/'.format(session.id))\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n\n self.assertDictEqual.__self__.maxDiff = None\n\n self.assertTrue(isinstance(response.data, list))\n\n\n def test_read_radio_cache_radio_deactive(self):\n\n music = mommy.make(\"musics.music\", _fill_optional=True, make_m2m=True, _quantity=10)\n\n data = WhoshSerializer(music[0]).data\n json_data = json.dumps(data)\n\n\n cache = mommy.make(\"radio.cacheradio\", _fill_optional=True, make_m2m=True, is_active=False)\n session = mommy.make(\"radio.sessionradio\",\n _fill_optional=True,\n make_m2m=True,\n num_hash=cache.num_hash,\n json_search=json_data)\n\n\n\n response = self.c.get('/player/radio/{}/'.format(session.id))\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n\n 
self.assertDictEqual.__self__.maxDiff = None\n\n self.assertTrue(isinstance(response.data, list))","sub_path":"zonia-backend-master/radio/tests/integration/test_radio.py","file_name":"test_radio.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"106208928","text":"#!/usr/bin/env python\n\nimport tf2_ros\nimport ros_numpy\nfrom tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud\nfrom sensor_msgs.msg import PointCloud2\nimport numpy as np\nimport rospy\nimport pcl\n\n\nclass TransformAndFilterPointCloud(object):\n\n \"\"\"\n Class allowing to transform a pointcloud to the \"world\" frame, to filter it and to publish it\n \"\"\"\n\n def __init__(self, x_min, x_max, y_min, y_max, z_min, z_max,output_topic_name=\"filtered_point_cloud\"):\n \"\"\"\n Initialize all attributes of the class\n @param output_topic_name: Name of the topic on which the filtered PointCloud2 must be published\n \"\"\"\n # Depending on the mode this script is launched, the input topic and the depth camera frame name change\n # if rospy.get_param(\"simulation\"):\n # self.sensor_link_name = \"camera_link\"\n # input_topic_name = \"/camera/depth/points\"\n # else:\n self.sensor_link_name = \"kinect2_ir_optical_frame\"\n input_topic_name = \"/kinect2/sd/points\"\n\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.z_min = z_min\n self.z_max = z_max\n\n # Initialize a transform buffer allowing to keep tf2 information during a given time\n self.transform_buffer = tf2_ros.Buffer(cache_time=rospy.Duration(10))\n # Initialize a listener to all transforms in the tf2 tree\n self.transform_listener = tf2_ros.TransformListener(self.transform_buffer)\n # Initialize a publisher for the filtered point cloud\n self.publisher = rospy.Publisher(output_topic_name, PointCloud2, queue_size=2)\n # Initialize the subscriber to get the input PointCloud2\n self.subscriber = rospy.Subscriber(input_topic_name, PointCloud2, self.point_cloud_callback, queue_size=2)\n # Publish the filtered point cloud\n self.publish_filtered_point_cloud(output_topic_name)\n\n def point_cloud_callback(self, point_cloud):\n \"\"\"\n Callback gathering all operations required to tranform and filter the input PointCloud2\n @param point_cloud: PointCloud2 message to tranform and filter\n \"\"\"\n # Initialize tf2 buffer and listener to transform the point cloud so that the points are related to the world\n # frame\n tf_buffer = tf2_ros.Buffer()\n tf2_ros.TransformListener(tf_buffer)\n rospy.sleep(1.0)\n # Retrieve the necessary transform to change the point cloud from the frame \"camera_link\" to \"world\"\n transform = tf_buffer.lookup_transform(\"world\", self.sensor_link_name, rospy.Time())\n # Apply required operations to get a transformed point cloud\n transformed_point_cloud = do_transform_cloud(point_cloud, transform)\n\n # Create a pcl's PointCloud object in which we load the transformed PointCloud2 message\n pcl_point_cloud = pcl.PointCloud()\n numpy_point_cloud = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(transformed_point_cloud).astype(np.float32)\n pcl_point_cloud.from_array(numpy_point_cloud)\n # Filter the clean point cloud array (here the values correspond to the surface of the table which represents\n # the space reachable by the robot)\n\n filtered_point_cloud = self.filter_point_cloud(pcl_point_cloud, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max)\n # Values for the table\n 
#filtered_point_cloud = self.filter_point_cloud(pcl_point_cloud, -0.19, 0.25, 0.55, 1.05, 0.40, 1.0)\n\n # Publish the output PointCloud2 msg\n self.publisher.publish(filtered_point_cloud)\n\n def publish_filtered_point_cloud(self, topic_name):\n \"\"\"\n Function publishing the transformed and filtered point cloud without requiring to apply all operations\n everytime\n @param topic_name: Name of the topic on which the filter PointCloud2 will be published\n \"\"\"\n # Store an already filtered PointCloud2 message (initially published in the subscriber's callback)\n final_point_cloud = rospy.wait_for_message(topic_name, PointCloud2)\n # Unsubscribe to the input topic. Since the method is working as an open loop, the point cloud is not dynamic,\n # so filtering and transforming once is enough.\n # This is a ugly solution, but couldn't find another way to get lookup_transform working....\n self.subscriber.unregister()\n\n # Feedback to make sure that everything works properly\n rospy.loginfo(\"Point cloud successfully filtered, publishing result on topic {}\".format(topic_name))\n # Until rospy is shutdown, it will publish the same message (the stored one)\n while not rospy.is_shutdown():\n self.publisher.publish(final_point_cloud)\n\n def filter_point_cloud(self, pcl_point_cloud, x_min, x_max, y_min, y_max, z_min, z_max):\n \"\"\"\n Function taking as input a point cloud and return all points contained inside the specified bounds\n @param point_cloud_array: pcl's PointCloud object containing the point cloud\n @param x_min: Minimal bound for x-axis.\n @param x_max: Maximal bound for x-axis.\n @param y_min: Minimal bound for y-axis.\n @param y_max: Maximal bound for y-axis.\n @param z_min: Minimal bound for z-axis.\n @return: PointCloud2 Msg containing the filtered point cloud\n \"\"\"\n # Since the function make_cropbox seems problematic, using several passthrough filters\n # Set parameters for filtering on x-axis\n filter_x_axis = pcl_point_cloud.make_passthrough_filter()\n filter_x_axis.set_filter_field_name(\"x\")\n filter_x_axis.set_filter_limits(x_min, x_max)\n # Filter on x-axis\n x_filtered = filter_x_axis.filter()\n # Set parameters for filtering on y-axis\n filter_y_axis = x_filtered.make_passthrough_filter()\n filter_y_axis.set_filter_field_name(\"y\")\n filter_y_axis.set_filter_limits(y_min, y_max)\n # Filter on y-axis\n xy_filtered = filter_y_axis.filter()\n # Set parameters for filtering on z-axis\n filter_z_axis = xy_filtered.make_passthrough_filter()\n filter_z_axis.set_filter_field_name(\"z\")\n filter_z_axis.set_filter_limits(z_min, z_max)\n # Filter on z-axis\n filtered_point_cloud = filter_z_axis.filter()\n # Transform the filtered point cloud into a list in order to be able to\n # add color information and thus being understood by Rviz\n filtered_list = filtered_point_cloud.to_list()\n colored_filtered_list = list(map(lambda x: tuple(x + [64 << 16 | 64 << 8 | 64]), filtered_list))\n colored_array = np.array(colored_filtered_list, dtype=[\n ('x', '= self.size():\n self._redis.rpush(self.key, item)\n else:\n time.sleep(0.1)\n self.put_wait(item)\n\n def get(self):\n \"\"\"\n 返回队列第一个元素\n :return:\n \"\"\"\n result = self._redis.lpop(self.key)\n if isinstance(result, tuple):\n return str(result[1], encoding='UTF-8')\n else:\n return result\n\n\n def get_wait(self, timeout=None):\n \"\"\"\n 获取队列中的值\n :param timeout:\n :return:\n \"\"\"\n result = self._redis.blpop(self.key, timeout=timeout)\n if isinstance(result, tuple):\n return str(result[1], encoding='UTF-8')\n else:\n 
return result\n","sub_path":"service/rqueue.py","file_name":"rqueue.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"298264950","text":"'''\nCreated on Feb 8, 2012\n\n@author: spengler\n'''\n\n#Estado do Projeto\ncntEstadoProjeto_Novo = 1\ncntEstadoProjeto_Publicado = 2\ncntEstadoProjeto_AguardandoCredito = 3\ncntEstadoProjeto_PreDesenvolvimento = 4\ncntEstadoProjeto_EmDesenvolvimento = 5\ncntEstadoProjeto_EmAnalise = 6\ncntEstadoProjeto_EmRevisao = 7\ncntEstatoProjeto_AguardandoDebito = 8\ncntEstadoProjeto_Concluido = 9\ncntEstadoProjeto_Inativo = 10\n\n#Busca Projeto\ncntBuscaProjetos_Ultimos = 1\ncntBuscaProjetos_Finalizando = 2\ncntBuscaProjetos_Perfil = 3\ncntBuscaProjetos_Categoria_Desenvolvimento = 4\ncntBuscaProjetos_Categoria_SO = 5\ncntBuscaProjetos_Categoria_Infra = 6\ncntBuscaProjetos_Categoria_Dados = 7\ncntBuscaProjetos_Categoria_Softwares = 8\ncntBuscaProjetos_Categoria_Mobile = 9\ncntBuscaProjetos_Categoria_MidiasSociais = 10\ncntBuscaProjetos_Categoria_Outras = 11\ncntBuscaProjetos_PalavraChave = 12\n\n#Busca Freelancers\ncntBuscaFreelancers_Ultimos = 1\ncntBuscaFreelancers_Pontuacao = 2\n\n#Eventos do Historico\ncntEventoHistorico_Criacao = 1\ncntEventoHistorico_Publicacao = 2\ncntEventoHistorico_FimPublicacao = 3\ncntEventoHistorico_SelecionaGanhador = 4\ncntEventoHistorico_InicioDesdenvolvimento = 5\ncntEventoHistorico_FimDesenvolvimentoPrevisto = 6\ncntEventoHistorico_FimDesenvolvimentoReal = 7\ncntEventoHistorico_InicioRevisao = 8\ncntEventoHistorico_FimRevisaoPrevisto = 9\ncntEventoHistorico_FimRevisaoReal = 15\ncntEventoHistorico_PagamentoOfertante = 10\ncntEventoHistorico_PagamentoFreelancer = 11\ncntEventoHistorico_InicioAnalise = 12\ncntEventoHistorico_FimAnalise = 13\ncntEventoHistorico_Desativar = 14\n\n#Categorias de Projeto\ncntCategoriaProjetos_Desenvolvimento = 1\ncntCategoriaProjetos_SO = 2\ncntCategoriaProjetos_Softwares = 3\ncntCategoriaProjetos_Infra = 4\ncntCategoriaProjetos_Dados = 5\ncntCategoriaProjetos_Mobile = 6\ncntCategoriaProjetos_MidiasSociais = 7\ncntCategoriaProjetos_Outras = 8\n\n#Tipos de Usuario\ncntTipoUsuarioFreelancer = 1\ncntTipoUsuarioOfertante = 2\ncntTipoUsuarioAmbos = 3\n\n#Classe de Usuario\ncntClasseUsuarioPadrao = 1\n\n#Tipos de Pagamento\ncntTipoPagamento_DebitoFreelancer = 1\ncntTipoPagamento_CreditoOfertante = 2\n\n#Estados do Pagamento\ncntEstadoPagamento_Pendente = 1\ncntEstadoPagamento_EmAnalise = 2\ncntEstadoPagamento_Aprovado = 3\ncntEstadoPagamento_Reprovado = 4\n\n#Valor das Comissoes\ncntValorComissao_Freelancer = 0.10\ncntValorComissao_Ofertante = 0.05\n\n#Tipos de Promocao\ncntTipoPromocao_Freelancer = 1\ncntTipoPromocao_Ofertante = 2\n\n#Tipos de Mensagem\ncntTipoMensagemRevisao = 1\ncntTipoMensagemProjeto = 2\ncntTipoMensagemForum = 3\ncntTipoMensagemFeedback = 4\ncntTipoMensagemDireta = 5\n\n#Origem Mensagem\ncntOrigemMinhasPropostas = 1\ncntOrigemMeusProjetos = 2\ncntOrigemForumProjeto = 3\ncntOrigemFeedback = 4\ncntOrigemForumAcaoProjeto = 5\ncntOrigemPerfil = 6\n\n#Tipos de Email\ncntTipoEmailAguardandoCredito = 1\ncntTipoEmailEscolhaFreelancer = 2\ncntTipoEmailEmDesenvolvimento = 3\ncntTipoEmailDesenvolvimentoConcluido = 4\ncntTipoEmailDesenvolvimentoEntregue = 5\ncntTipoEmailSolicitaRevisao = 6\ncntTipoEmailEntregaRevisao = 7\ncntTipoEmailProjetoAceito = 8\ncntTipoEmailConfirmacaoPagamento = 9\ncntTipoEmailPropostaCancelada = 10\ncntTipoEmailAvisoInativo = 11\ncntTipoEmailCadastroUsuario = 
12\ncntTipoEmailDesativarProjeto = 13\ncntTipoEmailViolacaoTermoDeUso = 14\ncntTipoEmailDenuncia = 15\ncntTipoEmailRecebeuProposta = 16\ncntTipoEmailRecebeuMensagem = 17\ncntTipoEmailMensagemForum = 18\ncntTipoEmailRecuperarSenha = 19\ncntTipoEmailProjetoPerfil = 20\n\n# Rating criteria types\ncntTipoCriterioQualiadeTrabalho = 1\ncntTipoCriterioComunicacao = 2\ncntTipoCriterioHabilidadeTecnica = 3\ncntTipoCriterioPontualidade = 4\ncntTipoCriterioProfissionalismo = 5\n\n# Email addresses\ncntEnderecoEmail_Contato = 'contato@freelati.com'\ncntEnderecoEmail_Suporte = 'suporte@freelati.com'\n\n# Project value tiers\ncntValorDoProjeto_Simples = 1\ncntValorDoProjeto_MuitoPequeno = 2\ncntValorDoProjeto_Pequeno = 3\ncntValorDoProjeto_Medio = 4\ncntValorDoProjeto_Grande = 5\ncntValorDoProjeto_MuitoGrande = 6\n\n# Origins of redirects to profile editing\ncntOrigemPerfilDireto = 0\ncntOrigemPerfilProjeto = 1 \ncntOrigemPerfilProposta = 2\n\n# Settings\ncntTempoDePublicacao = 60\ncntQtdMensagensPainelControle = 5\n\n# Terms-of-use violation types\ncntTipoViolacaoProjeto = 1\ncntTipoViolacaoMensagem = 2\ncntTipoViolacaoProposta = 3\ncntTipoViolacaoDescricao = 4\n\n# Abuse-report origins\ncntOrigemDenunciaForum = 1\ncntOrigemDenunciaProposta = 2\ncntOrigemDenunciaMensagemDireta = 3\ncntOrigemDenunciaProjeto = 4\ncntOrigemDenunciaDescricaoPerfil = 5\n\n# Portfolio\ncntPortfolio_GitHub = 1\ncntPortfolio_LinkedIn = 2\ncntPortfolio_CoderWall \t= 3\ncntPortfolio_SitePessoal = 4\ncntPortfolio_SiteEmpresa = 5\n\n# Project actions\ncntAcaoProjetoNovo = 0\ncntAcaoProjetoEditar = 1\ncntAcaoProjetoCopiar = 2\n\n# Services\ncntServico_EmailProjetoPerfil = 1\n\n# Scoring\ncntPontuacao_PerfilPessoaFisica = 16\ncntPontuacao_PerfilPessoaJuridica = 19\ncntPontuacao_PesoDescricao = 5\n\n# Links\ncntlink_Projeto = 'http://freelati.com/projeto/projetos/'\n\n# Control-panel search types\ncntTipoBuscaPainelControlePublicados = 1\ncntTipoBuscaPainelControleDev = 2\n","sub_path":"PyProject_FreelaTI/src/PyProject_FreelaTI/constantes.py","file_name":"constantes.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
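# A small companion sketch (added for illustration; not part of the record above):
# flat integer-constant modules like constantes.py are easiest to debug with a
# reverse lookup. The helper name `state_name` is hypothetical.
import constantes

def state_name(value):
    # Map a project-state integer back to its cntEstadoProjeto_* constant name.
    for name, val in vars(constantes).items():
        if name.startswith('cntEstadoProjeto') and val == value:
            return name
    return None

# state_name(2) == 'cntEstadoProjeto_Publicado'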
{"seq_id":"122851167","text":"# -*- coding: utf-8 -*-\nimport logging\nimport itertools\nimport math\nimport urllib\nimport httplib as http\n\nfrom modularodm import Q\nfrom flask import request\n\nfrom framework import utils\nfrom framework import sentry\nfrom framework.auth.core import User\nfrom framework.flask import redirect # VOL-aware redirect\nfrom framework.routing import proxy_url\nfrom framework.exceptions import HTTPError\nfrom framework.auth.forms import SignInForm\nfrom framework.forms import utils as form_utils\nfrom framework.auth.forms import RegistrationForm\nfrom framework.auth.forms import ResetPasswordForm\nfrom framework.auth.forms import ForgotPasswordForm\nfrom framework.auth.decorators import collect_auth\nfrom framework.auth.decorators import must_be_logged_in\n\nfrom website.models import Guid\nfrom website.models import Node\nfrom website.util import rubeus\nfrom website.util import sanitize\nfrom website.project import model\nfrom website.util import web_url_for\nfrom website.util import permissions\nfrom website.project import new_dashboard\nfrom website.settings import ALL_MY_PROJECTS_ID\nfrom website.settings import ALL_MY_REGISTRATIONS_ID\n\nlogger = logging.getLogger(__name__)\n\n\ndef _rescale_ratio(auth, nodes):\n \"\"\"Get scaling denominator for log lists across a sequence of nodes.\n\n :param nodes: Nodes\n :return: Max number of logs\n\n \"\"\"\n if not nodes:\n return 0\n counts = [\n len(node.logs)\n for node in nodes\n if node.can_view(auth)\n ]\n if counts:\n return float(max(counts))\n return 0.0\n\n\ndef _render_node(node, auth=None):\n \"\"\"Render a single node as a dict of display attributes.\n\n :param node: Node to render\n :return: Dict of node attributes\n\n \"\"\"\n perm = None\n # NOTE: auth.user may be None if viewing public project while not\n # logged in\n if auth and auth.user and node.get_permissions(auth.user):\n perm_list = node.get_permissions(auth.user)\n perm = permissions.reduce_permissions(perm_list)\n\n return {\n 'title': node.title,\n 'id': node._primary_key,\n 'url': node.url,\n 'api_url': node.api_url,\n 'primary': node.primary,\n 'date_modified': utils.iso8601format(node.date_modified),\n 'category': node.category,\n 'permissions': perm, # A string, e.g. 'admin', or None,\n 'archiving': node.archiving,\n }\n\n\ndef _render_nodes(nodes, auth=None, show_path=False):\n \"\"\"Render a collection of nodes together with the log rescale ratio.\n\n :param nodes: Nodes to render\n :return: Dict with rendered nodes, rescale ratio and show_path flag\n \"\"\"\n ret = {\n 'nodes': [\n _render_node(node, auth)\n for node in nodes\n ],\n 'rescale_ratio': _rescale_ratio(auth, nodes),\n 'show_path': show_path\n }\n return ret\n\n\n@collect_auth\ndef index(auth):\n \"\"\"Redirect to dashboard if user is logged in, else show homepage.\n\n \"\"\"\n if auth.user:\n return redirect(web_url_for('dashboard'))\n return {}\n\n\ndef find_dashboard(user):\n dashboard_folder = user.node__contributed.find(\n Q('is_dashboard', 'eq', True)\n )\n\n if dashboard_folder.count() == 0:\n new_dashboard(user)\n dashboard_folder = user.node__contributed.find(\n Q('is_dashboard', 'eq', True)\n )\n return dashboard_folder[0]\n\n\n@must_be_logged_in\ndef get_dashboard(auth, nid=None, **kwargs):\n user = auth.user\n if nid is None:\n node = find_dashboard(user)\n dashboard_projects = [rubeus.to_project_root(node, auth, **kwargs)]\n return_value = {'data': dashboard_projects}\n elif nid == ALL_MY_PROJECTS_ID:\n return_value = {'data': get_all_projects_smart_folder(**kwargs)}\n elif nid == ALL_MY_REGISTRATIONS_ID:\n return_value = {'data': get_all_registrations_smart_folder(**kwargs)}\n else:\n node = Node.load(nid)\n dashboard_projects = rubeus.to_project_hgrid(node, auth, **kwargs)\n return_value = {'data': dashboard_projects}\n\n return_value['timezone'] = user.timezone\n return_value['locale'] = user.locale\n return_value['id'] = user._id\n return return_value\n\n\n@must_be_logged_in\ndef get_all_projects_smart_folder(auth, **kwargs):\n # TODO: Unit tests\n user = auth.user\n\n contributed = user.node__contributed\n nodes = contributed.find(\n Q('is_deleted', 'eq', False) &\n Q('is_registration', 'eq', False) &\n Q('is_folder', 'eq', False)\n ).sort('-title')\n\n keys = nodes.get_keys()\n return [rubeus.to_project_root(node, auth, **kwargs) for node in nodes if node.parent_id not in keys]\n\n@must_be_logged_in\ndef get_all_registrations_smart_folder(auth, **kwargs):\n # TODO: Unit tests\n user = auth.user\n contributed = user.node__contributed\n\n nodes = contributed.find(\n\n Q('is_deleted', 'eq', False) &\n Q('is_registration', 'eq', True) &\n Q('is_folder', 'eq', False)\n ).sort('-title')\n\n # Note(hrybacki): is_retracted and is_pending_embargo are property methods\n # and cannot be directly queried\n nodes = filter(lambda node: not node.is_retracted and not node.is_pending_embargo, nodes)\n keys = [node._id for node in nodes]\n return [rubeus.to_project_root(node, auth, **kwargs) for node in nodes if node.ids_above.isdisjoint(keys)]\n\n@must_be_logged_in\ndef get_dashboard_nodes(auth):\n \"\"\"Get summary 
information about the current user's dashboard nodes.\n\n :param-query no_components: Exclude components from response.\n NOTE: By default, components will only be shown if the current user\n is a contributor on a component but not its parent project. This query\n parameter forces ALL components to be excluded from the request.\n :param-query permissions: Filter upon projects for which the current user\n has the specified permissions. Examples: 'write', 'admin'\n \"\"\"\n user = auth.user\n\n contributed = user.node__contributed # nodes user contributed to\n\n nodes = contributed.find(\n Q('category', 'eq', 'project') &\n Q('is_deleted', 'eq', False) &\n Q('is_registration', 'eq', False) &\n Q('is_folder', 'eq', False)\n )\n\n if request.args.get('no_components') not in [True, 'true', 'True', '1', 1]:\n comps = contributed.find(\n # components only\n Q('category', 'ne', 'project') &\n # exclude deleted nodes\n Q('is_deleted', 'eq', False) &\n # exclude registrations\n Q('is_registration', 'eq', False)\n )\n else:\n comps = []\n\n nodes = list(nodes) + list(comps)\n if request.args.get('permissions'):\n perm = request.args['permissions'].strip().lower()\n if perm not in permissions.PERMISSIONS:\n raise HTTPError(http.BAD_REQUEST, dict(\n message_short='Invalid query parameter',\n message_long='{0} is not in {1}'.format(perm, permissions.PERMISSIONS)\n ))\n response_nodes = [node for node in nodes if node.has_permission(user, permission=perm)]\n else:\n response_nodes = nodes\n return _render_nodes(response_nodes, auth)\n\n\n@must_be_logged_in\ndef dashboard(auth):\n user = auth.user\n dashboard_folder = find_dashboard(user)\n dashboard_id = dashboard_folder._id\n return {'addons_enabled': user.get_addon_names(),\n 'dashboard_id': dashboard_id,\n }\n\n\ndef validate_page_num(page, pages):\n if page < 0 or (pages and page >= pages):\n raise HTTPError(http.BAD_REQUEST, data=dict(\n message_long='Invalid value for \"page\".'\n ))\n\n\ndef paginate(items, total, page, size):\n pages = math.ceil(total / float(size))\n validate_page_num(page, pages)\n\n start = page * size\n paginated_items = itertools.islice(items, start, start + size)\n\n return paginated_items, pages\n\n\n@must_be_logged_in\ndef watched_logs_get(**kwargs):\n user = kwargs['auth'].user\n try:\n page = int(request.args.get('page', 0))\n except ValueError:\n raise HTTPError(http.BAD_REQUEST, data=dict(\n message_long='Invalid value for \"page\".'\n ))\n try:\n size = int(request.args.get('size', 10))\n except ValueError:\n raise HTTPError(http.BAD_REQUEST, data=dict(\n message_long='Invalid value for \"size\".'\n ))\n\n total = sum(1 for x in user.get_recent_log_ids())\n paginated_logs, pages = paginate(user.get_recent_log_ids(), total, page, size)\n logs = (model.NodeLog.load(id) for id in paginated_logs)\n\n return {\n \"logs\": [serialize_log(log) for log in logs],\n \"total\": total,\n \"pages\": pages,\n \"page\": page\n }\n\n\ndef serialize_log(node_log, auth=None, anonymous=False):\n '''Return a dictionary representation of the log.'''\n return {\n 'id': str(node_log._primary_key),\n 'user': node_log.user.serialize()\n if isinstance(node_log.user, User)\n else {'fullname': node_log.foreign_user},\n 'contributors': [node_log._render_log_contributor(c) for c in node_log.params.get(\"contributors\", [])],\n 'action': node_log.action,\n 'params': sanitize.unescape_entities(node_log.params),\n 'date': utils.iso8601format(node_log.date),\n 'node': node_log.node.serialize(auth) if node_log.node else None,\n 'anonymous': anonymous\n 
}\n\n\ndef reproducibility():\n return redirect('/ezcuj/wiki')\n\n\ndef registration_form():\n return form_utils.jsonify(RegistrationForm(prefix='register'))\n\n\ndef signin_form():\n return form_utils.jsonify(SignInForm())\n\n\ndef forgot_password_form():\n return form_utils.jsonify(ForgotPasswordForm(prefix='forgot_password'))\n\n\ndef reset_password_form():\n return form_utils.jsonify(ResetPasswordForm())\n\n\n# GUID ###\n\ndef _build_guid_url(base, suffix=None):\n url = '/'.join([\n each.strip('/') for each in [base, suffix]\n if each\n ])\n if not isinstance(url, unicode):\n url = url.decode('utf-8')\n return u'/{0}/'.format(url)\n\n\ndef resolve_guid(guid, suffix=None):\n \"\"\"Load GUID by primary key, look up the corresponding view function in the\n routing table, and return the return value of the view function without\n changing the URL.\n\n :param str guid: GUID primary key\n :param str suffix: Remainder of URL after the GUID\n :return: Return value of proxied view function\n \"\"\"\n # Look up GUID\n guid_object = Guid.load(guid)\n if guid_object:\n\n # verify that the object implements a GuidStoredObject-like interface. If a model\n # was once GuidStoredObject-like but that relationship has changed, it's\n # possible to have referents that are instances of classes that don't\n # have a deep_url attribute or otherwise don't behave as\n # expected.\n if not hasattr(guid_object.referent, 'deep_url'):\n sentry.log_message(\n 'Guid `{}` resolved to an object with no deep_url'.format(guid)\n )\n raise HTTPError(http.NOT_FOUND)\n referent = guid_object.referent\n if referent is None:\n logger.error('Referent of GUID {0} not found'.format(guid))\n raise HTTPError(http.NOT_FOUND)\n if not referent.deep_url:\n raise HTTPError(http.NOT_FOUND)\n url = _build_guid_url(urllib.unquote(referent.deep_url), suffix)\n return proxy_url(url)\n\n # GUID not found; try lower-cased and redirect if exists\n guid_object_lower = Guid.load(guid.lower())\n if guid_object_lower:\n return redirect(\n _build_guid_url(guid.lower(), suffix)\n )\n\n # GUID not found\n raise HTTPError(http.NOT_FOUND)\n\n##### Redirects #####\n\n# Redirect /about/ to OSF wiki page\n# https://github.com/CenterForOpenScience/osf.io/issues/3862\n# https://github.com/CenterForOpenScience/community/issues/294\ndef redirect_about(**kwargs):\n return redirect('https://osf.io/4znzp/wiki/home/')\n\n\ndef redirect_howosfworks(**kwargs):\n return redirect('/getting-started/')\n","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"515564844","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Xgbfir is a XGBoost model dump parser, which ranks features as well as\n# feature interactions by different metrics.\n# Copyright (c) 2016 Boris Kostenko\n# https://github.com/limexp/xgbfir/\n#\n# Originally based on implementation by Far0n\n# https://github.com/Far0n/xgbfi\n\nfrom __future__ import print_function, division\n\nimport argparse\nimport sys\nimport re\n\nimport xlsxwriter\n\n\nclass SplitValueHistogram:\n def __init__(self):\n self.values = {}\n\n def add_value(self, split_value, count):\n if not (split_value in self.values):\n self.values[split_value] = 0\n self.values[split_value] += count\n\n def merge(self, histogram):\n for key, value in histogram.values.items():\n self.add_value(key, value)\n\n\nclass FeatureInteraction:\n def __init__(self, tree_nodes, gain, cover, 
path_probability, depth, tree_index, fscore=1):\n self.histogram = SplitValueHistogram()\n\n features = sorted(tree_nodes, key=lambda x: x.feature)\n self.name = \"|\".join(x.feature for x in features)\n\n self.depth = len(tree_nodes) - 1\n self.gain = gain\n self.cover = cover\n self.fscore = fscore\n self.wfscore = path_probability\n self.tree_index = tree_index\n self.tree_depth = depth\n # TODO: Might need to set `expected_gain`\n self.has_leaf_stats = False\n\n if self.depth == 0:\n self.histogram.add_value(tree_nodes[0].split_value, 1)\n\n self.sum_leaf_values_left = 0.0\n self.sum_leaf_values_right = 0.0\n\n self.sum_leaf_covers_left = 0.0\n self.sum_leaf_covers_right = 0.0\n\n @property\n def average_wfscore(self):\n return self.wfscore / self.fscore\n\n @property\n def average_gain(self):\n return self.gain / self.fscore\n\n @property\n def expected_gain(self):\n # TODO: This could be wrong -- it was originally a += stat\n return self.gain * self.wfscore\n\n @property\n def average_tree_index(self):\n return self.tree_index / self.fscore\n\n @property\n def average_tree_depth(self):\n return self.tree_depth / self.fscore\n\n def __lt__(self, other):\n return self.name < other.name\n\n\nclass FeatureInteractions:\n def __init__(self, comparer=None):\n self.interactions = {}\n if not comparer:\n comparer = feature_score_comparer()\n self._comparer = comparer\n\n @property\n def count(self):\n # TODO: This was never used in previous code\n return len(self.interactions)\n\n def interactions_of_depth(self, depth):\n return sorted([\n self.interactions[key] for key in self.interactions.keys()\n if self.interactions[key].depth == depth\n ], key=self._comparer)\n\n def interactions_with_leaf_stats(self):\n return sorted([\n self.interactions[key] for key in self.interactions.keys()\n if self.interactions[key].has_leaf_stats\n ], key=self._comparer)\n\n def merge(self, other):\n for key in other.interactions.keys():\n feature = other.interactions[key]\n if key not in self.interactions:\n self.interactions[key] = feature\n else:\n self.interactions[key].gain += feature.gain\n self.interactions[key].cover += feature.cover\n self.interactions[key].fscore += feature.fscore\n self.interactions[key].wfscore += feature.wfscore\n self.interactions[key].tree_index += feature.tree_index\n self.interactions[key].tree_depth += feature.tree_depth\n self.interactions[key].sum_leaf_covers_left += feature.sum_leaf_covers_left\n self.interactions[key].sum_leaf_covers_right += feature.sum_leaf_covers_right\n self.interactions[key].sum_leaf_values_left += feature.sum_leaf_values_left\n self.interactions[key].sum_leaf_values_right += feature.sum_leaf_values_right\n self.interactions[key].histogram.merge(feature.histogram)\n\n\nclass XGBModel:\n def __init__(self, comparer=None, verbosity=0):\n self.xgb_trees = []\n self._tree_index = 0\n self._max_deepening = 0\n self._path_memo = set()\n self._max_interaction_depth = 0\n\n if not comparer:\n comparer = feature_score_comparer()\n self._comparer = comparer\n self._verbosity = verbosity\n\n def add_tree(self, tree):\n self.xgb_trees.append(tree)\n\n def feature_interactions(self, max_interaction_depth, max_deepening):\n xgb_feature_interactions = FeatureInteractions(self._comparer)\n self._max_interaction_depth = max_interaction_depth\n self._max_deepening = max_deepening\n\n if self._verbosity >= 1:\n if self._max_interaction_depth == -1:\n print(\"Collecting feature interactions\")\n else:\n print(\"Collecting feature interactions up to depth 
{}\".format(self._max_interaction_depth))\n\n for i, tree in enumerate(self.xgb_trees):\n if self._verbosity >= 2:\n sys.stdout.write(\"Collecting feature interactions within tree #{} \".format(i + 1))\n\n self._tree_feature_interactions = FeatureInteractions(self._comparer)\n self._path_memo = set()\n self._tree_index = i\n\n tree_nodes = []\n self.collect_interactions(tree, tree_nodes)\n\n if self._verbosity >= 2:\n number_interactions = len(self._tree_feature_interactions.interactions)\n sys.stdout.write(\"=> number of interactions: {}\\n\".format(number_interactions))\n\n xgb_feature_interactions.merge(self._tree_feature_interactions)\n\n if self._verbosity >= 1:\n number_collected = len(xgb_feature_interactions.interactions)\n print(\"{} feature interactions has been collected.\".format(number_collected))\n\n return xgb_feature_interactions\n\n def collect_interactions(self, tree, current_interaction, gain=0.0, cover=0.0, path_probability=1.0, depth=0, deepening=0):\n if tree.node.is_leaf:\n return\n\n current_interaction.append(tree.node)\n gain += tree.node.gain\n cover += tree.node.cover\n\n path_probability_left = path_probability * (tree.left.node.cover / tree.node.cover)\n path_probability_right = path_probability * (tree.right.node.cover / tree.node.cover)\n\n fi = FeatureInteraction(current_interaction, gain, cover, path_probability, depth, self._tree_index, 1)\n\n if depth < self._max_deepening or self._max_deepening < 0:\n interaction_left = []\n interaction_right = []\n # TODO: I think the recursion is here?\n self.collect_interactions(tree.left, interaction_left, 0.0, 0.0, path_probability_left, depth + 1, deepening + 1)\n self.collect_interactions(tree.right, interaction_right, 0.0, 0.0, path_probability_right, depth + 1, deepening + 1)\n\n path = \",\".join(str(n.number) for n in current_interaction)\n\n if fi.name not in self._tree_feature_interactions.interactions:\n self._tree_feature_interactions.interactions[fi.name] = fi\n self._path_memo.add(path)\n else:\n if path in self._path_memo:\n return\n self._path_memo.add(path)\n\n # TODO: Shouldn't `tfi` do this with an update method?\n tfi = self._tree_feature_interactions.interactions[fi.name]\n tfi.gain += gain\n tfi.cover += cover\n tfi.fscore += 1\n tfi.wfscore += path_probability\n tfi.tree_depth += depth\n tfi.tree_index += self._tree_index\n tfi.histogram.merge(fi.histogram)\n\n if len(current_interaction) - 1 == self._max_interaction_depth:\n return\n\n current_interaction_left = list(current_interaction)\n current_interaction_right = list(current_interaction)\n\n left_tree = tree.left\n right_tree = tree.right\n\n if left_tree.node.is_leaf and deepening == 0:\n tfi = self._tree_feature_interactions.interactions[fi.name]\n tfi.sum_leaf_values_left += left_tree.node.leaf_value\n tfi.sum_leaf_covers_left += left_tree.node.cover\n tfi.has_leaf_stats = True\n\n if right_tree.node.is_leaf and deepening == 0:\n tfi = self._tree_feature_interactions.interactions[fi.name]\n tfi.sum_leaf_values_right += right_tree.node.leaf_value\n tfi.sum_leaf_covers_right += right_tree.node.cover\n tfi.has_leaf_stats = True\n\n self.collect_interactions(tree.left, current_interaction_left, gain, cover, path_probability_left, depth + 1, deepening)\n self.collect_interactions(tree.right, current_interaction_right, gain, cover, path_probability_right, depth + 1, deepening)\n\n\nclass XGBTreeNode:\n def __init__(self):\n self.feature = ''\n self.gain = 0.0\n self.cover = 0.0\n self.number = -1\n self.left_child = None\n self.right_child = 
None\n self.leaf_value = 0.0\n self.split_value = 0.0\n self.is_leaf = False\n\n def __lt__(self, other):\n return self.number < other.number\n\n\nclass XGBTree:\n def __init__(self, node):\n self.left = None\n self.right = None\n self.node = node # or node.copy()\n\n\nclass XGBModelParser:\n def __init__(self, comparer=None, verbosity=0):\n if not comparer:\n comparer = feature_score_comparer()\n self._comparer = comparer\n self._verbosity = verbosity\n\n # One update: nodes might not be less than a value\n self.node_regex = re.compile(\"(\\d+):\\[(.+)\\]\\syes=(.*),no=(.*),missing=.*,gain=(.*),cover=(.*)\")\n self.leaf_regex = re.compile(\"(\\d+):leaf=(.*),cover=(.*)\")\n self.node_list = {}\n\n def construct_tree(self, tree):\n if tree.node.left_child is not None:\n tree.left = XGBTree(self.node_list[tree.node.left_child])\n self.construct_tree(tree.left)\n if tree.node.right_child is not None:\n tree.right = XGBTree(self.node_list[tree.node.right_child])\n self.construct_tree(tree.right)\n\n def parse_tree_node(self, line):\n node = XGBTreeNode()\n if \"leaf\" in line:\n m = self.leaf_regex.match(line)\n node.is_leaf = True\n node.number = int(m.group(1))\n node.leaf_value = float(m.group(2))\n node.cover = float(m.group(3))\n else:\n m = self.node_regex.match(line)\n node.is_leaf = False\n node.number = int(m.group(1))\n\n feature = m.group(2)\n if \"<\" in feature:\n node.feature, node.split_value = feature.split(\"<\")\n else:\n # The feature is True/False in these instances\n node.feature = feature\n node.split_value = 1\n\n node.left_child = int(m.group(3))\n node.right_child = int(m.group(4))\n node.gain = float(m.group(5))\n node.cover = float(m.group(6))\n return node\n\n def model_from_file(self, file_name, max_trees):\n model = XGBModel(self._comparer, self._verbosity)\n self.node_list = {}\n number_of_trees = 0\n with open(file_name) as f:\n for line in f:\n line = line.strip()\n if not line or line.startswith('booster'):\n if any(self.node_list):\n number_of_trees += 1\n if self._verbosity >= 2:\n sys.stdout.write(\"Constructing tree #{}\\n\".format(number_of_trees))\n tree = XGBTree(self.node_list[0])\n self.construct_tree(tree)\n\n model.add_tree(tree)\n self.node_list = {}\n if number_of_trees == max_trees:\n if self._verbosity >= 1:\n print(\"Maximum number of trees reached: #{}\".format(max_trees))\n break\n else:\n node = self.parse_tree_node(line)\n if not node:\n return None\n self.node_list[node.number] = node\n\n if any(self.node_list) and (max_trees < 0 or number_of_trees < max_trees):\n number_of_trees += 1\n if self._verbosity >= 2:\n sys.stdout.write(\"Constructing tree #{}\\n\".format(number_of_trees))\n tree = XGBTree(self.node_list[0])\n self.construct_tree(tree)\n\n model.add_tree(tree)\n self.node_list = {}\n\n return model\n\n def model_from_memory(self, dump, max_trees):\n model = XGBModel(self._comparer, self._verbosity)\n self.node_list = {}\n number_of_trees = 0\n for booster_line in dump:\n self.node_list = {}\n for line in booster_line.split('\\n'):\n line = line.strip()\n if not line:\n continue\n node = self.parse_tree_node(line)\n if not node:\n return None\n self.node_list[node.number] = node\n number_of_trees += 1\n tree = XGBTree(self.node_list[0])\n self.construct_tree(tree)\n model.add_tree(tree)\n if number_of_trees == max_trees:\n break\n return model\n\n\ndef rank_inplace(a):\n c = [(j, i[0]) for j, i in enumerate(sorted(enumerate(a), key=lambda x:x[1]))]\n c.sort(key=lambda x: x[1])\n return [i[0] for i in c]\n\n\ndef 
feature_score_comparer(metric=\"gain\"):\n comparer = {\n \"gain\": lambda x: -x.gain,\n \"fscore\": lambda x: -x.fscore,\n \"wfscore\": lambda x: -x.wfscore,\n \"avgwfscore\": lambda x: -x.average_wfscore,\n \"avggain\": lambda x: -x.average_gain,\n \"expgain\": lambda x: -x.expected_gain,\n }\n metric = metric.lower()\n if metric not in comparer:\n raise Exception(\"Can not compare by: {}\".format(metric))\n return comparer[metric]\n\n\ndef FeatureInteractionsWriter(feature_interactions, file_name, max_depth, top_k, max_histograms, verbosity=0):\n\n if verbosity >= 1:\n print(\"Writing {}\".format(file_name))\n\n workbook = xlsxwriter.Workbook(file_name)\n\n first_row = workbook.add_format()\n first_row.set_align('center')\n first_row.set_align('vcenter')\n first_row.set_bold(True)\n\n first_column = workbook.add_format()\n first_column.set_align('center')\n first_column.set_align('vcenter')\n\n number_format = workbook.add_format()\n number_format.set_num_format('#,###.00')\n\n for depth in range(max_depth + 1):\n if verbosity >= 1:\n print(\"Writing feature interactions with depth {}\".format(depth))\n\n interactions = feature_interactions.interactions_of_depth(depth)\n\n KTotalGain = sum([i.gain for i in interactions])\n TotalCover = sum([i.cover for i in interactions])\n TotalFScore = sum([i.fscore for i in interactions])\n TotalFScoreWeighted = sum([i.wfscore for i in interactions])\n TotalFScoreWeightedAverage = sum([i.average_wfscore for i in interactions])\n\n if top_k > 0:\n interactions = interactions[0:top_k]\n\n if not interactions:\n break\n\n ws = workbook.add_worksheet(\"Interaction Depth {}\".format(depth))\n\n ws.set_row(0, 20, first_row)\n ws.set_column(0, 0, max([len(i.name) for i in interactions]) + 10, first_column)\n\n ws.set_column(1, 13, 17)\n ws.set_column(10, 11, 18)\n ws.set_column(12, 12, 19)\n ws.set_column(13, 13, 17, number_format)\n ws.set_column(14, 15, 19, number_format)\n\n for col, name in enumerate([\n \"Interaction\", \"Gain\", \"FScore\", \"wFScore\", \"Average wFScore\", \"Average Gain\", \"Expected Gain\",\n \"Gain Rank\", \"FScore Rank\", \"wFScore Rank\", \"Avg wFScore Rank\", \"Avg Gain Rank\", \"Expected Gain Rank\",\n \"Average Rank\", \"Average Tree Index\", \"Average Tree Depth\"\n ]):\n ws.write(0, col, name)\n\n gain_sorted = rank_inplace([-f.gain for f in interactions])\n fscore_sorted = rank_inplace([-f.fscore for f in interactions])\n wfscore_sorted = rank_inplace([-f.wfscore for f in interactions])\n average_wfscore_sorted = rank_inplace([-f.average_wfscore for f in interactions])\n average_gain_sorted = rank_inplace([-f.average_gain for f in interactions])\n expected_gain_sorted = rank_inplace([-f.expected_gain for f in interactions])\n\n for i, fi in enumerate(interactions):\n ws.write(i + 1, 0, fi.name)\n ws.write(i + 1, 1, fi.gain, number_format)\n ws.write(i + 1, 2, fi.fscore, number_format)\n ws.write(i + 1, 3, fi.wfscore, number_format)\n ws.write(i + 1, 4, fi.average_wfscore, number_format)\n ws.write(i + 1, 5, fi.average_gain, number_format)\n ws.write(i + 1, 6, fi.expected_gain, number_format)\n ws.write(i + 1, 7, 1 + gain_sorted[i])\n ws.write(i + 1, 8, 1 + fscore_sorted[i])\n ws.write(i + 1, 9, 1 + wfscore_sorted[i])\n ws.write(i + 1, 10, 1 + average_wfscore_sorted[i])\n ws.write(i + 1, 11, 1 + average_gain_sorted[i])\n ws.write(i + 1, 12, 1 + expected_gain_sorted[i])\n\n average_rank = (6.0 + gain_sorted[i] + fscore_sorted[i] + wfscore_sorted[i] + average_wfscore_sorted[i] + average_gain_sorted[i] + 
expected_gain_sorted[i]) / 6.0\n ws.write(i + 1, 13, average_rank, number_format)\n\n ws.write(i + 1, 14, fi.average_tree_index, number_format)\n ws.write(i + 1, 15, fi.average_tree_depth, number_format)\n\n interactions = feature_interactions.interactions_with_leaf_stats()\n if interactions:\n if verbosity >= 1:\n print(\"Writing leaf statistics\")\n\n ws = workbook.add_worksheet(\"Leaf Statistics\")\n\n ws.set_row(0, 20, first_row)\n ws.set_column(0, 0, max([len(i.name) for i in interactions]) + 10, first_column)\n ws.set_column(1, 4, 20)\n\n for col, name in enumerate([\n \"Interaction\", \"Sum Leaf Values Left\", \"Sum Leaf Values Right\", \"Sum Leaf Covers Left\", \"Sum Leaf Covers Right\"\n ]):\n ws.write(0, col, name)\n\n for i, fi in enumerate(interactions):\n ws.write(i + 1, 0, fi.name)\n ws.write(i + 1, 1, fi.sum_leaf_values_left, number_format)\n ws.write(i + 1, 2, fi.sum_leaf_values_right, number_format)\n ws.write(i + 1, 3, fi.sum_leaf_covers_left, number_format)\n ws.write(i + 1, 4, fi.sum_leaf_covers_right, number_format)\n\n interactions = feature_interactions.interactions_of_depth(0)\n if interactions:\n if verbosity >= 1:\n print(\"Writing split value histograms\")\n\n ws = workbook.add_worksheet(\"Split Value Histograms\")\n\n ws.set_row(0, 20, first_row)\n ws.set_column(0, 0, max([len(i.name) for i in interactions]) + 10, first_column)\n ws.set_column(1, 4, 20)\n\n for col, name in enumerate([\n \"Interaction\", \"Sum Leaf Values Left\", \"Sum Leaf Values Right\", \"Sum Leaf Covers Left\", \"Sum Leaf Covers Right\"\n ]):\n ws.write(0, col, name)\n\n for i, fi in enumerate(interactions):\n if i >= max_histograms:\n break\n\n c1 = i * 2\n c2 = c1 + 1\n\n ws.merge_range(0, c1, 0, c2, fi.name)\n ws.set_column(c1, c1, max(10, (len(fi.name) + 4) / 2))\n ws.set_column(c2, c2, max(10, (len(fi.name) + 4) / 2))\n\n for j, key in enumerate(sorted(fi.histogram.values.keys())):\n ws.write(j + 1, c1, key)\n ws.write(j + 1, c2, fi.histogram.values[key])\n\n workbook.close()\n\n\ndef main(argv):\n epilog = '''\nXGBoost Feature Interactions Reshaped 0.2\nURL: https://github.com/limexp/xgbfir\n'''\n\n arg_parser = argparse.ArgumentParser(\n prog=argv[0],\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='XGBoost model dump parser, which ranks features as well as feature interactions by different metrics.',\n epilog=epilog)\n\n arg_parser.add_argument(\n '-V', '--version', action='version',\n version='XGBoost Feature Interactions Reshaped 0.2')\n\n arg_parser.add_argument(\n '-m', dest='XGBModelFile', action='store', default='xgb.dump',\n help=\"Xgboost model dump (dumped w/ 'with_stats=True')\")\n\n arg_parser.add_argument(\n '-o', dest='output', action='store', default='XGBFeatureInteractions.xlsx',\n help='Xlsx file to be written')\n\n arg_parser.add_argument(\n '-t', dest='max_trees', action='store', default='100', type=int,\n help='Upper bound for trees to be parsed')\n\n arg_parser.add_argument(\n '-d', dest='max_interaction_depth', action='store', default='2', type=int,\n help='Upper bound for extracted feature interactions depth')\n\n arg_parser.add_argument(\n '-g', dest='max_deepening', action='store', default='-1', type=int,\n help='Upper bound for interaction start deepening (zero deepening => interactions starting @root only)')\n\n arg_parser.add_argument(\n '-k', dest='top_k', action='store', default='100', type=int,\n help='Upper bound for exported feature interactions per depth level')\n\n arg_parser.add_argument(\n '-H', dest='max_histograms', 
action='store', default='10', type=int,\n help='Maximum number of histograms')\n\n arg_parser.add_argument(\n '-s', dest='sort', action='store', default='Gain',\n help='Score metric to sort by (Gain, FScore, wFScore, AvgwFScore, AvgGain, ExpGain)')\n\n arg_parser.add_argument(\n '-v', '--verbosity', dest='Verbosity', action='count', default=2,\n help='Increase output verbosity')\n\n args = arg_parser.parse_args(args=argv[1:])\n\n args.XGBModelFile = args.XGBModelFile.strip()\n args.output = args.output.strip()\n\n verbosity = int(args.Verbosity)\n\n settings_print = '''\nSettings:\n=========\nXGBModelFile (-m): {model}\noutput (-o): {output}\nmax_interaction_depth (-d): {depth}\nmax_deepening (-g): {deepening}\nmax_trees (-t): {trees}\ntop_k (-k): {topk}\nsort (-s): {sortby}\nmax_histograms (-H): {histograms}\n'''.format(\n model=args.XGBModelFile,\n output=args.output,\n depth=args.max_interaction_depth,\n deepening=args.max_deepening,\n trees=args.max_trees,\n topk=args.top_k,\n sortby=args.sort,\n histograms=args.max_histograms\n )\n\n if verbosity >= 1:\n print(settings_print)\n\n comparer = feature_score_comparer(args.sort)\n parser = XGBModelParser(comparer, verbosity)\n model = parser.model_from_file(args.XGBModelFile, args.max_trees)\n interactions = model.feature_interactions(args.max_interaction_depth, args.max_deepening)\n\n FeatureInteractionsWriter(interactions, args.output, args.max_interaction_depth, args.top_k, args.max_histograms)\n\n if verbosity >= 1:\n print(epilog)\n\n return 0\n\n\ndef entry_point():\n \"\"\"Zero-argument entry point for use with setuptools/distribute.\"\"\"\n raise SystemExit(main(sys.argv))\n\n\ndef save_excel(booster, feature_names=None, output='XGBFeatureInteractions.xlsx', max_trees=100, max_interaction_depth=2, max_deepening=-1, top_k=100, max_histograms=10, sort='Gain'):\n if 'get_dump' not in dir(booster):\n if 'booster' in dir(booster):\n booster = booster.booster()\n else:\n raise Exception(\"Could not find `booster` of XGB model.\")\n if feature_names is not None:\n if isinstance(feature_names, list):\n booster.feature_names = feature_names\n else:\n booster.feature_names = list(feature_names)\n\n comparer = feature_score_comparer(sort)\n parser = XGBModelParser(comparer)\n dump = booster.get_dump('', with_stats=True)\n model = parser.model_from_memory(dump, max_trees)\n interactions = model.feature_interactions(max_interaction_depth, max_deepening)\n FeatureInteractionsWriter(interactions, output, max_interaction_depth, top_k, max_histograms)\n\n\nif __name__ == '__main__':\n entry_point()\n","sub_path":"xgbfir/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"412356604","text":"#!/usr/bin/env python3\n\"\"\"\nName: jranderson\nDate: 03.30.19\nPurpose: Regular Expression Date Parsing \n\"\"\"\n\nimport os\nimport re\nimport sys\n\n#---------------------------------------------------------\ndef main():\n \"\"\"Main Codin!\"\"\"\n\n args = sys.argv[1:]\n #print(args)\n\n if len(args) != 1:\n print('Usage: {} DATE'.format(os.path.basename(sys.argv[0])))\n sys.exit(1)\n\n input = args[0]\n mth_dict = {'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7, 'Aug':8, 'Sep':9, 'Oct':10, 'Nov':11, 'Dec':12} \n\n\n sep = '[-\S*]'\n year = '(?P<year>\d{4})'\n short_year = '(?P<year>\d{2})'\n month = '(?P<month>\d{1,2})'\n day = '(?:-(?P<day>\d{1,2}?))'\n written_mth = '(?P<mth>Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)'
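    # Added illustration (not part of the original script): with the (?P<name>...)
    # groups restored above, matches resolve groups by name, which is exactly what
    # the match.group('year') / match.group('mth') calls further down rely on.
    _demo = re.search(written_mth + '[,-]\s?' + year, 'March, 2019')  # `_demo` is an illustrative name
    assert _demo.group('mth') == 'March' and _demo.group('year') == '2019'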
\n\n #print(written_mth) \n\n date_re1 = re.compile('^(?P<year>\d{4})[-](?P<month>\d{1,2})(?:-(?P<day>\d{1,2})?)')\n match1 = date_re1.match(input)\n# print('1: {}'.format(match1))\n \n date_re2 = re.compile('^(?P<year>\d{4})[-/](?P<month>\d{1,2})')\n match2 = date_re2.match(input)\n# print('2: {}'.format(match2))\n\n date_re3 = re.compile('^(?P<month>\d{1,2})[/](?P<year>\d{2})')\n match3 = date_re3.match(input)\n# print('3: {}'.format(match3))\n\n #match4 = date_re4.match(input)\n #print('4: {}'.format(match4))\n\n date_re5 = re.compile(written_mth + '[,-]\s?' + year)\n match5 = date_re5.search(input)\n #print(match5)\n\n if match1:\n y1 = int(match1.group('year'))\n m1 = int(match1.group('month'))\n d1 = int(match1.group('day'))\n print('{}-{:02d}-{:02d}'.format(y1, m1, d1))\n elif match2: \n y2 = int(match2.group('year'))\n m2 = int(match2.group('month'))\n print('{}-{:02}-01'.format(y2, m2))\n elif match3:\n m3 = int(match3.group('month'))\n y3 = int(match3.group('year'))\n print('20{:02}-{:02}-01'.format(y3, m3))\n elif match5:\n m5 = match5.group('mth')\n m5_num = int(mth_dict[str(m5[:3])])\n y5 = int(match5.group('year'))\n print('{:4}-{:02}-01'.format(y5, m5_num))\n else:\n print('No match')\n\n \n\n#---------------------------------------------------------\nif __name__ == '__main__':\n main()\n\n","sub_path":"assignments/11-regex-dates/dates.py","file_name":"dates.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"330724068","text":"import speech_recognition as sr\n\nr = sr.Recognizer()\nm = sr.Microphone()\n\nwith m as source:\n audio = r.listen(source)\ntry:\n print(r.recognize_bing(audio))\nexcept sr.UnknownValueError:\n print(\"I can't understand you\")\n","sub_path":"pocketSphinxTest.py","file_name":"pocketSphinxTest.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} {"seq_id":"556423445","text":"import boto3\nimport xlrd\nimport os\n\n## To create client for boto3\nec2 = boto3.resource('ec2')\nclient = boto3.client('ec2', aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], aws_secret_access_key= os.environ['AWS_SECRET_ACCESS_KEY'], region_name='us-east-1')\n\nec2_list = []\n\n## To save instances in list from AWS account\nfor instance in ec2.instances.all():\n ec2_list.append(instance.id)\nprint(ec2_list)\n\n# To read the excel file\nprint(\"Starting read file \")\nfile_location = \"/home/ubuntu/pythn-learn/excel.xlsx\"\nworkbook = xlrd.open_workbook(file_location)\nsheet = workbook.sheet_by_index(0)\n\n## To read rows in excel and put in row_list list\nrow_list = []\nrow = 0\nwhile (row in range(sheet.nrows)):\n row_names = sheet.cell_value(row, 0)\n row_list.append(row_names)\n row = row + 1\nprint('ec2 in row list', row_list)\n\n## To read columns in excel and put in col_list list\ncol_list = []\ncol = 0\nwhile (col in range(sheet.ncols)):\n col_names = sheet.cell_value(0, col)\n col_list.append(col_names)\n col = col + 1\nprint(col_list)\n\n## Function to get value of tag key using row_index and col_index\n## Read row_list i.e. instances list\n## Read col_list i.e. column list of tags\n\ndef tag_value (row_key, col_key):\n row_index = row_list.index(row_key)\n #print(\"The row index of \" + row_key + \":\", row_index)\n\n col_index = col_list.index(col_key)\n #print(\"The col index of \" + col_key + \":\", col_index)\n\n data = [[sheet.cell_value(r,c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\n value = data[row_index][col_index]\n #print(\"Value:\", value)\n return value\n
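# Added usage sketch (hypothetical instance ID and tag key, not from the original
# sheet): tag_value() resolves a single cell by its instance-ID row and tag-name
# column, so a lookup reads naturally as:
#
#     owner = tag_value(row_key='i-0123456789abcdef0', col_key='Owner')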
of \" + col_key + \":\", col_index)\n\n data = [[sheet.cell_value(r,c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\n value = data[row_index][col_index]\n #print(\"Value:\", value)\n return value\n\n\nfor instance in ec2.instances.all():\n if (instance.id in ec2_list):\n print('instance found in list with id', instance.id)\n print('start tagging stage for instance ID:', instance.id)\n #row_key = instance.id\n #A = tag_value(row_key= instance.id, col_key= \"A\")\n #print(\"Tag value of A is :\", A)\n #tag = ec2.Tag(instance.id, 'A', A)\n col = 1\n while (col in range(sheet.ncols)):\n key = sheet.cell_value(0, col)\n value = tag_value(row_key= instance.id, col_key= key)\n response = client.create_tags(\n Resources=[instance.id],\n Tags = [\n {\n 'Key': key,\n 'Value': value\n }\n ]\n )\n print(response)\n col = col + 1\n\n\n else:\n print('instance not found in list with id', instance.id)\n\n\n","sub_path":"python/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"257516095","text":"import turtle\nimport random\nturtle = turtle.Turtle()\nturtle.screen.tracer(0,0)\na = 1\nturtle.speed(100)\ncolors = ['red', 'blue', 'black', 'green', 'purple', 'orange', 'pink', 'aquamarine', 'cyan', 'dark violet', 'indigo']\ndef hex(x):\n for n in range(6):\n turtle.forward(x)\n turtle.left(60)\nfor x in range(2):\n for x in range(3):\n for x in range(190):\n color_1 = random.choice(colors)\n turtle.color(color_1)\n hex(a)\n a += 1\n a = 1\n turtle.left(120)\n turtle.right(60)\n","sub_path":"hexagon.py","file_name":"hexagon.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"288663420","text":"\"\"\"\nCode to process data dumps from panoptes\n\"\"\"\nfrom config.config import cfg_model, cfg_path\nimport urllib.request\nimport os\nimport pandas as pd\nimport json\nimport time\nimport numpy as np\nfrom collections import Counter\nfrom tools.subjects import SubjectSet, Subject\nimport pickle\nfrom datetime import datetime\n\n\n#########################\n# Functions\n#########################\n\n# retrieve file from url\ndef get_url(url, fname):\n \"\"\" Retrieves file from url and returns path to file \"\"\"\n path_to_file = urllib.request.urlretrieve(url, fname)\n return path_to_file\n\ndef create_path(path, create_path=True):\n if not os.path.exists(path) & create_path:\n os.mkdir(path)\n else:\n NameError(\"Path not Found\")\n\ndef read_subject_data(path_csv):\n subs_df = pd.read_csv(path_csv)\n # subject ids / urls / metadata\n subject_ids = subs_df['subject_id']\n subject_urls = [json.loads(x)['0'] for x in subs_df['locations']]\n subject_meta = [json.loads(x) for x in subs_df['metadata']]\n # fill dictionary\n subs_dir = dict()\n for i in range(0, len(subject_ids)):\n subs_dir[subject_ids[i]] = {'url': subject_urls[i],\n 'metadata': subject_meta[i]}\n return subs_dir\n\ndef read_classification_data(path_csv):\n # read csv\n cls_df = pd.read_csv(path_csv)\n return cls_df\n\n\n#########################\n# Parameters\n#########################\n\n# link_cl = 'https://zooniverse.slack.com/files/adam/F5YQGD5ML/elephant-expedition-classifications.csv.zip'\n# link_sub = 'https://zooniverse.slack.com/files/adam/F5Y3FHA13/elephant-expedition-subjects.csv.zip'\n\n#########################\n# Get Data & Save\n# manual step\n#########################\n\n# cfg_path['db']\n#\n# # save 
classifications\n# create_path(cfg_path['db'])\n# path_to_file = get_url(link_cl, cfg_path['db'] + 'classifications.zip')\n#\n#\n# # save subject data\n# create_path(cfg_path['db'] + 'subjects')\n# path_to_file = get_url(link_cl, cfg_path['db'] + 'subjects.zip')\n\n\n#########################\n# Get Data\n#########################\n\nsubs = read_subject_data(cfg_path['db'] + 'subjects.csv')\nsubs[list(subs.keys())[0]]\n\ncls = read_classification_data(cfg_path['db'] + 'classifications.csv')\ncls.head\n\n###############################\n# Analysis\n###############################\n\n# workflows\ncls['workflow_name'].unique()\ncls.groupby(['workflow_name']).size()\n\n# workflow_name\n# Angola 553147\n# Empty Or Not 3\n# Gabon (1) 61848\n# Namibia (1) 292281\n# SE Asia (1) 240096\n# SW Angola 1835325\n# South Africa (3) 1303327\n# Tajikistan (1) 408101\n# test 39\n\n# workflow versions\ncls.groupby(['workflow_version']).size()\n\n# filter on workflow\ncls = cls[cls.workflow_name == 'South Africa (3)']\n\n# filter classifications without a choice\ncls = cls[cls.annotations.str.contains('choice')]\n\n# filter classifications without most recent workflow_version\nwork_v = cls.groupby(['workflow_version']).size()\n# workflow_version\n# 311.3 142783\n# 318.4 1160544\nmost_recent_wf = work_v.index[-1]\ncls = cls[cls.workflow_version == most_recent_wf]\n\n# subject id\nprint(\"number of subjects %s\" % len(cls['subject_ids'].unique()))\n\n\n# look for multiple choices per classification\nchoic = list()\nfor i in range(0, cls['annotations'].shape[0]):\n tt = json.loads(cls['annotations'].iloc[i])\n choices = [x['choice'] for x in tt[0]['value']]\n choic.append(choices)\n\n# print answers with multiple choices\nfor i in range(0, len(choic)):\n if len(choic[i]) >1:\n print(\"----------------------------\")\n print(i)\n print(choic[i])\n\n# take a look at an individual multiple choice answer\nidd = 1159980\ncls.iloc[idd]\ncls.iloc[idd]['subject_data']\nsubs[int(list(json.loads(cls.iloc[idd]['subject_data']).keys())[0])]\n\n# retirement reasons\nsubd = dict()\nretirement_reasons = list()\nfor i in range(0, cls['subject_data'].shape[0]):\n tt = json.loads(cls['subject_data'].iloc[i])\n key = list(tt.keys())[0]\n if tt[key]['retired'] is None:\n subd[key] = 'Not Retired'\n else:\n subd[key] = tt[key]['retired']['retirement_reason']\n retirement_reasons.append(subd[key])\n\ntt = pd.DataFrame(retirement_reasons, columns=['retirement_reason'])\ntt.groupby(['retirement_reason']).size()\n\n# retirement_reason\n# Not Retired 75601\n# classification_count 238450\n# consensus 53\n# nothing_here 146492\n# other 270239\n# dtype: int64\n\n\n# check number of entries per subject and user\ncls.columns\ntt = cls.groupby(['subject_ids', 'user_name']).size()\ntt = tt[tt > 1]\n#tt = cls[(cls.subject_ids == 10444100) & (cls.user_name == 'cmdctrl')]\n\nfor i in range(0, tt.shape[0]):\n tt2 = cls[(cls.subject_ids == tt.index[i][0]) & (cls.user_name == tt.index[i][1])]\n time.sleep(3)\n print(\"--------------------------------\")\n for ii in range(0, tt2.shape[0]):\n print(\"Classification ID \" + str(tt2.iloc[ii, :]['classification_id']))\n print(\"User name \" + str(tt2.iloc[ii, :]['user_name']))\n print(\"Annotations \" + str(tt2.iloc[ii, :]['annotations']))\n print(\"Subject Data \" + str(tt2.iloc[ii, :]['subject_data']))\n\n\n# check number of entries per subject and user\ncls.columns\ntt = cls.groupby(['subject_ids', 'user_name', 'classification_id']).size()\ntt = tt[tt > 1]\n\n\n\n# 
workflows\ncls.iloc[1672081,:]['metadata']\n\nfor i in range(500, 550):\n print(\"%s: ------------------\" % i)\n print(cls.iloc[i,:]['annotations'])\n","sub_path":"transfer_learning/db/camera_catalogue/analyse_data_dumps.py","file_name":"analyse_data_dumps.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"228605955","text":"from math import sqrt\nimport sys\n\nif sys.version_info > (3,):\n xrange = range\n\n\ndef pent(x):\n return x*(3*x-1)/2\n\n\ndef is_pent(x):\n f = (.5 + sqrt(.25+6*x))/3\n if f - int(f) == 0:\n return True\n else:\n return False\n\n\nflag = False\nfor i in xrange(1, 3000):\n if i % 100 == 0:\n print('i = %d' % (i))\n for j in xrange(i+1, 3000):\n if is_pent(pent(j) - pent(i)) and is_pent(pent(j) + pent(i)):\n print('answer = %d' % (pent(j) - pent(i)))\n flag = True\n break\n if flag:\n break\n","sub_path":"project-euler/44/from-forum/emandres/pent.py","file_name":"pent.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"528815897","text":"\"\"\"\nStandard imagenet crop method for training and testing\nEntry: imagenet_standard_crop\n\"\"\"\n\nfrom typing import Tuple\nimport random\nimport math\n\nBOX = Tuple[int, int, int, int]\nFLOAT_RANGE = Tuple[float, float]\n\ndef imagenet_standard_crop(width: int, height: int, complexity: int, phase: str, \\\n *, standard: str=\"latest\") -> BOX:\n \"\"\"\n Get standard crop box\n\n Parameters:\n width - image width\n height - image height\n complexity - model complexity in MFLOPs\n phase - in { \"TRAIN\", \"TEST\" }\n standard - in { \"latest\", \"v1\" }\n\n Returns\n (left, top, right, bottom) - where crop region lies in [left, right) and [top, bottom)\n \"\"\"\n assert width > 0 and height > 0\n phase = phase.upper()\n assert phase in {\"TRAIN\", \"TEST\"}\n standard = standard.lower()\n crop_fun = _crop_train if phase == \"TRAIN\" else _crop_test\n return crop_fun(width, height, complexity, standard)\n\ndef _crop_train(width: int, height: int, complexity: int, standard: str) -> BOX:\n assert standard in _CROP_TRAIN_VERSIONS\n return _CROP_TRAIN_VERSIONS[standard](width, height, complexity)\n\ndef _crop_test(width: int, height: int, complexity: int, standard: str) -> BOX:\n assert standard in _CROP_TEST_VERSIONS\n return _CROP_TEST_VERSIONS[standard](width, height, complexity)\n\ndef _crop_train_v1(width: int, height: int, complexity: int) -> BOX:\n if complexity <= 500:\n box = _rand_crop_with_jitter(width, height, (0.49, 1), (3. / 4, 4. / 3))\n else:\n box = _rand_crop_with_jitter(width, height, (0.08, 1), (3. / 4, 4. 
/ 3))\n\n if complexity > 2000:\n box = _add_rand_aspect_ratio(width, height, box, 0.75)\n\n return box\n\ndef _crop_test_v1(width: int, height: int, _) -> BOX:\n return _center_crop(width, height, 0.875)\n\ndef _center_crop(width: int, height: int, ratio: float) -> BOX:\n assert ratio > 0 and ratio <= 1\n crop_size = int(min(width, height) * ratio)\n crop_size = max(crop_size, 1)\n left = (width - crop_size) // 2\n top = (height - crop_size) // 2\n return left, top, left + crop_size, top + crop_size\n\ndef _rand_crop(width: int, height: int, crop_width: int, crop_height: int) -> BOX:\n assert width >= crop_width and height >= crop_height\n assert crop_width > 0 and crop_height > 0\n left = random.randint(0, width - crop_width)\n top = random.randint(0, height - crop_height)\n return left, top, left + crop_width, top + crop_height\n\ndef _rand_crop_with_jitter(width: int, height: int, ratio_range: FLOAT_RANGE, \\\n aspect_range: FLOAT_RANGE) -> BOX:\n assert len(ratio_range) == 2 and ratio_range[0] <= ratio_range[1] and ratio_range[0] > 0\n assert len(aspect_range) == 2 and aspect_range[0] <= aspect_range[1] and aspect_range[0] > 0\n area = float(width * height)\n\n for _ in range(10):\n target_area = random.uniform(ratio_range[0], ratio_range[1]) * area\n aspect_ratio = random.uniform(aspect_range[0], aspect_range[1])\n target_w = max(round(math.sqrt(target_area * aspect_ratio)), 1)\n target_h = max(round(math.sqrt(target_area / aspect_ratio)), 1)\n if random.uniform(0, 1) < 0.5:\n target_w, target_h = target_h, target_w\n\n if target_w <= width and target_h <= height:\n return _rand_crop(width, height, target_w, target_h)\n\n crop_size = min(width, height)\n return _rand_crop(width, height, crop_size, crop_size)\n\ndef _add_rand_aspect_ratio(width: int, height: int, box: BOX, max_ratio: float) -> BOX:\n assert max_ratio > 0\n min_ratio = 1.\n if min_ratio > max_ratio:\n min_ratio, max_ratio = max_ratio, min_ratio\n\n sel_ratio = random.uniform(min_ratio, max_ratio)\n if random.uniform(0, 1) < 0.5:\n sel_ratio = 1 / sel_ratio\n\n area = float((box[2] - box[0]) * (box[3] - box[1]))\n new_h = max(round(math.sqrt(area) / sel_ratio), 1)\n new_w = max(round(new_h * sel_ratio), 1)\n center_x = box[0] + (box[2] - box[0]) // 2\n center_y = box[1] + (box[3] - box[1]) // 2\n new_left = max(center_x - new_w // 2, 0)\n new_top = max(center_y - new_h // 2, 0)\n\n new_right = min(new_left + new_w, width)\n new_bottom = min(new_top + new_h, height)\n\n return new_left, new_top, new_right, new_bottom\n\n_CROP_TRAIN_VERSIONS = {\n \"latest\": _crop_train_v1,\n \"v0\": _crop_train_v1\n }\n\n_CROP_TEST_VERSIONS = {\n \"latest\": _crop_test_v1,\n \"v0\": _crop_test_v1\n }\n","sub_path":"imagenet_crop.py","file_name":"imagenet_crop.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"100059431","text":"#!/usr/bin/env python\nimport time, itertools as it\n\nstart_time = time.time() \n\n\ndef prime_factors(n):\n\t\"\"\" Wheel factorization (wow very fast)\n\t\"\"\"\n\n\tf = 2\n\tincrements = it.chain( [1,2,2], it.cycle([4,2,4,2,4,6,2,6]) )\n\n\tfor incr in increments:\n\t\tif f*f > n:\n\t\t\tbreak\n\n\t\twhile n % f == 0:\n\t\t\tyield f\n\t\t\tn //= f\n\t\tf += incr\n\n\tif n > 1:\n\t\tyield n\n\n\ndef count_prime_factors(n):\n\n\tprimes = set(prime_factors(n))\n\treturn len(primes)\n\n\n\ndef solve_problem47():\n\n\tcounts = []\n\ti = 1\n\twhile counts[-4:] != [4,4,4,4]:\n\t\ti += 
1\n\t\tcounts.append(count_prime_factors(i))\n\n\treturn i - 3\n\n\t\t\n\nanswer = solve_problem47() \nend_time = time.time() \nrun_time = end_time - start_time\n\n\nprint( \"--------------------------------------------\")\nprint( \"| Solution to Project Euler problem 47: |\" )\nprint( \"--------------------------------------------\")\nprint( \"Question: Find the first four consecutive integers to have four distinct prime factors each. What is the first of these numbers?\" )\nprint( \"Answer: {:d}\".format(answer) )\nprint( \"Wall time: {:3.3f} seconds\".format(run_time) )\n","sub_path":"47/problem47.py","file_name":"problem47.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"486306395","text":"from tools.paginator import get_pages\n\nfrom blog.models import Post, Tag, Category\n\nfrom view_record.models import Recorder, ViewNum\n\nfrom tools.check_ajax_post_data import ajax_post_data\n\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.template.context_processors import csrf\nfrom django.views.generic import DetailView, ListView\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.http import Http404, JsonResponse, HttpResponseRedirect\nfrom django.shortcuts import render, render_to_response, get_object_or_404\n\n\nclass BlogListView(ListView):\n\n model = Post\n template_name = 'blog/blog_list.html'\n context_object_name = 'posts'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n pages, posts = get_pages(self.request, self.object_list)\n\n context['posts'] = posts\n context['pages'] = pages\n context['random_posts'] = Post.objects.order_by('?')[:settings.RANDOM_POST_NUM]\n return context\n\n\nclass CategoryView(BlogListView):\n\n def get_queryset(self):\n _category = get_object_or_404(Category, pk=self.kwargs.get('pk'))\n return Post.objects.filter(category=_category)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['current_category'] = get_object_or_404(Category, pk=self.kwargs.get('pk'))\n return context\n\n\nclass TagView(BlogListView):\n\n def get_queryset(self):\n _tag = get_object_or_404(Tag, pk=self.kwargs.get('pk'))\n return Post.objects.filter(tags=_tag)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['current_tag'] = get_object_or_404(Tag, pk=self.kwargs.get('pk'))\n return context\n\n\nclass BlogDetailView(DetailView):\n\n model = Post\n template_name = 'blog/blog_detail.html'\n context_object_name = 'post'\n\n def get(self, request,*args, **kwargs):\n response = super().get(request, *args, **kwargs)\n obj = Post.objects.get(id=self.object.id)\n\n cookie_name = \"post_%s_readed\" % self.object.id\n\n if cookie_name not in request.COOKIES:\n recorder = Recorder(content_object=obj)\n recorder.ip_address = request.META.get(\"HTTP_X_FORWARDED_FOR\", request.META.get(\"REMOTE_ADDR\", None))\n recorder.user = request.user if request.user.is_authenticated() else None\n recorder.save()\n\n obj_type = ContentType.objects.get_for_model(obj)\n viewers = ViewNum.objects.filter(content_type=obj_type, object_id=obj.id)\n\n if viewers.count() > 0:\n viewer = viewers[0]\n else:\n viewer = ViewNum(content_type=obj_type, object_id=obj.id)\n viewer.view_num += 1\n viewer.save()\n\n response = super().get(request, *args, **kwargs)\n response.set_cookie(cookie_name, 
\"True\")\n return response\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n show_edit_url = False\n if self.request.user.is_authenticated():\n if self.request.user == self.object.author:\n show_edit_url = True\n context['show_edit_url'] = show_edit_url\n context['random_posts'] = Post.objects.exclude(id=self.object.pk).order_by('?')[:settings.RANDOM_POST_NUM]\n return context\n\n\ndef search(request):\n \"\"\"show blogs' list\"\"\"\n try:\n q = request.GET['q']\n if not q:\n return HttpResponseRedirect(reverse('blog:index'))\n\n posts = Post.objects.filter(Q(title__icontains=q) | Q(content__icontains=q))\n pages, posts = get_pages(request, posts)\n\n data = dict()\n data[\"posts\"] = posts\n data[\"pages\"] = pages\n data['random_posts'] = Post.objects.order_by('?')[:settings.RANDOM_POST_NUM]\n data[\"q\"] = q\n\n except Exception:\n raise Http404\n return render(request, 'blog/blog_search.html', data)\n\n\n# 检查是否是管理员的装饰器\ndef check_admin(func):\n def wrapper(request):\n if request.user.is_authenticated():\n if request.user.is_superuser:\n return func(request)\n\n data = dict()\n data['goto_url'] = '/'\n data['goto_time'] = 3000\n data['goto_page'] = True\n data['message'] = '您不是网站管理员,没有权限访问这个页面'\n return render(request, 'message.html', data)\n return wrapper\n\n\n@check_admin\ndef add_blog(request):\n data = csrf(request)\n data['tags'] = Tag.objects.all()\n data['categories'] = Category.objects.all()\n return render_to_response('blog/blog_add.html', context=data)\n\n\n@check_admin\ndef add_blog_ajax(request):\n data = dict()\n try:\n post = Post()\n post_data = ajax_post_data(request)\n\n post.title = post_data['post_title']\n post.author = post_data['post_author']\n post.category_id = post_data['post_category']\n post.content = post_data['post_content']\n post.save()\n print(post_data['tag_ids'])\n post.tags.clear()\n for tag in Tag.objects.filter(id__in=post_data['tag_ids']):\n post.tags.add(tag)\n\n data['success'] = True\n data['message'] = reverse('blog:detail', args=[post.pk, ])\n except Exception as e:\n data['success'] = False\n data['message'] = str(e)\n\n return JsonResponse(data)\n\n\n# 检查是否是当前文章作者的��饰器\ndef check_blog_author(is_json=False):\n def __check_blog_author(func):\n def wrapper(request, post_id):\n try:\n if not request.user.is_authenticated():\n raise Exception(u'您尚未登录!')\n\n posts = Post.objects.filter(id=post_id)\n if posts.count == 0:\n raise Exception(u'找不到没有对应的博客')\n\n blog = posts[0]\n if blog.author != request.user:\n raise Exception(u'您不是该博文的作者,不能编辑')\n\n # 验证通过,继续执行\n return func(request, post_id)\n except Exception as e:\n data = {}\n\n # 判断是否是要返回json\n if is_json:\n data['success'] = False\n data['message'] = str(e)\n return JsonResponse(data)\n else:\n data['goto_url'] = '/'\n data['goto_time'] = 3000\n data['goto_page'] = True\n data['message'] = e\n return render_to_response('message.html', data)\n return wrapper\n return __check_blog_author\n\n\n@check_blog_author(False)\ndef edit_blog(request, post_id):\n post = Post.objects.get(id=post_id)\n\n # 对当前文章之前勾选的 tag 进行标记\n tags = []\n post_tags = post.tags.all()\n for tag in Tag.objects.all():\n tag.checked = True if tag in post_tags else False\n tags.append(tag)\n\n # 对当前文章之前勾选的 category 进行标记\n categories = []\n post_category = post.category\n for category in Category.objects.all():\n category.selected = True if category == post_category else False\n categories.append(category)\n\n data = csrf(request)\n data['post'] = post\n data['tags'] = tags\n 
data['categories'] = categories\n return render_to_response('blog/blog_edit.html', context=data)\n\n\n@check_blog_author(True)\ndef edit_blog_content(request, post_id):\n post = Post.objects.get(id=post_id)\n\n data = dict()\n data['success'] = True\n data['message'] = post.content\n return JsonResponse(data)\n\n\n@check_blog_author(True)\ndef edit_blog_ajax(request, post_id):\n data = dict()\n try:\n post_data = ajax_post_data(request)\n\n post = Post.objects.get(id=post_id)\n post.title = post_data['post_title']\n post.author = post_data['post_author']\n post.category_id = post_data['post_category']\n post.content = post_data['post_content']\n post.save()\n\n post.tags.clear()\n for tag in Tag.objects.filter(id__in=post_data['tag_ids']):\n post.tags.add(tag)\n\n data['success'] = True\n data['message'] = reverse('blog:detail', args=[post.pk, ])\n except Exception as e:\n data['success'] = False\n data['message'] = str(e)\n\n return JsonResponse(data)\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"641982197","text":"import math\r\nimport numpy as np\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn import svm\r\nimport os\r\nimport sys\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.externals import joblib\r\n\r\n\r\n########################################Creates a predictor based on PSI BLAST results####################################\r\n\r\n############################Parse PSSMs to a suitable format for SVM learning####################\r\n\r\nf = open ('C:\\\\Users\\\\Leonie\\\\Git\\\\scilife-project\\\\datasets\\\\tm_globular_3state.txt', 'r')\r\n\r\nlist_all = list()\r\n\r\n#remove line breaks\r\nfor line in f:\r\n\tnewline = line.replace('\\n', '')\r\n\tlist_all.append(newline)\r\n\r\nword_listflat=[]\r\nmfeature_list=[]\r\nordered_stru=[]\r\nlist_title=[]\r\nlist_stru=[]\r\nword_list=[]\r\npssm_list=[]\r\n\r\n\r\n#defines window size\r\nwindows = 45\r\n\r\n#creates lists separating titles and structures\r\nfor i in range (0, len(list_all), 3):\r\n\tlist_title.append(list_all[i])\r\n\tlist_stru.append((math.floor(windows/2))*'X'+list_all[i+2]+(math.floor(windows/2))*'X')\r\n\r\n\t\r\n#calls PSSMs by title only if they exist in directory\r\nfor title in list_title:\r\n\tnewtitle = title.strip('>').replace('|', '_') #WARNING!!!!!!!!!!!!!!IF THIS REPLACE COMMAND IS NEEDED DEPENDS ON THE COMPUTER AND HOW THE PSSM FILE NAMES LOOK LIKE!!!!\r\n\tnewtitle = '%s.fasta.pssm' %newtitle\r\n\tif os.path.isfile(\"C:\\\\Users\\\\Leonie\\\\Git\\\\scilife-project\\\\datasets\\\\PSSM\\\\626_pssm\\\\%s\" %newtitle):\t\t\r\n\t\tprint(\"Running: %s\" %newtitle)\t\t\r\n\t\t\r\n\t\tf= list(open(\"C:\\\\Users\\\\Leonie\\\\Git\\\\scilife-project\\\\datasets\\\\PSSM\\\\626_pssm\\\\%s\" %newtitle, 'r'))\r\n\t\tpssm_list = []\r\n\t\tword_list = []\r\n\t\t\r\n\t\t#takes desired positions from PSSM lines\r\n\t\tfor i in range(3,(len(f)-6)):\r\n\t\t\tif i == 3:\r\n\t\t\t\tpssm_list.extend((math.floor(windows/2))*[list(np.zeros(20))])\r\n\t\t\telements= f[i].split()\r\n\t\t\tpssm_list.append(list(elements[22:-2]))\r\n\t\t\tif i == (len(f)-7):\t\t\r\n\t\t\t\tpssm_list.extend((math.floor(windows/2))*[list(np.zeros(20))])\r\n\t\t\r\n\t\t#splitting and normalization of psssm frequency numbers \r\n\t\tfor position in pssm_list:\r\n\t\t\tfor element in range (0, 
len(position)):\r\n\t\t\t\tposition[element] = int(position[element])/100\r\n\r\n\t\t#creates sliding windows from psssm frequencies\r\n\t\tfor aa in range(0, len(pssm_list)-(windows-1)):\r\n\t\t\tword_list.append(pssm_list[aa:aa+windows])\r\n\t\tfor word in word_list:\r\n\t\t\tsumaalist=list()\r\n\t\t\tfor aa in word:\r\n\t\t\t\tsumaalist = sumaalist+aa \r\n\t\t\tword_listflat.append(sumaalist) #word_listflat collects all sliding windows from all PSSMs called in this script\r\n\t\t\r\n\t\t#Extraction of structure features corresponding to the pssm sequence\r\n\t\tindex = list_title.index(title)\r\n\t\tordered_stru.append(list_stru[index])\r\n\r\n#creates sliding windows of the structure features and saves the middle feature \r\nfor structure in ordered_stru:\r\n\tfeat_list = list (structure)\r\n\tfor feature in range (int(windows/2), len(feat_list)-math.floor(windows/2)):\r\n\t\tmfeature_list.append([feat_list[feature]])\t\t\r\n\r\n#translantes structure windows into numerical code\r\nfrom structure_dict import structure_dict\r\nfor feature in range(0, len(mfeature_list)):\r\n\tfor key in structure_dict:\r\n\t\tif key == mfeature_list [feature][0]:\r\n\t\t\tmfeature_list[feature][0] = int(structure_dict[key])\r\n\r\n\t\t\t\r\n##########################################SVM learning#############################################################\r\n\r\n#splits structure and psssm windows into training and test sets \r\nX_train, X_test, Y_train, Y_test = train_test_split(word_listflat, mfeature_list, test_size=0.2)\r\n\r\n#SVM learning\r\nclf = svm.LinearSVC(C=0.5, class_weight='balanced').fit(X_train, Y_train)\r\nscore = clf.score(X_test, Y_test)\r\nprint(score)\r\n##########################################prediction+quality report###############################################################\r\npredicted_f = clf.predict(X_test)\r\nY_test = [j for i in Y_test for j in i]\r\nprint(classification_report(Y_test, predicted_f)) #for detailed evalutation prints classification report\r\n\r\n##########################################save model############################################################################# \r\njoblib.dump(clf, 'pssm_predictor')\r\n\r\nprint('finished: Model saved')\r\n\r\n","sub_path":"scripts/PSSM_based_prediction/PSSM_modelcreator.py","file_name":"PSSM_modelcreator.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"194958273","text":"from dtw import dtw\nimport pickle\nimport time\nimport statistics\nfrom numpy import zeros\n\n\ntimes = []\ndef get_frames(filename):\n frames = []\n with open(filename, \"rb\") as file:\n while True:\n try:\n frames.append(pickle.load(file))\n except EOFError:\n break\n\n print(\"Read \" + str(len(frames)) + \" frames from file '\" + filename + \"'\")\n return frames\n\ndef frame_distance(frame1, frame2):\n dist = 0\n\n for p1, p2 in zip(frame1, frame2):\n dist += (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2\n return dist\n\n\ndef fun(gesture,sequence):\n global times\n gest_len = len(gesture)\n\n\n for i in range(gest_len,len(sequence)):\n t = time.time()\n d, cost_matrix, acc_cost_matrix,path = dtw(gesture, sequence[i - gest_len:i], dist=frame_distance)\n ct = time.time() - t\n print(\"dtw computation time: \" + str(ct) + \"s\")\n print()\n print(cost_matrix)\n print()\n times.append(ct)\n i += 1\n\n\ndef acc_cost(cost, n):\n acc_cost_m = zeros((n,n)).tolist()\n\n acc_cost_m[0][0]=cost[0][0]\n\n for i in range(1,n):\n acc_cost_m[i][0] = 
acc_cost_m[i-1][0] + cost[i][0]\n\n for i in range(1,n):\n acc_cost_m[0][i] = acc_cost_m[0][i-1] + cost[0][i]\n\n for i in range(1,n):\n for j in range(1,n):\n acc_cost_m[i][j] = min(acc_cost_m[i-1][j],acc_cost_m[i][j-1],acc_cost_m[i-1][j-1]) + cost[i][j]\n\n return acc_cost_m\n\ndef get_cost(x,y,n):\n cost_m = zeros((n,n)).tolist()\n\n for i in range(n):\n for j in range(n):\n cost_m[i][j] = frame_distance(x[i],y[j])\n\n\n return cost_m\n\n\ndef fun2(gesture,sequence):\n gest_len = len(gesture)\n\n t = time.time()\n d, cost_matrix1, acc_cost_matrix1,path = dtw(gesture, sequence[0:gest_len], dist=frame_distance)\n ct = time.time() - t\n print(\"dtw computation time: \" + str(ct) + \"s\")\n # print()\n # print(acc_cost_matrix1)\n # print()\n\n t = time.time()\n d, cost_matrix2, acc_cost_matrix2,path = dtw(gesture, sequence[1:gest_len+1], dist=frame_distance)\n old_ct = time.time() - t\n\n print(path.tolist())\n print(\"dtw computation time: \" + str(ct) + \"s\")\n # print()\n # print(cost_matrix2)\n # print()\n\n t = time.time()\n cost_matrix1 = cost_matrix1.tolist()\n cost_matrix2 = cost_matrix2.tolist()\n acc_cost_matrix2 = acc_cost_matrix2.tolist()\n \n convesion = time.time() - t\n\n print(\"conversion time: \" + str(convesion))\n\n print()\n\n t = time.time()\n naive_cost = get_cost(gesture,sequence[1:gest_len+1], gest_len)\n n_cost_t = time.time() - t\n print(\"naive cost time \" + str(n_cost_t))\n\n print(naive_cost == cost_matrix2)\n\n t = time.time()\n custom_cost = [ cost_matrix1[i][1:] + [frame_distance(x,sequence[gest_len])] for (i,x) in enumerate(gesture)]\n cost_t = time.time() - t\n print(\"custom cost time: \" + str(cost_t) + \"s\")\n\n t = time.time()\n my_acc_cost = acc_cost(custom_cost, gest_len)\n acc_cost_t = time.time() - t\n print(\"custom acc_cost time = \" + str(acc_cost_t))\n\n # my_acc_cost = my_acc_cost.tolist()\n print(my_acc_cost == acc_cost_matrix2)\n\n # print(my_acc_cost)\n # print(acc_cost_matrix2)\n\n\n new_ct = acc_cost_t + cost_t\n print(\"total custom time: \" + str(new_ct))\n\n print(\"better than dtw by \" + str(old_ct/new_ct))\n print(\"better than ccc by \" + str(n_cost_t/cost_t))\n\n\n\n\nif __name__ == \"__main__\":\n gesture = get_frames(\"/home/vladyslav/gr/tf-pose-estimation/left_forward\")\n sequence = get_frames(\"/home/vladyslav/gr/tf-pose-estimation/gr2\")\n\n fun2(gesture,sequence)\n #print(statistics.mean(times))\n ","sub_path":"dtw_ex.py","file_name":"dtw_ex.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"44081448","text":"from django.urls import path\n\nfrom .views import index, by_rubric, BbCreateView, BbDetailView, BbUpdateView, BbDeleteView\n\nurlpatterns = [\n\t# path('add/', add_and_save, name = 'add'),\n\tpath('add/', BbCreateView.as_view(), name = 'add'),\n\tpath('/', by_rubric, name = 'by_rubric'),\n\tpath('', index, name = 'index'),\n\tpath('detail//', BbDetailView.as_view(), name = 'detail'),\n\tpath('update//', BbUpdateView.as_view(), name = 'update'),\n\tpath('delete//', BbDeleteView.as_view(), name = 'delete')\n]","sub_path":"UniqueSite/bboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"535591303","text":"import CrazyCod.Utilities.filters3 as filt\n\nimport CrazyCod.Futures.Models.OldWork.Spreads.Old_Work.step5_get_returns_spreads_new as s5\nfrom CrazyCod.Futures.folder import *\nfrom 
CrazyCod.Utilities.frames import DataFrame\nfrom CrazyCod.Utilities.pkgstdy import *\n\n# AttractSpreadsUniverse1 = s5.AttractSpreadsUniverse1\n# AttractSpreadsUniverse2 = s5.AttractSpreadsUniverse2 # 19 minus Fcoj, Mlk\nAttractSpreadsUniverse3 = s5.AttractSpreadsUniverse3 # 14 minus Fcoj, Mlk (main candidate)\nAttractSpreadsUniverse4 = s5.AttractSpreadsUniverse4 # 30 minus Mlk\nAttractSpreadsUniverse5 = s5.AttractSpreadsUniverse5 # 19 minus Mlk\nAttractSpreadsUniverse6 = s5.AttractSpreadsUniverse6 # 40\nAttractSpreadsUniverse7 = s5.MasterSpreadsUniverse7 # 16 minus Mlk\nAttractSpreadsUniverse8 = s5.MasterSpreadsUniverse8 # 19 minua Mlk\n\nIlliquid = ['FCOJ', 'Milk']\n\n\ndef port_eq_wgt(_all_ret):\n _all_ret2 = _all_ret.copy()\n _num_cols = len(_all_ret2.tick_cols())\n _all_ret2 = _all_ret2.reduce_by_min_data(int(_num_cols/2))\n _all_ret2_m = _all_ret2[_all_ret2.tick_cols()].values\n\n _all_ret2_m_s = np.sum(~np.isnan(_all_ret2_m), axis=1)\n _all_ret2_m1 = _all_ret2_m.copy()\n _all_ret2_m1[np.isnan(_all_ret2_m1)] = 0\n _all_ret2_m2 = np.sum(_all_ret2_m1, axis=1)/_all_ret2_m_s\n\n _res_df = dummy_df(_all_ret2)\n _res_df['PortRet'] = _all_ret2_m2\n _res_df['PortNav'] = conv_to_price_exp(_all_ret2_m2)\n return _res_df\n\n\ndef port_op_wgt(_all_ret):\n _all_ret2 = _all_ret.copy()\n _num_cols = len(_all_ret2.tick_cols())\n _all_ret2 = _all_ret2.reduce_by_min_data(int(_num_cols/2))\n _all_ret2_m = _all_ret2[_all_ret2.tick_cols()].values\n\n # do optimization\n _num_sig = _all_ret2_m.shape[1]\n _num_idx = list(range(0, _num_sig))\n _no_neg = False\n while not _no_neg:\n _sh = np.empty(_num_sig)*np.nan\n _mn = np.empty(_num_sig)*np.nan\n _vl = np.empty(_num_sig)*np.nan\n _cor_mat = np.identity(_num_sig)\n for i in range(0, _num_sig):\n _mn[i] = np.nanmean(_all_ret2_m[:, _num_idx[i]])*252\n _vl[i] = np.nanstd(_all_ret2_m[:, _num_idx[i]])*np.sqrt(252)\n _sh[i] = _mn[i]/_vl[i]\n for i in range(0, _num_sig-1):\n for j in range(i+1, _num_sig):\n _cor_mat[i, j] = smart_spearman(_all_ret2_m[:, _num_idx[i]], _all_ret2_m[:, _num_idx[j]])\n _cor_mat[j, i] = _cor_mat[i, j]\n _wgt1 = np.dot(np.linalg.inv(_cor_mat), _sh)\n if np.any(_wgt1 < 0):\n _neg_arg = np.argmin(_wgt1)\n _num_idx = [i for i in _num_idx if i != _neg_arg]\n _num_sig -= 1\n else:\n _wgt1 /= np.sum(_wgt1)\n _no_neg = True\n\n # these are the weights that sum to one\n _num_sig = _all_ret2_m.shape[1]\n _wgt1a = np.zeros(_num_sig)\n _wgt1a[_num_idx] = _wgt1\n\n # now adjust the weights so that all series are equal vol\n _vl1a = np.zeros(_num_sig)\n _vl1a[_num_idx] = np.mean(_vl)/_vl\n _wgt1a *= _vl1a\n\n _all_ret2_m1 = _all_ret2_m.copy()\n _all_ret2_m1[np.isnan(_all_ret2_m1)] = 0\n _all_ret2_m2 = (~np.isnan(_all_ret2_m)).astype('float64')\n\n _all_ret2_m1w1 = _all_ret2_m1 * _wgt1a[np.newaxis, :]\n _all_ret2_m2w1 = _all_ret2_m2 * _wgt1a[np.newaxis, :]\n\n _all_ret2_w1 = np.sum(_all_ret2_m1w1, axis=1) / np.sum(_all_ret2_m2w1, axis=1)\n\n _res_df = dummy_df(_all_ret2)\n _res_df['PortRet'] = _all_ret2_w1\n _res_df['PortNav'] = conv_to_price_exp(_all_ret2_w1)\n return _res_df, _wgt1a\n\n\ndef port_op_wgt2(_all_ret, keep=10):\n _all_ret2 = _all_ret.copy()\n _num_cols = len(_all_ret2.tick_cols())\n _all_ret2 = _all_ret2.reduce_by_min_data(int(_num_cols/2))\n _all_ret2_m = _all_ret2[_all_ret2.tick_cols()].values\n\n # do optimization\n _num_sig = _all_ret2_m.shape[1]\n _num_idx = list(range(0, _num_sig))\n _no_neg = False\n while not _no_neg:\n _sh = np.empty(_num_sig)*np.nan\n _mn = np.empty(_num_sig)*np.nan\n _vl = np.empty(_num_sig)*np.nan\n 
_cor_mat = np.identity(_num_sig)\n for i in range(0, _num_sig):\n _mn[i] = np.nanmean(_all_ret2_m[:, _num_idx[i]])*252\n _vl[i] = np.nanstd(_all_ret2_m[:, _num_idx[i]])*np.sqrt(252)\n _sh[i] = _mn[i]/_vl[i]\n for i in range(0, _num_sig-1):\n for j in range(i+1, _num_sig):\n _cor_mat[i, j] = smart_spearman(_all_ret2_m[:, _num_idx[i]], _all_ret2_m[:, _num_idx[j]])\n _cor_mat[j, i] = _cor_mat[i, j]\n _wgt1 = np.dot(np.linalg.inv(_cor_mat), _sh)\n # print(_wgt1)\n if np.where(_wgt1 > 0)[0].shape[0] > keep:\n _neg_arg = np.argmin(_wgt1)\n # print(_neg_arg)\n _num_idx = [i for i_idx, i in enumerate(_num_idx) if i_idx != _neg_arg]\n # print(_num_idx)\n _num_sig -= 1\n else:\n _wgt1 /= np.sum(_wgt1)\n _no_neg = True\n\n # these are the weights that sum to one\n _num_sig = _all_ret2_m.shape[1]\n _wgt1a = np.zeros(_num_sig)\n _wgt1a[_num_idx] = _wgt1\n\n # now adjust the weights so that all series are equal vol\n _vl1a = np.zeros(_num_sig)\n _vl1a[_num_idx] = np.mean(_vl)/_vl\n _wgt1a *= _vl1a\n\n _all_ret2_m1 = _all_ret2_m.copy()\n _all_ret2_m1[np.isnan(_all_ret2_m1)] = 0\n _all_ret2_m2 = (~np.isnan(_all_ret2_m)).astype('float64')\n\n _all_ret2_m1w1 = _all_ret2_m1 * _wgt1a[np.newaxis, :]\n _all_ret2_m2w1 = _all_ret2_m2 * _wgt1a[np.newaxis, :]\n\n _all_ret2_w1 = np.sum(_all_ret2_m1w1, axis=1) / np.sum(_all_ret2_m2w1, axis=1)\n\n _res_df = dummy_df(_all_ret2)\n _res_df['PortRet'] = _all_ret2_w1\n _res_df['PortNav'] = conv_to_price_exp(_all_ret2_w1)\n return _res_df, _wgt1a\n\n\ndef get_port_returns_eq(univ, plot_res=False, rem_ill=True):\n _all_ret = None\n for i in univ:\n if rem_ill:\n if len([j for j in Illiquid if j in i.crname]) > 0:\n continue\n _ret = mkt_spr_retrieve(i.crname, 'Spreads', 'StratRet')[['Date', 'ModelRet']]\n _lev = mkt_spr_load(i.crname, 'Spreads', 'Leverage')\n _ret['ModelRet'] = _ret['ModelRet'].values * _lev\n _ret.rename({'ModelRet': i.crname})\n if _all_ret is None:\n _all_ret = _ret.copy()\n else:\n _all_ret = DataFrame.merge_on_date(_all_ret, _ret)\n\n _all_ret = _all_ret[_all_ret['Date'].values > 20051231, :]\n for i in _all_ret.tick_cols():\n _all_ret[i] = filt.fill1(_all_ret[i].values, 0)\n\n _res_df = port_eq_wgt(_all_ret)\n\n print(np.nanmean(_res_df['PortRet'].values)*np.sqrt(252)/np.nanstd(_res_df['PortRet'].values),\n np.nanmean(_res_df['PortRet'].values) * 252)\n if plot_res:\n plot_ts_new(_res_df['Date'].values, _res_df['PortNav'].values)\n return _res_df\n\n\ndef get_port_returns_op(univ, plot_res=False, rem_ill=True, print_wgt=False, keep_sig=None):\n _all_ret = None\n for i in univ:\n if rem_ill:\n if len([j for j in Illiquid if j in i.crname]) > 0:\n continue\n _ret = mkt_spr_retrieve(i.crname, 'Spreads', 'StratRet')[['Date', 'ModelRet']]\n _lev = mkt_spr_load(i.crname, 'Spreads', 'Leverage')\n _ret['ModelRet'] = _ret['ModelRet'].values * _lev\n _ret.rename({'ModelRet': i.crname})\n if _all_ret is None:\n _all_ret = _ret.copy()\n else:\n _all_ret = DataFrame.merge_on_date(_all_ret, _ret)\n\n _all_ret = _all_ret[_all_ret['Date'].values > 20051231, :]\n for i in _all_ret.tick_cols():\n _all_ret[i] = filt.fill1(_all_ret[i].values, 0)\n\n # print(keep_sig)\n if keep_sig is None:\n _res_df, _wgt = port_op_wgt(_all_ret)\n else:\n _res_df, _wgt = port_op_wgt2(_all_ret, keep=keep_sig)\n\n print(np.nanmean(_res_df['PortRet'].values)*np.sqrt(252)/np.nanstd(_res_df['PortRet'].values),\n np.nanmean(_res_df['PortRet'].values)*252)\n if print_wgt:\n for i in zip(_all_ret.tick_cols(), _wgt):\n print('(\\''+i[0]+'\\', ', i[1], '), ')\n if plot_res:\n 
plot_ts_new(_res_df['Date'].values, _res_df['PortNav'].values, logy=True)\n return _res_df\n\n\n_au3_wgt = [ # Pretty bad not what I want\n ('Wheat_01_KWheat_04_Corn_06_Inv', 0.0816480093996), ('FCattle_02_FCattle_04_FCattle_05_Inv', 0.0998075118985),\n ('SoyOil_01_SoyOil_05_Soybean_04_Inv', 0.0845114025752),\n ('LeanHogs_05_LeanHogs_01_LeanHogs_02_Reg', 0.0813258107023),\n ('Sugar_01_Sugar_05_SugarLdn_01_Inv', 0.0800881054344), ('Sugar_04_Sugar_05_Inv', 0.0412398512731),\n ('Sugar_02_Sugar_03_Inv', 0.0529664566498), ('Coffee_01_Coffee_04_Inv', 0.133441444633),\n ('NatGas_11_NatGas_02_NatGas_08_Reg', 0.100385806781),\n ('CrudeBrent_02_CrudeBrent_01_CrudeBrent_06_Inv', 0.0736482893353),\n ('CrudeWTI_06_CrudeWTI_02_CrudeWTI_05_Reg', 0.0), ('CrudeWTI_02_CrudeWTI_03_Gasoline_08_Inv', 0.0770610292249),\n ('HeatOil_02_HeatOil_01_HeatOil_08_Inv', 0.0309492330018),\n ('Gasoline_05_HeatOil_02_Gasoline_02_Reg', 0.0682033941064)]\n\n_au6_wgt = [ # closest to what I want\n ('KWheat_02_KWheat_01_KWheat_03_Inv', 0.00269625931248), ('KWheat_01_KWheat_02_KWheat_05_Inv', 0.0401882708346),\n ('KWheat_04_Wheat_02_KWheat_05_Reg', 0.0516057833386), ('LCattle_03_LCattle_02_LCattle_04_Inv', 0.0425388389297),\n ('FCattle_03_FCattle_02_FCattle_05_Inv', 0.0147975904105),\n ('FCattle_04_FCattle_02_FCattle_03_Reg', 0.0538058281819), ('LCattle_04_LCattle_05_Inv', 0.0345425584814),\n ('FCattle_05_FCattle_02_FCattle_04_Reg', 0.0306586919788),\n ('LCattle_04_LCattle_02_LCattle_03_Reg', 0.0227881206918), ('FCattle_03_LCattle_04_FCattle_05_Inv', 0.0),\n ('SoyOil_02_SoyOil_01_SoyOil_03_Inv', 0.0194066437666), ('SoyOil_04_SoyOil_02_SoyOil_05_Reg', 0.0057659590879),\n ('SoyOil_03_SoyOil_01_SoyOil_02_Reg', 0.0644372369745), ('SoyOil_03_SoyOil_04_SoyOil_05_Inv', 0.0600160654987),\n ('Soybean_04_Soybean_05_SoyMeal_04_Inv', 0.0165003940053), ('Sugar_06_Sugar_05_Sugar_07_Reg', 0.0334537529091),\n ('SugarLdn_02_SugarLdn_01_SugarLdn_03_Inv', 0.0222147752506),\n ('SugarLdn_03_SugarLdn_01_SugarLdn_04_Inv', 0.0253200088902), ('Sugar_06_Sugar_03_Sugar_05_Reg', 0.0129916291057),\n ('SugarLdn_04_SugarLdn_01_SugarLdn_05_Inv', 0.0280909280148),\n ('Coffee_03_Coffee_02_Coffee_05_Inv', 0.000265711989896), ('Coffee_04_Coffee_01_Coffee_05_Reg', 0.0304923955489),\n ('Coffee_02_Coffee_01_Coffee_05_Inv', 0.0191880253491), ('NatGas_08_NatGas_06_NatGas_09_Inv', 0.0301819679119),\n ('NatGas_11_NatGas_08_NatGas_12_Reg', 0.0368551360098), ('NatGas_10_NatGas_06_NatGas_14_Reg', 0.0312390565686),\n ('Gasoline_04_Gasoline_03_Gasoline_06_Inv', 0.0544969595075),\n ('Gasoline_06_Gasoline_04_Gasoline_08_Reg', 0.026710366786),\n ('HeatOil_04_HeatOil_01_HeatOil_07_Inv', 0.0624226166376),\n ('HeatOil_02_HeatOil_01_HeatOil_06_Inv', 0.0356466857006), ('CrudeWTI_07_CrudeWTI_03_CrudeWTI_09_Reg', 0.0),\n ('CrudeBrent_09_CrudeBrent_05_CrudeBrent_08_Reg', 0.0), ('Gasoline_02_Gasoline_01_Gasoline_05_Inv', 0.0),\n ('CrudeBrent_04_CrudeBrent_01_CrudeBrent_06_Inv', 0.0),\n ('CrudeWTI_03_CrudeWTI_01_CrudeWTI_08_Inv', 0.000910116546844),\n ('GasOilEur_02_GasOilEur_03_Reg', 0.00995681821489), ('HeatOil_08_HeatOil_06_Gasoline_08_Inv', 0.0445017758783),\n ('HeatOil_08_CrudeWTI_04_HeatOil_07_Reg', 0.0205086324149),\n ('GasOilEur_04_GasOilEur_01_GasOilEur_06_Inv', 0.0167722642411),\n ('Gasoline_07_CrudeWTI_09_Gasoline_03_Reg', 0.0169817189386)]\n\n\n_au4_wgt = [ # also not inline with what I want\n ('Corn_04_Corn_03_Corn_06_Inv', 0.0248274034439), ('Corn_06_Wheat_05_Corn_01_Reg', 0.00732215062546),\n ('FCattle_02_FCattle_01_FCattle_05_Inv', 0.051894303457), 
('FCattle_02_LCattle_01_FCattle_05_Inv', 0.0180937911723),\n ('SoyOil_01_SoyOil_05_Soybean_05_Inv', 0.0246938740298), ('SoyOil_04_SoyOil_02_SoyOil_03_Reg', 0.0653022886862),\n ('SoyOil_05_SoyOil_03_SoyOil_04_Reg', 0.0349306647262), ('Soybean_02_Soybean_05_SoyMeal_01_Inv', 0.0136259192741),\n ('LeanHogs_05_LeanHogs_02_LeanHogs_04_Reg', 0.0651058186215),\n ('LeanHogs_04_LeanHogs_02_LeanHogs_03_Reg', 0.0274298224494),\n ('Sugar_02_Sugar_05_SugarLdn_01_Inv', 0.0549893362478),\n ('Sugar_04_Sugar_05_Inv', 0.0279923204754), ('Sugar_04_Sugar_06_SugarLdn_05_Inv', 0.00906038310135),\n ('Sugar_05_Sugar_02_Sugar_07_Reg', 0.00612230503919), ('Cocoa_02_Cocoa_01_Cocoa_05_Inv', 0.0128813643648),\n ('Coffee_02_Coffee_04_Inv', 0.075961535063), ('Platinum_01_Gold_03_Palladium_02_Inv', 0.0536963293831),\n ('NatGas_08_NatGas_09_NatGas_11_Inv', 0.0569961986757), ('NatGas_02_NatGas_01_NatGas_05_Inv', 0.0437826688254),\n ('NatGas_10_NatGas_06_NatGas_12_Reg', 0.0161971545753),\n ('CrudeWTI_01_CrudeWTI_06_Gasoline_08_Inv', 0.00111409041088),\n ('CrudeBrent_03_CrudeBrent_01_CrudeBrent_07_Inv', 0.0724345561071),\n ('CrudeWTI_02_CrudeWTI_01_CrudeWTI_05_Inv', 0.0198601852438),\n ('HeatOil_02_HeatOil_01_HeatOil_06_Inv', 0.0368087768648),\n ('Gasoline_06_HeatOil_02_Gasoline_04_Reg', 0.0363067737359),\n ('HeatOilEur_03_GasolineEur_03_GasOilEur_01_Inv', 0.0306959807239),\n ('CrudeBrent_08_CrudeBrent_03_CrudeBrent_09_Reg', 0.0),\n ('CrudeWTI_07_CrudeWTI_01_HeatOil_04_Reg', 0.0312169456088), \n ('Gasoline_08_CrudeWTI_09_Gasoline_01_Reg', 0.0585819436605), \n ('CrudeBrent_03_CrudeBrent_09_GasolineEur_03_Inv', 0.0412657519862)]\n\n_au6_wgt_05 = [('LeanHogs_05_LeanHogs_02_LeanHogs_04_Reg', 0.196342823432),\n ('Sugar_02_Sugar_05_SugarLdn_01_Inv', 0.192737050779),\n ('Coffee_02_Coffee_04_Inv', 0.200888497705),\n ('CrudeBrent_03_CrudeBrent_01_CrudeBrent_07_Inv', 0.232513839212),\n ('Gasoline_06_HeatOil_02_Gasoline_04_Reg', 0.180105024524)]\n\n_au6_wgt_10 = [('FCattle_02_FCattle_01_FCattle_05_Inv', 0.0802501190339),\n ('SoyOil_04_SoyOil_02_SoyOil_03_Reg', 0.136127212301),\n ('LeanHogs_05_LeanHogs_02_LeanHogs_04_Reg', 0.113826349107),\n ('Sugar_02_Sugar_05_SugarLdn_01_Inv', 0.110281862484),\n ('Coffee_02_Coffee_04_Inv', 0.119060690909),\n ('Platinum_01_Gold_03_Palladium_02_Inv', 0.0788208092636),\n ('NatGas_08_NatGas_09_NatGas_11_Inv', 0.0868693474816),\n ('CrudeBrent_03_CrudeBrent_01_CrudeBrent_07_Inv', 0.12871597742),\n ('Gasoline_06_HeatOil_02_Gasoline_04_Reg', 0.0811475681992),\n ('Gasoline_08_CrudeWTI_09_Gasoline_01_Reg', 0.0807847024312)]\n\n# j = AttractSpreadsUniverse4\n# for i, keep_sig in enumerate([5, 10, 20]):\n# # keep_sig = 20\n# plt.figure(i+1)\n# res_df = get_port_returns_op(j, keep_sig=keep_sig, plot_res=True, print_wgt=20)\n# _tmp = np.cumprod(1+res_df[res_df['Date'].values > 20161231, 'PortRet'].values)\n# print(_tmp[_tmp.shape[0]-1]-1)\n# plt.ylim([1, 10])\n\n\ndef generate_positions(sig_wgt, capital, leverage):\n # capital = 500000\n # leverage = 2\n # sig_wgt = _au6_wgt_05\n\n # get dollar positions\n lev_captial = capital * leverage\n sig_wgt_dollar = [(i[0], i[1]*lev_captial) for i in sig_wgt]\n\n # read the leverage at signal level\n for i_idx, i in enumerate(sig_wgt_dollar):\n # i = sig_wgt_dollar[0]\n sig_lev = mkt_spr_load(i[0], 'Spreads', 'Leverage')\n sig_wgt_dollar[i_idx] = (i[0], i[1]*sig_lev)\n\n # now read the beta(s) and signal\n _all_positions = []\n for i_idx, i in enumerate(sig_wgt_dollar):\n # process the betas and signals\n sig_val = mkt_spr_retrieve(i[0], 'Spreads', 'StratRet')\n sig_val = 
sig_val[sig_val.shape[0]-1, 'Signal']\n betas = mkt_spr_load(i[0], 'Stats', 'Betas')\n _name = i[0].split(sep='_')\n if len(_name) == 7: # butterfly\n _legs = (_name[0]+'_'+_name[1], _name[2]+'_'+_name[3], _name[4]+'_'+_name[5])\n if _name[6] == 'Inv':\n sig_val = -sig_val\n _betas = [1, -betas[0], -betas[1]]\n elif len(_name) == 5:\n _legs = (_name[0]+'_'+_name[1], _name[2]+'_'+_name[3])\n if _name[4] == 'Inv':\n sig_val = -sig_val\n _betas = [1, -betas[0]]\n else:\n _legs = (_name[0]+'_'+_name[1])\n if _name[2] == 'Inv':\n sig_val = -sig_val\n _betas = [1]\n _sig_wgt_dollar = i[1]\n _pos_size = [sig_val * j * _sig_wgt_dollar for j in _betas]\n\n # process the contract details\n _legs = list(_legs)\n _f_legs = []\n for j in _legs:\n _test = j.split('_')\n _f_legs.append(eval('s4._f' + _test[1] + '_' + _test[0]))\n _last_price = []\n for j in _legs:\n _last_price.append(mkt_retrieve(j, 'Stats', 'Returns')['Close'].values[-1])\n _cont_mult = []\n for j in _f_legs:\n _cont_mult.append(j.lotsize)\n _cont_val = [j*k for j, k in zip(_last_price, _cont_mult)]\n\n # process the number of contracts to hold\n _num_cont = [np.round(j/k, 2) for j, k in zip(_pos_size, _cont_val)]\n _num_cont_rnd = [np.round(j) for j in _num_cont]\n _legs_cont = [(j, k, l) for j, k, l in zip(_legs, _num_cont, _num_cont_rnd)]\n _all_positions.append(_legs_cont)\n\n # convert all positions in positions to hold\n _act_positions = []\n for i_idx, i in enumerate(_all_positions):\n # i_idx = 0\n # i = _all_positions[i_idx]\n for j in range(0, len(i)):\n # j = 1\n pr = mkt_retrieve(i[j][0], 'Stats', 'Returns')\n pr[pr['ContractAtClose'].values > 0, :] # Something wrong with LeanHogs_05\n\n pr = mkt_retrieve('Coffee_02', 'Stats', 'Returns')\n pr[pr['ContractAtClose'].values > 0, :] # Wtf !!! Contract not being read properly?\n # Something wrong with LeanHogs -- but ok with everything else\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"CrazyCod/Futures/Models/OldWork/Spreads/Old_Work/step7_generate_portfolio_results.py","file_name":"step7_generate_portfolio_results.py","file_ext":"py","file_size_in_byte":17754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"157685342","text":"# Copyright 2016 Hewlett Packard Enterprise Development LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for\n# the specific language governing permissions and limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nfrom sys import _getframe\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom opveclib.expression import position_in, output_like\nfrom opveclib.operator import Operator\nfrom opveclib.local import cuda_enabled\n\n\nclass TestIntegration(unittest.TestCase):\n def test_single_output(self):\n print('*** Running Test: ' + self.__class__.__name__ + ' function: ' + _getframe().f_code.co_name)\n\n class AddOp(Operator):\n def op(self, x, y):\n pos = position_in(x.shape)\n out = output_like(x)\n out[pos] = x[pos] + y[pos]\n return out\n\n in0 = np.random.random(5).astype(np.float32)\n in1 = np.random.random(5).astype(np.float32)\n reference = 4*(in0 + in1)*(in0 + in1)\n\n with tf.Session() as sess:\n with tf.device('/cpu:0'):\n a = in0*2\n b = in1*2\n c = AddOp(a, b, clear_cache=True).as_tensorflow()\n squared = tf.square(c)\n if cuda_enabled:\n with tf.device('/gpu:0'):\n a_gpu = in0*2\n b_gpu = in1*2\n c_gpu = AddOp(a_gpu, b_gpu).as_tensorflow()\n squared_gpu = tf.square(c_gpu)\n result, result_gpu = sess.run([squared, squared_gpu])\n assert np.allclose(reference, result_gpu)\n else:\n result = sess.run([squared])\n\n assert np.allclose(reference, result)\n\n\n def test_multiple_outputs(self):\n print('*** Running Test: ' + self.__class__.__name__ + ' function: ' + _getframe().f_code.co_name)\n\n class MultiOp(Operator):\n # first output is the sum of first two inputs\n # second output is the sum of the first two multiplied by the third\n # third output is sum of all three inputs\n def op(self, input0, input1, input2):\n pos = position_in(input0.shape)\n output0 = output_like(input0)\n output1 = output_like(input0)\n output2 = output_like(input0)\n\n a = input0[pos]\n b = input1[pos]\n c = input2[pos]\n d = a + b\n output0[pos] = d\n output1[pos] = d*c\n output2[pos] = d+c\n\n return output0, output1, output2\n\n rng = np.random.RandomState()\n in0 = rng.uniform(-1, 1, 5).astype(np.float32)\n in1 = rng.uniform(-1, 1, 5).astype(np.float32)\n in2 = rng.uniform(-1, 1, 5).astype(np.float32)\n\n np1 = in0*in0 + in1*in1\n np2 = np1*in2\n np3 = np1 + in2\n\n with tf.Session() as sess:\n sq0 = tf.square(in0)\n sq1 = tf.square(in1)\n\n with tf.device('/cpu:0'):\n op = MultiOp(sq0, sq1, in2, clear_cache=True)\n out0, out1, out2 = op.as_tensorflow()\n\n if cuda_enabled:\n with tf.device('/gpu:0'):\n op_gpu = MultiOp(sq0, sq1, in2, clear_cache=True)\n out0_gpu, out1_gpu, out2_gpu = op_gpu.as_tensorflow()\n\n eval1, eval2, eval3, eval1_gpu, eval2_gpu, eval3_gpu = \\\n sess.run([out0, out1, out2, out0_gpu, out1_gpu, out2_gpu])\n assert np.allclose(eval1_gpu, np1)\n assert np.allclose(eval2_gpu, np2)\n assert np.allclose(eval3_gpu, np3)\n else:\n eval1, eval2, eval3 = sess.run([out0, out1, out2])\n\n assert np.allclose(eval1, np1)\n assert np.allclose(eval2, np2)\n assert np.allclose(eval3, np3)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"opveclib/test_tensorflow/test_tensorflow_integration.py","file_name":"test_tensorflow_integration.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"187657466","text":"import requests\nimport pandas as pd \nfrom lxml import html\nimport os \nimport sys\nimport json\n# Para identificar o repositório em que o arquivo do programa se encontra e salvando os arquivos nos formatos CSV e 
JSON\nrepository = os.path.abspath(os.path.dirname(sys.argv[0]) or '.')\n\n\n# Classe do Crawler cotendo os metodos a serem usados\nclass Crawler_table(): \n\n\n# Começa extração das informações da pagina alvo 1 e retorna em formato de Data Frame \n def extractionPaginaAlvo_1(self):\n\n df = pd.DataFrame(columns=['Storage','CPU', 'Memory','Bandwidth', 'Price'])\n response = requests.get('https://www.vultr.com/products/cloud-compute/#pricing')\n content_web = html.fromstring(response.content)\n \n if response.status_code == 200:\n# Seleciona qual tabela que o usuário deseja via xpath\n div_table = '//div[@class = \"pt__body js-body\"]'\n table = content_web.xpath(div_table)[0]\n# Seleciona qual linha que o usuário deseja via xpath\n div_rows = table.xpath('./div')\n \n for link in range(0,len(div_rows)):\n# Inicio da extração dos textos da tabela desejada e sendo convertidos em Data Frame\n data = []\n div_content = div_rows[link].xpath('./div[@class = \"pt__row-content\"]')[0]\n\n divs_texto = div_content.xpath('./div')\n \n data.append(divs_texto[1].text_content().strip())\n data.append(divs_texto[2].text_content().strip())\n data.append(divs_texto[3].text_content().strip().split('Ram')[0])\n data.append(divs_texto[4].text_content().strip().split('Bandwidth')[0])\n data.append(divs_texto[5].text_content().strip().split('\\n')[0])\n \n df.loc[link] = data\n else:\n print(\"Infelizmente não foi possível extrair os dados, verifique os seletores se estão corretos\")\n return df \n \n# Começa extração das informações da pagina alvo 2 e retorna em formato de Data Frame \n def extractionPaginaAlvo_2(self):\n\n df = pd.DataFrame(columns=['Memory','vCPUs', 'SSD Disk','Transfer', '$/MO'])\n response = requests.get('https://www.digitalocean.com/pricing/')\n content_web = html.fromstring(response.content)\n \n if response.status_code == 200:\n# Seleciona qual tabela que o usuário deseja via xpath\n select_table = '//ul[@class = \"priceBox\"]'\n table = content_web.xpath(select_table)[0]\n# Seleciona qual tlinha que o usuário deseja via xpath\n rows_table = table.xpath('.//li[@class = \"priceBoxItem\"]')\n\n for link in range(0,len(rows_table)):\n# Inicio da extração dos textos da tabela desejada e sendo convertidos em Data Frame\n content_table = rows_table[link].xpath('.//a/div/ul/li')\n money_table = rows_table[link].xpath('.//a/div/div')\n rows_table = table.xpath('.//li[@class = \"priceBoxItem\"]')\n #print(money_table[0].text_content())\n data = []\n data.append(content_table[0].text_content().split(\"/\")[0])\n data.append(content_table[0].text_content().split(\"/\")[1])\n data.append(content_table[1].text_content().split(\"SSD\")[0])\n data.append(content_table[2].text_content().split(\"transfer\")[0])\n data.append(money_table[0].text_content().split(\"/\")[0])\n df.loc[link] = data\n else:\n print(\"Infelizmente não foi possível extrair os dados, verifique os seletores se estão corretos\")\n return df\n\n\n# Metodo para salvar o arquivo no formato JSON. \n def Save_Json(self,df):\n print(\"Favor colocar o nome do arquivo, sem .json\")\n name = input()\n dic = df.to_dict()\n with open(repository+'/'+name+'.json', 'w') as data:\n json.dump(dic, data,indent=4)\n print(\"##########################################\")\n print(\"Arquivo salvo no formato JSON com sucesso\")\n print(\"##########################################\")\n# Metodo para salvar o arquivo no formato CSV. 
\n def Save_CSV(self,df):\n print(\"Favor colocar o nome do arquivo, sem .csv\")\n name = input()\n df.to_csv(repository+'/'+name+'.csv',index=False)\n print(\"#########################################\")\n print(\"Arquivo salvo no formato CSV com sucesso\")\n print(\"#########################################\")\n\n\n# Metodo para printar na tela o conteudo extraido. \n def Print_info(self,df):\n print(df,\"\\n\")\n \n \n\n# Metodo para o usuario selecionar qual opcao que ele deseja\n def Menu(self):\n df_pagina1 = self.extractionPaginaAlvo_1()\n df_pagina2 = self.extractionPaginaAlvo_2()\n if not df_pagina1.empty and not df_pagina2.empty:\n action = -1\n while action != 0: \n print(\"Selecione qual opção deseja executar:\")\n action = input(\"1 para imprimir os dados\\n2 para Salvar em CSV\\n3 para Salvar em json\\n0 para sair\\nDigite sua opção: \")\n if action != \"0\":\n pagina = input(\"Qual site deseja fazer WebScrapping?\\n1 para a página alvo Vultr\\n2 para a página alvo Digital Ocean: \")\n \n if action == \"1\" and pagina == \"1\":\n self.Print_info(df_pagina1)\n elif action == \"2\" and pagina == \"1\":\n self.Save_CSV(df_pagina1)\n elif action == \"3\" and pagina == \"1\":\n self.Save_Json(df_pagina1)\n elif action == \"1\" and pagina == \"2\":\n self.Print_info(df_pagina2)\n elif action == \"2\" and pagina == \"2\":\n self.Save_CSV(df_pagina2)\n elif action == \"3\" and pagina == \"2\":\n self.Save_Json(df_pagina2) \n elif action == \"0\":\n return None\n else:\n print(\"Não existe essa opção, favor colocar as opções desejada novamente\\n\")\n else:\n print(\"Infelizmente não foi possível extrair os dados, verifique os seletores se estão corretos\")\n \n\nif __name__ == \"__main__\":\n# Cria-se uma instancia da classe e executa as acoes que o usuario deseja\n Extract_Data = Crawler_table()\n Extract_Data.Menu()","sub_path":"desafio.py","file_name":"desafio.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"84435212","text":"\nimport json\nimport requests\nimport hashlib\n\napi = 'xGXMdnyltTAHZMSHPZHg'\n\npartenaire = 'CgFVrWdhuSVccijkVVwF'\n\nmembre_email = 'amundi3.admin@vodeclic.com'\n\nid_membre = 'amundi3.admin'\n\ncrypt = hashlib.sha256(partenaire+api).hexdigest()\n\nlanguage_fr = \"fr\";\n\nlanguage_en = \"en\";\n\nskills = \"false\";\n\nimport json\nimport requests\n\nurl = 'https://lms.vodeclic.com/api/catalogs/courses.json'\n\ndata_fr = json.loads('{\"partenaire\" : \"'+partenaire+'\", \"encrypted_partenaire\" : \"'+crypt+'\", \"lang\" : \"'+language_fr+'\", \"id_membre\" : \"'+id_membre+'\", \"membre_email\" : \"'+membre_email+'\"}')\n\ndata_en = json.loads('{\"partenaire\" : \"'+partenaire+'\", \"encrypted_partenaire\" : \"'+crypt+'\", \"lang\" : \"'+language_en+'\", \"id_membre\" : \"'+id_membre+'\", \"membre_email\" : \"'+membre_email+'\"}')\n\nr_fr = requests.get(url, params=data_fr, headers = {'content-type': 'application/json'} )\n\nr_en = requests.get(url, params=data_en, headers = {'content-type': 'application/json'} )\n\n#fr\nid_fr = []\ntitle_fr = []\ndescription_fr = []\nlanguage_fr = []\nlogo_fr = []\nlength_fr = \"\";\n#en\nid_en = []\ntitle_en = []\ndescription_en = []\nlanguage_en = []\nlogo_en = []\nlength_en = \"\";\n\n#fr\njson_file_fr = json.loads(r_fr.content)\njson_file_fr = json_file_fr.get('data')\njson_file_fr = json_file_fr.get('subject')\n\nfor (i, item) in enumerate(json_file_fr):\n id_fr.insert( i, item.get('id'))\n description_fr.insert( i, 
item.get('description'))\n    language_fr.insert( i, item.get('language'))\n    title_fr.insert( i, item.get('title'))\n    logo_fr.insert( i,item.get('large_image_png_url'))\n    length_fr = i\n\n#en\njson_file_en = json.loads(r_en.content)\njson_file_en = json_file_en.get('data')\njson_file_en = json_file_en.get('subject')\n\nfor (j, item_en) in enumerate(json_file_en):\n    id_en.insert( j, item_en.get('id'))\n    description_en.insert( j, item_en.get('description'))\n    language_en.insert( j, item_en.get('language'))\n    title_en.insert( j, item_en.get('title'))\n    logo_en.insert( j,item_en.get('large_image_png_url'))\n    length_en = j\n\nvaleur_base_fr = 0\nvaleur_base_en = 0\nformations = []\nfor valeur_base_fr in range(length_fr):\n    formations.append(json.dumps(\n        {\"id\":id_fr[valeur_base_fr],\n        \"description\":description_fr[valeur_base_fr],\n        \"language\":language_fr[valeur_base_fr],\n        \"title\":title_fr[valeur_base_fr]\n        }, sort_keys=True,indent=4, separators=(',', ': '), ensure_ascii=False, encoding=\"utf-8\").encode('utf8')\n        )\n\nfor valeur_base_en in range(length_en):\n    formations.append(json.dumps(\n        {\"id\":id_en[valeur_base_en],\n        \"description\":description_en[valeur_base_en],\n        \"language\":language_en[valeur_base_en],\n        \"title\":title_en[valeur_base_en]\n        }, sort_keys=True,indent=4, separators=(',', ': '), ensure_ascii=False, encoding=\"utf-8\").encode('utf8')\n        )\n\nfichier = open('vodeclic_get.json',\"w\")\nfichier.close()\nfichier = open(\"vodeclic_get.json\", \"r+\")\n# join the per-course objects with commas inside a JSON array so the output parses\nfichier.write('{\"data\":{\"subject\":['+\",\".join(formations)+']}}')\nfichier.close()\n\n# download the course images\nbase_url = 'https://lms.vodeclic.com/'\nvaleur_base_pic_fr = 0\nvaleur_base_pic_en = 0\npic_folder = 'picture'\nimport urllib\nfor valeur_base_pic_fr in range(length_fr):\n    urllib.urlretrieve(base_url+logo_fr[valeur_base_pic_fr], \"picture/\"+id_fr[valeur_base_pic_fr]+\".png\")\nfor valeur_base_pic_en in range(length_en):\n    urllib.urlretrieve(base_url+logo_en[valeur_base_pic_en], \"picture/\"+id_en[valeur_base_pic_en]+\".png\")\n","sub_path":"vodeclic_get.py","file_name":"vodeclic_get.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"317738679","text":"# pylint: skip-file\n\"\"\"\nTests for the Stack class.\n\"\"\"\n\nimport unittest\n\nfrom stack.stack import Stack\n\n\nclass StackTestCase(unittest.TestCase):\n    \"\"\"\n    This Case of tests checks the functionality of the implementation of Stack\n    \"\"\"\n\n    def test_new_stack_is_empty(self):\n        \"\"\"\n        Create an empty Stack.\n        Test that its size is 0.\n        \"\"\"\n        stack = Stack()\n        self.assertTrue(stack.empty())\n        self.assertEqual(stack.size(), 0)\n\n    def test_new_stack_from_list(self):\n        \"\"\"\n        Create a Stack from a list.\n        Check that the size of stack equals to the size of the list.\n        Check that the top element of stack equals to the latest element of the list.\n        \"\"\"\n        data_to_stack = [1, 3, 5, 7, 2, 4]\n        stack = Stack(data_to_stack)\n        self.assertFalse(stack.empty())\n        self.assertEqual(stack.size(), len(data_to_stack))\n        self.assertEqual(stack.top(), data_to_stack[-1])\n\n    def test_new_stack_from_generator(self):\n        \"\"\"\n        Create a Stack from a generator.\n        Test that its size equals to the number provided in the generator.\n        \"\"\"\n        stack = Stack(range(10))\n        self.assertFalse(stack.empty())\n        self.assertEqual(stack.size(), 10)\n        self.assertEqual(stack.top(), 9)\n\n    def test_push_element(self):\n        \"\"\"\n        Push an element in stack.\n        Test that its size is 1.\n        
\"\"\"\n stack = Stack()\n stack.push(None)\n self.assertFalse(stack.empty())\n self.assertEqual(stack.size(), 1)\n\n def test_push_sequence_of_elements(self):\n \"\"\"\n Push a sequence of elements in stack.\n Test that its size equals to the length of the given sequence.\n Pop all elements from stack and check reversed order.\n \"\"\"\n stack = Stack()\n elements = (1, 2, \"string\", None, 0, Stack())\n for element in elements:\n stack.push(element)\n self.assertEqual(stack.size(), len(elements))\n for index, element in enumerate(reversed(elements)):\n top = stack.top()\n self.assertEqual(top, element)\n stack.pop()\n self.assertEqual(stack.size(), len(elements) - index - 1)\n self.assertTrue(stack.empty())\n\n def test_call_top_of_empty_stack_raised_error(self):\n \"\"\"\n Create an empty Stack.\n Test that call of top function raises Value error\n \"\"\"\n stack = Stack()\n self.assertRaises(ValueError, stack.top)\n\n def test_call_pop_of_empty_stack_raised_error(self):\n \"\"\"\n Create an empty Stack.\n Test that call of pop function raises Value error\n \"\"\"\n stack = Stack()\n self.assertRaises(ValueError, stack.pop)\n\n def test_stack_order(self):\n \"\"\"\n Push a sequence of elements in stack.\n Test that the element we push is at the top\n \"\"\"\n stack = Stack()\n elements = (1, 2, \"string\", None, 0, Stack())\n for element in elements:\n stack.push(element)\n top = stack.top()\n self.assertEqual(element, top)\n\n def test_stack_type(self):\n \"\"\"\n Create an empty Stack.\n Test that the type of Stack is list\n \"\"\"\n stack = Stack()\n self.assertEqual(stack.data, [])\n\n def test_stack_from_a_string(self):\n \"\"\"\n Create Stack with a string.\n Test that Stack data is split string\n \"\"\"\n stack = Stack(\"mama\")\n self.assertEqual(stack.data, [\"m\", \"a\", \"m\", \"a\"])\n\n def test_stack_from_a_string_order(self):\n \"\"\"\n Create Stack with a string.\n Test that the last letter is at the top\n \"\"\"\n stack = Stack(\"hero\")\n top = stack.top()\n self.assertEqual(top, \"o\")\n\n def test_merge_type(self):\n \"\"\"\n Merge Stack with other types.\n Test that call of merge function raises Type error\n \"\"\"\n stack = Stack()\n elements = (1, 2, \"string\", None, 0)\n for element in elements:\n self.assertRaises(TypeError, stack.merge, element)\n\n def test_merge_order(self):\n \"\"\"\n Create two Stacks.\n Test the top of changed Stack\n \"\"\"\n stack_1 = Stack([1, 2, 3])\n stack_2 = Stack([4, 5, 6])\n stack_1.merge(stack_2)\n self.assertEqual(stack_1.top(), 6)\n","sub_path":"stack/stack_test.py","file_name":"stack_test.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"399586571","text":"from utils import listAverage\nhtml = '''\n\n\n \n \n

[firstname] [lastname]

\n \n

Moyenne: [average]

\n \n \n\n'''\n# source de données\nstudent1 = {'firstname':'Yassine', 'lastname':'El Khazraji', 'country':'Maroc', 'notes':[4,20,13]}\nstudent2 = {'firstname':'Samba', 'lastname':'Lek', 'country':'Guinée', 'notes':[19,19,0]}\nstudent3 = {'firstname':'Zak', 'lastname':'Abdel Francis', 'country':'France', 'notes':[5,3,20]}\nstudent4 = {'firstname':'Antonio', 'lastname':'Vivaldi', 'country':'Italie', 'notes':[16,12]}\nstudents = [student1, student2, student3, student4]\n\nfor student in students:\n # on génère un fichier à chaque passage\n filename = 'webpage_' + student['firstname'].lower() + '.html'\n f = open('./webpages/' + filename, 'w') # crée le fichier\n\n # remplacements\n htmlChanged = html.replace('[firstname]', student['firstname'])\n htmlChanged = htmlChanged.replace('[lastname]', student['lastname'])\n # calcul de la moyenne, arrondi, conversion en str\n average = str(round(listAverage(student['notes']), 2))\n htmlChanged = htmlChanged.replace('[average]', average)\n htmlChanged = htmlChanged.replace('[country]', student['country'].lower())\n\n f.write(htmlChanged) # écrit dans le fichier\n f.close()\n","sub_path":"generateWebpage.py","file_name":"generateWebpage.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"348375907","text":"\nimport numpy as np\nimport cv2 as cv\nfrom scipy import signal\n\n\ndef ssd(image: np.array, template: np.array):\n \"\"\"\n Template matching using sum of square differences\n :param image:\n :param template:\n :return:\n \"\"\"\n\n t_height, t_width, _ = template.shape\n i_height, i_width, _ = image.shape\n\n min_ssd = np.Inf\n coords = None\n\n for i in range(i_height - t_height):\n for j in range(i_width - t_width):\n\n ssd_temp = np.linalg.norm(template - image[i:i+t_height, j:j+t_width])\n\n if ssd_temp < min_ssd:\n min_ssd = ssd_temp\n coords = (i, j)\n i, j = coords\n\n return image[i:i+t_height, j:j+t_width]\n\n\ndef ncc(image: np.array, template: np.array):\n \"\"\"\n Template matching normalized cross correlation\n :param image:\n :param template:\n :return:\n \"\"\"\n reserve = image.copy()\n\n t_height, t_width, _ = template.shape\n\n image = cv.cvtColor(image, cv.COLOR_RGB2GRAY)\n template = cv.cvtColor(template, cv.COLOR_RGB2GRAY)\n\n image = image.astype(np.float64)\n template = template.astype(np.float64)\n\n template = template - np.mean(template)\n\n template = template / np.sqrt(np.sum(template ** 2))\n\n s_filter = np.ones(template.shape)\n\n image2ed = image ** 2\n\n image_sum = signal.correlate2d(image, s_filter, 'same')\n image2ed_sum = signal.correlate2d(image2ed, s_filter, 'same')\n\n num = signal.correlate2d(image, template, 'same')\n den = np.sqrt(image2ed_sum - np.square(image_sum) / np.size(template))\n\n scores = np.where(den == 0, 0, num / den)\n\n i, j = np.unravel_index(scores.argmax(), scores.shape)\n\n i = i - (t_height // 2) + 1\n j = j - (t_width // 2) + 1\n\n return reserve[i:i + t_height, j:j + t_width]\n\n\ndef sad(image: np.array, template: np.array):\n \"\"\"\n\n :param image:\n :param template:\n :return:\n \"\"\"\n reserve = image.copy()\n\n t_height, t_width, _ = template.shape\n i_height, i_width, _ = image.shape\n\n scores = np.empty((i_height - t_height, i_width - t_width))\n\n for i in range(0, i_height - t_height):\n for j in range(0, i_width - t_width):\n\n d = np.abs(image[i:i+t_height, j:j+t_width] - template)\n\n scores[i, j] = d.sum()\n\n i, j = np.unravel_index(scores.argmin(), scores.shape)\n\n 
return reserve[i:i+t_height, j:j+t_width]\n","sub_path":"lucas_canade/template_matching.py","file_name":"template_matching.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"640746990","text":"#This python script will download \"tagesschau 20Uhr\" on http://www.tagesschau.de/\n# Format: for Mobile (H264)\n\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport re\n\n#function from http://stackoverflow.com/questions/35842873/is-there-a-way-to-download-a-video-from-a-webpage-with-python\ndef download_file(url):\n    local_filename = url.split('/')[-1]\n    # NOTE the stream=True parameter\n    r = requests.get(url, stream=True)\n    with open(local_filename, 'wb') as f:\n        for chunk in r.iter_content(chunk_size=1024): \n            if chunk: # filter out keep-alive new chunks\n                f.write(chunk)\n                #f.flush() commented by recommendation from J.F.Sebastian\n    return local_filename\n\ntagesschau = requests.get('http://www.tagesschau.de/sendung/tagesschau/index.html')\n\nt_mp4url = re.findall('Klein', str(bs(tagesschau.text, \"lxml\")))\n\ndownload_file(t_mp4url[0])\n","sub_path":"tagesschau.py","file_name":"tagesschau.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"519372846","text":"#\r\n# [953] Reverse Only Letters\r\n#\r\n# https://leetcode.com/problems/reverse-only-letters/description/\r\n#\r\n# algorithms\r\n# Easy (58.01%)\r\n# Total Accepted:    6.4K\r\n# Total Submissions: 11.1K\r\n# Testcase Example:  '\"ab-cd\"'\r\n#\r\n# Given a string S, return the \"reversed\" string where all characters that are\r\n# not a letter stay in the same place, and all letters reverse their\r\n# positions.\r\n#\r\n# Example 1:\r\n#\r\n# Input: \"ab-cd\"\r\n# Output: \"dc-ba\"\r\n#\r\n# Example 2:\r\n#\r\n# Input: \"a-bC-dEf-ghIj\"\r\n# Output: \"j-Ih-gfE-dCba\"\r\n#\r\n# Example 3:\r\n#\r\n# Input: \"Test1ng-Leet=code-Q!\"\r\n# Output: \"Qedo1ct-eeLg=ntse-T!\"\r\n#\r\n# Note:\r\n#\r\n# S.length <= 100\r\n# 33 <= S[i].ASCIIcode <= 122 \r\n# S doesn't contain \\ or \"\r\n#\r\nclass Solution:\r\n    def reverseOnlyLetters(self, S):\r\n        \"\"\"\r\n        :type S: str\r\n        :rtype: str\r\n        \"\"\"\r\n        # strss = \"\"\r\n        # chr_dic = dict()\r\n        # for i in range(0,len(S)):\r\n        #     if (S[i] >= \"a\" and S[i] <= \"z\") or (S[i] >= \"A\" and S[i] <= \"Z\"):\r\n        #         strss = strss + S[i]\r\n        #     else:\r\n        #         chr_dic[i] = S[i]\r\n\r\n        # rstrss = \"\"\r\n        # i = len(strss) - 1\r\n        # while i >= 0:\r\n        #     rstrss = rstrss + strss[i]\r\n        #     i = i - 1\r\n\r\n        # otrss = \"\"\r\n        # j = 0\r\n        # for i in range(0,len(S)):\r\n        #     if i not in chr_dic:\r\n        #         otrss = otrss + rstrss[j]\r\n        #         j = j + 1\r\n        #     else:\r\n        #         otrss = otrss + chr_dic[i]\r\n\r\n        # return otrss\r\n\r\n        # collect the letters, then pop from the end to emit them in reverse\r\n        p = [i for i in S if i.isalpha()]\r\n        return ''.join([i if not i.isalpha() else p.pop() for i in S])\r\n\r\nif __name__ == \"__main__\":\r\n    print(Solution().reverseOnlyLetters(\"Test1ng-Leet=code-Q!\"))","sub_path":"leetcode/917.reverse-only-letters.python3.py","file_name":"917.reverse-only-letters.python3.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"267305679","text":"from django.contrib import admin\nfrom polls.models import Question\n\nclass QuestionAdmin(admin.ModelAdmin):\n    #fields = ['pub_date', 'question_text'] # Gives a form with pub_date first and question_text next;\n    fieldsets = [\n        (None, {'fields': ['question_text']}),\n        ('Date information', {'fields': ['pub_date'],'classes':['collapse']}),\n    ]\n\n\nadmin.site.register(Question,QuestionAdmin)\n\n# Register your models here.\n","sub_path":"competitive/polls/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"103835115","text":"import random\nnames = []\n\nf = open('female_first_names.txt')\nfor line in f:\n    line = line.rstrip('\\n')\n    names.append(line)\nf.close()\n\nf = open('male_first_names.txt')\nfor line in f:\n    line = line.rstrip('\\n')\n    names.append(line)\nf.close()\n\n# random.shuffle shuffles in place and returns None\nrandom.shuffle(names)\nprint(names)\n\nf = open('first_names.txt', 'w')\nfor name in names:\n    f.write(name + '\\n')\nf.close()\n","sub_path":"name_combine.py","file_name":"name_combine.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"421193342","text":"\n\nfrom xai.brain.wordbase.nouns._sceptic import _SCEPTIC\n\n#class header\nclass _SCEPTICS(_SCEPTIC, ):\n\tdef __init__(self,): \n\t\t_SCEPTIC.__init__(self)\n\t\tself.name = \"SCEPTICS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"sceptic\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sceptics.py","file_name":"_sceptics.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"10594214","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('django_messages', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='message',\n            name='purged_for_recipient',\n            field=models.BooleanField(db_index=True, verbose_name='Purged for recipient', default=False),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='message',\n            name='purged_for_sender',\n            field=models.BooleanField(db_index=True, verbose_name='Purged for sender', default=False),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"django_messages/migrations/0002_auto_20140926_1746.py","file_name":"0002_auto_20140926_1746.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"190072958","text":"import unittest\nimport sys\nimport numpy as np\nsys.path.append('..')\nfrom sensor.sensor import PermanentEquationLibraryPath, Sensor, SensorArray,_sensor_equation_compiler,TemporaryEquationLibraryPath, _get_conversion_libs,add_conversion_library, remove_conversion_library\nfrom scipy.optimize import newton\nimport os\n\nclass TestEquationParsing(unittest.TestCase):\n\n    def test_explicit_equation1(self):\n        equation1 = 'y = 7*x**2 + 1'\n        function = _sensor_equation_compiler(equation1)\n        self.assertEqual(function(2),7*2**2+1)\n\n    def test_explicit_equation2(self):\n\n        func = lambda x: x**2 + np.sqrt(x)\n        equation2 = 'x**2 + np.sqrt(x) = y'\n        function = _sensor_equation_compiler(equation2)\n        for i in range(0,10):\n            self.assertEqual(func(i),function(i))\n\n    \"\"\"\n    def test_implicit_equation1(self):\n\n        equation1 = 'y = y**(1/2) + 14'\n        func = lambda y: y**(1/2) - y + 14\n\n        output = newton(func,0)\n        function = _sensor_equation_compiler(equation1)\n        print(function(np.zeros([1])))\n        self.assertEqual(function(0),output)\n\n    \"\"\"\n\nclass TestEquationLibraryManagement(unittest.TestCase):\n\n    def test_reading_equation_from_default_library(self):\n\n        equation = ''\n        function = _sensor_equation_compiler(equation)\n        self.assertEqual(function(1),1)\n\n\n    def test_reading_equation_from_default_library_that_does_not_exist(self):\n\n        equation = ''\n        with self.assertRaises(FileNotFoundError):\n            function = _sensor_equation_compiler(equation)\n\n\n    def test_temporarily_adding_library_and_reading_equation(self):\n\n        libpath = os.path.join(os.getcwd(),'input_files\\\\test_equation_library.py')\n        equation = ''\n        with TemporaryEquationLibraryPath(libpath):\n            libs = _get_conversion_libs()\n            self.assertIn(libpath,libs)\n            function = _sensor_equation_compiler(equation)\n\n        self.assertEqual(function(3),3)\n\n    def test_permanent_adding_library_and_reading_equation(self):\n\n        libpath = os.path.join(os.getcwd(),'input_files\\\\test_permanent_adding_library.py')\n        equation = ''\n\n        add_conversion_library(libpath)\n\n        function = _sensor_equation_compiler(equation)\n        libs = _get_conversion_libs()\n        self.assertIn(libpath,libs)\n\n        self.assertEqual(function(4),4)\n\n        remove_conversion_library(libpath)\n        libs = _get_conversion_libs()\n        self.assertNotIn(libpath,libs)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_equation.py","file_name":"test_equation.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"640212522","text":"from unit.div import div\nimport pytest\n\n@pytest.mark.happy\n# \"happy\" is a custom mark name used to group tests\n# pytest.mark.parametrize supplies the parametrized cases\n@pytest.mark.parametrize(\"number1,number2,expected\", [\n    (10, 2, 5),\n    (10000, 1, 10000),\n    (10, 3, 3.3333333333333335),\n    (10.0, 5, 2.0),\n    (9, -1, -9),\n    (0, 1, 0),\n])\ndef test_div_case01(number1, number2, expected):\n    assert div(number1, number2) == expected\n\n\n@pytest.mark.parametrize('num1,num2,exception', [\n    (1, 0, ZeroDivisionError),\n    ('a', 'b', TypeError)\n])\ndef test_div_case02(num1, num2, exception):\n    # error cases must be asserted with pytest.raises, not ==\n    with pytest.raises(exception):\n        div(num1, num2)\n","sub_path":"lesson_001/unit/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"368935560","text":"#! /usr/bin/env python3\n\nimport os\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom lib_metrics_spaces import lp, plot\n\nti = time.time()\n\npath_plot = './plots/euclidean_spaces'\n\nN = 1_000 # number of points to sample\nnn = np.arange(1,201) # dimensionality\npp = [i/10. 
N = 1_000  # number of points to sample\nnn = np.arange(1, 201)  # dimensionality\npp = [i/10. for i in range(1, 11)] + [2, 3, 5, 10, 20]  # metric's parameter\nD_mm = np.empty(nn.size)  # array to store distances\n\n## Sampling points\n\nnp.random.seed(0)\nX = np.random.rand(N, nn.size)\n\nfor p in pp:\n\n    for n in nn:\n\n        d = lp(X=X[:, :n+1], p=p)\n        D_mm[n-1] = np.max(d) - np.min(d)\n\n    plot(x=nn, y=D_mm, fname=f'contrast_euclidean_p_{p}', path=path_plot,\n         title='Distance behavior in the n-d Euclidean Space', metric='d(x,y)=L_p(x,y)', euclidean=True, p=p)\n\ntf = time.time()\n\nprint(f'Running time: {tf-ti:.2f} seconds')\n","sub_path":"euclidean_space.py","file_name":"euclidean_space.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"570620619","text":"from discord.ext import commands\nimport discord\nfrom discord.ext.commands import TextChannelConverter, RoleConverter\nfrom PIL import Image\nimport random\nimport itertools\nimport toml\n\n\ndef setup(bot):\n    bot.add_cog(Utility(bot))\n\n\nclass Utility(commands.Cog):\n    \"\"\"Commands for server / bot utility.\"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def help(self, ctx, args=\"\"):\n        \"\"\"Help command.\n\n        Syntax:\n        \`//help <command>\`\"\"\"\n        cmds = [i.name for i in self.bot.commands]\n        cmds_lower = [i.lower() for i in cmds]\n        
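# look the requested command up case-insensitively against the registered command names\n        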
Do `//help ` for information on a command.\")\n embed_var.set_author(name=\"HAL-9000\", icon_url=\"https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Fchurchm.ag%2Fwp-content%2Fuploads%2F2015%2F12%2FHAL9000_iconic_eye.png&f=1&nofb=1\")\n for cog in self.bot.cogs.values():\n embed_var.add_field(name=cog.qualified_name, value=\"`\"+\"`, `\".join([i.name for i in cog.get_commands()])+\"`\", inline=False)\n await ctx.send(embed=embed_var)\n\n @commands.has_permissions(administrator=True)\n @commands.command()\n async def config(self, ctx, *args):\n \"\"\"Configures the local server settings of HAL.\n Requires administrator.\n\n Subcommands:\n `logchannel `: Sets the channel for HAL to send mod log messages in (HAL Errors, message deletions)\n `systemchannel `: Sets the channel for HAL to send member joins/leaves in.\n `colorposition `: Sets the role list position of new color roles.\n `read`: Sends the config settings.\n\n Syntax:\n `//config `\"\"\"\n try:\n globalconfig = toml.loads(open(\"config.toml\", \"rt\").read())\n except KeyError:\n globalconfig = {}\n try:\n config = globalconfig[str(ctx.guild.id)]\n except KeyError:\n config = {}\n if args[0] == \"systemchannel\":\n systemchannel = await TextChannelConverter().convert(ctx, args[1])\n config.update({\"systemchannel\": systemchannel.id})\n elif args[0] == \"logchannel\":\n logchannel = await TextChannelConverter().convert(ctx, args[1])\n config.update({\"logchannel\": logchannel.id})\n elif args[0] == \"colorposition\":\n position = (len(ctx.guild.roles) - int(args[1])) + 1\n config.update({\"colorposition\": position})\n elif args[0] == \"read\":\n await ctx.send(\"Config file:\\n\" + toml.dumps(config))\n else:\n await ctx.send(\"Please select a valid configuration option.\")\n globalconfig.update({str(ctx.guild.id): config})\n open(\"config.toml\", \"w\").write(toml.dumps(globalconfig))\n await ctx.send(\"Configuration set.\")\n\n @commands.command()\n async def ping(self, ctx):\n \"\"\"Pings the bot and displays the time in milliseconds between your message being sent and the ping message\n being sent.\n Ignores arguments. \"\"\"\n m = await ctx.send(\"Pong?\")\n latency = m.created_at - ctx.message.created_at\n await m.edit(content=f\"Pong in {int(latency.microseconds / 1000)} ms! :ping_pong:\")\n\n @commands.command()\n async def invite(self, ctx):\n \"\"\"Sends an oath2 link for HAL.\n Ignores arguments.\"\"\"\n await ctx.send(\"https://discord.com/api/oauth2/authorize?client_id=717042126776434728&permissions=8&scope=bot\")\n\n @commands.command()\n async def repo(self, ctx):\n \"\"\"Sends the link to the GitHub repo for HAL.\n Ignores arguments.\"\"\"\n await ctx.send(\"https://github.com/Paradigmmmm/HAL-9000\")\n\n @commands.command()\n async def color(self, ctx, *args):\n \"\"\"Commands relating to the color system.\n\n Subcommands:\n `list`: Lists all color names.\n `set `: Sets your color to the specified color.\n `preview `: Sends an image containing the color of the role.\n `add `: Adds a color with the specified hex codes (Requires manage roles).\n `delete `: Deletes a color (Requires manage roles).\n `addexisting `: Adds an existing role to the color list. 
args = args[1:]\n        config = toml.loads(open(\"config.toml\", \"rt\").read())\n        if subcmd == \"add\":\n            if not ctx.author.guild_permissions.manage_roles:\n                await ctx.send(\"Invalid permissions.\")\n                return\n            color = args[0]\n            if \"#\" in color:\n                color = color.replace(\"#\", \"\")\n            try:\n                color = discord.Colour(int(color, 16))\n            except ValueError:\n                await ctx.send(\"Invalid color.\")\n                return\n            name = ' '.join(args[1:])\n            try:\n                colorposition = config[\"colorposition\"]\n            except KeyError:\n                await ctx.send(\"You haven't set up a position to move colors to in this server yet. Do //config \"\n                               \"colorposition to set up a position. For now I've created the role at the bottom of \"\n                               \"the list.\")\n                colorposition = 1\n            try:\n                colorrole = await ctx.guild.create_role(name=name, colour=color, reason=\"Automated colour addition.\")\n                await ctx.guild.edit_role_positions({colorrole: colorposition})\n                await ctx.send(\"Color created.\")\n            except discord.Forbidden:\n                await ctx.send(\"HAL-9000 does not have the manage roles permission.\")\n                return\n            except discord.InvalidArgument:\n                await ctx.send(\"Invalid args.\")\n                return\n            except discord.HTTPException:\n                await ctx.send(\"An unexpected exception occurred. Try again later.\")\n                return\n            try:\n                colors = config[\"colors\"]\n            except KeyError:\n                colors = []\n            colors.append(colorrole.id)\n            config.update({\"colors\": colors})\n            open(\"config.toml\", \"w\").write(toml.dumps(config))\n        elif subcmd == \"list\":\n            try:\n                colors = config[\"colors\"]\n            except KeyError:\n                colors = []\n            colorroles = []\n            for x in colors:\n                color = ctx.guild.get_role(x)\n                print(type(color))\n                if type(color) != discord.role.Role:\n                    colors.remove(x)\n                else:\n                    colorroles.append(color)\n            text = \"\"\n            for x in colorroles:\n                text = text + \"\\n**\" + str(colorroles.index(x) + 1) + \":** \" + x.name\n            await ctx.send(text + \"\\n\\n *Do* \`//color preview <color>\` *for a preview of the color!*\")\n            config.update({\"colors\": colors})\n            open(\"config.toml\", \"w\").write(toml.dumps(config))\n        elif subcmd == \"delete\":\n            if not ctx.author.guild_permissions.manage_roles:\n                # MissingPermissions must be instantiated with the list of missing permission names\n                raise commands.MissingPermissions(['manage_roles'])\n            try:\n                colors = config[\"colors\"]\n            except KeyError:\n                colors = []\n            answer = ' '.join(args)\n            try:\n                answer = int(answer) - 1\n                answer = colors[answer]\n                role = await RoleConverter().convert(ctx, str(answer))\n                colors.remove(answer)\n                await role.delete()\n            except ValueError:\n                try:\n                    answer = await RoleConverter().convert(ctx, answer)\n                    await answer.delete()\n                except commands.errors.BadArgument:\n                    await ctx.send(\"Invalid color.\")\n                    return\n                if answer.id not in colors:\n                    await ctx.send(\"Invalid color in config list.\")\n                    return\n                print(colors)\n                print(answer.id)\n                colors.remove(answer.id)\n            except IndexError:\n                await ctx.send(\"There is no color at that position.\")\n                return\n            await ctx.send(\"Color deleted successfully.\")\n            config.update({\"colors\": colors})\n            open(\"config.toml\", \"w\").write(toml.dumps(config))\n        elif subcmd == \"forcedelete\":\n            if not ctx.author.guild_permissions.manage_roles:\n                raise commands.MissingPermissions(['manage_roles'])\n            try:\n                colors = config[\"colors\"]\n            except KeyError:\n                colors = []\n            answer = args[0]\n            try:\n                colors.pop(int(answer))\n            except ValueError:\n                await ctx.send(\"Please enter an integer.\")\n                return\n            await ctx.send(\"Color removed.\")\n            config.update({\"colors\": colors})\n            open(\"config.toml\", \"w\").write(toml.dumps(config))\n        elif subcmd == \"addexisting\":\n            # same manage-roles gate as add/delete; raising MissingPermissions defers the error message to discord.py's handler\n            if not ctx.author.guild_permissions.manage_roles:\n                raise commands.MissingPermissions(['manage_roles'])\n            try:\n                colors = config[\"colors\"]\n            except KeyError:\n                colors = []\n            answer = ' '.join(args)\n            try:\n                colorrole = await RoleConverter().convert(ctx, answer)\n            except commands.errors.BadArgument:\n                await ctx.send(\"Invalid role.\")\n                return\n            colors.append(colorrole.id)\n            config.update({\"colors\": colors})\n            open(\"config.toml\", \"w\").write(toml.dumps(config))\n            await ctx.send(\"Done!\")\n        elif subcmd == \"preview\":\n            color = ' '.join(args)\n            try:\n                colors = config[\"colors\"]\n            except KeyError:\n                colors = []\n            colorc = []\n            for x in colors:\n                x = await RoleConverter().convert(ctx, str(x))\n                x = str(x.name)\n                colorc.append(x)\n            if color in colorc:\n                color = await RoleConverter().convert(ctx, str(colors[colorc.index(color)]))\n                color = str(color.color)\n            if \"#\" in color:\n                color = color.replace(\"#\", \"\")\n            try:\n                print(color)\n                image = Image.new('RGB', (256, 256), color=tuple(int(color[i:i + 2], 16) for i in (0, 2, 4)))\n            except ValueError:\n                await ctx.send(\"Invalid color name/hex color.\")\n                return\n            image.save(\"previewimg.png\")\n            await ctx.send(\"Here is the color preview:\", file=discord.File(open(\"previewimg.png\", \"rb\")))\n        elif subcmd == \"set\":\n            args = ' '.join(args)\n            try:\n                colors = config[\"colors\"]\n            except KeyError:\n                colors = []\n            colorsthing = []\n            colorsl = []\n            for colorrrr in colors:\n                colorsthing.append((ctx.guild.get_role(colorrrr)).name)\n                colorsl.append((ctx.guild.get_role(colorrrr)).name.lower())\n            if args.lower() in colorsl:\n                try:\n                    colorrole = await RoleConverter().convert(ctx, colorsthing[colorsl.index(args.lower())])\n                except ValueError:\n                    colorrole = await RoleConverter().convert(ctx, args)\n            else:\n                await ctx.send(\"That is not a valid color.\")\n                return\n            if colorrole.id not in colors:\n                await ctx.send(\"That is not a valid color.\")\n                return\n            for x in ctx.author.roles:\n                if x.id in colors:\n                    await ctx.author.remove_roles(x)\n            await ctx.author.add_roles(colorrole)\n            await ctx.send(\"Color set to \" + colorrole.name + \"!\")\n\n    @commands.command()\n    async def coinflip(self, ctx):\n        \"\"\"Flips a coin.\n        Ignores arguments.\"\"\"\n        await ctx.send(random.choice([\"Heads!\"] * 50 + [\"Tails!\"] * 50 + [\"The coin landed on the side!!\"]))\n\n    @commands.command()\n    async def roll(self, ctx, args):\n        \"\"\"Rolls dice of any quantity and size.\n\n        Syntax:\n        \`//roll <number>d<sides>\`\"\"\"\n        total = 0\n        crits = 0\n        critf = 0\n        for die in args.split():\n            if 'd' not in die:\n                total += int(die)\n                continue\n            num, sides = die.split('d')\n            if num == '':\n                num = '1'\n            num = int(num)\n            sides = int(sides)\n            # refuse absurdly large rolls before building the per-die result list below\n            
if num >= 100:\n                await ctx.send(\"That number is too large. To prevent crashes, I have exited the command.\")\n                return\n            rolls = [random.randint(1, sides) for _ in itertools.repeat(None, num)]\n            if sides == 20:  # sides is an int here; the old comparison against the string '20' never matched\n                crits += rolls.count(20)\n                critf += rolls.count(1)\n            total += sum(rolls)\n        await ctx.send(\n            \"Result: \" + str(total) + \"\\n***CRITICAL SUCCESS!***\" * crits + \"\\n***CRITICAL FAILURE!***\" * critf)\n\n    @commands.command()\n    async def choose(self, ctx, *args):\n        \"\"\"Chooses between multiple things if you can't decide yourself.\n\n        Syntax:\n        \`//choose <option> <option> ...\`\"\"\"\n        await ctx.send(random.choice(args), allowed_mentions=discord.AllowedMentions(everyone=False, users=False,\n                                                                                     roles=False))\n","sub_path":"cogs/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":14264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"234965815","text":"import sys\n\ninput = sys.stdin.readline\n\ndef cut(length):\n    cutted = 0\n    for cable in cables:\n        cutted += cable//length\n        if cutted>=N: return True\n    return False\n\nK, N = map(int, input().rstrip().split())\ncables = [int(input().rstrip()) for _ in range(K)]\nresult = 1\nstart, end = result, sum(cables)//N\nwhile start<=end:\n    mid = (start+end) // 2\n    if cut(mid):\n        start = mid+1\n        result = max(result, mid)\n    else:\n        end = mid-1\nprint(result)\n\n# https://www.acmicpc.net/problem/1654","sub_path":"알고리즘/[템플릿]/이분탐색/랜선 자르기.py","file_name":"랜선 자르기.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"307511818","text":"print('Please think of a number between 0 and 100!')\r\n\r\nstartp = 0\r\nendp = 100\r\nguess = 50\r\nip = ''\r\nwhile(guess != ip):\r\n    print('Is your secret number ' + str(guess) + '?')\r\n    print('Enter \\'h\\' to indicate the guess is too high.'),\r\n    print('Enter \\'l\\' to indicate the guess is too low.'),\r\n    print('Enter \\'c\\' to indicate I guessed correctly.'),\r\n    ip = str(raw_input())\r\n    if ip == 'h':\r\n        endp = guess\r\n        guess = (startp + endp) / 2\r\n    elif ip == 'l':\r\n        startp = guess\r\n        guess = (startp + endp) / 2\r\n    elif ip == 'c':\r\n        
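# guessed correctly: reveal the number and leave the bisection loop\r\n        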
print('Game over. Your secret number was: ' + str(guess))\r\n        break\r\n    else:\r\n        print('Sorry, I did not understand your input.')","sub_path":"Python/6.00.1x/magicnumfind.py","file_name":"magicnumfind.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"563952847","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/5/27\n# @Author : Cary\n# @Function : improve dating-site match-making with the k-nearest-neighbors algorithm\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport operator\n\n\ndef classify0(in_x, data_set, labels, k):\n    \"\"\"\n    k-nearest-neighbors classifier\n    :param in_x: input vector X to classify\n    :param data_set: training sample set\n    :param labels: label vector; it has one element per row of data_set\n    :param k: number of nearest neighbors to use\n    :return: the label that occurs most often among them\n    \"\"\"\n    dataset_size = data_set.shape[0]\n\n    # Prototype: numpy.tile(A, reps) -- A is the input array and reps how many times to repeat it;\n    # tiling in_x lets us subtract it from every training row at once.\n    # Distances use the Euclidean formula: sqrt(pow(xA0-xB0, 2) + pow(xA1-xB1, 2))\n    diff_mat = np.tile(in_x, (dataset_size, 1)) - data_set\n    sq_diff_mat = diff_mat ** 2\n    sq_distances = sq_diff_mat.sum(axis=1)\n\n    distances = sq_distances ** 0.5\n    # numpy.argsort() returns the indices that would sort the array\n    sorted_dist_indicies = distances.argsort()\n\n    class_count = {}\n    # vote with the k nearest samples\n    for i in range(k):\n        vote_I_label = labels[sorted_dist_indicies[i]]\n        class_count[vote_I_label] = class_count.get(vote_I_label, 0) + 1\n\n    sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)\n    return sorted_class_count[0][0]\n\n\ndef file2matrix(filename):\n    \"\"\"\n    Parse the text records into NumPy structures\n    :param filename: file name\n    :return: feature matrix and class label vector\n    \"\"\"\n    fr = open(filename)\n    array_of_lines = fr.readlines()\n    number_of_lines = len(array_of_lines)\n    matrix_of_return = np.zeros((number_of_lines, 3))\n    class_of_label_vector = []\n    index = 0\n    for line in array_of_lines:\n        line = line.strip()\n        list_of_line = line.split('\\t')\n        matrix_of_return[index, :] = list_of_line[0:3]\n\n        # the last item of each line is the class label\n        class_of_label_vector.append(int(list_of_line[-1]))\n        index += 1\n    return matrix_of_return, class_of_label_vector\n\n\ndef draw_scatter(matrix_of_dating, labels_of_dating, xlabel, ylabel):\n    \"\"\"\n    Draw a scatter plot of two features, colored by class\n    \"\"\"\n    plt.rcParams['font.sans-serif'] = ['SimHei']\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.scatter(x=matrix_of_dating[:, 1], y=matrix_of_dating[:, 2],\n               s=15.0*np.array(labels_of_dating), c=15.0*np.array(labels_of_dating))\n    ax.set_xlim([0.0, 25.0])\n    ax.set_ylim([0.0, 2.0])\n    ax.set_xlabel(xlabel)\n    ax.set_ylabel(ylabel)\n    plt.show()\n\n\ndef auto_norm(data_set):\n    \"\"\"\n    Normalize the feature values with: new_value = (old_value - min) / (max - min)\n    :param data_set: data set\n    :return:\n    \"\"\"\n    min_vals = data_set.min(0)  # column-wise minima; the argument 0 makes min() work down each column\n    max_vals = data_set.max(0)\n    ranges = max_vals - min_vals\n    norm_data_set = np.zeros(np.shape(data_set))\n    m = data_set.shape[0]\n    norm_data_set = data_set - np.tile(min_vals, (m, 1))\n    norm_data_set = norm_data_set / np.tile(ranges, (m, 1))\n    return norm_data_set, ranges, min_vals\n\n\ndef dating_class_test(filename, matrix_of_dating, labels_of_dating):\n    \"\"\"\n    Test harness for the dating-site classifier\n    Error rate = number of misclassified samples / total number of test samples\n    :param filename: file path\n    :return: None\n    \"\"\"\n    ratio_of_ho = 0.10  # hold out 10% of the data for testing, train on the other 90%\n    matrix_of_norm, ranges, min_vals = auto_norm(matrix_of_dating)\n    m = matrix_of_norm.shape[0]\n    num_of_test_vecs = int(m*ratio_of_ho)\n    error_counts = 0.0\n    for i in range(num_of_test_vecs):\n        classifier_result = classify0(matrix_of_norm[i, :], matrix_of_norm[num_of_test_vecs:m, :],\n                                      labels_of_dating[num_of_test_vecs:m], 3)\n        
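# hold-out evaluation: each of the first num_of_test_vecs rows is classified against a model built from the remaining 90% of rows\n        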
print(\"the classifier came back with: %d, the real answer is: %d\"\n              % (classifier_result, labels_of_dating[i]))\n        if classifier_result != labels_of_dating[i]:\n            error_counts += 1.0\n    print('the total error rate is %f' % (error_counts/float(num_of_test_vecs)))\n\n\ndef classify_person(matrix_of_dating, labels_of_dating):\n    \"\"\"\n    Prediction helper for the dating website\n    \"\"\"\n    print(\"Begin to predict.\")\n    # labels: someone you would not like, like in small doses, or like in large doses\n    result_list = ['not at all', 'in small doses', 'in large doses']\n    percent_tats = float(input(\"percentage of time spent playing video games?\\n\"))  # share of time spent on video games\n    ff_miles = float(input(\"frequent flyer miles earned per year?\\n\"))  # frequent flyer miles earned per year\n    ice_creams = float(input(\"liters of ice cream consumed per year?\\n\"))\n    matrix_of_dating, ranges, min_vals = auto_norm(data_set=matrix_of_dating)\n    in_arr = np.array([ff_miles, percent_tats, ice_creams])\n    classifier_result = classify0((in_arr-min_vals)/ranges, matrix_of_dating, labels_of_dating, 3)\n    print(\"You will probably like this person: \", result_list[classifier_result-1])\n\n\nif __name__ == '__main__':\n    # visualize the data first\n    filename = 'datingTestSet2.txt'\n    matrix_of_dating, labels_of_dating = file2matrix(filename)\n    draw_scatter(matrix_of_dating, labels_of_dating,\n                 'Percentage of time spent playing video games', 'Liters of ice cream consumed per week')\n    dating_class_test(filename, matrix_of_dating, labels_of_dating)\n    classify_person(matrix_of_dating, labels_of_dating)\n","sub_path":"Machine Learning in Achtion/ch02/analysis_dating_website.py","file_name":"analysis_dating_website.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"195579392","text":"'''CS1 Final Honor Roll Problem 1: Better GUI\n   Author: Bianca Yang'''\n\nimport wx \nfrom wx.lib.buttons import GenButton\n\nclass Connect4(wx.Frame):\n    '''Connect4 game GUI.'''\n\n    def __init__(self, *args):\n        super(Connect4, self).__init__(*args, style=wx.DEFAULT_FRAME_STYLE & \\\n            ~(wx.RESIZE_BORDER | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))\n\n        screen_width, screen_height = wx.GetDisplaySize()\n        window_height = (3.0 / 4) * screen_height \n        window_width = 1.6 * window_height \n        title = 'Connect4'\n        size = (window_width, window_height)\n        \n        self.initUI(size, title)\n\n    def initUI(self, size, title): \n        '''Displays the main window and manages events.'''\n\n        self.SetSize(size)\n        self.SetTitle(title)\n        self.Centre()\n\n        icon = wx.Icon('Blue.ico', wx.BITMAP_TYPE_ICO)\n        self.SetIcon(icon)\n\n        menuBar = wx.MenuBar()\n        Connect4_Menu = wx.Menu()\n        CAbout = Connect4_Menu.Append(102, '&About Connect4')\n        CQuit = Connect4_Menu.Append(103, '&Quit Connect4\\tCtrl+Q')\n        menuBar.Append(Connect4_Menu, 'Connect4')\n\n        file_Menu = wx.Menu()\n        fMainClose = file_Menu.Append(107, '&Close Window\\tCtrl+W')\n        menuBar.Append(file_Menu, 'File')\n        \n        self.SetMenuBar(menuBar)\n        self.Bind(wx.EVT_MENU, self.onAbout, CAbout)\n        self.Bind(wx.EVT_MENU, self.onQuit, fMainClose)\n        self.Bind(wx.EVT_MENU, self.onQuit, CQuit)\n        self.Bind(wx.EVT_CLOSE, self.onQuit)\n\n        vBox = wx.BoxSizer(wx.VERTICAL)\n        hBox = wx.BoxSizer(wx.HORIZONTAL)\n\n        windowWidth = self.GetClientSize()[0] \n        windowHeight = self.GetClientSize()[1]\n\n        newGameButton = homeButton(self, 113, 'New Game')\n        helpButton = homeButton(self, 114, 'Help')\n        creditsButton = homeButton(self, 115, 'Credits')\n        quitButton = homeButton(self, 116, 'Quit Game')\n\n        newGameButton.SetBackgroundColour('GREEN')\n        helpButton.SetBackgroundColour('ORANGE')\n        creditsButton.SetBackgroundColour('MEDIUM TURQUOISE')\n        
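# colour-code the menu buttons so each action is visually distinct (plain wx colour-name strings)\n        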
quitButton.SetBackgroundColour('SALMON')\n\n vBox.AddSpacer((windowWidth, .4 * windowHeight))\n\n hBox.AddSpacer((windowWidth * .19, windowHeight * .6))\n vBox2 = wx.BoxSizer(wx.VERTICAL)\n vBox2.Add(newGameButton, proportion=0)\n vBox2.Add(helpButton, proportion=0)\n vBox2.Add(creditsButton, proportion=0)\n vBox2.Add(quitButton, proportion=0)\n\n hBox.Add(vBox2)\n vBox.Add(hBox)\n\n self.SetSizer(vBox)\n\n quitButton.Bind(wx.EVT_BUTTON, self.onQuit) \n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.homeBackground)\n \n self.Show()\n\n def homeBackground(self, event):\n '''Loads Connect4 homepage background image.\n Also calls method to load control buttons.'''\n \n dc = event.GetDC()\n\n if not dc:\n dc = wx.ClientDC(self)\n rect = self.GetUpdateRegion().GetBox()\n dc.SetClippingRect(rect)\n\n dc.Clear()\n bmp = wx.Bitmap('Homepage.png')\n bmp = scaleBitmap(bmp, self.GetClientSize()[0], \\\n self.GetClientSize()[1])\n dc.DrawBitmap(bmp, 0, 0)\n\n #def gameScreen(self, event):\n # '''Takes user to the game screen.'''\n\n #def creditScreen(self, event):\n # '''Takes user to the credits screen.'''\n \n #def helpScreen(self, event):\n # '''Takes user to the help screen.'''\n\n def onAbout(self, event):\n '''Displays a custom about box.'''\n \n aboutFrame = aboutBox(None, 103, 'About Connect4', size=(300, 230),\\\n style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | \n wx.RESIZE_BOX | wx.MAXIMIZE_BOX))\n\n aboutFrame.Centre()\n aboutFrame.Show()\n\n def onQuit(self, event):\n '''Quits if yes button is clicked. Closes popup if no button \n is clicked.'''\n\n dlg = quitBox(None, 109, 'Quit', size=(420, 162),\\\n style=wx.DEFAULT_DIALOG_STYLE | wx.STAY_ON_TOP\\\n & ~wx.CLOSE_BOX)\n result = dlg.ShowModal()\n if result == 1:\n self.Destroy()\n wx.GetApp().ExitMainLoop()\n\ndef scaleBitmap(bitmap, width, height):\n '''Scales bitmaps to the desired width and height.'''\n\n img = wx.ImageFromBitmap(bitmap)\n img = img.Scale(width, height, wx.IMAGE_QUALITY_HIGH)\n result = wx.BitmapFromImage(img)\n return result\n\nclass aboutBox(wx.Dialog):\n '''Creates a custom about window.'''\n\n def __init__(self, *args, **kwargs):\n super(aboutBox, self).__init__(*args, **kwargs) \n \n aboutFont = wx.Font(13, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, \\\n wx.FONTWEIGHT_NORMAL, False, u'Helvetica')\n\n frameBox = wx.BoxSizer(wx.HORIZONTAL)\n vBox = wx.BoxSizer(wx.VERTICAL)\n\n bitmap = wx.Image('pieces.png', wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n bitmap = scaleBitmap(bitmap, 80, 80)\n img = wx.StaticBitmap(self, 104, bitmap)\n \n aboutText = wx.StaticText(self, 105, 'This Connect4 game was ' \n 'made by Bianca Yang.') \n\n contactText = wx.StaticText(self, 106, 'Contact: '\n 'ipacifics@gmail.com')\n\n aboutText.SetFont(aboutFont)\n contactText.SetFont(aboutFont)\n\n vBox.AddSpacer((300, 30))\n vBox.Add(img, 1, wx.ALIGN_CENTER)\n vBox.AddSpacer((300, 30))\n vBox.Add(aboutText, 1, wx.ALIGN_CENTER)\n vBox.AddSpacer((300, 20))\n vBox.Add(contactText, 1, wx.ALIGN_CENTER)\n self.SetSizer(vBox)\n\nclass homeButton(GenButton):\n def __init__(self, *args, **kwargs):\n super(homeButton, self).__init__(*args, **kwargs)\n\n\nclass quitBox(wx.Dialog):\n '''Custom quit application dialog box.'''\n\n def __init__(self, *args, **kwargs):\n super(quitBox, self).__init__(*args, **kwargs)\n\n boldQuit = wx.Font(13, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL,\\\n wx.FONTWEIGHT_BOLD, False, u'Helvetica')\n dialogFont = wx.Font(11, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, \\\n wx.FONTWEIGHT_NORMAL, False, u'Helvetica')\n\n bitmap = 
wx.Image('Red.png', wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n        bitmap = scaleBitmap(bitmap, 50, 50)\n        img = wx.StaticBitmap(self, 110, bitmap)\n\n        quitTitle = wx.StaticText(self, 111, 'Leaving so soon?')\n        quitTitle.SetFont(boldQuit)\n\n        quitDialog = wx.StaticText(self, 112, 'Are you done '\n            'playing the game?')\n        quitDialog.SetFont(dialogFont)\n\n        yes_button = wx.Button(self, wx.ID_YES)\n        yes_button.id = 1 \n        no_button = wx.Button(self, wx.ID_NO)\n        no_button.id = 2\n\n        vBox = wx.BoxSizer(wx.VERTICAL)\n        vBox.AddSpacer((350, 20))\n\n        hBox = wx.BoxSizer(wx.HORIZONTAL)\n        hBox.AddSpacer((30, 210))\n        hBox.Add(img, proportion=0) \n        hBox.AddSpacer((40, 210)) \n\n        vBox2 = wx.BoxSizer(wx.VERTICAL)\n        vBox2.Add(quitTitle, proportion=0)\n        vBox2.AddSpacer((130, 8)) \n        vBox2.Add(quitDialog, proportion=0)\n        vBox2.AddSpacer((130, 30))\n        \n        hBox2 = wx.BoxSizer(wx.HORIZONTAL)\n        hBox2.AddSpacer((108, 30))\n        hBox2.Add(yes_button, proportion=0, flag=wx.ALIGN_BOTTOM)\n        hBox2.AddSpacer((12, 10))\n        hBox2.Add(no_button, proportion=0, flag=wx.ALIGN_BOTTOM)\n        vBox2.Add(hBox2)\n\n        hBox.Add(vBox2)\n        vBox.Add(hBox)\n\n        self.SetSizer(vBox)\n\n        yes_button.Bind(wx.EVT_BUTTON, self.onClose)\n        no_button.Bind(wx.EVT_BUTTON, self.onClose)\n\n        no_button.SetDefault()\n        \n    def onClose(self, event):\n        '''Custom event handling for yes and no buttons.'''\n        # both branches of the old if/else did the same thing, so end the modal\n        # loop directly with the pressed button's custom id (1 = yes, 2 = no)\n        self.EndModal(event.GetEventObject().id)\n\ndef main():\n    app = wx.App()\n    Connect4(None)\n    app.MainLoop()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":8047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"104995739","text":"import numpy as np\nimport fidimag.extensions.baryakhtar_clib as clib\nfrom fidimag.micro.llg import LLG\nfrom .relax import Relaxation\nfrom .relax import Laplace\n\n\nclass LLBarFull(LLG):\n\n    def __init__(self, mesh, chi=1e-3, name='unnamed'):\n\n        self.chi = chi\n        super(LLBarFull, self).__init__(mesh, name=name)\n        self.lap = Laplace(mesh)\n        self.add(Relaxation(chi))\n\n        self.beta = 0\n\n    def sundials_rhs(self, t, y, ydot):\n\n        self.t = t\n\n        # already synchronized when this function is called\n        # self.spin[:]=y[:]\n\n        self.compute_effective_field(t)\n        delta_h = self.lap.compute_laplace_field(self.field, self._Ms)\n\n        clib.compute_llg_rhs_baryakhtar(ydot,\n                                        self.spin,\n                                        self.field,\n                                        delta_h,\n                                        self.alpha,\n                                        self.beta,\n                                        self._pins,\n                                        self.gamma,\n                                        self.n,\n                                        self.do_precession)\n\n        #ydot[:] = self.dm_dt[:]\n\n        return 0\n\n\nclass LLBar(LLG):\n\n    def __init__(self, mesh, name='unnamed'):\n\n        super(LLBar, self).__init__(mesh, name=name)\n        self.lap = Laplace(mesh)\n\n        self.field_perp = np.zeros(3 * self.n, dtype=np.float)\n\n        self.beta = 0\n\n    def sundials_rhs(self, t, y, ydot):\n\n        self.t = t\n\n        # already synchronized when this function is called\n        # self.spin[:]=y[:]\n\n        self.compute_effective_field(t)\n        clib.compute_perp_field(\n            self.spin, self.field, self.field_perp, self.n)\n        delta_h = self.lap.compute_laplace_field(self.field_perp, self._Ms)\n\n        clib.compute_llg_rhs_baryakhtar_reduced(ydot,\n                                                self.spin,\n                                                self.field,\n                                                delta_h,\n                                                self.alpha,\n                                                self.beta,\n                                                self._pins,\n                                                self.gamma,\n                                                self.n,\n                                                self.do_precession,\n                                                self.default_c)\n\n        #ydot[:] = self.dm_dt[:]\n\n        return 
0\n","sub_path":"fidimag/micro/baryakhtar.py","file_name":"baryakhtar.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"601506514","text":"import numpy as np\nimport scipy.sparse as sp\n\nfrom numpy.testing import assert_array_equal\n\nfrom spira.completion import DictMF\n\nfrom testing import assert_array_almost_equal\nfrom testing import assert_almost_equal\n\n# from spira.impl.dict_fact import _sample\n\n\ndef test_matrix_fact_cd():\n # Generate some toy data.\n rng = np.random.RandomState(0)\n U = rng.rand(50, 3)\n V = rng.rand(3, 20)\n X = np.dot(U, V)\n\n mf = DictMF(n_components=3, n_epochs=3, alpha=1e-3, random_state=0,\n verbose=0, normalize=False)\n\n mf.fit(X)\n\n Y = np.dot(mf.P_.T, mf.Q_)\n Y2 = mf.predict(X).toarray()\n\n assert_array_almost_equal(Y, Y2)\n\n rmse = np.sqrt(np.mean((X - Y) ** 2))\n rmse2 = mf.score(X)\n\n assert_almost_equal(rmse, rmse2)\n\ndef test_dict_fact_normalize():\n # Generate some toy data.\n rng = np.random.RandomState(0)\n U = rng.rand(50, 3)\n V = rng.rand(3, 20)\n X = np.dot(U, V)\n\n mf = DictMF(n_components=3, n_epochs=1, alpha=1e-3, random_state=0,\n verbose=0, normalize=True)\n\n mf.fit(X)\n\n Y = np.dot(mf.P_.T, mf.Q_)\n Y += mf.col_mean_[np.newaxis, :]\n Y += mf.row_mean_[:, np.newaxis]\n Y2 = mf.predict(X).toarray()\n\n assert_array_almost_equal(Y, Y2)\n\n rmse = np.sqrt(np.mean((X - Y) ** 2))\n rmse2 = mf.score(X)\n\n assert_almost_equal(rmse, rmse2)\n\n\n# def test_sample():\n# data = np.ones(5)\n# row = np.arange(5)\n# col = np.arange(0, 10, 2)\n# X = sp.coo_matrix((data, (row, col)), shape=(5, 10))\n# X = sp.csr_matrix(X)\n# y, idx, count = _sample(X.data, X.indices, X.indptr, 10, np.array([0, 1]))\n# assert_array_equal(y, np.array([[1., 0.], [0., 1.]]))\n# assert_array_equal(idx, np.array([0, 2]))\n# assert_array_equal(count, np.array([1, 1]))\n","sub_path":"spira/impl/tests/test_dict_fact.py","file_name":"test_dict_fact.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"227267094","text":"# -*- coding: utf-8 -*-\n\n\nimport mne\nimport numpy as np\n\nclass TimeFrequency(object):\n \n def __init__(self, data):\n \"\"\"\n Creates a 'TimeFrequency' class that applies TF decomposition procedures on the data via its sub-functions.\n Functions applied on a given class instance will adjust in-place\n Args:\n data: the result of a time frequency decomp via complex Morlet convolution \n \"\"\" \n self.MNE_struct_trials = data\n self.complex = self.MNE_struct_trials._data\n \n def extractPower(self): \n \"\"\"\n Computes power from the result of the complex Morlet convolution.\n Power = abs(complex) ** 2 \n = complex .* conj(complex)\n Keeps each trial/epoch separate.\n \"\"\"\n \n self.TFR_power_trials = np.multiply(self.complex,np.conj(self.complex)).real\n return\n \n def extractPhase(self): \n \"\"\"\n Computes phase from the result of the complex Morlet convolution.\n Phase = angle(complex)\n Keeps each trial/epoch separate.\n \"\"\"\n self.TFR_phase_trials = np.angle(self.complex)\n return\n \n \n def baselineNorm(self, baseline_tmin, baseline_tmax): \n \"\"\"\n Baseline normalises the input, not in-place\n Args: \n baseline_tmin: The start of baseline period, relative to the event lock (epoch t = 0)\n baseline_tmax: The end of baseline period, relative to the event lock (epoch t = 0)\n \"\"\" \n # Define baseline windowing parameters\n 
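# np.abs(times - t).argmin() picks the index of the sample closest to the requested boundary time\n        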
baseline_tmin_idx = (np.abs(self.MNE_struct_trials.times - baseline_tmin)).argmin()\n baseline_tmax_idx = (np.abs(self.MNE_struct_trials.times - baseline_tmax)).argmin()\n # Define the basline period itself for power and phase\n baseline_freqMeans_power = np.mean(self.TFR_power_trials[:,:,:,baseline_tmin_idx:baseline_tmax_idx],axis=3)\n baseline_freqMeans_phase = np.mean(self.TFR_phase_trials[:,:,:,baseline_tmin_idx:baseline_tmax_idx],axis=3)\n # Baseline normalisation for phase and power\n self.TFR_power_trials_BN = (10*np.log10(np.divide(self.TFR_power_trials, baseline_freqMeans_power[:,:,:,None])))\n self.TFR_phase_trials_BN = (10*np.log10(np.divide(self.TFR_phase_trials, baseline_freqMeans_phase[:,:,:,None])))\n return \n \n \ndef create_MNE_struct(mne_data): \n \"\"\"\n Copies the EpochTFR MNE data shell and strips out the data \n \"\"\"\n # Create a copy of the input MNE EpochTFR dict for multi- and single-trial data\n MNE_struct_trials = mne_data.copy()\n MNE_struct_single = mne_data.average().copy()\n # Clear out the complex values to create a MNE dict shell for multi-trial data\n MNE_struct_trials._data = np.zeros(MNE_struct_trials._data.shape)\n # Clear out the complex values to create a MNE dict shell for single-trial data\n MNE_struct_single._data = np.zeros(MNE_struct_single._data.shape)\n return(MNE_struct_single, MNE_struct_trials)","sub_path":"functions/tf_functions.py","file_name":"tf_functions.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"274823521","text":"\n\nimport requests\n\n\nres = requests.post(\n url=\"http://47.94.172.250:33334/api/v1/order/confirm/\",\n headers={\n \"Authorization\": \"Bearer fzC7KMsCiQuZiPifr7og65woqwvKdU\"\n },\n json={\n \"amount\": 599,\n \"productList\": [\n {\n \"courseId\": 18,\n \"validPeriodId\": 180,\n \"price\": 599,\n \"courseUrl\": ''\n },\n ]\n },\n)\n# print(res.text)\nreq = requests.post(\n url=\"http://localhost:8000/api/v1/order/balance/\",\n headers={\n \"Authorization\": \"Bearer fzC7KMsCiQuZiPifr7og65woqwvKdU\"\n },\n json={\n \"useBalanceStatus\": 1,\n \"sessionKey\": \"a1pmtf73z50omgkw2jx1c71f3atz417x\"\n },\n)\nprint(req.text)","sub_path":"20171010/order_confirm.py","file_name":"order_confirm.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"438415563","text":"import time\nimport Adafruit_CharLCD as LCD\n\nclass LCD_LIB:\n def __init__(self):\n self.lcd = LCD.Adafruit_CharLCDPlate()\n self.lcd.create_char(1, [2, 3, 2, 2, 14, 30, 12, 0])\n self.lcd.create_char(2, [0, 1, 3, 22, 28, 8, 0, 0])\n self.lcd.create_char(3, [0, 14, 21, 23, 17, 14, 0, 0])\n self.lcd.create_char(4, [31, 17, 10, 4, 10, 17, 31, 0])\n self.lcd.create_char(5, [8, 12, 10, 9, 10, 12, 8, 0])\n self.lcd.create_char(6, [2, 6, 10, 18, 10, 6, 2, 0])\n self.lcd.create_char(7, [31, 17, 21, 21, 21, 21, 17, 31])\n self.buttons = ( (LCD.SELECT, ' Press Again to\\nDectivate System', (1,1,1)),\n (LCD.LEFT, 'Left' , (1,0,0)),\n (LCD.UP, 'Up' , (0,0,1)),\n (LCD.DOWN, 'Down' , (0,1,0)),\n (LCD.RIGHT, 'Right' , (1,0,1)) )\n\n #Sets color and message on LCD Display\n def lcd_message(self, color, message):\n self.lcd.clear()\n color_codes = {'red':[[1],[0],[0]], 'green':[[0],[1],[0]], 'blue':[[0],[0],[1]], 'purple':[[1],[0],[1]], 'yellow':[[1],[1],[0]], 'white':[[1],[1],[1]]}\n for colors in color_codes:\n if color == colors:\n colorcode = color_codes.get(color)\n 
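# each RGB channel is stored as a one-element list, so join()+int() collapses it to the 0/1 flag that set_color() expects\n                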
self.lcd.set_color(int(''.join(map(str, colorcode[0]))), int(''.join(map(str, colorcode[1]))), int(''.join(map(str, colorcode[2]))))\n            # elif color != colors and colors == 'red':\n            #     print \"Sorry, color was not found!\"\n        self.lcd.message(str(message))\n\n    def countdown(self, secs):\n\n        for i in range(secs, 0, -1):\n            self.lcd.clear()\n            self.lcd.message(str(i) + \"...\")\n            time.sleep(1)\n            # (no manual increment needed; the for loop advances i itself)\n\n    def is_select_pressed(self):\n        # returning False inside the loop meant only the first button was ever checked;\n        # report True if any button is pressed, and False only after checking them all\n        for button in self.buttons:\n            if self.lcd.is_pressed(button[0]):\n                return True\n        return False\n","sub_path":"picurity_system/Lcd.py","file_name":"Lcd.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"164520981","text":"import json\nimport serial\n\n\nclass ArduinoConnect:\n    def __init__(self):\n        self.port = \"/dev/cu.usbmodem142401\"\n        # self.port = \"COM3\"\n        self.baudrate = 9600\n        self.exitThread = False\n        self.ser = None\n\n    def connect(self):\n        print('Starting communication with the Arduino')\n        print(f'Arduino serial port: {self.port}\\n')\n        self.ser = serial.Serial(port=self.port, baudrate=self.baudrate)\n\n    def disconnect(self):\n        if self.ser.is_open:\n            print('Stopping Arduino serial communication.\\n')\n            self.ser.close()\n\n    def get_data(self):\n        res = self.ser.readline()\n        try:\n            # the decode/parse step is what can raise JSONDecodeError, so guard it (not readline)\n            json_data = json.loads(res.decode()[:-1])\n        except json.decoder.JSONDecodeError:\n            return -1\n        return int(json_data['fsrState'])\n\n    def connect_serial(self):\n        while not self.exitThread:\n            try:\n                if self.ser.readable():\n                    self.get_data()\n\n            except serial.serialutil.SerialException:\n                print('The Arduino is not connected.\\n')\n                break\n\n\nif __name__ == \"__main__\":\n    ac = ArduinoConnect()\n    ac.connect()  # open the port first; connect_serial assumes self.ser is set\n    ac.connect_serial()\n","sub_path":"src/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"116836762","text":"#!/usr/bin/env python\n\n\"\"\"\nOn-line repository of training programs for Pocket Trainer mobile app.\n\nUsage:\n$ python repository.py\n\nSetup:\n$ pip install Pillow\n\nMake sure there are ZIP archives in the same directory with the training\nprograms conforming to the expected JSON format.\n\"\"\"\n\nimport os\nimport zipfile\nimport json\nimport base64\nimport http.server\nimport io\nimport shutil\n\nfrom PIL import Image\n\n\nVERSION = '1.0'\nHOST = '0.0.0.0'\nPORT = 8080\nROOT_CONTEXT = '/repository'\nTHUMBNAIL_SIZE = (128, 128)\n\n\ndef list_files():\n    \"\"\"Return a list of ZIP filenames in the current directory.\"\"\"\n    return [x for x in os.listdir() if x.endswith('.zip')]\n\n\ndef make_thumbnail(archive, metadata):\n    \"\"\"Return Base64-encoded str or None.\"\"\"\n    image_path = metadata.get('image')\n    if image_path is not None:\n        image_bytes = archive.read(image_path)\n        image = Image.open(io.BytesIO(image_bytes))\n        image.thumbnail(THUMBNAIL_SIZE, Image.ANTIALIAS)\n        image_bytes = io.BytesIO()\n        image.save(image_bytes, format='PNG')\n        # decode() is the idiomatic way to get a str; the old str(...)[2:-1] relied on repr formatting\n        return base64.b64encode(image_bytes.getvalue()).decode('ascii')\n\n\ndef programs_index():\n    \"\"\"Return a JSON string with the available programs.\"\"\"\n    print('Scanning directory for ZIP archives...')\n    programs = []\n    for filename in list_files():\n        with zipfile.ZipFile(filename, 'r') as archive:\n            contents = json.loads(archive.read('program.json'))\n            metadata = contents['program']['metadata']\n            metadata.update(image=make_thumbnail(archive, metadata))\n            programs.append({\n                'format_version': contents['format_version'],\n                'metadata': metadata,\n                
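# 'filename' is what a client appends to ROOT_CONTEXT to download this archive (see the GET handler below)\n                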
'filename': filename\n            })\n    return json.dumps({\n        'format_version': VERSION,\n        'programs': programs\n    })\n\n\ndef start_server():\n    \"\"\"Spin up an HTTP server.\"\"\"\n\n    class RequestHandler(http.server.BaseHTTPRequestHandler):\n        \"\"\"Custom handler for HTTP requests.\"\"\"\n\n        def do_GET(self):\n            \"\"\"Handle all incoming HTTP GET requests.\"\"\"\n            if self.path == ROOT_CONTEXT:\n                self._send_response(programs_index())\n            elif self.path.startswith(ROOT_CONTEXT):\n                filename = os.path.basename(self.path)\n                if os.path.exists(filename):\n                    self._send_file(filename)\n                else:\n                    self._send_error(404, 'Not Found')\n            else:\n                self._send_error(400, 'Bad Request')\n\n        def _send_response(self, content, content_type='application/json; charset=utf-8'):\n            \"\"\"Return HTTP 200 OK with content.\"\"\"\n            self.send_response(200)\n            self.send_header('Content-Type', content_type)\n            self.end_headers()\n            self.wfile.write(content.encode('utf-8'))\n\n        def _send_file(self, filename):\n            with open(filename, 'rb') as fp:\n                self.send_response(200)\n                self.send_header('Content-Type', 'application/octet-stream')\n                self.send_header('Content-Disposition', f'attachment; filename=\"{filename}\"')\n                file_stat = os.fstat(fp.fileno())\n                self.send_header('Content-Length', str(file_stat.st_size))\n                self.end_headers()\n                shutil.copyfileobj(fp, self.wfile)\n\n        def _send_error(self, code, message):\n            \"\"\"Return HTTP error message.\"\"\"\n            self.send_response(code)\n            self.send_header('Content-Type', 'text/plain; charset=utf-8')\n            self.end_headers()\n            self.wfile.write(f'{code} {message}'.encode('utf-8'))\n\n    server = http.server.HTTPServer((HOST, PORT), RequestHandler)\n    print(f'Server listening at {HOST}:{PORT}...')\n    server.serve_forever()\n\n\ndef main():\n    \"\"\"Application entry point.\"\"\"\n    start_server()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"repository/simple_repository.py","file_name":"simple_repository.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"378180176","text":"import argparse\nimport sys\n\nfrom bxcommon.utils.blockchain_utils.eth import eth_common_constants\nfrom bxutils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass ParseEnode(argparse.Action):\n    def __call__(self, parser, namespace, values, option_string=None):\n        assert isinstance(values, str)\n        enode = values\n\n        # Make sure enode is at least as long as the public key\n        if len(enode) < 2 * eth_common_constants.PUBLIC_KEY_LEN:\n            logger.fatal(\"Invalid enode. \"\n                         \"Invalid enode length: {}\", len(enode), exc_info=False)\n            sys.exit(1)\n        try:\n            enode_and_pub_key, ip_and_port = enode.split(\"@\")\n            if enode_and_pub_key.startswith(\"enode://\"):\n                pub_key = enode_and_pub_key[8:]\n            else:\n                pub_key = enode_and_pub_key\n            ip, port_and_disc = ip_and_port.split(\":\")\n            port = port_and_disc.split(\"?\")[0]\n        except ValueError:\n            logger.fatal(\"Invalid enode: {}\", enode, exc_info=False)\n            sys.exit(1)\n        else:\n            # Node public key gets validated in validate_eth_opts\n            namespace.node_public_key = pub_key\n            # blockchain IP gets validated in __init__()\n            namespace.blockchain_ip = ip\n            # Port validation\n            if not port.isnumeric():\n                logger.fatal(\"Invalid port: {}\", port, exc_info=False)\n                sys.exit(1)\n            namespace.blockchain_port = int(port)\n","sub_path":"src/bxgateway/argument_parsers.py","file_name":"argument_parsers.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"24131270","text":"\n\nfrom xai.brain.wordbase.nouns._chink import _CHINK\n\n# class header\nclass _CHINKING(_CHINK, ):\n\tdef __init__(self,): \n\t\t_CHINK.__init__(self)\n\t\tself.name = \"CHINKING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"chink\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_chinking.py","file_name":"_chinking.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"105937358","text":"import argparse\nimport sys\n\nfrom resttest.console import Console\nfrom resttest.reportpublisher import ReportPublisher\nfrom resttest.testloader import TestLoader, TestLoadingError\nfrom resttest.testrunner import TestRunner\n\n\ndef _run(baseUri, suiteFilename):\n    loader = TestLoader()\n    suite = loader.loadSuiteFromFile(suiteFilename)\n\n    console = Console()\n\n    runner = TestRunner(console, baseUri)\n    report = runner.runSuite(suite)\n\n    publisher = ReportPublisher(console)\n    publisher.publishReport(report)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('baseuri')\n    parser.add_argument('suite')\n\n    args = parser.parse_args()\n\n    try:\n        _run(args.baseuri, args.suite)\n    except TestLoadingError as e:\n        sys.exit(e.message)\n","sub_path":"resttest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"75885013","text":"# make one empty list and one non-empty list\n\nhot = [\"Red\", \"Orange\", \"Brown\"]\nwarm = []\nwarm = hot\nwarm.append(\"Pink\")\nprint(hot)\n# warm points at the same list object, so append mutates the one shared list\n\ncool = [\"White\", \"Grey\"]\nchill = []\nchill = cool[:]\nchill_sorted = sorted(chill)\n\nprint(chill, chill_sorted, cool)\n# chill is unchanged because sorted() does not mutate its argument\n# chill_sorted is a new, sorted list\n# the original list is unaltered\n","sub_path":"list_mutable.py","file_name":"list_mutable.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"100266889","text":"import sys\n\nsys.path.append('../../')\nfrom webcrawler_plus.parser import crawl_feeds\n\ncommon_settings = {\n    'COMPRESSION_ENABLED': False,\n    'HTTPCACHE_ENABLED': True,\n    'WCP_PIPELINE_API_URL': \"http://localhost:8000/extracted-data\",\n    'WCP_PIPELINE_API_HEADERS': {},\n    'LOG_LEVEL': 'INFO'\n}\n\nes_settings = {\n    # overrides: route every scraped item through the ApiPipeline and keep the HTTP cache in MongoDB\n    
'ITEM_PIPELINES': {'webcrawler_plus.pipelines.api.ApiPipeline': 1},\n\n    'HTTPCACHE_STORAGE': \"webcrawler_plus.httpcache.mongodb.MongoDBCacheStorage\",\n}\n\ncommon_settings.update(es_settings)\n\nif __name__ == '__main__':\n    # crawl_feeds(\n    #     settings=common_settings, feed_urls=['http://www.jimmunol.org/rss/current.xml',\n    #                                          'http://connect.iisc.ac.in/feed/', \"https://blog.google/rss/\"]\n    # )\n\n    crawl_feeds(\n        settings=common_settings, feed_urls=[\"https://blog.google/rss/\"]\n    )\n","sub_path":"examples/feeds/run_api.py","file_name":"run_api.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"36128219","text":"import numpy as np\nimport scipy.stats\n\nclass Sequence:\n    def __init__(self, ntseq):\n        self.seq = ntseq.upper()\n        self.length = len(self.seq)\n\n    def methylate(self, ind, replacement='M'):\n        self.methseq = self.seq[:ind-1]+replacement+self.seq[ind:]\n        return self.methseq\n\n    def rev_comp(self):\n        comp = {'A':'T','C':'G','T':'A','G':'C','N':'N','M':'M'}\n        comp_seq = [comp[nt] for nt in list(self.seq)]\n        self.revcomp = ''.join([comp[nt] for nt in list(self.seq)][::-1])\n        try:\n            self.revmethseq = ''.join([comp[nt] for nt in list(self.methseq)][::-1])\n        except AttributeError:\n            pass\n        return self.revcomp\n\nclass Signal:\n    #R7 example: (56.40406723484848, 8851.159030544488, 0.7926077052243929, 0.01095617529880478, 'GAAAAA', 57.16981709401989, 1, 0.9999896287918091, 0.31872811913490295, 'GAAAAA', 0.31872811913490295, 0.94491046667099, 0.012281710281968117, 0.04044119268655777, 0.002366444794461131)\n    def __init__(self, events, shift, drift, scale, var, starttime):\n        self.means = [x[0] for x in events]\n        #self.stds = [x[2] for x in events]\n        self.event_times = [x[1] for x in events]\n        self.kmers = [x[4] for x in events]\n        self.advances = [x[6] for x in events]\n        self.read_start = starttime\n        self.drift = drift\n        self.shift = shift\n        self.scale = scale\n        self.var = var\n        self.transformed = False\n\n    def transform(self):\n        \"\"\"double time = event.start_time - events[si][0].start_time;\n        event.mean -= (time * pore_model[si].drift); \"\"\"\n        if not self.transformed:\n            self.means = np.array(self.means)/self.scale - (np.array(self.event_times)-self.read_start)*self.drift - self.shift\n            self.transformed = True\n\ndef check_alignment(indices, seq1, seq2):\n    \"\"\"verifies whether index of alignment contains indel or mismatch (read = seq1, ref = seq2)\"\"\"\n    if all([index != None for index in indices]) and (seq1[indices[0]] == seq2[indices[1]]):\n        return True\n    else:\n        return False\n\ndef find_perfect_match(bam_alignment, k, seq1, seq2):\n    \"\"\"finds next closest perfect k-mer in alignment, avoiding any indels or mismatches (read = seq1, ref = seq2)\"\"\"\n    match = []\n    i = -1\n    last_ind = 0\n    # NOTE: the loop header and first statements here were mangled in the source (text between\n    # angle brackets was stripped); this is a reconstruction that walks the alignment and keeps\n    # only runs of consecutive matching positions, which is what the surrounding code implies\n    while len(match) < k and i < len(bam_alignment) - 1:\n        i += 1\n        if check_alignment(bam_alignment[i], seq1, seq2):\n            if len(match) > 0 and i == last_ind+1:\n                match.append(bam_alignment[i]) #,seq1[bam_alignment[i][0]],seq2[bam_alignment[i][1]]))\n            else:\n                match = [bam_alignment[i]] #,seq1[bam_alignment[i][0]],seq2[bam_alignment[i][1]])]\n            last_ind = i\n\n    if len(match) == k:\n        return match, last_ind\n    else:\n        return None, None\n\ndef extract_signal_fragment(anchor1, anchor2, hdf5, ind_mult, read_len):\n    \"\"\"returns signal between two perfect kmer anchor points\"\"\"\n    ind = min(0, ind_mult*read_len)\n    all_events = hdf5['Analyses/Basecall_1D_000/BaseCalled_template/Events']\n    read_start = hdf5['Analyses/Basecall_1D_000/BaseCalled_template/Events'].attrs['start_time']\n\n    location = 
'Analyses/Basecall_1D_000/BaseCalled_template/Model'\n shift = hdf5[location].attrs['shift']\n drift = hdf5[location].attrs['drift']\n scale = hdf5[location].attrs['scale']\n var = hdf5[location].attrs['var'] \n event_frag = [] \n \n #print ind, anchor1, anchor2\n for event in all_events:\n ind = ind + event[6]\n if ind_mult*ind >= anchor1 and ind_mult*ind < anchor2+1:\n #print ind_mult*ind, event\n event_frag.append(event)\n #print event_frag\n sig_frag = Signal(event_frag,shift,drift,scale,var,read_start)\n return sig_frag\n\ndef extract_transitions(hdf5,loc='Analyses/Basecall_1D_000/BaseCalled_template'):\n \"\"\"returns array with transition probabilities for stays, steps, skips, and double skips\"\"\"\n location = loc+\"/Model\"\n stay_rate = hdf5[location].attrs['stay_prob']\n step_rate = hdf5[location].attrs['step_prob']\n skip_rate = hdf5[location].attrs['skip_prob']\n dskip_rate = np.power(hdf5[location].attrs['skip_prob'],2)\n return (stay_rate,step_rate,skip_rate,dskip_rate)\n\ndef emission_prob(kmer_model,obs_mean,sig):\n \"\"\"returns probability density for an event given a Gaussian model for a kmer\"\"\"\n P = scipy.stats.norm(kmer_model[0],kmer_model[1]*sig.var).pdf(obs_mean)\n return P\n\ndef viterbiish(events,sequence,transition_probs,kmer_mod,k=6,m_sequence=None):\n \"\"\"implements a variation on the Viterbi algorithm where the first and last states are known\"\"\"\n nStates = len(sequence)-k+1\n nSamples = len(events.means)\n states = [sequence[i:i+k] for i in range(nStates)] \n \n if m_sequence:\n m_states = [m_sequence[i:i+k] for i in range(nStates)] \n final_states = m_states\n else:\n final_states = states\n \n vit = np.zeros((nSamples+1,nStates)) # initialize viterbi table\n bpt = np.zeros((nSamples+1,nStates)) # initialize the best path table\n best_path = np.zeros(nSamples+1); # initialize output vector\n\n #initialization \n vit[0,0] = 1 \n \n #first phase\n for j in xrange(1,nSamples):\n for i in xrange(0,nStates): \n results_across_i2 = [0]*max(0,i-3) + [vit[j-1,itState2]*transition_probs[i-itState2]*emission_prob(kmer_mod.model[states[i]],events.means[j],events) for itState2 in range(max(0,i-3),min(nStates,i+1))] + [0]*(nStates-i)\n bpt[j,i] = np.argmax(results_across_i2)\n vit[j,i] = results_across_i2[int(bpt[j,i])]\n \n #termination step\n #vit[-1,-1] = 1 \n bpt[-1,-1] = nStates-1 \n \n best_path[-1] = bpt[-1,-1]\n for j in range(nSamples-1,-1,-1):\n best_path[j] = bpt[j,int(best_path[j+1])]\n \n return [(x,final_states[int(x)],events.means[i]) for i,x in enumerate(best_path[1:])]\n\ndef update_signal_matrix(realigned_sig,model):\n \"\"\"adds differences between measured and expected currents for kmers surrounding a realigned base to the signal matrix\"\"\"\n A_signal = [x for x in realigned_sig if len(x[1].split('M')) == 2]\n \n kmer_currents = {i:[] for i in range(1,7)}\n \n for kmer in A_signal:\n base_ind = len(kmer[1].split('M')[0])+1\n orig_kmer = 'A'.join(kmer[1].split('M'))\n kmer_currents[base_ind].append(kmer[2]-model[orig_kmer][0])\n \n kmer_current_array = [0]*6\n for base_ind in kmer_currents:\n if len(kmer_currents[base_ind]) > 0:\n kmer_current_array[base_ind-1] = np.mean(kmer_currents[base_ind])\n \n return kmer_current_array\n","sub_path":"R7_mCaller/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":6933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"574544362","text":"# -*- coding: utf-8 -*-\n\nlistRand = [7,3,1,10,5,4,6,2,8,9]\nlistSorted = []\nmin = 
1000\nindex = 0\n\n# selection sort: repeatedly pick the smallest remaining value; note that 'min'\n# shadows the builtin, and 5000 marks an already-consumed slot as a sentinel\nfor m in listRand:\n    for n in range(len(listRand)):\n        if(listRand[n] < min):\n            min = listRand[n]\n            index = n\n\n    listSorted.append(min)\n    listRand[index] = 5000\n    min = 5000\nprint(listSorted)","sub_path":"lars/sorting list.py","file_name":"sorting list.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"5840430","text":"\"\"\"Check the effect of synchronized synaptic input at random locations\nof specific branches of the GGN.\n\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport argparse\nfrom datetime import datetime\nfrom collections import defaultdict  # used below to group nodes by structure type\nimport numpy as np  # used below for random sampling and array output\nimport matplotlib.pyplot as plt  # plot_Vm draws with plt\nimport h5py as h5\nfrom neuron import h  # assumption: 'h' below is NEURON's hoc interface, the usual import\nimport nrnutils as nu\nimport neurograph as ng\n\n# note: insert_alphasynapse, setup_recording and get_section_node_map are used below but not\n# defined here; they presumably come from a project module such as nrnutils\n\ndef plot_Vm(fname):\n    with h5.File(fname, 'r') as fd:\n        syninfo = [s for s in fd['syninfo']]\n        for nodename in fd:\n            if nodename.startswith('v_'):\n                segment = fd[nodename].attrs['section']\n                if segment in syninfo:\n                    color = 'gray'\n                    alpha = 0.3\n                    lw = 1.0\n                    ls = '--'\n                else:\n                    color = None\n                    alpha = 0.7\n                    lw = 2.0\n                    ls = '-'\n                plt.plot(fd['time'], fd[nodename], color=color, alpha=alpha, ls=ls, lw=lw, label=segment)\n    plt.legend()\n    plt.show()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"\"\"\n    simulate synaptic input via alpha synapse in \`syncomp\`. The\n    parameters \`gmax\` (uS), \`tau\` (ms) determine the properties of the\n    synapse and \`onset\` (ms) determines the time for onset of the\n    synaptic input. Vm from \`reccount\` unique compartments from each\n    major branch is recorded. If specified, GGN_B99 template is read\n    from the \`celltemplate\` file.\"\"\")\n    parser.add_argument('-i', '--input', type=int,\n                        help='custom type no. identifying the branch where synaptic input should be applied. These are, 1: soma, 3: basal dendrite, 4: apical dendrite, 5: lateral calyx, 6: medial calyx, 7: lateral horn, 8: alpha lobe',\n                        dest='inputs',\n                        action='append',\n                        required=True)\n    parser.add_argument('-g', type=float,\n                        help='peak conductance of the synapses (in uS)',\n                        dest='gmax',\n                        default=1e-3)\n    parser.add_argument('-t', type=float,\n                        help='time constant of the alpha synapse (in ms)',\n                        dest='tau', default=1.0)\n    parser.add_argument('-s', type=float,\n                        help='onset time of synaptic input (in ms)',\n                        dest='onset', default=50.0)\n    parser.add_argument('-n', type=int,\n                        help='number of synapses',\n                        dest='syncount', default=10)\n    parser.add_argument('--synfile', type=str,\n                        help='File containing list of synapse sections',\n                        dest='synfile', default=None)\n    parser.add_argument('--reccount', type=int,\n                        help='number of sections to record from in each region',\n                        dest='reccount',\n                        default=10)\n    parser.add_argument('--recfile', type=str,\n                        help='File containing list of recording sections',\n                        dest='recfile', default=None)\n    parser.add_argument('-f', type=str,\n                        help='cell template file',\n                        dest='celltemplate',\n                        default='GGN_B99_20160725.hoc')\n    parser.add_argument('-c', type=str,\n                        help='cell name',\n                        dest='cellname',\n                        default='GGN_B99')\n    parser.add_argument('--dia-scale', type=float,\n                        help='scale diameter by this factor if it is less than \`dia-lim\`',\n                        default=None,\n                        dest='diascale')\n    parser.add_argument('--dia-lim', type=float,\n                        help='minimum diameter over which it should be scaled up (um)',\n                        default=None,\n                        dest='dialim')\n\n    args = parser.parse_args()\n    inputs = args.inputs\n    celltemplate = args.celltemplate\n    syncount = args.syncount\n    onset = args.onset\n    tau = args.tau\n    gmax = args.gmax\n    diascale = args.diascale\n    dialim = args.dialim\n    mechs = {'pas': {'g': 3e-5, 'e': -51.0}}\n    
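# passive membrane only; the commented entries below look like leftover active-conductance settings from an earlier list-style config\n    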
# {'name': 'caiiag', 'gbar': 2.5e-5},\n    # {'name': 'ka', 'gbar': 0.03}]\n\n    h.xopen(celltemplate)\n    ggn = nu.create_cell(args.cellname, filename=args.celltemplate, mechparams=mechs)\n    if (args.diascale is not None) and (args.dialim is not None):\n        count = 0\n        ggn.soma.push()\n        ordered_tree = h.SectionList()\n        ordered_tree.wholetree()\n        h.pop_section()\n        for sec in ordered_tree:\n            sec.push()\n            for ii, seg in enumerate(sec):\n                if seg.diam < dialim:\n                    seg.diam = seg.diam * diascale\n                    # print('Changed diameter of segment', ii, seg.x, 'of section', sec.name(), 'to', seg.diam)\n                    count += 1\n            h.pop_section()\n        print('Scaled diameters of', count, 'sections whose diameters were <', dialim, 'by', diascale)\n    g0 = nu.nrngraph(ggn)\n    g, nmap = ng.renumber_nodes(g0, ggn.soma.name())\n    type_node_map = defaultdict(list)\n    # Reverse lookup table for node by stype\n    for n, d in g.nodes_iter(data=True):\n        stype = d['s']\n        if g.node[n]['orig'] is not None:  # Skip the dummy nodes at terminal\n            type_node_map[stype].append(n)\n    synnodes = []\n    if args.synfile is None:\n        for branch_id in inputs:\n            nodes = type_node_map[branch_id]\n            size = syncount\n            if len(nodes) < syncount:\n                size = len(nodes)\n            for n in np.random.choice(nodes, size=size, replace=False):\n                # print(n, g.node[n])\n                synnodes.append(n)\n    else:\n        sec_node_map = get_section_node_map(g)\n        with open(args.synfile) as fd:\n            for line in fd:\n                try:\n                    secname = line.strip()\n                    synnodes.append(sec_node_map[secname])\n                except KeyError:\n                    print('Could not find section \"{}\"'.format(secname))\n                    raise\n\n    # Select one random node from each stype\n    recnodes = []\n    if args.recfile is not None:\n        sec_node_map = get_section_node_map(g)\n        # args is an argparse.Namespace: attribute access, not dict subscripting\n        with open(args.recfile) as fd:\n            for line in fd:\n                try:\n                    secname = line.strip()\n                    recnodes.append(sec_node_map[secname])\n                except KeyError:\n                    print('Could not find section \"{}\"'.format(secname))\n                    raise\n    else:\n        for stype, nodes in type_node_map.items():\n            size = args.reccount\n            if args.reccount > len(nodes):\n                size = len(nodes)\n            recnodes += list(np.random.choice(nodes, size=size, replace=False))\n    # print(recnodes)\n    recnodes = list(set(recnodes + synnodes))\n    recsecs = [g.node[n]['orig'] for n in recnodes]\n    synapses = []\n    for snode in synnodes:\n        synsec = g.node[snode]['orig']\n        alpha_syn = insert_alphasynapse(synsec(1.0), onset=onset, gmax=gmax, tau=tau)\n        synapses.append(alpha_syn)\n    t_vec = h.Vector()\n    t_vec.record(h._ref_t)\n    tabs = setup_recording(recsecs)\n    h.tstop = onset + 100 * tau\n    ts = datetime.now()\n    h.finitialize(mechs['pas']['e'])\n    h.fcurrent()\n    while h.t < h.tstop:\n        h.fadvance()\n\n    # h.run()\n    te = datetime.now()\n    delta = te - ts\n    print('Time for', h.tstop*1e-3, 's simulation =', delta.days * 86400 + delta.seconds + 1e-6 * delta.microseconds)\n    outfilename = 'data/A_Vm_multi_syn_{}.h5'.format(ts.strftime('%Y%m%d_%H%M%S'))\n    with h5.File(outfilename) as fd:\n        sections = [g.node[s]['orig'].name() for s in synnodes]\n        syninfo = fd.create_dataset('syninfo', data=sections)\n        syninfo.attrs['tau'] = tau\n        syninfo.attrs['onset'] = onset\n        syninfo.attrs['gmax'] = gmax\n\n        fd.attrs['g_pas'] = mechs['pas']['g']\n        fd.attrs['e_pas'] = mechs['pas']['e']\n        fd.attrs['RA'] = ggn.soma.Ra\n        time = fd.create_dataset('time', data=np.asarray(t_vec))\n        for v, sec, node in zip(tabs, recsecs, recnodes):\n            ds = fd.create_dataset('v_{}'.format(node), data=np.array(v))\n            ds.attrs['section'] = sec.name()\n        model = fd.create_group('model')\n        sec_names = []\n        sec_RA = []\n        sec_g = []\n        
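# per-section geometry and biophysics, gathered for the 'model' group created above\n        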
sec_len = []\n sec_dia = []\n sec_parent = []\n for sec in ggn.allsec():\n sec_names.append(sec.name())\n sec_RA.append(sec.Ra)\n sec_g.append(sec(0.5).pas.g)\n sec_len.append(sec.L)\n sec_dia.append(sec.diam)\n ref = h.SectionRef(sec=sec)\n if ref.has_parent():\n sec_parent.append( ref.parent.name())\n else:\n sec_parent.append('')\n\n name_ds = model.create_dataset('section', data=sec_names)\n ra_ds = model.create_dataset('RA', data=sec_RA)\n gpas_ds = model.create_dataset('gpas', data=sec_g)\n len_ds = model.create_dataset('length', data=sec_len)\n dia_ds = model.create_dataset('diameter', data=sec_dia)\n parent_ds = model.create_dataset('parent', data=sec_parent)\n print('Data saved in', outfilename)\n plot_Vm(outfilename)\n \n \n\n \n\n\n# \n# localized_input_output.py ends here\n","sub_path":"nrn/localized_input_output.py","file_name":"localized_input_output.py","file_ext":"py","file_size_in_byte":9418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"176087831","text":"import numpy as np\nimport os\nimport glob\nimport time\nfrom random import shuffle\nfrom imageio import imread\nimport pandas as pd\nimport tensorflow as tf\n\nimport keras.backend as K\nfrom keras.models import Model\nfrom keras.layers import Conv2D, ZeroPadding2D, \\\n BatchNormalization, Input, Dropout\nfrom keras.layers import Conv2DTranspose, Activation, Cropping2D\nfrom keras.layers import Concatenate\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.initializers import RandomNormal\nfrom keras.optimizers import Adam\nimport argparse\n\n\n# configure os environment\nos.environ['KERAS_BACKEND'] = 'tensorflow'\nos.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n# configure keras\nK.set_image_data_format('channels_last')\nCH_AXIS = -1\n\n# configure tensorflow\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\ntf.Session(config=config)\n\n# parse the optional arguments:\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--model_name\",\n help=\"name of model\",\n default='trial_2'\n )\nparser.add_argument(\"--input\",\n help=\"folder name of input data\",\n default='AIA'\n )\nparser.add_argument(\"--display_iter\",\n help=\"number of iterations between each test\",\n type=int,\n default=20000\n )\nparser.add_argument(\"--max_iter\",\n help=\"total number of iterations\",\n type=int,\n default=500000\n )\nargs = parser.parse_args()\n\n# Hyper parameters\nNITERS = args.max_iter # total number of iterations\n# number of iterations before display and model creation\nDISPLAY_ITERS = args.display_iter\n\n# the input data:\n# (originally AIA or Atmospheric Imaging Assembly)\nINPUT_DATA = args.input\n# The data we want to reproduce:\n# (originally HMI or Helioseismic and Magnetic Imager)\nOUTPUT_DATA = 'HMI'\n\nISIZE = 1024 # height of the image\nNC_IN = 1 # number of input channels (1 for greyscale, 3 for RGB)\nNC_OUT = 1 # number of output channels (1 for greyscale, 3 for RGB)\nBATCH_SIZE = 1 # number of images in each batch\n# max layers in the discriminator not including sigmoid activation:\n# 1 for 16, 2 for 34, 3 for 70, 4 for 142, and 5 for 286 (receptive field size)\nMAX_LAYERS = 3\n\nTRIAL_NAME = args.model_name\n\n\nMODE = INPUT_DATA + '_to_' + OUTPUT_DATA # folder name for saving the model\n\nIMAGE_PATH_INPUT = './DATA/TRAIN/'+INPUT_DATA+'/*.png' # input file path\nIMAGE_PATH_OUTPUT = './DATA/TRAIN/'+OUTPUT_DATA+'/*.png' # ouptut file path\n\n# make a folder for the trial 
if it doesn't already exist\nMODEL_PATH_MAIN = './MODELS/' + TRIAL_NAME + '/'\n# os.mkdir(MODEL_PATH_MAIN) if not os.path.exists(MODEL_PATH_MAIN) else None\nMODEL_PATH = MODEL_PATH_MAIN + MODE + '/'\nos.makedirs(MODEL_PATH) if not os.path.exists(MODEL_PATH) else None\n\n\n# generates tensors with a normal distribution with (mean, standard deviation)\n# this is used as a matrix of weights\nCONV_INIT = RandomNormal(0, 0.02)\nGAMMA_INIT = RandomNormal(1., 0.02)\n\n\n# The loss function\ndef LOSS_FN(OUTPUT, TARGET):\n return -K.mean(K.log(OUTPUT+1e-12)*TARGET+K.log(1-OUTPUT+1e-12)*(1-TARGET))\n\n\n# create a convolutional layer with f filters, and arguments a and k\ndef DN_CONV(f, *a, **k):\n return Conv2D(f, kernel_initializer=CONV_INIT, *a, **k)\n\n\n# create a deconvolutional layer with f filters, and arguments a and k\ndef UP_CONV(f, *a, **k):\n return Conv2DTranspose(f, kernel_initializer=CONV_INIT, *a, **k)\n\n\n# applies normalisation such that max is 1, and minimum is 0\ndef BATNORM():\n return BatchNormalization(\n momentum=0.9,\n axis=CH_AXIS,\n epsilon=1.01e-5,\n gamma_initializer=GAMMA_INIT\n )\n\n\n# leaky ReLU (y = alpha*x for x < 0, y = x for x > 0)\ndef LEAKY_RELU(alpha):\n return LeakyReLU(alpha)\n\n\n# the descriminator\ndef BASIC_D(ISIZE, NC_IN, NC_OUT, MAX_LAYERS):\n # combines the inputs from the generator and the desired input\n INPUT_A, INPUT_B = Input(shape=(ISIZE, ISIZE, NC_IN)),\\\n Input(shape=(ISIZE, ISIZE, NC_OUT))\n\n INPUT = Concatenate(axis=CH_AXIS)([INPUT_A, INPUT_B])\n\n if MAX_LAYERS == 0:\n N_FEATURE = 1 # number of filters to use\n # apply sigmoid activation\n L = DN_CONV(N_FEATURE,\n kernel_size=1,\n padding='same',\n activation='sigmoid'\n )(INPUT)\n\n else:\n N_FEATURE = 64 # number of filters to use\n # apply convolution\n L = DN_CONV(N_FEATURE,\n kernel_size=4,\n strides=2,\n padding=\"same\"\n )(INPUT)\n # Apply leaky ReLU activation with a slope of 0.2\n L = LEAKY_RELU(0.2)(L)\n\n # Apply convolution MAX_LAYERS times\n for i in range(1, MAX_LAYERS):\n N_FEATURE *= 2 # double the number of filters\n # Apply convolution\n L = DN_CONV(N_FEATURE,\n kernel_size=4,\n strides=2,\n padding=\"same\"\n )(L)\n # normalise\n L = BATNORM()(L, training=1)\n # Apply leaky ReLU activation with a slope of 0.2\n L = LEAKY_RELU(0.2)(L)\n\n N_FEATURE *= 2 # double the number of filters\n L = ZeroPadding2D(1)(L) # pads the model with 0s with a thickness of 1\n # Apply convolution\n L = DN_CONV(N_FEATURE, kernel_size=4, padding=\"valid\")(L)\n # normalise\n L = BATNORM()(L, training=1)\n # Apply leaky ReLU activation with a slope of 0.2\n L = LEAKY_RELU(0.2)(L)\n\n N_FEATURE = 1\n L = ZeroPadding2D(1)(L) # pads the model with 0s with a thickness of 1\n # Apply sigmoid activation\n L = DN_CONV(N_FEATURE,\n kernel_size=4,\n padding=\"valid\",\n activation='sigmoid'\n )(L)\n\n return Model(inputs=[INPUT_A, INPUT_B], outputs=L)\n\n\n# The generator (based on the U-Net architecture)\ndef UNET_G(ISIZE, NC_IN, NC_OUT, FIXED_INPUT_SIZE=True):\n MAX_N_FEATURE = 64 * 8 # max number of filters to use\n\n def BLOCK(X, S, NF_IN, USE_BATNORM=True, NF_OUT=None, NF_NEXT=None):\n # Encoder: (decreasing size)\n\n assert S >= 2 and S % 2 == 0\n if NF_NEXT is None: # number of filters in the next layer?\n # set number of filters to twice the number of filters in the\n # input, if it isn't more than the max number of filters\n NF_NEXT = min(NF_IN*2, MAX_N_FEATURE)\n if NF_OUT is None:\n NF_OUT = NF_IN\n # Apply convolution\n X = DN_CONV(NF_NEXT,\n kernel_size=4,\n strides=2,\n # don't use a 
bias if batch normalisation will be done\n # later, or if s > 2\n use_bias=(not (USE_BATNORM and S > 2)),\n padding=\"same\"\n )(X)\n if S > 2:\n # apply batch normalisation\n if USE_BATNORM:\n X = BATNORM()(X, training=1)\n # apply leaky ReLU with a slope of 0,2\n X2 = LEAKY_RELU(0.2)(X)\n # continue recursion until size = 2, halving size each time\n X2 = BLOCK(X2, S//2, NF_NEXT)\n # combine X and X2\n # this gives the \"skip connections\" between the encoder layers\n # and decoder layers.\n X = Concatenate(axis=CH_AXIS)([X, X2])\n\n # Decoder: (Increasing size)\n # This happens only when the recursive encoder has reached its maximum\n # depth (size = 2)\n # Note the minimum layer size is actually s = 4, as encoding stops when\n # s = 2\n\n # Apply ReLU activation\n X = Activation(\"relu\")(X)\n\n # Apply deconvolution\n X = UP_CONV(NF_OUT,\n kernel_size=4,\n strides=2,\n use_bias=not USE_BATNORM\n )(X)\n X = Cropping2D(1)(X)\n # Batch normalisation\n if USE_BATNORM:\n X = BATNORM()(X, training=1)\n # apply dropout\n # Randomly drops units which helps prevent overfitting\n if S <= 8:\n X = Dropout(0.5)(X, training=1)\n return X\n\n S = ISIZE if FIXED_INPUT_SIZE else None # size\n X = INPUT = Input(shape=(S, S, NC_IN)) # The input\n # Apply the U-Net convolution, deconvolution (see above function)\n X = BLOCK(X, ISIZE, NC_IN, False, NF_OUT=NC_OUT, NF_NEXT=64)\n # Apply tanh activation\n X = Activation('tanh')(X)\n\n return Model(inputs=INPUT, outputs=[X])\n\n\n# The discriminator model\nNET_D = BASIC_D(ISIZE, NC_IN, NC_OUT, MAX_LAYERS)\n# The generator model\nNET_G = UNET_G(ISIZE, NC_IN, NC_OUT)\n\n# tensor placeholders?\nREAL_A = NET_G.input # generator input (AIA)\nFAKE_B = NET_G.output # generator output (fake HMI)\nREAL_B = NET_D.inputs[1] # descriminator input (real HMI)\n\n# output of the discriminator for AIA and real HMI\nOUTPUT_D_REAL = NET_D([REAL_A, REAL_B])\n# output of the discriminator for AIA and fake HMI\nOUTPUT_D_FAKE = NET_D([REAL_A, FAKE_B])\n\n# set initial values for the loss\n# ones_like creates a tensor of the same shape full of ones\n# zeros_like creates a tensor of the same shape full of zeros\n# as the discriminator gives the probability that the input is a real HMI\n# picture, we want it to out put 1 when the input is real and 0 when the\n# input is fake.\nLOSS_D_REAL = LOSS_FN(OUTPUT_D_REAL, K.ones_like(OUTPUT_D_REAL))\nLOSS_D_FAKE = LOSS_FN(OUTPUT_D_FAKE, K.zeros_like(OUTPUT_D_FAKE))\n# while the generator, we want the discriminator to guess that the\n# generator output is the real HMI, which corresponds to the discriminator\n# outputting 1:\nLOSS_G_FAKE = LOSS_FN(OUTPUT_D_FAKE, K.ones_like(OUTPUT_D_FAKE))\n\n# total average difference between the real and generated HMIs\nLOSS_L = K.mean(K.abs(FAKE_B-REAL_B))\n\n# Total loss of the discriminator\nLOSS_D = LOSS_D_REAL + LOSS_D_FAKE\n# gives the updates for the discriminator training\nTRAINING_UPDATES_D = Adam(lr=2e-4, beta_1=0.5\n ).get_updates(NET_D.trainable_weights, [], LOSS_D)\n# creates a function that trains the discriminator\nNET_D_TRAIN = K.function([REAL_A, REAL_B], [LOSS_D/2.0], TRAINING_UPDATES_D)\n\n# The total loss of G, which includes the difference between the real and\n# generated HMIs, as well as the loss because of the descriminator\nLOSS_G = LOSS_G_FAKE + 100 * LOSS_L\n\n# operation to update the gradient of the generator using the adam optimizer\nTRAINING_UPDATES_G = Adam(\n lr=2e-4,\n beta_1=0.5\n ).get_updates(NET_G.trainable_weights, [], LOSS_G)\n# function to train the 
generator\nNET_G_TRAIN = K.function([REAL_A, REAL_B],\n [LOSS_G_FAKE, LOSS_L],\n TRAINING_UPDATES_G)\n\n\n# returns list of files that match FILE_PATTERN\ndef LOAD_DATA(FILE_PATTERN):\n return glob.glob(FILE_PATTERN)\n\n\ndef GET_DATE(file):\n filename = file.split(\"/\")[-1] # filename is at end of file path\n date_str = filename.split(\".\")[2] # date string is after second \".\"\n date_str = date_str.replace(\"_\", \"\") # remove underscores\n date_str = date_str.replace(\"-\", \"\") # remove hyphens\n date_str = date_str.replace(\"TAI\", \"z\") # TAI and Z are both UTC\n date = pd.Timestamp(date_str)\n return date\n\n\ndef GET_TIMESTAMP(file):\n date = GET_DATE(file)\n return date.timestamp()\n\n\n# FN = filenames, NC_IN = #channels in input, NC_OUT = #channels in output\n# This function essentially reads the image, and shifts it slightly by up\n# to 15 pixels any direction before returning it. This is probably to\n# prevent overfitting\ndef READ_IMAGE(FN, NC_IN, NC_OUT):\n IMG_A = imread(FN[0])\n IMG_B = imread(FN[1])\n X, Y = np.random.randint(31), np.random.randint(31)\n if NC_IN != 1:\n IMG_A = np.pad(IMG_A, ((15, 15), (15, 15), (0, 0)), 'constant')\n IMG_A = IMG_A[X:X + 1024, Y:Y + 1024, :] / 255.0 * 2 - 1\n else:\n IMG_A = np.pad(IMG_A, 15, 'constant')\n IMG_A = IMG_A[X:X + 1024, Y:Y + 1024] / 255.0 * 2 - 1\n\n if NC_OUT != 1:\n IMG_B = np.pad(IMG_B, ((15, 15), (15, 15), (0, 0)), 'constant')\n IMG_B = IMG_B[X:X + 1024, Y:Y + 1024, :] / 255.0 * 2 - 1\n else:\n IMG_B = np.pad(IMG_B, 15, 'constant')\n IMG_B = IMG_B[X:X + 1024, Y:Y + 1024] / 255.0 * 2 - 1\n\n return IMG_A, IMG_B\n\n\n# create mini batches for training (actually creates a generator\n# that generates each element of the batch)\ndef MINI_BATCH(DATA_AB, BATCH_SIZE, NC_IN, NC_OUT):\n LENGTH = len(DATA_AB)\n EPOCH = i = 0\n TMP_SIZE = None\n while True:\n SIZE = TMP_SIZE if TMP_SIZE else BATCH_SIZE\n # if we reach the end of the data (which corresponds to an\n # epoch), shuffle data and begin again\n if i + SIZE > LENGTH:\n shuffle(DATA_AB)\n i = 0\n EPOCH += 1\n DATA_A = []\n DATA_B = []\n # make batches of length: SIZE\n for J in range(i, i + SIZE):\n IMG_A, IMG_B = READ_IMAGE(DATA_AB[J], NC_IN, NC_OUT)\n DATA_A.append(IMG_A)\n DATA_B.append(IMG_B)\n DATA_A = np.float32(DATA_A)\n DATA_B = np.float32(DATA_B)\n i += SIZE\n TMP_SIZE = yield EPOCH, DATA_A, DATA_B\n\n\n# input data\nLIST_INPUT = LOAD_DATA(IMAGE_PATH_INPUT)\n# output data\nLIST_OUTPUT = LOAD_DATA(IMAGE_PATH_OUTPUT)\n\n# sort lists based on timestamp\nLIST_OUTPUT = sorted(LIST_OUTPUT, key=GET_TIMESTAMP)\nLIST_INPUT = sorted(LIST_INPUT, key=GET_TIMESTAMP)\n\n\ni = 0 # index of LIST_INPUT\nj = 0 # index of LIST_OUTPUT\n\n# only keep images that are in both input and output\nwhile i < len(LIST_INPUT) and j < len(LIST_OUTPUT):\n input = LIST_INPUT[i]\n in_time = GET_DATE(input)\n output = LIST_OUTPUT[j]\n out_time = GET_DATE(output)\n # if input is after output, delete output:\n if in_time.date() > out_time.date():\n del(LIST_OUTPUT[j])\n # if input is before output, delete input:\n elif in_time.date() < out_time.date():\n del(LIST_INPUT[i])\n # if input is after output, delete output:\n elif in_time.hour > out_time.hour:\n del(LIST_OUTPUT[j])\n # if input is before output, delete input:\n elif in_time.hour < out_time.hour:\n del(LIST_INPUT[i])\n # else, date and hours are the same, so we have a pair!\n else:\n # increment both lists\n i += 1\n j += 1\n\n# trim ends of lists so they are the same size\nlength = min(i, j)\nLIST_INPUT = 
LIST_INPUT[:length]\nLIST_OUTPUT = LIST_OUTPUT[:length]\n\nassert len(LIST_INPUT) == len(LIST_OUTPUT)\n\n# zips the data such that each element is a (input, output) pair\nLIST_TOTAL = list(zip(sorted(LIST_INPUT), sorted(LIST_OUTPUT)))\n\nprint(\"Input Output Pairs:\")\nprint(LIST_TOTAL)\n# creates a generator to use for training\nTRAIN_BATCH = MINI_BATCH(LIST_TOTAL, BATCH_SIZE, NC_IN, NC_OUT)\n\n# initialise training variables\nT0 = T1 = time.time()\nGEN_ITERS = 0\nERR_L = 0\nEPOCH = 0\nERR_G = 0\nERR_L_SUM = 0\nERR_G_SUM = 0\nERR_D_SUM = 0\n\n# training:\nwhile GEN_ITERS <= NITERS:\n EPOCH, TRAIN_A, TRAIN_B = next(TRAIN_BATCH)\n # input data set\n TRAIN_A = TRAIN_A.reshape((BATCH_SIZE, ISIZE, ISIZE, NC_IN))\n # output data set\n TRAIN_B = TRAIN_B.reshape((BATCH_SIZE, ISIZE, ISIZE, NC_OUT))\n\n # descriminator training and error\n ERR_D, = NET_D_TRAIN([TRAIN_A, TRAIN_B])\n ERR_D_SUM += ERR_D\n\n # generator training and error\n ERR_G, ERR_L = NET_G_TRAIN([TRAIN_A, TRAIN_B])\n ERR_G_SUM += ERR_G\n ERR_L_SUM += ERR_L\n\n GEN_ITERS += 1\n\n # print training summary and save model\n if GEN_ITERS % DISPLAY_ITERS == 0:\n print('[%d][%d/%d] LOSS_D: %5.3f LOSS_G: %5.3f LOSS_L: %5.3f T:'\n '%dsec/%dits, Total T: %d'\n % (\n EPOCH, GEN_ITERS, NITERS, ERR_D_SUM/DISPLAY_ITERS,\n ERR_G_SUM/DISPLAY_ITERS, ERR_L_SUM/DISPLAY_ITERS,\n time.time()-T1, DISPLAY_ITERS, time.time()-T0\n )\n )\n\n ERR_L_SUM = 0\n ERR_G_SUM = 0\n ERR_D_SUM = 0\n DST_MODEL = MODEL_PATH+MODE+'_ITER'+'%07d' % GEN_ITERS+'.h5'\n NET_G.save(DST_MODEL)\n T1 = time.time()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"103585543","text":"import torch\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\n# input array\r\nX = np.array([[1, 0, 1, 0], [1, 0, 1, 1], [0, 1, 0, 1]])\r\n\r\n# output array\r\ny = np.array([[1], [1], [0]])\r\n\r\n# sigmoid Function\r\n\r\n\r\ndef sigmoid(x):\r\n return 1/(1+(np.exp(-x)))\r\n\r\n# Derivative of sigmoid function\r\n\r\n\r\ndef derivative_sigmoid(x):\r\n return x*(1-x)\r\n\r\n\r\n# Variable initialization\r\nepoch = 5000 # setting training iterations\r\nlr = 0.1 # setting learning rate\r\ninputlayer_neurons = X.shape[1] # number of features in dataset\r\nhiddenlayer_neurons = 3 # number of hidden layers neurons\r\noutput_neurons = 1 # number of output layers neurons\r\n# weights and bias initialization\r\nwh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))\r\nbh = np.random.uniform(size=(1, hiddenlayer_neurons))\r\nwout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))\r\nbout = np.random.uniform(size=(1, output_neurons))\r\n\r\nfor i in range(epoch):\r\n\r\n # forward propagation\r\n hidden_layer_input1 = np.dot(X, wh)\r\n hidden_layer_input = hidden_layer_input1 + bh\r\n hiddenlayer_activations = sigmoid(hidden_layer_input)\r\n output_layer_input1 = np.dot(hiddenlayer_activations, wout)\r\n output_layer_input = output_layer_input1 + bout\r\n output = sigmoid(output_layer_input)\r\n\r\n # backward propagation\r\n E = y - output\r\n slope_output_layer = derivative_sigmoid(output)\r\n slope_hidden_layer = derivative_sigmoid(hiddenlayer_activations)\r\n d_output = E * slope_output_layer\r\n Error_at_hidden_layer = d_output.dot(wout.T)\r\n d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer\r\n wout += hiddenlayer_activations.T.dot(d_output) * lr\r\n bout += np.sum(d_output, axis=0, keepdims=True) * lr\r\n wh 
+= X.T.dot(d_hiddenlayer) * lr\r\n    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr\r\n\r\n\r\nprint('actual=\\n', y, '\\n')\r\nprint('predict=\\n', output)\r\n","sub_path":"basic module/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"574540049","text":"import sys\n\nimport tensorflow as tf\nimport numpy as np\nfrom numpy import array, shape  # several helpers below use the bare names\nfrom random import sample\n\n################# Preprocessing #################\n\ndef split_dataset(x, y, ratio = [0.7, 0.15, 0.15] ):\n    # number of examples\n    data_len = len(x)\n    lens = [ int(data_len*item) for item in ratio ]\n\n    trainX, trainY = x[:lens[0]], y[:lens[0]]\n    testX, testY = x[lens[0]:lens[0]+lens[1]], y[lens[0]:lens[0]+lens[1]]\n    validX, validY = x[-lens[-1]:], y[-lens[-1]:]\n\n    return (trainX,trainY), (testX,testY), (validX,validY)\n\ndef split_dataset_mlp(x, y, z, ratio = [0.7, 0.15, 0.15] ):\n    # number of examples\n    data_len = len(x)\n    lens = [ int(data_len*item) for item in ratio ]\n\n    trainX, trainY, trainZ = x[:lens[0]], y[:lens[0]], z[:lens[0]]\n    testX, testY, testZ = x[lens[0]:lens[0]+lens[1]], y[lens[0]:lens[0]+lens[1]], z[lens[0]:lens[0]+lens[1]]\n    validX, validY, validZ = x[-lens[-1]:], y[-lens[-1]:], z[-lens[-1]:]\n\n    return (trainX,trainY,trainZ), (testX,testY,testZ), (validX,validY,validZ)\n\ndef batch_gen(x, y, batch_size):\n    # infinite while\n    while True:\n        for i in range(0, len(x), batch_size):\n            if (i+1)*batch_size < len(x):\n                yield x[i : (i+1)*batch_size ].T, y[i : (i+1)*batch_size ].T\n\ndef rand_batch_gen(x, y, batch_size):\n    while True:\n        sample_idx = sample(list(np.arange(len(x))), batch_size)\n        yield x[sample_idx].T, y[sample_idx].T\n\ndef rand_batch_mlp(x, y, z, batch_size):\n    while True:\n        sample_idx = sample(list(np.arange(len(x))), batch_size)\n        yield np.array(x)[sample_idx], np.array(y)[sample_idx], np.array(z)[sample_idx]\n\n\ndef decode(sequence, lookup, separator=''): # 0 used for padding, is ignored\n    return separator.join([ lookup[element] for element in sequence if element ])\n\n\ndef getRevVocab(vocab):\n    return {v: k for k, v in vocab.items()}\n\n\ndef flattenStory(stories, lengths):\n    out_sentences_dev1 = [item for sent in stories for item in sent]\n    out_seq_len_dev1 = [item for sent in lengths for item in sent]\n    return out_sentences_dev1, out_seq_len_dev1\n\ndef getBatchGen(trainX, trainY, batch_size):\n    counter = 0\n    while True:\n        if counter >= shape(trainX)[0] // batch_size:\n            counter = 0\n            yield trainX[counter:counter+batch_size].T, trainY[counter:counter+batch_size].T\n            counter += 1\n        else:\n            yield trainX[counter:counter+batch_size].T, trainY[counter:counter+batch_size].T\n            counter += 1\n\n\ndef getBatchGenMLP(trainX, trainY, trainZ, batch_size):\n    counter = 0\n    while True:\n        if counter >= shape(trainX)[0] // batch_size:\n            counter = 0\n            yield trainX[counter:counter+batch_size].T, trainY[counter:counter+batch_size].T, trainZ[counter:counter+batch_size]\n            counter += 1\n        else:\n            yield trainX[counter:counter+batch_size].T, trainY[counter:counter+batch_size].T, trainZ[counter:counter+batch_size]\n            counter += 1\n\ndef orderStories(data, order):\n    out_sentences_orderd = []\n    for i, story in enumerate(data):\n        out_sentences_orderd.append([story[item] for item in order[i]])\n    return out_sentences_orderd\n\ndef makeDataSeq2SeqReady(data):\n    out_sentences_enc = []\n    out_sentences_dec = []\n    for i, item in enumerate(data):\n        out_sentences_enc.append(item[:-1])\n        out_sentences_dec.append(item[1:])\n\n    return out_sentences_enc, out_sentences_dec\n\n
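# usage sketch (illustrative): makeDataSeq2SeqReady([[1, 2, 3, 4]]) returns\n# enc=[[1, 2, 3]] and dec=[[2, 3, 4]]: teacher-forcing pairs in which the\n# decoder target sequence is the encoder input shifted by one token.\n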
def w2vToNumpy():\n    word2vec = {} #skip information on first line\n    fin = open('glove.6B.50d.txt')\n    for line in fin:\n        items = line.replace('\\r','').replace('\\n','').split(' ')\n        if len(items) < 10: continue\n        word = items[0]\n        vect = np.array([float(i) for i in items[1:] if len(i) > 1])\n        word2vec[word] = vect\n\n\n    return word2vec\n\n################# Pipeline #################\n\nOOV = '<oov>'  # out-of-vocabulary marker; must differ from PAD below\nPAD = '<pad>'  # padding marker\n\ndef ttokenize(scentence):\n    import re\n    #word=scentence.split(' ')\n    word = scentence.lower()\n    token = re.compile(\"[\\w]+(?=n't)|n't|\\'m|\\'ll|[\\w]+|[.?!;,\\-\\(\\)—\\:']\")\n    t=token.findall(word)\n    #t=list(reversed(t))\n    return t\n\ndef tokenize(input):\n    print(input.split(' '))\n    return input.split(' ')\n\ndef my_pipeline(data, vocab=None, max_sent_len_=None):\n    is_ext_vocab = True\n    if vocab is None:\n        is_ext_vocab = False\n        vocab = {PAD: 0, OOV: 1}\n\n    max_sent_len = -1\n    data_sentences = []\n    data_orders = []\n\n    out_seq_len = []\n\n\n    for instance in data:\n        sents = []\n        data_seq_len = []\n        for sentence in instance['story']:\n            sent = []\n            tokenized = ttokenize(sentence)\n\n            data_seq_len.append(len(tokenized))\n\n            for token in tokenized:\n\n                if not is_ext_vocab and token not in vocab:\n                    vocab[token] = len(vocab)\n                if token not in vocab:\n                    token_id = vocab[OOV]\n                else:\n                    token_id = vocab[token]\n                sent.append(token_id)\n            if len(sent) > max_sent_len:\n                max_sent_len = len(sent)\n            sents.append(sent)\n\n        out_seq_len.append(data_seq_len)\n\n        data_sentences.append(sents)\n        data_orders.append(instance['order'])\n\n    if max_sent_len_ is not None:\n        max_sent_len = max_sent_len_\n    out_sentences = np.full([len(data_sentences), 5, max_sent_len], vocab[PAD], dtype=np.int32)\n\n    for i, elem in enumerate(data_sentences):\n        for j, sent in enumerate(elem):\n            out_sentences[i, j, 0:len(sent)] = sent\n\n    out_orders = np.array(data_orders, dtype=np.int32)\n\n    return out_sentences, out_orders, out_seq_len, vocab, max_sent_len\n\n################# Models #################\n\n######### Seq2Seq + MLP #########\n\nclass Seq2SeqOrdering(object):\n\n    def __init__(self, xseq_len, yseq_len,\n            xvocab_size, yvocab_size,\n            emb_dim, num_layers, ckpt_path,\n            lr=0.01,\n            epochs=10, model_name='seq2seq_model'):\n\n        # attach these arguments to self\n        self.xseq_len = xseq_len\n        self.yseq_len = yseq_len\n        self.ckpt_path = ckpt_path\n        self.epochs = epochs\n        self.model_name = model_name\n        self.emb_dim = emb_dim\n        self.epochs = 10000\n\n\n        self.mlp_hidden = 64\n        self.n_classes = 2\n        self.mlp_input = emb_dim\n        self.mlp_epochs = 10000\n\n\n        # build thy graph\n        # attach any part of the graph that needs to be exposed, to the self\n        def __graph__():\n\n\n            ############### Placeholders for seq2seq ###############\n\n            # placeholders\n            tf.reset_default_graph()\n            # encoder inputs : list of indices of length xseq_len\n            self.enc_ip = [ tf.placeholder(shape=[None,],\n                            dtype=tf.int64,\n                            name='ei_{}'.format(t)) for t in range(xseq_len) ]\n\n            # labels that represent the real outputs\n            self.labels = [ tf.placeholder(shape=[None,],\n                            dtype=tf.int64,\n                            name='ei_{}'.format(t)) for t in range(yseq_len) ]\n\n            # decoder inputs : 'GO' + [ y1, y2, ... 
y_t-1 ]\n self.dec_ip = [ tf.zeros_like(self.enc_ip[0], dtype=tf.int64, name='GO') ] + self.labels[:-1]\n\n\n # Basic LSTM cell wrapped in Dropout Wrapper\n self.keep_prob = tf.placeholder(tf.float32)\n # define the basic cell\n\n ############### Set Up LSTM Net ###############\n\n basic_cell = tf.nn.rnn_cell.DropoutWrapper(\n tf.nn.rnn_cell.BasicLSTMCell(self.emb_dim, state_is_tuple=True),\n output_keep_prob=self.keep_prob)\n # stack cells together : n layered model\n stacked_lstm = tf.nn.rnn_cell.MultiRNNCell([basic_cell]*num_layers, state_is_tuple=True)\n\n\n # for parameter sharing between training model\n # and testing model\n with tf.variable_scope('decoder') as scope:\n # build the seq2seq model\n # inputs : encoder, decoder inputs, LSTM cell type, vocabulary sizes, embedding dimensions\n self.decode_outputs, self.decode_states = tf.nn.seq2seq.embedding_rnn_seq2seq(self.enc_ip,self.dec_ip, stacked_lstm,\n xvocab_size, yvocab_size, emb_dim)\n # share parameters\n scope.reuse_variables()\n # testing model, where output of previous timestep is fed as input\n # to the next timestep\n self.decode_outputs_test, self.decode_states_test = tf.nn.seq2seq.embedding_rnn_seq2seq(\n self.enc_ip, self.dec_ip, stacked_lstm, xvocab_size, yvocab_size,emb_dim,\n feed_previous=True)\n\n\n ############### Seq2seq Loss ###############\n\n with tf.variable_scope('loss') as scope:\n # weighted loss\n # TODO : add parameter hint\n loss_weights = [ tf.ones_like(label, dtype=tf.float32) for label in self.labels ]\n self.loss = tf.nn.seq2seq.sequence_loss(self.decode_outputs, self.labels, loss_weights, yvocab_size)\n\n scope.reuse_variables()\n\n self.loss_permutation = tf.nn.seq2seq.sequence_loss(self.decode_outputs_test, self.labels, loss_weights, yvocab_size)\n\n\n ############### Seq2seq Optimisation ###############\n\n self.train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(self.loss)\n\n\n self.n_hidden_1 = 512 # 1st layer number of features\n self.n_hidden_2 = 256 # 2nd layer number of features\n self.n_input = self.emb_dim * 4\n self.n_classes_mlp = 5\n self.learning_rate = 0.01\n self.output_size = 25\n\n # tf Graph input\n self.x = tf.placeholder(\"float\", [None, self.n_input])\n self.y = tf.placeholder(tf.int64, [None, self.n_classes_mlp])\n\n\n # Store layers weight & bias\n self.weights = {\n 'h1': tf.Variable(tf.random_normal([self.n_input, self.n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([self.n_hidden_1, self.n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([self.n_hidden_2, self.output_size]))\n }\n self.biases = {\n 'b1': tf.Variable(tf.random_normal([self.n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([self.n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([self.output_size]))\n }\n\n # Construct model\n self.logits = self.multilayer_perceptron(self.x, self.weights, self.biases)\n #self.y = tf.reshape(self.y, [35, 5])\n self.logits_reshaped = tf.reshape(self.logits, [-1, 5, 5])\n\n self.unpacked_logits = [tensor for tensor in tf.unpack(self.logits_reshaped, axis=1)]\n self.softmaxes = [tf.nn.softmax(tensor) for tensor in self.unpacked_logits ]\n self.softmaxed_logits = tf.pack(self.softmaxes, axis=1)\n self.mlp_predict = tf.arg_max(self.softmaxed_logits , 2)\n\n # Define loss and optimizer\n self.mlp_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y))\n self.mlp_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.mlp_loss)\n\n sys.stdout.write('>> Graph Ready <<')\n # build 
comput graph\n __graph__()\n\n # get the feed dictionary\n def get_feed(self, X, Y, keep_prob):\n feed_dict = {self.enc_ip[t]: X[t] for t in range(self.xseq_len)}\n feed_dict.update({self.labels[t]: Y[t] for t in range(self.yseq_len)})\n feed_dict[self.keep_prob] = keep_prob # dropout prob\n #print(\">> Made feed dict.\")\n return feed_dict\n\n # Create model\n def multilayer_perceptron(self, x, weights, biases):\n # Hidden layer with RELU activation\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n # Hidden layer with RELU activation\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n # Output layer with linear activation\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer\n\n # run one batch for training\n def train_batch(self, sess, train_batch_gen):\n # get batches\n batchX, batchY = train_batch_gen.__next__()\n # build feed\n feed_dict = self.get_feed(batchX, batchY, keep_prob=0.5)\n _, loss_v = sess.run([self.train_op, self.loss], feed_dict)\n return loss_v\n\n def eval_step(self, sess, eval_batch_gen):\n # get batches\n batchX, batchY = eval_batch_gen.__next__()\n # build feed\n feed_dict = self.get_feed(batchX, batchY, keep_prob=1.)\n loss_v, dec_op_v = sess.run([self.loss, self.decode_outputs_test], feed_dict)\n # dec_op_v is a list; also need to transpose 0,1 indices\n # (interchange batch_size and timesteps dimensions\n dec_op_v = np.array(dec_op_v).transpose([1,0,2])\n return loss_v, dec_op_v, batchX, batchY\n\n # evaluate 'num_batches' batches\n def eval_batches(self, sess, eval_batch_gen, num_batches):\n losses = []\n for i in range(num_batches):\n loss_v, dec_op_v, batchX, batchY = self.eval_step(sess, eval_batch_gen)\n losses.append(loss_v)\n return np.mean(losses)\n\n # finally the train function that\n # runs the train_op in a session\n # evaluates on valid set periodically\n # prints statistics\n def train(self, train_set, valid_set, sess=None ):\n # we need to save the model periodically\n #saver = tf.train.Saver()\n # if no session is given\n if not sess:\n # create a session\n sess = tf.Session()\n # init all variables\n sess.run(tf.global_variables_initializer())\n\n sys.stdout.write('>> Training started <<')\n # run M epochs\n for i in range(self.epochs):\n try:\n self.train_batch(sess, train_set)\n if i % 100 == 0: #and i% (self.epochs//1) == 0: # TODO : make this tunable by the user\n # save model to disk\n #saver.save(sess, self.ckpt_path + self.model_name + '.ckpt', global_step=i)\n # evaluate to get validation loss\n val_loss = self.eval_batches(sess, valid_set, 16) # TODO : and this\n # print stats\n print('\\nModel saved to disk at iteration #{}'.format(i))\n print('val loss : {0:.6f}'.format(val_loss))\n sys.stdout.flush()\n except KeyboardInterrupt: # this will most definitely happen, so handle it\n print('Interrupted by user at iteration {}'.format(i))\n self.session = sess\n return sess\n\n return sess\n\n def trainMLP(self, train_batch, eval_batch, sess):\n\n for i in range(self.mlp_epochs):\n try:\n # Get training batch\n train_batchX, train_batchY, train_batchZ = next(train_batch)\n\n # Determine batch size\n batch_size = np.shape(train_batchZ)[0]\n # Flatten story for embedding\n flatten_enc, flatten_dec = self.flattenBatch(train_batchX.T, train_batchY.T)\n # Embedd training batchX\n _, embedded_x = self.getSeq2SeqEmbedding(sess, flatten_enc, flatten_dec)\n\n\n final_h = embedded_x[0].h\n flatten_embeddings = 
final_h.reshape(batch_size, 4*self.emb_dim)\n feed ={self.x:flatten_embeddings,self.y:array(train_batchZ)}\n print(shape(sess.run(self.logits_reshaped, feed_dict=feed)))\n if i % 100 == 0:\n train_batchX, train_batchY, train_batchZ = next(eval_batch)\n flatten_enc, flatten_dec = self.flattenBatch(train_batchX.T, train_batchY.T)\n _, embedded_x = self.getSeq2SeqEmbedding(sess, flatten_enc, flatten_dec)\n final_h = embedded_x[0].h\n flatten_embeddings = final_h.reshape(batch_size, 4*self.emb_dim)\n feed ={self.x:flatten_embeddings,self.y:array(train_batchZ)}\n\n loss, pred = sess.run([self.mlp_loss, self.mlp_predict], feed_dict = feed)\n acc = calculate_accuracy(train_batchZ, pred)\n print(\"Iteration: {} Loss: {} Acc: {}\".format(i, loss, acc))\n\n except KeyboardInterrupt:\n print(\"Training Stopped\")\n break\n\n\n\n def restore_last_session(self):\n saver = tf.train.Saver()\n # create a session\n sess = tf.Session()\n # get checkpoint state\n ckpt = tf.train.get_checkpoint_state(self.ckpt_path)\n # restore session\n if ckpt and ckpt.model_checkpoint_path:\n print(\"Restoring last session at: \", ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n return sess\n # return to user\n else:\n sess.close()\n print(\"No session saved.\")\n\n def getSeq2SeqEmbedding(self, sess, x, y):\n feed = self.get_feed(x, y, keep_prob = 1)\n dec_out, dec_states = sess.run([self.decode_outputs, self.decode_states], feed_dict = feed)\n return dec_out, dec_states\n\n def flattenBatch(self, x, y):\n enc = array([item for something in x for item in something]).T\n dec = array([item for something in y for item in something]).T\n return enc, dec\n\n def predictLogits(self, sess, x, y, t):\n feed = self.get_feed(x, y, keep_prob = 1)\n feed.update({x: t})\n return sess.run(self.predict, feed_dict = feed)\n\n # prediction\n def predict(self, sess, X):\n feed_dict = {self.enc_ip[t]: X[t] for t in range(self.xseq_len)}\n feed_dict[self.keep_prob] = 1.\n dec_op_v = sess.run(self.decode_outputs_test, feed_dict)\n # dec_op_v is a list; also need to transpose 0,1 indices\n # (interchange batch_size and timesteps dimensions\n dec_op_v = np.array(dec_op_v).transpose([1,0,2])\n # return the index of item with highest probability\n return np.argmax(dec_op_v, axis=2)\n\n\n######### Base LSTM #########\n\ndef baselineLSTM():\n\n ## hidden 132, batch 50 - 55.5%\n target_size = 5\n vocab_size = len(vocab)\n input_size = 30\n n = 5460240\n hidden_size = 134\n BATCH_SIZE= 45\n n_stacks = 1\n embedding_dim = 60\n\n ### Base Line MODEL ###\n tf.reset_default_graph()\n ## PLACEHOLDERS\n story = tf.placeholder(tf.int64, [None, max_sent_len], \"story\") # [batch_size x 5 x max_length]\n order = tf.placeholder(tf.int64, [None, 5], \"order\") # [batch_size x 5]\n sen_len = tf.placeholder(tf.int64, [None], \"sen_len\")\n batch_size = tf.shape(story)[0]//5\n\n W = tf.Variable(tf.constant(0.0, shape=[vocab_size, embedding_dim]),\n trainable=False, name=\"W\")\n\n keep_prob = tf.placeholder(tf.float32)\n\n learning_rate = tf.placeholder(tf.float32)\n\n # Word embeddings\n initializer = tf.random_uniform_initializer(-0.1, 0.1)\n\n embeddings = tf.get_variable(\"W\", [vocab_size, input_size], initializer=initializer)\n\n sentences_embedded = tf.nn.embedding_lookup(embeddings, story)\n\n with tf.variable_scope(\"encoder\") as varscope:\n\n basic_cell = tf.nn.rnn_cell.DropoutWrapper(\n tf.nn.rnn_cell.BasicLSTMCell(hidden_size, state_is_tuple=True),\n output_keep_prob=keep_prob)\n\n _, final_first = 
tf.nn.dynamic_rnn(basic_cell, sentences_embedded, sequence_length=sen_len, dtype=tf.float32)\n\n    final_firs_h = final_first.h\n\n    reshape_final = tf.reshape(final_firs_h, [-1, hidden_size*5])\n\n    logits_ = tf.contrib.layers.linear(reshape_final, 25)\n\n    logits = tf.reshape(logits_, [-1, 5, 5])\n\n\n    loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=order))\n\n    ## Optimizer\n    optim = tf.train.AdamOptimizer(learning_rate)\n    optim_op = optim.minimize(loss)\n    init = tf.initialize_all_variables()\n\n    unpacked_logits = [tensor for tensor in tf.unpack(logits, axis=1)]\n    softmaxes = [tf.nn.softmax(tensor) for tensor in unpacked_logits]\n    softmaxed_logits = tf.pack(softmaxes, axis=1)#\n    predict = tf.arg_max(softmaxed_logits, 2)\n\n    saver = tf.train.Saver()\n\n    sess = tf.Session()\n    sess.run(init)\n\n    return sess\n\n\n######### Train Baseline #########\n\ndef trainModel(sess = None):\n    # NOTE: as written this reads story, order, sen_len, keep_prob, learning_rate,\n    # optim_op, predict and the out_* arrays as module-level names; baselineLSTM()\n    # above defines them locally, so they would need to be promoted to globals\n    # (or passed in) for this function to run.\n\n    if not sess:\n        sess = tf.Session()\n\n    sess.run(tf.initialize_all_variables())\n\n    for j in range(1):\n        counter = 0\n        slow_down = False\n        slow_slow_down = False\n        for i in range(n // BATCH_SIZE):\n            #x, y, z = next(batch_gen)\n            #x_flat, y_flat = flattenStory(x, y)\n            if counter >= len(out_sentences)//BATCH_SIZE - BATCH_SIZE:\n                counter =0\n            try:\n                if slow_down == True and slow_slow_down == False:\n                    l_r = 0.001\n                elif slow_slow_down == True:\n                    l_r = 0.0001\n                else:\n                    l_r = 0.01\n\n                inst_story = out_sentences_flat[counter * BATCH_SIZE*5: (counter + 1) * BATCH_SIZE*5]\n                inst_order = out_orders[counter * BATCH_SIZE: (counter + 1) * BATCH_SIZE]\n                inst_seq_len = out_len_flat[counter * BATCH_SIZE*5: (counter + 1) * BATCH_SIZE*5]\n                feed_dict = {story: inst_story, order: inst_order, sen_len: inst_seq_len, keep_prob:0.5, learning_rate: l_r}\n                test = sess.run(optim_op, feed_dict = feed_dict)\n                #print(np.shape(test))\n\n                #print('hidden_size =', hidden_size, 'Epoch =' , j, \"Batch:\", i, 'out of ',n // BATCH_SIZE, \"Loss:\", loss1)\n\n                if i%10 == 0:\n                    test_feed_dict = {story:test_stories1 , order: test_orders, sen_len:test_seq_len1, keep_prob:1.0, learning_rate:l_r}\n                    test_predicted = sess.run(predict, feed_dict=test_feed_dict)\n                    test_accuracy = nn.calculate_accuracy(test_orders, test_predicted)\n                    print('test_accuracy =', test_accuracy)\n                    if test_accuracy > 0.538 and test_accuracy < 0.55:\n                        slow_down = True\n                        slow_slow_down = False\n                    elif test_accuracy > 0.55:\n                        slow_down = False\n                        slow_slow_down = True\n                    else:\n                        slow_down = False\n\n                    if test_accuracy > 0.555:\n                        nn.save_model(sess)\n                        print(test_accuracy)\n                        break\n\n                counter += 1\n            except KeyboardInterrupt:\n                print(\"Training Stopped\")\n                nn.save_model(sess)\n                break\n\n\nif __name__ == \"__main__\":\n    import sys\n\n    if \"run\" in sys.argv:\n        model = baselineLSTM()\n        trainModel(model)\n    \n","sub_path":"nlp/story_understanding_lstm.py","file_name":"story_understanding_lstm.py","file_ext":"py","file_size_in_byte":23563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"151578589","text":"def check_fermat(a,b,c,n):\n    left=pow(a,n) + pow(b,n)\n    right=pow(c, n)\n\n    if left == right:\n        # the identity holds, which would make Fermat's Last Theorem wrong for n > 2\n        print(\"wrong\")\n\n    else:\n        # the identity fails, as Fermat's Last Theorem predicts for any n > 2\n        print(\"right\")\n\na,b,c,n=map(int,input(\"Enter four numbers separated by spaces:\\n\").split())\n\ncheck_fermat(a,b,c,n)","sub_path":"Exercise 5.2.py","file_name":"Exercise 5.2.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"417327722","text":"import sys\r\n
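# usage note: a QTimer drives the bar below; its timeout signal is connected to\r\n# funcion(), which advances the value by 0.5 every 10 ms, so a full run takes\r\n# about two seconds.\r\n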
from PyQt5.QtWidgets import (QWidget, QProgressBar, QPushButton, QApplication)\r\nfrom PyQt5.QtCore import QTimer\r\n\r\nclass Ventana(QWidget):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.progressBar = QProgressBar(self)\r\n        self.progressBar.setGeometry(30,40,200,25)\r\n\r\n        self.btnStart = QPushButton('Start', self)\r\n        self.btnStart.move(40,80)\r\n        self.btnStart.clicked.connect(self.startProgress)\r\n\r\n        self.timer = QTimer()\r\n        self.timer.timeout.connect(self.funcion)\r\n        self.step = 0\r\n\r\n    def startProgress(self):\r\n        if self.timer.isActive():\r\n            self.timer.stop()\r\n            self.btnStart.setText('Start')\r\n        else:\r\n            self.timer.start(10)\r\n            self.btnStart.setText('Stop ')\r\n\r\n    def funcion(self):\r\n        if self.step >= 100:\r\n            self.timer.stop()\r\n            self.btnStart.setText('Finish')\r\n            return\r\n        self.step += 0.5\r\n        self.progressBar.setValue(int(self.step))  # setValue expects an int\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    app = QApplication([])\r\n    ventana = Ventana()\r\n    ventana.show()\r\n    sys.exit(app.exec_())","sub_path":"ejemplos/progressBar.py","file_name":"progressBar.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"49663947","text":"m=[[1,4,6,-4,-12,1,5,-31,0,-3,-1],[4,-10,2,-5,-1,3,-18]]\n# Task 4\ns=0\n# Task 5\nn=0\nfor i in range(len(m)):\n    for j in range(len(m[i])):\n        # Task 4\n        if (m[i][j]<0):\n            s+=m[i][j]\n        # Task 5\n        elif (m[i][j]>0):\n            n+=1\nprint('4. Sum of the negative elements of the list:',s)\nprint('5. Number of positive elements:',n)\n\n","sub_path":"Python/Универ/Типовой Расчёт 5/TR5 - 4-5.py","file_name":"TR5 - 4-5.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"235628859","text":"#!/usr/bin/env python\nu\"\"\"\nracmo_extrap_mean.py\nWritten by Tyler Sutterley (01/2021)\nInterpolates and extrapolates downscaled RACMO products to times and coordinates\n\nUses fast nearest-neighbor search algorithms\nhttps://scikit-learn.org/stable/modules/generated/sklearn.neighbors.BallTree.html\nhttps://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html\nand inverse distance weighted interpolation to extrapolate spatially\n\nCALLING SEQUENCE:\n    python racmo_extrap_mean.py --directory= --version=3.0 \\\n        --product=SMB,PRECIP,RUNOFF --coordinate=[-39e4,-133e4],[-39e4,-133e4] \\\n        --date=2016.1,2018.1\n\nCOMMAND LINE OPTIONS:\n    -D X, --directory=X: Working data directory\n    --version=X: Downscaled RACMO Version\n        1.0: RACMO2.3/XGRN11\n        2.0: RACMO2.3p2/XGRN11\n        3.0: RACMO2.3p2/FGRN055\n    --product: RACMO product to calculate\n        SMB: Surface Mass Balance\n        PRECIP: Precipitation\n        RUNOFF: Melt Water Runoff\n        SNOWMELT: Snowmelt\n        REFREEZE: Melt Water Refreeze\n    --mean: Start and end year of mean (separated by commas)\n    --coordinate=X: Polar Stereographic X and Y of point\n    --date=X: Date to interpolate in year-decimal format\n    --csv=X: Read dates and coordinates from a csv file\n    --fill-value: Replace invalid values with fill value\n        (default uses original fill values from data file)\n\nPYTHON DEPENDENCIES:\n    numpy: Scientific Computing Tools For Python\n        https://numpy.org\n        https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n    scipy: Scientific Tools for Python\n        https://docs.scipy.org/doc/\n    netCDF4: Python interface to the netCDF C library\n        https://unidata.github.io/netcdf4-python/netCDF4/index.html\n    pyproj: Python interface to PROJ library\n    
https://pypi.org/project/pyproj/\n scikit-learn: Machine Learning in Python\n https://scikit-learn.org/stable/index.html\n https://github.com/scikit-learn/scikit-learn\n\nUPDATE HISTORY:\n Updated 01/2021: using conversion protocols following pyproj-2 updates\n https://pyproj4.github.io/pyproj/stable/gotchas.html\n Updated 04/2020: reduced to interpolation function. output masked array\n Updated 09/2019: read subsets of DS1km netCDF4 file to save memory\n Written 09/2019\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport re\nimport pyproj\nimport netCDF4\nimport numpy as np\nimport scipy.interpolate\nfrom sklearn.neighbors import KDTree, BallTree\n\n#-- PURPOSE: read and interpolate downscaled RACMO products\ndef extrapolate_racmo_mean(base_dir, EPSG, VERSION, PRODUCT, tdec, X, Y,\n RANGE=[], SEARCH='BallTree', NN=10, POWER=2.0, FILL_VALUE=None):\n\n #-- Full Directory Setup\n DIRECTORY = 'SMB1km_v{0}'.format(VERSION)\n\n #-- netcdf variable names\n input_products = {}\n input_products['SMB'] = 'SMB_rec'\n input_products['PRECIP'] = 'precip'\n input_products['RUNOFF'] = 'runoff'\n input_products['SNOWMELT'] = 'snowmelt'\n input_products['REFREEZE'] = 'refreeze'\n #-- version 1 was in separate files for each year\n if (VERSION == '1.0'):\n RACMO_MODEL = ['XGRN11','2.3']\n VARNAME = input_products[PRODUCT]\n SUBDIRECTORY = '{0}_v{1}'.format(VARNAME,VERSION)\n input_dir = os.path.join(base_dir, 'RACMO', DIRECTORY, SUBDIRECTORY)\n elif (VERSION == '2.0'):\n RACMO_MODEL = ['XGRN11','2.3p2']\n var = input_products[PRODUCT]\n VARNAME = var if PRODUCT in ('SMB','PRECIP') else '{0}corr'.format(var)\n input_dir = os.path.join(base_dir, 'RACMO', DIRECTORY)\n elif (VERSION == '3.0'):\n RACMO_MODEL = ['FGRN055','2.3p2']\n var = input_products[PRODUCT]\n VARNAME = var if (PRODUCT == 'SMB') else '{0}corr'.format(var)\n input_dir = os.path.join(base_dir, 'RACMO', DIRECTORY)\n\n #-- read mean from netCDF4 file\n arg = (RACMO_MODEL[0],RACMO_MODEL[1],VERSION,PRODUCT,RANGE[0],RANGE[1])\n mean_file = '{0}_RACMO{1}_DS1km_v{2}_{3}_Mean_{4:4d}-{5:4d}.nc'.format(*arg)\n with netCDF4.Dataset(os.path.join(input_dir,mean_file),'r') as fileID:\n MEAN = fileID[VARNAME][:,:].copy()\n\n #-- input cumulative netCDF4 file\n args = (RACMO_MODEL[0],RACMO_MODEL[1],VERSION,PRODUCT)\n input_file = '{0}_RACMO{1}_DS1km_v{2}_{3}_cumul.nc'.format(*args)\n\n #-- Open the RACMO NetCDF file for reading\n fileID = netCDF4.Dataset(os.path.join(input_dir,input_file), 'r')\n #-- input shape of RACMO data\n nt,ny,nx = fileID[VARNAME].shape\n #-- Get data from each netCDF variable\n d = {}\n #-- cell origins on the bottom right\n dx = np.abs(fileID.variables['x'][1]-fileID.variables['x'][0])\n dy = np.abs(fileID.variables['y'][1]-fileID.variables['y'][0])\n #-- latitude and longitude arrays at center of each cell\n d['LON'] = fileID.variables['LON'][:,:].copy()\n d['LAT'] = fileID.variables['LAT'][:,:].copy()\n #-- extract time (decimal years)\n d['TIME'] = fileID.variables['TIME'][:].copy()\n #-- mask object for interpolating data\n d['MASK'] = np.array(fileID.variables['MASK'][:],dtype=np.bool)\n i,j = np.nonzero(d['MASK'])\n #-- reduce mean to valid points\n var1 = MEAN[i,j]\n\n #-- convert RACMO latitude and longitude to input coordinates (EPSG)\n crs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(EPSG))\n crs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n direction = pyproj.enums.TransformDirection.INVERSE\n #-- convert 
projection from model coordinates\n xg,yg = transformer.transform(d['LON'], d['LAT'], direction=direction)\n\n #-- construct search tree from original points\n #-- can use either BallTree or KDTree algorithms\n xy1 = np.concatenate((xg[i,j,None],yg[i,j,None]),axis=1)\n tree = BallTree(xy1) if (SEARCH == 'BallTree') else KDTree(xy1)\n\n #-- output extrapolated arrays of variable\n extrap_var = np.zeros_like(tdec,dtype=np.float)\n #-- type designating algorithm used (1: interpolate, 2: backward, 3:forward)\n extrap_type = np.ones_like(tdec,dtype=np.uint8)\n\n #-- inverse distance weighting to extrapolate in space\n #-- query the search tree to find the NN closest points\n xy2 = np.concatenate((X[:,None],Y[:,None]),axis=1)\n dist,indices = tree.query(xy2, k=NN, return_distance=True)\n count = len(tdec)\n #-- normalized weights if POWER > 0 (typically between 1 and 3)\n #-- in the inverse distance weighting\n power_inverse_distance = dist**(-POWER)\n s = np.sum(power_inverse_distance, axis=1)\n w = power_inverse_distance/np.broadcast_to(s[:,None],(count,NN))\n #-- spatially extrapolate using inverse distance weighting\n dt = (tdec - d['TIME'][0])/(d['TIME'][1] - d['TIME'][0])\n extrap_var[:] = dt*np.sum(w*var1[indices],axis=1)\n\n #-- replace fill value if specified\n if FILL_VALUE:\n ind, = np.nonzero(extrap_type == 0)\n extrap_var[ind] = FILL_VALUE\n fv = FILL_VALUE\n else:\n fv = 0.0\n\n #-- close the NetCDF files\n fileID.close()\n\n #-- return the extrapolated values\n return (extrap_var,extrap_type,fv)\n","sub_path":"SMBcorr/racmo_extrap_mean.py","file_name":"racmo_extrap_mean.py","file_ext":"py","file_size_in_byte":7139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"508197664","text":"'''\nCreated on Feb 3, 2011\n\n@author: qiaosic\n'''\n\nfrom direct.gui.DirectGui import *\n\n#from pandac.PandaModules import TextNode\nfrom pandac.PandaModules import *\nfrom direct.gui.DirectGui import DirectFrame\nfrom textwrap import *\n\nfrom direct.task import Task\n\nfrom JournalMgr import *\nfrom UIBase import *\nfrom direct.gui.OnscreenImage import OnscreenImage\n\nfrom direct.task import Task\n\nclass JournalUI(UIBase):\n '''\n This is the UI of journal\n '''\n\n def __init__(self,world):\n '''\n Constructor\n '''\n UIBase.__init__(self, world)\n\n \n '''\n init variables\n '''\n #load skin of journal button and scrollbar as egg files\n self.button_maps = self.world.loader.loadModel(\"./LEGameAssets/Models/button\") \n self.scrollbar_maps = self.world.loader.loadModel(\"./LEGameAssets/Models/scrollbar\")\n\n #skin for the journal tray\n self.img_files = [\"./LEGameAssets/Textures/journal_rollout_tray.png\"]\n\n #variables that store the UI elements which are going to be created\n self.journal_window_frame = None\n self.window_show = True\n self.journal_window = None\n self.frame = None\n self.canvas = None\n \n #variables that store list of title and statement of journal from journalMrg\n self.journal_entries= []\n self.quest_text = []\n\n #size of the screen\n self.screen_size_x = base.win.getXSize()\n self.screen_size_y = base.win.getYSize()\n \n \n #variables that help to make the popout window and arrange the text\n self.count = [0,0,0,0]\n self.char_size = 0.05\n self.last_char_size = None\n self.line_space = 0.2\n self.top_line = 0.1\n self.longest_line = {\"menu\":[0,0],\"quest\":[0,0]}\n self.buttons_1 = []\n self.buttons_2 = []\n \n self.page_canvas_size= {\"menu\": (0,0,0,0), \"quest\": (0,0,0,0)}\n self.page_buttons = 
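A minimal, self-contained sketch of the inverse-distance weighting performed by racmo_extrap_mean.py above, keeping the script's NN=10 and POWER=2.0 defaults; it assumes every query point lies strictly away from the known points (all distances > 0), since the weights are dist**-power:

import numpy as np
from sklearn.neighbors import BallTree

def idw_extrapolate(xy_known, values, xy_query, k=10, power=2.0):
    # find the k nearest known points for every query point
    dist, idx = BallTree(xy_known).query(xy_query, k=k, return_distance=True)
    # inverse-distance weights, normalised so each row sums to one
    w = dist**(-power)
    w /= w.sum(axis=1, keepdims=True)
    # weighted average of the k neighbouring values
    return np.sum(w * values[idx], axis=1)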
{\"menu\": self.buttons_1, \"quest\": self.buttons_2}\n self.showing_page = \"menu\" \n \n #set the default canvas size and frame size\n self.canvas_size = (0.3, 1.25, 0.015, 0.75)\n self.frame_size = (0.3, 1.25, 0.015, 0.75)\n \n def dummyMethod(): # in order to make the button do nothing when clicked\n pass\n \n self.b = DirectButton( geom = (self.button_maps.find(\"**/ok\"),self.button_maps.find(\"**/click\"),self.button_maps.find(\"**/rolling_over\"),self.button_maps.find(\"**/disable\")),\n text_scale = (0.15,0.15), pos = (-0.23, 0, -0.12), relief=None, scale=0.38, command=dummyMethod, parent= base.a2dTopRight)\n world.accept(\"j\", self.popoutWindow);\n world.accept(\"j-up\", self.popoutWindow);\n \n\n \n def update(self):\n self.journal_entries = self.world.journalMgr.getOpenedJournalEntries()\n #self.journal_entries = [(\"title1\",\"Now if I take out the task argument all is fine obviously. However I need a way to access the time the task runs.This will give you a scrollbar at the lower left side of the screen. If you want to parent the scrollbar to a determined frame, you add the keyword parent to the set of keyboards like so.DirectScrollBar is available beginning in Panda3D 1.1. It consists of a long trough, a thumb that slides along the trough, and a pair of buttons on either side of the trough to scroll one line at a time. A DirectScrollBar can be oriented either vertically or horizontally.\"),(\"title2\",\"Completed\"),(\"title3\",\"statement3\")]\n #print base.win.getXSize(),base.win.getYSize()\n #print \"update\"\n \n #Create a popout window \n def popoutWindow(self):\n if self.window_show:\n self.longest_line = {\"menu\":[0,0],\"quest\":[0,0]}\n self.count = [0,0,0,0]\n self.update()\n self.journal_window_frame = DirectFrame(frameColor=(0, 0, 0, 0.12),frameSize=(0.28, 1.27, -0.005, 0.77), parent= base.a2dTopRight, pos = (-1.3,0,-1))\n self.journal_window = DirectScrolledFrame(canvasSize = (self.canvas_size), frameSize = (self.frame_size),frameColor=(0, 0, 0, 0.12),\n autoHideScrollBars = True, manageScrollBars = True, parent = self.journal_window_frame ) \n self.changeSkin(self.img_files)\n self.changeScrollbarSkin()\n self.canvas= self.journal_window.getCanvas() \n self.createMenu()\n self.createQuest()\n self.showMenu()\n self.changeButtonColor()\n try:\n self.showQuest(self.journal_entries[0][0]);\n except:\n pass\n\n \n \n else:\n self.destroy() \n self.window_show = not self.window_show\n \n #destroy the popout window\n def destroy(self):\n self.journal_window_frame.destroy()\n # if the button list has been detached when the parent(journal_window) is destroyed, them won't be destroyed\n for button in self.buttons_1:\n button.destroy()\n for button in self.buttons_2:\n button.destroy()\n for label in self.quest_text:\n label.destroy()\n # clean up the button list\n for i in range(len(self.buttons_1)):\n self.buttons_1.pop()\n for j in range(len(self.buttons_2)):\n self.buttons_2.pop()\n for k in range(len(self.quest_text)):\n self.quest_text.pop()\n \n '''\n These functions are for the other parts to call to hide/show the JournalUI\n ''' \n#----------------------------------------------\n def hideAll(self):\n #self.b.hide()\n if not self.window_show:\n self.journal_window_frame.hide()\n \n\n def showAll(self):\n #self.b.show()\n if not self.window_show:\n self.journal_window_frame.show()\n \n\n#-------------------------------------------------\n def destroyAll(self):\n self.destroy()\n #self.b.destroy()\n \n def createAll(self):\n 
pass\n#----------------------------------------------------------------------------------------- \n \n \n def switchPage(self, page):\n self.journal_window['canvasSize']=(self.page_canvas_size[page])\n self.hideButton(self.page_buttons[self.showing_page])\n self.showButton(self.page_buttons[page]) \n self.showing_page = page\n \n #------------quest------------------\n if page == \"quest\":\n for label in self.quest_text:\n label.reparentTo(self.canvas)\n else :\n self.hideText()\n \n \n \n def hideButton(self, page): \n for button in page:\n button.detachNode()\n \n \n def showButton(self, page):\n for button in page:\n button.reparentTo(self.canvas)\n \n def hideText(self):\n\n for label in self.quest_text:\n label.detachNode()\n \n for k in range(len(self.quest_text)):\n self.quest_text.pop()\n\n \n \n def showMenu(self):\n self.switchPage(\"menu\")\n\n \n def showQuest(self, tag): \n for entry in self.journal_entries:\n if entry[0] == tag:\n temp_text = entry[1] \n self.addText(temp_text, 0.05, \"quest\")\n break \n self.switchPage(\"quest\") \n \n\n def addButton(self, text, position, scale, func_call, type, page): \n if type == 1:\n button = DirectButton(text = (text),text_fg =(1,0,0,1),text_bg =(0,0,0,0),pos =(position),text_scale = (scale), relief = None,command=func_call, extraArgs = [text])\n else:\n button = DirectButton(text = (text),text_fg =(1,0,0,1),text_bg =(0,0,0,0),pos =(position),text_scale = (scale), relief = None,command=func_call)\n\n if(position[0] - button.getWidth()/2.0 < self.longest_line[page][0]):\n '''\n something different here -0.02 for the right side \n ''' \n self.count[0] = position[0] - button.getWidth()/2.0 - 0.02 #right\n self.longest_line[page][0] = self.count[0]\n if(position[1] + button.getWidth()/2.0 > self.longest_line[page][1]):\n self.count[1] = position[1] + button.getWidth()/2.0 #left\n self.longest_line[page][1] = self.count[1] \n self.count[2] = position[2]-button.getHeight()#bottom\n self.journal_window['canvasSize']=(self.count[0],self.count[1],self.count[2],self.count[3])\n \n button.detachNode() #make sure buttons won't be shown at the first time it is created \n self.page_buttons[page].append(button)\n self.page_canvas_size[page] = (self.count[0],self.count[1],self.count[2],self.count[3])\n \n def addText(self, show_text, text_scale, page):\n\n self.page_canvas_size[page]=(self.canvas_size)\n \n max_window_width = abs(self.canvas_size[1] - self.canvas_size[0])\n \n # the width of the DirectLabel object is only changing with the length of the string in text attribute, but this width is different from the width of the DriectFrame and DriectButton,\n # so I don't know what is the unit of this DriectLable width, if you change the scale of the label, the width is not going to change, and the text will going out of the boundary,\n # I should change the max number of character with scale\n text_len = len(show_text) \n\n offset = text_scale - 0.05\n \n if(offset > 0):\n max_text_len = 35 - offset * 500\n if(offset <= 0):\n max_text_len = 35 + abs(offset) * 500\n\n \n text_lines = []\n \n if text_len > max_text_len:\n wrapper = TextWrapper(initial_indent=\"* \", width = max_text_len)\n text_lines = wrapper.wrap(show_text)\n \n else:\n text_lines.append(show_text)\n \n line_space = 0.0\n for t in text_lines:\n temp_text = DirectLabel(text = t,pos = (0.75,0,0.52-line_space),scale = text_scale, text_bg = (0,1,1,0.0),text_fg=(0,0,0,1),frameColor=(0,0,0,0))\n self.quest_text.append(temp_text)\n if (0.5 - line_space)< self.canvas_size[2]:\n 
self.page_canvas_size[page]=(self.canvas_size[0],self.canvas_size[1]-0.1, 0.48 - line_space,self.canvas_size[3])\n line_space +=0.08\n\n for label in self.quest_text: \n label.detachNode() \n \n def createMenu(self):\n #initialize value for the beginning line, this will never change\n self.count[3]= self.top_line\n #add button\n offset = 0\n #self.journal_entries= [(\"title1\",\"Now if I take out the task argument all is fine obviously. However I need a way to access the time the task runs.\"),(\"title2\",\"statement2\"),(\"title3\",\"statement3\")]\n for entry in self.journal_entries:\n self.addButton(entry[0],(-0.08, 0, -0.02+offset),(0.05,0.05),self.showQuest,1 ,\"menu\")\n offset += -0.08\n \n\n def createQuest(self): \n self.addButton(\"Back\",(0.45, 0, 0.62),(0.08,0.08),self.showMenu,0,\"quest\" )\n \n \n \n def changeSkin(self, img_files):\n if len(img_files) == 1:\n self.journal_window['image']=(img_files[0])\n #self.journal_window['image_scale']=(0.43,0,0.43) #old scale without size 800*600\n #self.journal_window['image_pos']=(0.75,0,0.33) #old position without size 800*600\n self.journal_window['image_scale']=(1.335,1,1)\n self.journal_window['image_pos']=(0,0,0)\n self.journal_window.setTransparency(1) \n self.journal_window['frameColor']=(0,0,0,0)\n if len (img_files) == 2:\n self.journal_window_frame['image']=(img_files[1])\n #self.journal_window_frame['image_scale']=(0.44,0,0.44)#old scale without size 800*600\n #self.journal_window_frame['image_pos']=(0.75,0,0.33)#old position without size 800*600\n self.journal_window_frame['image_scale']=(1.335,1,1)\n self.journal_window_frame['image_pos']=(0,0,0)\n self.journal_window_frame.setTransparency(1)\n self.journal_window_frame['frameColor']=(0,0,0,0)\n \n def changeScrollbarSkin(self):\n self.journal_window['verticalScroll_relief']= None# Relief appearance of the frame\n self.journal_window['verticalScroll_image']= (\"./LEGameAssets/Textures/journal_scrollbar.png\")\n self.journal_window.verticalScroll['image_pos']=(1.21,0.1,0.36)\n self.journal_window.verticalScroll['image_scale']=(0.05,0.1,0.38)\n self.journal_window.verticalScroll.incButton['geom']=(self.scrollbar_maps.find(\"**/journal_scrollbutton_down\"))\n self.journal_window.verticalScroll.incButton['relief']=None# Relief appearance of the frame\n self.journal_window.verticalScroll.incButton['geom_scale']=(0.15,0.15,0.15)\n self.journal_window.verticalScroll.decButton['geom']=(self.scrollbar_maps.find(\"**/journal_scrollbutton_up\"))\n self.journal_window.verticalScroll.decButton['relief']=None# Relief appearance of the frame\n self.journal_window.verticalScroll.decButton['geom_scale']=(0.15,0.15,0.15)\n self.journal_window.verticalScroll.thumb['geom']=(self.scrollbar_maps.find(\"**/journal_scroll_button_updown\"))\n self.journal_window.verticalScroll.thumb['relief']=None# Relief appearance of the frame\n self.journal_window.verticalScroll.thumb['geom_scale']=(0.2,0.2,0.2)\n self.journal_window.verticalScroll.thumb['frameVisibleScale']=(0.5,0.25)#Relative scale of the visible frame to its clickable bounds. 
Useful for creating things like the paging region of a slider, which is visibly smaller than the acceptable click region \n self.journal_window.verticalScroll['resizeThumb']=(True)\n self.journal_window.verticalScroll['scrollSize']=(0.1) #change the amount to jump the thumb when user click up/right and down/left\n self.journal_window.verticalScroll['range']=(-1,1)# change the (min, max) range of the thumb\n \n \n #--------UI feedback----------------- \n def changeButtonColor(self):\n for entry in self.journal_entries:\n if (entry[1] == \"Completed\"):\n for b in self.buttons_1:\n if (b['text'] == entry[0]):\n b['text_fg'] = (0,1,0,1)\n \n def flashJournalButton(self,task):\n if task.time <= 0.5:\n self.b['geom_scale']=(0.01,0.01,0.01)\n return task.cont\n \n elif task.time <=1.0:\n self.b['geom_scale']=(1.0,1.0,1.0)\n return task.cont\n elif self.window_show != True:\n return task.done\n else:\n return task.again\n\n def startFlash(self): \n self.task = taskMgr.add(self.flashJournalButton, 'flashJournalButton') \n ","sub_path":"OrelliaSource/Scripts/JournalUI.py","file_name":"JournalUI.py","file_ext":"py","file_size_in_byte":15109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"63383337","text":"# Open file with the list of invited persons\nwith open(\"Input/People/mailing_list.txt\") as file:\n\n # Create a list of the names and clean it adding to a new list\n names = file.readlines()\n stripped_names = []\n for name in names:\n stripped_names.append(name.strip())\n\n # Going through the list of the cleaned names open the template and replace the placeholder [names] by actual name\n for person in stripped_names:\n with open(\"Input/Letter_templates/birthday_invitation.txt\", \"r\") as text:\n contents = text.read()\n contents = contents.replace(\"[name]\", person)\n\n # Create a letter and save it for each person in separate file\n with open(f\"Output/ReadyToSend/ invitation_for_{person}.txt\", mode=\"w\") as new_letter:\n new_letter.write(contents)\n","sub_path":"100 Days/Intermediate/Day 24 - Paths/invitator.py","file_name":"invitator.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"504445802","text":"import cv2\nlabels=[]\nfaces=[]\nsubjects = [\"\", \"Admin\", \"Reese\",\"Root\",\"Fusco\",\"Shaw\"]\nrecognizer=cv2.face.LBPHFaceRecognizer_create()\ndef detect_face(img):\n gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cascPath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascPath)\n if gray is None:\n return None\n faces=faceCascade.detectMultiScale(gray,1.2,10)\n if len(faces)==0:\n return None,None\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255, 0, 0), 2)\n return gray[y:y+w,x:x+h],faces[0]\ndef predict(test_img):\n img=test_img.copy()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cascPath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascPath)\n if gray is None:\n return None\n faces = faceCascade.detectMultiScale(gray, 1.3, 8)\n if len(faces) == 0:\n return None, None\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n face,rect=gray[y:y + w, x:x + h],(x,y,w,h)\n label,confidence=recognizer.predict(face)\n label_text = subjects[label]\n draw_text(img, label_text, rect[0], rect[1] - 5)\n draw_text(img, str(confidence), rect[0]-100, rect[1]+200)\n return img\nrecognizer.read('Trainers/poi.yml')\ndef 
draw_text(img, text, x, y):\n cv2.putText(img, text, (x+5, y+5), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 6)\ntest=cv2.imread(\"poi1.jpg\")\ntest2=cv2.imread(\"poi2.jpg\")\ntest3=cv2.imread(\"poi3.jpg\")\ntest4=cv2.imread(\"poi4.jpg\")\ntest5=cv2.imread(\"poi5.jpg\")\npredicted_img1=predict(test)\npredicted_img2=predict(test2)\npredicted_img3=predict(test3)\npredicted_img4=predict(test4)\npredicted_img5=predict(test5)\ncv2.imshow(subjects[1], cv2.resize(predicted_img1, (400, 500)))\ncv2.imshow(subjects[2], cv2.resize(predicted_img2, (400, 500)))\ncv2.imshow(subjects[3], cv2.resize(predicted_img3, (400, 500)))\ncv2.imshow(subjects[4], cv2.resize(predicted_img4, (400, 500)))\ncv2.imshow(subjects[5], cv2.resize(predicted_img5, (400, 500)))\ncv2.waitKey(100000)\ncv2.destroyAllWindows()\n","sub_path":"yuztanima3.py","file_name":"yuztanima3.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"652570032","text":"from flask import Flask\nfrom read_xml import find_by_xpath\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef main():\n try:\n xcfg = open('config.xml', 'r')\n file_str = xcfg.read()\n name = find_by_xpath(file_str, './/name')\n age = find_by_xpath(file_str, './/age')\n\n return 'asdasdas {} is {} yearsasdas old'.format(name, age)\n except IOError:\n return 'No config found'\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"627078367","text":"# Copyright (c) 2019. TsumiNa. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\nfrom typing import Union, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom xenonpy.model.training import Trainer\nfrom xenonpy.model.training.base import BaseExtension\n\n__all__ = ['TensorConverter']\n\nT_Data = Union[pd.DataFrame, pd.Series, np.ndarray, torch.Tensor]\n\n\nclass TensorConverter(BaseExtension):\n\n def __init__(self, dtype=None, empty_cache: bool = False):\n self.empty_cache = empty_cache\n if dtype is None:\n self.dtype = torch.get_default_dtype()\n else:\n self.dtype = dtype\n\n def input_proc(self, x_in, y_in, trainer: Trainer) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Convert data to :class:`torch.Tensor`.\n\n Parameters\n ----------\n y_in\n x_in\n\n Returns\n -------\n Union[Any, Tuple[Any, Any]]\n\n \"\"\"\n\n def _convert(t):\n if t is None:\n return t\n if isinstance(t, (tuple, list)):\n return tuple([_convert(t_) for t_ in t])\n\n # if tensor, do nothing\n if isinstance(t, torch.Tensor):\n return t.to(trainer.device, non_blocking=trainer.non_blocking)\n # if pandas, turn to numpy\n if isinstance(t, (pd.DataFrame, pd.Series)):\n t = t.values\n # if numpy, turn to tensor\n if isinstance(t, np.ndarray):\n t = torch.from_numpy(t).to(self.dtype)\n # return others\n if not isinstance(t, torch.Tensor):\n return t\n # reshape (1,) to (-1, 1)\n if len(t.size()) == 1:\n t = t.unsqueeze(-1)\n return t.to(trainer.device, non_blocking=trainer.non_blocking)\n\n return _convert(x_in), _convert(y_in)\n\n def step_forward(self):\n if self.empty_cache:\n torch.cuda.empty_cache()\n\n def output_proc(self,\n y_pred: Union[torch.Tensor, Tuple[torch.Tensor]],\n y_true: Union[torch.Tensor, Tuple[torch.Tensor], None],\n training: bool,\n ):\n \"\"\"\n Convert :class:`torch.Tensor` to :class:`numpy.ndarray`.\n\n Parameters\n 
----------\n y_pred: Union[torch.Tensor, Tuple[torch.Tensor]]\n y_true : Union[torch.Tensor, Tuple[torch.Tensor]]\n training: bool\n Specify whether the model in the training mode.\n\n \"\"\"\n\n def _convert(y_):\n if y_ is None:\n return y_\n if isinstance(y_, tuple):\n y_ = tuple([t.detach().cpu().numpy() for t in y_])\n else:\n y_ = y_.detach().cpu().numpy()\n return y_\n\n if not training:\n return _convert(y_pred), _convert(y_true)\n else:\n return y_pred, y_true\n","sub_path":"xenonpy/model/training/extension/tensor_convert.py","file_name":"tensor_convert.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"197893820","text":"from bs4 import BeautifulSoup\nimport os\nimport matplotlib.pyplot as plt\n\n\n\ntotal = 0\ncorrect_count = 0\nhello = dict()\nhello['total'] = 0\nhello['correct'] = 0\n\nopen_file = open(\"frequency.txt\", \"r\").read()\n\ndict1 = eval(open_file)\n\ndict_word={} \ndict_tag={}\n\n\nfor key in dict1:\n\twords=key.split('_') \n\tif(len(words) != 2):\n\t\tcontinue\n\tif words[0] in dict_word :\n\t dict_word[words[0]]=dict_word[words[0]]+dict1[key]\n\telse :\n\t dict_word[words[0]]=dict1[key]\n\n\tif words[1] in dict_tag :\n\t dict_tag[words[1]]=dict_tag[words[1]]+dict1[key]\n\telse :\n\t dict_tag[words[1]]=dict1[key]\n\nsorted_words = sorted(dict_word.items(), key=lambda a : a[1])\nsorted_tag = sorted(dict_tag.items(), key=lambda a : a[1])\n\nsorted_words.reverse()\nsorted_tag.reverse()\n\nlen_words = len(sorted_words)\nlen_tag = len(sorted_tag)\n\nprint(len_words) # 192632\nprint(len_tag) # 87 total tags(including multi-tags), out of which only 57 are separated ones.\n\nprint(sorted_tag)\n\nwrite_file = open(\"top10.txt\", \"w\")\n\nwrite_file.write(\"TOP 10 words based on frequency\\n\")\nfor i in range(0,10):\n\twrite_file.write(str(sorted_words[i]))\n\nwrite_file.write(\"\\n\\n\\n\")\n\nwrite_file.write(\"TOP 10 tags based on frequency\\n\")\nfor i in range(0,10): # min(10,len_tag)\n\twrite_file.write(str(sorted_tag[i]))\n\n# print(len_tag)\n# print(len_words)\n\n# plt.bar(dict_tag.keys(), dict_tag.values())\n# plt.show()\n\nten_words = []\nten_freqs_words = []\n\nfor i in range(0,10):\n\tten_freqs_words.append(sorted_words[i][1])\n\tten_words.append(sorted_words[i][0])\n\nplt.bar(ten_words, ten_freqs_words)\n# plt.show()\n\nten_tags = []\nten_freqs_tags = []\n\nfor i in range(0,10):\n\tten_freqs_tags.append(sorted_tag[i][1])\n\tten_tags.append(sorted_tag[i][0])\n\nplt.bar(ten_tags, ten_freqs_tags)\n# plt.show()\n\n\n#---------------------------------------------------------- WEEK 4 ----------------------------------------------------------\n\n# words_prob = dict()\n# len_word_tag = len(dict1)\n# arr_tags = dict_tag.keys() \n# for key,val in dict_word.items() :\n# \tword_dict = dict()\n# \tfor tag in arr_tags:\n# \t\tw_t = key + \"_\" + tag\n# \t\tword_count = val\n# \t\ttry:\n# \t\t\tword_tag_count = dict1[w_t]\n# \t\texcept KeyError:\n# \t\t\tword_tag_count = 0\n# \t\tword_dict[tag] = word_tag_count/word_count\n# \twords_prob[key] = word_dict\n# \t# print(word_dict)\n\n# dict_size = len(words_prob)\n# print(dict_size) #192632\n\n# words_in_one_file = dict_size/500\n# index = 0\n# fno = 0\n# dic = dict()\n\n# prob_folder = \"word_probability/\"\n\n# prob_file = open(prob_folder+\"words_prob\" + str(fno), \"w\")\n# for key in words_prob:\n# if(index > words_in_one_file):\n# index = 0\n# prob_file.write(str(dic))\n# prob_file.close()\n# fno = fno + 1\n# prob_file = 
open(prob_folder+\"words_prob\" + str(fno), \"w\")\n# dic = dict()\n# dic[key] = words_prob[key]\n# index = index + 1\n# prob_file.close()\n# if(len(dic) > 0):\n# prob_file = open(prob_folder+\"words_prob\" + str(fno), \"w\")\n# prob_file.write(str(dic))\n# prob_file.close()\n\n\n#***************************************************** finding P(e/l) *****************************************************\n\nword_by_tag=dict()\n\nfor tag in dict_tag.keys() :\n\twdict=dict()\n\tfor word in dict_word.keys() :\n\t\twt=word+\"_\"+tag\n\t\tif wt in dict1.keys() :\n\t\t\tfreq=dict1[wt]\n\t\t\twdict[wt.split(\"_\")[0]]=(freq/dict_tag[tag])\n\t\telse :\n\t\t\twdict[wt.split(\"_\")[0]]=0\n\tword_by_tag[tag]=wdict\n\n\n# ------------------------------------------------------- #\n\n# Viterbi Algorithm #\n\n\n\n\ntransition = open('transition.txt', 'r').read()\n\ntransition_prob = eval(transition)\nemission_prob = word_by_tag\n\nprint(emission_prob.keys())\n\naccess_directory = os.listdir(\"Test-corpus/\")\ni=0\nfor ele in access_directory:\n\taccess_directory[i]=\"Test-corpus/\"+ele+\"/\"\n\ti=i+1\n\ntags =[x for x in dict_tag.keys()]\n\ndef printArray(matrix):\n\tfor row in matrix:\n\t\tprint(row)\n\n\n\ndef recurse_tags(rm, i, sentence, hello):\n\ttcount = 0\n\tccount = 0\n\twords = sentence.find_all('w')\n\tactual = []\n\tpredicted = []\n\tfor word in words:\n\t\tactual.append(word.get('c5'))\n\tsize = len(rm[i]) - 1\n\twhile size:\n\t\ttag = tags[i]\n\t\tpredicted.insert(0, tag)\n\t\ti = rm[i][size]\n\t\tsize = size - 1\n\tfor i in range(len(predicted)):\n\t\ttcount = tcount + 1\n\t\tif(actual[i] == predicted[i]):\n\t\t\tccount = ccount + 1\n\thello['total'] = hello['total'] + tcount\n\thello['correct'] = hello['correct'] + ccount\n\tprint(str(tcount) + str(ccount))\n\noutput = open('log.txt', 'w')\nfor dirs in access_directory:\n\tif dirs == \"Test-corpus/Cleaned_files/\":\n\t\tcontinue\n\tfiles = os.listdir(dirs)\n\tfor file in files:\n\t\tfilename = open(dirs+file)\n\t\tcontent = BeautifulSoup(filename, features=\"lxml\")\n\t\tsent_arr = content.find_all(\"s\") # handling the sentence tags\n\t\tfor sentence in sent_arr:\n\t\t\twords = sentence.find_all(\"w\")\n\t\t\tlength = len(words)\n\t\t\t# print(words)\n\t\t\t# print(length)\n\t\t\tdp = []\n\t\t\trm = []\n\t\t\tfor i in range(57):\n\t\t\t\tdp.append([])\n\t\t\t\trm.append([])\n\t\t\tfor i in range(57):\n\t\t\t\tdp[i].append(1)\n\t\t\t\trm[i].append(1)\n\t\t\t\tfor j in range(length):\n\t\t\t\t\tdp[i].append(0)\n\t\t\t\t\trm[i].append(0)\n\t\t\tfor k in range(1, length+1):\n\t\t\t\tfor i in range(57):\n\t\t\t\t\tmax_prob = -200000\n\t\t\t\t\ttag2 = tags[i]\t\n\t\t\t\t\t# print(tag2)\n\t\t\t\t\t# print(\"\\n\")\n\t\t\t\t\tfor j in range(57):\n\t\t\t\t\t\t\n\t\t\t\t\t\ttag1 = tags[j]\n\t\t\t\t\t\t# print(str(j) + \" \" + tag1)\n\t\t\t\t\t\tif k == 1:\n\t\t\t\t\t\t\ttag1 = '^'\n\n\t\t\t\t\t\t# print(tag1 + \" \" + tag2)\n\t\t\t\t\t\tTP = transition_prob[tag1 + '_' + tag2]\n\t\t\t\t\t\t# print(TP)\n\t\t\t\t\t\tword = words[k-1].get_text().strip()\n\t\t\t\t\t\tif word in emission_prob[tag2].keys() :\n\t\t\t\t\t\t\tEP = emission_prob[tag2][word]\n\t\t\t\t\t\t\t# print(word)\n\t\t\t\t\t\t\t# print(EP)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tEP = 0\n\t\t\t\t\t\tvalue = dp[j][k-1] * TP * EP\n\t\t\t\t\t\tif value > max_prob:\n\t\t\t\t\t\t\tmax_prob = value\n\t\t\t\t\t\t\trm[i][k] = j\n\t\t\t\t\tdp[i][k] = max_prob\n\t\t\t#print(rm)\n\t\t\tmax_proba = -1\n\t\t\tstart = 57\n\t\t\tfor i in range(57):\n\t\t\t\ttag1 = tags[i]\n\t\t\t\ttag2 = '.'\n\t\t\t\tTP = 
transition_prob[tag1 + '_' + tag2]\n\t\t\t\t# use the probability accumulated in the last column of the Viterbi table for tag i\n\t\t\t\t# (the original dp[j][k-1] read stale loop variables from the fill step above)\n\t\t\t\tvalue = dp[i][length] * TP\n\t\t\t\tif value > max_proba:\n\t\t\t\t\tmax_proba = value\n\t\t\t\t\tstart = i\n\n\t\t\trecurse_tags(rm, start, sentence, hello)\n\n\n\t\tbreak\n\tbreak\n\nprint(dict_tag.keys())\n\nprint(hello)","sub_path":"top10.py","file_name":"top10.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"66791166","text":"import turtle\r\nimport random\r\n\r\n \r\n#Define Variable\r\nartist = turtle.Turtle()\r\nturtle.colormode(255)\r\nwn = turtle.Screen()\r\nwn.title(\"Random Shape Spawner\")\r\nartist.speed(1000)\r\ndef randomColor():\r\n randomR = random.randrange(0,255)\r\n randomG = random.randrange(0,255)\r\n randomB = random.randrange(0,255)\r\n artist.pencolor((randomR,randomG,randomB))\r\n artist.fillcolor((randomR,randomG,randomB))\r\n\r\ndef randomLocation():\r\n #Define random RGB\r\n randomColor()\r\n artist.penup()\r\n #Detect the screen and find a random place\r\n screenX,screenY = turtle.screensize()\r\n randomScreenX = random.randrange(-(screenX)-50,screenX-50)\r\n randomScreenY = random.randrange(-(screenY),screenY+50)\r\n artist.goto(randomScreenX,randomScreenY)\r\n print(screenX,screenY)\r\n artist.pendown()\r\n\r\n#########################################################################################################################################\r\n#Draw Functions\r\ndef drawPolygon(polySides,polyLength):\r\n numberOfSides = polySides\r\n lengthOfSides = polyLength\r\n angleOfSides = 360/numberOfSides\r\n artist.begin_fill()\r\n for i in range(numberOfSides):\r\n artist.forward(lengthOfSides)\r\n artist.right(angleOfSides)\r\n artist.end_fill()\r\n\r\ndef drawStar():\r\n artist.begin_fill()\r\n for i in range(5):\r\n artist.forward(100)\r\n artist.right(144)\r\n artist.end_fill()\r\n\r\ndef main():\r\n times = 0\r\n while times < 100:\r\n drawPolygon(random.randrange(3,10),50)\r\n randomLocation()\r\n drawStar()\r\n randomLocation()\r\n times = times + 1\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"RandomTurtleScramble.py","file_name":"RandomTurtleScramble.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"347889493","text":"from flask import Flask, render_template, request, session, url_for, redirect, flash\nfrom flask.ext.api import FlaskAPI\nfrom flask_oauth import OAuth\nfrom werkzeug.wsgi import DispatcherMiddleware\nfrom werkzeug.serving import run_simple\nimport os\nimport re\n\napp = FlaskAPI(__name__)\napp.config.update(\n DEBUG = (os.environ.get(\"DEV\") or \"\").lower() == \"true\",\n DEFAULT_RENDERERS = [\"flask.ext.api.renderers.HTMLRenderer\"],\n SECRET_KEY = \"asdf\"\n)\n\noauth = OAuth()\nfb = oauth.remote_app(\"facebook\",\n base_url = \"https://graph.facebook.com/\",\n request_token_url = None,\n access_token_url = \"/oauth/access_token\",\n authorize_url = \"https://www.facebook.com/dialog/oauth\",\n consumer_key = os.environ.get(\"FB_APP_ID\"),\n consumer_secret = os.environ.get(\"FB_APP_SECRET\"),\n request_token_params = {\"scope\": \"email\"}\n)\n\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n@app.route(\"/login\")\ndef login():\n return fb.authorize(callback = \"%s%s\" % (\n re.sub(r\"/$\", \"\", os.environ.get(\"HOST\")),\n url_for(\"oauth_authorized\", next = request.args.get(\"next\") or request.referrer or None)\n 
))\n\n@app.route(\"/oauth-authorized\")\n@fb.authorized_handler\ndef oauth_authorized(res):\n next_url = request.args.get(\"next\") or url_for(\"personal\")\n if res is None:\n flash(u\"Whatever, man.\")\n return redirect(next_url)\n\n session[\"access_token\"] = res[\"access_token\"]\n\n flash(\"Signed in as %s\" % res[\"access_token\"])\n return redirect(next_url)\n\n@app.route(\"/personal\")\ndef personal():\n return render_template(\"personal.html\", token = session.get(\"access_token\"))\n\n@fb.tokengetter\ndef get_fb_user_token(token = None):\n return session.get(\"access_token\")\n\nservice = DispatcherMiddleware(app)\n\nif __name__ == \"__main__\":\n run_simple(\n \"localhost\", 8000, service,\n use_reloader = app.debug,\n use_debugger = app.debug\n )\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"455357346","text":"import html\nimport json\nimport os\nimport subprocess\nimport sys\nimport threading\nimport re\nimport time\n\nfrom collections import OrderedDict\nfrom urllib.parse import urljoin\nfrom urllib.parse import urlparse\nfrom urllib.request import pathname2url\nfrom urllib.request import url2pathname\ntry:\n from typing import Any, List, Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\nimport sublime_plugin\nimport sublime\n\nimport mdpopups\n\n\nPLUGIN_NAME = 'LSP'\nSUBLIME_WORD_MASK = 515\nNO_HOVER_SCOPES = 'comment, constant, keyword, storage, string'\nNO_COMPLETION_SCOPES = 'comment, string'\nshow_status_messages = True\nshow_view_status = True\nauto_show_diagnostics_panel = True\nshow_diagnostics_phantoms = False\nshow_diagnostics_in_view_status = True\nonly_show_lsp_completions = False\ndiagnostics_highlight_style = \"underline\"\ndiagnostics_gutter_marker = \"dot\"\ncomplete_all_chars = False\nresolve_completion_for_snippets = False\nlog_debug = True\nlog_server = True\nlog_stderr = False\n\nwaiting_for_RLS = False\n\nglobal_client_configs = [] # type: List[ClientConfig]\nwindow_client_configs = dict() # type: Dict[int, List[ClientConfig]]\n\n\nclass DiagnosticSeverity(object):\n Error = 1\n Warning = 2\n Information = 3\n Hint = 4\n\n\ndiagnostic_severity_names = {\n DiagnosticSeverity.Error: \"error\",\n DiagnosticSeverity.Warning: \"warning\",\n DiagnosticSeverity.Information: \"info\",\n DiagnosticSeverity.Hint: \"hint\"\n}\n\ndiagnostic_severity_scopes = {\n DiagnosticSeverity.Error: 'markup.deleted.lsp sublimelinter.mark.error markup.error.lsp',\n DiagnosticSeverity.Warning: 'markup.changed.lsp sublimelinter.mark.warning markup.warning.lsp',\n DiagnosticSeverity.Information: 'markup.inserted.lsp sublimelinter.gutter-mark markup.info.lsp',\n DiagnosticSeverity.Hint: 'markup.inserted.lsp sublimelinter.gutter-mark markup.info.suggestion.lsp'\n}\n\n\nclass SymbolKind(object):\n File = 1\n Module = 2\n Namespace = 3\n Package = 4\n Class = 5\n Method = 6\n Property = 7\n Field = 8\n Constructor = 9\n Enum = 10\n Interface = 11\n Function = 12\n Variable = 13\n Constant = 14\n String = 15\n Number = 16\n Boolean = 17\n Array = 18\n\n\nsymbol_kind_names = {\n SymbolKind.File: \"file\",\n SymbolKind.Module: \"module\",\n SymbolKind.Namespace: \"namespace\",\n SymbolKind.Package: \"package\",\n SymbolKind.Class: \"class\",\n SymbolKind.Method: \"method\",\n SymbolKind.Function: \"function\",\n SymbolKind.Field: \"field\",\n SymbolKind.Variable: 
\"variable\",\n SymbolKind.Constant: \"constant\"\n}\n\n\nclass CompletionItemKind(object):\n Text = 1\n Method = 2\n Function = 3\n Constructor = 4\n Field = 5\n Variable = 6\n Class = 7\n Interface = 8\n Module = 9\n Property = 10\n Unit = 11\n Value = 12\n Enum = 13\n Keyword = 14\n Snippet = 15\n Color = 16\n File = 17\n Reference = 18\n\n\ncompletion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}\n\n\nclass Request:\n def __init__(self, method, params):\n self.method = method\n self.params = params\n self.jsonrpc = \"2.0\"\n\n @classmethod\n def initialize(cls, params):\n return Request(\"initialize\", params)\n\n @classmethod\n def hover(cls, params):\n return Request(\"textDocument/hover\", params)\n\n @classmethod\n def complete(cls, params):\n return Request(\"textDocument/completion\", params)\n\n @classmethod\n def signatureHelp(cls, params):\n return Request(\"textDocument/signatureHelp\", params)\n\n @classmethod\n def references(cls, params):\n return Request(\"textDocument/references\", params)\n\n @classmethod\n def definition(cls, params):\n return Request(\"textDocument/definition\", params)\n\n @classmethod\n def rename(cls, params):\n return Request(\"textDocument/rename\", params)\n\n @classmethod\n def codeAction(cls, params):\n return Request(\"textDocument/codeAction\", params)\n\n @classmethod\n def executeCommand(cls, params):\n return Request(\"workspace/executeCommand\", params)\n\n @classmethod\n def formatting(cls, params):\n return Request(\"textDocument/formatting\", params)\n\n @classmethod\n def documentSymbols(cls, params):\n return Request(\"textDocument/documentSymbol\", params)\n\n @classmethod\n def resolveCompletionItem(cls, params):\n return Request(\"completionItem/resolve\", params)\n\n def __repr__(self):\n return self.method + \" \" + str(self.params)\n\n def to_payload(self, id):\n r = OrderedDict() # type: OrderedDict[str, Any]\n r[\"jsonrpc\"] = \"2.0\"\n r[\"id\"] = id\n r[\"method\"] = self.method\n if self.params is not None:\n r[\"params\"] = self.params\n else:\n r[\"params\"] = dict()\n return r\n\n\nclass Notification:\n def __init__(self, method, params):\n self.method = method\n self.params = params\n self.jsonrpc = \"2.0\"\n\n @classmethod\n def initialized(cls):\n return Notification(\"initialized\", None)\n\n @classmethod\n def didOpen(cls, params):\n return Notification(\"textDocument/didOpen\", params)\n\n @classmethod\n def didChange(cls, params):\n return Notification(\"textDocument/didChange\", params)\n\n @classmethod\n def didSave(cls, params):\n return Notification(\"textDocument/didSave\", params)\n\n @classmethod\n def didClose(cls, params):\n return Notification(\"textDocument/didClose\", params)\n\n @classmethod\n def didChangeConfiguration(cls, params):\n return Notification(\"workspace/didChangeConfiguration\", params)\n\n @classmethod\n def exit(cls):\n return Notification(\"exit\", None)\n\n def __repr__(self):\n return self.method + \" \" + str(self.params)\n\n def to_payload(self):\n r = OrderedDict() # type: OrderedDict[str, Any]\n r[\"jsonrpc\"] = \"2.0\"\n r[\"method\"] = self.method\n if self.params is not None:\n r[\"params\"] = self.params\n else:\n r[\"params\"] = dict()\n return r\n\n\nclass Point(object):\n def __init__(self, row: int, col: int) -> None:\n self.row = int(row)\n self.col = int(col)\n\n def __repr__(self):\n return \"{}:{}\".format(self.row, self.col)\n\n @classmethod\n def from_lsp(cls, point: dict) -> 'Point':\n return Point(point['line'], 
point['character'])\n\n def to_lsp(self) -> dict:\n r = OrderedDict() # type: OrderedDict[str, Any]\n r['line'] = self.row\n r['character'] = self.col\n return r\n\n @classmethod\n def from_text_point(self, view: sublime.View, point: int) -> 'Point':\n return Point(*view.rowcol(point))\n\n def to_text_point(self, view) -> int:\n return view.text_point(self.row, self.col)\n\n\nclass Range(object):\n def __init__(self, start: Point, end: Point) -> None:\n self.start = start\n self.end = end\n\n def __repr__(self):\n return \"({} {})\".format(self.start, self.end)\n\n @classmethod\n def from_lsp(cls, range: dict) -> 'Range':\n return Range(Point.from_lsp(range['start']), Point.from_lsp(range['end']))\n\n def to_lsp(self) -> dict:\n r = OrderedDict() # type: OrderedDict[str, Any]\n r['start'] = self.start.to_lsp()\n r['end'] = self.end.to_lsp()\n return r\n\n @classmethod\n def from_region(self, view: sublime.View, region: sublime.Region) -> 'Range':\n return Range(\n Point.from_text_point(view, region.begin()),\n Point.from_text_point(view, region.end())\n )\n\n def to_region(self, view: sublime.View) -> sublime.Region:\n return sublime.Region(self.start.to_text_point(view), self.end.to_text_point(view))\n\n\nclass Diagnostic(object):\n def __init__(self, message, range, severity, source, lsp_diagnostic):\n self.message = message\n self.range = range\n self.severity = severity\n self.source = source\n self._lsp_diagnostic = lsp_diagnostic\n\n @classmethod\n def from_lsp(cls, lsp_diagnostic):\n return Diagnostic(\n # crucial keys\n lsp_diagnostic['message'],\n Range.from_lsp(lsp_diagnostic['range']),\n # optional keys\n lsp_diagnostic.get('severity', DiagnosticSeverity.Error),\n lsp_diagnostic.get('source'),\n lsp_diagnostic\n )\n\n def to_lsp(self):\n return self._lsp_diagnostic\n\n\ndef read_client_config(name, client_config):\n return ClientConfig(\n name,\n client_config.get(\"command\", []),\n client_config.get(\"scopes\", []),\n client_config.get(\"syntaxes\", []),\n client_config.get(\"languageId\", \"\"),\n client_config.get(\"enabled\", True),\n client_config.get(\"initializationOptions\", dict())\n )\n\n\ndef load_settings():\n settings_obj = sublime.load_settings(\"LSP.sublime-settings\")\n update_settings(settings_obj)\n settings_obj.add_on_change(\"_on_new_settings\", lambda: update_settings(settings_obj))\n\n\ndef read_bool_setting(settings_obj: sublime.Settings, key: str, default: bool) -> bool:\n val = settings_obj.get(key)\n if isinstance(val, bool):\n return val\n else:\n return default\n\n\ndef read_str_setting(settings_obj: sublime.Settings, key: str, default: str) -> str:\n val = settings_obj.get(key)\n if isinstance(val, str):\n return val\n else:\n return default\n\n\ndef update_settings(settings_obj: sublime.Settings):\n global show_status_messages\n global show_view_status\n global auto_show_diagnostics_panel\n global show_diagnostics_phantoms\n global show_diagnostics_in_view_status\n global only_show_lsp_completions\n global diagnostics_highlight_style\n global diagnostics_gutter_marker\n global complete_all_chars\n global resolve_completion_for_snippets\n global log_debug\n global log_server\n global log_stderr\n global global_client_configs\n\n global_client_configs = []\n client_configs = settings_obj.get(\"clients\", {})\n if isinstance(client_configs, dict):\n for client_name, client_config in client_configs.items():\n config = read_client_config(client_name, client_config)\n if config:\n debug(\"Config added:\", client_name, '(enabled)' if config.enabled else 
'(disabled)')\n global_client_configs.append(config)\n else:\n raise ValueError(\"client_configs\")\n\n show_status_messages = read_bool_setting(settings_obj, \"show_status_messages\", True)\n show_view_status = read_bool_setting(settings_obj, \"show_view_status\", True)\n auto_show_diagnostics_panel = read_bool_setting(settings_obj, \"auto_show_diagnostics_panel\", True)\n show_diagnostics_phantoms = read_bool_setting(settings_obj, \"show_diagnostics_phantoms\", False)\n show_diagnostics_in_view_status = read_bool_setting(settings_obj, \"show_diagnostics_in_view_status\", True)\n diagnostics_highlight_style = read_str_setting(settings_obj, \"diagnostics_highlight_style\", \"underline\")\n diagnostics_gutter_marker = read_str_setting(settings_obj, \"diagnostics_gutter_marker\", \"dot\")\n only_show_lsp_completions = read_bool_setting(settings_obj, \"only_show_lsp_completions\", False)\n complete_all_chars = read_bool_setting(settings_obj, \"complete_all_chars\", True)\n resolve_completion_for_snippets = read_bool_setting(settings_obj, \"resolve_completion_for_snippets\", False)\n log_debug = read_bool_setting(settings_obj, \"log_debug\", False)\n log_server = read_bool_setting(settings_obj, \"log_server\", True)\n log_stderr = read_bool_setting(settings_obj, \"log_stderr\", False)\n\n\nclass ClientConfig(object):\n def __init__(self, name, binary_args, scopes, syntaxes, languageId,\n enabled=True, init_options=dict(), settings=dict()):\n self.name = name\n self.binary_args = binary_args\n self.scopes = scopes\n self.syntaxes = syntaxes\n self.languageId = languageId\n self.enabled = enabled\n self.init_options = init_options\n self.settings = settings\n\n\ndef format_request(payload: 'Dict[str, Any]'):\n \"\"\"Converts the request into json and adds the Content-Length header\"\"\"\n content = json.dumps(payload, sort_keys=False)\n content_length = len(content)\n result = \"Content-Length: {}\\r\\n\\r\\n{}\".format(content_length, content)\n return result\n\n\nclass Client(object):\n def __init__(self, process, project_path):\n self.process = process\n self.stdout_thread = threading.Thread(target=self.read_stdout)\n self.stdout_thread.start()\n self.stderr_thread = threading.Thread(target=self.read_stderr)\n self.stderr_thread.start()\n self.project_path = project_path\n self.request_id = 0\n self.handlers = {} # type: Dict[int, Callable]\n self.capabilities = {} # type: Dict[str, Any]\n\n def set_capabilities(self, capabilities):\n self.capabilities = capabilities\n\n def get_project_path(self):\n return self.project_path\n\n def has_capability(self, capability):\n return capability in self.capabilities\n\n def get_capability(self, capability):\n return self.capabilities.get(capability)\n\n def send_request(self, request: Request, handler: 'Callable'):\n self.request_id += 1\n debug('request {}: {} '.format(self.request_id, request.method))\n if handler is not None:\n self.handlers[self.request_id] = handler\n self.send_payload(request.to_payload(self.request_id))\n\n def send_notification(self, notification: Notification):\n debug('notify: ' + notification.method)\n self.send_payload(notification.to_payload())\n\n def kill(self):\n self.process.kill()\n\n def send_payload(self, payload):\n try:\n message = format_request(payload)\n self.process.stdin.write(bytes(message, 'UTF-8'))\n self.process.stdin.flush()\n except BrokenPipeError as e:\n printf(\"client unexpectedly died:\", e)\n\n def read_stdout(self):\n \"\"\"\n Reads JSON responses from process and dispatch them to 
response_handler\n \"\"\"\n ContentLengthHeader = b\"Content-Length: \"\n\n while True:\n try:\n\n in_headers = True\n content_length = 0\n while in_headers:\n header = self.process.stdout.readline()\n if not header:\n # stdout is a byte stream; an empty read means EOF\n break\n else:\n header = header.strip()\n if (len(header) == 0):\n in_headers = False\n\n if header.startswith(ContentLengthHeader):\n content_length = int(header[len(ContentLengthHeader):])\n\n if (content_length > 0):\n content = self.process.stdout.read(content_length).decode(\"UTF-8\")\n\n payload = None\n try:\n payload = json.loads(content)\n limit = min(len(content), 200)\n if payload.get(\"method\") != \"window/logMessage\":\n debug(\"got json: \", content[0:limit], \"...\")\n except ValueError:\n # json.loads raises ValueError on malformed input\n printf(\"Got a non-JSON payload: \", content)\n continue\n\n try:\n if \"error\" in payload:\n error = payload['error']\n printf(\"Got error from server: \", error)\n sublime.status_message(error.get('message'))\n elif \"method\" in payload:\n if \"id\" in payload:\n self.request_handler(payload)\n else:\n self.notification_handler(payload)\n elif \"id\" in payload:\n self.response_handler(payload)\n else:\n debug(\"Unknown payload type: \", payload)\n except Exception as err:\n printf(\"Error handling server content:\", err)\n\n except IOError:\n printf(\"LSP stdout process ending due to exception: \", sys.exc_info())\n self.process.terminate()\n self.process = None\n return\n\n debug(\"LSP stdout process ended.\")\n\n def read_stderr(self):\n \"\"\"\n Reads any errors from the LSP process.\n \"\"\"\n while True:\n try:\n content = self.process.stderr.readline()\n if len(content) == 0:\n break\n if log_stderr:\n printf(\"(stderr): \", content.strip())\n except IOError:\n printf(\"LSP stderr process ending due to exception: \", sys.exc_info())\n return\n\n debug(\"LSP stderr process ended.\")\n\n def response_handler(self, response):\n handler_id = None\n try:\n handler_id = int(response.get(\"id\")) # dotty sends strings back :(\n result = response.get('result', None)\n handler = self.handlers.get(handler_id)\n if handler:\n handler(result)\n else:\n debug(\"No handler found for id \" + str(response.get(\"id\")))\n except Exception:\n debug(\"error handling response\", handler_id)\n raise\n\n def request_handler(self, request):\n method = request.get(\"method\")\n if method == \"workspace/applyEdit\":\n apply_workspace_edit(sublime.active_window(), request.get(\"params\"))\n else:\n debug(\"Unhandled request\", method)\n\n def notification_handler(self, response):\n global waiting_for_RLS\n method = response.get(\"method\")\n if method == \"textDocument/publishDiagnostics\":\n Events.publish(\"document.diagnostics\", response.get(\"params\"))\n elif method == \"window/showMessage\":\n sublime.active_window().message_dialog(response.get(\"params\").get(\"message\"))\n elif method == \"window/logMessage\" and log_server:\n server_log(self.process.args[0], response.get(\"params\").get(\"message\"))\n elif method == \"window/progress\":\n waiting_for_RLS = response.get(\"params\").get(\"done\") is not True\n if waiting_for_RLS:\n sublime.active_window().status_message(\"Waiting on RLS...\")\n else:\n debug(\"Unhandled notification:\", method)\n\n\ndef debug(*args):\n \"\"\"Print args to the console if the \"debug\" setting is True.\"\"\"\n if log_debug:\n printf(*args)\n\n\ndef server_log(binary, *args):\n printf(*args, prefix=binary)\n\n\ndef printf(*args, prefix=PLUGIN_NAME):\n \"\"\"Print args to the console, prefixed by the plugin name.\"\"\"\n print(prefix + \":\", *args)\n\n\ndef 
get_project_path(window: sublime.Window) -> 'Optional[str]':\n \"\"\"\n Returns the common root of all open folders in the window\n \"\"\"\n if len(window.folders()):\n folder_paths = window.folders()\n return folder_paths[0]\n else:\n filename = window.active_view().file_name()\n if filename:\n project_path = os.path.dirname(filename)\n debug(\"Couldn't determine project directory since no folders are open!\",\n \"Using\", project_path, \"as a fallback.\")\n return project_path\n else:\n debug(\"Couldn't determine project directory since no folders are open\",\n \"and the current file isn't saved on the disk.\")\n return None\n\n\ndef get_common_parent(paths: 'List[str]') -> str:\n \"\"\"\n Get the common parent directory of multiple paths.\n\n Python 3.5+ includes os.path.commonpath which does this, however Sublime\n currently embeds Python 3.3.\n \"\"\"\n return os.path.commonprefix([path + '/' for path in paths]).rstrip('/')\n\n\ndef is_in_workspace(window: sublime.Window, file_path: str) -> bool:\n workspace_path = get_project_path(window)\n if workspace_path is None:\n return False\n\n common_dir = get_common_parent([workspace_path, file_path])\n return workspace_path == common_dir\n\n\ndef plugin_loaded():\n load_settings()\n Events.subscribe(\"view.on_load_async\", initialize_on_open)\n Events.subscribe(\"view.on_activated_async\", initialize_on_open)\n if show_status_messages:\n sublime.status_message(\"LSP initialized\")\n start_active_view()\n\n\ndef start_active_view():\n window = sublime.active_window()\n if window:\n view = window.active_view()\n if view and is_supported_view(view):\n initialize_on_open(view)\n\n\ndef check_window_unloaded():\n global clients_by_window\n open_window_ids = list(window.id() for window in sublime.windows())\n iterable_clients_by_window = clients_by_window.copy()\n closed_windows = []\n for id, window_clients in iterable_clients_by_window.items():\n if id not in open_window_ids:\n debug(\"window closed\", id)\n closed_windows.append(id)\n for closed_window_id in closed_windows:\n unload_window_clients(closed_window_id)\n\n\ndef unload_window_clients(window_id: int):\n global clients_by_window\n if window_id in clients_by_window:\n window_clients = clients_by_window[window_id]\n del clients_by_window[window_id]\n for config, client in window_clients.items():\n debug(\"unloading client\", config, client)\n unload_client(client)\n\n\ndef unload_client(client: Client):\n debug(\"unloading client\", client)\n try:\n client.send_notification(Notification.exit())\n client.kill()\n except Exception as e:\n debug(\"error exiting\", e)\n\n\ndef plugin_unloaded():\n for window in sublime.windows():\n for client in window_clients(window).values():\n unload_client(client)\n\n\ndef get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':\n for config in configs:\n for scope in config.scopes:\n if len(view.sel()) > 0:\n if view.match_selector(view.sel()[0].begin(), scope):\n return config\n\n return None\n\n\ndef get_global_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n return get_scope_client_config(view, global_client_configs)\n\n\ndef get_project_config(view: sublime.View) -> dict:\n view_settings = view.settings().get('LSP', dict())\n return view_settings if view_settings else dict()\n\n\ndef get_window_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n if view.window():\n configs_for_window = window_client_configs.get(view.window().id(), [])\n return 
get_scope_client_config(view, configs_for_window)\n else:\n return None\n\n\ndef add_window_client_config(window: 'sublime.Window', config: 'ClientConfig'):\n global window_client_configs\n window_client_configs.setdefault(window.id(), []).append(config)\n\n\ndef apply_window_settings(client_config: 'ClientConfig', view: 'sublime.View') -> 'ClientConfig':\n window_config = get_project_config(view)\n\n if client_config.name in window_config:\n overrides = window_config[client_config.name]\n debug('window has override for', client_config.name, overrides)\n merged_init_options = dict(client_config.init_options)\n merged_init_options.update(overrides.get(\"initializationOptions\", dict()))\n return ClientConfig(\n client_config.name,\n overrides.get(\"command\", client_config.binary_args),\n overrides.get(\"scopes\", client_config.scopes),\n overrides.get(\"syntaxes\", client_config.syntaxes),\n overrides.get(\"languageId\", client_config.languageId),\n overrides.get(\"enabled\", client_config.enabled),\n merged_init_options,\n overrides.get(\"settings\", dict()))\n else:\n return client_config\n\n\ndef config_for_scope(view: sublime.View) -> 'Optional[ClientConfig]':\n # check window_client_config first\n window_client_config = get_window_client_config(view)\n if not window_client_config:\n global_client_config = get_global_client_config(view)\n if global_client_config and view.window():\n window_client_config = apply_window_settings(global_client_config, view)\n add_window_client_config(view.window(), window_client_config)\n return window_client_config\n\n return window_client_config\n\n\ndef is_supported_syntax(syntax: str) -> bool:\n for config in global_client_configs:\n if syntax in config.syntaxes:\n return True\n return False\n\n\ndef is_supported_view(view: sublime.View) -> bool:\n # TODO: perhaps make this check for a client instead of a config\n if config_for_scope(view):\n return True\n else:\n return False\n\n\nTextDocumentSyncKindNone = 0\nTextDocumentSyncKindFull = 1\nTextDocumentSyncKindIncremental = 2\n\ndidopen_after_initialize = list()\nunsubscribe_initialize_on_load = None\nunsubscribe_initialize_on_activated = None\n\n\ndef filename_to_uri(path: str) -> str:\n return urljoin('file:', pathname2url(path))\n\n\ndef uri_to_filename(uri: str) -> str:\n return url2pathname(urlparse(uri).path)\n\n\ndef client_for_view(view: sublime.View) -> 'Optional[Client]':\n config = config_for_scope(view)\n if not config:\n debug(\"config not available for view\", view.file_name())\n return None\n clients = window_clients(view.window())\n if config.name not in clients:\n debug(config.name, \"not available for view\",\n view.file_name(), \"in window\", view.window().id())\n return None\n else:\n return clients[config.name]\n\n\nclients_by_window = {} # type: Dict[int, Dict[str, Client]]\n\n\ndef window_clients(window: sublime.Window) -> 'Dict[str, Client]':\n global clients_by_window\n if window.id() in clients_by_window:\n return clients_by_window[window.id()]\n else:\n debug(\"no clients found for window\", window.id())\n return {}\n\n\ndef initialize_on_open(view: sublime.View):\n if not view.window():\n return\n\n window = view.window()\n\n if window.id() in clients_by_window:\n unload_old_clients(window)\n\n global didopen_after_initialize\n config = config_for_scope(view)\n if config:\n if config.enabled:\n if config.name not in window_clients(window):\n didopen_after_initialize.append(view)\n get_window_client(view, config)\n else:\n debug(config.name, 'is not enabled')\n\n\ndef 
unload_old_clients(window: sublime.Window):\n project_path = get_project_path(window)\n clients_by_config = window_clients(window)\n clients_to_unload = {}\n for config_name, client in clients_by_config.items():\n if client and client.get_project_path() != project_path:\n debug('unload', config_name, 'project path changed from ', client.get_project_path())\n clients_to_unload[config_name] = client\n\n for config_name, client in clients_to_unload.items():\n unload_client(client)\n del clients_by_config[config_name]\n\n\ndef notify_did_open(view: sublime.View):\n config = config_for_scope(view)\n client = client_for_view(view)\n if client and config:\n view.settings().set(\"show_definitions\", False)\n if view.file_name() not in document_states:\n ds = get_document_state(view.file_name())\n if show_view_status:\n view.set_status(\"lsp_clients\", config.name)\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(view.file_name()),\n \"languageId\": config.languageId,\n \"text\": view.substr(sublime.Region(0, view.size())),\n \"version\": ds.version\n }\n }\n client.send_notification(Notification.didOpen(params))\n sublime.set_timeout_async(lambda: annotate_types(view, False), 100)\n\n\ndef notify_did_close(view: sublime.View):\n if view.file_name() in document_states:\n del document_states[view.file_name()]\n config = config_for_scope(view)\n clients = window_clients(sublime.active_window())\n if config and config.name in clients:\n client = clients[config.name]\n params = {\"textDocument\": {\"uri\": filename_to_uri(view.file_name())}}\n client.send_notification(Notification.didClose(params))\n\n\ndef notify_did_save(view: sublime.View):\n if view.file_name() in document_states:\n client = client_for_view(view)\n if client:\n params = {\"textDocument\": {\"uri\": filename_to_uri(view.file_name())}}\n client.send_notification(Notification.didSave(params))\n sublime.set_timeout_async(lambda: annotate_types(view, False), 300)\n else:\n debug('document not tracked', view.file_name())\n\n\n# TODO: this should be per-window ?\ndocument_states = {} # type: Dict[str, DocumentState]\n\n\nclass DocumentState:\n \"\"\"Stores version count for documents open in a language service\"\"\"\n def __init__(self, path: str) -> 'None':\n self.path = path\n self.version = 0\n\n def inc_version(self):\n self.version += 1\n return self.version\n\n\ndef get_document_state(path: str) -> DocumentState:\n if path not in document_states:\n document_states[path] = DocumentState(path)\n return document_states[path]\n\n\npending_buffer_changes = dict() # type: Dict[int, Dict]\n\n\ndef queue_did_change(view: sublime.View):\n buffer_id = view.buffer_id()\n buffer_version = 1\n pending_buffer = None\n if buffer_id in pending_buffer_changes:\n pending_buffer = pending_buffer_changes[buffer_id]\n buffer_version = pending_buffer[\"version\"] + 1\n pending_buffer[\"version\"] = buffer_version\n else:\n pending_buffer_changes[buffer_id] = {\n \"view\": view,\n \"version\": buffer_version\n }\n\n sublime.set_timeout_async(\n lambda: purge_did_change(buffer_id, buffer_version), 500)\n\n\ndef purge_did_change(buffer_id: int, buffer_version=None):\n if buffer_id not in pending_buffer_changes:\n return\n\n pending_buffer = pending_buffer_changes.get(buffer_id)\n\n if pending_buffer:\n if buffer_version is None or buffer_version == pending_buffer[\"version\"]:\n notify_did_change(pending_buffer[\"view\"])\n\n\ndef notify_did_change(view: sublime.View):\n if view.buffer_id() in pending_buffer_changes:\n del 
pending_buffer_changes[view.buffer_id()]\n # config = config_for_scope(view)\n client = client_for_view(view)\n if client:\n document_state = get_document_state(view.file_name())\n uri = filename_to_uri(view.file_name())\n params = {\n \"textDocument\": {\n \"uri\": uri,\n # \"languageId\": config.languageId, clangd does not like this field, but no server uses it?\n \"version\": document_state.inc_version(),\n },\n \"contentChanges\": [{\n \"text\": view.substr(sublime.Region(0, view.size()))\n }]\n }\n client.send_notification(Notification.didChange(params))\n\n point = view.sel()[0].begin()\n if view.substr(point - 1) == '(':\n sublime.set_timeout_async(lambda: annotate_types(view, True), 200)\n\n\ndocument_sync_initialized = False\n\n\ndef initialize_document_sync(text_document_sync_kind):\n global document_sync_initialized\n if document_sync_initialized:\n return\n document_sync_initialized = True\n # TODO: hook up events per scope/client\n Events.subscribe('view.on_load_async', notify_did_open)\n Events.subscribe('view.on_activated_async', notify_did_open)\n Events.subscribe('view.on_modified_async', queue_did_change)\n Events.subscribe('view.on_post_save_async', notify_did_save)\n Events.subscribe('view.on_close', notify_did_close)\n\n\ndef handle_initialize_result(result, client, window, config):\n global didopen_after_initialize\n capabilities = result.get(\"capabilities\")\n client.set_capabilities(capabilities)\n\n # TODO: These handlers is already filtered by syntax but does not need to\n # be enabled 2x per client\n # Move filtering?\n document_sync = capabilities.get(\"textDocumentSync\")\n if document_sync:\n initialize_document_sync(document_sync)\n\n Events.subscribe('document.diagnostics', handle_diagnostics)\n Events.subscribe('view.on_close', remove_diagnostics)\n\n client.send_notification(Notification.initialized())\n if config.settings:\n configParams = {\n 'settings': config.settings\n }\n client.send_notification(Notification.didChangeConfiguration(configParams))\n\n for view in didopen_after_initialize:\n notify_did_open(view)\n if show_status_messages:\n window.status_message(\"{} initialized\".format(config.name))\n didopen_after_initialize = list()\n\n\nstylesheet = '''\n \n '''\n\n\ndef create_phantom_html(text: str) -> str:\n global stylesheet\n return \"\"\"{}\n
<div class=\"error\">{} <a href=\"code-actions\">Code Actions</a></div>\n \"\"\".format(stylesheet, html.escape(text, quote=False))\n\n\ndef create_quiet_phantom_html(text: str) -> str:\n global stylesheet\n html_str = \"\"\"{} \"\"\".format(html.escape(text, quote=False))\n return html_str.replace(\"\\n\", \"<br>\")\n\n\ndef on_phantom_navigate(view: sublime.View, href: str, point: int):\n # TODO: don't mess with the user's cursor.\n sel = view.sel()\n sel.clear()\n sel.add(sublime.Region(point))\n view.run_command(\"lsp_code_actions\")\n\n\ndef create_phantom(view: sublime.View, diagnostic: Diagnostic) -> sublime.Phantom:\n region = diagnostic.range.to_region(view)\n # TODO: hook up hide phantom (if keeping them)\n content = create_phantom_html(diagnostic.message)\n return sublime.Phantom(\n region,\n '<p>' + content + '</p>
',\n sublime.LAYOUT_BELOW,\n lambda href: on_phantom_navigate(view, href, region.begin())\n )\n\n\ndef create_quiet_phantom(view: sublime.View, diagnostic: Diagnostic, inline: bool) -> sublime.Phantom:\n region = diagnostic.range.to_region(view)\n # TODO: hook up hide phantom (if keeping them)\n content = create_quiet_phantom_html(diagnostic.message)\n alignment = sublime.LAYOUT_INLINE\n if not inline:\n alignment = sublime.LAYOUT_BELOW\n\n return sublime.Phantom(\n region,\n '' + content + '',\n alignment,\n lambda href: on_phantom_navigate(view, href, region.begin())\n )\n\n\ndef format_severity(severity: int) -> str:\n return diagnostic_severity_names.get(severity, \"???\")\n\n\ndef format_diagnostic(diagnostic: Diagnostic) -> str:\n location = \"{:>8}:{:<4}\".format(\n diagnostic.range.start.row + 1, diagnostic.range.start.col + 1)\n message = diagnostic.message.replace(\"\\n\", \" \").replace(\"\\r\", \"\")\n return \" {}\\t{:<12}\\t{:<10}\\t{}\".format(\n location, diagnostic.source, format_severity(diagnostic.severity), message)\n\n\nclass LspSymbolRenameCommand(sublime_plugin.TextCommand):\n def is_enabled(self, event=None):\n # TODO: check what kind of scope we're in.\n if is_supported_view(self.view):\n client = client_for_view(self.view)\n if client and client.has_capability('renameProvider'):\n return is_at_word(self.view, event)\n return False\n\n def run(self, edit, event=None):\n pos = get_position(self.view, event)\n params = get_document_position(self.view, pos)\n current_name = self.view.substr(self.view.word(pos))\n if not current_name:\n current_name = \"\"\n self.view.window().show_input_panel(\n \"New name:\", current_name, lambda text: self.request_rename(params, text),\n None, None)\n\n def request_rename(self, params, new_name):\n client = client_for_view(self.view)\n if client:\n params[\"newName\"] = new_name\n client.send_request(Request.rename(params), self.handle_response)\n\n def handle_response(self, response):\n if 'changes' in response:\n changes = response.get('changes')\n if len(changes) > 0:\n self.view.window().run_command('lsp_apply_workspace_edit',\n {'changes': response})\n\n def want_event(self):\n return True\n\n\nclass LspFormatDocumentCommand(sublime_plugin.TextCommand):\n def is_enabled(self):\n if is_supported_view(self.view):\n client = client_for_view(self.view)\n if client and client.has_capability('documentFormattingProvider'):\n return True\n return False\n\n def run(self, edit):\n client = client_for_view(self.view)\n if client:\n pos = self.view.sel()[0].begin()\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.view.file_name())\n },\n \"options\": {\n \"tabSize\": 4,\n \"insertSpaces\": True\n }\n }\n request = Request.formatting(params)\n client.send_request(\n request, lambda response: self.handle_response(response, pos))\n\n def handle_response(self, response, pos):\n self.view.run_command('lsp_apply_document_edit',\n {'changes': response})\n\n\nclass LspSymbolDefinitionCommand(sublime_plugin.TextCommand):\n def is_enabled(self, event=None):\n # TODO: check what kind of scope we're in.\n if is_supported_view(self.view):\n client = client_for_view(self.view)\n if client and client.has_capability('definitionProvider'):\n return is_at_word(self.view, event)\n return False\n\n def run(self, edit, event=None):\n client = client_for_view(self.view)\n if client:\n pos = get_position(self.view, event)\n request = Request.definition(get_document_position(self.view, pos))\n client.send_request(\n request, lambda response: 
self.handle_response(response, pos))\n\n def handle_response(self, response, position):\n window = sublime.active_window()\n if len(response) < 1:\n window.run_command(\"goto_definition\")\n else:\n location = response[0]\n file_path = uri_to_filename(location.get(\"uri\"))\n start = Point.from_lsp(location['range']['start'])\n file_location = \"{}:{}:{}\".format(file_path, start.row + 1, start.col + 1)\n debug(\"opening location\", location)\n window.open_file(file_location, sublime.ENCODED_POSITION)\n # TODO: can add region here.\n\n def want_event(self):\n return True\n\n\ndef format_symbol_kind(kind):\n return symbol_kind_names.get(kind, str(kind))\n\n\ndef format_symbol(item):\n \"\"\"\n items may be a list of strings, or a list of string lists.\n In the latter case, each entry in the quick panel will show multiple rows\n \"\"\"\n # file_path = uri_to_filename(location.get(\"uri\"))\n # kind = format_symbol_kind(item.get(\"kind\"))\n # return [item.get(\"name\"), kind]\n return [item.get(\"name\")]\n\n\nclass LspDocumentSymbolsCommand(sublime_plugin.TextCommand):\n def is_enabled(self):\n if is_supported_view(self.view):\n client = client_for_view(self.view)\n if client and client.has_capability('documentSymbolProvider'):\n return True\n return False\n\n def run(self, edit):\n client = client_for_view(self.view)\n if client:\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.view.file_name())\n }\n }\n request = Request.documentSymbols(params)\n client.send_request(request, self.handle_response)\n\n def handle_response(self, response):\n symbols = list(format_symbol(item) for item in response)\n self.symbols = response\n self.view.window().show_quick_panel(symbols, self.on_symbol_selected)\n\n def on_symbol_selected(self, symbol_index):\n selected_symbol = self.symbols[symbol_index]\n range = selected_symbol['location']['range']\n region = Range.from_lsp(range).to_region(self.view)\n self.view.show_at_center(region)\n self.view.sel().clear()\n self.view.sel().add(region)\n\n\ndef get_position(view: sublime.View, event=None) -> int:\n if event:\n return view.window_to_text((event[\"x\"], event[\"y\"]))\n else:\n return view.sel()[0].begin()\n\n\ndef is_at_word(view: sublime.View, event) -> bool:\n pos = get_position(view, event)\n point_classification = view.classify(pos)\n if point_classification & SUBLIME_WORD_MASK:\n return True\n else:\n return False\n\n\nOUTPUT_PANEL_SETTINGS = {\n \"auto_indent\": False,\n \"draw_indent_guides\": False,\n \"draw_white_space\": \"None\",\n \"gutter\": False,\n 'is_widget': True,\n \"line_numbers\": False,\n \"margin\": 3,\n \"match_brackets\": False,\n \"scroll_past_end\": False,\n \"tab_size\": 4,\n \"translate_tabs_to_spaces\": False,\n \"word_wrap\": False\n}\n\n\ndef create_output_panel(window: sublime.Window, name: str) -> sublime.View:\n panel = window.create_output_panel(name)\n settings = panel.settings()\n for key, value in OUTPUT_PANEL_SETTINGS.items():\n settings.set(key, value)\n return panel\n\n\ndef ensure_references_panel(window: sublime.Window):\n return window.find_output_panel(\"references\") or create_references_panel(window)\n\n\ndef create_references_panel(window: sublime.Window):\n panel = create_output_panel(window, \"references\")\n panel.settings().set(\"result_file_regex\",\n r\"^\\s+\\S\\s+(\\S.+)\\s+(\\d+):?(\\d+)$\")\n panel.assign_syntax(\"Packages/\" + PLUGIN_NAME +\n \"/Syntaxes/References.sublime-syntax\")\n return panel\n\n\nclass LspSymbolReferencesCommand(sublime_plugin.TextCommand):\n def 
is_enabled(self, event=None):\n if is_supported_view(self.view):\n client = client_for_view(self.view)\n if client and client.has_capability('referencesProvider'):\n return is_at_word(self.view, event)\n return False\n\n def run(self, edit, event=None):\n client = client_for_view(self.view)\n if client:\n pos = get_position(self.view, event)\n document_position = get_document_position(self.view, pos)\n document_position['context'] = {\n \"includeDeclaration\": False\n }\n request = Request.references(document_position)\n client.send_request(\n request, lambda response: self.handle_response(response, pos))\n\n def handle_response(self, response, pos):\n window = self.view.window()\n word = self.view.substr(self.view.word(pos))\n base_dir = get_project_path(window)\n file_path = self.view.file_name()\n relative_file_path = os.path.relpath(file_path, base_dir) if base_dir else file_path\n\n references = list(format_reference(item, base_dir) for item in response)\n\n if (len(references)) > 0:\n panel = ensure_references_panel(window)\n panel.settings().set(\"result_base_dir\", base_dir)\n panel.set_read_only(False)\n panel.run_command(\"lsp_clear_panel\")\n panel.run_command('append', {\n 'characters': 'References to \"' + word + '\" at ' + relative_file_path + ':\\n'\n })\n window.run_command(\"show_panel\", {\"panel\": \"output.references\"})\n for reference in references:\n panel.run_command('append', {\n 'characters': reference + \"\\n\",\n 'force': True,\n 'scroll_to_end': True\n })\n panel.set_read_only(True)\n\n else:\n window.run_command(\"hide_panel\", {\"panel\": \"output.references\"})\n sublime.status_message(\"No references found\")\n\n def want_event(self):\n return True\n\n\ndef format_reference(reference, base_dir):\n start = Point.from_lsp(reference.get('range').get('start'))\n file_path = uri_to_filename(reference.get(\"uri\"))\n relative_file_path = os.path.relpath(file_path, base_dir)\n return \" ◌ {} {}:{}\".format(relative_file_path, start.row + 1, start.col + 1)\n\n\nclass LspClearPanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A clear_panel command to clear the error panel.\n \"\"\"\n\n def run(self, edit):\n self.view.erase(edit, sublime.Region(0, self.view.size()))\n\n\nclass LspUpdatePanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A update_panel command to update the error panel with new text.\n \"\"\"\n\n def run(self, edit, characters):\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\n\n # Move cursor to the end\n selection = self.view.sel()\n selection.clear()\n selection.add(sublime.Region(self.view.size(), self.view.size()))\n\n\nUNDERLINE_FLAGS = (sublime.DRAW_SQUIGGLY_UNDERLINE\n | sublime.DRAW_NO_OUTLINE\n | sublime.DRAW_NO_FILL\n | sublime.DRAW_EMPTY_AS_OVERWRITE)\n\nBOX_FLAGS = sublime.DRAW_NO_FILL | sublime.DRAW_EMPTY_AS_OVERWRITE\n\nwindow_file_diagnostics = dict(\n) # type: Dict[int, Dict[str, Dict[str, List[Diagnostic]]]]\n\n\ndef update_file_diagnostics(window: sublime.Window, file_path: str, source: str,\n diagnostics: 'List[Diagnostic]'):\n if diagnostics:\n window_file_diagnostics.setdefault(window.id(), dict()).setdefault(\n file_path, dict())[source] = diagnostics\n else:\n if window.id() in window_file_diagnostics:\n file_diagnostics = window_file_diagnostics[window.id()]\n if file_path in file_diagnostics:\n if source in file_diagnostics[file_path]:\n del file_diagnostics[file_path][source]\n if not file_diagnostics[file_path]:\n del file_diagnostics[file_path]\n\n\nphantom_sets_by_buffer = {} # type: 
Dict[int, sublime.PhantomSet]\n\n\ndef update_diagnostics_phantoms(view: sublime.View, diagnostics: 'List[Diagnostic]'):\n global phantom_sets_by_buffer\n\n # disable the normal LSP phantoms when using a Rust document,\n # since these interfere with the enhanced Rust phantoms\n syntax = view.settings().get('syntax')\n if \"Rust\" in syntax: # type: ignore\n return\n\n buffer_id = view.buffer_id()\n if not show_diagnostics_phantoms or view.is_dirty():\n phantoms = None\n else:\n phantoms = list(\n create_phantom(view, diagnostic) for diagnostic in diagnostics)\n if phantoms:\n phantom_set = phantom_sets_by_buffer.get(buffer_id)\n if not phantom_set:\n phantom_set = sublime.PhantomSet(view, \"lsp_diagnostics\")\n phantom_sets_by_buffer[buffer_id] = phantom_set\n phantom_set.update(phantoms)\n else:\n phantom_sets_by_buffer.pop(buffer_id, None)\n\n\ndef update_diagnostics_regions(view: sublime.View, diagnostics: 'List[Diagnostic]', severity: int):\n region_name = \"lsp_\" + format_severity(severity)\n if show_diagnostics_phantoms and not view.is_dirty():\n regions = None\n else:\n regions = list(diagnostic.range.to_region(view) for diagnostic in diagnostics\n if diagnostic.severity == severity)\n if regions:\n scope_name = diagnostic_severity_scopes[severity]\n view.add_regions(\n region_name, regions, scope_name, diagnostics_gutter_marker,\n UNDERLINE_FLAGS if diagnostics_highlight_style == \"underline\" else BOX_FLAGS)\n else:\n view.erase_regions(region_name)\n\n\ndef update_diagnostics_in_view(view: sublime.View, diagnostics: 'List[Diagnostic]'):\n if view and view.is_valid():\n update_diagnostics_phantoms(view, diagnostics)\n for severity in range(DiagnosticSeverity.Error, DiagnosticSeverity.Information):\n update_diagnostics_regions(view, diagnostics, severity)\n\n\ndef remove_diagnostics(view: sublime.View):\n \"\"\"Removes diagnostics for a file if no views exist for it\n \"\"\"\n window = sublime.active_window()\n\n file_path = view.file_name()\n if not window.find_open_file(view.file_name()):\n update_file_diagnostics(window, file_path, 'lsp', [])\n update_diagnostics_panel(window)\n else:\n debug('file still open?')\n\n\ndef handle_diagnostics(update: 'Any'):\n file_path = uri_to_filename(update.get('uri'))\n window = sublime.active_window()\n\n if not is_in_workspace(window, file_path):\n debug(\"Skipping diagnostics for file\", file_path,\n \" it is not in the workspace\")\n return\n\n diagnostics = list(\n Diagnostic.from_lsp(item) for item in update.get('diagnostics', []))\n\n view = window.find_open_file(file_path)\n\n # diagnostics = update.get('diagnostics')\n\n update_diagnostics_in_view(view, diagnostics)\n\n # update panel if available\n\n origin = 'lsp' # TODO: use actual client name to be able to update diagnostics per client\n\n update_file_diagnostics(window, file_path, origin, diagnostics)\n update_diagnostics_panel(window)\n\n\nclass LspShowDiagnosticsPanelCommand(sublime_plugin.WindowCommand):\n def run(self):\n ensure_diagnostics_panel(self.window)\n self.window.run_command(\"show_panel\", {\"panel\": \"output.diagnostics\"})\n\n\ndef create_diagnostics_panel(window):\n panel = create_output_panel(window, \"diagnostics\")\n panel.settings().set(\"result_file_regex\", r\"^\\s*\\S\\s+(\\S.*):$\")\n panel.settings().set(\"result_line_regex\", r\"^\\s+([0-9]+):?([0-9]+).*$\")\n panel.assign_syntax(\"Packages/\" + PLUGIN_NAME +\n \"/Syntaxes/Diagnostics.sublime-syntax\")\n return panel\n\n\ndef ensure_diagnostics_panel(window):\n return 
window.find_output_panel(\"diagnostics\") or create_diagnostics_panel(window)\n\n\ndef update_diagnostics_panel(window):\n assert window, \"missing window!\"\n base_dir = get_project_path(window)\n\n panel = ensure_diagnostics_panel(window)\n assert panel, \"must have a panel now!\"\n\n if window.id() in window_file_diagnostics:\n active_panel = window.active_panel()\n is_active_panel = (active_panel == \"output.diagnostics\")\n panel.settings().set(\"result_base_dir\", base_dir)\n panel.set_read_only(False)\n file_diagnostics = window_file_diagnostics[window.id()]\n if file_diagnostics:\n to_render = []\n for file_path, source_diagnostics in file_diagnostics.items():\n relative_file_path = os.path.relpath(file_path, base_dir) if base_dir else file_path\n if source_diagnostics:\n to_render.append(format_diagnostics(relative_file_path, source_diagnostics))\n panel.run_command(\"lsp_update_panel\", {\"characters\": \"\\n\".join(to_render)})\n if auto_show_diagnostics_panel and not active_panel:\n window.run_command(\"show_panel\",\n {\"panel\": \"output.diagnostics\"})\n else:\n panel.run_command(\"lsp_clear_panel\")\n if auto_show_diagnostics_panel and is_active_panel:\n window.run_command(\"hide_panel\",\n {\"panel\": \"output.diagnostics\"})\n panel.set_read_only(True)\n\n\ndef format_diagnostics(file_path, origin_diagnostics):\n content = \" ◌ {}:\\n\".format(file_path)\n for origin, diagnostics in origin_diagnostics.items():\n for diagnostic in diagnostics:\n item = format_diagnostic(diagnostic)\n content += item + \"\\n\"\n return content\n\n\ndef start_client(window: sublime.Window, config: ClientConfig):\n project_path = get_project_path(window)\n if project_path is None:\n return None\n\n if show_status_messages:\n window.status_message(\"Starting \" + config.name + \"...\")\n debug(\"starting in\", project_path)\n\n variables = window.extract_variables()\n expanded_args = list(sublime.expand_variables(os.path.expanduser(arg), variables) for arg in config.binary_args)\n\n client = start_server(expanded_args, project_path)\n if not client:\n window.status_message(\"Could not start \" + config.name + \", disabling\")\n debug(\"Could not start\", config.binary_args, \", disabling\")\n return None\n\n initializeParams = {\n \"processId\": client.process.pid,\n \"rootUri\": filename_to_uri(project_path),\n \"rootPath\": project_path,\n \"capabilities\": {\n \"textDocument\": {\n \"completion\": {\n \"completionItem\": {\n \"snippetSupport\": True\n }\n },\n \"synchronization\": {\n \"didSave\": True\n }\n },\n \"workspace\": {\n \"applyEdit\": True\n }\n }\n }\n if config.init_options:\n initializeParams['initializationOptions'] = config.init_options\n\n client.send_request(\n Request.initialize(initializeParams),\n lambda result: handle_initialize_result(result, client, window, config))\n return client\n\n\ndef get_window_client(view: sublime.View, config: ClientConfig) -> Client:\n global clients_by_window\n\n window = view.window()\n clients = window_clients(window)\n if config.name not in clients:\n client = start_client(window, config)\n clients_by_window.setdefault(window.id(), {})[config.name] = client\n debug(\"client registered for window\",\n window.id(), window_clients(window))\n else:\n client = clients[config.name]\n\n return client\n\n\ndef start_server(server_binary_args, working_dir):\n debug(\"starting \" + str(server_binary_args))\n si = None\n if os.name == \"nt\":\n si = subprocess.STARTUPINFO() # type: ignore\n si.dwFlags |= subprocess.SW_HIDE | 
subprocess.STARTF_USESHOWWINDOW # type: ignore\n try:\n process = subprocess.Popen(\n server_binary_args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=working_dir,\n startupinfo=si)\n return Client(process, working_dir)\n\n except Exception as err:\n printf(err)\n\n\ndef get_document_range(view: sublime.View, region: sublime.Region) -> OrderedDict:\n d = OrderedDict() # type: OrderedDict[str, Any]\n d['textDocument'] = {\"uri\": filename_to_uri(view.file_name())}\n d['range'] = Range.from_region(view, region).to_lsp()\n return d\n\n\ndef get_document_position(view: sublime.View, point) -> OrderedDict:\n if not point:\n point = view.sel()[0].begin()\n d = OrderedDict() # type: OrderedDict[str, Any]\n d['textDocument'] = {\"uri\": filename_to_uri(view.file_name())}\n d['position'] = Point.from_text_point(view, point).to_lsp()\n return d\n\n\nclass Events:\n listener_dict = dict() # type: Dict[str, Callable[..., None]]\n\n @classmethod\n def subscribe(cls, key, listener):\n if key in cls.listener_dict:\n cls.listener_dict[key].append(listener)\n else:\n cls.listener_dict[key] = [listener]\n return lambda: cls.unsubscribe(key, listener)\n\n @classmethod\n def unsubscribe(cls, key, listener):\n if key in cls.listener_dict:\n cls.listener_dict[key].remove(listener)\n\n @classmethod\n def publish(cls, key, *args):\n if key in cls.listener_dict:\n for listener in cls.listener_dict[key]:\n listener(*args)\n\n\nclass HoverHandler(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n return syntax and is_supported_syntax(syntax)\n\n def on_hover(self, point, hover_zone):\n if hover_zone != sublime.HOVER_TEXT or self.view.is_popup_visible():\n return\n point_diagnostics = get_point_diagnostics(self.view, point)\n if point_diagnostics:\n self.show_diagnostics_hover(point, point_diagnostics)\n else:\n self.request_symbol_hover(point)\n\n def request_symbol_hover(self, point):\n if self.view.match_selector(point, NO_HOVER_SCOPES):\n return\n client = client_for_view(self.view)\n if client and client.has_capability('hoverProvider'):\n word_at_sel = self.view.classify(point)\n if word_at_sel & SUBLIME_WORD_MASK:\n client.send_request(\n Request.hover(get_document_position(self.view, point)),\n lambda response: self.handle_response(response, point))\n\n def handle_response(self, response, point):\n debug(response)\n if self.view.is_popup_visible():\n return\n contents = \"No description available.\"\n if isinstance(response, dict):\n # Flow returns None sometimes\n # See: https://github.com/flowtype/flow-language-server/issues/51\n contents = response.get('contents') or contents\n self.show_hover(point, contents)\n\n def show_diagnostics_hover(self, point, diagnostics):\n formatted = list(\"{}: {}\".format(diagnostic.source, diagnostic.message) for diagnostic in diagnostics)\n formatted.append(\"[{}]({})\".format('Code Actions', 'code-actions'))\n mdpopups.show_popup(\n self.view,\n \"\\n\".join(formatted),\n css=\".mdpopups .lsp_hover { margin: 4px; }\",\n md=True,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=\"lsp_hover\",\n max_width=800,\n on_navigate=lambda href: self.on_diagnostics_navigate(href, point, diagnostics))\n\n def on_diagnostics_navigate(self, href, point, diagnostics):\n # TODO: don't mess with the user's cursor.\n # Instead, code actions requested from phantoms & hovers should call 
lsp_code_actions with\n # diagnostics as args, positioning resulting UI close to the clicked link.\n sel = self.view.sel()\n sel.clear()\n sel.add(sublime.Region(point, point))\n self.view.run_command(\"lsp_code_actions\")\n\n def show_hover(self, point, contents):\n formatted = []\n if not isinstance(contents, list):\n contents = [contents]\n\n for item in contents:\n value = \"\"\n language = None\n if isinstance(item, str):\n value = item\n else:\n value = item.get(\"value\")\n language = item.get(\"language\")\n if language:\n formatted.append(\"```{}\\n{}\\n```\".format(language, value))\n else:\n formatted.append(value)\n\n mdpopups.show_popup(\n self.view,\n preserve_whitespace(\"\\n\".join(formatted)),\n css=\".mdpopups .lsp_hover { margin: 4px; } .mdpopups p { margin: 0.1rem; }\",\n md=True,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=\"lsp_hover\",\n max_width=800)\n\n\ndef wait_on_RLS():\n global waiting_for_RLS\n\n # wait for up to 1 second for a rustDocument/diagnosticsBegin\n # after that, assume we missed it\n max_count = 20\n while waiting_for_RLS is False and max_count > 0:\n sublime.active_window().status_message(\"Waiting on RLS to start...\")\n time.sleep(0.05)\n max_count -= 1\n\n # wait on RLS to send rustDocument/diagnosticsEnd\n while waiting_for_RLS is True:\n sublime.active_window().status_message(\"Waiting on RLS to finish...\")\n time.sleep(0.05)\n\n\ntype_phantoms = [] # type: ignore\nphantoms_to_generate = 0\n\n\ndef annotate_types(view: sublime.View, current_function: bool):\n global type_phantoms, phantoms_to_generate\n syntax = view.settings().get('syntax')\n if \"Rust\" not in syntax: # type: ignore\n return 1\n wait_on_RLS()\n phantoms_to_generate = 0\n type_phantoms = []\n annotator = TypeAnnotator(view)\n annotator.annotate_var_decl(view)\n annotator.annotate_tuple_decl(view)\n annotator.annotate_for_loops(view)\n annotator.annotate_for_tuple_loops(view)\n annotator.annotate_match_stmt(view)\n annotator.annotate_use_stmt(view)\n annotator.annotate_use_multiple_stmt(view)\n annotator.annotate_closures(view)\n if current_function:\n annotator.annotate_function(view)\n sublime.set_timeout_async(lambda: show_type_phantoms(view), 100)\n return len(type_phantoms)\n\n\ndef show_type_phantoms(view: sublime.View):\n global phantoms_to_generate\n start = time.time()\n # wait for up to 5 seconds for the phantoms to generate\n while phantoms_to_generate > 0 and time.time() - start < 5:\n sublime.active_window().status_message(\"Waiting on RLS...\")\n time.sleep(0.05)\n time.sleep(0.1)\n buffer_id = view.buffer_id()\n phantom_set = sublime.PhantomSet(view, \"lsp_annotations\")\n phantom_sets_by_buffer[buffer_id] = phantom_set\n phantom_set.update(type_phantoms)\n\n\nclass TypeAnnotator(object):\n def __init__(self, view):\n self.view = view\n\n def request_symbol_annotate(self, point, category=\"\"):\n global phantoms_to_generate\n if self.view.match_selector(point, NO_HOVER_SCOPES):\n phantoms_to_generate -= 1\n return\n client = client_for_view(self.view)\n if client and client.has_capability('hoverProvider'):\n word_at_sel = self.view.classify(point)\n if word_at_sel & SUBLIME_WORD_MASK:\n client.send_request(\n Request.hover(get_document_position(self.view, point)),\n lambda response: self.handle_response(response, point, category))\n\n def handle_response(self, response, point, category):\n debug(response)\n contents = \"No description available.\"\n if isinstance(response, dict):\n # Flow returns None sometimes\n # See: 
https://github.com/flowtype/flow-language-server/issues/51\n contents = response.get('contents') or contents\n self.add_annotation(point, contents, category)\n\n def annotate_var_decl(self, view: sublime.View):\n global phantoms_to_generate\n all_vars = view.find_all('...\\\\blet\\\\b *(mut){0,1} *([a-zA-Z_][a-zA-Z0-9_]*) *[:=;]', 0)\n for var in all_vars:\n if var is None or var.begin() == -1:\n continue\n var_text = view.substr(var)\n if \":\" in var_text or var_text.startswith(\"if\"):\n continue\n phantoms_to_generate += 1\n var_text = var_text[:-1].rstrip()\n var_start = var.begin() + var_text.rfind(\" \") + 1\n self.request_symbol_annotate(var_start)\n\n def annotate_tuple_decl(self, view: sublime.View):\n global phantoms_to_generate\n tuple_vars = view.find_all('\\\\blet\\\\b *\\([a-zA-Z0-9_, ]*\\) *[:=;]', 0)\n for var in tuple_vars:\n if var is None or var.begin() == -1:\n continue\n var_text = view.substr(var)\n if \":\" in var_text:\n continue\n var_start = var.begin() + var_text.find('(') + 1\n var_text = re.sub(\"let *\\(\", \"\", var_text)\n var_text = re.sub(\"\\) *[:=;]\", \"\", var_text)\n tp_vars = var_text.split(\",\")\n first = True\n for var in tp_vars:\n if var.startswith(\" \"):\n var_start += 1\n var = var[1:]\n if var.startswith(\"mut \"):\n var_start += 4\n var = var[4:]\n phantoms_to_generate += 1\n self.request_symbol_annotate(var_start)\n if first:\n first = False\n var_start += 1\n var_start += 1 + len(var)\n\n def annotate_for_loops(self, view: sublime.View):\n global phantoms_to_generate\n iter_vars = view.find_all('\\\\bfor\\\\b *[a-zA-Z_][a-zA-Z0-9_]* *in', 0)\n for var in iter_vars:\n if var is None or var.begin() == -1:\n continue\n phantoms_to_generate += 1\n var_text = view.substr(var)\n var_text = var_text[:-2].rstrip()\n var_start = var.begin() + var_text.rfind(\" \") + 1\n self.request_symbol_annotate(var_start)\n\n def annotate_for_tuple_loops(self, view: sublime.View):\n global phantoms_to_generate\n iter_tuple_vars = view.find_all('\\\\bfor\\\\b *\\([a-zA-Z0-9_, ]*\\) *in', 0)\n for var in iter_tuple_vars:\n if var is None or var.begin() == -1:\n continue\n var_text = view.substr(var)\n var_start = var.begin() + var_text.find('(') + 1\n var_text = re.sub(\"for *\\(\", \"\", var_text)\n var_text = re.sub(\"\\) *in\", \"\", var_text)\n tp_vars = var_text.split(\",\")\n first = True\n for var in tp_vars:\n if var.startswith(\"mut \"):\n var_start += 4\n var = var[4:]\n phantoms_to_generate += 1\n self.request_symbol_annotate(var_start)\n if first:\n first = False\n var_start += 1\n var_start += 1 + len(var)\n\n def annotate_match_stmt(self, view: sublime.View):\n global phantoms_to_generate\n match_vars = view.find_all('\\\\bmatch\\\\b *[*]*[a-zA-Z_][a-zA-Z0-9_.]* *{', 0)\n for var in match_vars:\n if var is None or var.begin() == -1:\n continue\n phantoms_to_generate += 1\n var_text = view.substr(var)\n var_text = var_text[:-1].rstrip()\n offset = max(var_text.rfind(\" \"), var_text.rfind(\".\"))\n deref = var_text.count('*')\n offset += deref\n var_start = var.begin() + offset + 1\n self.request_symbol_annotate(var_start, \"deref=\" + str(deref))\n\n def annotate_use_stmt(self, view: sublime.View):\n global phantoms_to_generate\n use_vars = view.find_all('\\\\buse\\\\b *[a-zA-Z0-9_:]* *;', 0)\n for var in use_vars:\n if var is None or var.begin() == -1:\n continue\n phantoms_to_generate += 1\n var_text = view.substr(var)\n var_text = var_text[:-1].rstrip()\n offset = max(var_text.rfind(\" \"), var_text.rfind(\":\"))\n var_start = var.begin() + 
offset + 1\n self.request_symbol_annotate(var_start, \"docs\")\n\n def annotate_use_multiple_stmt(self, view: sublime.View):\n global phantoms_to_generate\n use_vars = view.find_all('\\\\buse\\\\b *[a-zA-Z0-9_:]*{[a-zA-Z0-9_:, ]*} *;', 0)\n for var in use_vars:\n if var is None or var.begin() == -1:\n continue\n var_text = view.substr(var)\n var_start = var.begin() + var_text.find('{') + 1\n var_text = re.sub(\"use *[a-zA-Z0-9_:]*{\", \"\", var_text)\n var_text = re.sub(\"} *;\", \"\", var_text)\n tp_vars = var_text.split(\",\")\n first = True\n for var in tp_vars:\n phantoms_to_generate += 1\n self.request_symbol_annotate(var_start, \"docs\")\n if first:\n first = False\n var_start += 1\n var_start += 1 + len(var)\n\n def annotate_function(self, view: sublime.View):\n global phantoms_to_generate\n point = view.sel()[0].begin()\n if view.substr(point - 1) != '(':\n return\n line = view.substr(view.line(point))\n line = re.sub(\" *\\t*\", \"\", line)\n if line.startswith(\"fn\"):\n return\n phantoms_to_generate += 1\n self.request_symbol_annotate(point - 2, \"fn\")\n\n def annotate_closures(self, view: sublime.View):\n global phantoms_to_generate\n all_vars = view.find_all('\\|[a-zA-Z0-9_,&: ]*\\|', 0)\n for var in all_vars:\n if var is None or var.begin() == -1:\n continue\n var_text = view.substr(var)\n var_text = var_text[1:-1].rstrip()\n var_start = var.begin() + 1\n closure_args = var_text.split(\",\")\n first = True\n for var in closure_args:\n if var.startswith(\"&\"):\n var_start += 1\n var = var[1:]\n if var.startswith(\"mut \"):\n var_start += 4\n var = var[4:]\n if \":\" not in var:\n phantoms_to_generate += 1\n self.request_symbol_annotate(var_start)\n if first:\n first = False\n var_start += 1\n var_start += 1 + len(var)\n\n def add_annotation(self, point, contents, category):\n global type_phantoms, phantoms_to_generate\n formatted = []\n if not isinstance(contents, list):\n contents = [contents]\n\n for item in contents:\n if isinstance(item, str):\n formatted.append(item)\n elif category != \"docs\":\n formatted.append(item.get(\"value\"))\n\n formatted_str = \"\\n\".join(formatted)\n\n if category == \"docs\":\n # we only want the first line of the doc string\n formatted_str = formatted_str.split(\"\\n\")[0]\n\n docs = []\n if category == \"fn\":\n text = formatted_str.split(\"\\n\")\n docs = text[:-1]\n formatted_str = text[-1]\n\n if \"No description\" in formatted_str or len(formatted_str) == 0:\n phantoms_to_generate -= 1\n return\n\n formatted_str = re.sub(\"<[a-zA-Z0-9_]*>\", \"\", formatted_str)\n formatted_str = re.sub(\"[^(& <]*::\", \"\", formatted_str)\n if formatted_str.startswith(\"[closure@\"):\n category = \"fn\"\n formatted_str = re.sub(\"@.*:[0-9]+\", \", captures \", formatted_str)\n if formatted_str.endswith(\"captures ]\"):\n formatted_str = formatted_str[:-1] + \"nothing ]\"\n formatted_str = \": \" + formatted_str\n\n region = self.view.word(point)\n region = Range(\n Point.from_text_point(self.view, region.end()),\n Point.from_text_point(self.view, region.end()+1)\n )\n if category == \"docs\":\n formatted_str = \"/* \" + formatted_str + \" */\"\n elif category != \"fn\":\n formatted_str = \": \" + formatted_str\n if category.startswith(\"deref=\"):\n deref = int(category.split(\"=\")[1])\n formatted_str = formatted_str.replace(\"&\" * deref, \"\", 1)\n\n diagnostic_msg = Diagnostic(formatted_str, region, DiagnosticSeverity.Hint, None, None)\n\n if category != \"fn\":\n type_phantoms.append(create_quiet_phantom(self.view, diagnostic_msg, True))\n 
else:\n type_phantoms.append(self.create_function_phantom(point, formatted_str, docs))\n phantoms_to_generate -= 1\n\n def create_function_phantom(self, point, formatted, docs):\n region = self.view.word(point)\n fn_name = self.view.substr(region)\n formatted = fn_name + re.sub(\" *fn *\", \"\", formatted)\n formatted = re.sub(\", *> *\", \">\", formatted)\n self_decl = re.search(\"&{0,1}('[a-zA-Z0-9]){0,1} *(mut){0,1} *self *[^:]\", formatted)\n offset = 0\n if self_decl is not None:\n self_decl_str = self_decl.group(0)[:-1].rstrip()\n formatted = formatted.replace(self_decl_str, \"\", 1).replace(\"(, \", \"(\", 1)\n offset = len(self_decl_str) + 1\n if \"'\" in self_decl_str:\n self_decl_str = \"(\" + self_decl_str + \")\"\n formatted = self_decl_str + \".\" + formatted\n location = region.begin() - offset\n location_row = self.view.rowcol(location)[0]\n point_row = self.view.rowcol(point)[0]\n\n # make sure it occurs on the right line, in case of long signatures\n if location_row < point_row:\n location = self.view.text_point(point_row, 1)\n\n region = Range(\n Point.from_text_point(self.view, location),\n Point.from_text_point(self.view, location+1)\n )\n\n docs_str = \"\"\n # if len(docs) > 0: # disabled for now because the docs returned for functions by RLS are not right\n # docs_str = \"\\n\".join(docs) + \"\\n\"\n\n diagnostic_msg = Diagnostic(docs_str + formatted, region, DiagnosticSeverity.Hint, None, None)\n return create_quiet_phantom(self.view, diagnostic_msg, False)\n\n\ndef preserve_whitespace(contents: str) -> str:\n \"\"\"Preserve empty lines and whitespace for markdown conversion.\"\"\"\n contents = contents.strip(' \\t\\r\\n')\n contents = contents.replace('\\t', ' ' * 4)\n contents = contents.replace(' ', ' ' * 2)\n contents = contents.replace('\\n\\n', '\\n \\n')\n return contents\n\n\nclass CompletionState(object):\n IDLE = 0\n REQUESTING = 1\n APPLYING = 2\n CANCELLING = 3\n\n\nresolvable_completion_items = [] # type: List[Any]\n\n\ndef find_completion_item(label: str) -> 'Optional[Any]':\n matches = list(filter(lambda i: i.get(\"label\") == label, resolvable_completion_items))\n return matches[0] if matches else None\n\n\nclass CompletionContext(object):\n\n def __init__(self, begin):\n self.begin = begin # type: Optional[int]\n self.end = None # type: Optional[int]\n self.region = None # type: Optional[sublime.Region]\n self.committing = False\n\n def committed_at(self, end):\n self.end = end\n self.region = sublime.Region(self.begin, self.end)\n self.committing = False\n\n\ncurrent_completion = None # type: Optional[CompletionContext]\n\n\ndef has_resolvable_completions(view):\n client = client_for_view(view)\n if client:\n completionProvider = client.get_capability(\n 'completionProvider')\n if completionProvider:\n if completionProvider.get('resolveProvider', False):\n return True\n return False\n\n\nclass CompletionSnippetHandler(sublime_plugin.EventListener):\n\n def on_query_completions(self, view, prefix, locations):\n global current_completion\n if resolve_completion_for_snippets and has_resolvable_completions(view):\n current_completion = CompletionContext(view.sel()[0].begin())\n\n def on_text_command(self, view, command_name, args):\n if resolve_completion_for_snippets and current_completion:\n current_completion.committing = command_name in ('commit_completion', 'insert_best_completion')\n\n def on_modified(self, view):\n global current_completion\n\n if resolve_completion_for_snippets and view.file_name():\n if current_completion and 
current_completion.committing:\n current_completion.committed_at(view.sel()[0].end())\n inserted = view.substr(current_completion.region)\n item = find_completion_item(inserted)\n if item:\n self.resolve_completion(item, view)\n else:\n current_completion = None\n\n def resolve_completion(self, item, view):\n client = client_for_view(view)\n if not client:\n return\n\n client.send_request(\n Request.resolveCompletionItem(item),\n lambda response: self.handle_resolve_response(response, view))\n\n def handle_resolve_response(self, response, view):\n # replace inserted text if a snippet was returned.\n if current_completion and response.get('insertTextFormat') == 2: # snippet\n insertText = response.get('insertText')\n try:\n sel = view.sel()\n sel.clear()\n sel.add(current_completion.region)\n view.run_command(\"insert_snippet\", {\"contents\": insertText})\n except Exception as e:\n debug('error inserting snippet', insertText, e)\n\n\nclass CompletionHandler(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n self.initialized = False\n self.enabled = False\n self.trigger_chars = [] # type: List[str]\n self.resolve = False\n self.resolve_details = [] # type: List[Tuple[str, str]]\n self.state = CompletionState.IDLE\n self.next_request = None\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n return syntax and is_supported_syntax(syntax)\n\n def initialize(self):\n self.initialized = True\n client = client_for_view(self.view)\n if client:\n completionProvider = client.get_capability(\n 'completionProvider')\n if completionProvider:\n self.enabled = True\n self.trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n self.has_resolve_provider = completionProvider.get('resolveProvider', False)\n\n def is_after_trigger_character(self, location):\n if location > 0:\n prev_char = self.view.substr(location - 1)\n return prev_char in self.trigger_chars\n\n def on_query_completions(self, prefix, locations):\n if self.view.match_selector(locations[0], NO_COMPLETION_SCOPES):\n return\n\n if not self.initialized:\n self.initialize()\n\n if self.enabled:\n if self.state == CompletionState.IDLE:\n self.do_request(prefix, locations)\n self.completions = [] # type: List[Tuple[str, str]]\n\n elif self.state in (CompletionState.REQUESTING, CompletionState.CANCELLING):\n self.next_request = (prefix, locations)\n self.state = CompletionState.CANCELLING\n\n elif self.state == CompletionState.APPLYING:\n self.state = CompletionState.IDLE\n\n return (\n self.completions,\n 0 if not only_show_lsp_completions\n else sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS\n )\n\n def do_request(self, prefix, locations):\n self.next_request = None\n view = self.view\n\n # don't store client so we can handle restarts\n client = client_for_view(view)\n if not client:\n return\n\n if complete_all_chars or self.is_after_trigger_character(locations[0]):\n purge_did_change(view.buffer_id())\n client.send_request(\n Request.complete(get_document_position(view, locations[0])),\n self.handle_response)\n self.state = CompletionState.REQUESTING\n\n def format_completion(self, item) -> 'Tuple[str, str]':\n # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.\n label = item.get(\"label\")\n detail = item.get(\"detail\")\n kind = item.get(\"kind\")\n if not detail:\n if kind is not None:\n detail = completion_item_kind_names[kind]\n insertText = item.get(\"insertText\", None)\n if not 
insertText:\n insertText = label\n if insertText[0] == '$': # sublime needs leading '$' escaped.\n insertText = '\\$' + insertText[1:]\n return \"{}\\t {}\".format(label, detail) if detail else label, insertText\n\n def handle_response(self, response):\n global resolvable_completion_items\n if self.state == CompletionState.REQUESTING:\n items = response[\"items\"] if isinstance(response,\n dict) else response\n self.completions = list(self.format_completion(item) for item in items)\n\n if self.has_resolve_provider:\n resolvable_completion_items = items\n\n self.state = CompletionState.APPLYING\n self.view.run_command(\"hide_auto_complete\")\n self.run_auto_complete()\n elif self.state == CompletionState.CANCELLING:\n self.do_request(*self.next_request)\n else:\n debug('Got unexpected response while in state {}'.format(self.state))\n\n def run_auto_complete(self):\n self.view.run_command(\n \"auto_complete\", {\n 'disable_auto_insert': True,\n 'api_completions_only': only_show_lsp_completions,\n 'next_completion_if_showing': False\n })\n\n\nclass SignatureHelpListener(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n self.signature_help_triggers = None\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n return syntax and is_supported_syntax(syntax)\n\n def initialize_triggers(self):\n client = client_for_view(self.view)\n if client:\n signatureHelpProvider = client.get_capability(\n 'signatureHelpProvider')\n if signatureHelpProvider:\n self.signature_help_triggers = signatureHelpProvider.get(\n 'triggerCharacters')\n return\n\n self.signature_help_triggers = []\n\n def on_modified_async(self):\n pos = self.view.sel()[0].begin()\n last_char = self.view.substr(pos - 1)\n # TODO: this will fire too often, narrow down using scopes or regex\n if self.signature_help_triggers is None:\n self.initialize_triggers()\n\n if self.signature_help_triggers:\n if last_char in self.signature_help_triggers:\n client = client_for_view(self.view)\n if client:\n purge_did_change(self.view.buffer_id())\n client.send_request(\n Request.signatureHelp(get_document_position(self.view, pos)),\n lambda response: self.handle_response(response, pos))\n else:\n # TODO: this hides too soon.\n if self.view.is_popup_visible():\n self.view.hide_popup()\n\n def handle_response(self, response, point):\n if response is not None:\n config = config_for_scope(self.view)\n signatures = response.get(\"signatures\")\n activeSignature = response.get(\"activeSignature\")\n debug(\"got signatures, active is\", len(signatures), activeSignature)\n if len(signatures) > 0 and config:\n signature = signatures[activeSignature]\n debug(\"active signature\", signature)\n formatted = []\n formatted.append(\n \"```{}\\n{}\\n```\".format(config.languageId, signature.get('label')))\n params = signature.get('parameters')\n if params is None: # for pyls TODO create issue?\n params = signature.get('params')\n debug(\"params\", params)\n for parameter in params:\n paramDocs = parameter.get('documentation')\n if paramDocs:\n formatted.append(\"**{}**\\n\".format(parameter.get('label')))\n formatted.append(\"* *{}*\\n\".format(paramDocs))\n\n formatted.append(signature.get('documentation'))\n\n mdpopups.show_popup(\n self.view,\n preserve_whitespace(\"\\n\".join(formatted)),\n css=\".mdpopups .lsp_signature { margin: 4px; } .mdpopups p { margin: 0.1rem; }\",\n md=True,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=\"lsp_signature\",\n 
max_width=800)\n\n\ndef get_line_diagnostics(view, point):\n row, _ = view.rowcol(point)\n diagnostics = get_diagnostics_for_view(view)\n return tuple(\n diagnostic for diagnostic in diagnostics\n if diagnostic.range.start.row <= row <= diagnostic.range.end.row\n )\n\n\ndef get_point_diagnostics(view, point):\n diagnostics = get_diagnostics_for_view(view)\n return tuple(\n diagnostic for diagnostic in diagnostics\n if diagnostic.range.to_region(view).contains(point)\n )\n\n\ndef get_diagnostics_for_view(view: sublime.View) -> 'List[Diagnostic]':\n window = view.window()\n file_path = view.file_name()\n origin = 'lsp'\n if window.id() in window_file_diagnostics:\n file_diagnostics = window_file_diagnostics[window.id()]\n if file_path in file_diagnostics:\n if origin in file_diagnostics[file_path]:\n return file_diagnostics[file_path][origin]\n return []\n\n\nclass LspCodeActionsCommand(sublime_plugin.TextCommand):\n def is_enabled(self, event=None):\n if is_supported_view(self.view):\n client = client_for_view(self.view)\n if client and client.has_capability('codeActionProvider'):\n return True\n return False\n\n def run(self, edit, event=None):\n client = client_for_view(self.view)\n if client:\n pos = get_position(self.view, event)\n row, col = self.view.rowcol(pos)\n line_diagnostics = get_line_diagnostics(self.view, pos)\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.view.file_name())\n },\n \"context\": {\n \"diagnostics\": list(diagnostic.to_lsp() for diagnostic in line_diagnostics)\n }\n }\n if len(line_diagnostics) > 0:\n # TODO: merge ranges.\n params[\"range\"] = line_diagnostics[0].range.to_lsp()\n else:\n params[\"range\"] = Range(Point(row, col), Point(row, col)).to_lsp()\n\n if event: # if right-clicked, set cursor to menu position\n sel = self.view.sel()\n sel.clear()\n sel.add(sublime.Region(pos))\n\n client.send_request(Request.codeAction(params), self.handle_codeaction_response)\n\n def handle_codeaction_response(self, response):\n titles = []\n self.commands = response\n for command in self.commands:\n titles.append(\n command.get('title')) # TODO parse command and arguments\n if len(self.commands) > 0:\n self.view.show_popup_menu(titles, self.handle_select)\n else:\n self.view.show_popup('No actions available', sublime.HIDE_ON_MOUSE_MOVE_AWAY)\n\n def handle_select(self, index):\n if index > -1:\n client = client_for_view(self.view)\n if client:\n client.send_request(\n Request.executeCommand(self.commands[index]),\n self.handle_command_response)\n\n def handle_command_response(self, response):\n pass\n\n def want_event(self):\n return True\n\n\ndef apply_workspace_edit(window, params):\n edit = params.get('edit')\n window.run_command('lsp_apply_workspace_edit', {'changes': edit})\n\n\nclass LspRestartClientCommand(sublime_plugin.TextCommand):\n def is_enabled(self):\n return is_supported_view(self.view)\n\n def run(self, edit):\n window = self.view.window()\n unload_window_clients(window.id())\n\n\nclass LspApplyWorkspaceEditCommand(sublime_plugin.WindowCommand):\n def run(self, changes):\n debug('workspace edit', changes)\n if changes.get('changes'):\n for uri, file_changes in changes.get('changes').items():\n path = uri_to_filename(uri)\n view = self.window.open_file(path)\n if view:\n if view.is_loading():\n # TODO: wait for event instead.\n sublime.set_timeout_async(\n lambda: view.run_command('lsp_apply_document_edit', {'changes': file_changes}),\n 500\n )\n else:\n view.run_command('lsp_apply_document_edit',\n {'changes': file_changes})\n 
else:\n debug('view not found to apply', path, file_changes)\n\n\nclass LspApplyDocumentEditCommand(sublime_plugin.TextCommand):\n def run(self, edit, changes):\n regions = list(self.create_region(change) for change in changes)\n replacements = list(change.get('newText') for change in changes)\n\n self.view.add_regions('lsp_edit', regions, \"source.python\")\n\n index = 0\n # use regions from view as they are correctly updated after edits.\n for newText in replacements:\n region = self.view.get_regions('lsp_edit')[index]\n self.apply_change(region, newText, edit)\n index += 1\n\n self.view.erase_regions('lsp_edit')\n\n def create_region(self, change):\n return Range.from_lsp(change['range']).to_region(self.view)\n\n def apply_change(self, region, newText, edit):\n if region.empty():\n self.view.insert(edit, region.a, newText)\n else:\n if len(newText) > 0:\n self.view.replace(edit, region, newText)\n else:\n self.view.erase(edit, region)\n\n\nclass CloseListener(sublime_plugin.EventListener):\n def on_close(self, view):\n if is_supported_syntax(view.settings().get(\"syntax\")):\n Events.publish(\"view.on_close\", view)\n sublime.set_timeout_async(check_window_unloaded, 500)\n\n\nclass SaveListener(sublime_plugin.EventListener):\n def on_post_save_async(self, view):\n if is_supported_view(view):\n Events.publish(\"view.on_post_save_async\", view)\n\n\ndef is_transient_view(view):\n window = view.window()\n return view == window.transient_view_in_group(window.active_group())\n\n\nclass DiagnosticsCursorListener(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n self.has_status = False\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n global show_diagnostics_in_view_status\n return show_diagnostics_in_view_status and syntax and is_supported_syntax(syntax)\n\n def on_selection_modified_async(self):\n pos = self.view.sel()[0].begin()\n line_diagnostics = get_line_diagnostics(self.view, pos)\n if len(line_diagnostics) > 0:\n self.show_diagnostics_status(line_diagnostics)\n elif self.has_status:\n self.clear_diagnostics_status()\n\n def show_diagnostics_status(self, line_diagnostics):\n self.has_status = True\n self.view.set_status('lsp_diagnostics', line_diagnostics[0].message)\n\n def clear_diagnostics_status(self):\n self.view.set_status('lsp_diagnostics', \"\")\n self.has_status = False\n\n\nclass DocumentSyncListener(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n return syntax and is_supported_syntax(syntax)\n\n @classmethod\n def applies_to_primary_view_only(cls):\n return False\n\n def on_load_async(self):\n # skip transient views: if not is_transient_view(self.view):\n Events.publish(\"view.on_load_async\", self.view)\n\n def on_modified_async(self):\n if self.view.file_name():\n Events.publish(\"view.on_modified_async\", self.view)\n\n def on_activated_async(self):\n if self.view.file_name():\n Events.publish(\"view.on_activated_async\", self.view)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":93380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"469803905","text":"import requests\nimport os\nimport json\nimport PyPDF2\nfrom geoparserIO import parseWithGeoparser\nfrom googleNLP import parseWithGoogle\nfrom bs4 import BeautifulSoup\nimport re\nimport pandas as pd\n\n\ndef load_stopwords():\n \"\"\" Loads 
NLTK stopwords \"\"\"\n import nltk\n nltk_dir = 'nltk'\n if nltk_dir not in nltk.data.path:\n nltk.data.path.append(nltk_dir)\n nltk.download('stopwords')\n\n\nclass annotateFile(object):\n \"\"\" Class to load, extract, and annotate a text file.\n\n Input\n file_path plain string path to text file to annotate. PDF only for now\n\n \"\"\"\n\n def __init__(self, file_path = 'usaid_evaluation_example.pdf'):\n self.file_path = file_path\n _, self.file_extension = os.path.splitext(self.file_path)\n\n # Straightaway preprocess some of the data\n self.extract_words_from_pdf()\n self.clean_pdf_text()\n\n\n def extract_words_from_pdf(self):\n pdfFileObj = open(self.file_path,'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n text = ''\n for page in range(0, pdfReader.getNumPages()):\n pageObj = pdfReader.getPage(page)\n text = text + ' ' + pageObj.extractText()\n \n self.pdf_words = text\n\n def clean_pdf_text(self):\n \"\"\" Starts with the raw text from a document, and returns a clean list of words.\n\n The list can still include duplicates and is in the original order.\n \"\"\"\n txt_clean = BeautifulSoup(self.pdf_words, \"html5lib\").get_text()\n txt_clean = re.sub(\"\\n\", ' ', txt_clean)\n txt_clean = re.sub(\"[^a-zA-Z]\", # The pattern to search for\n \" \", # The pattern to replace it with\n txt_clean)\n txt_clean = txt_clean.lower()\n # store for later use\n self.pdf_words_clean = txt_clean\n txt_clean_list = txt_clean.split() # Split into words, i.e. tokenize\n\n # remove stop words\n load_stopwords()\n from nltk.corpus import stopwords\n stop = set(stopwords.words('english'))\n txt_clean_list = [word for word in txt_clean_list if word not in stop]\n self.clean_list_of_words = txt_clean_list\n\n\n def parse_with_geoparserIO(self, start_at_char=10000, end_at_char=20000):\n self.df_geoparserIO = parseWithGeoparser(self.pdf_words_clean[start_at_char:end_at_char])\n return(self.df_geoparserIO)\n\n def parse_with_google_NLP(self, start_at_char=10000, end_at_char=20000):\n self.df_googleNLP = parseWithGoogle(self.pdf_words_clean[start_at_char:end_at_char])\n return(self.df_googleNLP)","sub_path":"geoparse_documents.py","file_name":"geoparse_documents.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"614418071","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@description: \n\n@author: baoqiang\n@time: 2018/11/28 10:05 PM\n\"\"\"\n\nimport requests\nimport json\n\nfrom xiaoscript import config\n\nZIROOM = 'ziroom'\n\nkeywords = ['望京', '望京西', '阜通']\n\nout_file = '{}/ziru3.json'.format(config.get_root_path())\n\n\ndef run():\n for keyword in keywords:\n print('process {}'.format(keyword))\n run_item(keyword)\n\n\ndef run_item(keyword):\n datas = []\n\n for i in range(10, 10001, 10):\n # for i in range(10, 30, 10):\n payload = {'step': i, 'key_word': keyword}\n res = requests.post('http://m.ziroom.com/list/ajax-get-data', data=payload, headers=headers)\n if 'info' in res.json()['data'] and res.json()['data']['info'] == u'\\u6570\\u636e\\u52a0\\u8f7d\\u5b8c\\u6bd5':\n break\n for item in res.json()['data']:\n datas.append(item)\n\n print('process cnt: {}/{}'.format(i, 10000))\n\n with open(out_file, 'a', encoding='utf-8') as fw:\n for item in datas:\n json.dump(item, fw, ensure_ascii=False, sort_keys=True)\n fw.write('\\n')\n\n\nheaders = {'Referer': 'http://m.ziroom.com/BJ/search.html',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like 
Gecko) '\n 'Chrome/57.0.2987.133 Safari/537.36'}\n\nif __name__ == '__main__':\n run()\n","sub_path":"xiaoscript/script/spider_ziru.py","file_name":"spider_ziru.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"104582478","text":"# -*- coding=utf-8 -*-\n\nfrom flask import request, session, redirect, url_for,\\\n render_template\nfrom flask import Blueprint\nfrom libs import login_required\n\n\nmember_app = Blueprint(\"member_app\", __name__)\n\n@member_app.before_request\n@login_required\ndef is_login():\n print(session['user'])\n\n\n@member_app.route(\"/\")\ndef member_index():\n print(\"hello\")\n return render_template(\"member/member_index.html\")","sub_path":"17.csrf攻击与防范/member/member_app.py","file_name":"member_app.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"288438110","text":"from optimizacion.Instrucciones.TablaSimbolos.InstruccionC3D import InstruccionC3D\n\nclass GotoC3D(InstruccionC3D):\n\n def __init__(self, valor, linea, columna):\n InstruccionC3D.__init__(self,linea,columna)\n self.valor = valor\n print(\"ENTRO A Goto\")\n \n\n def ejecutar(self, tabla, arbol):\n super().ejecutar(tabla,arbol)\n print(self.valor + \" linea: \" + str(self.linea) + \" columna: \" + str(self.columna))\n return \"goto .\" + self.valor\n","sub_path":"parser/fase2/team08/Tytus_SQLPARSER_G8/optimizacion/Instrucciones/C3D/GotoC3D.py","file_name":"GotoC3D.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"67434667","text":"import os\nimport numpy as np\nimport scipy.io as sio\nfrom numpy.core.records import fromarrays\nfrom scipy.io import savemat\nimport matplotlib.pyplot as plt\n#\n# folder_DB = '\\\\\\\\NAS\\\\Data\\\\R&D\\\\DB_Recs\\\\'\n#\n# for recFolder in os.listdir(folder_DB):\n# if os.path.isdir(recFolder):\n\nclass contrast_calculator:\n def __init__(self, block_size, patch_center_size, patch_perif_width):\n self.block_size = block_size\n self.patch_center_size = patch_center_size\n self.patch_perif_width = patch_perif_width\n self.patch_center_pos = int((block_size - 1) / 2)\n self.patch_center_d = int((patch_center_size - 1) / 2)\n\n def getContrasts(self, frame_patches):\n patch_width_to_ignore_center = int((self.block_size - 1) / 2) - self.patch_perif_width\n patch_perif_n = self.block_size ** 2 - (self.block_size - 2 * self.patch_perif_width) ** 2\n patch_centers = frame_patches[self.patch_center_pos - self.patch_center_d:self.patch_center_pos + self.patch_center_d + 1,\n self.patch_center_pos - self.patch_center_d:self.patch_center_pos + self.patch_center_d + 1, :]\n path_perifs = np.copy(frame_patches)\n path_perifs[self.patch_center_pos - patch_width_to_ignore_center:self.patch_center_pos + patch_width_to_ignore_center + 1,\n self.patch_center_pos - patch_width_to_ignore_center:self.patch_center_pos + patch_width_to_ignore_center + 1, :] = 0\n\n contrast_ratio_avg = (patch_centers.sum(axis=0).sum(axis=0) / patch_center_size**2) / (\n path_perifs.sum(axis=0).sum(axis=0) / patch_perif_n)\n contrast_ratio_max = patch_centers.max(axis=0).max(axis=0) / path_perifs.max(axis=0).max(axis=0)\n return contrast_ratio_avg, contrast_ratio_max\n\ndef load_calib_files(sensorFolder):\n X, Y, Z = [], [], []\n Xw, Yw = [], []\n for txtFile in ['Xp_' + str(n) + '.txt' for n in [1, 2, 3, 4]]:\n 
X.extend(np.array_split(np.loadtxt(sensorFolder + txtFile), 1)[0])\n for txtFile in ['Yp_' + str(n) + '.txt' for n in [1, 2, 3, 4]]:\n Y.extend(np.array_split(np.loadtxt(sensorFolder + txtFile), 1)[0])\n for txtFile in ['Xw_' + str(n) + '.txt' for n in [1, 2, 3, 4]]:\n Xw.extend(np.array_split(np.loadtxt(sensorFolder + txtFile), 1)[0])\n for txtFile in ['Yw_' + str(n) + '.txt' for n in [1, 2, 3, 4]]:\n Yw.extend(np.array_split(np.loadtxt(sensorFolder + txtFile), 1)[0])\n\n Z.extend(np.array_split(np.loadtxt(sensorFolder + 'Zw_1.txt'), 1)[0][0])\n X, Y, Z = np.array(X), np.array(Y), np.array(Z)\n return X, Y, Z, Xw, Yw\n\n\ndef gaussian_gen(sigma,offset,block_size):\n size_2 = (block_size - 1)/2\n x, y = np.meshgrid(np.linspace(-size_2, size_2, block_size), np.linspace(-size_2, size_2, block_size))\n x_mat = np.repeat(x, offset.shape[1], axis=1).reshape(block_size, block_size, -1)\n y_mat = np.repeat(y, offset.shape[1], axis=1).reshape(block_size, block_size, -1)\n offset_mat_x = offset[0,:].repeat(block_size*block_size).reshape(block_size,block_size,-1,order='F')\n offset_mat_y = offset[1,:].repeat(block_size*block_size).reshape(block_size,block_size,-1,order='F')\n return np.exp(-(((x_mat - offset_mat_x) ** 2 + (y_mat - offset_mat_y) ** 2)/ (2.0 * sigma ** 2)))\n\n\ndef frame_blocks_gen(frame,x,y,block_size):\n def get_im_patch(yx_coords):\n return frame[int(yx_coords[0] - (block_size - 1) / 2):int(yx_coords[0] + (block_size-1) / 2 + 1),\n int(yx_coords[1] - (block_size - 1) / 2):int(yx_coords[1] + (block_size - 1) / 2 + 1)]\n yx_coords_mat = np.array([y, x]).transpose().round()\n return np.apply_along_axis(get_im_patch, 1, yx_coords_mat)\n\ndef calc_depth(corr, contrast_avg, contrast_max, contrast_avg_thresh, contrast_max_thresh):\n corr[(contrast_avg < contrast_avg_thresh) | (contrast_max < contrast_max_thresh)] = 0\n if sum(corr)==0:\n return -1\n else:\n return corr.argmax()\n\n\n# input parameters\nframe_width = 640\nframe_height = 480\n\n\n# output parameters\nsigma = 1.5\nblock_size = 7\npatch_center_size = 3\npatch_perif_width = 1\ncontrast_avg_thresh, contrast_max_thresh = 1.1, 1.1\n\n# internal parameters\neps = 10**-10 # for stability (division by zero)\nmat_pyt_idx_offset = 1 # matlab starts with ind=1 while python ind = 0\n\n\ndef main(argv=None):\n recFolder, sensorFolder, mat_filename = argv\n\n # load calibration files\n X, Y, Z, Xw, Yw = load_calib_files(sensorFolder)\n # convert indexing from MATLAB to python\n X, Y = X - mat_pyt_idx_offset, Y - mat_pyt_idx_offset\n # adding eps to force round(0.5)=1, as in matlab\n X, Y = X + eps, Y + eps\n\n mid_idx = round(X.shape[1]/2) - mat_pyt_idx_offset\n\n # load video\n raw_video = np.fromfile(recFolder + 'video_raw', dtype=np.uint16, count=-1)\n video = raw_video.reshape(frame_width,frame_height,-1,order='F').transpose([1,0,2])\n n_frames = video.shape[2]\n\n # initialize contrast object\n contrast_calc = contrast_calculator(block_size, patch_center_size, patch_perif_width)\n\n # iterate through frames\n spotStruct_dict = {'X': [], 'Y': [], 'Z': [], 'Xw': [], 'Yw': []}\n for frame_id in range(n_frames):#range(n_frames):\n frame = video[:, :, frame_id]\n if frame_id%10 == 0:\n print(frame_id,'/',str(n_frames))\n n_spots = X.shape[0]\n X_out, Y_out, Z_out = np.zeros(n_spots), np.zeros(n_spots), np.zeros(n_spots)\n Xw_out, Yw_out = np.zeros(n_spots), np.zeros(n_spots)\n for spot_id in range(n_spots):\n\n # generate gaussians\n offsets = np.array([X[spot_id] - np.round(X[spot_id]), Y[spot_id] - np.round(Y[spot_id])])\n gaussians = gaussian_gen(sigma, offsets, 
block_size)\n\n # calculate contrasts on spot patches\n frame_patches = frame_blocks_gen(frame,X[spot_id][:],Y[spot_id][:],block_size).transpose([1,2,0])\n contrast_avg, contrast_max = contrast_calc.getContrasts(frame_patches)\n\n # calculate correlations on spot patches\n g_std = np.std(gaussians, axis=(0,1), ddof=1) + eps\n g_avg = np.mean(gaussians, axis=(0, 1))\n f_patches_std = np.std(frame_patches, axis=(0, 1), ddof=1) + eps\n f_patches_avg = np.mean(frame_patches, axis=(0, 1))\n # TODO: handle cases of zeros in denominator\n corr_elements = (f_patches_std > 0) * (g_std>0) * (frame_patches - f_patches_avg) / f_patches_std * (gaussians - g_avg) / g_std\n corr = 1/(block_size**2 - 1)*corr_elements.sum(axis=(0,1))\n\n # print(spot_id)\n Zindex = calc_depth(corr, contrast_avg, contrast_max, contrast_avg_thresh, contrast_max_thresh)\n if Zindex > -1:\n X_out[spot_id] = X[spot_id][Zindex]\n Y_out[spot_id] = Y[spot_id][Zindex]\n Z_out[spot_id] = Z[Zindex]\n Xw_out[spot_id] = Xw[spot_id][Zindex]\n Yw_out[spot_id] = Yw[spot_id][Zindex]\n else:\n X_out[spot_id] = X[spot_id][mid_idx]\n Y_out[spot_id] = Y[spot_id][mid_idx]\n Z_out[spot_id] = 0\n Xw_out[spot_id] = Xw[spot_id][mid_idx]\n Yw_out[spot_id] = Yw[spot_id][mid_idx]\n \n # feed dictionary\n spotStruct_dict['X'].append(X_out.tolist())\n spotStruct_dict['Y'].append(Y_out.tolist())\n spotStruct_dict['Z'].append(Z_out.tolist())\n spotStruct_dict['Xw'].append(Xw_out.tolist())\n spotStruct_dict['Yw'].append(Yw_out.tolist())\n\n # adjust dictionary for MATLAB\n spotStruct_dict['X'] = np.array(spotStruct_dict['X']).transpose() + mat_pyt_idx_offset\n spotStruct_dict['Y'] = np.array(spotStruct_dict['Y']).transpose() + mat_pyt_idx_offset\n spotStruct_dict['Z'] = np.array(spotStruct_dict['Z']).transpose()\n spotStruct_dict['Xw'] = np.array(spotStruct_dict['Xw']).transpose()\n spotStruct_dict['Yw'] = np.array(spotStruct_dict['Yw']).transpose()\n\n # save in mat file\n savemat(recFolder + mat_filename, {'spotStruct': spotStruct_dict})\n\n\nif __name__ == \"__main__\":\n recFolder = 'Z:\\\\R&D\\\\DB\\\\DB_Recs\\\\Rec_2017_10_15_15_42_14\\\\'\n sensorFolder = recFolder + 'Sensor\\\\'\n mat_filename = '_depth6.mat'\n main([recFolder, sensorFolder, mat_filename])\n#\n# spotStruct_dict[led][xyz2name['Y']].append(Y_out)\n# spotStruct_dict[led][xyz2name['Z']].append(Z_out)\n#\n# spotStruct(led).x = [spotStruct(led).x xPoint1];\n#\n# corr = 1/(block_size**2 - 1)*(frame_patches-f_patches_avg)\n#\n# sum(sum(((BinFrames - RepMeanBinFrames_resh). / RepSTDaMBiF_resh). * ...\n# (Gaussian - RepMeanGaussian_resh). 
/ RepSTDaMG_resh));\n# savemat('_depth5c.mat', {'spotStruct2': fromarrays(spotStruct_dict, names=['X', 'Y', 'Z'])})\n# # spotStruct = fromarrays(np.array(spotStruct_dict).transpose(1,0,2,3), names=['X', 'Y', 'Z'])\n# # dummy_struct = [[[[1,1,1],[1,1,1]],2,3,4],[1,2,3,4],[1,2,3,4]]\n# dummy_struct = [[[1,1,1],[1,1,1],[1,1,1],[1,1,1],[1,1,1]],[[1,1,1],[1,1,1],[1,1,1],[1,1,1],[1,1,1]],[1,2,3,4,5]]\n# dummy_struct2 = list(np.array(spotStruct_dict).transpose(1,0,2,3))\n# # a=np.array(spotStruct_dict).transpose(1,0,2,3).tolist()\n# spotStruct = fromarrays(dummy_struct2, names=['X', 'Y', 'Z'])\n# # spotStruct = fromarrays([[X[0], Y[0], Z],[X[1], Y[1], Z]], names=['X', 'Y', 'Z', 'Depth'])\n# savemat('_depth5.mat', {'spotStruct2': spotStruct})\n#\n#\n","sub_path":"getDepthMap.py","file_name":"getDepthMap.py","file_ext":"py","file_size_in_byte":9348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"166516733","text":"import os\nimport mimetypes\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom .models import Document, Category\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseForbidden\nfrom django.contrib.sites.shortcuts import get_current_site\n\n\ndef index(request):\n context = {\n 'categories': Category.objects.all(),\n }\n return render(request, 'documents/index.html', context)\n\n\ndef check_category_permissions(request, category_id):\n category = Category.objects.get(id=category_id)\n if not category.public and not request.user.has_perm('documents.change_category'):\n raise Http404\n else:\n context = {\n 'documents': Document.objects.filter(category_id=category_id),\n 'category': Category.objects.get(id=category_id),\n }\n return render(request, 'documents/category.html', context)\n\n\ndef get_category_qr(request, category_id):\n category = Category.objects.get(id=category_id)\n current_site = str(get_current_site(request))\n host = str(current_site + '/documents/category/')\n image = category.get_qr(host)\n response = HttpResponse(content_type='image/png')\n image.save(response, \"PNG\")\n return response\n\n\ndef get_document_qr(request, document_id):\n document = Document.objects.get(id=document_id)\n current_site = str(get_current_site(request)) + '/'\n image = document.get_qr(current_site)\n print(request.get_host())\n response = HttpResponse(content_type='image/png')\n image.save(response, \"PNG\")\n return response\n\n\ndef protected_view(request, path):\n access_granted = False\n\n document = Document.objects.get(file=path)\n if document.category.public:\n access_granted = True\n else:\n user = request.user\n if user.is_authenticated:\n if user.is_staff:\n access_granted = True\n else:\n if user.has_perm('documents.change_category'):\n access_granted = True\n\n if access_granted:\n response = HttpResponse()\n del response['Content-Type']\n response[\"X-Accel-Redirect\"] = os.path.join(settings.PROTECTED_MEDIA_LOCATION_PREFIX, path).encode('utf-8')\n print(response[\"X-Accel-Redirect\"])\n # response['X-Accel-Redirect'] = os.path.join('/internal/' + path).encode('utf-8')\n return response\n else:\n return HttpResponseForbidden('Not authorized to access this media.')\n","sub_path":"documents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
+{"seq_id":"460778315","text":"from LightningStuff import Atingidos\nfrom LightningStuff import Raios\nfrom RainStuff import Radar\nimport pickle\nimport numpy as np\nimport geojson\n\n'''\nPor enquanto ta bonitinho tudo junto, todavia\nvai ser necessario separar em funções ou sei la, pq cada tipo de dado\ntem um tempo diferente de atualização, então é bom otimizar\n'''\n\nstarnet = 'http://www.zeus.iag.usp.br/zeus_google_v3/zeus_0-15.kml'\nlinet = 'http://www.zeus.iag.usp.br/linet/linet_0-15.kml'\nlocal = './debug_data/linet_0-15.kml'\ngit_redemet = 'https://raw.githubusercontent.com/lucascantos/WebLightning/master/rawdata/radar_img/last_redemet.png'\n\nwith open('./static/MUSP.geojson', encoding=\"utf8\") as f:\n raw_data = geojson.load(f)\n\ndebug = 'n'\nif debug == 'y':\n ''' with open('./static/states.geojson', encoding=\"utf8\") as f:\n brasil_shape = geojson.load(f)\n raw_data = brasil_shape\n '''\n raios_url = local\nelse:\n raios_url = starnet\n\n\ndef lightning():\n # Raios\n lightnings = Raios(raios_url).crawler()\n # pickle.dump(lightnings, open('output/raios.p', 'wb'))\n return lightnings\n\n# def rain():\n# # Chuvas\n# Radar.ipmet_radar()\n# chuva = Radar(radar='ipmet')\n# chuva.filter_range('static/radar_img/last_ipmet.png')\n# return chuva\n\ndef hit_city(redec=False):\n # Cidades Atingidas\n raios = Atingidos(raios_url, raw_data)\n raios.array = np.array(raios.crawler(array=True), dtype=float)\n raios.hitcheck(trimmed=True)\n cidades_atingidas = raios.struckCities()\n if not redec:\n return cidades_atingidas\n # pickle.dump(list(cidades_atingidas), open('output/atingidos.p', 'wb'))\n\n # Ordenação por REDEC\n struck_generator = cidades_atingidas\n struck_cidades = [x['properties']['NM_MUNICIP'] for x in struck_generator]\n redecs = raios.agrupaCidades('static/redecSP.json', struck_cidades)\n return redecs\n # pickle.dump(list(redecs), open('output/redecs.p', 'wb'))\n\n\n# x = hit_city()\n# print ('alsdkj')\n# print(x)","sub_path":"constructors.py","file_name":"constructors.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"260889203","text":"import xlrd\nimport pymysql\n\ndef getConn(database='SuperstoreSampleDB'):\n args = dict(\n host='localhost',\n user='root',\n passwd='time4@FUN',\n db=database,\n charset='utf8'\n )\n conn = pymysql.connect(**args)\n return conn\n\n\n# define function for field nomalization\ndef normalizingDBFields(fieldList):\n for i in range(len(fieldList)):\n fieldList[i] = fieldList[i].replace(' ', '_').replace('-', '_').lower()\n \n\ndef excel2mysql(excelName,database='SuperstoreSampleDB',table='test2'):\n #下面代码作用:获取到excel中的字段和数据\n excel = xlrd.open_workbook(excelName)\n sheet = excel.sheet_by_index(0)\n row_number = sheet.nrows\n column_number = sheet.ncols\n\n # prepare field list\n field_list = sheet.row_values(0)\n normalizingDBFields(field_list)\n\n data_list = []\n for i in range(1,row_number):\n data_list.append(sheet.row_values(i))\n\n #下面代码作用:根据字段创建表,根据数据执行插入语句\n conn = getConn(database)\n cursor = conn.cursor()\n drop_sql = \"drop table if exists {}\".format(table)\n cursor.execute(drop_sql)\n create_sql = \"create table {}(\".format(table)\n\n for field in field_list[:-1]:\n create_sql += \"{} varchar(200),\".format(field.replace(' ', '_'))\n create_sql += \"{} varchar(200))\".format(field_list[-1].replace(' ', '_'))\n print('='*100)\n print(create_sql)\n print('='*100)\n cursor.execute(create_sql)\n\n for data in data_list:\n new_data = 
[\"'{}'\".format(pymysql.escape_string(str(i))) for i in data]\n insert_sql = \"insert into {} values({})\".format(\\\n table,','.join(new_data))\n print('*'*100)\n print(insert_sql)\n print('*'*100)\n cursor.execute(insert_sql)\n\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n excel2mysql(\"./excel2mysql/test1.xls\")","sub_path":"other-sdk-excel2mysql/excel2mysql.py","file_name":"excel2mysql.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"141486162","text":"# 13 March 2018 Miroslav Gasparek\n# Python bootcamp, lesson 34: Seaborn and data display\n\n# Import modules\nimport numpy as np\nimport pandas as pd\n\n# This is how we import the module of Matplotlib we'll be using\nimport matplotlib.pyplot as plt\n\n# Some pretty Seaborn settings\nimport seaborn as sns\nrc={'lines.linewidth': 2, 'axes.labelsize': 18, 'axes.titlesize': 18}\nsns.set(rc=rc)\n##############################\n# Close all open figures\nplt.close('all')\n\n# Load the data of the frog\ndf = pd.read_csv('data/frog_tongue_adhesion.csv', comment='#')\n# Rename impact force column\ndf = df.rename(columns={'impact force (mN)': 'impf'})\n\n# Mean impact force of frog I\nnp.mean(df.loc[df['ID']=='I', 'impf'])\n\n# Calculate the means and SEMs of all four frogs\n# For loop for mean and standard error of the mean\nmean_impf = np.empty(4)\nsem_impf = np.empty(4)\nfor i, frog in enumerate(['I', 'II', 'III', 'IV']):\n mean_impf[i] = np.mean(df.loc[df['ID']==frog, 'impf'])\n n = np.sum(df['ID']=='I')\n sem_impf = np.std(df.loc[df['ID']==frog, 'impf']) / np.sqrt(n)\n\nprint(mean_impf)\nprint(sem_impf)\n\n#####\n# More advanced: calculate by groupby() and mean() and sem()\n# gb_frog = dg.groupby('ID')\n# mean_impf = gb_frog['impf'].mean()\n# sem_impf = gb_frog['impf'].sem()\n####\n\n# Make a bar graph\nfig1 = plt.figure(1)\nplt.bar(np.arange(4), mean_impf, yerr=sem_impf, ecolor='black',\n tick_label=['I', 'II', 'III', 'IV'], align='center')\nplt.ylabel('impact force (nM)')\nfig1.show()\n\n# Easier plot with Seaborn\nfig2 = plt.figure(2)\nsns.barplot(data=df, x='ID', y='impf')\nplt.xlabel('')\nplt.ylabel('impact force (mN)')\nfig2.show()\n\n###\n# Message: do not make bar graphs.\n###\n\n# Bee swarm plot\nfig3 = plt.figure(3)\nsns.swarmplot(data=df, x='ID', y='impf')\nplt.margins(0.02)\nplt.xlabel('')\nplt.ylabel('impact force (mN)')\nfig3.show()\n\n# Bee swarm plot with the date of measurement\nfig4 = plt.figure(4)\nax = sns.swarmplot(data=df, x = 'ID', y = 'impf', hue='date')\nax.legend_.remove() # ???\nplt.margins(0.02)\nplt.xlabel('')\nplt.ylabel('impact force (mN)')\nfig4.show()\n\n# When too many data points for bee swarm plot, use box plot\nfig5 = plt.figure(5)\nsns.boxplot(data=df, x='ID', y='impf')\nplt.margins(0.02)\nplt.xlabel('frog ID')\nplt.ylabel('impact force (mN)')\nfig5.show()\n","sub_path":"seaborn_data_disp.py","file_name":"seaborn_data_disp.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"293320567","text":"import json\nimport requests\ndef check_rates(request_postal_code,reciepent_postal_code,weight):\n try:\n auth = 'grant_type=client_credentials&client_id=l751be1ed19c9b466c83c17565665323d4&client_secret=3746874d2d894e65a31d1d9475772db3'\n payload = {\n \"rateRequestControlParameters\": {\n \"returnTransitTimes\": True\n },\n \"requestedShipment\": {\n \"shipper\": {\n \"address\": {\n \"postalCode\": 
request_postal_code,\n \"countryCode\": \"US\"\n }\n },\n \"recipient\": {\n \"address\": {\n \"postalCode\": recipient_postal_code,\n \"countryCode\": \"US\"\n }\n },\n \"pickupType\": \"DROPOFF_AT_FEDEX_LOCATION\",\n \"shippingChargesPayment\": {\n \"paymentType\": \"SENDER\",\n \"payor\": {\n \"responsibleParty\": {\n \"accountNumber\": {\n \"value\": \"740561073\"\n }\n }\n }\n },\n \"rateRequestType\": [\n \"ACCOUNT\",\n \"LIST\"\n ],\n \"requestedPackageLineItems\": [\n {\n \"weight\": {\n \"units\": \"LB\",\n \"value\": weight\n }\n }\n ]\n }\n }\n\n url_auth = \"https://apis-sandbox.fedex.com/oauth/token\"\n\n headers_auth = {\n 'Content-Type': \"application/x-www-form-urlencoded\"\n }\n\n token = requests.request(\"POST\", url_auth, data=auth, headers=headers_auth)\n\n token_text = token.text\n token_text = json.loads(token_text)\n\n\n payload = json.dumps(payload)\n\n\n url = \"https://apis-sandbox.fedex.com/rate/v1/rates/quotes\"\n\n # payload = input # 'input' refers to JSON Payload\n headers = {\n 'Content-Type': \"application/json\",\n 'X-locale': \"en_US\",\n 'Authorization': \"Bearer \" + token_text['access_token']\n }\n\n response = requests.request(\"POST\", url, data=payload, headers=headers)\n response_text = response.text\n response_text = json.loads(response_text)\n message = response_text['output']['rateReplyDetails'][0]['ratedShipmentDetails'][0]['totalNetCharge']\n return message\n except (KeyError, IndexError):\n # The API replied with an error payload instead of a rate quote\n message = response_text['errors'][0]['message']\n return message","sub_path":"hackathon/rates.py","file_name":"rates.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"413658744","text":"#!/usr/bin/env python3\r\n# Author: Armit\r\n# Create Time: 2019/10/05 \r\n\r\n# minimal generator set - brute force\r\n\r\nimport time\r\nimport numpy as np\r\n\r\nNUMBERS = np.array([ # '0' ~ '9'\r\n 0x3F, 0x06, 0x5B, 0x4F, 0x66, \r\n 0x6D, 0x7D, 0x07, 0x7F, 0x6F,\r\n], dtype=np.uint8)\r\nNOT = lambda x, y: ~x\r\nAND = lambda x, y: x & y\r\nOR = lambda x, y: x | y\r\nXOR = lambda x, y: x ^ y\r\nOPS = [ NOT, AND, OR, XOR ]\r\n\r\nto_bin = lambda x, w=8: bin(x)[2:].rjust(w, '0')\r\n\r\ndef timer(fn):\r\n def wrapper(*args, **kwargs):\r\n s = time.time()\r\n ret = fn(*args, **kwargs)\r\n t = time.time()\r\n print('[Timer] %.4fs' % (t - s))\r\n return ret\r\n return wrapper\r\n\r\n@timer\r\ndef MGS_bf(N=NUMBERS, OP=OPS):\r\n target, solutions = set(N), [ ]\r\n nlen = len(N)\r\n for sel in range(2**nlen):\r\n sel_bin = to_bin(sel, nlen)\r\n partial = { N[i] for i in range(len(sel_bin)) if sel_bin[i] == '1' }\r\n partial_orig = partial.copy() # save orig for ans\r\n \r\n size = 0\r\n while size != len(partial): # search result extended\r\n size = len(partial)\r\n new_partial = { op(x, y) for op in OP for x in partial for y in partial }\r\n partial.update(new_partial)\r\n \r\n if partial.issuperset(target):\r\n solutions.append(partial_orig)\r\n break\r\n\r\n print('>> %d solutions found in total' % len(solutions))\r\n groups = { }\r\n for sol in solutions:\r\n sz = len(sol)\r\n if sz in groups: groups[sz].append(sol)\r\n else: groups[sz] = [ sol ]\r\n\r\n min_sols = groups.get(sorted(groups)[0]) # just need the shortest groups\r\n return [sorted([np.where(NUMBERS == g)[0][0] for g in sol]) for sol in min_sols]\r\n \r\nif __name__ == '__main__':\r\n print('over {OR, AND, XOR, NOT}')\r\n for sol in MGS_bf():\r\n print(sol)\r\n print()\r\n\r\n print('over {OR, AND}')\r\n for sol in 
MGS_bf(NUMBERS, [OR, AND]):\r\n print(sol)\r\n print()\r\n \r\n print('over {OR, NOT}')\r\n for sol in MGS_bf(NUMBERS, [OR, NOT]):\r\n print(sol)\r\n print()\r\n \r\n print('over {AND, NOT}')\r\n for sol in MGS_bf(NUMBERS, [AND, NOT]):\r\n print(sol)\r\n print()\r\n","sub_path":"source/downloads/src/MGS_bf.py","file_name":"MGS_bf.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"320815932","text":"#----------------------- Script initialisation -----------------------\r\n# TensorFlow and tf.keras\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\n# Helper libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nprint(tf.__version__)\r\n\r\n#----------------------- Start of training ------------------------------\r\n\r\n# Import the FashionMNIST dataset\r\nfashion_mnist = keras.datasets.fashion_mnist\r\n\r\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\r\n\r\n# Define the class names\r\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', \r\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\r\n\r\n# Gives the dimensions of an array\r\n#train_images.shape\r\n\r\n# Gives the number of elements in an array\r\n#len(train_labels)\r\n\r\n#train_labels\r\n\r\n#test_images.shape\r\n\r\n#len(test_labels)\r\n\r\n# Displays an image, colour-coding each pixel by its value\r\n\r\n#plt.figure()\r\n#plt.imshow(train_images[0])\r\n#plt.colorbar()\r\n#plt.grid(False)\r\n#plt.plot()\r\n\r\n\r\n# Rescale the pixel values so they lie between 0 and 1\r\n# instead of between 0 and 255\r\ntrain_images = train_images / 255.0\r\n\r\ntest_images = test_images / 255.0\r\n\r\n# Display the training set as images, with each class labelled underneath\r\n\r\n#plt.figure(figsize=(10,10))\r\n#for i in range(25):\r\n# plt.subplot(5,5,i+1)\r\n# plt.xticks([])\r\n# plt.yticks([])\r\n# plt.grid(False)\r\n# plt.imshow(train_images[i], cmap=plt.cm.binary)\r\n# plt.xlabel(class_names[train_labels[i]])\r\n\r\n\r\n# Add the layers\r\n# Flatten() => turns the (28, 28) arrays into a vector of 784 elements\r\n# Dense(n, activation) => fully connected layer with n nodes\r\nmodel = keras.Sequential([\r\n keras.layers.Flatten(input_shape=(28, 28)),\r\n keras.layers.Dense(128, activation=tf.nn.relu),\r\n keras.layers.Dense(10, activation=tf.nn.softmax)\r\n])\r\n\r\n\r\n# Compile the model\r\nmodel.compile(optimizer=tf.train.AdamOptimizer(), \r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n\r\n# Start training\r\n# epochs = number of passes over the data (about 95% accuracy is reached around 30 epochs)\r\nmodel.fit(train_images, train_labels, epochs=25)\r\n\r\n# Check performance on the test set\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\r\n\r\nprint('Test accuracy:', test_acc)\r\n\r\n#---------------------------------- End of training\r\n\r\n#---------------------------------- Predictions\r\n\r\n# Store the predictions in an array\r\npredictions = model.predict(test_images)\r\n\r\n# Predictions for the first image:\r\n# an array of 10 elements, each giving the predicted value for the matching class\r\n#predictions[0]\r\n\r\n# Shows the highest prediction, i.e. the class the network settles on\r\n#np.argmax(predictions[0])\r\n\r\n# Gives the class id for 
element 0, which lets us check that the prediction is correct\r\n#test_labels[0]\r\n\r\n\r\n#def plot_image(i, predictions_array, true_label, img):\r\n# predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\r\n# plt.grid(False)\r\n# plt.xticks([])\r\n# plt.yticks([])\r\n# \r\n# plt.imshow(img, cmap=plt.cm.binary)\r\n#\r\n# predicted_label = np.argmax(predictions_array)\r\n# if predicted_label == true_label:\r\n# color = 'blue'\r\n# else:\r\n# color = 'red'\r\n# \r\n# plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\r\n# 100*np.max(predictions_array),\r\n# class_names[true_label]),\r\n# color=color)\r\n\r\n#def plot_value_array(i, predictions_array, true_label):\r\n# predictions_array, true_label = predictions_array[i], true_label[i]\r\n# plt.grid(False)\r\n# plt.xticks([])\r\n# plt.yticks([])\r\n# thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\r\n# plt.ylim([0, 1]) \r\n# predicted_label = np.argmax(predictions_array)\r\n# \r\n# thisplot[predicted_label].set_color('red')\r\n# thisplot[true_label].set_color('blue')\r\n \r\n#i = 0\r\n#plt.figure(figsize=(6,3))\r\n#plt.subplot(1,2,1)\r\n#plot_image(i, predictions, test_labels, test_images)\r\n#plt.subplot(1,2,2)\r\n#plot_value_array(i, predictions, test_labels)\r\n\r\n#i = 12\r\n#plt.figure(figsize=(6,3))\r\n#plt.subplot(1,2,1)\r\n#plot_image(i, predictions, test_labels, test_images)\r\n#plt.subplot(1,2,2)\r\n#plot_value_array(i, predictions, test_labels)\r\n\r\n# Plot the first X test images, their predicted label, and the true label\r\n# Color correct predictions in blue, incorrect predictions in red\r\n#num_rows = 5\r\n#num_cols = 3\r\n#num_images = num_rows*num_cols\r\n#plt.figure(figsize=(2*2*num_cols, 2*num_rows))\r\n#for i in range(num_images):\r\n# plt.subplot(num_rows, 2*num_cols, 2*i+1)\r\n# plot_image(i, predictions, test_labels, test_images)\r\n# plt.subplot(num_rows, 2*num_cols, 2*i+2)\r\n# plot_value_array(i, predictions, test_labels)\r\n \r\n# Take an image from the test set to run a prediction on\r\nimg = test_images[0]\r\n\r\n#print(img.shape)\r\n\r\n# Add the image to the batch of images whose classes we want to predict\r\nimg = (np.expand_dims(img,0))\r\n\r\nprint(img.shape)\r\n\r\n# Predict the image's class\r\npredictions_single = model.predict(img)\r\n\r\n#print(predictions_single)\r\n\r\n#plot_value_array(0, predictions_single, test_labels)\r\n#_ = plt.xticks(range(10), class_names, rotation=45)\r\n\r\n# Gives the most likely class for image 0\r\n#np.argmax(predictions_single[0])","sub_path":"Reseau_Apprentissage/TensorFlow_Exemple.py","file_name":"TensorFlow_Exemple.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"444043245","text":"\"\"\"\nFunctions for creating XML output.\n\"\"\"\nimport logging\nfrom typing import Optional, Type\n\nfrom flask_saml2.signing import Digester, Signer, get_signature_xml\nfrom flask_saml2.types import X509\n\nfrom .xml_templates import ResponseTemplate, XmlTemplate\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_in_response_to(params):\n \"\"\"Insert InResponseTo if we have a RequestID.\"\"\"\n request_id = params.get('REQUEST_ID', None)\n if request_id:\n return {\n 'IN_RESPONSE_TO': request_id,\n **params,\n }\n else:\n return params\n\n\ndef get_assertion_xml(\n template_klass: Type[XmlTemplate],\n parameters: dict,\n *,\n digester: Optional[Digester] = None,\n signer: 
Optional[Signer] = None,\n certificate: Optional[X509] = None,\n) -> XmlTemplate:\n \"\"\"Returns XML for the assertion, signed if a signer is supplied.\"\"\"\n params = _get_in_response_to(parameters)\n\n assertion = template_klass(params)\n if signer is None:\n return assertion\n\n # Sign it.\n assertion.add_signature(get_signature_xml(\n certificate, digester, signer,\n assertion.get_xml_string(), params['ASSERTION_ID']))\n\n return assertion\n\n\ndef get_response_xml(\n parameters: dict,\n assertion: XmlTemplate,\n *,\n digester: Optional[Digester] = None,\n signer: Optional[Signer] = None,\n certificate: Optional[X509] = None,\n) -> XmlTemplate:\n \"\"\"Returns XML for the response, signed if a signer is supplied.\"\"\"\n params = _get_in_response_to(parameters)\n\n response = ResponseTemplate(params, assertion.xml)\n\n if signer is None:\n return response\n\n # Sign it.\n response.add_signature(get_signature_xml(\n certificate, digester, signer,\n response.get_xml_string(), params['RESPONSE_ID']))\n\n return response\n","sub_path":"flask_saml2/idp/xml_render.py","file_name":"xml_render.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}