diff --git "a/6396.jsonl" "b/6396.jsonl" new file mode 100644--- /dev/null +++ "b/6396.jsonl" @@ -0,0 +1,715 @@ +{"seq_id":"379575534","text":"# -*- coding:utf-8 -*-\n\nfrom connexion_bd import connexion_bd\nfrom connexion_bd import deconnexion_bd\n\n\nclass Profil(object):\n def __init__(self):\n self.id = 0\n self.nom = \"\"\n self.prenom = \"\"\n self.courriel = \"\"\n self.mdp = \"\"\n self.date_creation = \"\"\n self.confirme = False\n\n\nclass GestionnaireProfil(object):\n def __init__(self, config):\n self.config = config\n\n\n def cree_profil(self, profil):\n connexion = connexion_bd(self.config)\n curseur = connexion.cursor()\n\n commande = \"\"\"\n INSERT INTO\n profils (nom, prenom, courriel, mdp)\n VALUES\n (%(nom)s, %(prenom)s, %(courriel)s, %(mdp)s)\n ;\n SELECT LAST_INSERT_ID();\n \"\"\"\n\n for execution in curseur.execute(commande, vars(profil), multi=True):\n pass\n\n resultat = curseur.fetchone()\n\n deconnexion_bd(connexion, True)\n\n return resultat[0]\n\n\n def cherche_profil(self, courriel):\n connexion = connexion_bd(self.config)\n curseur = connexion.cursor()\n\n commande = \"\"\"\n SELECT\n nom, prenom, id\n FROM\n profils\n WHERE\n courriel=%(courriel)s\n ;\n \"\"\"\n\n curseur.execute(commande, {'courriel':courriel})\n\n resultat = curseur.fetchone()\n\n deconnexion_bd(connexion)\n\n if resultat is None:\n return None\n\n profil = Profil()\n profil.nom = resultat[0]\n profil.prenom = resultat[1]\n profil.id = resultat[2]\n\n return profil\n\n\n def supprime_profil(self, profil):\n connexion = connexion_bd(self.config)\n curseur = connexion.cursor()\n\n commande = \"\"\"\n DELETE FROM\n profils\n WHERE\n id=%(id)s\n ;\n \"\"\"\n\n curseur.execute(commande, vars(profil))\n\n deconnexion_bd(connexion, True)\n","sub_path":"etincelle/corps/profil.py","file_name":"profil.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"557420454","text":"#!/usr/bin/python\n\"\"\" Setup.py for Cog\n http://nedbatchelder.com/code/cog\n\n Copyright 2004-2016, Ned Batchelder.\n\"\"\"\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nextra_options = {}\ntry:\n # For building on Windows, need to fix the tar file after it's made.\n # Install https://bitbucket.org/ned/fixtar, then this will work.\n from setuptools_fixtar import fixtar\nexcept ImportError:\n pass\nelse:\n extra_options['cmdclass'] = {\n 'fixtar': fixtar.FixtarCommand,\n }\n\nsetup(\n name = 'cogapp', # Because there's already a Cog in pypi! 
:(\n version = '2.5.1',\n url = 'http://nedbatchelder.com/code/cog',\n author = 'Ned Batchelder',\n author_email = 'ned@nedbatchelder.com',\n description =\n 'Cog: A code generator for executing Python snippets in source files.',\n\n long_description = '''\\\n Docs at `http://nedbatchelder.com/code/cog `_.\n\n Code repository and issue tracker are at\n `bitbucket.org `_.\n ''',\n\n classifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Software Development :: Code Generators\",\n ],\n\n license = 'MIT',\n\n packages = [\n 'cogapp',\n ],\n\n scripts = [\n 'scripts/cog.py',\n ],\n\n **extra_options\n )\n","sub_path":"bin/lib/cogapp-2.5.1/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"132821741","text":"import numpy as np\nfrom random import *\na=np.array([[randint(10,30) for i in range(10)],[randint(10,30) for i in range(10)]])\n#print(a,type(a))\n\nw=np.zeros(((3,3)))\n#print(w)\n\nr=np.eye(6)\n#print(r)\ns=np.empty((3,3))\n#print(s)\n\nq=np.arange(10,200,1)\n#print(q)\n\nn=np.linspace(1,20,300)\nprint(n)","sub_path":"test (2).py","file_name":"test (2).py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"41380846","text":"# -*- coding: utf-8 -*-\n# Copyright 2016 Mobicage NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.1@@\nimport httplib\nimport logging\nimport urllib\n\nfrom google.appengine.ext import ndb\nfrom mcfw.exceptions import HttpBadRequestException, HttpException, HttpForbiddenException\nfrom plugin_loader import get_config\nfrom plugins.its_you_online_auth.libs.itsyouonline import Client\nfrom plugins.its_you_online_auth.models import OauthLoginState, Profile\nfrom plugins.its_you_online_auth.plugin_consts import Scopes, OAUTH_BASE_URL, NAMESPACE\nfrom plugins.its_you_online_auth.plugin_utils import get_sub_organization\nimport requests\n\n\ndef get_access_response(config, login_state, code):\n params = {\n 'client_id': config.root_organization.name,\n 'client_secret': config.root_organization.web.client_secret,\n 'code': code,\n 'redirect_uri': config.root_organization.web.redirect_uri,\n 'state': login_state.state\n }\n access_token_url = '%s/access_token?%s' % (OAUTH_BASE_URL, urllib.urlencode(params))\n response = requests.post(access_token_url, params)\n\n content = response.json() if response.status_code == httplib.OK else response.content\n logging.debug('access_response: 
code %d, content %s', response.status_code, content)\n return response.status_code, content\n\n\ndef has_access_to_organization(client, organization_id, username):\n r = client.api.organizations.GetOrganizationUsers(organization_id).json()\n for u in r.get('users', []):\n if u['username'] == username:\n return True\n return False\n\n\ndef get_user_scopes(code, state):\n \"\"\"\n Args:\n code (unicode)\n state (unicode)\n \"\"\"\n if not (code or state):\n logging.debug('Code or state are missing.\\nCode: %s\\nState:%s', code, state)\n raise HttpBadRequestException()\n\n login_state = OauthLoginState.create_key(state).get()\n if not login_state:\n logging.debug('Login state not found')\n raise HttpBadRequestException()\n\n config = get_config(NAMESPACE)\n\n status_code, access_result = get_access_response(config, login_state, code)\n if status_code != httplib.OK:\n exception = HttpException()\n exception.http_code = status_code\n exception.error = access_result\n raise exception\n\n username = access_result['info']['username']\n scope = access_result.get('scope')\n\n if login_state.organization_id == config.root_organization.name:\n if login_state.source == \"app\":\n raise HttpForbiddenException()\n else:\n sub_org = login_state.organization_id\n else:\n sub_org = get_sub_organization(config, login_state.organization_id)\n\n expected_scope = 'user:memberof:%s' % sub_org\n if not scope or expected_scope not in scope:\n raise HttpForbiddenException()\n\n profile_key = Profile.create_key(login_state.source, username)\n profile = profile_key.get() or Profile(key=profile_key)\n profile.access_token = access_result.get('access_token')\n profile.organization_id = login_state.organization_id\n login_state.completed = True\n ndb.put_multi([profile, login_state])\n\n client = Client()\n client.oauth.LoginViaClientCredentials(config.root_organization.name, config.root_organization.web.client_secret)\n\n scopes = []\n uber_admin_organization = '%s.admins' % config.root_organization.name\n admin_organization = '%s.admins' % sub_org\n if has_access_to_organization(client, uber_admin_organization, username):\n scopes.append(Scopes.ADMIN)\n if has_access_to_organization(client, admin_organization, username):\n scopes.append(Scopes.get_organization_scope(Scopes.ORGANIZATION_ADMIN, login_state.organization_id))\n scopes.append(Scopes.get_organization_scope(Scopes.ORGANIZATION_MEMBER, login_state.organization_id))\n elif not has_access_to_organization(client, sub_org, username):\n raise HttpForbiddenException()\n return username, scopes\n","sub_path":"plugins/its_you_online_auth/bizz/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"373614037","text":"\nclass TimedCycle:\n def __init__(self, max_frame, ticks, movements, start_frame=0):\n self.current_tick = 0\n self.max_frame = max_frame\n self.frame = start_frame\n self.movements = movements\n self.max_ticks = ticks\n self.config = (max_frame, start_frame, movements, ticks)\n\n self.one = False\n\n def tick(self):\n self.current_tick += 1\n if self.current_tick > self.max_ticks[self.frame]:\n self.frame += 1\n self.current_tick = 0\n if self.frame >= self.max_frame:\n self.frame = 0\n self.one = True\n\n def get_movement(self):\n return self.movements[self.frame]\n\n def get_frame(self):\n return self.frame\n\n def reset(self):\n self.current_tick = 0\n self.max_frame, self.frame, self.movements, self.max_ticks = 
self.config\n self.one = False\n","sub_path":"server/server_cycles.py","file_name":"server_cycles.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"375054834","text":"import pytest\nimport math\n\ndef test_calc_rmsd():\n from pyxmolpp2.geometry import XYZ, AngleValue\n from pyxmolpp2.crystal import LatticeVectors, BestShiftFinder\n\n\n latticeVectors = LatticeVectors(XYZ(1,4,1),XYZ(5,1,1),XYZ(7,1,4))\n\n bestShiftFinder = BestShiftFinder(latticeVectors)\n\n latticeVectors.scale_by(0.5)\n bestShiftFinder.scale_lattice_by(0.5)\n\n ref = XYZ(0,0,0)\n var = latticeVectors.translate(ref,1,4,43)\n # print()\n # print(var.x,var.y,var.z)\n dr, shift = bestShiftFinder.find_best_shift(ref,var)\n # print(shift.x,shift.y,shift.z)\n var = var + shift\n\n assert var.x == pytest.approx(ref.x)\n assert var.y == pytest.approx(ref.y)\n assert var.z == pytest.approx(ref.z)\n assert dr == pytest.approx(0)\n\n\n\n\n","sub_path":"pytests/xmol/crystal/test_lattice_vectors.py","file_name":"test_lattice_vectors.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"168045363","text":"def pageCount(n, p):\r\n even=0\r\n if(n%2==0):\r\n even=1 \r\n if(p==1 or p==n):\r\n return('0')\r\n elif((p-1) < (n-p)):\r\n return(p//2)\r\n else:\r\n if (even==0):\r\n return((n-p)//2)\r\n else:\r\n if((n-p)%2 == 0):\r\n return((n-p)//2)\r\n else:\r\n return(((n-p)//2)+1)\r\nif __name__ == '__main__':\r\n n = int(input())\r\n\r\n p = int(input())\r\n\r\n result = pageCount(n, p)\r\n\r\n print(result)\r\n","sub_path":"Drawing Book.py","file_name":"Drawing Book.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"412562727","text":"from collections import OrderedDict\n\nfrom anytree import LevelOrderIter, findall\n\nfrom devito.ir.stree.tree import (ScheduleTree, NodeIteration, NodeConditional,\n NodeExprs, NodeSection, NodeHalo, insert)\nfrom devito.ir.support.space import IterationSpace\nfrom devito.mpi import HaloScheme, HaloSchemeException\nfrom devito.parameters import configuration\nfrom devito.tools import flatten\n\n__all__ = ['st_build']\n\n\ndef st_build(clusters):\n \"\"\"\n Create a :class:`ScheduleTree` from a :class:`ClusterGroup`.\n \"\"\"\n # ClusterGroup -> Schedule tree\n stree = st_schedule(clusters)\n\n # Add in section nodes\n stree = st_section(stree)\n\n # Add in halo update nodes\n stree = st_make_halo(stree)\n\n return stree\n\n\ndef st_schedule(clusters):\n \"\"\"\n Arrange an iterable of :class:`Cluster`s into a :class:`ScheduleTree`.\n \"\"\"\n stree = ScheduleTree()\n\n mapper = OrderedDict()\n for c in clusters:\n pointers = list(mapper)\n\n # Find out if any of the existing nodes can be reused\n index = 0\n root = stree\n for it0, it1 in zip(c.itintervals, pointers):\n if it0 != it1 or it0.dim in c.atomics:\n break\n root = mapper[it0]\n index += 1\n if it0.dim in c.guards:\n break\n\n # The reused sub-trees might acquire some new sub-iterators\n for i in pointers[:index]:\n mapper[i].ispace = IterationSpace.merge(mapper[i].ispace, c.ispace)\n # Later sub-trees, instead, will not be used anymore\n for i in pointers[index:]:\n mapper.pop(i)\n\n # Add in Iterations\n for i in c.itintervals[index:]:\n root = NodeIteration(c.ispace.project([i.dim]), root)\n mapper[i] = root\n\n # Add in Expressions\n NodeExprs(c.exprs, 
c.shape, c.ops, c.traffic, root)\n\n # Add in Conditionals\n for k, v in mapper.items():\n if k.dim in c.guards:\n node = NodeConditional(c.guards[k.dim])\n v.last.parent = node\n node.parent = v\n\n return stree\n\n\ndef st_make_halo(stree):\n \"\"\"\n Add :class:`NodeHalo` to a :class:`ScheduleTree`. A halo node describes\n what halo exchanges should take place before executing the sub-tree.\n \"\"\"\n if not configuration['mpi']:\n # TODO: This will be dropped as soon as stronger analysis will have\n # been implemented\n return stree\n\n processed = {}\n for n in LevelOrderIter(stree, stop=lambda i: i.parent in processed):\n if not n.is_Iteration:\n continue\n exprs = flatten(i.exprs for i in findall(n, lambda i: i.is_Exprs))\n try:\n halo_scheme = HaloScheme(exprs)\n if n.dim in halo_scheme.dmapper:\n processed[n] = NodeHalo(halo_scheme)\n except HaloSchemeException:\n # We should get here only when trying to compute a halo\n # scheme for a group of expressions that belong to different\n # iteration spaces. We expect proper halo schemes to be built\n # as the `stree` visit proceeds.\n # TODO: However, at the end, we should check that a halo scheme,\n # possibly even a \"void\" one, has been built for *all* of the\n # expressions, and error out otherwise.\n continue\n except RuntimeError as e:\n if configuration['mpi'] is True:\n raise RuntimeError(str(e))\n\n for k, v in processed.items():\n insert(v, k.parent, [k])\n\n return stree\n\n\ndef st_section(stree):\n \"\"\"\n Add :class:`NodeSection` to a :class:`ScheduleTree`. A section defines a\n sub-tree with the following properties: ::\n\n * The root is a node of type :class:`NodeSection`;\n * The immediate children of the root are nodes of type :class:`NodeIteration`\n and have same parent.\n * The :class:`Dimension` of the immediate children are either: ::\n * identical, OR\n * different, but all of type :class:`SubDimension`;\n * The :class:`Dimension` of the immediate children cannot be a\n :class:`TimeDimension`.\n \"\"\"\n\n class Section(object):\n def __init__(self, node):\n self.parent = node.parent\n self.dim = node.dim\n self.nodes = [node]\n\n def is_compatible(self, node):\n return (self.parent == node.parent\n and (self.dim == node.dim or node.dim.is_Sub))\n\n # Search candidate sections\n sections = []\n for i in range(stree.height):\n # Find all sections at depth `i`\n section = None\n for n in findall(stree, filter_=lambda n: n.depth == i):\n if any(p in flatten(s.nodes for s in sections) for p in n.ancestors):\n # Already within a section\n continue\n elif not n.is_Iteration or n.dim.is_Time:\n section = None\n elif section is None or not section.is_compatible(n):\n section = Section(n)\n sections.append(section)\n else:\n section.nodes.append(n)\n\n # Transform the schedule tree by adding in sections\n for i in sections:\n insert(NodeSection(), i.parent, i.nodes)\n\n return stree\n","sub_path":"devito/ir/stree/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"461107329","text":"#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n# author zhangchao 2016-06-15\r\n\r\n\r\n\r\nimport datetime\r\nfrom IPRedisService import IPRedisService\r\nfrom omen.lib.elasticsearchLib import elasticsearchLib\r\nfrom omen.db.SuspensionIpModel import SuspensionIpModel\r\nimport logging\r\nlogging.basicConfig(level = logging.INFO)\r\nclass IPAnalysiseService:\r\n\r\n def 
__init__(self,redis_config={},hash_key = 'suspension:ip'):\r\n self.redis_config = redis_config;\r\n print(self.redis_config)\r\n self.hash_key = hash_key\r\n\r\n # from es get ip data to es\r\n # url: es host\r\n # index :es index\r\n # ip_viste_total :设置封停上限\r\n # rangeMinute : 查询时间上限 ,已分钟作为单位\r\n # sort_type :设置排序方式\r\n # lte:设置查询的结束时间\r\n\r\n def analysise_ip_data_from_es(self,url,index,path,ip_viste_total = 5000,rangeMinute=5,sort_type = True,lte = datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S')):\r\n print(url)\r\n logging.info(\" analysise ip address start \")\r\n logging.info(\" ip limit value: %s\"%ip_viste_total )\r\n elasticsearch_service = elasticsearchLib(url)\r\n forbid_ip_address = []\r\n all_ip = elasticsearch_service.getDataByIndex(index,path,rangeMinute,sort_type,lte)\r\n if all_ip != \"notfound\":\r\n suspension_ip_model = SuspensionIpModel()\r\n forbid_ip_address = []\r\n for i in all_ip:\r\n if i[1] > ip_viste_total:\r\n logging.info(\" forbid ip addrss %s\"%i[0])\r\n forbid_ip_address.append(i[0])\r\n for i in forbid_ip_address:\r\n i = str(i)\r\n ip_exist = suspension_ip_model.check_ip_is_in_db(i)\r\n if ip_exist:\r\n logging.info(\" update exist ip address \")\r\n suspension_ip_model.update_ip_status(ip_address =i,Status= 0)\r\n else:\r\n ip_data = {}\r\n ip_data['Status'] = 0;\r\n ip_data['IpAddress'] = i\r\n ip_data['SuspensionTime'] = str(datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S'))\r\n logging.info(\" add ip address to redis\")\r\n suspension_ip_model.add_ip_to_db(ip_data)\r\n self.add_violation_to_redis(self.hash_key,i)\r\n logging.info(\" analysise ip address end \")\r\n\r\n #将超出上限的ip存入到redis中\r\n def add_violation_to_redis(self,hash_key,ip_datas):\r\n ip_redis_service = IPRedisService(self.redis_config,0)\r\n ip_redis_service.add_ip_redis(hash_key,ip_datas)\r\n\r\n\r\n #将ip从对应的redis中移除\r\n def rem_violation_ip_from_redis(self,hash_key,ip_datas):\r\n logging.info(\" rem ip address from redis\")\r\n ip_redis_service = IPRedisService(self.redis_config,0)\r\n ip_redis_service.remove_ip_from_redis(hash_key,ip_datas)\r\n\r\n\r\n# test = IPAnalysiseService()\r\n# ip_data = test.analysise_ip_data_from_es(['http://172.16.9.80:9200/'],'ns',\"path:otv2\",100,30,True)\r\n\r\n","sub_path":"web_design/flask/src/omen/service/IPAnalysiseService.py","file_name":"IPAnalysiseService.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"534070784","text":"# -*- coding: utf-8 -*-\n\nimport ctypes\nfrom ctypes.util import find_library\n\nimport constants\nfrom info import strerror\n\nlibopus = ctypes.CDLL(find_library('opus'))\nc_int_pointer = ctypes.POINTER(ctypes.c_int)\n\n\nclass Encoder(ctypes.Structure):\n \"\"\"Opus encoder state.\n\n This contains the complete state of an Opus encoder.\n \"\"\"\n\n pass\n\nEncoderPointer = ctypes.POINTER(Encoder)\n\n_get_size = libopus.opus_encoder_get_size\n_get_size.argtypes = (ctypes.c_int,)\n_get_size.restype = ctypes.c_int\n\ndef get_size(channels):\n \"\"\"Gets the size of an OpusEncoder structure.\"\"\"\n\n if not channels in (1, 2):\n raise ValueError('Wrong channels value. 
Must be equal to 1 or 2')\n\n return _get_size(channels)\n\n_create = libopus.opus_encoder_create\n_create.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int, c_int_pointer)\n_create.restype = EncoderPointer\n\ndef create(fs, channels, application):\n \"\"\"Allocates and initializes an encoder state.\"\"\"\n\n result_code = ctypes.c_int()\n\n result = _create(fs, channels, application, ctypes.byref(result_code))\n if result_code.value is not constants.OK:\n raise ValueError(strerror(result_code.value))\n\n return result\n\ndestroy = libopus.opus_encoder_destroy\ndestroy.argtypes = (EncoderPointer,)\ndestroy.restype = None\ndestroy.__doc__ = \"Frees an OpusEncoder allocated by opus_encoder_create()\"\n","sub_path":"opus/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"398265510","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 5 18:11:20 2021\n\n@author: psl\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 2 20:16:00 2021\n\n@author: psl\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 21 17:14:26 2020\n\n@author: psl\n\"\"\"\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport potentiel as pt\n\n# import asservissement as ass\nfrom psl_package import paris_saclay_league as psl\nimport cmath as c\nimport time\nimport numpy as np\nimport pygame\nimport os\n\npygame.init()\npygame.joystick.init()\n\ncontroller = pygame.joystick.Joystick(0)\ncontroller.init()\n\n# Three types of controls: axis, button, and hat\naxis = {}\nbutton = {}\nhat = {}\n\n# Assign initial data values\n# Axes are initialized to 0.0\nfor i in range(controller.get_numaxes()):\n\taxis[i] = 0.0\n# Buttons are initialized to False\nfor i in range(controller.get_numbuttons()):\n\tbutton[i] = False\n# Hats are initialized to 0\nfor i in range(controller.get_numhats()):\n\that[i] = (0, 0)\n\n# Labels for DS4 controller axes\nAXIS_LEFT_STICK_X = 0\nAXIS_LEFT_STICK_Y = 1\nAXIS_RIGHT_STICK_X = 4\nAXIS_RIGHT_STICK_Y = 3\nAXIS_R2 = 5\nAXIS_L2 = 4\n\n# Labels for DS4 controller buttons\n# Note that there are 14 buttons (0 to 13 for pygame, 1 to 14 for Windows setup)\nBUTTON_SQUARE = 3\nBUTTON_CROSS = 0\nBUTTON_CIRCLE = 1\nBUTTON_TRIANGLE = 2\n\nBUTTON_L1 = 4\nBUTTON_R1 = 5\nBUTTON_L2 = 6\nBUTTON_R2 = 7\n\nBUTTON_SHARE = 8\nBUTTON_OPTIONS = 9\n\nBUTTON_LEFT_STICK = 10\nBUTTON_RIGHT_STICK = 11\n\nBUTTON_PS = 12\nBUTTON_PAD = 5\n\n# Labels for DS4 controller hats (Only one hat control)\nHAT_1 = 0\n\n# Main loop, one can press the PS button to break\nquit = False\n\n\n\nvision = psl.SSLVisionClient(ip='224.5.23.2', port=10020)\nvision.connect()\ngrSim = psl.SSLgrSimClient('127.0.0.1', 20011)\ngrSim.connect()\n\nidr=0\nspin=False\ncross=False\nR1=False\n\nwhile not quit:\n\n # Get events\n for event in pygame.event.get():\n\n if event.type == pygame.JOYAXISMOTION:\n axis[event.axis] = round(event.value,3)\n elif event.type == pygame.JOYBUTTONDOWN:\n button[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n button[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n \that[event.hat] = event.value\n\n quit = button[BUTTON_PS]\n\n # Print out results\n # os.system('cls')\n # Axes\n vn=-axis[AXIS_LEFT_STICK_X]\n vt=-axis[AXIS_LEFT_STICK_Y]\n va=-axis[AXIS_RIGHT_STICK_Y]\n \n print(vt,vn,va,idr)\n tir=0\n 
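    # Kick strength is re-armed on every pass: square below requests a kick
    # via kickspeedx for one cycle, cross toggles the dribbler spinner, and
    # R1 switches which robot id receives the command packet.
    # Illustrative guard (an assumption, not in the original script): pygame
    # axes stay within [-1.0, 1.0], so clamping before the velocity scaling
    # below is a cheap safety net.
    vt, vn, va = [max(-1.0, min(1.0, v)) for v in (vt, vn, va)]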
\n \n if button[BUTTON_SQUARE]:\n tir=10\n \n if (cross!=button[BUTTON_CROSS])&(cross==False):\n spin=not spin\n cross=button[BUTTON_CROSS]\n \n if (R1!=button[BUTTON_R1])&(R1==False):\n print('r1')\n if idr==0:\n idr=1\n else:\n idr=0\n \n R1=button[BUTTON_R1]\n \n p = psl.packetCommandBot(False, \n idr, \n veltangent=0.5*vt, \n velnormal=0.5*vn, \n velangular=3*va,\n spinner=spin,\n kickspeedx=tir)\n grSim.send(p)\n \n \n time.sleep(0.05)\n\n# ani=animation.FuncAnimation(fig,animate,interval=30)\n# plt.show()\n# match_test.joueurs[2].commande_balle()\n# match_test.joueurs[2].Passe()\n# time.sleep(0.3)\n# match_test.joueurs[3].Tir()\n# match_test.joueurs[2].Tir()\n# match_test.balle.Position()\n# print(match_test.balle.position)\n\n# Y0=match_test.joueurs[0]\n# position_arrivee=Balle()\n# position_arrivee.x=-500\n# position_arrivee.y=500\n# Exb,Eyb=pt.Gradient(position_arrivee.potentiel)\n# a,b=np.polyfit([-100,Y0.x],[10,Y0.y],1)\n# Y0.champ_autre(Y0.x,0,a,b)\n# Ex,Ey=Exb+Y0.Ex_autre,Eyb+Y0.Ey_autre\n# Ex,Ey=pt.norme(Ex,Ey)\n# fig = plt.figure()\n# ax = fig.add_subplot(1, 1, 1)\n# ax.quiver(x,y,Ex,Ey,width=0.0008)\n# Y0.commande_position(500, 500, 0)\n# #Affichage\n# Exb,Eyb=pt.Gradient(potentielBalle)\n# # Exb,Eyb=norme(Exb,Eyb)\n# Exe=Ex1+Ex2+Ex3\n# Eye=Ey1+Ey2+Ey3\n# # Exe,Eye=norme(Exe,Eye)\n# Ex,Ey=Exb+Exe,Eyb+Eye\n# Ex,Ey=norme(Ex,Ey)\n\n# for bot in blueBots:\n# if bot[5]==0:\n# botInfo=bot\n# xbot,ybot,obot=botInfo[6],botInfo[7],botInfo[8]\n# ax.plot(xbot,ybot,'ro')\n# ax.plot(xbot1,ybot1,'ro',color='blue')\n# ax.plot([xbot2,xbot3],[ybot2,ybot3],'ro',color='yellow')\n# ax.quiver(x,y,Ex,Ey,width=0.0008)\n\nvision.close()\ngrSim.close()","sub_path":"manette.py","file_name":"manette.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"534016750","text":"#Copy all the groups having value with lettre \"o\" in a new set called oSet. Copy remaining groups into a list called \"notOList\". Print the elements of oSet and notOList. 
:dict={\"group1\": \"orange\", \"group2\": \"red\", \"group3\": \"yellow\"}\n\ndict={\"group1\": \"orange\", \"group2\": \"red\", \"group3\": \"yellow\"}\n\noSet = set()\nnotOList = []\n\nfor key, val in dict.items():\n if \"o\" in val:\n oSet.add(key)\n else:\n notOList.append(key)\nprint(oSet)\nprint(notOList)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"exam/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"161677667","text":"\nimport logging\nimport os\nimport sys\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport fmpy\n\nfrom fmpy import simulate_fmu, read_model_description, instantiate_fmu, extract, dump\n\nfrom mshoot import SimModel\n\ndef df_to_struct_arr_new(df):\n \"\"\"Converts a DataFrame to structured array.\"\"\"\n # time index must be reset to pass it to the struct_arr\n df = df.reset_index()\n struct_arr = df.to_records(index=False)\n #s = df.dtypes\n return struct_arr\n\ndef struct_arr_to_df(arr):\n \"\"\"Converts a structured array to DataFrame.\"\"\"\n df = pd.DataFrame(arr).set_index('time')\n\n return df\n\n\nclass SimFMU(SimModel):\n\n def __init__(self, fmupath, outputs=None, states=None, parameters=None,\n verbose=False):\n \"\"\"\n :param fmupath: str, path to FMU\n :param outputs: list(str), monitored outputs names\n :param states: list(str), monitored states names\n :param parameters: dict, parameters names and values\n :param verbose: bool, whether to suppress pyfmi prints\n \"\"\"\n if parameters is None:\n parameters = {}\n if states is None:\n states = []\n if outputs is None:\n outputs = []\n\n self.logger = logging.getLogger(type(self).__name__)\n print(fmupath)\n self.logger.debug(\"Loading FMU\")\n # Load FMU\n model_description = read_model_description(fmupath)\n self.model_description = model_description\n self.unzipdir = extract(fmupath)\n self.fmupath = fmupath\n self.fmu = instantiate_fmu(self.unzipdir, model_description)\n\n self.outputs = outputs\n self.states = states\n self.parameters = parameters\n self.verbose = verbose\n\n # Get initial state\n # Comment:\n # The model has to be initialized to read the state variables.\n #dummy_result = self.fmu.initialize(tStart=0, stopTime=None)\n self.fmu.setupExperiment(startTime=0)\n self.fmu.enterInitializationMode()\n self.fmu.exitInitializationMode()\n self.x0 = self._get_state()\n\n # Reset the FMU\n self.fmu.reset()\n\n # Set parameters\n #for n in parameters:\n # self.fmu.set(n, parameters[n])\n\n def _get_state(self):\n \"\"\"\n Return an ordered dictionary with state names as keys\n and state values as values.\n \"\"\"\n # Return dictionary, keys - state names, values - state values\n # get FMU model description object\n #model_description = read_model_description(self.fmupath)\n x = OrderedDict()\n # collect the value references\n # collect the value references\n self.vrs = {}\n for variable in self.model_description.modelVariables:\n self.vrs[variable.name] = variable.valueReference\n\n # collect list of states and derivatives\n states = []\n #derivatives = []\n for derivative in self.model_description.derivatives:\n #derivatives.append(derivative.variable.name)\n states.append(re.findall('^der\\((.*)\\)$', derivative.variable.name)[0])\n\n # collect the value references for states and derivatives\n #vr_states = [vrs[s] for s in states]\n #vr_derivatives = [vrs[x] for x in derivatives]\n for s in states:\n 
x[s]= self.read(s)# [0] because 1-element array\n return x\n\n def read(self, datapoint):\n name = self.vrs[datapoint]\n value = self.fmu.getReal([name])\n # print(value)\n return value\n\n def write(self, datapoint, value):\n name = self.vrs[datapoint]\n self.fmu.setReal([name], [value])\n\n def simulate(self, udf, x0, save_state=False):\n \"\"\"\n Simulate the model using the provided inputs `udf`\n and initial state `x0`.\n The DataFrame should have the following content:\n - index - time in seconds and equal steps, named 'time',\n - columns - input data,\n - column names - input variable names.\n The order of `x0` should reflect the one used in `states`.\n Return two DataFrames, `ydf` and `xdf`, with\n outputs and states, respectively, and with the same\n structure as `udf`.\n :param udf: DataFrame, shape (n_steps, n_variables)\n :param x0: vector, size (n_states, )\n :return: ydf, xdf\n \"\"\"\n assert udf.index.name == 'time'\n\n timeline = udf.index.values\n start = timeline[0]\n stop = timeline[-1]\n\n # Prepare inputs for fmpy:\n input_arr = df_to_struct_arr_new(udf)\n assert input_arr is not None, \"No inputs assigned\"\n output_interval = input_arr[1][0] - input_arr[0][0]\n\n # Initial condition\n start_values = dict()\n input_names = input_arr.dtype.names\n for name in input_names:\n if name != 'time':\n start_values[name] = input_arr[name][0]\n\n assert 'time' in input_names, \"time must be the first input\"\n\n # Set parameters\n for name, value in self.parameters.items():\n if name != 'time':\n start_values[name] = value\n\n # Initial states overriden by the user\n i = 0\n for n in self.states:\n start_values[n] = x0[i]\n i += 1\n\n # Simulate\n if not self.verbose:\n nullf = open(os.devnull, 'w')\n sys.stdout = nullf\n\n self.output_names = list(self.outputs)\n #derivative_names = [der.variable.name for der in self.model_description.derivatives]\n # names = [re.search(r'der\\((.*)\\)', n).group(1) for n in derivative_names]\n #for name in derivative_names:\n #self.output_names.append(name)\n for name in self.states:\n self.output_names.append(name)\n\n res = simulate_fmu(\n self.unzipdir,\n start_values=start_values,\n start_time=start,\n stop_time=stop,\n input=input_arr,\n output=self.output_names,\n output_interval=output_interval,\n fmu_instance=self.fmu\n # solver='Euler', # TODO: It might be useful to add solver/step to options\n # step_size=0.005\n )\n #states = self.fmu.getFMUstate()\n\n if not self.verbose:\n sys.stdout = sys.__stdout__\n nullf.close()\n\n # Update state (use only in emulation)\n if save_state:\n self.x0 = self._get_state()\n\n # Outputs\n res_df = struct_arr_to_df(res)\n ydf = pd.DataFrame()\n xdf = pd.DataFrame()\n\n for n in self.outputs:\n ydf[n] = res_df[n]\n\n for n in self.states:\n xdf[n] = res_df[n]\n\n self.fmu.reset()\n\n return ydf, xdf\n \nif __name__ == \"__main__\":\n # DEMO: SIMULATE\n # ==============\n # Load FMU\n fmupath = os.path.join('resources', 'fmus', 'R2C2', 'R2C2.fmu')\n parameters = {'C': 1e6}\n model = SimFMU(\n fmupath,\n outputs=['qout', 'Tr'],\n states=['heatCapacitor1.T'],\n parameters=parameters,\n verbose=True)\n\n # Inputs\n t = np.arange(0, 86401, 3600)\n udf = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])\n udf['q'] = np.full(t.size, 100)\n udf['Tout'] = np.full(t.size, 273.15)\n\n # Initial state\n x0 = [273.15 + 20]\n\n ydf, xdf = model.simulate(udf, x0)\n\n ydf.plot(subplots=True, title='ydf')\n xdf.plot(subplots=True, title='xdf')\n 
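    # Illustrative addition (an assumption, not part of the original demo):
    # persist the simulated output and state trajectories next to the plots.
    # The file names are arbitrary examples.
    ydf.to_csv('r2c2_outputs.csv')
    xdf.to_csv('r2c2_states.csv')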
plt.show()\n","sub_path":"mshoot/interfaces/fmpy.py","file_name":"fmpy.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"279778579","text":"# 20190424 Lab10\n# 피타고라스 삼각형\n# 피타고라스의 정리를 만족하는 삼각형들을 모두 찾아보자.\n# 삼각형 한 변의 길이는 1부터 30 이하이다.\n\nnew_list = []\n\nfor x in range(1, 30):\n for y in range(x, 30):\n for z in range(y, 30):\n if x ** 2 + y ** 2 == z ** 2:\n new_list.append((x, y, z))\nprint(new_list)\n\n#################### 한줄로 표현하기 ####################\nnew_list = []\nnew_list = [(x, y, z) for x in range(1, 30) for y in range(x, 30) for z in range(y, 30) if x ** 2 + y ** 2 == z ** 2]\nprint(new_list)\n","sub_path":"20190424 Practice/Lab10.py","file_name":"Lab10.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"192464524","text":"#Write wrappers for bitmaps and photoimages that allow one to do (admittedly slow)\n#work on them for when PIL is unavailable\n\nimport tkinter as tk\nfrom GUITools.ColorTools import TkColorOperations\n\nclass TkPhotoImage(tk.PhotoImage):\n alpha_cutoff=3\n show_root=None\n def __init__(self,source=None,width=None,height=None,data=None):\n self.color_ops=TkColorOperations(self)\n if isinstance(source,str):\n try:\n f=open(file)\n except:\n data=source\n else:\n data=f.read()\n f.close()\n elif not source is None:\n try:\n width,height=source.dimensions\n except AttributeError:\n try:\n width=source.width()\n height=source.height()\n except AttributeError:\n if width is None:\n width=16\n if height is None:\n height=16\n data=self.format_data(source,width,height)\n super().__init__(width=width,height=height)\n## self.put('#000',(0,0))\n## print(self.get(0,0))\n## self.putdata(data)\n self.copy_pixels(source)\n\n def putdata(self,data,position=(0,0)):\n if isinstance(data,str):\n self.put(data,to=position)\n else:\n x,y=position\n for s in data:\n if isinstance(s,str):\n self.put(s,(x,y))\n y+=1\n else:\n pos,s=s\n self.put(s,pos)\n \n def copy_pixels(self,iterable):\n #should find a way to smooth out artefacts\n w=self.width()\n h=self.height()\n i=j=0\n## last=[]\n for p in iterable:\n if len(p)==2:\n p,pos=p\n if len(p)==4:\n a=p[3]\n p=p[:3]\n else:\n a=self.alpha_cutoff+1\n if a>self.alpha_cutoff:\n c=self.color_ops.rgb_hex(p)\n## last.append(c)\n self.put(c,(i,j))\n## last=0\n i+=1\n if i>=w:\n i=0\n j+=1\n if j>=h:\n break\n def format_data(self,iterable,width=None,height=None,to=(0,0)):\n## iterable.show()\n \n color_hex=self.color_ops.rgb_hex\n rows=[None]*height\n i,j=to\n if width is None:\n width=self.width()\n if height is None:\n height=self.height()\n iterable=iter(iterable)\n bit=next(iterable)\n row=[(j,i),'']\n j=1\n def bit_alpha(bit):\n if len(bit)==4:\n a=bit[3]\n bit=bit[:3]\n else:\n a=self.alpha_cutoff+1\n chex=' '+color_hex(bit)\n return (chex,a)\n def new_row(row=row):\n if row[1]:\n row[1]='{'+row[1]+'}'\n rows[i]=tuple(row)\n row[0]=(i,j);row[1]=''\n if len(bit)==2:\n bit,pos=bit\n x,y=pos\n## print(x,y)\n last_y=y\n bit,alpha=bit_alpha(bit)\n if alpha>self.alpha_cutoff:\n row[1]+=bit\n else:\n new_row(row)\n for bit in iterable:\n bit,pos=bit\n x,y=pos\n if y!=last_y:\n last_y=y\n new_row()\n i+=1\n row[0]=(j,i)\n if i>=height:\n break\n bit,a=bit_alpha(bit)\n if a>self.alpha_cutoff: \n row[1]+=bit\n else:\n new_row()\n else:\n bit,a=bit_alpha(bit)\n if a>self.alpha_cutoff:\n row[1]+=bit\n for bit in iterable:\n bit,a=bit_alpha(bit)\n if 
a>self.alpha_cutoff:\n## print('.',end='')\n row[1]+=bit\n else:\n new_row()\n j+=1\n if j>=width:\n new_row()\n j=0\n i+=1\n row[0]=(j,i)\n if i>=height:\n break\n rows=[row for row in rows if not row is None]\n## print([r[0] for r in rows])\n return rows\n \n def copy_from(self,source):\n if not isinstance(source,str):\n data=self.format_data(source)\n else:\n data=source\n self.blank()\n self.putdata(data)\n\n def show(self):\n## from GUITools.FormattingTools import FormattingGrid\n cls=type(self)\n if cls.show_root is None:\n cls.show_root=T=tk.Toplevel()\n T.bind('',lambda e,cls=cls:setattr(cls,'show_root',None))\n T=cls.show_root\n self.show_label=tk.Label(T,text=str(self),image=self)\n self.show_label.grid()\n \n","sub_path":"GUITools/ImageTools/tkImageTools.py","file_name":"tkImageTools.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"462422348","text":"import os\nimport matplotlib.pyplot as plt\n\ndef calc_drag_lift(file):\n with open(file) as infile:\n rows = infile.readlines()\n #print(rows[len(rows)-1])\n row = rows[len(rows)-1].split(\"\\t\")\n lift = row[1]\n drag = row[2].replace(\"\\n\",\"\")\n lift = float(lift)\n drag = float(drag)\n lift_drag = lift/drag\n\n return (lift_drag)\n \n\ndef plot(drag_lift, angles):\n plt.xlabel('Angle')\n plt.ylabel('Lift/Drag Ratio')\n plt.title('The Lift/Drag Ratio for different angles')\n plt.plot(angles, drag_lift)\n #plt.show()\n\n plt.savefig('lift_drag_ratio.png')\n\n\ndef result():\n angles = []\n lift_drag_ratios = []\n for dirName, subdirList, fileList in os.walk(\"result\"):\n for file in fileList:\n if file == \"drag_ligt.m\":\n angle = dirName.replace(\"result/r0a\",\"\")\n angle = angle.replace(\"n200_results\",\"\")\n angle = int(angle)\n path = os.path.abspath(dirName+\"/\"+file)\n lift_drag = calc_drag_lift(path)\n angles.append(angle)\n lift_drag_ratios.append(lift_drag)\n\n plot(lift_drag_ratios, angles)\n \n \n\nresult()\n#plot_drag_lift(0)\n","sub_path":"testdata/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"360258638","text":"from tkinter import *\nfrom resource import Resource\nfrom grid import Grid\nfrom hero import Hero\nfrom enemies import *\nfrom area import Area\nimport random\n\nimage_size = 72\nboard_size = 10\nroot = Tk()\ncanvas = Canvas(root, width=image_size * board_size, height=image_size * board_size + 100)\ngrid = Grid(board_size)\nhero = Hero()\narea = Area(grid)\nbackground = Resource()\nlevel = 1\n\n\ndef on_key_press(e):\n is_tile_occupied = hero.is_tile_occupied(area.enemy_list)\n control_keys = [65, 68, 83, 87]\n\n if not is_tile_occupied and e.keycode in control_keys:\n hero.erase(canvas, background, image_size)\n\n if e.keycode == 65:\n if grid.grid[hero.x - 1][hero.y].cell_type == \"floor\" and hero.x - 1 >= 0:\n hero.x = hero.x - 1\n\n hero.image = \"hero_left\"\n elif e.keycode == 68:\n if hero.x + 1 <= 9 and grid.grid[hero.x + 1][hero.y].cell_type == \"floor\":\n hero.x = hero.x + 1\n\n hero.image = \"hero_right\"\n elif e.keycode == 83:\n if hero.y + 1 <= 9 and grid.grid[hero.x][hero.y + 1].cell_type == \"floor\":\n hero.y = hero.y + 1\n\n hero.image = \"hero_down\"\n elif e.keycode == 87:\n if grid.grid[hero.x][hero.y - 1].cell_type == \"floor\" and hero.y - 1 >= 0:\n hero.y = hero.y - 1\n\n hero.image = \"hero_up\"\n\n hero.draw(canvas, background, 
image_size)\n\n elif e.keycode == 32 and is_tile_occupied:\n current_enemy = hero.get_enemy_on_same_tile(area.enemy_list)\n hero.strike(current_enemy, canvas, background, image_size, area)\n\n if hero.killed_boss and hero.has_key:\n area.level += 1\n area.draw(grid, hero, canvas, background, image_size)\n\n canvas.create_rectangle(0, 720, 720, 820, fill='white')\n\n if hero.is_tile_occupied(area.enemy_list):\n current_enemy = hero.get_enemy_on_same_tile(area.enemy_list)\n current_enemy.get_stats(canvas)\n\n if not hero.is_alive:\n canvas.create_rectangle(0, 720, 720, 820, fill='white')\n canvas.create_text(350, 770, fill=\"black\", font=\"Arial 14 bold\",\n text=\"GAME OVER, YOU ARE DEAD, WASTED\")\n else:\n hero.get_stats(canvas)\n\n\ncanvas.bind(\"\", on_key_press)\ncanvas.pack()\ncanvas.focus_set()\ncanvas.delete(\"all\")\n\narea.draw(grid, hero, canvas, background, image_size)\n\n\nroot.mainloop()\n","sub_path":"gamelogic.py","file_name":"gamelogic.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"492597830","text":"import cv2 as cv\n\n# Reading Image\n# img = cv.imread('Resources/Photos/cat_large.jpg')\n# cv.imshow('Cat', img)\n# cv.waitkey(0)\n\n\ndef rescaleFrame(frame, scale=0.75):\n # Image , Video, Camera so on\n width = int(frame.shape[1] * scale)\n height = int(frame.shape[0] * scale)\n dimensions = (width, height)\n \n return cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)\n\n# Resize image\n#resized_image = rescaleFrame(img, .25)\n#cv.imshow('Image_resize', resized_image)\n\ndef changeRes(width, height):\n # Only for Live video\n capture.set(3, width) #capture.get(3) will get the width of the frame in the video stream\n capture.set(4, height) \n\n# Reading Videos\n# capture = cv.VideoCapture(0) # Reference the webcam\ncapture = cv.VideoCapture('Resources/Videos/dog.mp4')\n\nwhile True:\n isTrue, frame = capture.read()\n \n frame_resized = rescaleFrame(frame, .5)\n\n cv.imshow('Video', frame)\n cv.imshow('Video_resized', frame_resized)\n \n if cv.waitKey(20) & 0xFF==ord('d'):\n break\n\ncapture.release()\ncv.destroyAllWindows()\n","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"289473672","text":"#! /usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n__author__ = 'yeyaogang'\r\n\r\n\r\nclass FilterModule(object):\r\n ''' Custom filters are loaded by FilterModule objects '''\r\n\r\n def filters(self):\r\n ''' Filter Module objects return a dict mapping filter names to\r\n filter functions. 
'''\r\n return {\r\n 'substr': self.substr,\r\n }\r\n\r\n ''' def substr(self, check,checkin):\r\n return value1+value2'''\r\n def substr(self,check,checkin):\r\n if check in checkin:\r\n return True\r\n else:\r\n return False\r\n","sub_path":"library/filter_substr.py","file_name":"filter_substr.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"107268301","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# Create ROCs\n#\n# Author:\n#\n# Peter Krusche \n#\nimport jinja2\nimport json\n\nfrom template import TEMPLATEDIR\n\n\ndef render_roc(roc_name, data):\n \"\"\"Render a ROC curve via D3\n\n :param roc_name: name of the ROC (must be unique in document)\n :param data: data as something that can be turned into JSON\n :return: HTML that can be included in output\n \"\"\"\n\n loader = jinja2.FileSystemLoader(TEMPLATEDIR)\n env = jinja2.Environment(loader=loader)\n\n template_vars = {\n \"roc_name\": roc_name,\n \"data\": json.dumps(json.dumps(data))\n }\n\n template = env.get_template(\"roc_plot.jinja2.html\")\n return template.render(**template_vars)\n\n","sub_path":"reporting/basic/src/python/report/roc_plot.py","file_name":"roc_plot.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"263846635","text":"from tkinter import Tk, Text, ttk\nfrom tkinter.constants import BOTH, BOTTOM, END, EW, RIGHT, TOP, LEFT, YES\nfrom googletrans import Translator\nimport uteis as u\n\nclass Fr(ttk.Frame):\n def __init__(self, parent):\n super().__init__(parent)\n\n # self.values = ('pt', 'es', 'en')\n self.values = u.lang_values\n\n self.fr_esquerdo = ttk.Frame(self)\n self.fr_direito = ttk.Frame(self)\n\n\n self.lb_entrada = ttk.Label(self.fr_esquerdo, text='De')\n self.combo_entrada = ttk.Combobox(self.fr_esquerdo, values=list(self.values.values()))\n self.txt1 = Text(self.fr_esquerdo)\n\n self.lb_saida = ttk.Label(self.fr_direito, text='Para')\n self.combo_saida = ttk.Combobox(self.fr_direito, values=list(self.values.values()))\n self.txt2 = Text(self.fr_direito)\n\n # botao traduzir \n self.bt_translate = ttk.Button(self, text='traduzir', command=self.traduzir)\n\n # definindo default\n self.combo_entrada.set('portugues')\n self.combo_saida.set('ingles')\n\n self.lb_entrada.grid()\n self.combo_entrada.grid()\n self.txt1.grid()\n\n self.lb_saida.grid()\n self.combo_saida.grid()\n self.txt2.grid()\n\n self.fr_esquerdo.grid(row=0, column=0, ipadx=2, ipady=2)\n self.fr_direito.grid(row=0, column=1, ipadx=2, ipady=2)\n self.bt_translate.grid(row=1, column=0, sticky=EW, columnspan=2, ipadx=1, ipady=5)\n \n self.lb_aviso = ttk.Label(self, text='')\n self.lb_aviso.grid(row=2, column=1, sticky=EW)\n\n def traduzir(self):\n msg_in = self.txt1.get(1.0, END)\n lang_in = u.get_key(u.lang_values, self.combo_entrada.get())\n lang_out = u.get_key(u.lang_values, self.combo_saida.get())\n # print(lang_in, lang_out)\n \n try: \n # print(msg_in, lang_in, lang_out)\n traducao = u.traduzir(texto=msg_in, src=lang_in, dest=lang_out)\n # print(traducao)\n self.txt2.delete(1.0, END)\n self.txt2.insert(1.0, traducao)\n self.lb_aviso.config(text='')\n except:\n self.lb_aviso.config(text='pode estar com erro de conexao', foreground='red')\n \nif __name__ == '__main__':\n import main\n main.main()\n # root = Tk()\n # Fr(root).pack()\n # 
root.mainloop()","sub_path":"17-sistemaDeTraducao/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"507388524","text":"import sys\nimport re\nimport os\nimport numpy as np\nfrom optparse import OptionParser\nimport glob\n\nparser = OptionParser()\n\nif len(sys.argv) < 3:\n sys.stderr.write('usage:\\t' + \\\n sys.argv[0] + \\\n ' ' + \\\n ' ' )\n sys.exit(1)\n\nescapee_file=sys.argv[1]\nout_dir=sys.argv[2]\n\nparser.add_option(\"-a\",\n \"--all\",\n dest=\"all_file\",\n help=\"Concatenated gene file name\")\n\n(options, args) = parser.parse_args()\ntmp0 = \"tmp0.bed\"\n\n\"\"\" Define a function that makes header array and splits up file into 2 temp files according to genome assembly \"\"\"\ndef parseExpression(esc,header,tmp):\n\ttemp = open(tmp0,'w')\n\tf1 = open(esc, 'r') \n\tfor line in f1:\n\t\tA = line.rstrip().split()\n\t\theader.append(A[1]) if A[1] not in header else header\n\t\t[chr, start, stop, strand] = re.split('\\.{2}|[:,]+', A[0])\n\t\tstart = str(int(start) - 500)\n\t\tstop = str(int(stop) + 500)\n\t\tprint >> temp, chr + '\\t' + start + '\\t' + stop + '\\t' + A[1] + '\\t' + strand + '\\t' + A[2]\n\n\ttemp.close()\n\treturn header\n\n\"\"\" Define a function that creates one bed file per sampe in expression header \"\"\"\ndef filesExpression(infile,header,files,dir):\n\tfile = open(infile, 'r')\n\tfor l in file:\n\t\tA = l.rstrip().split()\n\t\tfname = dir + '/' + A[3] + '.bed'\n\t\tif fname not in files:\n\t\t\tfiles.append(fname)\n\t\t\twith open(fname, 'w') as fwrite:\n\t\t\t\tfwrite.write(l)\n\n\"\"\" Define a function that filters gene file for TSS with largest differential expression \"\"\"\ndef filterExpression(dir):\n\tfor f in os.listdir(dir):\n\t\tmax = np.inf\n\t\tlwrite = ''\n\t\tfname = dir + '/' + f\n\t\tfread = open(fname, 'r')\n\t\tfor l in fread:\n\t\t\tA = l.rstrip().split()\n\t\t\tif float(A[5]) < float(max):\n\t\t\t\tlwrite = '\\t'.join(A[:-1])\n\t\t\t\tmax = A[5]\n\t\tfwrite = open(fname, 'w')\n\t\tfwrite.write(lwrite)\n\n\"\"\" Define a function that cats gene files \"\"\"\ndef joinGenes(dir,outfile):\n\tfwrite = open(outfile, 'w')\n\tfor f in os.listdir(dir):\n\t\tfname = dir + '/' + f\n\t\tfread = open(fname, 'r')\n\t\tfor l in fread:\n\t\t\tfwrite.write(l + '\\n')\n\nfiles = []\nheader = []\n\nheader = parseExpression(escapee_file, header, tmp0)\nfilesExpression(tmp0, header, files, out_dir)\n\nfilterExpression(out_dir)\nif(options.all_file):\n\tjoinGenes(out_dir,options.all_file)\n\nfor f in os.listdir(os.curdir):\n\tif re.search(r'tmp', f):\n\t\tos.remove(f)\n","sub_path":"scripts/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"526248007","text":"# greedy.py\nimport pandas as pd \nfrom sklearn import linear_model\nfrom sklearn import metrics\nfrom sklearn.datasets import make_classification\n\nclass GreedyFeatureSelection:\n \"\"\"\n A simple and custom class for greedy feature selection, you will need to \n to modify it quite a bit to make it suitable for your dataset\n \"\"\"\n def evaluate_score(self , x ,y):\n \"\"\"\n This function evaluates model on data and returns Area Under ROC Curve (AUC)\n NOTE: we fit the data and evaluate AUC on same data .\n WE ARE OVER FITTING HERE.\n But this is also a way to achieve greedy selection.\n k-fold will take k time longer.\n\n If you want to 
implement it in really correct way, calculate OOF AUC and return mean\n AUC over k folds. This require only a few lines of change.\n\n :params x : training data\n :params y: targets\n :returns : overfitted area under curve the roc curve\n \"\"\"\n # fit the logistic regression model,\n # and calculate AUC on the same data\n # again: BEWARE\n # you can choose any model that suits your data\n model = linear_model.LogisticRegression()\n model.fit(x,y)\n predictions = model.predict_proba(x)[:,1]\n auc = metrics.roc_auc_score(y , predictions)\n\n return auc\n\n def _feature_selection(self , x ,y):\n \"\"\"\n This function does the actual greedy selction\n :params x : data,numpy array\n :params y : targets, numpy array\n :return : (best scores , best features)\n \"\"\"\n # initialize good features list\n # and best scores to keep track of both\n good_features = []\n best_scores = []\n\n # calculating the number of features\n num_features = x.shape[1]\n\n # infinite loop\n while True:\n # intialize best feature and score of this loop\n this_feature = None\n best_score = 0\n\n # loop over all features\n for feature in range(num_features):\n # if feature is already in good features,\n # skip this for loop\n if feature in good_features:\n\n continue\n # selected features are all good till now\n # and current feature\n selected_features = good_features + [feature]\n # remove all other feature from the data\n xtrain = x[: , selected_features]\n # calculate the score , in our case AUC\n score = self.evaluate_score(xtrain , y)\n # if score is greater then the best score\n # of this loop, change best score and best feature\n if score > best_score:\n this_feature = feature\n best_score = score\n\n # if we have selected a feature , add it to\n # the good feature list and update best score list\n if this_feature != None:\n good_features.append(this_feature)\n best_scores.append(best_score)\n\n # if we did not improve during the last two rounds,\n # exit the while loop\n if len(best_score) > 2:\n if best_scores[-1] < best_scores[-2]:\n break\n\n # return the best score and good features\n # why do we remove the last data point?\n return best_scores[:-1] , good_features[:-1]\n\n def __call__(self , x,y):\n \"\"\"\n Call function will call the class on a set of arguments\n \"\"\"\n # selcet features , return scores and selected indices\n scores , features = self._feature_selection(x , y)\n # transform data with selected features\n return x[:,features] , scores\n\nif __name__ == \"__main__\":\n # generate binary classification data\n x ,y = make_classification(n_samples=1000 , n_features= 100)\n\n # transform data by greedy feature selection \n x_transformed , scores = GreedyFeatureSelection()(x,y)\n print(scores)\n print(x_transformed.shape)","sub_path":"greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"449014722","text":"\"\"\"\n akane.connection\n ~~~~~~~~~~~~~~~~\n\n All functionality regarding sending and receiving data to redis.\n\"\"\"\n\nimport sys\nimport socket\n\nfrom tornado.ioloop import IOLoop\nfrom tornado import iostream\n\nimport hiredis\n\nfrom .exceptions import PoolError\nfrom .utils import redis_request\n\n\nif sys.version_info[0] < 3:\n DELIMITER = '\\r\\n'\nelse:\n DELIMITER = b'\\r\\n'\n\n\nclass Connection(object):\n\n _busy = False\n _callback = None\n\n def __init__(self, host='localhost', port=6379, ioloop=None):\n self.host = host\n self.port = port\n 
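        # The plain TCP socket below is wrapped in a tornado IOStream so all
        # reads and writes are scheduled on the IOLoop instead of blocking the
        # process; TCP_NODELAY keeps Nagle's algorithm from delaying the small
        # request payloads typical of redis commands.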
self._ioloop = ioloop or IOLoop.instance()\n self._parser = hiredis.Reader(encoding=\"utf-8\")\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n s.settimeout(None)\n\n self._stream = iostream.IOStream(s, self._ioloop)\n self._stream.connect((host, port))\n\n def busy(self):\n return self._busy\n\n def closed(self):\n self._stream.closed()\n\n def send_request(self, callback, *args):\n self._busy = True\n self._callback = callback\n self._stream.write(redis_request(args))\n self._stream.read_until(DELIMITER, self._handle_read)\n\n def _handle_read(self, data):\n self._parser.feed(data)\n\n parsed_data = self._parser.gets()\n if parsed_data is False:\n next = True\n if data[0] == '$':\n next = int(data[1:-2])\n else:\n next = False\n\n if next is True:\n self._stream.read_until(DELIMITER, self._handle_read)\n elif next > 0:\n self._stream.read_bytes(next, self._handle_read)\n else: # if next is False\n self._busy = False\n cb = self._callback\n self._callback = None\n if cb is not None:\n cb(parsed_data)\n return\n\n\nclass Pool(object):\n\n closed = True\n\n def __init__(self, connections=1, *args, **kwargs):\n self.closed = False\n self._pool = set()\n\n for i in range(connections):\n self._pool.add(Connection(*args, **kwargs))\n\n def get_free_conn(self):\n if self.closed:\n raise PoolError('connection pool is closed')\n for conn in self._pool:\n if not conn.busy():\n return conn\n raise PoolError('connection pool exhausted')\n\n def close(self):\n if self.closed:\n raise PoolError('connection pool is closed')\n for conn in self._pool:\n if not conn.closed():\n conn.close()\n self._pool = set()\n self.closed = True\n","sub_path":"akane/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"289593687","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\ntam=[]\ntam=input().split()\ntam = list(map(int,tam))\n#print(tam)\nn=[]\nn=input().split()\nn = list(map(int,n))\n#print(n)\na=[]\na=input().split()\na = list(map(int,a))\na=set(a)\n#print(a)\nb=[]\nb=input().split()\nb = list(map(int,b))\n#print(b)\nb=set(b)\n#print(b)\nout=0\nfor i in range(0,len(n)):\n if n[i] in a:\n out+=1\n elif n[i] in b:\n out-=1\n #print(out) \nprint(out)","sub_path":"Sets/No Idea/no_idea.py","file_name":"no_idea.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"640665968","text":"import cv2, sys, pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom math import sqrt\nfrom scipy.spatial.distance import euclidean\nfrom scipy.cluster.vq import whiten\nfrom scipy.spatial import cKDTree\n\nSCENE_TYPE = ['buildings', 'cars', 'food', 'people', 'trees']\norb = cv2.ORB()\n\n# i, ii) Collect orb features for all training images into one large vector and run kmeans\norb_features = []\nprint(\"1. 
Computing/Loading orb features for all training images.\")\nfor scene in SCENE_TYPE:\n try:\n scene_descriptors = pickle.load(open(scene + '_descriptors.p', 'rb'))\n except:\n scene_descriptors = []\n for i in range(51, 201):\n if i < 10:\n img_number = '00' + str(i)\n elif 10 <= i and i < 100:\n img_number = '0' + str(i)\n else:\n img_number = str(i)\n img = cv2.imread('./train/' + scene + '/f000' + img_number + '.jpg')\n keypoints = orb.detect(img, None)\n keypoints, descriptors = orb.compute(img, keypoints)\n try:\n scene_descriptors.extend(descriptors)\n except:\n pass\n pickle.dump(scene_descriptors, open(scene + '_descriptors.p', 'wb'))\n orb_features.extend(scene_descriptors)\nprint(\"==> SUCCESS\")\n\nfeatures = whiten(np.float32(orb_features))\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n\nprint(\"2. Computing/Loading kmeans using orb features with k = 800.\")\ntry:\n bag_of_words = pickle.load(open('bag_of_words.p', 'rb'))\nexcept:\n try:\n ret, label, bag_of_words = cv2.kmeans(data=features, K=800, criteria=criteria,\\\n attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)\n pickle.dump(bag_of_words, open('bag_of_words.p', 'wb')) \n except:\n print(\"==> FAILED: EXITING PROGRAM...\")\n sys.exit(1)\nbag_of_words = cKDTree(bag_of_words)\nprint(\"==> SUCCESS\")\n\n# iii) Create BoW encoding vector for each training image using BoW\nprint(\"3. Computing/Loading encoding vector for all training images.\")\nbow_vectors, matching_scenes = [], []\nfor scene in SCENE_TYPE:\n try:\n bow_vector_scene = pickle.load(open('bow_vector_' + scene + '.p', 'rb'))\n except:\n bow_vector_scene = []\n for i in range(51, 201):\n if i < 10:\n img_number = '00' + str(i)\n elif 10 <= i and i < 100:\n img_number = '0' + str(i)\n else:\n img_number = str(i)\n img = cv2.imread('./train/' + scene + '/f000' + img_number + '.jpg')\n keypoints = orb.detect(img, None)\n keypoints, descriptors = orb.compute(img, keypoints)\n bow_vector = np.array([0.] * 800)\n try:\n for descriptor in descriptors:\n dist, match = bag_of_words.query(descriptor, 5)\n for j in range(len(match)):\n bow_vector[match[j]] = 1.0 / (dist[j]**2 + 1)\n except:\n pass\n bow_vector_scene.append((bow_vector - np.mean(bow_vector)) / np.std(bow_vector))\n pickle.dump(bow_vector_scene, open('bow_vector_' + scene + '.p', 'wb'))\n bow_vectors.extend(bow_vector_scene)\n for i in range(len(bow_vector_scene)):\n matching_scenes.append(scene)\nprint(\"==> SUCCESS\")\n\n# iv) Match testing image with label and check accuracy\nprint(\"4. Computing/Loading encoding vector for all test images.\")\nbow_vectors = cKDTree(bow_vectors)\nnum_correct, num_images = 0.0, 0.0\nfor scene in SCENE_TYPE:\n for i in range(51):\n if i < 10:\n img_number = '00' + str(i)\n elif 10 <= i and i < 100:\n img_number = '0' + str(i)\n else:\n img_number = str(i)\n img = cv2.imread('./test/' + scene + '/f000' + img_number + '.jpg')\n num_images += 1.0\n keypoints = orb.detect(img, None)\n keypoints, descriptors = orb.compute(img, keypoints)\n bow_vector = np.array([0.] 
* 800)\n try:\n for descriptor in descriptors:\n dist, match = bag_of_words.query(descriptor, 5)\n for j in range(len(match)):\n bow_vector[match[j]] = 1.0 / (dist[j]**2 + 1)\n bow_vector = (bow_vector - np.mean(bow_vector)) / np.std(bow_vector) # Normalize\n except:\n pass\n dist, match = bow_vectors.query(bow_vector) # 1 Nearest Neighbor search to find best match\n min_scene = matching_scenes[match]\n if min_scene == scene:\n num_correct += 1.0\nprint(\"==> SUCCESS\")\n\nprint(\"============ Accuracy ============\")\nprint(str(num_correct / num_images) + \" (\" + str(num_correct) + \" / \" + str(num_images) + \")\")\nprint(\"==================================\")","sub_path":"HW3/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"27673707","text":"# Given a sorted array and a list of integers, output the index of each integer\n# in the array. If the integer is not in the array, print -1\n\nfrom math import floor\n\n\ndef binarySearch(arr, item, low=0, high=None):\n if high is None:\n high = len(arr)\n mid = low + (high-low) // 2\n if high - low + 1 <= 0 or mid == high:\n return -1\n else:\n guess = arr[mid]\n if guess == item:\n return mid + 1\n if item < guess:\n return binarySearch(arr, item, low, mid)\n else:\n return binarySearch(arr, item, (mid+1), high)\n\n\noutput = []\nsortedArray = []\nintegerList = []\n\nf = open(\"rosalind_bins.txt\", \"r\")\ndata = f.readlines()\nf.close()\nsortedArrayString = data[2].split(\" \")\nintegerListString = data[3].split(\" \")\n\n\nfor elem in sortedArrayString:\n sortedArray.append(int(elem))\nfor elem in integerListString:\n integerList.append(int(elem))\n\nfor n in integerList:\n output.append(binarySearch(sortedArray, n))\n\nf2 = open(\"output.txt\", \"w\")\nfor n in output:\n f2.write(str(n) + \" \")\n","sub_path":"Rosalind/Binary-Search/binary-search.py","file_name":"binary-search.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"216207901","text":"import uuid\nfrom django.db import models\nfrom django.contrib.auth.models import (\n BaseUserManager, AbstractBaseUser\n)\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.conf import settings\nfrom rest_framework.authtoken.models import Token\n\nDIV_CHOICES = (\n ('MLHC', 'MLHC'),\n ('NCS', 'NCS'),\n ('MTE', 'MTE'),\n ('NIT', 'NIT'),\n ('WTE', 'WTE'),\n ('PRC', 'PRC'),\n ('PTI', 'PTI'),\n ('NCSA', 'NCSA'),\n ('NCSAR', 'NCSAR'),\n ('NCSMD', 'NCSMD'),\n ('WLTIC', 'WLTIC'),\n)\nOFFICE_CHOICES = (\n ('2301', '2301'),\n ('2302', '2302'),\n ('2303', '2303'),\n ('4999', '4999'),\n)\nJOB_TITLES = (\n ('EO', 'Escrow Officer'),\n ('TO', 'Title Officer'),\n)\nSECURITY_LEVELS = (\n ('E', 'Employee'),\n ('M', 'Manager'),\n ('S', 'Supervisor'),\n)\nACTIVE_STATUS = (\n ('A', 'A'),\n ('I', 'I'),\n)\n\n\nclass EmployeeUserManager(BaseUserManager):\n\n def _create_user(self, username, security_level, password, **extra_fields):\n if not username:\n raise ValueError('A username is required.')\n if not security_level:\n raise ValueError('A security level is required.')\n user = self.model(username=username, security_level=security_level)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_user(self, username, security_level, password, **extra_fields):\n user = self._create_user(\n username, security_level, 
password=password, **extra_fields\n )\n if user.security_level == 'M' or 'S':\n extra_fields.setdefault('is_staff', True)\n else:\n extra_fields.setdefault('is_staff', False)\n\n user.save(using=self._db)\n return user\n\n def create_superuser(self, username, security_level, password, **extra_fields):\n user = self._create_user(\n username, security_level, password=password, **extra_fields\n )\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n user.is_admin = True\n user.save(using=self._db)\n return user\n\n\nclass EmployeeUser(AbstractBaseUser):\n\n username = models.CharField(\n max_length=30, unique=True\n )\n email = models.EmailField(\n max_length=40, unique=True, verbose_name='* Personal Email',\n )\n is_admin = models.BooleanField(default=False,)\n\n security_level = models.CharField(\n max_length=1, choices=SECURITY_LEVELS, verbose_name='Security Level'\n )\n gp_employee_id = models.AutoField(\n primary_key=True, verbose_name='GP Employee ID'\n )\n\n objects = EmployeeUserManager()\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = ['security_level', ]\n\n def get_full_name(self):\n username = self.clean(self.username)\n return username\n\n def get_short_name(self):\n return self.username\n\n def __str__(self):\n return self.username\n\n def has_perm(self, perm, obj=None):\n return True\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_superuser(self):\n return self.is_admin\n\n def is_staff(self):\n return self.is_admin\n\n class Meta:\n verbose_name_plural = \"Users\"\n\n\nclass Employee(models.Model):\n\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n primary_key=True, related_name='employee')\n\n def username(self):\n return self.user.get_username()\n username = property(username)\n\n def is_admin(self):\n return self.user.is_admin\n\n first_name = models.CharField(\n max_length=30, blank=True, verbose_name='* First Name'\n )\n last_name = models.CharField(\n max_length=30, blank=True, verbose_name='* Last Name'\n )\n middle_initial = models.CharField(\n max_length=1, blank=True, default='', verbose_name='* MI'\n )\n supervisor = models.CharField(\n max_length=40, blank=False\n )\n division = models.CharField(\n max_length=5, choices=DIV_CHOICES, default='NIT'\n )\n office = models.CharField(\n max_length=4, choices=OFFICE_CHOICES, default='2301'\n )\n date_joined = models.DateField(\n auto_now_add=True, verbose_name='Date Created'\n )\n date_modified = models.DateField(\n auto_now=True, verbose_name='Update Date'\n )\n job_title = models.CharField(\n max_length=2, choices=JOB_TITLES, default='EO', verbose_name='Job Title'\n )\n active = models.CharField(\n max_length=1, choices=ACTIVE_STATUS, verbose_name='Status'\n )\n group_email = models.EmailField(\n default='group@example.com', verbose_name='Group Email'\n )\n notary_id_number = models.CharField(\n max_length=11, blank=False, default='12345678901', verbose_name='* Notary ID Number'\n )\n fax_number = models.CharField(\n max_length=12, blank=False, default='123-456-7890', verbose_name='* FAX Number'\n )\n direct_phone_number = models.CharField(\n max_length=12, blank=False, default='123-456-7890', verbose_name='* Direct Phone Number'\n )\n cell_phone_number = models.CharField(\n max_length=12, blank=False, default='123-456-7890', verbose_name='* Cell Phone Number'\n )\n home_phone_number = models.CharField(\n max_length=12, blank=False, default='123-456-7890', verbose_name='* Home Phone Number'\n )\n manager = 
models.CharField(\n max_length=40, blank=False\n )\n\n @receiver(post_save, sender=settings.AUTH_USER_MODEL)\n def create_auth_token(sender, signal, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)\n","sub_path":"MLHC/employees/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"647935065","text":"stri=input('scrivi una frase o stringa di caratteri: ')\r\nl=len(stri)\r\nk=stri[0]\r\nls=stri[-1]\r\nc=''\r\nfor i in range(1,4):\r\n c=c+stri[-4+i]\r\nrts=''\r\nfor i in range(1,len(stri)+1):\r\n rts=rts+stri[-i]\r\nsette=stri[6]\r\nst=''\r\nfor i in range(1,len(stri)-1):\r\n st=st+stri[i]\r\nCAP=stri.upper()\r\nstru=stri\r\ncc='a'\r\nstra=stri+'\\n'\r\nstru=stru.replace(cc,'e') \r\nprint('lunghezza di stri: ',l)\r\nprint(stra*10)\r\nprint('primo carattere di stri: ',k)\r\nprint('ultimo carattere di stri: ',ls)\r\nprint('ultimi tre carattere di stri: ',c)\r\nprint('stri all\\'inverso: ',rts)\r\nprint('settimo carattere: ',sette)\r\nprint('stri con primo e ultimo carattere tolti: ',st)\r\nprint('stri in maiuscola: ',CAP)\r\nprint('stri con e in posto di a: ',stru)\r\n \r\n","sub_path":"caratteri.py","file_name":"caratteri.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"386764476","text":"# ONLY EDIT FUNCTIONS MARKED CLEARLY FOR EDITING\n\n# import numpy as np\n\ndef question06(numServers, targetServer, times):\n # modify and then return the variable below\n sofar = times[0][:]\n for i in range(numServers):\n times[i][0] = 9223372036854775807000000000000\n reached = set([0])\n reachedMins = {}\n reachedMinsInds = {}\n recalc = set([0])\n for iter in range(numServers-1):\n # print('Reached: ' + str(reached))\n minMinValue = 9223372036854775807000000000000000\n for r in recalc:\n minValue = min(times[r])\n minInd = times[r].index(minValue)\n reachedMins[r] = minValue\n reachedMinsInds[r] = minInd\n for r in reached:\n minValue = reachedMins[r]\n if minValue < minMinValue:\n minMinValue = minValue\n minMinInd = reachedMinsInds[r]\n minMinOwner = r\n recalc = set([minMinOwner])\n recalc.add(minMinInd)\n sofar[minMinInd] = minMinValue\n reached.add(minMinInd)\n for i in range(numServers):\n times[i][minMinInd] = 9223372036854775807000000000000\n for i in range(numServers):\n times[minMinInd][i] += sofar[minMinInd]\n if targetServer in reached:\n return sofar[targetServer]\n return sofar[targetServer]\n\n# import time\n# import random\n# SIZE = 1000\n# random.seed(42)\n# times = []\n# for i in range(SIZE):\n# row = []\n# for j in range(SIZE):\n# if i == j:\n# row.append(0)\n# else:\n# row.append(random.randint(1, 100))\n# times.append(row)\n# start = time.time()\n# print(question06(SIZE,1,times))\n# end = time.time()\n# print(end-start)\n# #0.054\n","sub_path":"q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"249590737","text":"from pprint import pprint\nimport requests\nAPI_BASE_URL = 'https://superheroapi.com/api/2619421814940190'\nsuperhero = ['Hulk', 'Captain America', 'Thanos']\ndict_superhero = {}\nsearch = '/search'\nfor name_heroes in superhero:\n r = requests.get(API_BASE_URL + search + f'/{name_heroes}')\n dict_superhero[name_heroes] = r.json()['results'][0]['powerstats']['intelligence']\n\nfor key, 
value in sorted(dict_superhero.items(), reverse=True):\n print(f'Самый умный супергерой это {key}, его интелект равен {value}')\n break\n","sub_path":"Desktop/ДЗ/HTTP Requests/HW №1.py","file_name":"HW №1.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"432715593","text":"\"\"\"\nCourse: EE2703-Applied Programming Lab\nName: Nihal Gajjala\nRoll Number: EE19B044\nAssignment 1\n\"\"\"\nfrom sys import argv, exit\n# Assigning Constant Variables\nCIRCUIT='.circuit'\nEND='.end'\n# Validating The Number Of Arguments\nif len(argv)!=2:\n print('\\nUsage: %s ' %argv[0])\n exit()\n# Validating The File Name\ntry:\n # Opening And Reading The File\n with open(argv[1]) as f:\n lines=f.readlines()\n start=-1\n end=-2\n # Locating The Beginning And End Of The Circuit By Checking For .circuit And .end\n for line in lines:\n if CIRCUIT==line[:len(CIRCUIT)]:\n start=lines.index(line)\n elif END==line[:len(END)]:\n end=lines.index(line)\n break\n # Validating The Content In The Netlist i.e, Checking If .circuit And .end Are Placed Correctly\n if start>=end or start<0 or end<0:\n print('Invalid circuit definition')\n exit(0)\n # Traverse The Circuit Definition From Last Element To First Element And Print Each Line With Words In Reverse Order\n while end-1>start:\n '''\n Removing Blank Spaces At The Beginning\n Removing Comments After '#'\n Splitting The String Into A List With Space As Separator\n '''\n line1=lines[end-1].split('#')[0].split()\n # Reversing The Order Of The Contents In The Given List\n line2=reversed(line1)\n # Joining The Contents Of The List Using spaces\n line3=' '.join(line2)\n # Printing The Final Line\n print(line3)\n end-=1\n # Closing The File\n f.close()\n# Printing Error Message For A Wrong Filename\nexcept IOError:\n print('Invalid file')\n exit()","sub_path":"Assignment.1/EE2703_Assign1_EE19B044.py","file_name":"EE2703_Assign1_EE19B044.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"256826803","text":"from tkinter import *\nimport time\nimport random\n\nclass Animacion():\n\n\tdef __init__(self):\n\t\tself.ventana = Tk()\n\t\tself.canvas = Canvas(self.ventana,width=400,height=400)\n\t\tself.ventana.geometry(\"400x400\")\n\t\tself.canvas.place(x=0,y=0)\n\t\tself.fondo = self.canvas.create_rectangle(0,0,400,400,fill='black')\n\t\tself.tronco = self.canvas.create_rectangle(170,210,230,310,fill='brown')\n\t\tself.estrella = self.canvas.create_oval(180,20,220,50,fill='yellow')\n\t\tself.arbol = self.canvas.create_polygon(60,210,340,210,200,50,fill='green')\n\t\tself.lista = []\n\t\tself.contador = 0\n\t\tself.crear()\n\t\tself.ventana.after(300,self.animacion)\n\t\tself.ventana.mainloop()\n\n\tdef crear(self):\n\t\tr = random.randint(-400,400)\n\t\tself.copo = self.canvas.create_oval(r,10,r+3,13,fill='white')\n\t\tself.contador+=1\n\t\tself.lista.append(self.copo)\n\t\tif self.contador<100:\n\t\t\tself.ventana.after(40,self.crear)\n\n\tdef animacion(self):\n\t\twhile True:\n\t\t\tfor copo in self.lista:\n\t\t\t\tcoor = self.canvas.coords(copo)\n\t\t\t\tx = 1\n\t\t\t\ty = 1\n\t\t\t\tif coor[0]>=400:\n\t\t\t\t\tx=-400\n\t\t\t\tif coor[1]>=400:\n\t\t\t\t\ty=-400\n\t\t\t\tself.canvas.move(copo,x,y)\n\t\t\tself.canvas.update()\n\t\t\t\nobj = 
Animacion()","sub_path":"navidad.py","file_name":"navidad.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"144382926","text":"import collections\nimport logging\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import model_selection\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nclass AlgorithmSingleRunDataHandler(object):\n\n def __init__(self, data: pd.DataFrame, many_cols: bool, bacteria_key, bacteria_name, antibiotic_id: int,\n selection_method: str, test_set_fraction: float, number_of_folds: int):\n self.data = data\n self.column_wise = many_cols\n self.bacteria_key = bacteria_key\n self.bacteria_name = bacteria_name\n self.antibiotic_id = antibiotic_id\n self.fraction_to_use = test_set_fraction\n self.number_of_folds = number_of_folds\n self.split_method = selection_method\n self.logger = logging.getLogger(__name__)\n self.logger.info('AlgorithmSingleRunHandler for Bacteria: %s and Antibiotic: %d', self.bacteria_name,\n self.antibiotic_id)\n\n def get_antibiotic_name(self):\n \"\"\"Gets the name of the antibiotic associated with the SingleRunDataHandler\"\"\"\n antibiotic_name = self.data[self.data['Ab_ID'] == self.antibiotic_id].Ab_Name.unique()\n antibiotic_name = antibiotic_name[0]\n\n return antibiotic_name\n\n def get_masked_data_for_features(self, features, gc_range):\n \"\"\"My version of set attributes\"\"\"\n mask = (self.data[features].notnull().all(1))\n masked_data = self.data[mask]\n\n if gc_range != 0:\n\n gc_ratio_cols = self._get_growth_check_columns()\n\n if self._is_pseudomonas():\n mask = (masked_data[gc_ratio_cols] >= gc_range)\n else:\n mask = ((masked_data[gc_ratio_cols].isnull()) | (masked_data[gc_ratio_cols] >= gc_range))\n\n masked_data = masked_data[mask.values]\n\n return masked_data\n\n def _get_growth_check_columns(self):\n \"\"\"Gets the names of the columns containing a growth check ratio\"\"\"\n applicable_columns = [column_name for column_name in self.data.columns if 'GC_Ratio' in column_name]\n return applicable_columns\n\n def get_bacteria_level(self):\n \"\"\"Return the bacteria level id\"\"\"\n return self.bacteria_key[0]\n\n def get_bacteria_id(self):\n \"\"\"Return the bacteria id\"\"\"\n return self.bacteria_key[1]\n\n def get_concentrations(self):\n \"\"\"Return the concentrations present for this data set\"\"\"\n conc = list(self.data['Conc'].unique())\n conc.append(conc[-1] * 2)\n return conc\n\n def get_concentration_labels(self):\n \"\"\"Return the concentration labels for classification\"\"\"\n return [i for i in range(len(self.get_concentrations()))]\n\n def get_concentrations_and_labels(self):\n \"\"\"Return actual concentrations with labels\"\"\"\n return {conc: label for conc, label in zip(self.get_concentrations(), self.get_concentration_labels())}\n\n def get_labels_and_concentrations(self):\n \"\"\"Return actual concentrations with labels\"\"\"\n return {label: conc for conc, label in zip(self.get_concentrations(), self.get_concentration_labels())}\n\n def _is_pseudomonas(self):\n \"\"\"Check if the dataframe corresponds to pseudomonas data\"\"\"\n level = self.get_bacteria_level()\n id = self.get_bacteria_id()\n\n is_pseudomonas_species = (level == 3) and (id == 10)\n is_pseudomonas_genus = (level == 2) and (id == 13)\n return is_pseudomonas_species or is_pseudomonas_genus\n\n def _get_mic_flags(self, survival_flags):\n \"\"\"get the mic flag by doing an xor along 
the list\"\"\"\n        combined_survival = [1] + survival_flags + [0]\n\n        mic_flags = [0] * (len(combined_survival) - 1)\n\n        for i in range(len(combined_survival) - 1):\n            mic_flags[i] = combined_survival[i] ^ combined_survival[i + 1]\n\n        return mic_flags\n\n    def get_multilabel_x_y(self, features, gc_range):\n        \"\"\"gets multi-label data for the given features and gc\"\"\"\n        masked_data = self.get_masked_data_for_features(features, gc_range)\n        plate_groups = masked_data['Plate_ID'].unique()\n\n        x = np.array([])\n        y = np.array([])\n\n        for i, plate_id in enumerate(plate_groups):\n\n            survivals = masked_data.loc[masked_data['Plate_ID'] == plate_id, 'Survival'].values.tolist()\n\n            dep = self._get_mic_flags(survivals)\n\n            ind_features = [feature for feature in features if feature != 'Conc']\n            indep = np.ravel(masked_data[masked_data['Plate_ID'] == plate_id][ind_features].values.T)\n            # indep = np.ravel(masked_data.loc[masked_data['Plate_ID'] == plate_id, ind_features].values.T)\n\n            if i == 0:\n                x = np.hstack((x, indep))\n                y = np.hstack((y, dep))\n            else:\n                x = np.vstack((x, indep))\n                y = np.vstack((y, dep))\n\n        if np.ndim(x) == 1:\n            x = np.expand_dims(x, axis=0)\n            y = np.expand_dims(y, axis=0)\n        else:\n            x = MinMaxScaler().fit_transform(x)\n\n        return x, y\n\n    def train_test_split(self, x, y, random_seed):\n        \"\"\"Split the data into train and test groupings using the configured test set fraction\"\"\"\n        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=self.fraction_to_use, random_state=random_seed)\n\n        return x_train, x_test, y_train, y_test\n\n    def _num_samples(self, x):\n        \"\"\"Return the number of samples in array-like x\"\"\"\n        if hasattr(x, 'fit') and callable(x.fit):\n            # Don't get num_samples from an ensemble's length!\n            raise TypeError('Expected sequence or array-like, got '\n                            'estimator %s' % x)\n        if not hasattr(x, '__len__') and not hasattr(x, 'shape'):\n            if hasattr(x, '__array__'):\n                x = np.asarray(x)\n            else:\n                raise TypeError(\"Expected sequence or array-like, got %s\" %\n                                type(x))\n        if hasattr(x, 'shape'):\n            if len(x.shape) == 0:\n                raise TypeError(\"Singleton array %r cannot be considered\"\n                                \" a valid collection.\" % x)\n            return x.shape[0]\n        else:\n            return len(x)\n\n    def get_kfold_multi_label(self, random_seed, features, gc_range):\n        \"\"\"Variation on existing multi-label kfolds finder that gets the labels as well as\n        the split data. 
Modified because we have no knowledge of breakpoints at this time\"\"\"\n\n self.logger.debug('Getting multi label kfold')\n masked_data = self.get_masked_data_for_features(features, gc_range)\n group_ids = masked_data['Plate_ID'].unique()\n\n x, y = self.get_multilabel_x_y(features, gc_range)\n\n data_frame_to_use = masked_data.loc[:, :].values\n stratify_data = masked_data.drop_duplicates(subset=['Plate_ID'], keep='first')\n g2_ = np.ravel(stratify_data.loc[:, 'MIC_Conc'].values)\n\n counter = collections.Counter(g2_)\n\n pop_out_list = []\n for key, value in counter.items():\n if value == 1:\n pop_out_list.append(key)\n\n remove_index_list = [index for index, item in enumerate(g2_) if item in pop_out_list]\n\n g1 = np.asarray([item for index, item in enumerate(x) if index not in remove_index_list])\n g2 = np.asarray([item for index, item in enumerate(g2_) if index not in remove_index_list])\n\n conc_label_dict = self.get_concentrations_and_labels()\n g2 = np.asarray([conc_label_dict[i] for i in list(g2)])\n\n n_samples = self._num_samples(g1)\n\n unique_y, y_inversed = np.unique(g2, return_inverse=True)\n y_counts = np.bincount(y_inversed)\n\n if np.all(self.number_of_folds > y_counts):\n self.split_method = 'sss'\n\n if self.number_of_folds > n_samples:\n self.split_method = 'sss'\n\n s_generator = None\n\n if self.split_method == 'sss':\n s_generator = model_selection.StratifiedShuffleSplit(n_splits=1, test_size=0.5,\n random_state=random_seed)\n elif self.split_method == 'skf':\n s_generator = model_selection.StratifiedKFold(n_splits=self.number_of_folds, random_state=random_seed,\n shuffle=True)\n\n kfold = []\n kfold_train_test_data = []\n\n if g1.size == 0:\n train_index = np.array([0])\n test_index = np.array([0])\n\n k_fold_i, k_fold_t_t_data_i = self._help_k_fold(x, y, train_index, test_index, masked_data, group_ids)\n\n kfold.append(k_fold_i)\n kfold_train_test_data.append(k_fold_t_t_data_i)\n\n else:\n for train_index, test_index in s_generator.split(g1, g2):\n train_index = np.asarray(self._resolve_shifted_ids(len(x), remove_index_list, train_index))\n train_index = np.append(train_index, remove_index_list).astype(int)\n\n test_index = np.asarray(self._resolve_shifted_ids(len(y), remove_index_list, test_index))\n\n k_fold_i, k_fold_t_t_data_i = self._help_k_fold(x, y, train_index, test_index, masked_data, group_ids)\n\n kfold.append(k_fold_i)\n kfold_train_test_data.append(k_fold_t_t_data_i)\n\n return kfold, kfold_train_test_data, x, y\n\n def _help_k_fold(self, x, y, train_index, test_index, data_frame_to_use, group_ids):\n \"\"\"Help with separating the data into cross validation folds\"\"\"\n k_fold = (train_index, test_index)\n\n x_train = x[train_index]\n y_train = y[train_index]\n x_test = x[test_index]\n y_test = y[test_index]\n\n train_group_id = group_ids[train_index]\n test_group_id = group_ids[test_index]\n\n train = data_frame_to_use[data_frame_to_use['Plate_ID'].isin(train_group_id)]\n test = data_frame_to_use[data_frame_to_use['Plate_ID'].isin(test_group_id)]\n\n k_fold_train_test_data = (train, test, x_train, y_train, x_test, y_test)\n\n return k_fold, k_fold_train_test_data\n\n @staticmethod\n def _get_shifted_ids_resolution_dict(total_ids, removed_ids):\n \"\"\"Removing IDs and then needing to add them back in post stratification is not as simple as appending them.\n It requires shifting the ids based on how many removed IDs have been passed. 
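        For example, with total_ids=5 and removed_ids=[1], the surviving ids are [0, 2, 3, 4] and the resolution mapping is {0: 0, 1: 2, 2: 3, 3: 4}, so a post-stratification index of 1 resolves back to original id 2.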
Assumes that all id sets come in\n        least-to-greatest order\"\"\"\n\n        all_ids = [i for i in range(total_ids) if i not in removed_ids]\n\n        return {index: item for index, item in enumerate(all_ids)}\n\n    @staticmethod\n    def _resolve_shifted_ids(total_ids, removed_ids, list_to_resolve):\n        \"\"\"Actually resolve the bad ids found in a shifted list\"\"\"\n\n        resolving_dict = AlgorithmSingleRunDataHandler._get_shifted_ids_resolution_dict(total_ids, removed_ids)\n\n        return [resolving_dict[i] for i in list_to_resolve]\n","sub_path":"algorithm development/algorithm_run/algorithm/algorithm_single_run_data_handler.py","file_name":"algorithm_single_run_data_handler.py","file_ext":"py","file_size_in_byte":10950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"387585246","text":"\"\"\"https://leetcode.com/problems/merge-intervals/\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n        ret = []\n        for i in sorted(intervals, key=lambda x: x[0]):\n            if ret and (i[0] <= ret[-1][1]):\n                ret[-1][1] = max(ret[-1][1], i[1])\n            else:\n                ret.append(i)\n        return ret\n","sub_path":"leetcode/1-100/56-Merge Intervals/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"320167721","text":"#! python3\n\"\"\"\nCreate a function called isInteger()\nInput is a float number\nReturn True if the number is an integer\nReturn False if the number is not an integer\n(2 points)\n\"\"\"\nimport math\n\na= float(input(\"what is the length of A side\"))\n\nb= float(input(\"what is the length of B side\"))\n\nc = math.sqrt(a**2 + b**2)\n\nprint(\"The length of the hypotenuse is\", c )\n","sub_path":"task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"557071711","text":"#using a function in a while loop\ndef get_formatted_name(first_name,last_name):\n    \"\"\"Return a neatly formatted full name\"\"\"\n    full_name = f\"{first_name} {last_name}\"\n    return full_name.title()\n\nwhile True:\n    print(\"Pl enter your name\")\n    print(\"(Enter 'q' to quit)\")\n    f_name = input(\"First Name :\")\n    if f_name == 'q':\n        break\n    l_name = input(\"Last Name :\")\n    if l_name == 'q':\n        break\n    formatted_name = get_formatted_name(f_name,l_name)\n    print(f\"\\nHello, {formatted_name}\")\n","sub_path":"ch8_5_greeter_while_loop.py","file_name":"ch8_5_greeter_while_loop.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"630381162","text":"\n\n#寻找同一子树\n#data:父点集(parentset),i:当前节点在集合中的偏移\ndef findSame(data,i):\n    result=[]\n    result.append(i)\n    for index,d in enumerate(data):\n        if d==i:\n            result+=findSame(data,index)\n    return result\n\n#查找集合中的最大值 data中的数据为位置\ndef findMax(valueset,data):\n    maxdata=0\n    for i in data:\n        if valueset[i]>maxdata:\n            maxdata=valueset[i]\n    #print(i,maxdata)\n    return maxdata\n\ndef calSum(n,parentset,valueset):\n    nodeSet=[i for i in range(0,n)]\n    result=0\n    for i in range(1,n):\n        sameSet=findSame(parentset,i)\n        #print(sameSet)\n        extraSet=list(set(nodeSet)-set(sameSet))\n        #print(extraSet)\n        temp=abs(findMax(valueset,sameSet)-findMax(valueset,extraSet))\n        #print(temp)\n        result+=temp\n\n    return result\n\n    \nif __name__=='__main__':\n    \n    nodeset=[]\n    n=int(input())\n    parentset=[]\n    st=''\n    
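# the empty string below is a placeholder for node 0: the root has no parent,\n    # so parentset[i] (for i >= 1) holds the parent read from input for node i\n    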
parentset.append(st)\n s=input()\n for ss in s.split(' '):\n parentset.append(int(ss))\n s=input()\n valueset=[]\n for ss in s.split(' '):\n valueset.append(int(ss))\n \n \n '''\n for i in range(0,n):\n parentset.append(Node(parentset[i],i,valueset[i]))\n '''\n print(calSum(n,parentset,valueset)) \n \n \n ","sub_path":"algorithm/tree_value.py","file_name":"tree_value.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"382981533","text":"import pymongo\r\nfrom pymongo import MongoClient\r\nimport pandas as pd\r\nimport json\r\n\r\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\nmydb = myclient[\"excel_db\"]\r\nmycol = mydb[\"excel_data\"]\r\n\r\n\r\ndef InsertData(path=None):\r\n\r\n data = pd.read_excel(path)\r\n\r\n data = data.drop('Notes', axis=1)\r\n\r\n data.columns = data.columns.str.replace(' ', '_')\r\n\r\n data_json = json.loads(data.to_json(orient='records'))\r\n\r\n mycol.insert_many(data_json)\r\n\r\n print(\"All the Data inserted in Mongo DB Server .... \")\r\n\r\nif __name__ == \"__main__\":\r\n InsertData(path=\"task_S.xlsx\")\r\n\r\n\r\n","sub_path":"app_excel_file.py","file_name":"app_excel_file.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"479976803","text":"from django.contrib import admin\n\n# importar el modelo de models.py\nfrom .models import Article, Category\n\n# Register your models here.\n\nclass ArticleAdmin(admin.ModelAdmin):\n readonly_fields = ('created','updated')\n\n# registrar los modelos.\nadmin.site.register(Article, ArticleAdmin)\nadmin.site.register(Category)\n\n\n# configurar titulo panel admin\ntitulo = \"Master en Django\"\nadmin.site.site_header = titulo\nadmin.site.site_title = titulo\nadmin.site.index_title = \"Bienvenido sñr Toni\"","sub_path":"Django/Adjango/miapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"2741155","text":"# 这个游戏是神奇宝贝遭遇战\r\n\r\n# 开始游戏\r\ndef start():\r\n print(\"你正在绿叶森林里散步,突然遇到一只皮卡丘\")\r\n while True:\r\n print(\"\"\"请做出你的选择:\r\n 1. 与它作战\r\n 2. 逃跑\"\"\")\r\n select_choice = input(\"> \")\r\n if select_choice == '1':\r\n select_Pokemon()\r\n elif select_choice == '2':\r\n away(\"你逃跑了!\")\r\n else:\r\n print(\"输入无效,请重新输入\")\r\n\r\n# 选择你的精灵出战\r\ndef select_Pokemon():\r\n while True:\r\n print(\"\"\"请选择你的精灵出战:\r\n 1. 杰尼龟\r\n 2. 小火龙\r\n 3. 妙蛙种子\"\"\")\r\n pokemon_choice = input(\"> \")\r\n if pokemon_choice == '1':\r\n Jenny_Turtle()\r\n elif pokemon_choice == '2':\r\n Small_fire_dragon()\r\n elif pokemon_choice == '3':\r\n Miao_frog_seed()\r\n else:\r\n print(\"输入无效,请重新输入\")\r\n\r\n# 杰尼龟作战\r\ndef Jenny_Turtle():\r\n while True:\r\n print(\"\"\"请选择技能:\r\n 1. 冲撞\r\n 2. 水枪\"\"\")\r\n turtle_choice = input(\"> \")\r\n if turtle_choice == '1':\r\n print(\"\"\"杰尼龟使用了冲撞,皮卡丘的体力下降了。\r\n 皮卡丘使用了十万伏特。\r\n 杰尼龟倒下了。\"\"\")\r\n die(\"你被皮卡丘打败了。\")\r\n elif turtle_choice == '2':\r\n print(\"杰尼龟使用了水枪,皮卡丘倒下了。\")\r\n away(\"你打败了皮卡丘。\")\r\n else:\r\n print(\"输入无效,请重新输入\")\r\n\r\n# 小火龙作战\r\ndef Small_fire_dragon():\r\n while True:\r\n print(\"\"\"请选择技能:\r\n 1. 冲撞\r\n 2. 
火苗\"\"\")\r\n dragon_choice = input(\"> \")\r\n if dragon_choice == '1':\r\n print(\"\"\"小火龙使用了冲撞,皮卡丘的体力下降了。\r\n 皮卡丘使用了十万伏特。\r\n 小火龙倒下了。\"\"\")\r\n die(\"你被皮卡丘打败了。\")\r\n elif dragon_choice == '2':\r\n print(\"小火龙使用了火苗,皮卡丘倒下了。\")\r\n away(\"你打败了皮卡丘。\")\r\n else:\r\n print(\"输入无效,请重新输入\")\r\n\r\n# 妙蛙种子作战\r\ndef Miao_frog_seed():\r\n while True:\r\n print(\"\"\"请选择技能:\r\n 1. 冲撞\r\n 2. 飞叶快刀\"\"\")\r\n seed_choice = input(\"> \")\r\n if seed_choice == '1':\r\n print(\"\"\"妙蛙种子使用了冲撞,皮卡丘的体力下降了。\r\n 皮卡丘使用了十万伏特。\r\n 妙蛙种子倒下了。\"\"\")\r\n die(\"你被皮卡丘打败了。\")\r\n elif seed_choice == '2':\r\n print(\"妙蛙种子使用了飞叶快刀,皮卡丘倒下了。\")\r\n away(\"你打败了皮卡丘。\")\r\n else:\r\n print(\"输入无效,请重新输入\")\r\n\r\n# 胜利离开\r\ndef away(str1):\r\n print(str1)\r\n exit(0)\r\n\r\n# 失败死亡\r\ndef die(str2):\r\n print(str2)\r\n exit(1)\r\n\r\nstart()\r\n","sub_path":"lpthw/ex36.py","file_name":"ex36.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"609899348","text":"import math \nimport binascii\nimport copy\nIPtable = [58, 50, 42, 34, 26, 18, 10, 2,\n 60, 52, 44, 36, 28, 20, 12, 4,\n 62, 54, 46, 38, 30, 22, 14, 6,\n 64, 56, 48, 40, 32, 24, 16, 8,\n 57, 49, 41, 33, 25, 17, 9, 1,\n 59, 51, 43, 35, 27, 19, 11, 3,\n 61, 53, 45, 37, 29, 21, 13, 5,\n 63, 55, 47, 39, 31, 23, 15, 7]\n\n\n\ndef text_to_bits(text, encoding='utf-8', errors='surrogatepass'):\n bits = bin(int.from_bytes(text.encode(encoding, errors), 'big'))[2:]\n return bits.zfill(8 * ((len(bits) + 7) // 8))\n\ndef text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):\n n = int(bits, 2)\n return int2bytes(n).decode(encoding, errors)\n\ndef int2bytes(i):\n hex_string = '%x' % i\n n = len(hex_string)\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\ndef concat_bits(s):\n bits = ''\n for i in range(len(s)):\n bits += s[i]\n return (bits).ljust(64,'0')\n\n\ndef initial_permutation(num):\n text_split =list(num)\n text_split1 =list(num)\n\n num_blocks = math.ceil(len(text_split)/8)\n text_block = [[]for i in range(num_blocks)]\n for i in range(num_blocks+1):\n for j in range(8):\n if len(text_split) > 0:\n text_block[i].append(text_split.pop(0))\n else:\n break\n\n samp = text_block.copy()\n\n for i in range(len(text_block)):\n for j in range(len(text_block[i])):\n samp[i][j] = text_to_bits(text_block[i][j])\n\n # print(\"\\n\")\n # print(\"Bits of each characters in input - \",samp)\n # print(\"\\n\")\n\n\n\n final_code = [[]for i in range(len(text_block))]\n for i in range(len(text_block)):\n list_concat_64 = list(concat_bits(samp[i]))\n onlyListConcat = list_concat_64.copy()\n useful_array = [None for i in range(64)]\n for j in range(len(list_concat_64)):\n useful_array[IPtable.index(j+1)] = onlyListConcat[j]\n copy_list_concat_64 = useful_array.copy()\n for k in range(8):\n var = ''\n for l in range(8):\n var += copy_list_concat_64.pop(0)\n final_code[i].append(var)\n # print(final_code)\n\n full_final = ''\n for i in range(len(final_code)):\n join_final = ''\n for j in range(len(final_code[i])):\n join_final += final_code[i][j]\n full_final += join_final\n\n # print(\"After Initial Permutation = \",full_final)\n # print(\"\\n\")\n return full_final\n\n##################DECODING PART########################################\n\nFPtable = [40, 8, 48, 16, 56, 24, 64, 32,\n 39, 7, 47, 15, 55, 23, 63, 31,\n 38, 6, 46, 14, 54, 22, 62, 30,\n 37, 5, 45, 13, 53, 21, 61, 29,\n 36, 4, 44, 12, 52, 20, 60, 28,\n 35, 3, 43, 11, 51, 19, 59, 27,\n 34, 2, 42, 10, 50, 18, 58, 26,\n 
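# FPtable is the inverse permutation of IPtable, as in standard DES,\n           # so final_permut below undoes initial_permutation bit for bit\n           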
33, 1, 41, 9, 49, 17, 57, 25]\n\ndef final_permut(full_final):\n num1 = full_final\n num_split =list(num1)\n sam = num_split\n num_blocks = int(len(num_split)//64)\n bits_arr_64 = [[]for i in range(num_blocks)]\n for i in range(num_blocks):\n for j in range(64):\n bits_arr_64[i].append(sam.pop(0))\n\n new_copy_bits_arr_64 = bits_arr_64.copy()\n useful_array = [[None for i in range(64)]for i in range(num_blocks)]\n\n for i in range(len(bits_arr_64)):\n for j in range(len(bits_arr_64[i])):\n useful_array[i][FPtable.index(j+1)] = new_copy_bits_arr_64[i][j]\n full_text = ''\n for i in range(len(useful_array)):\n var = ''\n for j in range(len(useful_array[i])):\n var += useful_array[i][j]\n full_text += var\n # print(\"After Final Permutation Bits = \",full_text)\n # print(\"\\n\")\n # print(\"After Final Permutation Text = \",text_from_bits(full_text),\"\\n\",end= \"\")\n return full_text\n# for i in range(len(text_split1)):\n# n=i*8\n# print(full_text[n:n+8])\n\n\n# num = input(\"Input = \")\n# out_initial_permut = initial_permutation(num)\n# out_final_permut = final_permut(out_initial_permut)\n# print(out_initial_permut)\n# print(out_final_permut,text_from_bits(out_final_permut))\n","sub_path":"lab4/P1.py","file_name":"P1.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"180936939","text":"import re, pickle\nimport numpy as np\nfrom konlpy.tag import Mecab\nfrom sklearn import preprocessing\n\n# my python file\nimport utils\nfrom model import SVM\n\n# include POS, MAG, VX to handle negation\nPOS = \"NN|XR|VA|VV|MAG|VX\"\n\nPOS_IDX = [\"NN\", \"VA\", \"VV\", \"XR\"]\n\n# \"못\"은 따로 처리\nNEG_PREV = [(\"아니하\", \"VX\"), (\"않\", \"VX\"), (\"없\", \"VA\"), (\"없이\", \"MAG\")]\nNEG_NEXT = [(\"안\", \"MAG\")]\n\ndef handle_negation(bag, words, counter):\n\tglobal NEG_PREV, NEG_NEXT\n\n\t# construct index to negate word except \"못\"\n\tneg_idx = []\n\tfor neg in NEG_PREV:\n\t\tfind = utils.find_dup_idx(words, neg)\n\t\tfor item in find:\n\t\t\tif item-1 > -1: neg_idx.append(item-1)\n\tfor neg in NEG_NEXT:\n\t\tfind = utils.find_dup_idx(words, neg)\n\t\tfor item in find:\n\t\t\tif item+1 < len(words): neg_idx.append(item+1)\n\n\t# handle \"못~\"\n\tfor w in words:\n\t\tloc = w[0].find(\"못\")\n\t\tif loc > 0 and w[1].find(\"VX\"): neg_idx.append(loc-1)\n\t# handle \"못\"\n\tfor w in words:\n\t\tloc = w[0].find(\"못\")\n\t\tif loc > -1 and w[1].find(\"MAG\"):\n\t\t\t# 긴 부정문 (못햇다, 못 했다..)\n\t\t\tif loc > 1 and words[loc-1][1].find(\"VV\"): neg_idx.append(loc-1)\n\t\t\t# 짧은 부정\n\t\t\telif loc < len(words)-1: neg_idx.append(loc+1)\n\t\t\t# 한계: 못 생겼다 같은 경우는 이상하게 나옴\n\n\t# negate word\n\tfor i in neg_idx:\n\t\tif words[i] in bag[0]:\n\t\t\ttry: idx = POS_IDX.index(words[i][1])\n\t\t\texcept ValueError: pass\n\t\t\telse:\t\n\t\t\t\tcounter[idx] -= 1\n\t\t\t\tcounter[idx+4] += 1\n\t\telif words[i] in bag[1]:\n\t\t\ttry: idx = POS_IDX.index(words[i][1])\n\t\t\texcept ValueError: pass\n\t\t\telse:\n\t\t\t\tcounter[idx] += 1\n\t\t\t\tcounter[idx+4] -= 1\n\n\treturn counter\t\n\ndef make_features(bag, sentence, words):\n\tglobal POS_IDX\n\n\t# feature vector:\n\t# [ pos_noun, pos_adj, pos_verb, pos_root,\n\t# neg_noun, neg_adj, neg_verb, neg_root ]\n\tcounter = [0, 0, 0, 0, 0, 0, 0, 0]\n\n\tif not words: return counter\n\t\n\tfor i, w in enumerate(words):\n\t\t# replace POS to sentiment dictionary type\n\t\twords[i] = list(words[i])\n\t\tif words[i][1].find(\"NN\") >= 0: words[i][1] = \"NN\"\n\t\telif 
words[i][1].find(\"VA\") >= 0: words[i][1] = \"VA\"\n\t\telif words[i][1].find(\"VV\") >= 0: words[i][1] = \"VV\"\n\t\telif words[i][1].find(\"XR\") >= 0: words[i][1] = \"XR\"\n\t\telif words[i][1].find(\"VX\") >= 0: words[i][1] = \"VX\"\n\t\telif words[i][1].find(\"MAG\") >= 0: words[i][1] = \"MAG\"\n\t\twords[i] = tuple(words[i])\n\n\t\t# count frequency of sentiment words\n\t\tif words[i] in bag[0]: # positive\n\t\t\ttry:\n\t\t\t\tidx = POS_IDX.index(words[i][1])\n\t\t\t\tcounter[idx] += 1\n\t\t\texcept ValueError: pass\n\t\telif words[i] in bag[1]: # negative\t\n\t\t\ttry:\n\t\t\t\tidx = POS_IDX.index(words[i][1])\n\t\t\t\tcounter[idx+4] += 1\n\t\t\texcept ValueError: pass\n\n\tcounter = handle_negation(bag, words, counter)\n\treturn counter\n\n\t\t\ndef feature_data(tagger, exp, bag, review):\n\tdata = []\n\tlabel = []\n\tfor r in review:\n\t\t# tagging review\n\t\tpos = tagger.pos(r[1])\n\t\twords = [ p for p in pos if exp.search(p[1]) ]\n\n\t\t# construct data sets\n\t\tdata.append(make_features(bag, r[1], words))\n\t\tlabel.append(r[0])\n\n\t# normalize features\n\tfor i, v in enumerate(data):\n\t\tarr = np.array(v, dtype=float)\n\t\tscaled = preprocessing.scale(arr).tolist()\n\t\tdata[i] = scaled\n\t\t\n\treturn data, label\n\n\ndef evaluate_model(result, labels):\n\terr = (result == labels).mean() * 100\n\n\tr_pos = labels.count('1')\n\tr_neg = labels.count('0')\n\ttp = 0\n\ttn = 0\n\n\tfor i, r in enumerate(labels):\n\t\tif r == '1' and r == result[i]: tp += 1\n\t\telif r == '0' and r == result[i]: tn += 1\n\n\tprint(\"accuracy : {:.2f}%\" .format(err))\n\tprint(\"TPR : {:.2f}%\" .format(100*(tp/r_pos)))\n\tprint(\"TNR : {:.2f}%\" .format(100*(tn/r_neg)))\n\t\t\t\n\nif __name__ == \"__main__\":\n\tglobal POS\n\t\n\t# initalize Mecab tagger\n\ttagger = Mecab()\n\n\t# initalize regular expression\t\n\texp = re.compile(POS, re.IGNORECASE)\n\t\n\t# load sentiment dictionary\n\tbag = utils.load_dictionary()\n\n\t# load model if exist\n\ttry:\n\t\twith open(\"../Resources/models/model\", \"rb\") as model_file:\n\t\t\tmodel = pickle.load(model_file)\n\texcept IOError as err:\n\t\t# load training reviews from file\t\n\t\ttrain_review = utils.load_reviews(\"../Resources/samples/train_data\")\n\t\t# get feature from train data\n\t\ttrain_data, train_label = feature_data(tagger, exp, bag, train_review)\n\t\t# initalize classifer class\n\t\tmodel = SVM()\n\t\t# train model\n\t\tmodel.train(train_data, train_label)\n\t\t#save model\n\t\twith open(\"../Resources/models/model\", \"wb\") as model_file:\n\t\t\tpickle.dump(model, model_file)\n\telse:\n\t\tprint(\"use saved model..\")\n\t\n\t# load test reviews from file\n\ttest_review = utils.load_reviews(\"../Resources/samples/test_data\")\n\t# get feature from test data\n\ttest_data, test_label = feature_data(tagger, exp, bag, test_review)\n\t\n\t# predict model\n\tresult = model.predict(test_data)\n\n\t# evaluate accuracy\n\tevaluate_model(result, test_label)\n","sub_path":"senti_model/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"567411319","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/9/17 13:49\n# @Author : Seven\n# @Site : \n# @File : MobileNet.py\n# @Software: PyCharm\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Block(nn.Module):\n def __init__(self, inputs, outs, stride=1):\n super(Block, self).__init__()\n self.conv = nn.Sequential(\n 
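# note: as written this is a standard 3x3 convolution followed by a 1x1\n            # convolution; a faithful MobileNet depthwise-separable block would use\n            # groups=inputs (with out_channels=inputs) for the 3x3 here, then map\n            # inputs to outs in the 1x1 pointwise step\n            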
nn.Conv2d(in_channels=inputs, out_channels=outs, stride=stride,\n kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(outs),\n nn.ReLU(),\n nn.Conv2d(in_channels=outs, out_channels=outs, stride=1,\n kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(outs),\n nn.ReLU()\n )\n\n def forward(self, inputs):\n out = self.conv(inputs)\n return out\n\n\nclass MobileNet(nn.Module):\n def __init__(self, num_classes=10):\n super(MobileNet, self).__init__()\n self.conv = nn.Sequential( # when shape = 32x32, stride =1\n nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(32)\n )\n self.layers = self._block(inputs=32)\n self.linear = nn.Linear(1024, num_classes)\n\n def forward(self, inputs):\n network = self.conv(inputs)\n network = self.layers(network)\n network = F.avg_pool2d(network, kernel_size=network.shape[2])\n network = network.view(network.size(0), -1)\n out = self.linear(network)\n\n return out, network\n\n @staticmethod\n def _block(inputs):\n layers = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]\n block_layers = []\n for layer in layers:\n outs = layer if isinstance(layer, int) else layer[0]\n stride = 1 if isinstance(layer, int) else layer[1]\n block_layers.append(Block(inputs=inputs, outs=outs, stride=stride))\n inputs = outs\n return nn.Sequential(*block_layers)\n","sub_path":"CNN/Cifar10/networks/MobileNet.py","file_name":"MobileNet.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"266066061","text":"import plotly_express as px\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport os\n\nOBD2=pd.read_csv(\"https://raw.githubusercontent.com/BanquetKuma/OBD/master/OBD_GPS_CSV\")\n\ncol_options = [dict(label=x, value=x) for x in OBD2.columns]\ndimensions = [\"x\", \"y\", \"color\"]\n\napp = dash.Dash(__name__, external_stylesheets=[\"https://codepen.io/chriddyp/pen/bWLwgP.css\"])\nserver = app.server\n\napp.layout = html.Div(\n [\n html.H1(\"Visualization of OBD by Dash\"),\n html.Div(\n [\n html.P([d + \":\", dcc.Dropdown(id=d, options=col_options)])\n for d in dimensions\n ],\n style={\"width\": \"25%\", \"float\": \"left\"},\n ),\n dcc.Graph(id=\"graph\", style={\"width\": \"75%\", \"display\": \"inline-block\"})\n ])\n\n@app.callback(Output(\"graph\", \"figure\"), [Input(d, \"value\") for d in dimensions])\ndef make_figure(x, y, color):\n return px.scatter(\n OBD2,\n x=x,\n y=y,\n color=color,\n height=700)\n\n#plotly_expressの描画部分\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"561948034","text":"import tkinter \r\nimport os\r\n\r\ndef set_menu(window, choices):\r\n menubar = tkinter.Menu(root)\r\n window.config(menu=menubar)\r\n\r\n def _set_choices(menu, choices):\r\n for label, command in choices.items():\r\n if isinstance(command, dict):\r\n # Submenu\r\n submenu = tkinter.Menu(menu)\r\n menu.add_cascade(label=label, menu=submenu)\r\n _set_choices(submenu, command)\r\n elif label == '-' and command == '-':\r\n # Separator\r\n menu.add_separator()\r\n else:\r\n # Simple choice\r\n menu.add_command(label=label, command=command)\r\n\r\n _set_choices(menubar, choices)\r\n\r\n\r\nif 
__name__ == '__main__':\r\n import sys\r\n\r\n root = tkinter.Tk()\r\n\r\n from collections import OrderedDict\r\n\r\n set_menu(root, {\r\n 'Table of Contents': OrderedDict([\r\n ('Ecclesiastes', lambda: os.startfile('EcclesiastesBoot.Bat')),\r\n ('Ecclesiasticus', lambda: os.startfile('EcclesiaticusBoot.bat')),\r\n ('Job', lambda: os.startfile('JobBoot.bat')),\r\n\t\t\t('Proverbs', lambda: os.startfile('ProverbsBoot.bat')),\r\n\t\t\t('Psalms', lambda: os.startfile('PsalmsBoot.bat')),\r\n\t\t\t('Song of Solomon', lambda: os.startfile('SongofSolomonBoot.bat')),\r\n\t\t\t('Wisdom', lambda: os.startfile('WisdomBoot.bat')),\r\n\t\t\t('-', '-'),\r\n ('Quit', lambda: sys.exit(0))\r\n ])\r\n })\r\n root.mainloop()","sub_path":"Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"263566058","text":"'''\nChallenge:\nTake a list of directions (north, south, east, west) and simplify the travel plan by removing opposites next to each other.\nThis removal process should be repeated as many times as necessary.\n\nReflection:\nI had some problems with this challenge, most of which were related to index being out of range.\nI installed break statements if certains conditions were met. This helped me clear most tests, but not all.\nFinally decided on using a try and except statement. Try to test the conditions in the if-statement (line 29), if it fails then bail out of loop.\nAlso, I used recursion for the first time during this challenge. It seems to work well since it forces the list of directions to be rechecked, from the beginning, everytime adjustments are made.\nLastly, line 30 and and 31 gave me trouble. I had them switched. Problem: pop(i) changes the length and causes i+1 to move to i.\n'''\n\ndef isOpposite(a,b):\n if (a == \"NORTH\" and b == \"SOUTH\") or (b == \"NORTH\" and a == \"SOUTH\"):\n return True\n elif (a == \"EAST\" and b == \"WEST\") or (b == \"EAST\" and a == \"WEST\"):\n return True\n else:\n return False\n\ndef dirReduc(arr):\n #use recursion to continue simplifcation until unable to do so\n #maybe recursion isn't needed with the continue statement...instead of looping through the rest\n #then starting over, we can just start over each time.\n for i in range(len(arr)-1):\n try:\n if isOpposite(arr[i],arr[i+1]):\n arr.pop(i+1)\n arr.pop(i)\n dirReduc(arr) #or recursion? call dirReduc(arr)? 
continue?\n except:\n break\n return arr\n","sub_path":"directions_reduction.py","file_name":"directions_reduction.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"92536616","text":"import base64 as b64\nimport re\nimport threading\nimport time\n\nimport flask as f\nimport requests\n\nimport samehadaku as s\n\napp = f.Flask(__name__, template_folder='.')\napp.cache = {}\napp.init_time = time.time()\napp.bounded_semaphore = threading.BoundedSemaphore(12)\napp.client_bsemaphores = {}\n\n\n@app.before_request\ndef before_req():\n if time.time() - app.init_time >= 2*60*60:\n app.cache = {}\n app.init_time = time.time()\n if f.request.endpoint in ['query', 'get_dl']:\n ip_addr = f.request.remote_addr\n if ip_addr not in app.client_bsemaphores:\n app.client_bsemaphores[ip_addr] = threading.BoundedSemaphore(\n 3)\n app.client_bsemaphores[ip_addr].acquire()\n\n\n@app.after_request\ndef after_req(resp):\n try:\n ip_addr = f.request.remote_addr\n app.client_bsemaphores[ip_addr].release()\n except ValueError:\n app.client_bsemaphores.pop(ip_addr)\n except Exception:\n pass\n return resp\n\n\n@app.route('/')\ndef root():\n return f.render_template('index.html')\n\n\n@app.route('/')\ndef query(q):\n if len(q) < 4:\n f.abort(403)\n app.bounded_semaphore.acquire()\n try:\n smhdk = s.Samehadaku()\n smhdk.init(q)\n lists = smhdk.get_list()\n finally:\n app.bounded_semaphore.release()\n return f.render_template('eps_list.html',\n lists=lists, encode=b64.urlsafe_b64encode)\n\n@app.route('/_/')\ndef show_modal(url):\n app.bounded_semaphore.acquire()\n url = b64.urlsafe_b64decode(url).decode()\n try:\n smhdk = s.Samehadaku()\n smhdk.get_links(url)\n items = smhdk.rlinks\n finally:\n app.bounded_semaphore.release()\n return f.render_template('links.html', items=items,\n title=smhdk.title, encode=b64.urlsafe_b64encode)\n\n@app.route('/_/dl/')\ndef get_dl(link):\n try:\n link = b64.urlsafe_b64decode(link).decode()\n except Exception:\n f.abort(404)\n app.bounded_semaphore.acquire()\n if not link.startswith('http'):\n f.abort(404)\n if link.startswith('https://www.ahexa.com'): # bypass function 1\n for _ in range(3):\n r = requests.get(link)\n m = re.findall(\n r'''''',\n r.text, re.M | re.I)\n if len(m):\n link = b64.b64decode(m[0]).decode()\n else:\n break\n else: # bypass function 2\n r = requests.get(link)\n dLink = re.findall(\n '
',\n r.text, re.M | re.I)[0]\n dInput = re.findall(\n '',\n r.text, re.M | re.I)[0]\n data = {\n dInput[0]:dInput[1]\n }\n r = requests.post(dLink,data=data)\n link = re.findall(\n r'changeLink\\(\\)\\{var a\\=\\'(.+?)\\';window.open\\(a,\"_blank\"\\)\\};',\n r.text, re.M | re.I)[0]\n r = requests.get(link)\n link = re.findall(\n '.+',\n r.text, re.M | re.I)[0]\n link = b64.urlsafe_b64decode(link.split('?r=')[1]).decode()\n if not link.startswith('https://megaup.net/'):\n return f.redirect(link)\n try:\n ses = requests.Session()\n ses.get(link)\n time.sleep(6)\n r = ses.get(link, allow_redirects=False)\n finally:\n app.bounded_semaphore.release()\n if r.status_code != 302 or 'Location' not in r.headers:\n return f.redirect(link)\n else:\n return f.redirect(r.headers['Location'])\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=False, threaded=True, port=20001)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"567807800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nData Wrangling\n\n@input: Target Symbol (string)\n Market Data (DataFrame)\n Correlation Threshold (float)\n@output: Modeling Data (DataFrame)\n\nCreated on Oct 2020\n@author: Murilo Fregonesi Falleiros\n\"\"\"\n\ndef WrangleModelingData(sym, df_mkt, corrThreshold, Gui):\n\n #%% Prepare Modeling DataFrame\n \n import numpy as np\n import pandas as pd\n \n # Remove unavailable columns on Target Stock\n drop_list = df_mkt.loc[sym] == 0\n for i, item in enumerate(drop_list):\n if(item):\n df_mkt = df_mkt.drop(drop_list.index[i], axis='columns')\n \n # Find features correlation with price\n df_corr = pd.DataFrame(df_mkt.corr()['Cotação'])\n df_corr['CorrAbs'] = df_corr['Cotação'].abs()\n \n df_corr.sort_values(by='CorrAbs', axis=0, ascending=False, inplace=True)\n df_corr.columns = ['Corr','CorrAbs']\n Gui.AppendLog('\\nCorrelations:')\n Gui.AppendLog(str(df_corr['Corr']))\n \n # Select Model Features\n df_select = df_corr[df_corr['CorrAbs'] > corrThreshold]\n df_model = df_mkt[df_select.index]\n Gui.AppendLog('\\nConsidered Features:' + str(df_model.columns[1:]))\n \n # Remove Symbols with missing main features data\n df_model = df_model.replace(0,np.nan)\n df_model = df_model.dropna()\n \n if df_model.shape[1] > 2:\n \n import matplotlib.pyplot as plt\n \n y = df_model['Cotação']\n \n fig = plt.figure()\n fig.add_subplot(1,2,1)\n plt.scatter(df_model.iloc[:,1], y)\n plt.xlabel(df_model.columns[1])\n plt.ylabel('Price (R$)')\n \n fig.add_subplot(1,2,2)\n plt.scatter(df_model.iloc[:,2], y)\n plt.xlabel(df_model.columns[2])\n plt.ylabel('Price (R$)')\n \n return df_model\n","sub_path":"DataWrangling.py","file_name":"DataWrangling.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"402356576","text":"'''\n@Author: zhaoyang.liang\n@Github: https://github.com/LzyRapx\n@Date: 2020-01-20 12:03:33\n'''\nclass Solution:\n def numUniqueEmails(self, emails: List[str]) -> int:\n if len(emails) == 0:\n return 0\n ans = set()\n for e in emails:\n local, domain = e.split('@')\n local = local.split(\"+\")[0]\n tmp = local.split(\".\")\n local = ''.join(tmp)\n real_email= local+\"@\"+domain\n ans.add(real_email)\n return len(ans)\n 
","sub_path":"LeetCode/Easy/929.py","file_name":"929.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"458378877","text":"import win32com.client\nimport os\nfrom glob import glob\nimport re\n\napi = win32com.client.Dispatch(\"idrisi32.IdrisiAPIServer\")\n\n'''\n#Set project in Idrisi Explorer\ndef setProject(path):\n if path[-1] != '\\\\':\n path = path+'\\\\'\n''' \n\n#Access working directory filepath\nworkdir = api.GetWorkingDir()\n\n#Access resource directory (or directories) filepath(s)\ncount = api.GetResourceDirCount()\nresdirs = []\nfor i in range(count):\n resdirs.append(api.GetResourceDir(i+1))\n\n#List all available directories in the workspace\ndirlist = []\ndirlist.append(workdir)\ndirlist.extend(resdirs)\n\n'''\n#Set default project\nprojName = 'default'\nprojPath = 'C:\\\\Python27\\\\Lib\\\\site-packages\\\\idrtools\\\\Projects\\\\'\nif\nproject = projPath+projName+'.env\\\\'\n'''\n\n#Set default palette for file display\npalette = 'quant'\n\n#Sort files based upon Idrisi-specific sorting properties\ndef sortFile(list):\n numPair=[]\n newList = [file[:-4] for file in list if file[-5].isdigit() == True]\n for file in newList:\n i = len(file)-1\n while i >= 0:\n if file[i].isdigit()==True:\n i -=1\n else:\n break\n numPair.append((int(file[i+1:]),file[:i+1]))\n numDict = {}\n for item in numPair:\n keylist = numDict.keys()\n if item[1] in keylist:\n numDict[item[1]].append(item[0])\n else:\n numDict[item[1]] = [item[0]]\n i = len(numDict)-1\n while i >= 0:\n if len(numDict.values()[i]) <=1:\n del(numDict[numDict.keys()[i]])\n i -= 1\n [numDict[item].sort() for item in numDict]\n for item in numDict:\n searchFile = item+str(numDict[item][0])\n for i in range(len(list)):\n if list[i][:-4] == searchFile:\n filetype = list[i][-4:]\n j=i\n k=0\n for j in range(j, j+len(numDict[item])):\n list[j] = item+str(numDict[item][k])+filetype\n k += 1\n i = i+len(numDict[item])\n\n#List files in a chosen working or resource directory according to filetype\ndef listFile(dir, wildcard='', case=True, filetype=\\\n ['rst', 'rgf','vct', 'vlx','vgf']):\n list = []\n os.chdir(dir)\n tmplist = glob('*')\n for file in tmplist:\n if file[-3:].lower() in filetype:\n list.append(file)\n else:\n continue\n sortDict = {}\n for file in list:\n upperFile = file.upper()\n sortDict[upperFile] = file\n outlist = []\n upperKey = sortDict.keys()\n upperKey.sort()\n sortFile(upperKey)\n [outlist.append(sortDict[file]) for file in upperKey]\n if wildcard != '':\n newOutlist = []\n if case == False:\n wildcard = wildcard.upper()\n for file in upperKey:\n if re.findall(wildcard, file) == [wildcard]:\n newOutlist.append(sortDict[file])\n else:\n continue\n for file in outlist:\n if re.findall(wildcard, file) == [wildcard]:\n newOutlist.append(file)\n else:\n continue\n return newOutlist\n else:\n return outlist\n\n\n\n","sub_path":"idrtools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"182679852","text":"#!/usr/bin/python3\n# _*_ coding: utf-8 _*_\n\nimport base64\nimport datetime\nimport time\nimport email\nimport os\nimport poplib\nimport smtplib\nfrom datetime import timedelta\nfrom email.header import decode_header,Header\n#处理多种形态的邮件主体我们需要 MIMEMultipart 类\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\n#发送字符串的邮件\nfrom 
email.mime.text import MIMEText\nfrom email.parser import Parser\nfrom email.utils import parseaddr\nfrom email.encoders import encode_base64\nfrom email.mime.base import MIMEBase\nimport dateutil.parser\n\n\ndef decode_str(s):\n value, charset = decode_header(s)[0]\n if charset:\n value = value.decode(charset)\n return value\n\n\ndef guess_charset(msg):\n # 先从msg对象获取编码:\n charset = msg.get_charset()\n if charset is None:\n # 如果获取不到,再从Content-Type字段获取:\n content_type = msg.get('Content-Type', '').lower()\n pos = content_type.find('charset=')\n if pos >= 0:\n charset = content_type[pos + 8:].strip()\n return charset\n\n\n\ndef get_email_headers(msg):\n # 邮件的From, To, Subject存在于根对象上:\n headers = {}\n for header in ['From', 'To', 'Subject', 'Date']:\n value = msg.get(header, '')\n if value:\n if header == 'Date':\n headers['date'] = value\n if header == 'Subject':\n # 需要解码Subject字符串:\n subject = decode_str(value)\n headers['subject'] = subject\n else:\n # 需要解码Email地址:\n hdr, addr = parseaddr(value)\n name = decode_str(hdr)\n value = u'%s <%s>' % (name, addr)\n if header == 'From':\n from_address = value\n headers['from'] = from_address\n else:\n to_address = value\n headers['to'] = to_address\n content_type = msg.get_content_type()\n return headers\n\n\n# indent用于缩进显示:\ndef get_email_content(message, base_save_path,dirPath,keyName):\n j = 0\n content = ''\n attachment_files = []\n for part in message.walk():\n j = j + 1\n file_name = part.get_filename()\n contentType = part.get_content_type()\n # 保存附件\n if file_name: # Attachment\n # Decode filename\n h = email.header.Header(file_name)\n dh = email.header.decode_header(h)\n filename = decode_str(file_name)\n if filename.find(keyName) != -1:\n data = part.get_payload(decode=True)\n att_file = open(dirPath+'/' + filename, 'wb')\n attachment_files.append(filename)\n att_file.write(data)\n att_file.close()\n elif contentType == 'text/plain' or contentType == 'text/html':\n # 保存正文\n data = part.get_payload(decode=True)\n charset = guess_charset(part)\n if charset:\n charset = charset.strip().split(';')[0]\n print('charset:' + charset)\n data = data.decode(charset)\n content = data\n return content, attachment_files\n\n\n\ndef downloadReports(emailaddress,password,pop3_server,teamNumber,upTimeBounding,downTimeBounding,flagName,dirPath):\n # 连接到POP3服务器:\n server = poplib.POP3(pop3_server)\n # 可以打开或关闭调试信息:\n # server.set_debuglevel(1)\n # POP3服务器的欢迎文字:\n print(server.getwelcome())\n # 身份认证:\n server.user(emailaddress)\n server.pass_(password)\n # stat()返回邮件数量和占用空间:\n messagesCount, messagesSize = server.stat()\n print('messagesCount:', messagesCount)\n print('messagesSize:', messagesSize)\n # list()返回所有邮件的编号:\n resp, mails, octets = server.list() \n # 获取最新10封邮件, 注意索引号从1开始:\n if teamNumber<1:\n length = len(mails)\n else:\n length = teamNumber*2 \n for i in range(length):\n print('---------- 正在处理'+str(i)+'/'+str(length)+' ----------')\n resp, lines, octets = server.retr(len(mails) - i)\n # lines存储了邮件的原始文本的每一行,\n # 可以获得整个邮件的原始文本:\n strLines = []\n for line in lines:\n strInfo = line.decode()\n strLines.append(strInfo)\n msg_content = '\\n'.join(strLines)\n # 把邮件内容解析为Message对象:\n msg = Parser().parsestr(msg_content)\n # 但是这个Message对象本身可能是一个MIMEMultipart对象,即包含嵌套的其他MIMEBase对象,\n # 嵌套可能还不止一层。所以我们要递归地打印出Message对象的层次结构: \n base_save_path = '/media/markliu/Entertainment/email_attachments/'\n msg_headers = get_email_headers(msg)\n dateStr=msg_headers['date']\n if dateStr.find('(')!=-1:\n dateStr=dateStr[0:dateStr.find('(')]\n if True: \n receiveDate = 
dateutil.parser.parse(dateStr)\n now = datetime.datetime.now()\n this_week_start = now - timedelta(days=now.weekday())\n this_week_end = now + timedelta(days=6 - now.weekday()) \n beforeDistance=(receiveDate.replace(tzinfo=None) - this_week_start)\n afterDistance=this_week_end-(receiveDate.replace(tzinfo=None))\n if (beforeDistance.days < upTimeBounding) and (beforeDistance.days >downTimeBounding) and (afterDistance.days >downTimeBounding) and (afterDistance.days < upTimeBounding):\n content, attachment_files = get_email_content(msg, base_save_path,dirPath,flagName)\n print('subject:' + msg_headers['subject'])\n print('from_address:' + msg_headers['from'])\n print('to_address:' + msg_headers['to'])\n print('date:' + msg_headers['date'])\n print('content:' + content)\n if len(attachment_files) > 0:\n print('attachment_files: ' + str(attachment_files))\n server.quit()\n return \n\ndef sendResults(fileNameArray,fromaddr,psw,serverAddress):\n timeStr= time.strftime(\"%Y%m%d\", time.localtime())\n topic='软件二组'+timeStr+'周报汇总'\n sendResults(fileNameArray,fromaddr,fromaddr,psw,serverAddress,topic)\n\ndef sendResults(fileNameArray,fromaddr,toaddr,psw,serverAddress,topic):\n server = smtplib.SMTP(serverAddress)\n server.login(fromaddr,psw)\n m = MIMEMultipart()\n for file in fileNameArray:\n fileApart = MIMEBase('application', 'octet-stream')\n fileApart.set_payload(open(file,'rb').read())\n relativeFileName=file\n if file.find('/'):\n names=relativeFileName.split('/')\n relativeFileName=names[len(names)-1]\n fileApart.add_header('Content-Disposition', 'attachment', filename=Header(relativeFileName, 'utf-8').encode())\n encode_base64(fileApart)\n m.attach(fileApart) \n m['Subject'] = topic\n server.sendmail(fromaddr, toaddr, m.as_string())\n print('send success')\n server.quit()\n\nif __name__ == '__main__':\n # 输入邮件地址, 口令和POP3服务器地址:\n emailaddress = '18622939753@163.com'\n # 注意使用开通POP,SMTP等的授权码\n password = '860124Ww'\n pop3_server = 'pop.163.com'\n teamNumber=7\n downloadReports(emailaddress,password,pop3_server,teamNumber)\n","sub_path":"mailOperate.py","file_name":"mailOperate.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"59845450","text":"from helper_functions import *\nfrom Product import *\nfrom Customer import *\n\nclass Receipt:\n def __init__(self):\n self.dict = {}\n \n def __updateLineItem (self, receiptLineItemList):\n receiptItemList = []\n total = 0\n for lineItem in receiptLineItemList:\n receiptLineItemDict = {}\n receiptLineItemDict[\"Invoice No\"] = lineItem[\"Invoice No\"]\n receiptLineItemDict[\"Amount Paid Here\"] = lineItem[\"Amount Paid Here\"]\n total += lineItem[\"Amount Paid Here\"]\n receiptItemList.append(receiptLineItemDict)\n return receiptItemList, total\n\n def create(self, receiptNo, receiptDate, customerCode, paymentMethod, paymentReference, remark, receiptLineItemList):\n if receiptNo in self.dict:\n return {'Is Error': True, 'Error Message': \"Receipt No '{}' already exists. Cannot Create. 
\".format(receiptNo)}\n else:\n receiptLineItemList,total = self.__updateLineItem(receiptLineItemList)\n \n self.dict[receiptNo] = {\"Receipt Date\" : receiptDate,\"Customer Code\" : customerCode,\"Payment Method\" : paymentMethod, \"Payment Reference\" : paymentReference, \"Total Received\" : total, \"Remarks\" : remark,\"Items List\" : receiptLineItemList}\n return {'Is Error': False, 'Error Message': \"\"}\n\n def read(self, receiptNo):\n if receiptNo in self.dict:\n retreceipt = self.dict[receiptNo]\n else:\n return ({'Is Error': True, 'Error Message': \"Receipt No '{}' not found. Cannot Read.\".format(receiptNo)},{})\n\n return ({'Is Error': False, 'Error Message': \"\"},retreceipt)\n \n def update(self, receiptNo, newReceiptDate, newCustomerCode, newPaymentMethod, newPaymentReference, newRemark, newReceiptLineItemList):\n if receiptNo in self.dict:\n self.dict[receiptNo][\"Receipt Date\"] = newReceiptDate\n self.dict[receiptNo][\"Customer Code\"] = newCustomerCode\n self.dict[receiptNo][\"Payment Method\"] = newPaymentMethod\n self.dict[receiptNo][\"Payment Reference\"] = newPaymentReference\n self.dict[receiptNo][\"Remarks\"] = newRemark\n receiptLineItemList,total = self.__updateLineItem(newReceiptLineItemList)\n\n self.dict[receiptNo][\"Total Received\"] = total\n self.dict[receiptNo][\"Items List\"] = newReceiptLineItemList\n else:\n return {'Is Error': True, 'Error Message': \"Receipt No '{}' not found. Cannot Update.\".format(receiptNo)}\n\n return {'Is Error': False, 'Error Message': \"\"}\n\n def delete(self, receiptNo):\n if receiptNo in self.dict:\n del self.dict[receiptNo]\n else:\n return {'Is Error': True, 'Error Message': \"Receipt No '{}' not found. Cannot Delete\".format(receiptNo)}\n return {'Is Error': False, 'Error Message': \"\"}\n\n def dump(self):\n # Will dump all products data by returning 1 dictionary as output.\n return (self.dict)\n\n def update_receipt_line(self, receiptNo, invoiceNo, amountPaid):\n if receiptNo in self.dict:\n receiptLineItemList = []\n bUpdated = False\n for lineItem in self.dict[receiptNo][\"Items List\"]:\n invoiceLineItem = {}\n if lineItem[\"Invoice No\"] == invoiceNo:\n invoiceLineItem[\"Invoice No\"] = invoiceNo\n invoiceLineItem[\"Amount Paid Here\"] = amountPaid\n\n receiptLineItemList.append(invoiceLineItem)\n bUpdated = True\n else:\n receiptLineItemList.append(lineItem)\n print(receiptLineItemList)\n \n if bUpdated:\n receiptLineItemList,total = self.__updateLineItem(receiptLineItemList)\n self.dict[receiptNo][\"Items List\"] = receiptLineItemList\n self.dict[receiptNo][\"Total Received\"] = total\n else:\n return {'Is Error': True, 'Error Message': \"Receipt Code '{}' not found in Invoice No '{}'. Cannot Update.\".format(receiptNo,invoiceNo)}\n else:\n return {'Is Error': True, 'Error Message': \"Receipt No '{}' not found. Cannot Update.\".format(receiptNo)}\n\n return {'Is Error': False, 'Error Message': \"\"}\n\n def delete_receipt_line(self, receiptNo, invoiceNo):\n # The line item of this invoice number is updated to delete this product code. \n # Note that all the related data in the invoice must be updated such as Total, VAT, and Amount Due. 
\n # Returns dictionary {‘Is Error’: ___, ‘Error Message’: _____}\n if receiptNo in self.dict:\n total = 0\n receiptLineItemList = []\n bDeleted = False\n for lineItem in self.dict[receiptNo][\"Items List\"]:\n if lineItem[\"Invoice No\"] == invoiceNo:\n bDeleted = True\n else:\n receiptLineItemList.append(lineItem)\n \n if bDeleted:\n receiptLineItemList, total= self.__updateLineItem(receiptLineItemList) \n self.dict[receiptNo][\"Items List\"] = receiptLineItemList\n self.dict[receiptNo][\"Total Received\"] = total\n \n else:\n return {'Is Error': True, 'Error Message': \"Receipt Code '{}' not found in Invoice No '{}'. Cannot Delete.\".format(receiptNo, invoiceNo)}\n else:\n return {'Is Error': True, 'Error Message': \"Receipt No '{}' not found. Cannot Delete.\".format(receiptNo)}\n\n return {'Is Error': False, 'Error Message': \"\"}\n\n","sub_path":"lab202/Receipt.py","file_name":"Receipt.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"214919313","text":"__author__ = 'hawrkchen'\r\n__date__ = '2017/5/22 0022 15:51'\r\n\r\nimport sys, socket\r\nimport time\r\nimport urllib\r\nimport os\r\nimport logging\r\n\r\n\r\nSERVER_IP = \"10.100.100.88\"\r\nSERVER_PORT = 13680\r\n\r\nbank_list = [\"1\"]\r\n\r\ndef string_msg(bank):\r\n content = {}\r\n content['ver'] = '1'\r\n content['cmd'] = '5010'\r\n content['src'] = '0'\r\n content['bm_id'] = bank\r\n content['pay_channel'] = \"WXPAY\"\r\n\r\n str_values = urllib.urlencode(content)\r\n return str_values\r\n\r\n#socket send\r\ndef send_notify(content):\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.settimeout(20)\r\n result = sock.connect_ex((SERVER_IP, SERVER_PORT))\r\n if result != 0:\r\n #print >>sys.stderr, \"connect error, ip:\",ip, \"port:\", port\r\n sys.exit(2)\r\n #content = content.replace('\\\\', '')\r\n content += '\\r\\n'\r\n logging.info(\"sending data:%s\", content)\r\n result = sock.sendall(content)\r\n if result != None:\r\n logging.info(\"############## time out end#######################\")\r\n #print >>sys.stderr, \"send data error, ip:\",ip, \"port:\", port\r\n sys.exit(3)\r\n\r\n logging.info(\"receive data:%s\" ,sock.recv(128))\r\n sock.close()\r\n\r\nif __name__ == \"__main__\":\r\n logging.basicConfig(level=logging.INFO,\r\n format='%(asctime)s [line:%(lineno)d] %(levelname)s %(message)s',\r\n datefmt='%d %b %Y %H:%M:%S',\r\n filename='/usr/local/services/spp_speedpos_bill-2.0/log/create_wxbill.log',\r\n filemode='a')\r\n\r\n logging.info('*************begin*******************')\r\n logging.info('notify ip:%s,port :%d',SERVER_IP,SERVER_PORT)\r\n\r\n for bank in bank_list:\r\n logging.info('process bank:%s',bank)\r\n send_msg = string_msg(bank)\r\n send_notify(send_msg)\r\n time.sleep(5)\r\n\r\n logging.info(\"############## process end#######################\")","sub_path":"module/billserver/conf/create_wxbill.py","file_name":"create_wxbill.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"175832894","text":"Inventar = [\"MacBook Pro 2015 (IT)\",\n \"PS5 (Spiel)\",\n \"Volkswagen Polo (Auto)\",\n \"Jaguar XF (Auto)\",\n \"FIFA 2020 (Spiel)\",\n \"Thermometer (Gesundheit)\",\n \"Red Dead Redemption (Spiel)\",\n \"Xbox 360 (Spiel)\",\n \"Razer Blade (IT)\",\n \"Microsoft Surface 3 (IT)\",\n \"Texas Instruments N-Spire (IT)\",\n \"BioNTech-Pfizer Impfstoff(Gesundheit)\"]\n\nlists = {\n \"IT\": [],\n 
\"Gesundheit\": [],\n \"Auto\": [],\n \"Spiel\": []\n}\n\n\ndef inventarPrint():\n Inventar.sort()\n print(Inventar)\n\n\ndef inventoryListInput():\n bereichKontroll = False\n\n while bereichKontroll == False:\n print(\"Neues Item für Inventar List...\")\n neuProduktName = input(print(\"Produkt Name: \"))\n neuProduktAnzahl = input(print(\"Anzahl der Produkt: \"))\n neuProduktBereich = input(print(\"Bereich der Produkt: \"))\n if (neuProduktBereich in lists):\n bereichKontroll = True\n print(\"Produkt wird in Invetar Liste hinzugefügt... \")\n print(\"(\" + neuProduktAnzahl + \") \" + neuProduktName + \" (\" + neuProduktBereich + \")\")\n else:\n print(\"Bereich ist nicht gültig, bitte versuchen Sie nochmal.\")\n\n return neuProduktName, neuProduktAnzahl, neuProduktBereich\n\n\ndef automaticInventorySorting():\n neuProduktName, neuProduktAnzahl, neuProduktBereich = inventoryListInput()\n\n print(neuProduktBereich)\n\n if neuProduktBereich in lists:\n print(\"INSERTING INTO LIST: \" + neuProduktBereich)\n lists[neuProduktBereich].append(neuProduktName + \" (\" + neuProduktAnzahl + \")\")\n\n #Das ist auch ein mögliche Lösung!\n # if neuProduktBereich == \"IT\":\n # itList.append(\"(\"+str(neuProduktAnzahl)+ \") \" + neuProduktName)\n # elif neuProduktBereich == \"Gesundheit\":\n # gesundheitList.append(\"(\"+str(neuProduktAnzahl)+ \") \" + neuProduktName)\n # elif neuProduktBereich == \"Auto\":\n # autoList.append(\"(\"+str(neuProduktAnzahl)+ \") \" + neuProduktName)\n # elif neuProduktBereich == \"Spiel\":\n # spielList.append(\"(\"+str(neuProduktAnzahl)+ \") \" + neuProduktName)\n\n\ndef automaticDuplicateUpdater():\n for listName in lists:\n list = lists[listName]\n ''' Check if given list contains any duplicates, rebuild list'''\n itemsDict = {}\n neuList = []\n for item in list:\n # item looks something like \"MacBook Pro 2015 (20)\"\n # separate quantity from name\n itemDetails = item.split(\"(\") # ['Macbook Pro 2015 ', '20)']\n itemName = itemDetails[0]\n itemQty = itemDetails[1].replace(')', '')\n\n if itemName in itemsDict:\n itemsDict[itemName] = itemsDict[itemName] + int(itemQty)\n else:\n itemsDict[itemName] = int(itemQty)\n\n for itemName in itemsDict:\n neuList.append(itemName + \" (\" + str(itemsDict[itemName]) + \")\")\n\n lists[listName] = neuList.sort()\n\n# GUI\nprint(\"- Klug IT GmbH Inventar Software -\")\nprint()\nmenuBeenden = False\nwhile (menuBeenden == False):\n print(\"Welche Funktion möchten Sie verwenden?...\")\n print(\"1 - Artikel im gesamte Inventar hinzufügen\")\n print(\"2 - Inventar anzeigen\")\n print(\"3 - Duplikat Prüfung\")\n print(\"4 - Programm Beenden\")\n print()\n auswahl = input(print(\"Ihre auswahl:\"))\n\n if auswahl == \"1\":\n inventoryListInput()\n elif auswahl == \"2\":\n inventarPrint()\n elif auswahl == \"3\":\n automaticDuplicateUpdater()\n elif auswahl == \"4\":\n menuBeenden = True\n print(\"Bye!\") #exit(0)\n else:\n print(\"Kein richtige Auswahl!\")\n","sub_path":"AdvExercise.py","file_name":"AdvExercise.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"331170383","text":"#!/usr/bin/env python\n\nimport sys\n\n# Ignore first line\nsys.stdin.readline()\n# And print the desired headline\nsys.stdout.write(\"id,repeatProbability\\n\")\n\nfor line in sys.stdin:\n\t# Remove the quotes, because Kaggle does not want them\n\toutputLine = line.replace('\"', 
'')\n\tsys.stdout.write(outputLine)","sub_path":"code/hive-to-kaggle.py","file_name":"hive-to-kaggle.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"512215112","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\n\nclass DialoGPT(object):\n\n def __init__(self, size, device, max_context_length=48):\n \"\"\"\n Modeling class for Dialo GPT\n\n Args:\n size (str): model size. must be one of ['small', 'medium', 'large']\n device (str): model device. should be one of ['cpu', 'cuda', 'cuda:n']\n max_context_length (int): max context laength (number of input history tokens)\n\n Notes:\n format of histories:\n self.histories = {\n user_1 : {'user': [] , 'bot': []},\n user_2 : {'user': [] , 'bot': []},\n ...more...\n user_n : {'user': [] , 'bot': []},\n }\n\n paper (arXiv):\n https://arxiv.org/abs/1911.00536\n\n Examples:\n >>> # chatting with DialoGPT on terminal mode.\n >>> # The model size must be one of the [small, medium, large].\n >>> # type '/exit' if you want to exit dialogue.\n >>> # type '/clear' if you want to clear all histories\n >>> gpt = DialoGPT(size=\"large\", device=\"cuda\")\n >>> gpt.run()\n user : Hello.\n bot : How are you?\n user : I'm great. it is a nice day.\n bot : That's good.\n user : Who is CEO of Apple?\n bot : Steve Jobs.\n user : /clear\n bot : history cleared.\n user : /exit\n bot : bye.\n\n >>> # chatting with DialoGPT by user id. (single-turn)\n >>> gpt = DialoGPT(size=\"large\", device=\"cuda\")\n >>> gpt.predict(user_id=\"USER_ID\", text=\"Hello.\")\n\n >>> # chatting with DialoGPT by user id. (multi-turn)\n >>> while True:\n ... _in = input('user : ')\n ... _out = gpt.predict(user_id=\"USER_ID\", text=_in)\n ... 
print(f\"bot : {_out}\")\n\n >>> # you can check dialogue histories\n >>> gpt.histories\n {\n user_1 : {'user': [] , 'bot': []},\n user_2 : {'user': [] , 'bot': []},\n ...more...\n user_n : {'user': [] , 'bot': []},\n }\n\n >>> # you can clear all dialogue histories\n >>> gpt.clear(user_id=\"USER_ID\")\n\n \"\"\"\n\n assert size in ['small', 'medium', 'large'], \\\n \"model size must be one of ['small', 'medium', 'large]\"\n\n self.model_name = f\"microsoft/DialoGPT-{size}\"\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)\n self.model = AutoModelForCausalLM.from_pretrained(self.model_name)\n self.model = self.model.eval().to(device)\n\n self.max_context_length = max_context_length\n self.histories = {}\n self.device = device\n self.eos = \"<|endoftext|>\"\n\n @torch.no_grad()\n def predict(\n self,\n user_id: str,\n text: str,\n num_beams: int = 10, # paper's setting\n top_k: int = 10, # paper's setting\n top_p: float = None, # do not use top-p sampling\n ) -> str:\n \"\"\"\n dialogue with Dialo GPT\n\n Args:\n user_id (str): user id\n text (str): user's input text\n num_beams (int): size of beam width\n top_k (int): K for top-K sampling\n top_p (float): P for top-P sampling\n\n Returns:\n (str): model's next utterance\n\n \"\"\"\n\n torch.cuda.empty_cache()\n input_ids_list: list = []\n num_of_stacked_tokens: int = 0\n\n if user_id not in self.histories.keys():\n self.clear(user_id)\n\n user_histories = reversed(self.histories[user_id]['user'])\n bot_histories = reversed(self.histories[user_id]['bot'])\n\n for user, bot in zip(user_histories, bot_histories):\n user_tokens = self.tokenizer.encode(user, return_tensors='pt')\n bot_tokens = self.tokenizer.encode(bot, return_tensors='pt')\n num_of_stacked_tokens += user_tokens.shape[-1] + bot_tokens.shape[-1]\n\n if num_of_stacked_tokens <= self.max_context_length:\n input_ids_list.append(bot_tokens)\n input_ids_list.append(user_tokens)\n\n else:\n break\n\n input_ids_list = list(reversed(input_ids_list))\n new_input = text + self.eos\n input_tokens = self.tokenizer.encode(new_input, return_tensors='pt')\n input_ids_list.append(input_tokens)\n\n input_tokens = torch.cat(input_ids_list, dim=-1)\n input_tokens = input_tokens.to(self.device)\n\n output_ids = self.model.generate(\n input_tokens,\n max_length=1024,\n pad_token_id=self.tokenizer.eos_token_id,\n num_beams=num_beams,\n top_k=top_k,\n top_p=top_p,\n no_repeat_ngram_size=4,\n )\n\n next_utterance = self.tokenizer.decode(\n output_ids[:, input_tokens.shape[-1]:][0],\n skip_special_tokens=True,\n )\n\n self.histories[user_id]['user'].append(text + self.eos)\n self.histories[user_id]['bot'].append(next_utterance + self.eos)\n\n return next_utterance\n\n def clear(self, user_id):\n self.histories[user_id] = {'user': [], 'bot': []}\n\n def run(self):\n while True:\n _in = input(\"user : \")\n\n if _in == \"/exit\":\n print(f\"bot : bye.\")\n break\n\n elif _in == \"/clear\":\n print(f\"bot : history cleared.\")\n self.clear(\"user_id\")\n\n else:\n _out = self.predict(user_id=\"user_id\", text=_in)\n print(f\"bot : {_out}\")\n","sub_path":"dialogpt_chat/dialogpt.py","file_name":"dialogpt.py","file_ext":"py","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"44420618","text":"import flask\n\nfrom myapp.models import TodoSchema, Todo\n\ntodo = flask.Blueprint('todo_api', __name__)\ntodo_schema = TodoSchema()\n\n\n@todo.route(\"/todo\", methods=[\"POST\"])\ndef create_todo():\n req_data = 
flask.request.get_json()\n data, error = todo_schema.load(req_data)\n\n if error:\n return flask.jsonify(error), 400\n\n todo = Todo(req_data[\"summary\"], req_data[\"content\"])\n todo.save()\n\n todo_data = todo_schema.dump(todo).data\n return flask.jsonify(todo_data), 200\n\n\n@todo.route('/todo', methods=[\"GET\"])\ndef list_todos():\n all_todos = Todo.get_all()\n result = todo_schema.dump(all_todos, many=True).data\n return flask.jsonify(result)\n\n\n@todo.route(\"/todo/\", methods=[\"DELETE\"])\ndef delete_todo(todo_id):\n todo = Todo.get_by_id(todo_id)\n if todo is None:\n return '', 404\n\n todo.delete()\n return '', 202\n\n\n@todo.route('/', methods=[\"GET\"])\ndef index():\n return flask.render_template(\"index.html\", todos=Todo.get_all())\n","sub_path":"flaskapp/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"263117458","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import Group\n\nfrom .forms import UserChangeForm, UserCreationForm\nfrom .models import User, Comment, Review, Title, Category, Genre, Rate\n\n\nclass UserAdmin(BaseUserAdmin):\n form = UserChangeForm\n add_form = UserCreationForm\n list_display = ('email', 'role', 'username')\n list_filter = ('role',)\n fieldsets = (\n (None, {'fields': ('email', 'password')}),\n ('Permissions', {'fields': ('role',)}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'password1', 'password2'),\n }),\n )\n search_fields = ('email',)\n ordering = ('email',)\n filter_horizontal = ()\n list_editable = ('role', 'username')\n\n\nclass ReviewAdmin(admin.ModelAdmin):\n list_display = (\"pk\", \"title\", \"text\", \"author\", \"score\", \"pub_date\")\n\n\nclass CommentAdmin(admin.ModelAdmin):\n list_display = (\"pk\", \"review\", \"text\", \"author\", \"pub_date\")\n\n\nclass RateAdmin(admin.ModelAdmin):\n list_display = (\"pk\", \"title\", \"sum_vote\", \"count_vote\")\n\n\nclass TitleAdmin(admin.ModelAdmin):\n list_display = (\"pk\", \"name\", \"year\", \"rating\", \"description\", \"category\")\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = (\"pk\", \"name\", \"slug\")\n\n\nclass GenreAdmin(admin.ModelAdmin):\n list_display = (\"pk\", \"name\", \"slug\")\n\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Title, TitleAdmin)\nadmin.site.register(Rate, RateAdmin)\nadmin.site.register(Review, ReviewAdmin)\nadmin.site.register(Comment, CommentAdmin)\nadmin.site.register(User, UserAdmin)\nadmin.site.unregister(Group)\n","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"85488369","text":"def load_params_from_xacro():\n import rospkg\n import os\n\n params = {}\n rospack = rospkg.RosPack()\n pkg_path = rospack.get_path('my_robotic_arm')\n xacro_file_path = os.path.join(pkg_path, 'urdf', 'arm.xacro')\n\n properties = []\n with open(xacro_file_path) as f:\n line = f.readline()\n while line:\n if 'xacro:property' in line:\n properties.append(line)\n line = f.readline()\n\n prop = [i.strip() for i in properties]\n for i in prop:\n vals = i.split('\"')\n name = vals[1]\n value = vals[3]\n params[name] = float(value)\n\n return 
params\n","sub_path":"scripts/foo.py","file_name":"foo.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"430848072","text":"from Polynomials import Polynomial\nfrom Computation.RootFinding import schoolyard_method_convergents\nfrom numpy import linspace\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\nfig.set_size_inches(10, 6)\n\nP = Polynomial([-2,-1,0,1])\nprint(P)\n\nx = linspace(-.5,2,30)\ny = P.evaluate(x)[0]\n\ndef poly_eval(x):\n return P.evaluate(x)[0]\n\nplt.plot(x,y)\nplt.axhline(0)\ncon = schoolyard_method_convergents(0,poly_eval)\nprev = 0\nfor ctr,pos in enumerate(con):\n plt.scatter(pos,poly_eval(pos),zorder=3,color='black')\n plt.plot([prev,pos],[4-ctr*.2,4-ctr*.2],color='black')\n print(pos)\n \n prev = pos","sub_path":"Visualization/SchoolyardRootFinding.py","file_name":"SchoolyardRootFinding.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"269506356","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# import tensorflow as tf\n# def classifier(input_img, train=False):\n# input_image_dropped = tf.keras.layers.Dropout(0.2)(input_img)\n# conv1 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_image_dropped)\n# conv1_dropped = tf.keras.layers.Dropout(0.2)(conv1)\n# pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1_dropped)\n\n# conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)\n# conv2_dropped = tf.keras.layers.Dropout(0.2)(conv2)\n# pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2_dropped)\n\n# conv3 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)\n# conv3_dropped = tf.keras.layers.Dropout(0.2)(conv3)\n# pool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3_dropped)\n\n# conv4 = tf.keras.layers.Conv2D(512, (6, 6), activation='relu', padding='same')(pool3)\n# conv4_dropped = tf.keras.layers.Dropout(0.2)(conv4)\n\n# max1 = tf.keras.layers.MaxPooling2D(pool_size=(8, 6))(conv4_dropped)\n# flat1 = tf.keras.layers.Flatten()(max1)\n# drop1 = tf.keras.layers.Dropout(0.2)(flat1)\n# output = tf.keras.layers.Dense(8, activation='relu')(drop1)\n#\n# if train:\n# return output\n# else:\n# return output\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)\n self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv2d(128, 512, kernel_size=6, stride=1, padding=3)\n self.conv4_bn = nn.BatchNorm2d(512)\n # self.conv5 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=0,)\n\n # self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(512, 512)\n self.fc2 = nn.Linear(512, 8)\n\n def forward(self, x):\n batch_size = x.shape[0]\n x = F.max_pool2d(F.relu(self.conv1(x.float())), (2, 2))\n # print(x[0][0])\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))\n x = F.max_pool2d(self.conv4_bn(F.relu(self.conv4(x))), (8, 6))\n\n x = x.view(-1, 512)\n x = self.fc1(x)\n x = self.fc2(x)\n return F.relu(x)","sub_path":"air_compressor/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} 
+{"seq_id":"82952985","text":"## Sid Meier's Civilization 4\n## Copyright Firaxis Games 2005\n##\n## Sevopedia\n## sevotastic.blogspot.com\n## sevotastic@yahoo.com\n##\n\n\nfrom CvPythonExtensions import *\nimport CvUtil\nimport ScreenInput\nimport CvScreenEnums\n\n# globals\ngc = CyGlobalContext()\nArtFileMgr = CyArtFileMgr()\nlocalText = CyTranslator()\n\nclass CvPediaCivilization:\n\t\"Civilopedia Screen for Civilizations\"\n\n\tdef __init__(self, main):\n\t\tself.iCivilization = -1\n\t\tself.top = main\n\t\t\n\t\tself.X_MAIN_PANE = self.top.X_PEDIA_PAGE + 20\n\t\tself.Y_MAIN_PANE = self.top.Y_PEDIA_PAGE + 10\n\t\tself.W_MAIN_PANE = 160 #200 #Rhye\n\n\t\tself.X_ICON = self.X_MAIN_PANE + 5 # + 25 #Rhye\n\t\tself.W_ICON = 150\n\t\tself.H_ICON = 150\n\t\tself.ICON_SIZE = 128 #64 #Rhye\n\n\t\tself.X_TECH = self.X_MAIN_PANE + self.W_MAIN_PANE + 10\n\t\tself.Y_TECH = 65\n\t\tself.W_TECH = 1000 - self.X_TECH\n\t\tself.H_TECH = 110\n\n\t\tself.X_UNIT = self.X_TECH\n\t\tself.Y_UNIT = self.Y_TECH + self.H_TECH\n\t\tself.W_UNIT = 200\n\t\tself.H_UNIT = 110\n\n\t\tself.X_LEADER = self.X_TECH \n\t\tself.Y_LEADER = self.Y_UNIT + self.H_UNIT\n\t\tself.W_LEADER = 1000 - self.X_LEADER\n\t\tself.H_LEADER = 250 #110 #Rhye\n\n\t\tself.X_TEXT = self.X_MAIN_PANE\n\t\tself.Y_TEXT = self.Y_LEADER + self.H_LEADER\n\t\tself.W_TEXT = 1000 - self.X_TEXT\n\t\tself.H_TEXT = 700 - self.Y_TEXT\n\n\t\tself.H_MAIN_PANE = (self.Y_LEADER + self.H_LEADER) - self.Y_MAIN_PANE\n\t\tself.Y_ICON = self.Y_MAIN_PANE + (self.H_MAIN_PANE - self.H_ICON)/2\n\t\t\n\t# Screen construction function\n\tdef interfaceScreen(self, iCivilization):\t\n\t\t\t\n\t\tself.iCivilization = iCivilization\n\t\n\t\tself.top.deleteAllWidgets()\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\tbNotActive = (not screen.isActive())\n\t\tif bNotActive:\n\t\t\tself.top.setPediaCommonWidgets()\n\n\t\t# Header...\n\t\tszHeader = u\"\" + gc.getCivilizationInfo(self.iCivilization).getDescription().upper() + u\"\"\n\t\tszHeaderId = self.top.getNextWidgetName()\n\t\tscreen.setLabel(szHeaderId, \"Background\", szHeader, CvUtil.FONT_CENTER_JUSTIFY, self.top.X_SCREEN, self.top.Y_TITLE, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)\n\t\t\n\t\t# Top\n\t\tscreen.setText(self.top.getNextWidgetName(), \"Background\", self.top.MENU_TEXT, CvUtil.FONT_LEFT_JUSTIFY, self.top.X_MENU, self.top.Y_MENU, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_PEDIA_MAIN, CivilopediaPageTypes.CIVILOPEDIA_PAGE_CIV, -1)\n\n\t\tif self.top.iLastScreen\t!= CvScreenEnums.PEDIA_CIVILIZATION or bNotActive:\t\n\t\t\tif self.top.iLastScreen != CvScreenEnums.PEDIA_MAIN:\n\t\t\t\tself.placeLinks()\t\n\t\t\tself.top.iLastScreen = CvScreenEnums.PEDIA_CIVILIZATION\n\t\t\t\n\t\t# Icon\n\t\tscreen.addPanel( self.top.getNextWidgetName(), \"\", \"\", False, False,\n\t\t self.X_MAIN_PANE, self.Y_MAIN_PANE, self.W_MAIN_PANE, self.H_MAIN_PANE, PanelStyles.PANEL_STYLE_BLUE50)\n\t\tscreen.addPanel(self.top.getNextWidgetName(), \"\", \"\", false, false,\n\t\t self.X_ICON, self.Y_ICON, self.W_ICON, self.H_ICON, PanelStyles.PANEL_STYLE_MAIN)\n\t\tscreen.addDDSGFC(self.top.getNextWidgetName(), ArtFileMgr.getCivilizationArtInfo(gc.getCivilizationInfo(self.iCivilization).getArtDefineTag()).getButton(),\n\t\t self.X_ICON + self.W_ICON/2 - self.ICON_SIZE/2, self.Y_ICON + self.H_ICON/2 - self.ICON_SIZE/2, self.ICON_SIZE, self.ICON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1 )\n\n\t\tself.placeTech()\n\t\tself.placeUnit()\n\t\tself.placeLeader()\n\t\tself.placeText()\n\n\t\treturn\n\n\tdef 
placeTech(self):\n\t\t\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\t#Rhye - comment\n##\t\tpanelName = self.top.getNextWidgetName()\n##\t\tscreen.addPanel( panelName, localText.getText(\"TXT_KEY_FREE_TECHS\", ()), \"\", false, true,\n##\t\t\t\t self.X_TECH, self.Y_TECH, self.W_TECH, self.H_TECH, PanelStyles.PANEL_STYLE_BLUE50 )\n##\t\tscreen.attachLabel(panelName, \"\", \" \")\n##\t\tfor iTech in range(gc.getNumTechInfos()):\n##\t\t\tif (gc.getCivilizationInfo(self.iCivilization).isCivilizationFreeTechs(iTech)):\n##\t\t\t\tscreen.attachImageButton( panelName, \"\", gc.getTechInfo(iTech).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, iTech, 1, False )\n\n\t\t#Rhye - start\n\t\tpanelName = self.top.getNextWidgetName()\n\t\tscreen.addPanel( panelName, \"\", \"\", true, true,\n\t\t\t\t self.X_TECH, self.Y_TECH - 4, self.W_LEADER, self.Y_TEXT - self.Y_TECH + 4, PanelStyles.PANEL_STYLE_BLUE50 ) \n\t\tszText = CyGameTextMgr().parseCivInfos(self.iCivilization, True)\n\t\tscreen.attachMultilineText( panelName, \"\", szText, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\n\t\t#Rhye - end\n\n\n\t\t\t\n\tdef placeUnit(self):\n\t\t\n\t\tscreen = self.top.getScreen()\n\t\t#Rhye - comment\n##\t\tpanelName = self.top.getNextWidgetName()\n##\t\tscreen.addPanel( panelName, localText.getText(\"TXT_KEY_FREE_UNITS\", ()), \"\", false, true,\n##\t\t\t\t self.X_UNIT, self.Y_UNIT, self.W_UNIT, self.H_UNIT, PanelStyles.PANEL_STYLE_BLUE50 )\n##\t\tscreen.attachLabel(panelName, \"\", \" \")\n##\t\t\t\t\t\n##\t\tfor iUnit in range(gc.getNumUnitClassInfos()):\n##\t\t\tiUniqueUnit = gc.getCivilizationInfo(self.iCivilization).getCivilizationUnits(iUnit);\n##\t\t\tiDefaultUnit = gc.getUnitClassInfo(iUnit).getDefaultUnitIndex();\n##\t\t\tif (iDefaultUnit > -1 and iUniqueUnit > -1 and iDefaultUnit != iUniqueUnit):\n##\t\t\t\tscreen.attachImageButton( panelName, \"\", gc.getUnitInfo(iUniqueUnit).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_UNIT, iUniqueUnit, 1, False )\n##\t\t\n\tdef placeLeader(self):\n\t\t\n\t\tscreen = self.top.getScreen()\n\t\t#Rhye - comment\n##\t\tpanelName = self.top.getNextWidgetName()\n##\t\tscreen.addPanel( panelName, localText.getText(\"TXT_KEY_CONCEPT_LEADERS\", ()), \"\", false, true,\n##\t\t\t\t self.X_LEADER, self.Y_LEADER, self.W_LEADER, self.H_LEADER, PanelStyles.PANEL_STYLE_BLUE50 )\n##\t\tscreen.attachLabel(panelName, \"\", \" \")\n##\n##\t\tfor iLeader in range(gc.getNumLeaderHeadInfos()):\n##\t\t\tciv = gc.getCivilizationInfo(self.iCivilization)\n##\t\t\tif civ.isLeaders(iLeader):\n##\t\t\t\tscreen.attachImageButton( panelName, \"\", gc.getLeaderHeadInfo(iLeader).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_LEADER, iLeader, 1, False )\n##\t\t\n\tdef placeText(self):\n\t\t\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\tpanelName = self.top.getNextWidgetName()\n\t\tscreen.addPanel( panelName, \"\", \"\", true, true,\n\t\t\t\t self.X_TEXT, self.Y_TEXT, self.W_TEXT, self.H_TEXT, PanelStyles.PANEL_STYLE_BLUE50 )\n \n\t\tszText = gc.getCivilizationInfo(self.iCivilization).getCivilopedia()\n\t\tscreen.attachMultilineText( panelName, \"Text\", szText, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tdef placeLinks(self):\n\n\t\tself.top.placeLinks()\n\t\tself.top.placeCivs()\n\t\t\t\n\n\t# Will handle the input for this screen...\n\tdef handleInput (self, inputClass):\n\t\treturn 0\n\n\n","sub_path":"Rhye's and 
Fall RAND/Assets/Python/screens/CvPediaCivilization.py","file_name":"CvPediaCivilization.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"236031391","text":"from django.urls import path\nfrom .views import devices, index, unbind, subscription, config_editor\n\nurlpatterns = [\n # path('devices', devices),\n # path('change_gate_id', change_gate_id),\n # path('unbind/a=&b=', unbind),\n # path('subscription', subscription),\n # path('', index)\n path('gate/', devices),\n path('unbind/a=&b=', unbind),\n path('subscribe/',subscription),\n path('config_editor/', config_editor)\n]\n","sub_path":"Website/binding/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"419412627","text":"#!/usr/bin/python\n\ndef format(message):\n\n outmsg = dict(message)\n \n if 'src_ip' in outmsg:\n outmsg['src'] = outmsg['src_ip']\n del outmsg['src_ip']\n\n if 'dest_ip' in outmsg:\n outmsg['dest'] = outmsg['dest_ip']\n del outmsg['dest_ip']\n\n return u' '.join(['{}={}'.format(name, value) for name, value in outmsg.items() if value])\n","sub_path":"splunk.py","file_name":"splunk.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"63996067","text":"\nimport string\nclass REGION:\n\tdef __init__(self, region_name, region_chr, region_start, region_end):\n\t\tself.name = region_name\n\t\tself.Chr = region_chr\n\t\tself.start = region_start\n\t\tself.end = region_end\n\ndef find_region(gene_tuple, region_dic):\n\tgene_chr = gene_tuple[0]\n\tgene_start = gene_tuple[1]\n\tgene_end = gene_tuple[2]\n\tout = \"\"\n\tfor region in region_dic:\n\t\tRegion = region_dic[region]\n\t\tif Region.Chr != gene_chr:\n\t\t\tcontinue\n\t\tif (gene_start > Region.start) and (gene_start < Region.end):\n\t\t\tout = region\n\t\t\tbreak\n\t\telif (gene_end > Region.start) and (gene_end < Region.end):\n\t\t\tout = region\n\t\t\tbreak\n\treturn out\n\n\n\ngtf_gene_file = \"/home/tw83/twang/reference/hg19/gencode_v19_gene_annotation.txt\"\nregion_pos_file = \"/home/tw83/twang/AMP/eQTLs/PDGWAS/PDRegions_pos_range.txt\"\nout_region_gene_count_file = \"/home/tw83/twang/AMP/eQTLs/PDGWAS/PDRegions_gene_count.txt\"\ngtf_gene_fp = open(gtf_gene_file)\nregion_pos_fp = open(region_pos_file)\nout_region_gene_count_fp = open(out_region_gene_count_file, 'w')\nheader = region_pos_fp.readline()\nregion_dic = {}\nout_dic = {}\nwhile True:\n\tline = region_pos_fp.readline()\n\tif not line:\n\t\tbreak\n\tlinesplit = line.strip().split('\\t')\n\tregion = linesplit[0]\n\tregion_chr = linesplit[1]\n\tregion_start = string.atoi(linesplit[4])\n\tregion_end = string.atoi(linesplit[5])\n\tRegion = REGION(region, region_chr, region_start, region_end)\n\tregion_dic[region] = Region\n\nwhile True:\n\tline = gtf_gene_fp.readline()\n\tif not line:\n\t\tbreak\n\tlinesplit = line.strip().split('\\t')\n\tgene_chr = linesplit[0]\n\tgene_start = string.atoi(linesplit[1])\n\tgene_end = string.atoi(linesplit[2])\n\tregion = find_region((gene_chr, gene_start, gene_end), region_dic)\n\tif len(region) >0:\n\t\tif out_dic.has_key(region):\n\t\t\tout_dic[region] += 1\n\t\telse:\n\t\t\tout_dic[region] = 1\n\nfor region in region_dic:\n\tRegion = region_dic[region]\n\tChr = Region.Chr\n\tregion_start = Region.start\n\tregion_end = Region.end\n\tif 
out_dic.has_key(region):\n\t\tregion_gene_count = out_dic[region]\n\telse:\n\t\tregion_gene_count = 0\n\tout = \"\\t\".join([region, Chr, str(region_start), str(region_end), str(region_gene_count)]) + '\\n'\n\tout_region_gene_count_fp.write(out)\n\n\n","sub_path":"progress_report/region_gene_count.py","file_name":"region_gene_count.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"586099937","text":"from flask import render_template\r\nimport feedparser\r\nfrom flask import Flask\r\nfrom flask import request\r\n\r\nimport json\r\nimport urllib.parse\r\nimport urllib.request\r\n\r\n# buat cookies\r\nimport datetime\r\nfrom flask import make_response\r\n\r\napp = Flask(__name__)\r\n\r\nrss_feed = {'bbc': 'http://feeds.bbci.co.uk/news/rss.xml',\r\n 'cnn': 'http://rss.cnn.com/rss/edition.rss',\r\n 'fox': 'http://feeds.foxnews.com/foxnews/latest',\r\n 'iol': 'http://www.iol.co.za/cmlink/1.640'\r\n}\r\n\r\nDEFAULTS = {\r\n 'sumber':'bbc',\r\n 'kota' : 'Pontianak, ID',\r\n 'kurs_dari' : 'USD',\r\n 'kurs_ke' : 'IDR'\r\n}\r\n\r\nCUACA_URL = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid=b13ccb872798645a995b1db18a37779f'\r\n\r\nKURS_URL = 'https://openexchangerates.org/api/latest.json?app_id=99aa693a4b73495f9dc2e5282f8a07de'\r\n\r\n@app.route('/')\r\ndef home():\r\n # ambil dari get, jika kosong cookies kemudian default\r\n\r\n sumber = get_value_with_fallback(\"sumber\")\r\n artikels = tarik_berita(sumber)\r\n \r\n kota = get_value_with_fallback(\"kota\")\r\n cuaca = ambil_cuaca(kota)\r\n\r\n kurs_dari = get_value_with_fallback(\"kurs_dari\")\r\n kurs_ke = get_value_with_fallback(\"kurs_ke\")\r\n\r\n rate, currencies = ambil_kurs(kurs_dari, kurs_ke)\r\n\r\n # return render_template(\"home.html\", \r\n # artikels=artikels,\r\n # cuaca=cuaca,\r\n # kurs_dari=kurs_dari,\r\n # kurs_ke=kurs_ke,\r\n # rate=rate,\r\n # currencies=sorted(currencies))\r\n\r\n response = make_response(render_template(\"home.html\",\r\n artikels=artikels,\r\n cuaca=cuaca,\r\n kurs_dari=kurs_dari,\r\n kurs_ke=kurs_ke,\r\n rate=rate,\r\n currencies=sorted(currencies)\r\n ))\r\n\r\n expires = datetime.datetime.now() + datetime.timedelta(days=365)\r\n response.set_cookie(\"sumber\", sumber, expires=expires)\r\n response.set_cookie(\"kota\", kota, expires=expires)\r\n response.set_cookie(\"kurs_dari\", kurs_dari, expires=expires)\r\n response.set_cookie(\"kurs_ke\", kurs_ke, expires=expires)\r\n\r\n return response\r\n\r\ndef tarik_berita(query):\r\n if not query or query.lower() not in rss_feed:\r\n sumber = DEFAULTS['sumber']\r\n else:\r\n sumber = query.lower()\r\n\r\n feed = feedparser.parse(rss_feed[sumber])\r\n\r\n return feed['entries']\r\n\r\ndef ambil_cuaca(query):\r\n \r\n query = urllib.parse.quote(query) # url no space, clean url space to %20\r\n url = CUACA_URL.format(query)\r\n data = urllib.request.urlopen(url).read() # load data from HTTP to string\r\n parsed = json.loads(data.decode('utf-8')) # json to dictionary\r\n cuaca = None\r\n if parsed.get(\"weather\"):\r\n cuaca = {\"description\":parsed[\"weather\"][0][\"description\"],\r\n \"temperature\":parsed[\"main\"][\"temp\"],\r\n \"city\":parsed[\"name\"],\r\n \"negara\":parsed[\"sys\"][\"country\"]\r\n }\r\n return cuaca\r\n\r\ndef ambil_kurs(dari, ke):\r\n semua_kurs = urllib.request.urlopen(KURS_URL).read()\r\n\r\n parsed = json.loads(semua_kurs.decode('utf-8')).get('rates') # ambil rates\r\n dari_rate = parsed.get(dari.upper())\r\n 
ke_rate = parsed.get(ke.upper())\r\n return (ke_rate/dari_rate, parsed.keys())\r\n\r\n\r\ndef get_value_with_fallback(key):\r\n if request.args.get(key):\r\n return request.args.get(key)\r\n\r\n if request.cookies.get(key):\r\n return request.cookies.get(key)\r\n\r\n return DEFAULTS[key]\r\n\r\nif __name__ == \"__main__\":\r\n app.run(port=5000, debug=True)","sub_path":"headlines.py","file_name":"headlines.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"570047431","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom setuptools import find_packages, setup\n\nimport splunk_eventgen\n\nVERSION = splunk_eventgen.__version__\n\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept (IOError, ImportError):\n long_description = open('README.md').read()\n\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\n\nsetup(\n name='splunk_eventgen',\n version=VERSION,\n description='Splunk Event Generator to produce real-time, representative data',\n long_description=long_description,\n author='Splunk, Inc.',\n classifiers=[\n 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Software Development :: Build Tools',\n 'Topic :: Software Development :: Testing', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7'],\n keywords='splunk eventgen container containers docker automation',\n entry_points={'console_scripts': [\"splunk_eventgen = splunk_eventgen.__main__:main\"]},\n include_package_data=True,\n packages=find_packages(),\n package_data={\"splunk_eventgen\": ['*.sh', '*.txt', '*.yml'], '': ['*.sh', '*.txt', '*.yml']},\n install_requires=[\n 'pytest>=3.0.0', # Required to test functional tests in eventgen.\n 'pytest-mock>=1.10.4',\n 'boto3',\n 'requests>=2.18.4',\n 'requests[security]',\n 'logutils>=0.3.4.1',\n 'futures>=3.0.5',\n 'ujson>=1.35', # way faster implementation of JSON processing\n 'pyyaml',\n 'httplib2',\n 'jinja2',\n 'pyrabbit==1.1.0',\n 'urllib3==1.24.2',\n 'pyOpenSSL',\n 'flake8>=3.7.7',\n 'yapf>=0.26.0',\n 'isort>=4.3.15'])\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"419676353","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Produtos\nfrom django.db.models import Q\nfrom .forms import ProdutosForm\nfrom django.core.paginator import Paginator, InvalidPage\n\n\n# Create your views here.\ndef produtos_lista(request):\n if request.GET:\n slug = request.GET['search_box']\n produtos = Produtos.objects.filter(Q(cod_barra__icontains=slug) | Q(nome__icontains=slug) | Q(preco__icontains=slug) | Q(etc__icontains=slug))\n else:\n produtos = Produtos.objects.all()\n paginator = Paginator(produtos, 10)\n page = request.GET.get('page')\n produtos = paginator.get_page(page)\n return render(request, 'produtos_lista.html', {'produtos': produtos})\n\n\ndef produtos_cadastro(request):\n form = ProdutosForm(request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n return redirect('produtos')\n return render(request, 'produtos_cadastro.html', {\"form\":form})\n\n\ndef produtos_editar(request, id):\n produtos = get_object_or_404(Produtos, pk=id)\n form = ProdutosForm(request.POST or None, request.FILES or None, 
instance=produtos)\n\n if form.is_valid():\n form.save()\n return redirect('produtos_lista')\n return render(request, 'produtos_editar.html', {'form':form})\n\n\ndef produtos_delete(request, id):\n produtos = get_object_or_404(Produtos, pk=id)\n\n if request.method == \"POST\":\n produtos.delete()\n return redirect('produtos_lista')\n return render(request, 'produtos_delete.html', {'produtos':produtos})\n","sub_path":"produtos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"582876226","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse,\\\n HttpResponseBadRequest, HttpResponseNotAllowed\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django import forms\n\nfrom capi.views import proxiable\n\nfrom ..uimodel.models import PageLayout\nfrom ..layout.models import LayerBox\nfrom ..layout.views import ctx_dict_for_layout\nfrom ..baseclib.models import CItem\n\nfrom .models import PageState, ContentForBox, UIProfile\nfrom .forms import UIProfileForm, ProfileCloneForm\n\n\n@login_required\ndef profiles_home(request,):\n '''Home page for the UIProfiles'''\n # TODO: This should probably move to separate UIProfile app\n profiles = UIProfile.objects.for_user(request.user)\n ctx_dict = {'profiles': profiles}\n return render_to_response('mcms/uiprofile/profile_home.html',\n ctx_dict,\n context_instance=RequestContext(request))\n\n\n@login_required\ndef profile_overview(request, p_id):\n profile = get_object_or_404(UIProfile, pk=p_id)\n uim = profile.uimodel\n\n if request.method == 'POST':\n form = UIProfileForm(request.POST, instance=profile)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('mcms:profiles:profile_overview', args=(profile.pk,)))\n else:\n form = UIProfileForm(instance=profile)\n ctx_dict = dict(profile=profile, form=form,\n box_mapping=profile.get_box_mapping())\n\n ctx_dict['clone_form']=ProfileCloneForm()\n ctx_dict['boxspec'] = 666\n return render_to_response('mcms/uiprofile/profile_overview.html',\n ctx_dict,\n context_instance=RequestContext(request),)\n\n\n@login_required\ndef profile_clone(request, p_id):\n form = ProfileCloneForm()\n\n parent_profile = get_object_or_404(UIProfile, pk=p_id)\n uim = parent_profile.ui_model\n\n if uim.is_frozen:\n return render(request, 'mcms/uiprofile/frozen.html', dict(uimodel=uim))\n\n if request.method == \"POST\":\n form = ProfileCloneForm(request.POST)\n if form.is_valid():\n new_profile = parent_profile.clone(form.data['name'])\n return HttpResponseRedirect(reverse('mcms:uimodels:details', kwargs={'mid': new_profile.ui_model.id}))\n\n return render_to_response('mcms/uiprofile/profile_clone.html',\n {'form': form, 'profile': parent_profile}, context_instance=RequestContext(request))\n\n\n@login_required\ndef profile_content_assign(request, p_id=None, page_state_name=None,\n box_id=None):\n if p_id is None:\n return HttpResponseBadRequest()\n\n profile = get_object_or_404(UIProfile, pk=p_id)\n\n if profile.is_frozen:\n return render(request, 'mcms/uiprofile/frozen.html', dict(uimodel=uim))\n\n page_state = get_object_or_404(PageState, uiprofile=profile,\n name=page_state_name)\n target_box = get_object_or_404(LayerBox, pk=box_id)\n if request.method 
== \"POST\":\n post = request.POST\n box_id = int(post['set_box_id'])\n target_box = get_object_or_404(LayerBox, pk=box_id)\n selected_file_id = post.get('selected_file_id', \"\")\n\n if selected_file_id != \"\":\n item = CItem.objects.get(id=selected_file_id)\n try:\n cfb = ContentForBox.objects.get(box=target_box,\n page_state=page_state)\n cfb.citem=item\n cfb.save()\n except ContentForBox.DoesNotExist:\n cfb = ContentForBox.objects.create(box=target_box,\n citem=item,\n page_state=page_state)\n ctx_dict=dict()\n ctx_dict = ctx_dict_for_layout(ctx_dict, page_state.page_layout.layout)\n ctx_dict['target_layout'] = page_state.page_layout\n ctx_dict['page_state'] = page_state\n ctx_dict['target_box'] = target_box\n ctx_dict['edition'] = profile.edition\n\n try:\n cfb = page_state.contentforboxes.filter(box__id=box_id)[0]\n ctx_dict['content_item'] = cfb.get_content()\n ctx_dict['cfb'] = cfb\n except:\n pass\n\n ctx_dict['profile'] = profile\n\n # Get all the boxes for this page.\n cfbs = page_state.contentforboxes.all()\n\n data = []\n for cfb in cfbs:\n data.append({\n 'id': cfb.box.id,\n 'mimetype': cfb.citem.mime_type,\n 'x_pos': cfb.box.x_pos,\n 'y_pos': cfb.box.y_pos,\n 'width': cfb.box.width,\n 'height':cfb.box.height,\n 'content_url': str(cfb.citem.contentfile),\n })\n\n ctx_dict['content_preview_data'] = data\n\n return render_to_response('mcms/uiprofile/profile_content_assign.html',\n ctx_dict,\n context_instance=RequestContext(request))\n\n\nclass PageStateJSONForm(forms.Form):\n page_state_id = forms.IntegerField()\n\n\n@csrf_exempt\n@proxiable([PageStateJSONForm,])\ndef get_page_state(request, __proxied=False):\n if request.method != 'POST':\n return HttpResponseNotAllowed()\n form = PageStateJSONForm(request.POST)\n if not form.is_valid():\n return HttpResponseBadRequest()\n carton = get_object_or_404(PageState,\n id=form.cleaned_data['page_state_id'])\n return HttpResponse(carton.ajax_json())\n","sub_path":"mcms/uiprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"153764568","text":"from tkinter import *\r\nimport tkinter as tk\r\nimport tkinter.messagebox\r\nimport tkinter.font as tkFont\r\nfrom PIL import Image,ImageTk\r\ndef best(p,k,k1,k2):\r\n t=tk.Tk()\r\n t.title('CRIME')\r\n w, h = t.winfo_screenwidth(), t.winfo_screenheight()\r\n t.geometry(\"%dx%d+0+0\" %(w,h))\r\n def back():\r\n t.destroy()\r\n from constable_home import const_home\r\n const_home(p)\r\n\r\n fih = tkFont.Font(family=\"Times New Roman\", size=20)\r\n\r\n noi=Label(t, text='NUMBER OF INJURIES',font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=18,height=2)\r\n nod=Label(t, text='NUMBER OF DEATHS',font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=18,height=2)\r\n\r\n noi1=Label(t,text=k[0][2],font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=15,height=2)\r\n nod1=Label(t,text=k[0][3],font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=15,height=2)\r\n\r\n fir_no=Label(t, text='FIR NUMBER',font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=18,height=2)\r\n poc=Label(t, text='PLACE OF CRIME',font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=18,height=2)\r\n 
fir_no1=Label(t,text=k[0][0],font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=15,height=2)\r\n poc1=Label(t,text=k[0][5],font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=15,height=2)\r\n\r\n OptionList=[k1[0][1]]\r\n v = tk.StringVar(t)\r\n v.set('CRIMINAL ID')\r\n c_id= tk.OptionMenu(t, v, *OptionList)\r\n doc=Label(t, text='DATE OF CRIME',font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=18,height=2)\r\n doc1=Label(t,text=k[0][4],font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=15,height=2)\r\n\r\n OptionList2=[k2[0][2]]\r\n OptionList3=[k2[0][1]]\r\n v2 = tk.StringVar(t)\r\n v2.set('SECTION NO')\r\n sn= tk.OptionMenu(t, v2, *OptionList2)\r\n v3 = tk.StringVar(t)\r\n v3.set('PENAL CODE')\r\n pc=tk.OptionMenu(t, v3, *OptionList3)\r\n\r\n da=Label(t, text='DAMAGE AMOUNT',font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=18,height=2)\r\n da1=Label(t,text=k[0][1],font=tkFont.Font(family=\"Times New Roman\", size=16), borderwidth=2, relief=\"solid\", width=15,height=2)\r\n back_button = Button(t, text='GO BACK',font=fih ,command=back, borderwidth=2, relief=\"solid\", width=20, height=2).place(\r\n x=950, y=700)\r\n noi.place(x=50, y=490)\r\n nod.place(x=50, y=430)\r\n # name1.place(x=225, y=150, width=150, height=70)\r\n noi1.place(x=300, y=490)\r\n nod1.place(x=300, y=430)\r\n fir_no.place(x=50, y=10)\r\n\r\n fir_no1.place(x=300, y=10)\r\n\r\n poc.place(x=50, y=205)\r\n poc1.place(x=300, y=205)\r\n\r\n pc.place(x=50, y=270,width=300,height=70)\r\n\r\n sn.place(x=50, y=350,width=300,height=70)\r\n\r\n c_id.place(x=50, y=65,width=300,height=70)\r\n\r\n\r\n da.place(x=50, y=555)\r\n da1.place(x=300, y=555)\r\n doc.place(x=50, y=140)\r\n doc1.place(x=300, y=140)\r\n\r\n\r\n\r\n\r\n\r\n\r\n mainloop()","sub_path":"open_crime.py","file_name":"open_crime.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"593915349","text":"class Rectangle:\r\n def __init__(self):\r\n self.Name = [\"矩形\",\"正方形\"]\r\n def enter(self):\r\n self.a = True\r\n x = input(\"请输入x1,y1,x2,y2,以空格为间隔:\")\r\n while self.a:\r\n self.data = []\r\n self.D = []\r\n try:\r\n for a in x:\r\n if a != \" \":\r\n self.D.append(float(a))\r\n if self.D[0] == self.D[2] or self.D[1] == self.D[3]:\r\n raise Datawrong\r\n except Datawrong:\r\n print(\"数据错误!\")\r\n x = input(\"请输入x1,y1,x2,y2,以空格为间隔:\")\r\n else:\r\n self.data = self.D\r\n self.a = False\r\n def width(self):\r\n self.w = abs(self.data[0]-self.data[2])\r\n print(\"矩形的宽度为:%f\"% self.w)\r\n def height(self):\r\n self.h = abs(self.data[1]-self.data[3])\r\n print(\"矩形的长度为:%f\"% self.h)\r\n def area(self):\r\n s = self.w * self.h\r\n if self.w == self.h:\r\n n = str(self.Name[1])\r\n else:\r\n n = str(self.Name[0])\r\n print(\"%s面积为:%f\"%(n,s))\r\n def circumference(self):\r\n c = 2*(self.w + self.h)\r\n if self.w == self.h:\r\n n = str(self.Name[1])\r\n else:\r\n n = str(self.Name[0])\r\n print(\"%s的周长为%f\"%(n,c))\r\nclass Datawrong(Exception):\r\n pass\r\nclass Square(Rectangle):\r\n def side_length(self):\r\n self.w = abs(self.data[0]-self.data[2])\r\n self.h = self.w\r\n print(\"正方形边长为%f.\"% self.w)\r\nif __name__ == \"__main__\":\r\n S = Square()\r\n S.enter()\r\n S.side_length()\r\n S.circumference()\r\n 
S.area()\r\n","sub_path":"矩形.py","file_name":"矩形.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"305717020","text":"# Simple ABM simulator in Python\r\n#\r\n# *** Diffusion-Limited Aggregation ***\r\n#\r\n# Copyright 2008-2012 Hiroki Sayama\r\n# sayama@binghamton.edu\r\n\r\nimport matplotlib\r\nmatplotlib.use('TkAgg')\r\n\r\nimport pylab as PL\r\nimport random as RD\r\nimport scipy as SP\r\n\r\nRD.seed()\r\n\r\nwidth = 100\r\nheight = 100\r\npopulationSize = 1000\r\nnoiseLevel = 1\r\ncollisionDistance = 2\r\nCDsquared = collisionDistance ** 2\r\ntoBeRemoved = -1\r\n\r\ndef init():\r\n global time, free, fixed\r\n\r\n time = 0\r\n \r\n free = []\r\n for i in xrange(populationSize - 1):\r\n free.append([RD.uniform(0, width), RD.uniform(0, height)])\r\n\r\n fixed = []\r\n fixed.append([width / 2, height / 2])\r\n\r\ndef draw():\r\n PL.cla()\r\n if free != []:\r\n x = [ag[0] for ag in free]\r\n y = [ag[1] for ag in free]\r\n PL.scatter(x, y, color = 'cyan')\r\n if fixed != []:\r\n PL.hold(True)\r\n x = [ag[0] for ag in fixed]\r\n y = [ag[1] for ag in fixed]\r\n PL.scatter(x, y, color = 'blue')\r\n PL.hold(False)\r\n PL.axis('scaled')\r\n PL.axis([0, width, 0, height])\r\n PL.title('t = ' + str(time))\r\n\r\ndef clip(a, amin, amax):\r\n if a < amin: return amin\r\n elif a > amax: return amax\r\n else: return a\r\n\r\ndef step():\r\n global time, free, fixed\r\n\r\n time += 1\r\n\r\n # simulate random motion\r\n for ag in free:\r\n ag[0] += RD.gauss(0, noiseLevel)\r\n ag[1] += RD.gauss(0, noiseLevel)\r\n ag[0] = clip(ag[0], 0, width)\r\n ag[1] = clip(ag[1], 0, height)\r\n\r\n # detect collision and change state\r\n for i in xrange(len(free)):\r\n for j in xrange(len(fixed)):\r\n if (free[i][0]-fixed[j][0])**2 + (free[i][1]-fixed[j][1])**2 < CDsquared:\r\n fixed.append(free[i])\r\n free[i] = toBeRemoved\r\n break\r\n\r\n # remove \"toBeRemoved\" free particles\r\n while toBeRemoved in free:\r\n free.remove(toBeRemoved)\r\n\r\nimport pycxsimulator\r\npycxsimulator.GUI().start(func=[init,draw,step])\r\n","sub_path":"pycx-0.32/abm-DLA.py","file_name":"abm-DLA.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"497020026","text":"import pytest\n\nfrom kasa import SmartDeviceException\n\nfrom .conftest import has_emeter, no_emeter, pytestmark\nfrom .newfakes import CURRENT_CONSUMPTION_SCHEMA\n\n\n@no_emeter\nasync def test_no_emeter(dev):\n assert not dev.has_emeter\n\n with pytest.raises(SmartDeviceException):\n await dev.get_emeter_realtime()\n with pytest.raises(SmartDeviceException):\n await dev.get_emeter_daily()\n with pytest.raises(SmartDeviceException):\n await dev.get_emeter_monthly()\n with pytest.raises(SmartDeviceException):\n await dev.erase_emeter_stats()\n\n\n@has_emeter\nasync def test_get_emeter_realtime(dev):\n if dev.is_strip:\n pytest.skip(\"Disabled for strips temporarily\")\n\n assert dev.has_emeter\n\n current_emeter = await dev.get_emeter_realtime()\n CURRENT_CONSUMPTION_SCHEMA(current_emeter)\n\n\n@has_emeter\nasync def test_get_emeter_daily(dev):\n if dev.is_strip:\n pytest.skip(\"Disabled for strips temporarily\")\n\n assert dev.has_emeter\n\n assert await dev.get_emeter_daily(year=1900, month=1) == {}\n\n d = await dev.get_emeter_daily()\n assert len(d) > 0\n\n k, v = d.popitem()\n assert isinstance(k, int)\n assert isinstance(v, float)\n\n # Test kwh (energy, energy_wh)\n d = 
await dev.get_emeter_daily(kwh=False)\n k2, v2 = d.popitem()\n assert v * 1000 == v2\n\n\n@has_emeter\nasync def test_get_emeter_monthly(dev):\n if dev.is_strip:\n pytest.skip(\"Disabled for strips temporarily\")\n\n assert dev.has_emeter\n\n assert await dev.get_emeter_monthly(year=1900) == {}\n\n d = await dev.get_emeter_monthly()\n assert len(d) > 0\n\n k, v = d.popitem()\n assert isinstance(k, int)\n assert isinstance(v, float)\n\n # Test kwh (energy, energy_wh)\n d = await dev.get_emeter_monthly(kwh=False)\n k2, v2 = d.popitem()\n assert v * 1000 == v2\n\n\n@has_emeter\nasync def test_emeter_status(dev):\n if dev.is_strip:\n pytest.skip(\"Disabled for strips temporarily\")\n\n assert dev.has_emeter\n\n d = await dev.get_emeter_realtime()\n\n with pytest.raises(KeyError):\n assert d[\"foo\"]\n\n assert d[\"power_mw\"] == d[\"power\"] * 1000\n # bulbs have only power according to tplink simulator.\n if not dev.is_bulb:\n assert d[\"voltage_mv\"] == d[\"voltage\"] * 1000\n\n assert d[\"current_ma\"] == d[\"current\"] * 1000\n assert d[\"total_wh\"] == d[\"total\"] * 1000\n\n\n@pytest.mark.skip(\"not clearing your stats..\")\n@has_emeter\nasync def test_erase_emeter_stats(dev):\n assert dev.has_emeter\n\n await dev.erase_emeter()\n\n\n@has_emeter\nasync def test_current_consumption(dev):\n if dev.is_strip:\n pytest.skip(\"Disabled for strips temporarily\")\n\n if dev.has_emeter:\n x = await dev.current_consumption()\n assert isinstance(x, float)\n assert x >= 0.0\n else:\n assert await dev.current_consumption() is None\n","sub_path":"venv/Lib/site-packages/kasa/tests/test_emeter.py","file_name":"test_emeter.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"305207680","text":"from typing import List\n\nfrom tkdet.utils.registry import Registry\nfrom .base import Neck\nfrom typing import Union\n\n__all__ = [\n \"NECK_REGISTRY\",\n \"build_neck\",\n \"get_neck_list\",\n]\n\nNECK_REGISTRY = Registry(\"NECK\")\n\n\ndef build_neck(cfg, input_shape) -> Union[Neck, None]:\n if not cfg.MODEL.NECK.ENABLE:\n return None\n\n neck = NECK_REGISTRY.get(cfg.MODEL.NECK.NAME)(cfg, input_shape)\n assert isinstance(neck, Neck)\n\n return neck\n\n\ndef get_neck_list() -> List[str]:\n return list(NECK_REGISTRY.keys())\n","sub_path":"tkdet/models/neck/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"161652816","text":"import pymysql,random\nfrom collections import deque\njhm='ABCDEFGHIJKLMNOPQLSTUVWXYZ1234567890'\n\ndb=pymysql.connect('localhost','root','Mring1993','yangming')\ncursor=db.cursor()\ncursor.execute(\"DROP TABLE IF EXISTS JHM\")\nsql=\"\"\"CREATE TABLE JHM (JM VARCHAR(60) NOT NULL)\"\"\"\ncursor.execute(sql)\ndb.close()\ndef p_m(cnt):\n\tn=0\n\tpm=[]\n\tm=[]\n\twhile n\" + self.call +\n \"\" + self.band +\n \"\" + self.mode +\n \"\" + self.freq +\n \"\" + self.qso_date +\n \"\" + self.time_on +\n \"\" + self.time_off +\n \"\" + self.rst_rcvd +\n \"\" + self.rst_sent +\n \"NN\" +\n \"\" + self.country +\n \"\" + self.gridsquare +\n \"\" + self.comment +\n \"\" + self.cont +\n \"\" + self.country +\n \"\" + self.dxcc +\n \"\" + self.cqz +\n \"\" + self.ituz +\n \"\" + self.lat +\n \"\" + self.lon +\n \"\"\n }\n print(data)\n loop = asyncio.get_event_loop()\n loop.run_in_executor(None, qso, self.cloudlog_uri, data)\n #qso(self.cloudlog_uri, data)\n\n# if self.contest == 
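The NECK_REGISTRY in the tkdet record above follows the usual name-to-class registry pattern; a self-contained sketch of that idea (a simplified stand-in, not the actual tkdet Registry API) is:

class Registry:
    def __init__(self, name):
        self.name = name
        self._classes = {}

    def register(self, cls):
        # decorator: store the class under its own name
        self._classes[cls.__name__] = cls
        return cls

    def get(self, name):
        return self._classes[name]

    def keys(self):
        return self._classes.keys()

NECKS = Registry("NECK")

@NECKS.register
class FPN:
    pass

assert NECKS.get("FPN") is FPN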
'FD':\n# command = \"\"\"TRUE\n# TRUE\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\\r\\n\"\"\" % (self.computer_name, self.operator,\n# self.name_s, self.initials,\n# self.county, self.call,\n# self.name_r, self.date,\n# self.time_on, self.time_off,\n# self.band, self.mode,\n# self.frequency, self.power,\n# self.grid_r, self.grid_s,\n# self.comments, self.points,\n# self.arrl_class_r,\n# self.arrl_section_r)\n# else:\n# command = \"\"\"TRUE\n# TRUE\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\n# %s\\r\\n\"\"\" % (self.computer_name, self.operator,\n# self.name_s, self.initials,\n# self.county, self.call,\n# self.name_r, self.date,\n# self.time_on, self.time_off,\n# self.band, self.mode,\n# self.frequency, self.power,\n# self.rst_r, self.rst_s,\n# self.grid_r, self.grid_s,\n# self.comments, self.points,\n# self.arrl_class_r,\n# self.arrl_section_r)\n# print(\"\\nSending log entry to Cloudlog...\")\n# print(command)\n# try:\n# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# sock.connect((self.config['DEFAULT']['Cloudlog_HOST'],\n# int(self.config['DEFAULT']['Cloudlog_PORT'])))\n# self.tcp_send_string(sock, command)\n# time.sleep(.2)\n# command = \"\\r\\n\"\n# print(\"Sending log refresh...\")\n# self.tcp_send_string(sock, command)\n# sock.close()\n# except socket.error as msg:\n# sys.stderr.write(\"[ERROR] Failed to connect to Cloudlog: %s\\n\" % msg)\n\n\nif __name__ == \"__main__\":\n W = WsjtxToCloudlog()\n print(\"WSJT-X to Cloudlog by Joeri Van Dooren ON3URE\\n\")\n while True:\n W.udp_recv_string()\n W.parse_adif()\n W.log_new_qso()\n W.reset_vals()\n W.sock.close()\n\nloop = asyncio.get_event_loop()\ntry:\n loop.run_forever()\nfinally:\n loop.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":11767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"567274590","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def findBottomLeftValue(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if root is None:\n return\n parent, level ={root:None}, {root:0}\n frontier = [root]\n i = 1\n while frontier:\n next = []\n for u in frontier:\n for v in [u.left, u.right]:\n if v not in level and v is not None:\n level[v] = i\n parent[v] = u\n next.append(v)\n if next == []:\n return frontier[0].val\n else:\n frontier = next\n i += 1\n\nclass Solution2:\n def findBottomLeftValue(self, root):\n if root is None:\n return \n queue = [root]\n for node in queue:\n queue += filter(None, [node.right, node.left])\n print(queue)\n return node.val\n\n ","sub_path":"BFS/FindBottomLeftTreeValue.py","file_name":"FindBottomLeftTreeValue.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"406586898","text":"import requests\nimport logging\nfrom APIimports import constants\nfrom APIimports.models import ApiElement\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef oneRingToBindThem():\n for name, url in constants.APIS:\n response = requests.get(url)\n\n try:\n response.raise_for_status()\n geojson = response.json()\n\n for element in geojson:\n apiElement = ApiElement(\n payload=element,\n url=url,\n name=name\n )\n 
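A quick self-contained check of the right-to-left BFS trick in Solution2 from the record above (TreeNode redefined locally just for this sketch):

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(2)
root.left, root.right = TreeNode(1), TreeNode(3)
root.left.left = TreeNode(4)  # deepest, leftmost value

queue = [root]
for node in queue:
    # push right before left, so the last node visited is the bottom-left one
    queue += [c for c in (node.right, node.left) if c]
print(node.val)  # 4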
apiElement.save()\n # except HTTPError:\n # logger.exception(\"non 200 response from api request \" + url)\n # except ValueError:\n # logger.exception(\"exception parsing json in response from api request against \" + url)\n\n except Exception as e:\n print(e)\n\n\n","sub_path":"transDjango/APIimports/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"378555191","text":"\"\"\"\nWrite a program to print all permutations of a given string\n\"\"\"\n\n\ndef permutation(s, l, r):\n if l == r:\n print(\"\".join(s))\n return\n for i in range(l, r):\n s[l], s[i] = s[i], s[l]\n permutation(s, l+1, r)\n s[i], s[l] = s[l], s[i]\n\n\ns = ['A', 'B', 'C', 'D']\npermutation(s, 0, len(s))","sub_path":"algorithm/mathematical/permutation.py","file_name":"permutation.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"17689564","text":"# -*- coding: utf-8 -*-\nimport uuid\nfrom saml2.entity_category.at_egov_pvp2 import PVP2\nfrom saml2 import BINDING_HTTP_REDIRECT, BINDING_HTTP_POST\nfrom saml2.cert import OpenSSLWrapper\nfrom saml2.extension.idpdisc import BINDING_DISCO\nfrom saml2.saml import NAME_FORMAT_URI, NAMEID_FORMAT_PERSISTENT\nimport service_conf\n\ntry:\n from saml2.sigver import get_xmlsec_binary\nexcept ImportError:\n get_xmlsec_binary = None\n\nif get_xmlsec_binary:\n xmlsec_path = get_xmlsec_binary([\"/opt/local/bin\"])\nelse:\n xmlsec_path = '/usr/bin/xmlsec1'\n\nHOST = 'localhost'\nPORT = service_conf.PORT\n\nBASE = \"https://%s:%s\" % (HOST, PORT)\n\ndef generate_cert():\n sn = uuid.uuid4().urn\n cert_info = {\n \"cn\": \"localhost\",\n \"country_code\": \"se\",\n \"state\": \"ac\",\n \"city\": \"Umea\",\n \"organization\": \"ITS\",\n \"organization_unit\": \"DIRG\"\n }\n osw = OpenSSLWrapper()\n ca_cert_str = osw.read_str_from_file(\"root_cert/localhost.ca.crt\")\n ca_key_str = osw.read_str_from_file(\"root_cert/localhost.ca.key\")\n req_cert_str, req_key_str = osw.create_certificate(cert_info, request=True, sn=sn, key_length=2048)\n cert_str = osw.create_cert_signed_certificate(ca_cert_str, ca_key_str, req_cert_str)\n return cert_str, req_key_str\n\nCONFIG = {\n \"entityid\": \"%s/testsp.xml\" % BASE,\n \"description\": \"Test local SP\",\n \"entity_category\": [PVP2],\n \"generate_cert_func\": generate_cert,\n \"service\": {\n \"sp\": {\n \"authn_requests_signed\": \"true\",\n \"want_assertions_signed\": \"false\",\n \"want_response_signed\": \"true\",\n \"allow_unsolicited\": \"false\",\n \"name\": \"LocalTestSP\",\n \"endpoints\": {\n \"assertion_consumer_service\": [\n (\"%s/acs/redirect\" % BASE, BINDING_HTTP_REDIRECT),\n (\"%s/acs/post\" % BASE, BINDING_HTTP_POST)\n ],\n \"single_logout_service\": [(BASE + \"/slo\", BINDING_HTTP_REDIRECT)],\n \"discovery_response\": [(\"%s/disco\" % BASE, BINDING_DISCO)]\n },\n \"required_attributes\": [\"pvp-version\", \"pvp-principal-name\", ],\n \"optional_attributes\": [\"pvp-givenname\", \"pvp-birthdate\", \"pvp-userid\", ],\n \"name_id_format\": [NAMEID_FORMAT_PERSISTENT],\n },\n },\n \"debug\": 1,\n \"key_file\": \"pki/mykey.pem\",\n \"cert_file\": \"pki/mycert.pem\",\n \"metadata\": {\n \"local\": [\n \"metadata/pefim_proxy_conf.xml\"\n ],\n },\n\n\n # -- below used by make_metadata --\n \"organization\": {\n \"name\": \"Test SP\",\n \"display_name\": [(\"Test SP\", \"en\")],\n \"url\": \"http://localhost:%s\" % 
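For comparison with the recursive swap approach in the permutation record above, the standard library yields the same 4! = 24 strings (possibly in a different order) in two lines:

from itertools import permutations

s = ['A', 'B', 'C', 'D']
for p in permutations(s):
    print("".join(p))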
PORT,\n },\n \"contact_person\": [\n {\n \"contact_type\": \"technical\",\n \"given_name\": \"Test\",\n \"sur_name\": \"Testsson\",\n \"email_address\": \"test.testsson@test.com\"\n },\n ],\n \"xmlsec_binary\": xmlsec_path,\n \"name_form\": NAME_FORMAT_URI,\n \"logger\": {\n \"rotating\": {\n \"filename\": \"sp.log\",\n \"maxBytes\": 100000,\n \"backupCount\": 5,\n },\n \"loglevel\": \"debug\",\n }\n}\n\n","sub_path":"example/etc/sp_conf.py","file_name":"sp_conf.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"633637666","text":"from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter\n\n\nclass Command(BaseXpressDemocracyClubCsvImporter):\n council_id = \"SHO\"\n addresses_name = (\n \"2021-04-16T14:09:22.768202/S Holland Democracy_Club__06May2021.tsv\"\n )\n stations_name = \"2021-04-16T14:09:22.768202/S Holland Democracy_Club__06May2021.tsv\"\n elections = [\"2021-05-06\"]\n csv_delimiter = \"\\t\"\n\n def address_record_to_dict(self, record):\n uprn = record.property_urn.strip().lstrip(\"0\")\n\n if uprn in [\n \"100030894500\", # BUSLEY, SOUTH DROVE, SPALDING\n \"100030888845\", # AMOW, MIDDLE MARSH ROAD, MOULTON MARSH, SPALDING\n \"100030887883\", # 1 FARM COTTAGE, GEDNEY DYKE, SPALDING\n \"100032311625\", # 2 FARM COTTAGE, GEDNEY DYKE, SPALDING\n ]:\n return None\n\n if record.addressline6 in [\n \"PE12 8LT\",\n \"PE12 0BL\",\n \"PE12 8SE\",\n \"PE12 8BP\",\n \"PE6 0LR\",\n \"PE12 8EP\",\n \"PE12 0HY\",\n \"PE11 3TB\",\n \"PE11 3NB\",\n \"PE12 9QJ\",\n \"PE11 4JH\",\n \"PE12 0XA\",\n \"PE12 6SD\",\n \"PE12 6DN\",\n \"PE12 7FG\",\n \"PE12 0HZ\",\n ]:\n return None\n\n return super().address_record_to_dict(record)\n\n def station_record_to_dict(self, record):\n\n # Scout & Guide Headquarters Park Lane Long Sutton Spalding PE12 9DH\n if record.polling_place_id == \"3793\":\n record = record._replace(polling_place_postcode=\"\")\n\n return super().station_record_to_dict(record)\n","sub_path":"polling_stations/apps/data_importers/management/commands/import_south_holland.py","file_name":"import_south_holland.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"54263800","text":"\n\"\"\"\nSome useful functions\n\"\"\"\n\nimport inspect\nfrom pprint import pprint\n\ndef add_css_class(css_string, css_class):\n css_classes = css_string.split(' ')\n try:\n for cls in css_class:\n if not cls in css_classes:\n css_classes.append(cls)\n except TypeError:\n if not css_class in css_classes:\n css_classes.append(css_class)\n\n return ' '.join(css_classes)\n\ndef remove_css_class(css_string, css_class):\n css_classes = css_string.split(' ')\n try:\n css_classes.remove(css_class)\n except ValueError:\n pass\n return ' '.join(css_classes)\n\n\ndef update_if_not_defined(dct, key, default):\n \"\"\"\n performs dct.update({key : default}) if key not in dct or dct[key] is None\n \"\"\"\n val = dct.get(key, None)\n if val is None:\n dct.update({key:default})\n\n return dct\n\ndef debug(obj, out, label = None):\n print ('\\n\\n--------------------------------------------------')\n print (' Debug output')\n print ('--------------------------------------------------')\n print ('I am {}'.format(obj))\n print ('In method: {}'.format(inspect.stack()[1][3]))\n print ('Called from: {}'.format(inspect.stack()[2][3]))\n if label:\n print ('Now showing: {}'.format(label))\n pprint (out)\n 
print('---------------- End debug -----------------------\\n')\n\n\ndef get_model_verbose_name(model):\n try:\n return model.get_verbose_name()\n except AttributeError:\n return model._meta.verbose_name\n\nLIST_OF_ELEMENTS = [\n ('H', 'H (Wasserstoff)'),\n ('He', 'He (Helium)'),\n ('Li', 'Li (Lithium)'),\n ('Be', 'Be (Beryllium)'),\n ('B', 'B (Bor)'),\n ('C', 'C (Kohlenstoff)'),\n ('N', 'N (Stickstoff)'),\n ('O', 'O (Sauerstoff)'),\n ('F', 'F (Fluor)'),\n ('Ne', 'Ne (Neon)'),\n ('Na', 'Na (Natrium)'),\n ('Mg', 'Mg (Magnesium)'),\n ('Al', 'Al (Aluminium)'),\n ('Si', 'Si (Silicium)'),\n ('P', 'P (Phosphor)'),\n ('S', 'S (Schwefel)'),\n ('Cl', 'Cl (Chlor)'),\n ('Ar', 'Ar (Argon)'),\n ('K', 'K (Kalium)'),\n ('Ca', 'Ca (Calcium)'),\n ('Sc', 'Sc (Scandium)'),\n ('Ti', 'Ti (Titan)'),\n ('V', 'V (Vanadium)'),\n ('Cr', 'Cr (Chrom)'),\n ('Mn', 'Mn (Mangan)'),\n ('Fe', 'Fe (Eisen)'),\n ('Co', 'Co (Cobalt)'),\n ('Ni', 'Ni (Nickel)'),\n ('Cu', 'Cu (Kupfer)'),\n ('Zn', 'Zn (Zink)'),\n ('Ga', 'Ga (Gallium)'),\n ('Ge', 'Ge (Germanium)'),\n ('As', 'As (Arsen)'),\n ('Se', 'Se (Selen)'),\n ('Br', 'Br (Brom)'),\n ('Kr', 'Kr (Krypton)'),\n ('Rb', 'Rb (Rubidium)'),\n ('Sr', 'Sr (Strontium)'),\n ('Y', 'Y (Yttrium)'),\n ('Zr', 'Zr (Zirconium)'),\n ('Nb', 'Nb (Niob)'),\n ('Mo', 'Mo (Molybdän)'),\n ('Tc', 'Tc (Technetium)'),\n ('Ru', 'Ru (Ruthenium)'),\n ('Rh', 'Rh (Rhodium)'),\n ('Pd', 'Pd (Palladium)'),\n ('Ag', 'Ag (Silber)'),\n ('Cd', 'Cd (Cadmium)'),\n ('In', 'In (Indium)'),\n ('Sn', 'Sn (Zinn)'),\n ('Sb', 'Sb (Antimon)'),\n ('Te', 'Te (Tellur)'),\n ('I', 'I (Iod)'),\n ('Xe', 'Xe (Xenon)'),\n ('Cs', 'Cs (Caesium)'),\n ('Ba', 'Ba (Barium)'),\n ('La', 'La (Lanthan)'),\n ('Ce', 'Ce (Cer)'),\n ('Pr', 'Pr (Praseodym)'),\n ('Nd', 'Nd (Neodym)'),\n ('Pm', 'Pm (Promethium)'),\n ('Sm', 'Sm (Samarium)'),\n ('Eu', 'Eu (Europium)'),\n ('Gd', 'Gd (Gadolinium)'),\n ('Tb', 'Tb (Terbium)'),\n ('Dy', 'Dy (Dysprosium)'),\n ('Ho', 'Ho (Holmium)'),\n ('Er', 'Er (Erbium)'),\n ('Tm', 'Tm (Thulium)'),\n ('Yb', 'Yb (Ytterbium)'),\n ('Lu', 'Lu (Lutetium)'),\n ('Hf', 'Hf (Hafnium)'),\n ('Ta', 'Ta (Tantal)'),\n ('W', 'W (Wolfram)'),\n ('Re', 'Re (Rhenium)'),\n ('Os', 'Os (Osmium)'),\n ('Ir', 'Ir (Iridium)'),\n ('Pt', 'Pt (Platin)'),\n ('Au', 'Au (Gold)'),\n ('Hg', 'Hg (Quecksilber)'),\n ('Tl', 'Tl (Thallium)'),\n ('Pb', 'Pb (Blei)'),\n ('Bi', 'Bi (Bismut)'),\n ('Po', 'Po (Polonium)'),\n ('At', 'At (Astat)'),\n ('Rn', 'Rn (Radon)'),\n ('Fr', 'Fr (Francium)'),\n ('Ra', 'Ra (Radium)'),\n ('Ac', 'Ac (Actinium)'),\n ('Th', 'Th (Thorium)'),\n ('Pa', 'Pa (Protactinium)'),\n ('U', 'U (Uran)'),\n ('Np', 'Np (Neptunium)'),\n ('Pu', 'Pu (Plutonium)'),\n ('Am', 'Am (Americium)'),\n ('Cm', 'Cm (Curium)'),\n ('Bk', 'Bk (Berkelium)'),\n ('Cf', 'Cf (Californium)'),\n ('Es', 'Es (Einsteinium)'),\n ('Fm', 'Fm (Fermium)'),\n ('Md', 'Md (Mendelevium)'),\n ('No', 'No (Nobelium)'),\n ('Lr', 'Lr (Lawrencium)'),\n ('Rf', 'Rf (Rutherfordium)'),\n ('Db', 'Db (Dubnium)'),\n ('Sg', 'Sg (Seaborgium)'),\n ('Bh', 'Bh (Bohrium)'),\n ('Hs', 'Hs (Hassium)'),\n ('Mt', 'Mt (Meitnerium)'),\n ('Ds', 'Ds (Darmstadtium)'),\n ('Rg', 'Rg (Roentgenium)'),\n ('Cn', 'Cn (Copernicium)'),\n ('Nh', 'Nh (Nihonium)'),\n ('Fl', 'Fl (Flerovium)'),\n ('Mc', 'Mc (Moscovium)'),\n ('Lv', 'Lv (Livermorium)'),\n ('Ts', 'Ts (Tenness)'),\n ('Og', 'Og (Oganesson)')\n]\n","sub_path":"rai/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"273826481","text":"from selenium 
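Since LIST_OF_ELEMENTS above is a Django-style choices list of (value, label) pairs, a symbol lookup table is one call away (a usage sketch, not part of rai/utils.py):

ELEMENT_LABELS = dict(LIST_OF_ELEMENTS)  # maps 'Fe' -> 'Fe (Eisen)'
print(ELEMENT_LABELS['Fe'])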
import webdriver as wb\nimport pandas as pd\nfrom tqdm import tqdm\nimport time\nimport json\n\nwebD = wb.Chrome('C:\\\\Users\\\\Admin\\\\Downloads\\\\chromedriver.exe')\nwebD.get('https://tiki.vn/may-anh/c28806?src=c.1801.hamburger_menu_fly_out_banner')\n\nhrefLinkList = []\ncondition = True\n\nwhile condition:\n    time.sleep(3)\n    allInfo = webD.find_elements_by_class_name('product-item ')\n    for eEle in allInfo:\n        temp = eEle.find_element_by_tag_name('a')\n        hrefLink = temp.get_property('href')\n        hrefLinkList.append(hrefLink)\n    print(len(hrefLinkList))\n    try:\n        webD.find_elements_by_class_name('next')[-1].click()\n    except:\n        condition = False\n\ndata = []\n\n#list brand\ndell = 1\nasus = 2\nacer = 3\nmsi = 4\nlenovo = 5\nlg = 6\napple = 7\nhp = 8\nsamsung = 9\nsony = 10\nphilips = 11\ncasper = 12\ntcl = 13\nsharp = 14\npanasonic = 15\noppo = 16\nnokia = 17\nvivo = 18\nitel = 19\nvsmart = 21\nrealme = 22\nxiaomi = 23\naqua = 24\nhitachi = 25\ntoshiba = 26\nelectrolux = 27\nwhirlpool = 28\nsanaky = 29\nmitsubishi_electric = 30\ncanon = 31\nfujifilm = 32\nsanco = 33\nasanzo = 34\nfpt = 35\nhuawei = 36\nkhac = 37\n\n#list category\ntivi = 1\ntulanh = 2\nmaygiat = 3\nmayanh = 4\nlaptop = 5\ndienthoai = 6\n\n#list company\nnguyenkim = 1\nphongvu = 2\nhc = 3\nsendo = 4\ntiki = 5\nlazada = 6\nshopee = 7\nmediasmart = 8\n\nfor i in tqdm(hrefLinkList):\n    webD.get(i)\n    time.sleep(1)\n    try:\n        productName = webD.find_element_by_class_name('title').text\n        price = webD.find_element_by_class_name('product-price__current-price').text\n        linkProductImage = webD.find_element_by_xpath('//*[@id=\"__next\"]/div[1]/main/div[4]/div/div[1]/div[1]/div[1]/div/div/img')\n        src = linkProductImage.get_property('src')\n        # dieukien = True\n        # list = webD.find_elements_by_tag_name('tr')\n        # while dieukien:\n        #     for item in list:\n        #         itemTemp = item.find_elements_by_tag_name('td')\n        #         itemChild = itemTemp[0].text.lower()\n        #         if itemChild == 'thương hiệu':\n        #             supplier = itemTemp[1].text.lower()\n        #             dieukien = False\n        #         else:\n        #             continue\n        try:\n            # take the anchor's text, lowercased, so the brand comparisons below can match;\n            # comparing the raw WebElement to strings would always fall through to khac\n            supplier = webD.find_element_by_xpath('//*[@id=\"__next\"]/div[1]/main/div[4]/div/div[3]/div[1]/div/span/h6/a').text.lower()\n        except:\n            try:\n                supplier = webD.find_element_by_xpath('//*[@id=\"__next\"]/div[1]/main/div[4]/div/div[3]/div[1]/div[1]/span/h6/a').text.lower()\n            except:\n                break\n\n        supID = khac\n        if supplier == 'dell':\n            supID = dell\n        elif supplier == 'asus':\n            supID = asus\n        elif supplier == 'acer':\n            supID = acer\n        elif supplier == 'msi':\n            supID = msi\n        elif supplier == 'lenovo':\n            supID = lenovo\n        elif supplier == 'lg':\n            supID = lg\n        elif supplier == 'apple':\n            supID = apple\n        elif supplier == 'hp':\n            supID = hp\n        elif supplier == 'sony':\n            supID = sony\n        elif supplier == 'samsung':\n            supID = samsung\n        elif supplier == 'philips':\n            supID = philips\n        elif supplier == 'casper':\n            supID = casper\n        elif supplier == 'tcl':\n            supID = tcl\n        elif supplier == 'sharp':\n            supID = sharp\n        elif supplier == 'oppo':\n            supID = oppo\n        elif supplier == 'nokia':\n            supID = nokia\n        elif supplier == 'vivo':\n            supID = vivo\n        elif supplier == 'itel':\n            supID = itel\n        elif supplier == 'vsmart':\n            supID = vsmart\n        elif supplier == 'realme':\n            supID = realme\n        elif supplier == 'xiaomi':\n            supID = xiaomi\n        elif supplier == 'panasonic':\n            supID = panasonic\n        elif supplier == 'aqua':\n            supID = aqua\n        elif supplier == 'hitachi':\n            supID = hitachi\n        elif supplier == 'toshiba':\n            supID = toshiba\n        elif supplier == 'electrolux':\n            supID = electrolux\n        elif supplier == 'whirlpool':\n            supID = whirlpool\n        elif supplier ==
'sanaky':\n            supID = sanaky\n        elif supplier == 'mitsubishi electric':\n            supID = mitsubishi_electric\n        elif supplier == 'canon':\n            supID = canon\n        elif supplier == 'fujifilm':\n            supID = fujifilm\n        elif supplier == 'sanco':\n            supID = sanco\n        elif supplier == 'asanzo':\n            supID = asanzo\n        elif supplier == 'fpt':\n            supID = fpt\n        elif supplier == 'huawei':\n            supID = huawei\n\n        tempJ = {'productName': productName,\n                 'price': price,\n                 'CategoryID': mayanh,\n                 'CompanyID': tiki,\n                 'hyperlink': i,\n                 'LinkOfProductImage': src,\n                 'SupplierID': supID\n                 }\n        data.append(tempJ)\n    except:\n        continue\n\npd.DataFrame(data)\nprint(len(data))\n#Writing to JSON File\n\ndef writeToJSONFile(path, fileName, data):\n    filePathNameWExt = './' + path + '/' + fileName + '.json'\n    with open(filePathNameWExt, 'w') as fp:\n        json.dump(data, fp)\n\npath = './'\nfileName = 'MayAnh_Tiki'\n\n\nwriteToJSONFile(path, fileName, data)\n\nfor i in data:\n    print(i)","sub_path":"PycharmProjects/CrawlOfficial/TIKI/crawl_TIKI_MayAnh.py","file_name":"crawl_TIKI_MayAnh.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"53884253","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# sample data\n\"\"\"\nm_a_id = 2330981 # GFRIEND's 3rd album\ntitle = \"오늘부터 우리는 (Me Gustas Tu)\"\n\"\"\"\n\ndef get_melon_song_id(m_a_id, title):\n    url = \"http://www.muse.com/album/detail.htm?albumId=%s\"%(str(m_a_id))\n    res = requests.get(url)\n    soup = BeautifulSoup(res.text, 'html.parser')\n    song_list = list(soup.find_all(\"a\", class_=\"btn btn_icon_detail\"))\n    for a in song_list:\n        #print(a)\n        t0 = a.find(\"span\", class_=\"odd_span\").text\n        t = t0.split(\" 상세정보 페이지 이동\")[0]\n        if(t==title):\n            s_id = str(a).split(\"'\")[1]\n            print(\"title:\",t , \"\\tsong_id:\", s_id)\n            return s_id\n\n\"\"\"\nz = get_melon_song_id(2663668, \"시간을 달려서 (Rough)\")\nprint(z)\n\"\"\"","sub_path":"seolab/get_melon_song_id.py","file_name":"get_melon_song_id.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"248311528","text":"# Copyright 2012,2013 James McCauley\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCreates a spanning tree.\n\nThis component uses the discovery component to build a view of the network\ntopology, constructs a spanning tree, and then disables flooding on switch\nports that aren't on the tree by setting their NO_FLOOD bit. The result\nis that topologies with loops no longer turn your network into useless\nhot packet soup.\n\nThis component is inspired by and roughly based on the description of\nGlenn Gibb's spanning tree module for NOX:\n  http://www.openflow.org/wk/index.php/Basic_Spanning_Tree\n\nNote that this does not have much of a relationship to Spanning Tree\nProtocol.
They have similar purposes, but this is a rather different way\nof going about it.\n\"\"\"\n#import sys; sys.path.append('/Users/fenghhk/a/Dev/pox/')\n\nfrom pox.core import core\nimport pox.openflow.libopenflow_01 as of\nfrom pox.lib.revent import *\nfrom collections import defaultdict, namedtuple\nfrom pox.openflow.discovery import Discovery, Link\nfrom pox.lib.util import dpid_to_str, str_to_dpid\nfrom pox.lib.recoco import Timer\nimport time\n\nlog = core.getLogger()\n\ntestadj = []\ndef find_path_test():\n testadj.append(Link(1,0,2,0))\n testadj.append(Link(1,0,4,0))\n testadj.append(Link(2,0,3,0))\n testadj.append(Link(2,0,4,0))\n testadj.append(Link(3,0,4,0))\n testadj.append(Link(5,0,8,0))\n \n#find_path_test() \n\n\n# Keep a list of previous port states so that we can skip some port mods\n# If other things mess with port states, these may not be correct. We\n# could also refer to Connection.ports, but those are not guaranteed to\n# be up to date.\n_prev = defaultdict(lambda : defaultdict(lambda : None))\n\n# If True, we set ports down when a switch connects\n_noflood_by_default = False\n\n# If True, don't allow turning off flood bits until a complete discovery\n# cycle should have completed (mostly makes sense with _noflood_by_default).\n_hold_down = False \n\n \nclass PathSetsGenerator(object):\n # Collect the conns for l2_all_entries to speedup the inverted indexing\n dpid_conn_dict = dict();\n\n # (src, dest) -> set((dpid1, port1), (dpid2, port2), ...), )\n path_dict = dict()\n\n # Expose the link matrix to let l2_all_entries easily find output ports\n link_matrix = defaultdict(lambda:defaultdict(lambda:[]))\n \n \n def __init__ (self):\n \"\"\n def start_gen ():\n core.openflow.addListeners(self)\n core.openflow_discovery.addListeners(self)\n #core.openflow.addListenerByName(\"ConnectionUp\", _handle_ConnectionUp)\n #core.openflow_discovery.addListenerByName(\"LinkEvent\", _handle_LinkEvent)\n log.debug(\"Path Sets Generator component ready\")\n\n core.call_when_ready(start_gen, \"openflow_discovery\")\n\n def _calc_all_paths(self):\n\n switches = set()\n # Add all links and switches\n for l in core.openflow_discovery.adjacency:\n #for l in testadj:\n self.link_matrix[int(l.dpid1)][int(l.dpid2)] = l\n self.link_matrix[int(l.dpid2)][int(l.dpid1)] = self.flip(l)\n switches.add(int(l.dpid1))\n switches.add(int(l.dpid2))\n\n log.debug('connected switches: ' + str(switches))\n # for row in self.link_matrix:\n # for col in self.link_matrix:\n # if isinstance(self.link_matrix[row][col], Link) :\n # print 1,\n # else:\n # print 0,\n # print ''\n\n for srcid in self.link_matrix:\n for dstid in self.link_matrix:\n if srcid == dstid: continue\n\n if (srcid, dstid) not in self.path_dict:\n self.path_dict[(srcid, dstid)] = set()\n\n self.find_path(srcid, dstid, self.path_dict[(srcid, dstid)])\n\n def flip (self, link):\n return Discovery.Link(link[2],link[3], link[0],link[1])\n\n def find_path(self, src, dst, res_set):\n visited = set()\n visited.add(src)\n path = [src]\n\n log.debug('searching paths from ' + str(src) + ' to ' + str(dst))\n self.find_path_helper(src, dst, visited, path, res_set)\n log.debug(str(src) + ' -> ' + str(dst) + ': ' + str(res_set))\n\n return path\n\n def find_path_helper(self, src, dst, visited, path, res_set):\n #import pdb; pdb.set_trace()\n if len(path) > len(self.link_matrix):\n log.debug(str(path) + ' : lost in recursion, stop it')\n return \n\n if len(path) > 0 and src == dst:\n log.debug('find path: ' + str(path))\n res_set.add(tuple(path[:]))\n else:\n 
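Stripped of the POX specifics, the recursive search in find_path_helper is a textbook all-simple-paths DFS; a self-contained sketch over a plain adjacency dict (a hypothetical helper, mirroring the find_path_test topology) is:

def all_simple_paths(adj, src, dst, path=None):
    # adj maps node -> iterable of neighbours
    path = [src] if path is None else path + [src]
    if src == dst:
        yield tuple(path)
        return
    for nxt in adj[src]:
        if nxt not in path:  # keep paths simple: never revisit a node
            yield from all_simple_paths(adj, nxt, dst, path)

adj = {1: [2, 4], 2: [1, 3, 4], 3: [2, 4], 4: [1, 2, 3]}
print(sorted(all_simple_paths(adj, 1, 3)))
# [(1, 2, 3), (1, 2, 4, 3), (1, 4, 2, 3), (1, 4, 3)]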
for next_hop in self.link_matrix:\n if next_hop == src or next_hop in visited:\n continue\n # Ensure there is a conn between src and next_hop\n if not isinstance(self.link_matrix[src][next_hop], Link) :\n #log.debug('whats :' + str(adj[src][next_hop]))\n continue\n\n visited.add(next_hop)\n path.append(next_hop)\n self.find_path_helper(next_hop, dst, visited, path, res_set)\n visited.remove(next_hop)\n path.remove(next_hop)\n\n def _handle_ConnectionUp (self, event):\n\n self.dpid_conn_dict[event.dpid] = event.connection\n \n #log.debug(event.dpid) # dpid is an integer\n # When a switch connects, forget about previous port states\n _prev[event.dpid].clear()\n\n if _noflood_by_default:\n con = event.connection\n log.debug(\"Disabling flooding for %i ports on sw %s\",\n len(con.ports), dpid_to_str(event.dpid))\n for p in con.ports.itervalues():\n if p.port_no >= of.OFPP_MAX: continue\n _prev[con.dpid][p.port_no] = False\n pm = of.ofp_port_mod(port_no=p.port_no,\n hw_addr=p.hw_addr,\n config = of.OFPPC_NO_FLOOD,\n mask = of.OFPPC_NO_FLOOD)\n con.send(pm)\n #_invalidate_ports(con.dpid)\n\n #if _hold_down:\n # t = Timer(core.openflow_discovery.send_cycle_time + 1, _update_tree,\n # kw={'force_dpid':event.dpid})\n\n\n def _handle_LinkEvent (self, event):\n # When links change, update spanning tree\n # TODO: should seperate the event into two kinds: linkup and linkdown\n # when it's linkup, recalc all;\n # when it's linkdown, just remove all paths containing the link\n\n self._calc_all_paths()\n\n # (dp1,p1),(dp2,p2) = event.link.end\n # if _prev[dp1][p1] is False:\n # if _prev[dp2][p2] is False:\n # # We're disabling this link; who cares if it's up or down?\n # #log.debug(\"Ignoring link status for %s\", event.link)\n # return\n\ndef launch (no_flood = False, hold_down = False):\n global _noflood_by_default, _hold_down\n if no_flood is True:\n _noflood_by_default = True\n if hold_down is True:\n _hold_down = True\n\n core.registerNew(PathSetsGenerator)\n","sub_path":"pox-ext/path_sets_gen.py","file_name":"path_sets_gen.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"555065504","text":"import sys\nsys.stdin = open('input.txt')\n\n\ndef quick_sort(array, start, end):\n if start >= end:\n return\n\n pivot = start\n left = start + 1\n right = end\n\n while left <= right:\n while left <= end and array[pivot] >= array[left]:\n left += 1\n while right > start and array[pivot] <= array[right]:\n right -= 1\n if left > right:\n array[pivot], array[right] = array[right], array[pivot]\n else:\n array[left], array[right] = array[right], array[left]\n\n quick_sort(array, start, right-1)\n quick_sort(array, right+1, end)\n\nT = int(input())\n\nfor tc in range(1, T+1):\n n = int(input())\n array = list(map(int, input().split()))\n quick_sort(array, 0, len(array)-1)\n # print(f'#{tc} {array[n//2]}')\n print(f'#{tc} {array}')","sub_path":"1007/L5205_퀵정렬/임건호.py","file_name":"임건호.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"412836107","text":"import os\nimport sys\nimport mimetypes\nimport email\nfrom email import policy\nfrom email.parser import BytesParser\nfrom fpdf import FPDF\npdf = FPDF()\n\noutput_count = 0\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nod=__location__+'/attach'\noPdf=__location__+'/pdf'\nos.path.exists(od) or os.makedirs(od)\nos.path.exists(oPdf) 
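A quick sanity check of the in-place quick_sort from the record above against the built-in sort (a sketch assuming quick_sort is in scope as defined there):

import random

data = [random.randint(0, 100) for _ in range(25)]
expected = sorted(data)
quick_sort(data, 0, len(data) - 1)
assert data == expected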
or os.makedirs(oPdf)\nwith open(__location__+'/mail.eml', 'rb') as fp:\n msg = BytesParser(policy=policy.default).parse(fp)\nwith open(os.path.join(oPdf, 'mail.txt'), \"w\") as txtOut:\n txtOut.write('To: {}\\n'.format(msg['to']))\n txtOut.write('From: {}\\n'.format(msg['from']))\n txtOut.write('Subject: {}\\n'.format(msg['subject']))\n simpleBody = msg.get_body(preferencelist=('plain', 'html'))\n txtOut.write('\\n')\n txtOut.write(''.join(simpleBody.get_content().splitlines(keepends=True)))\n txtOut.write('Attachments:\\n')\n for attachment in msg.iter_attachments():\n output_filename = attachment.get_filename()\n if output_filename:\n output_count += 1\n txtOut.write('Attachment {}: {}\\n'.format(output_count,output_filename))\n with open(os.path.join(od, output_filename), \"wb\") as of:\n of.write(attachment.get_payload(decode=True))\n if output_count == 0:\n txtOut.write(\"No attachment found\")\n\npdf = FPDF()\npdf.add_page()\npdf.set_font(\"Courier\", size = 11)\nfile = open(os.path.join(oPdf, 'mail.txt'))\nfor g in file:\n pdf.cell(200, 10, txt = g, ln = 1, align = 'L')\npdf.output(os.path.join(oPdf,'PDF.pdf'))","sub_path":"Python/test/email/emlParser.py","file_name":"emlParser.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"353607994","text":"import re\nimport os\nimport sys\nimport glob\nimport urllib.request\n#findall \nstr='''Window\nUnix\nLinux\nSolaris'''\n\np=re.compile('^.+',re.M)\nprint(p.findall(str))\n\np=re.compile('^.+',re.S)\nresult=p.search(str)\nprint(result)\n\nm = re.match(r\"(?P\\w+) (?P\\w+)\",\n\"Malcolm Reynolds\")\nprint(m.group('first_name', 'last_name'))\nprint(m.groups())\nprint(m.groupdict())\nm = re.match(r\"(\\d+)\\.?(\\d+)?\", \"24.25\")\nprint(m.groups())\nprint(m.groups(0))\n\np=re.compile(\".+:\")\nm=p.search(\"http://google.com\")\nprint(m.group())\n\np=re.compile(\".+(?=:)\")\nm=p.search(\"http://google.com\")\nprint(m.group())\n\nos.chdir(\"c:\\\\\")\ncurrent=os.getcwd()\ns=glob.glob(\"*\")\ns.sort()\nprint(s)\np = re.compile('.*[.](?!bat$|exe$).*$')\nprint(p)\n\np = re.compile(\"(?<=abc)def\")\nm = p.search(\"abcdef\")\nprint(m.group())\n\nm = re.search('(?<=-)\\w+', 'spam-egg') \nprint(m.group())\n\n\nemail = \"tony@tiremove_thisger.net\"\nm = re.search(\"remove_this\", email)\nresult = email[:m.start()] + email[m.end():]\nprint(result)\n\np=re.compile(\"(?<=)\\w+\")","sub_path":"final2015-10-26/final2015-10-26/final2015_10_26.py","file_name":"final2015_10_26.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"391034881","text":"import os\nimport sys\n\nimport numpy as np\n\nfrom ocpmodels.trainers import ForcesTrainer\n\nif __name__ == \"__main__\":\n task = {\n \"dataset\": \"trajectory\",\n \"description\": \"Regressing to binding energies for an MD trajectory of CO on Cu\",\n \"labels\": [\"potential energy\"],\n \"metric\": \"mae\",\n \"type\": \"regression\",\n \"grad_input\": \"atomic forces\",\n }\n\n model = {\n \"name\": \"schnet\",\n \"hidden_channels\": 128,\n \"num_filters\": 128,\n \"num_interactions\": 3,\n \"num_gaussians\": 200,\n \"cutoff\": 6.0,\n }\n\n dataset = {\n \"src\": \"data/data/2020_06_03_rattle_emt\",\n \"traj\": \"COCu_emt_5images.traj\",\n \"train_size\": 5,\n \"val_size\": 0,\n \"test_size\": 0,\n \"normalize_labels\": True,\n }\n\n optimizer = {\n \"batch_size\": 5,\n \"lr_gamma\": 0.1,\n \"lr_initial\": 0.001,\n 
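To make the negative-lookahead pattern from the regex snippet above concrete, here is what it keeps and drops on a few made-up file names:

import re

p = re.compile(r'.*[.](?!bat$|exe$).*$')
names = ['autoexec.bat', 'setup.exe', 'notes.txt', 'data.csv']
print([n for n in names if p.match(n)])  # ['notes.txt', 'data.csv']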
\"lr_milestones\": [100, 125],\n \"max_epochs\": 200,\n \"warmup_epochs\": 50,\n \"warmup_factor\": 0.2,\n \"force_coefficient\": 10,\n }\n\n trainer = ForcesTrainer(\n task=task,\n model=model,\n dataset=dataset,\n optimizer=optimizer,\n identifier=\"schnet-debug\",\n print_every=1,\n is_debug=False,\n seed=1,\n )\n\n trainer.train()\n predictions = trainer.predict(dataset, verbose=False, batch_size=5)\n print(predictions[\"energy\"])\n","sub_path":"scripts/run_cu_md_schnet.py","file_name":"run_cu_md_schnet.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"619771365","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n'''\nNB: Surprisingly, we don't need any special cleanup for the `mount` operations\n performed by `build` and `clone_mounts` -- it appears that subvolume\n deletion, as performed by `subvolume_garbage_collector.py`, implicitly\n lazy-unmounts any mounts therein.\n'''\nimport json\nimport os\n\nfrom dataclasses import dataclass\nfrom typing import Mapping, NamedTuple\n\nfrom fs_image.subvol_utils import Subvol\nfrom fs_image.find_built_subvol import find_built_subvol\n\nfrom fs_image.compiler import procfs_serde\nfrom fs_image.compiler.requires_provides import (\n ProvidesDoNotAccess, require_directory\n)\n\nfrom .common import coerce_path_field_normal_relative, ImageItem, LayerOpts\nfrom .mount_utils import META_MOUNTS_DIR, MOUNT_MARKER, ro_rbind_mount\n\n\nclass _BuildSource(NamedTuple):\n type: str\n # This is overloaded to mean different things depending on `type`.\n source: str\n\n def to_path(\n self, *, target_to_path: Mapping[str, str], subvolumes_dir: str,\n ) -> str:\n if self.type == 'layer':\n out_path = target_to_path.get(self.source)\n if out_path is None:\n raise AssertionError(\n f'MountItem could not resolve {self.source}'\n )\n subvol = find_built_subvol(out_path, subvolumes_dir=subvolumes_dir)\n # If we allowed mounting a layer that has other mounts inside,\n # it would force us to support nested mounts. 
We don't want to\n # do this (yet).\n if os.path.exists(subvol.path(META_MOUNTS_DIR)):\n raise AssertionError(\n f'Refusing to mount {subvol.path()} since that would '\n 'require the tooling to support nested mounts.'\n )\n return subvol.path()\n elif self.type == 'host':\n return self.source\n else: # pragma: no cover\n raise AssertionError(\n f'Bad mount source \"{self.type}\" for {self.source}'\n )\n\n\n@dataclass(init=False, frozen=True)\nclass MountItem(ImageItem):\n mountpoint: str\n build_source: _BuildSource\n runtime_source: str\n is_directory: bool\n\n @classmethod\n def customize_fields(cls, kwargs):\n layer_opts = kwargs.pop('layer_opts', None)\n target = kwargs.pop('target')\n cfg = kwargs.pop('mount_config')\n assert (target is None) ^ (cfg is None), \\\n f'Exactly one of `target` or `mount_config` must be set in {kwargs}'\n if cfg is not None:\n cfg = cfg.copy() # We must not mutate our input!\n else:\n with open(os.path.join(target, 'mountconfig.json')) as f:\n cfg = json.load(f)\n\n default_mountpoint = cfg.pop('default_mountpoint', None)\n if kwargs.get('mountpoint') is None: # Missing or None => use default\n kwargs['mountpoint'] = default_mountpoint\n if kwargs['mountpoint'] is None:\n raise AssertionError(f'MountItem {kwargs} lacks mountpoint')\n coerce_path_field_normal_relative(kwargs, 'mountpoint')\n\n kwargs['is_directory'] = cfg.pop('is_directory')\n\n kwargs['build_source'] = _BuildSource(**cfg.pop('build_source'))\n if kwargs['build_source'].type == 'host' and not (\n kwargs['from_target'] in layer_opts.allowed_host_mount_targets\n or kwargs['from_target'].startswith('//fs_image/compiler/test')\n ):\n raise AssertionError(\n 'Host mounts cause containers to be non-hermetic and '\n 'fragile, so they must be located under one of '\n f'{layer_opts.allowed_host_mount_targets} '\n 'to enable close review by the owners of `fs_image`.'\n )\n\n # This is supposed to be the run-time equivalent of `build_source`,\n # but for us it's just an opaque JSON blob that the runtime wants.\n # Hack: We serialize this back to JSON since the compiler expects\n # items to be hashable, and the source WILL contain dicts.\n runtime_source = cfg.pop('runtime_source', None)\n # Future: once runtime_source grows a schema, use it here?\n if (runtime_source and runtime_source.get('type') == 'host'):\n raise AssertionError(\n f'Only `build_source` may specify host mounts: {kwargs}'\n )\n kwargs['runtime_source'] = json.dumps(runtime_source, sort_keys=True)\n\n assert cfg == {}, f'Unparsed fields in {kwargs} mount_config: {cfg}'\n\n def provides(self):\n # For now, nesting of mounts is not supported, and we certainly\n # cannot allow regular items to write inside a mount.\n yield ProvidesDoNotAccess(path=self.mountpoint)\n\n def requires(self):\n # We don't require the mountpoint itself since it will be shadowed,\n # so this item just makes it with default permissions.\n yield require_directory(os.path.dirname(self.mountpoint))\n\n def build(self, subvol: Subvol, layer_opts: LayerOpts):\n mount_dir = os.path.join(META_MOUNTS_DIR, self.mountpoint, MOUNT_MARKER)\n for name, data in (\n # NB: Not exporting self.mountpoint since it's implicit in the path.\n ('is_directory', self.is_directory),\n ('build_source', self.build_source._asdict()),\n ('runtime_source', json.loads(self.runtime_source)),\n ):\n procfs_serde.serialize(data, subvol, os.path.join(mount_dir, name))\n source_path = self.build_source.to_path(\n target_to_path=layer_opts.target_to_path,\n 
subvolumes_dir=layer_opts.subvolumes_dir,\n )\n # Support mounting directories and non-directories... This check\n # follows symlinks for the mount source, which seems correct.\n is_dir = os.path.isdir(source_path)\n assert is_dir == self.is_directory, self\n if is_dir:\n subvol.run_as_root([\n 'mkdir', '--mode=0755', subvol.path(self.mountpoint),\n ])\n else: # Regular files, device nodes, FIFOs, you name it.\n # `touch` lacks a `--mode` argument, but the mode of this\n # mountpoint will be shadowed anyway, so let it be whatever.\n subvol.run_as_root(['touch', subvol.path(self.mountpoint)])\n ro_rbind_mount(source_path, subvol, self.mountpoint)\n","sub_path":"fs_image/compiler/items/mount.py","file_name":"mount.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"417092599","text":"# Given a list of numbers and a number k, return whether any two numbers from the list add up to k.\n# For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.\n\n# Approach 1: Using one-pass HashTable [O(n) & O(n)]\n\nfrom typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], k: int) -> bool:\n Dict = {}\n\n for i in range(len(nums)):\n competent = k - nums[i]\n\n if competent in Dict:\n return True\n else:\n Dict[nums[i]] = i\n\n return False\n\n\nif __name__ == \"__main__\":\n nums = [10, 15, 3, 7]\n k = 17\n\n s = Solution()\n print(s.twoSum(nums, k))\n","sub_path":"python/0-Google/1-problem.py","file_name":"1-problem.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"640326700","text":"import numpy as np\nimport cv2\n\nym_per_pix = 30/720 # meters per pixel in y dimension\nxm_per_pix = 3.7/700 # meters per pixel in x dimension\n\n\ndef find_left_right_lane(img_topdown, img_size):\n bottom = img_topdown[img_size[1]//2:, :] / 255\n hist = np.sum(bottom, axis=0)\n n_cols = len(hist)\n left_lane = np.argmax(hist[:n_cols//2])\n right_lane = np.argmax(hist[n_cols//2:]) + n_cols//2\n return left_lane, right_lane\n \n \ndef find_lane_points(lane, img_rowed, h_window, v_window):\n initial_lane = lane\n curr_lane = lane\n\n lane_r = list()\n lane_c = list()\n\n for i in range(len(img_rowed)-1, -1, -1):\n rel_r_coords, c_coords = np.where(img_rowed[i])\n indices = np.where(np.abs(c_coords - curr_lane) < h_window / 2)\n lane_r.append(rel_r_coords[indices] + i*v_window)\n lane_c.append(c_coords[indices])\n\n if len(c_coords[indices]) > 0:\n curr_lane = np.mean(c_coords[indices])\n\n\n lane_r = np.concatenate(lane_r)\n lane_c = np.concatenate(lane_c)\n return lane_r, lane_c\n\n\ndef vehicle_center(left_fit, right_fit, img_size):\n base_row = img_size[1] - 1\n left_base = left_fit[0]*base_row**2 + left_fit[1]*base_row + left_fit[2]\n right_base = right_fit[0]*base_row**2 + right_fit[1]*base_row + right_fit[2]\n\n vehicle_center = img_size[0] / 2 # About 600\n lane_center = (left_base + right_base) / 2\n offset_pix = vehicle_center - lane_center\n return xm_per_pix * offset_pix\n\n\ndef lane_center(fit, img_size):\n base_row = img_size[1] - 1\n base_col = fit[0]*base_row**2 + fit[1]*base_row + fit[2]\n\n vehicle_center = img_size[0] / 2 # About 600\n offset_pix = base_col - vehicle_center\n return xm_per_pix * offset_pix\n\n \ndef pipeline_polyfit(img_topdown, img_size, n_windows=20, h_window=70):\n img_rowed = np.vsplit(img_topdown, n_windows)\n v_window = len(img_topdown) / n_windows\n 
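Since the indices stored in the twoSum dict above are never used, the same one-pass idea works with a plain set (an equivalent sketch, hypothetical name):

def has_pair_with_sum(nums, k):
    seen = set()
    for n in nums:
        if k - n in seen:
            return True
        seen.add(n)
    return False

print(has_pair_with_sum([10, 15, 3, 7], 17))  # True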
 \n    left_lane, right_lane = find_left_right_lane(img_topdown, img_size)\n    \n    left_laney, left_lanex = find_lane_points(left_lane, img_rowed, h_window, v_window)\n    right_laney, right_lanex = find_lane_points(right_lane, img_rowed, h_window, v_window)    \n\n    left_fit = np.polyfit(left_laney, left_lanex, 2)\n    right_fit = np.polyfit(right_laney, right_lanex, 2)\n    \n    return left_fit, right_fit\n\n    \ndef lane_curvature(laney, lanex, height=720):\n    ploty = np.linspace(0, height-1, num=height)# to cover same y-range as image\n    ploty = ploty[::-1]\n    y_eval = np.max(ploty)\n\n    # Fit new polynomials to x,y in world space\n    fit_cr = np.polyfit(laney*ym_per_pix, lanex*xm_per_pix, 2)\n    # Calculate the new radii of curvature\n    curverad = ((1 + (2*fit_cr[0]*y_eval*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])\n    # Now our radius of curvature is in meters\n    return curverad\n\n\ndef extract_line(fit, ploty):\n    fitx = fit[0]*ploty**2 + fit[1]*ploty + fit[2]\n    return ploty, fitx\n    ","sub_path":"polynomial_fit.py","file_name":"polynomial_fit.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"459472836","text":"#!/usr/bin/env python\n\nimport time\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\n\n\nclass RobotControl():\n\n    def __init__(self, robot_name=\"turtlebot\"):\n        rospy.loginfo(\"Robot Turtlebot...\")\n\n        # parameters\n        self.cmd = Twist()\n        self.ctrl_c = False\n\n        # rate and shutdown hook\n        self.rate = rospy.Rate(1)\n        self.rate10 = rospy.Rate(10)\n        rospy.on_shutdown(self.shutdownhook)\n\n        # cmd_vel publisher\n        cmd_vel_topic = '/cmd_vel'\n        self.vel_publisher = rospy.Publisher(\n            cmd_vel_topic, Twist, queue_size=1)\n\n    def publish_once_in_cmd_vel(self):\n        while not self.ctrl_c:\n            connections = self.vel_publisher.get_num_connections()\n            if connections > 0:\n                self.vel_publisher.publish(self.cmd)\n                break\n            else:\n                self.rate.sleep()\n\n    def shutdownhook(self):\n        self.stop_robot()  # was a bare stop_robot(), which would raise NameError on shutdown\n        self.ctrl_c = True\n\n    def stop_robot(self):\n        self.cmd.linear.x = 0.0\n        self.cmd.angular.z = 0.0\n        self.publish_once_in_cmd_vel()\n\n    def move_straight(self):\n        self.cmd.linear.x = 0.5\n        self.cmd.linear.y = 0\n        self.cmd.linear.z = 0\n        self.cmd.angular.x = 0\n        self.cmd.angular.y = 0\n        self.cmd.angular.z = 0\n        self.publish_once_in_cmd_vel()\n\n    def move_straight_time(self, motion, speed, time):\n        self.cmd.linear.y = 0\n        self.cmd.linear.z = 0\n        self.cmd.angular.x = 0\n        self.cmd.angular.y = 0\n        self.cmd.angular.z = 0\n\n        if motion == \"forward\":\n            self.cmd.linear.x = speed\n        elif motion == \"backward\":\n            self.cmd.linear.x = - speed\n\n        i = 0\n        while (i <= time):\n            self.vel_publisher.publish(self.cmd)\n            i += 0.1\n            self.rate10.sleep()\n\n        self.stop_robot()\n        s = \"Moved robot \" + motion + \" for \" + str(time) + \" seconds\"\n        return s\n\n    def turn(self, clockwise, speed, time):\n        self.cmd.linear.x = 0\n        self.cmd.linear.y = 0\n        self.cmd.linear.z = 0\n        self.cmd.angular.x = 0\n        self.cmd.angular.y = 0\n\n        if clockwise == \"clockwise\":\n            self.cmd.angular.z = -speed\n        else:\n            self.cmd.angular.z = speed\n\n        i = 0\n        while (i <= time):\n            self.vel_publisher.publish(self.cmd)\n            i += 0.1\n            self.rate10.sleep()\n\n        self.stop_robot()\n        s = \"Turned robot \" + clockwise + \" for \" + str(time) + \" seconds\"\n        return s\n\n\nif __name__ == '__main__':\n\n    robotcontrol_object = RobotControl()\n    try:\n        robotcontrol_object.move_straight()\n        time.sleep(4)\n        robotcontrol_object.stop_robot()\n\n    except
rospy.ROSInterruptException:\n pass\n","sub_path":"src/pp_single/my_spp_2/src/robot_control_class.py","file_name":"robot_control_class.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"542600610","text":"class Solution(object):\r\n def missingNumber(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n \"\"\"\r\n N = len(nums)\r\n sum = (1 + N) * N // 2\r\n for x in nums:\r\n sum -= x\r\n return sum\r\n\r\nnums = [0, 1, 3]\r\nMy = Solution()\r\nprint(My.missingNumber(nums))\r\n","sub_path":"src/MissingNumber/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"97338321","text":"import processing\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.decomposition import PCA\nfrom sklearn import metrics\nfrom sklearn.svm import SVC\nimport numpy as np\nimport random\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport itertools as it\nimport operator\nimport sys\nimport math\nimport csv\nimport time\n\n\ndef RFmodelScore(model, trainingdata, traininglabels, testingdata, testinglabels):\n model.fit(trainingdata, traininglabels.values.ravel())\n return model.score(testingdata, testinglabels)\n\ndef RandomForest(data_features, data_labels, randomforestmodel):\n\n folds = StratifiedKFold(n_splits=10)\n # m = RandomForestClassifier()\n\n scores = []\n\n for train_index, test_index in folds.split(data_features, data_labels):\n X_train, X_test, y_train, y_test = data_features.iloc[train_index], data_features.iloc[test_index], \\\n data_labels.iloc[train_index], data_labels.iloc[test_index]\n scores.append(100 * RFmodelScore(randomforestmodel,\n X_train, y_train, X_test, y_test))\n\n #print('these are the scores: ', scores)\n #print('mean:', np.mean(scores))\n return np.mean(scores)\n\n\nRF_STD_means = []\nRF_PCA_means = []\n\ndef mainRandomForestImplementation(randomforestmodel, linPCA, training_dataframe, pca_option):\n\n df = pd.read_csv(\"BreastCancerData.csv\")\n column_titles = None\n column_titles = processing.processData(df, column_titles)\n column_titles = np.array(column_titles)\n\n training_data = []\n final_validation = []\n\n processing.splitData(training_dataframe, .8,\n training_data, final_validation)\n\n training_data = np.array(training_data)\n final_validation = np.array(final_validation)\n\n features = []\n labels = []\n\n features, labels = processing.createFeatures_Labels(training_data)\n np.transpose(labels)\n\n features_data = None\n labels_data = None\n\n features_data, labels_data = processing.convertToDataFrame(\n features, labels, column_titles)\n\n if(pca_option == 'both'):\n\n #print('Random Forest model without PCA')\n RF_STD_means.append(RandomForest(\n features_data, labels_data, randomforestmodel))\n # print()\n\n features_data_PCA = processing.linearPCAReduction(\n features_data, linPCA)\n features_df_PCA = pd.DataFrame(\n features_data_PCA, columns=column_titles[1:(features_data_PCA.shape[1] + 1)])\n\n #print('Random Forest model with PCA')\n RF_PCA_means.append(RandomForest(\n features_df_PCA, labels_data, randomforestmodel))\n # print()\n\n elif(pca_option == 'yes'):\n\n features_data_PCA = processing.linearPCAReduction(\n features_data, linPCA)\n features_df_PCA = pd.DataFrame(\n features_data_PCA, 
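A classic alternative to the Gauss-sum trick in missingNumber above is the XOR variant (a sketch, hypothetical name): folding together every index and value leaves exactly the missing number.

def missing_number(nums):
    x = len(nums)  # start from n, then fold in every index and value
    for i, v in enumerate(nums):
        x ^= i ^ v
    return x

print(missing_number([0, 1, 3]))  # 2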
columns=column_titles[1:(features_data_PCA.shape[1] + 1)])\n\n #print('Random Forest model with PCA')\n RF_PCA_means.append(RandomForest(\n features_df_PCA, labels_data, randomforestmodel))\n\n else:\n\n RF_STD_means.append(RandomForest(\n features_data, labels_data, randomforestmodel))\n\n\ndef RandomForestSimulation(randomforestmodel, linPCA, training_dataframe, pca_option):\n\n print()\n if(pca_option == 'both'):\n\n number = 20\n print('Simulating random forest models...')\n start = time.time()\n for i in range(0, number):\n #print('random forest simulation number', i, 'finished')\n mainRandomForestImplementation(\n randomforestmodel, linPCA, training_dataframe, pca_option)\n\n end = time.time()\n print('Random Forest Simulation time:', end - start)\n\n m = None\n\n if np.mean(RF_STD_means) > np.mean(RF_PCA_means):\n m = 'STANDARD MODEL'\n else:\n m = 'PCA TRANSFORMED MODEL'\n\n count = 0\n\n for i in range(0, number):\n if(RF_PCA_means[i] > RF_STD_means[i]):\n count = count + 1\n\n print()\n print('number of times random forest pca transformed model had greater accuracy than random forest standard model: ',\n count, 'out of ', number)\n print('random forest variance in accuracy for standard model: ',\n np.var(RF_STD_means))\n print('random forest variance in accuracy for pca transform model: ',\n np.var(RF_PCA_means))\n print('random forest standard model accuracy on 10fold cross-val test data: ',\n np.mean(RF_STD_means))\n print('random forest pca transformed model accuracy on 10fold cross-val test data: ',\n np.mean(RF_PCA_means))\n print('maximum random forest accuracy on 10fold cross-val test data attained by', m, \"with an accuracy of: \", max(\n np.mean(RF_PCA_means), np.mean(RF_STD_means)), '%')\n print()\n\n elif(pca_option == 'yes'):\n\n number = 20\n print('Simulating PCA transformed random forest model...')\n start = time.time()\n for i in range(0, number):\n #print('random forest simulation number', i, 'finished')\n mainRandomForestImplementation(\n randomforestmodel, linPCA, training_dataframe, pca_option)\n\n end = time.time()\n print('Random Forest Simulation time:', end - start)\n\n print()\n print('pca transformed random forest model variance in accuracy: ',\n np.var(RF_PCA_means))\n print('pca transformed random forest model accuracy on 10fold cross-val test data: ',\n np.mean(RF_PCA_means), '%')\n print()\n\n else:\n\n number = 20\n print('Simulating standard random forest model...')\n start = time.time()\n for i in range(0, number):\n #print('random forest simulation number', i, 'finished')\n mainRandomForestImplementation(\n randomforestmodel, linPCA, training_dataframe, pca_option)\n\n end = time.time()\n print('Random Forest Simulation time:', end - start)\n\n print()\n print('standard random forest variance in accuracy: ',\n np.var(RF_STD_means))\n print('standard random forest accuracy on 10fold cross-val test data: ',\n np.mean(RF_STD_means))\n print()\n\n\nrf = RandomForestClassifier()\n\"\"\"\nstart = time.time()\nmainRandomForestImplementation(\n rf, processing.linear_pca, processing.overall_training_data)\nend = time.time()\nprint('Time of mainRandomForestImplementation', end - start)\n\"\"\"\n# RandomForestSimulation(rf, processing.linear_pca,\n# processing.overall_training_data)\n","sub_path":"RF.py","file_name":"RF.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"565421168","text":"from __future__ import division\nimport os\nimport time\nimport 
tensorflow as tf\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\nfrom Datasets import *\nimport torch\n# Real samples\nimport seaborn as sns\n\nnp.random.seed(1234)\ntf.set_random_seed(1234)\n#python CGAN.py --fig_name creative_with_missing_4_ordered --label --notebook--missing_mixt 4\n\n\"\"\"parsing and configuration\"\"\"\n\ndef parse_args():\n desc = \"Tensorflow implementation of GAN\"\n parser = argparse.ArgumentParser(description=desc)\n\n parser.add_argument('--nhidden', type=int, default=64, help='number of hidden neurons')\n parser.add_argument('--nlayers', type=int, default=4, help='number of hidden layers')\n parser.add_argument('--missing_mixt', type=int, default=0, help='label of the missing mixture')\n parser.add_argument('--label', action='store_true', default=False, help='if data labeled')\n parser.add_argument('--notebook', action='store_true', default=False, help='if you are running in python notebook')\n parser.add_argument('--fig_name', type=str, default='loss-fig', help='file name of loss plot')\n parser.add_argument('--niters', type=int, default=2500, help='number of iterations')\n parser.add_argument('--batch_size', type=int, default=400, help='batch size')\n parser.add_argument('--lrg', type=float, default=3e-3, help='lr for G')\n parser.add_argument('--lrd', type=float, default=9e-3, help='lr for D')\n parser.add_argument('--dataset', type=str, default='8Gaussians', help='dataset to use: 8Gaussians | 25Gaussians | swissroll | mnist')\n parser.add_argument('--scale', type=float, default=2., help='data scaling')\n parser.add_argument('--loss', type=str, default='gan', help='gan | wgan')\n parser.add_argument('--optim', type=str, default='SGD', help='optimizer to use')\n parser.add_argument('--minibatch_discriminate', action='store_true', default=False,help='minibatch_discriminate flag')\n\n return parser.parse_args()\n\n\ndef plot_loss(prefix, g_loss_list, d_loss_list, d_loss_fake_list, d_loss_real_list):\n f, ax = plt.subplots(1)\n g_loss_array = np.array(g_loss_list)\n d_loss_array = np.array(d_loss_list)\n d_loss_fake_array = np.array(d_loss_fake_list)\n d_loss_real_array = np.array(d_loss_real_list)\n if len(g_loss_list):\n ax.plot(g_loss_array[:, 0], g_loss_array[:, 1], color=\"k\", label='g_loss')\n ax.plot(d_loss_array[:, 0], d_loss_array[:, 1], color=\"r\", label='d_loss')\n ax.plot(d_loss_fake_array[:, 0], d_loss_fake_array[:, 1], color=\"g\", label='d_loss_fake_array')\n ax.plot(d_loss_real_array[:, 0], d_loss_real_array[:, 1], color=\"b\", label='d_loss_real_array')\n plt.title('GAN Metrics (2D Gaussians)')\n plt.xlabel('Step')\n plt.ylabel('Metrics')\n plt.legend()\n plt.savefig(prefix + 'metrics.png')\n\n\ndef draw_density(samps, scale, fname):\n fig = plt.figure(frameon=False, dpi= 160)\n fig.set_size_inches(5, 5)\n ax = fig.add_subplot(1, 1, 1)\n\n sns.kdeplot(samps[:, 0], samps[:, 1], shade=True, cmap='Greys', gridsize=200, n_levels=100)\n\n ax.set_xlim((-scale, scale))\n ax.set_ylim((-scale, scale))\n ax.set_axis_off()\n ax.set_aspect('equal')\n ax.spines['bottom'].set_color('0.5')\n ax.spines['top'].set_color('0.5')\n ax.spines['right'].set_color('0.5')\n ax.spines['left'].set_color('0.5')\n plt.savefig('figs/density/critic_' + fname, bbox_inches='tight', pad_inches=0)\n plt.close(fig)\n\neps = 1e-20\ndef inception_score(X):\n kl = X * ((X+eps).log()-(X.mean(0)+eps).log().expand_as(X))\n score = np.exp(kl.sum(1).mean())\n\n return score\ndef mode_score(X, Y):\n kl1 = X * ((X+eps).log()-(X.mean(0)+eps).log().expand_as(X))\n 
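A quick property check of the inception_score helper above (a sketch assuming the record's torch/numpy imports and its eps): with perfectly uniform predictions every row equals the mean distribution, the KL term is zero, so the score should come out as exp(0) = 1.

import torch

probs = torch.full((8, 4), 0.25)  # 8 samples, 4 classes, all uniform
score = inception_score(probs)
print(float(score))  # 1.0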
kl2 = X.mean(0) * ((X.mean(0)+eps).log()-(Y.mean(0)+eps).log())\n score = np.exp(kl1.sum(1).mean() - kl2.sum())\n\n return score\nclass GAN(object):\n model_name = \"GAN\" # name for checkpoint\n\n def __init__(self, sess, args, data, noise):\n self.sess = sess\n self.nhidden = args.nhidden\n self.nlayers = args.nlayers\n self.niters = args.niters\n self.batch_size = args.batch_size\n self.labeled = args.label\n self.notebook = args.notebook\n self.z_dim = 2\n self.label_dim=8\n self.x_dim = 2\n self.fig_name = args.fig_name\n self.missing_mixt = args.missing_mixt\n self.lrg = args.lrg\n self.lrd = args.lrd\n self.data = data\n self.noise = noise\n self.scale = args.scale\n self.minibatch_discriminate = args.minibatch_discriminate\n\n def minibatch(self, x, num_kernels=5, kernel_dim=3):\n net = tf.layers.dense(inputs=x, units=num_kernels * kernel_dim, name='minibatch')\n activation = tf.reshape(net, (-1, num_kernels, kernel_dim))\n diffs = tf.expand_dims(activation, 3) - \\\n tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)\n abs_diffs = tf.reduce_sum(tf.abs(diffs), 2)\n minibatch_features = tf.reduce_sum(tf.exp(-abs_diffs), self.z_dim)\n return tf.concat([x, minibatch_features], 1)\n\n def discriminator(self, x, is_training=True, reuse=False):\n with tf.variable_scope(\"discriminator\", reuse=reuse):\n\n net = tf.layers.dense(inputs=x, units=self.nhidden, activation=None, name='d_fc1')\n # net = tf.contrib.layers.batch_norm(net, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=is_training)\n net = tf.nn.relu(net, name='d_rl1')\n for i in range(self.nlayers - 2):\n net = tf.layers.dense(inputs=net, units=self.nhidden, activation=None, name='d_fc' + str(i + 2))\n # net = tf.contrib.layers.batch_norm(net, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=is_training)\n net = tf.nn.relu(net, name='d_rl' + str(i + 2))\n if self.minibatch_discriminate:\n net = self.minibatch(net)\n out_logit = tf.layers.dense(inputs=net, units=1, name='d_fc' + str(self.nlayers))\n\n out = tf.nn.sigmoid(out_logit)\n return out, out_logit\n\n def generator(self, z, is_training=True, reuse=False):\n with tf.variable_scope(\"generator\", reuse=reuse):\n net = tf.layers.dense(inputs=z, units=self.nhidden, activation=None, name='g_fc1')\n # net = tf.contrib.layers.batch_norm(net, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=is_training)\n net = tf.nn.relu(net, name='g_rl1')\n for i in range(self.nlayers - 2):\n net = tf.layers.dense(inputs=net, units=self.nhidden, activation=None, name='g_fc' + str(i + 2))\n # net = tf.contrib.layers.batch_norm(net, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=is_training)\n net = tf.nn.relu(net, name='g_rl' + str(i + 2))\n out = tf.layers.dense(inputs=net, units=self.z_dim, activation=None, name='g_fc' + str(self.nlayers))\n return out, z[:,2:10]\n\n def build_model(self):\n \"\"\" Graph Input \"\"\"\n # images\n self.inputs = tf.placeholder(tf.float32, [None, self.x_dim+self.label_dim], name='real_placeholder')\n self.z = tf.placeholder(tf.float32, [None, self.z_dim+self.label_dim], name='z_placeholder')\n\n # noises\n \"\"\" Loss Function \"\"\"\n # output of D for real images\n\n D_real, D_real_logits = self.discriminator(self.inputs, is_training=True, reuse=False)\n # output of D for fake images\n self.generates,self.fake_labels = self.generator(self.z, is_training=True, reuse=False)\n D_fake, D_fake_logits = 
self.discriminator(tf.concat([self.generates,self.fake_labels],1), is_training=True, reuse=True)\n        print(self.inputs[:, 2:10].shape)\n        # unused one-hot sampling sketch; note tf.one_hot expects integer indices,\n        # so the float draw below would need a cast before the op could build:\n        # uniform_one_hot_indices = np.random.uniform(0, 8, self.batch_size)\n        # tf.one_hot(uniform_one_hot_indices.astype(np.int32), 8)\n\n        # get loss for discriminator\n        self.d_loss_real = tf.reduce_mean(\n            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones_like(D_real)))\n        self.d_loss_fake = tf.reduce_mean(\n            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros_like(D_fake)))\n        # get loss for generator (non-saturating: fake samples labelled as real)\n        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones_like(D_fake)))\n        self.d_loss = self.d_loss_real + self.d_loss_fake\n\n        \"\"\" Training \"\"\"\n        # divide trainable variables into a group for D and a group for G\n        t_vars = tf.trainable_variables()\n        d_vars = [var for var in t_vars if 'd_' in var.name]\n        g_vars = [var for var in t_vars if 'g_' in var.name]\n\n        # optimizers\n        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n            self.d_optim = tf.train.AdamOptimizer(self.lrd, beta1=0.5).minimize(self.d_loss, var_list=d_vars)\n            self.g_optim = tf.train.AdamOptimizer(self.lrg, beta1=0.5).minimize(self.g_loss, var_list=g_vars)\n\n        \"\"\" Testing \"\"\"\n        # for test\n        self.fake_samples, self.fake_labels = self.generator(self.z, is_training=False, reuse=True)\n        self.fake_sigmoid, self.fake_logit = self.discriminator(tf.concat([self.fake_samples,self.fake_labels],1), is_training=False, reuse=True)\n        self.fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.fake_logit, labels=tf.ones_like(self.fake_sigmoid)))\n        self.fake_saliency = tf.gradients(self.fake_loss, self.fake_samples)[0]\n\n        \"\"\" Summary \"\"\"\n        d_loss_real_sum = tf.summary.scalar(\"d_loss_real\", self.d_loss_real)\n        d_loss_fake_sum = tf.summary.scalar(\"d_loss_fake\", self.d_loss_fake)\n        d_loss_sum = tf.summary.scalar(\"d_loss\", self.d_loss)\n        g_loss_sum = tf.summary.scalar(\"g_loss\", self.g_loss)\n\n        # final summary operations\n        self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])\n        self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])\n\n        # saver to save model\n        self.saver = tf.train.Saver()\n\n    def train(self, prefix, teacher=None, sess_teacher=None):\n        # initialize all variables\n        tf.global_variables_initializer().run()\n\n        # summary writer\n        self.writer = tf.summary.FileWriter(prefix)\n        g_loss_list, d_loss_list, d_loss_fake_list, d_loss_real_list = [], [], [], []\n\n        # graph inputs for visualizing training results\n        self.sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))\n        n_rows = int(np.ceil(self.niters / 500))\n\n        if self.notebook:\n            fig = plt.figure(figsize=(20, n_rows*4), dpi=160)\n            fig.subplots_adjust(hspace=0.4, wspace=0.4)\n        else:\n            fig, ax = plt.subplots(1, 1, figsize=(4, 4))\n\n        plot_idx = 0\n        # loop for epoch\n        start_time = time.time()\n        fake = []\n        real = []\n        for it in range(self.niters):\n\n            noise_batch = self.noise.next_batch(self.batch_size)\n            # update D network\n\n            real_batch, labels = self.data.next_batch(self.batch_size, missing_mixt=self.missing_mixt)\n\n            _, d_loss, d_loss_fake, d_loss_real, summary_str = self.sess.run(\n                [self.d_optim, self.d_loss, self.d_loss_fake, self.d_loss_real, self.d_sum],\n                feed_dict={self.inputs: torch.cat([real_batch,labels],1), self.z: noise_batch})\n\n            self.writer.add_summary(summary_str, it)\n            d_loss_list.append((it, d_loss))\n            d_loss_fake_list.append((it, d_loss_fake))\n            
d_loss_real_list.append((it, d_loss_real))\n\n            _, g_loss, summary_str = self.sess.run([self.g_optim, self.g_loss, self.g_sum],\n                                                   feed_dict={self.z: noise_batch})\n\n            self.writer.add_summary(summary_str, it)\n            g_loss_list.append((it, g_loss))\n\n            fake_batch, fake_saliency, fake_logit = self.sess.run(\n                [self.fake_samples, self.fake_saliency, self.fake_logit], feed_dict={self.z: noise_batch})\n\n            # display training status\n            if it % 100 == 0:\n                print(\"Iter: %d, d_loss: %.8f, g_loss: %.8f\" % (it, d_loss, g_loss))\n                plot_idx += 1\n                if self.notebook:\n                    ax = fig.add_subplot(n_rows, 5, plot_idx)\n                else:\n                    ax.clear()\n                print(noise_batch.shape, fake_batch.shape)\n                colors = ['b', 'g', 'r', 'y', 'c', '#aaff00', 'm', '#aabbcc']\n                colors_fake = []\n                for i in range(noise_batch.shape[0]):\n                    for j in range(0,8):\n                        if noise_batch[i,j+2]==1:\n                            colors_fake.append(colors[j])\n\n\n                ax.scatter(real_batch[:, 0], real_batch[:, 1], s=2, c='k')\n                ax.scatter(fake_batch[:, 0], fake_batch[:, 1], s=2, c=colors_fake, marker='o')\n                #ax.set_xlim(-self.scale,self.scale)\n                #ax.set_ylim(-self.scale, self.scale)\n                ax.set_title(\"It #{:d}: g = {:.4f}, d = {:.4f}\".format(it, g_loss, d_loss), fontsize=10)\n                plt.savefig(prefix + 'fig_%05d.png' % it, bbox_inches='tight')\n\n                if not self.notebook:\n                    plt.draw()\n                    plt.pause(1e-6)\n                    #plt.show()\n\n        fake = np.asarray(fake_batch)\n        real = np.asarray(real_batch)\n        draw_density(real, 2, 'real'+str(it))\n        draw_density(fake, 2, 'generated'+str(it))\n        plt.savefig('figs/'+self.fig_name, bbox_inches='tight')\n        #plt.show()\n        # self.saver.save(self.sess, self.dir_model + 'gan')\n        plot_loss('figs/'+self.fig_name+'-', g_loss_list, d_loss_list, d_loss_fake_list, d_loss_real_list)\n\n    def visualize_results(self, epoch):\n        z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))\n        samples = self.sess.run(self.fake_samples, feed_dict={self.z: z_sample})\n\n\n\"\"\"main\"\"\"\n\n\ndef main(args):\n    # open session\n    tf.reset_default_graph()\n\n    with tf.Session() as sess:\n        # declare instance for GAN\n        data = ToyDataset(distr=args.dataset, scale=args.scale, labeled=args.label)\n        noise = NoiseDataset(labeled=args.label)\n        gan = GAN(sess, args, data, noise)\n\n        # build graph\n        gan.build_model()\n\n        # train\n        prefix = 'figs/tf_default_lrd_' + str(args.lrd) + '_lrg_' + str(args.lrg) + '/'\n        if not os.path.exists(prefix):\n            os.makedirs(prefix)\n\n        gan.train(prefix)\n        print(\" [*] Training finished!\")\n\nif __name__ == '__main__':\n    np.set_printoptions(precision=4, suppress=True)\n    # parse arguments\n    args = parse_args()\n    print(args)\n    main(args)\n","sub_path":"2D-codes/CGAN.py","file_name":"CGAN.py","file_ext":"py","file_size_in_byte":14820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"431607364","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# _author:"sidalin"\n\nimport tornado.web\nfrom api_homePage import HomePage_api\nimport handler_all\nfrom handler_all import Speech\nimport listener\nfrom compare import Compare\nimport manger.config.global_variables\nimport manger.config.globalvar as gl\nimport time\nfrom log import log1\nfrom funcName import variable\n\n\n# home page\nclass HomePageHandler(tornado.web.RequestHandler):\n    def get(self):\n        self.startTime = time.time()\n\n        handler_all.changePage('homePage')\n        # turn the dialogue on\n        handler_all.changeFlag(True)\n\n        # fetch the six top-level home page categories from the backend\n        hp = HomePage_api()\n        self.home_date = hp.main()\n        # if the data came back, pass it to the page; otherwise pass an empty list\n        if self.home_date:\n            # self.render('xxx', 'xxx') switches pages and sends homeDate to the template\n            
self.render('HomePage.html', homeDate=self.home_date)\n        else:\n            self.home_date = []\n            self.render('HomePage.html', homeDate=self.home_date)\n        listener.instance.set_listener(\"view\", self.on_listen)\n\n    def on_listen(self, msg):\n        listen = msg['listen']\n        # API data returned for the six top-level categories\n        homepageDate = self.home_date\n        homepageList = []\n        d = {}\n        # homepageList: [{'menuName': 'module 1', 'menuId': '37'}, {'menuName': 'module 2', 'menuId': '38'}]\n        if homepageDate:\n            for h in homepageDate:\n                d['menuName'] = h['menuName']\n                d['menuId'] = h['id']\n                homepageList.append(d)\n                d = {}\n\n        # keep the entry with the highest speech similarity, e.g. best_compare = {'menuId': 37, 'compareValue': 0.6}\n        best_compare = None\n        for index, value in enumerate(homepageList):\n            if Compare().conmpare(value['menuName'], listen) > 0.4:\n                if best_compare == None:\n                    best_compare = {}\n                    # menuId of the matched feature plus its similarity score\n                    best_compare['menuId'], best_compare['compareValue'] = value['menuId'], Compare().conmpare(value['menuName'], listen)\n                elif best_compare['compareValue'] < Compare().conmpare(value['menuName'], listen):\n                    best_compare['menuId'], best_compare['compareValue'] = value['menuId'], Compare().conmpare(value['menuName'], listen)\n\n\n        if best_compare != None:\n            Speech().speech('好的请稍等')\n            global flag_speech\n            handler_all.changeFlag(False)\n            # write an entry to the log\n            runMsg = {'pageName': variable['homePage'], 'startTime': self.startTime, 'endTime': time.time()}\n            runMsg = str(runMsg)\n            log1.addLog('log.txt', runMsg)\n            gl.get_value('func').webview(gl.get_value('session'), gl.get_value('homepage_url')+'pepper/FUNCTION?menuId='+ str(best_compare['menuId']))\n\n\n        if Compare().conmpare(u'返回首页', listen) > 0.4:\n            Speech().speech('好的请稍等')\n            handler_all.changeFlag(False)\n            runMsg = {'pageName': variable['homePage'], 'startTime': self.startTime, 'endTime': time.time()}\n            runMsg = str(runMsg)\n            log1.addLog('log.txt', runMsg)\n            gl.get_value('func').webview(gl.get_value('session'), gl.get_value('homepage_url') + 'pepper/HOMEPAGE')\n\n        if Compare().conmpare(u'过来', listen) > 0.4:\n            Speech().speech('好的请稍等')\n            # stop all behaviors other than walking\n            gl.get_value('func').stop_behavior(gl.get_value('session'), 'walking-148f83/behavior_1')\n            # start the walking behavior\n            gl.get_value('func').start_behavior(gl.get_value('session'), 'walking-148f83/behavior_1')\n","sub_path":"untitled/venv/myproject/control/handler_homePage.py","file_name":"handler_homePage.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"598397646","text":"__author__ = 'Stévillis Sousa'\n\n'''\nWrite an algorithm that asks the user for numbers and stores them in a 20-position vector. Create a function that receives the filled vector and replaces every occurrence of a negative value with 0, every value less than 10 with 1, and all the rest with 2.\n'''
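\n# e.g. modifica_vetor([-5, 3, 12]) returns [0, 1, 2] under the rule above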
\n\ncont = 0\nvetor = []\n\ndef modifica_vetor(vetor):\n    for cont, num in enumerate(vetor): \n        if num < 0: \n            vetor[cont] = 0\n        elif num < 10:\n            vetor[cont] = 1\n        else:\n            vetor[cont] = 2\n    return vetor\n    \n    \nwhile cont < 20:\n    num = int(input(f'Enter number {cont+1}: '))\n    vetor.append(num)\n    cont += 1\n\n\nprint(f'Original vector: {vetor}')\nvetor_modificado = modifica_vetor(vetor)\nprint(f'Modified vector: {vetor_modificado}')\n","sub_path":"modifica_vetor.py","file_name":"modifica_vetor.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"498822076","text":"import numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.figure_factory as ff\nimport plotly.express as px\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\n\n# x is a list of days in data set starting at 1\n# y_baseline is a pandas dataframe of the model output for the baseline\n# y_pred is a pandas dataframe of the model output for the prediction\n\n# Need to produce a figure for each of the model outputs for each of the age categories. \n# Tempted to make age category a drop down selection.\n# Drop down selection was attempted but had issues and was impossible to fix within time constraints.\n# Attempt left in but function call commented out so that someone else could potentially take this up.\n\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n\n# population of the camp\npopulation = 18700\n\nbaseline_output = \"CM_output_sample1.csv\"\nmodel_output = \"CM_output_sample2.csv\"\n\n# read example csvs\nage_categories = pd.read_csv(\"age_categories.csv\")['age'].to_list()\ncase_cols = pd.read_csv(\"cm_output_columns.csv\")['columns'].to_list()\n\n# Process baseline csv\ndf_baseline = pd.read_csv(baseline_output)\ndf = df_baseline[\"Time\"]\nbaseline_n_days = df.nunique() # Count distinct observations over requested axis.\nbaseline_n_rows = df.shape[0]\n# num of simuls\nbaseline_n_simul = df[df == 0].count()\n\n# Get df for population\n# Use this as the benchmark for the age group\ncols_overall = [\"Time\"] + case_cols\ndf_baseline_all_simul = df_baseline[cols_overall]\ndf_baseline_all_sum = df_baseline_all_simul.groupby(['Time']).sum() * population\ndf_baseline_all = df_baseline_all_sum / baseline_n_simul\ndf_baseline_all_mean = df_baseline_all.mean()\ndf_baseline_all_std = df_baseline_all.std()\n\n# Process Model Output and compare with baseline;\ndf_model = pd.read_csv(model_output)\ndf = df_model[\"Time\"]\nn_days = df.nunique()\nn_rows = df.shape[0]\n# num of simuls\nn_simul = df[df == 0].count()\n\n# Generates the y data for the graph using the column name and df\ndef generate_y_data(model_output_df, col_age): \n    y_data = model_output_df[col_age]\n    return y_data\n\n# Generates a string of the column title we want from the df\ndef generate_col_age(col, age):\n    col_age = f\"{col}: {age}\"\n    return col_age\n\ndef generate_model_age_df(category, age, df_model, n_simul):\n    cols = [category + \": \" + age, \"Time\"]\n    df_model_age_simul = df_model[cols]\n    # Calculate averages for all simulations\n    df_model_age_sum = df_model_age_simul.groupby(['Time']).sum() * population\n    df_model_age = df_model_age_sum / n_simul \n    return df_model_age 
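\n\n# Minimal usage sketch (hypothetical category/age names -- the real ones come from\n# cm_output_columns.csv and age_categories.csv): average one output for one age\n# bracket across simulations and plot it on its own.\n#   df_sketch = generate_model_age_df(\"Infected\", \"0-9\", df_model, n_simul)\n#   fig_sketch = go.Figure(go.Scatter(x=list(df_sketch.index), y=df_sketch.iloc[:, 0]))\n#   fig_sketch.show()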
\n\ndef gen_traces_to_show(traces, index):\n    traces_to_show = traces\n    actual_index = 2*index # as two plots per age category. first index must be 0\n    traces_to_show[actual_index] = True\n    traces_to_show[(actual_index+1)] = True\n    return traces_to_show\n\n# Generates the drop down list for selecting age category graphs\ndef generate_drop_down_list(traces_to_show_all_false, age_categories):\n    buttons_list = []\n    traces_to_show_all_true = []\n    for i in range(0, len(traces_to_show_all_false)):\n        traces_to_show_all_true.append(not traces_to_show_all_false[i])\n\n    buttons_list.append(\n        dict(label = \"All\",\n             method = \"update\",\n             args = [{\"visible\": traces_to_show_all_true},\n                     {\"showlegend\": True}])\n    )\n\n    # adds drop down option for each age category\n    for i in range(len(age_categories)):\n        # take a real copy; a plain assignment would alias the list and let the\n        # True flags from earlier buttons accumulate across iterations\n        traces_to_show_all_false_copy = list(traces_to_show_all_false)\n        traces_to_show = gen_traces_to_show(traces_to_show_all_false_copy, i)\n        buttons_list.append(\n            dict(label = age_categories[i],\n                 method = \"update\",\n                 args = [{\"visible\": traces_to_show},\n                         {\"showlegend\": True}])\n        )\n    return buttons_list\n\n# Plots series graph with drop down menu of each age category\ndef plot_series(x, df_baseline_age, df_model_age, category, age_categories, baseline_n_simul, n_simul):\n    # plotting the series\n    fig = go.Figure()\n    traces_to_show = []\n    for age in age_categories:\n\n        df_baseline_age = generate_model_age_df(category, age, df_baseline, baseline_n_simul)\n        df_model_age = generate_model_age_df(category, age, df_model, n_simul)\n\n        col_age = generate_col_age(category, age) # generated once for each age group for efficiency\n\n        fig.add_trace(go.Scatter(x=x, y=generate_y_data(df_baseline_age, col_age),\n                      mode = \"lines+markers\",\n                      name = f\"Baseline {age}\"))\n\n        fig.add_trace(go.Scatter(x=x, y=generate_y_data(df_model_age, col_age),\n                      mode = \"lines+markers\",\n                      name = f\"Predicted {age}\"))\n\n        traces_to_show.append(False) # Probably a cleaner way of doing this but need an item in list for every trace with default value of False\n        traces_to_show.append(False)\n\n    # Add title and axis labels\n    fig.update_layout(\n        title=f\"Comparison of {category} over different age groups\",\n        xaxis_title=\"Day\",\n        yaxis_title=\"Output\",\n        # Drop down menu\n        #updatemenus=[go.layout.Updatemenu(\n        #    active = 0,\n        #    buttons=generate_drop_down_list(traces_to_show, age_categories)\n        #)] \n        # Drop down menu proved too difficult within time constraints - didn't update properly\n        annotations = [dict(x=0.5,\n                            y=-0.25,\n                            showarrow=False,\n                            text = \"To isolate two traces double click on one in the legend and then single click on the second one to show.\"\n                            )]\n    )\n\n    return fig\n
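\n# (Each plotting helper here adds two traces per age bracket -- baseline and\n# predicted -- so a figure over N age groups carries 2N traces and the legend\n# doubles as the show/hide filter.)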
\ndef plot_histogram(x, df_baseline_age, df_model_age, category, age_categories, baseline_n_simul, n_simul):\n    fig = go.Figure()\n\n    traces_to_show = []\n    for age in age_categories:\n        col_age = generate_col_age(category, age)\n\n        df_baseline_age = generate_model_age_df(category, age, df_baseline, baseline_n_simul)\n        df_model_age = generate_model_age_df(category, age, df_model, n_simul)\n\n        # Add histogram for baseline\n        fig.add_histogram(x=x, y=generate_y_data(df_baseline_age, col_age),\n                          name = f\"Baseline {age}\"\n                          )\n        # Add histogram for predicted\n        fig.add_histogram(x=x, y=generate_y_data(df_model_age, col_age),\n                          name = f\"Predicted {age}\"\n                          )\n        \n        traces_to_show.append(False) # Probably a cleaner way of doing this but need an item in list for every trace with default value of False\n        traces_to_show.append(False)\n\n    fig.update_layout(\n        # Add title and axis labels\n        title=f\"Histogram of {category} over different age groups\",\n        xaxis_title=\"Day\",\n        yaxis_title=\"Output\",\n        # Overlay both histograms\n        barmode=\"overlay\",\n        # Drop down menu\n        #updatemenus=[go.layout.Updatemenu(\n        #    active = 0,\n        #    buttons=generate_drop_down_list(traces_to_show, age_categories)\n        #)] \n        # Drop down menu proved too difficult within time constraints - didn't update properly  \n        annotations = [dict(x=0.5,\n                            y=-0.25,\n                            showarrow=False,\n                            text = \"To isolate two traces double click on one in the legend and then single click on the second one to show.\"\n                            )]\n    )\n\n    #Reduce Opacity to see both histograms\n    fig.update_traces(opacity=0.75)\n\n    return fig\n\n# Plots a graph of kde distribution\ndef plot_distribution(x, df_baseline_age, df_model_age, category, age_categories, baseline_n_simul, n_simul):\n\n    \n    traces_to_show = []\n    data_to_plot = []\n    group_labels = []\n    for age in age_categories:\n\n        df_baseline_age = generate_model_age_df(category, age, df_baseline, baseline_n_simul)\n        df_model_age = generate_model_age_df(category, age, df_model, n_simul)\n\n        col_age = generate_col_age(category, age) # generated once for each age group for efficiency\n\n        data_to_plot.append(generate_y_data(df_baseline_age, col_age))\n        data_to_plot.append(generate_y_data(df_model_age, col_age))\n\n        group_labels.append(f\"Baseline {age}\")\n        group_labels.append(f\"Predicted {age}\")\n        \n        traces_to_show.append(False) # Probably a cleaner way of doing this but need an item in list for every trace with default value of False\n        traces_to_show.append(False)\n    \n    fig = ff.create_distplot(data_to_plot, group_labels, show_hist=False)\n    # Add title and axis labels\n    fig.update_layout(\n        title=f\"Distribution of {category} over different age groups\",\n        xaxis_title=\"Day\",\n        yaxis_title=\"Output\",\n        # Drop down menu\n        #updatemenus=[go.layout.Updatemenu(\n        #    active = 0,\n        #    buttons=generate_drop_down_list(traces_to_show, age_categories)\n        #)]\n        # Drop down menu proved too difficult within time constraints - didn't update properly\n        annotations = [dict(x=0.5,\n                            y=-0.25,\n                            showarrow=False,\n                            text = \"To isolate two traces double click on one in the legend and then single click on the second one to show.\"\n                            )]\n    )\n    return fig\n
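\n# Autocorrelation at lag k below is the plain Pearson correlation between the\n# series and itself shifted by k, r_k = corr(x[:-k], x[k:]), with r_0 taken as 1;\n# the partial variant regresses out each shorter lag before computing the next.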
\ndef plot_autocorrelation(df_baseline_age, df_model_age, col, age_categories, category, shifts=31):\n\n    def get_autocorrelation(sequence, shifts=31):\n        correlations = []\n        \n        for shift in range(1, shifts):\n            correlation = np.corrcoef(sequence[:-shift], sequence[shift:])[0, 1]\n            correlations.append(correlation)\n        return [1] + correlations # correlation with 0 shift -> 1\n\n    def get_partial_autocorrelation(sequence, shifts=31):\n        p_correlations = []\n\n        residuals = sequence\n        for shift in range(1, shifts):\n            correlation = np.corrcoef(sequence[:-shift], residuals[shift:])[0, 1]\n            p_correlations.append(correlation)\n\n            m, c = np.polyfit(sequence[:-shift], residuals[shift:], 1) # m -> grad.; c -> intercept\n            residuals[shift:] = residuals[shift:] - (m * sequence[:-shift] + c)\n        return [1] + p_correlations\n\n    autocorrelations, p_autocorrelations = [], []\n    for age in age_categories:\n        col_age = col + \": \" + age\n        y = df_baseline_age[col_age]\n        pred = df_model_age[col_age]\n        input = y - pred\n\n        autocorrelations.append([np.linspace(0, shifts-1, shifts), get_autocorrelation(pred.to_numpy().copy(), shifts=shifts), [age for __ in range(shifts)]])\n        p_autocorrelations.append([np.linspace(0, shifts-1, shifts), get_partial_autocorrelation(pred.to_numpy(), shifts=shifts), [age for __ in range(shifts)]])\n\n    autocorrelations, p_autocorrelations = np.asarray(autocorrelations), np.asarray(p_autocorrelations)\n\n    ac_df = pd.DataFrame(data={\"shift\": autocorrelations[:,0].flatten(), \"ac\": autocorrelations[:,1].flatten(), \"colour\": autocorrelations[:,2].flatten()})\n    pac_df = pd.DataFrame(data={\"shift\": p_autocorrelations[:,0].flatten(), \"pac\": p_autocorrelations[:,1].flatten(), \"colour\": p_autocorrelations[:,2].flatten()})\n\n    ac_fig = px.line(ac_df, x=\"shift\", y=\"ac\", color=\"colour\", title=f\"Autocorrelation of {category} over different age groups\")\n    pac_fig = px.line(pac_df, x=\"shift\", y=\"pac\", color=\"colour\", title=f\"Partial Autocorrelation of {category} over different age groups\")\n\n    return ac_fig, pac_fig\n\n\ncase_cols = pd.read_csv(\"cm_output_columns.csv\")['columns'].to_list()\n\nx = [i+1 for i in range(n_days)]\n\ngraph_divs = []\nfor col in case_cols:\n    graph_divs.append(html.Div(dcc.Graph(figure=plot_series(x, df_baseline, df_model, col, age_categories, baseline_n_simul, n_simul))))\n    graph_divs.append(html.Div(dcc.Graph(figure=plot_histogram(x, df_baseline, df_model, col, age_categories, baseline_n_simul, n_simul))))\n    graph_divs.append(html.Div(dcc.Graph(figure=plot_distribution(x, df_baseline, df_model, col, age_categories, baseline_n_simul, n_simul))))\n    ac_fig, pac_fig = plot_autocorrelation(df_baseline, df_model, col, age_categories, col)\n    graph_divs.append(html.Div(dcc.Graph(figure=ac_fig)))\n    graph_divs.append(html.Div(dcc.Graph(figure=pac_fig)))\n\n\napp = dash.Dash()\napp.layout = html.Div(graph_divs)\napp.run_server(debug=True)\n","sub_path":"cm_compare_validation_output_plots.py","file_name":"cm_compare_validation_output_plots.py","file_ext":"py","file_size_in_byte":12206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"411880285","text":"#!/usr/bin/env python3\r\n# encoding: utf-8\r\n\r\n# algorithms based on PyTorch\r\n\r\nimport importlib\r\n\r\nfrom typing import (Tuple,\r\n                    Callable)\r\n\r\nfrom rls.common.yaml_ops import load_config\r\nfrom rls.utils.display import colorize\r\nfrom rls.utils.logging_utils import get_logger\r\n\r\nlogger = get_logger(__name__)\r\n\r\n\r\nclass AlgoRegistry(object):\r\n\r\n    def __init__(self):\r\n        self.algo_specs = {}\r\n\r\n    def register(self, name, **attrs):\r\n        if name in self.algo_specs.keys():\r\n            raise Exception(f'Cannot re-register algorithm: {name}')\r\n        self.algo_specs[name] = dict(attrs)\r\n\r\n    def get_model_info(self, name):\r\n        if name in self.algo_specs.keys():\r\n            return self.algo_specs[name]\r\n        raise Exception(f'Cannot find algorithm: {name}')\r\n\r\n\r\nregistry = AlgoRegistry()\r\n\r\n\r\ndef register(name, **attrs):\r\n    registry.register(name, **attrs)\r\n\r\n\r\ndef get_model_info(name: str) -> Tuple[Callable, str, bool]:\r\n    '''\r\n    Args:\r\n        name: name of the algorithm\r\n    Return:\r\n        algo_class of the algorithm model named `name`.\r\n        policy_mode of the policy, `on-policy` or `off-policy`.\r\n        is_multi flag, whether the algorithm is multi-agent.\r\n    '''\r\n    algo_info = registry.get_model_info(name)\r\n    logger.info(colorize(algo_info.get('logo', ''), color='green'))\r\n    model_class = getattr(importlib.import_module(f\"rls.algorithms.{algo_info['folder']}.{name}\"), algo_info['algo_class'])\r\n    return model_class, algo_info['policy_mode'], algo_info['is_multi']\r\n
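# A hypothetical usage sketch (made-up spec values) for the helpers above:\r\n#   register('dqn', folder='single', algo_class='DQN', policy_mode='off-policy', is_multi=False)\r\n#   cls, mode, multi = get_model_info('dqn')\r\n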
","sub_path":"rls/algorithms/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"197977664","text":"import pandas as pd\nimport numpy as np\nimport math\n\n\n#EnergyPlus Weatherfile Parameters, columns/fields in order\n### [0, 5] year, month, day, hour, minute, data source and uncertainty flags, \n### [6, 9] tdb, tdp, rh, patm, \n### [10, 12] extraterrestrial horizontal radiation, extraterrestrial direct normal radiation, horizontal infrared radiation, \n### [13, 15] global horizontal radiation, direct normal radiation, diffuse horizontal radiation\n\n\n\n#############\n# Schedules to be used for ventilation evaluation. OFFICE BUILDINGS.\nsch_occOff_8to5 = []\nfor i in range(1,8): sch_occOff_8to5.append((i,0))\nfor i in range(8,19): sch_occOff_8to5.append((i,1))\nfor i in range(19,25): sch_occOff_8to5.append((i,0))\n\n### Occupancy Schedule\nsch_occOff = []\nfor i in range(1,7): sch_occOff.append((i,0))\nsch_occOff.append((7,0.1))\nsch_occOff.append((8,0.2))\nfor i in range(9,13): sch_occOff.append((i,0.95))\nsch_occOff.append((13,0.5))\nfor i in range(14,18): sch_occOff.append((i,0.95))\nsch_occOff.append((18,0.3))\nsch_occOff.append((19,0.1))\nsch_occOff.append((20,0.1))\nfor i in range(21,25): sch_occOff.append((i,0.05))\n\nsch_occOff_Sat = []\nfor i in range(1,8): sch_occOff_Sat.append((i,0))\nsch_occOff_Sat.append((8,0.1))\nfor i in range(9,13): sch_occOff_Sat.append((i,0.3))\nfor i in range(13,18): sch_occOff_Sat.append((i,0.1))\nfor i in range(18,25): sch_occOff_Sat.append((i,0.0))\n\n#No occupancy, off schedule \nsch_Off_Sun = []\nfor i in range(1,25): sch_Off_Sun.append((i,0))\n\n### Light schedule\nsch_lgtOff = []\nfor i in range(1,7): sch_lgtOff.append((i,0.05))\nsch_lgtOff.append((7,0.1))\nsch_lgtOff.append((8,0.3))\nfor i in range(9,17): sch_lgtOff.append((i,0.90))\nsch_lgtOff.append((17,0.5))\nsch_lgtOff.append((18,0.5))\nfor i in range(19,21): sch_lgtOff.append((i,0.30))\nfor i in range(21,23): sch_lgtOff.append((i,0.20))\nsch_lgtOff.append((23,0.1))\nsch_lgtOff.append((24,0.05))\n\nsch_lgtOff_Sat = []\nfor i in range(1,7): sch_lgtOff_Sat.append((i,0.05))\nfor i in range(7,9): sch_lgtOff_Sat.append((i,0.1))\nfor i in range(9,13): sch_lgtOff_Sat.append((i,0.3))\nfor i in range(13,18): sch_lgtOff_Sat.append((i,0.15))\nfor i in range(18,25): sch_lgtOff_Sat.append((i,0.05))\n\nsch_lgtOff_Sun = []\nfor i in range(1,25): sch_lgtOff_Sun.append((i,0.05))\n\n#Equipment schedule\nsch_eqpOff = []\nfor i in range(1,8): sch_eqpOff.append((i,0))\nsch_eqpOff.append((8,0.4))\nfor i in range(9,13): sch_eqpOff.append((i,0.90))\nsch_eqpOff.append((13,0.8))\nfor i in range(14,18): sch_eqpOff.append((i,0.90))\nsch_eqpOff.append((18,0.5))\nfor i in range(19,25): sch_eqpOff.append((i,0.4))\n\nsch_eqpOff_Sat = []\nfor i in range(1,7): sch_eqpOff_Sat.append((i,0.3))\nfor i in range(7,8): sch_eqpOff_Sat.append((i,0.4))\nfor i in range(8,13): sch_eqpOff_Sat.append((i,0.5))\nfor i in range(13,18): sch_eqpOff_Sat.append((i,0.35))\nfor i in range(18,25): sch_eqpOff_Sat.append((i,0.30))\n\nsch_eqpOff_Sun = []\nfor i in range(1,25): sch_eqpOff_Sun.append((i,0.3))\n######\n\nsch_L_E = []\nfor i in range(1,8): sch_L_E.append((i,0.1))\nfor i in range(8,12): sch_L_E.append((i,0.2))\nfor i in range(12,19): sch_L_E.append((i,0.5))\nfor i in range(19,25): sch_L_E.append((i,0.1))\n
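\n# Each schedule above is a list of (hour, fraction) pairs covering hours 1-24;\n# e.g. dict(sch_L_E)[12] == 0.5 means a 50% lighting/equipment load at noon.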
\n\nsch_nightV = []\nfor i in range(1,6): sch_nightV.append((i,1))\nfor i in range(6,22): sch_nightV.append((i,0))\nfor i in range(22,25): sch_nightV.append((i,1))\n\nsch_preCool = []\nfor i in range(1,6): sch_preCool.append((i,0))\nfor i in range(6,9): sch_preCool.append((i,1))\nfor i in range(9,18): sch_preCool.append((i,0))\nfor i in range(18,22): sch_preCool.append((i,1))\nfor i in range(22,25): sch_preCool.append((i,0))\n\n\ndef totResistanceShoebox(A_facade, WWR, U_wall, U_glazing, V_dot, A_TM, h_convection):\n\tRes_1 = 1/((A_facade*U_wall*(1-WWR))+(A_facade*WWR*U_glazing)+ (V_dot*1.2*1000))\n\tRes_2 = 1/(A_TM*h_convection)\n\tRes_tot = Res_1 + Res_2\n\treturn Res_tot\n\ndef totResistance(zone_input):\n\tW, L, H = zone_input['W'],zone_input['L'],zone_input['H']\n\tWWRs, WWRw, WWRn, WWRe, U_wall, U_roof, U_glazing, V_dot, A_TM_factor, h_convection = zone_input['WWRs'],zone_input['WWRw'],zone_input['WWRn'],zone_input['WWRe'],zone_input['U_wall'],zone_input['U_roof'],zone_input['U_glazing'],zone_input['V_dot'],zone_input['A_TM_factor'],zone_input['h_convection']\n\t_s = (L*H*U_wall*(1-WWRs))+(L*H*WWRs*U_glazing)\n\t_w = (W*H*U_wall*(1-WWRw))+(W*H*WWRw*U_glazing)\n\t_n = (L*H*U_wall*(1-WWRn))+(L*H*WWRn*U_glazing)\n\t_e = (W*H*U_wall*(1-WWRe))+(W*H*WWRe*U_glazing)\n\t_r = (W*L*U_roof)\n\tRes_1 = 1/(_s +_w +_n +_e + _r + (V_dot*1.2*1000))\n\tRes_2 = 1/(A_TM_factor*L*W*h_convection)\n\tRes_tot = Res_1 + Res_2\n\treturn Res_tot\n\ndef intGain(W, L, H, WWRs, WWRw, WWRn, WWRe, SHGC,qradHS,qradHW,qradHN,qradHE,shadingFac):\n\t_allGlazing = (L*H*WWRs*qradHS)+(W*H*WWRw*qradHW)+(L*H*WWRn*qradHN)+(W*H*WWRe*qradHE)\n\t# print ('********solar gain calc ',L*H*WWRs*qradHS, qradHS )\n\tsolarGain = _allGlazing*SHGC*shadingFac\n\treturn solarGain\n\ndef timeConstant(Res_tot, zone_input):\n\tW, L, A_TM_factor, thickness_TM, cp_TM, density_TM = zone_input['W'],zone_input['L'], zone_input['A_TM_factor'],zone_input['T_TM'],zone_input['cp_TM'],zone_input['density_TM']\n\tthermalCapacitance = A_TM_factor*W*L*thickness_TM*cp_TM*density_TM\n\tt_tau = Res_tot * thermalCapacitance #result in seconds. 
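(t_tau = R*C, total resistance times thermal capacitance.) 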
Return result in hr.\n\treturn (t_tau/3600)\n\ndef Tin_new (Res_tot, internalLoad, Tout, Tin, timeConstant):\n\ta = Res_tot*internalLoad + (Tout+273)\n\tb = 1 - math.exp(-1/timeConstant)\n\tc = (Tin+273) * math.exp(-1/timeConstant)\n\tTin_ = a*b + c \n\treturn (Tin_-273)\n\ndef Qsys (Res_tot, internalLoad, Tout, TinPrevious, Ttarget_L, Ttarget_U, timeConstant):\n\t# if heating is needed\n\tqsys_h = 0\n\n\tif TinPrevious<=Ttarget_L:\n\t\ta = (Ttarget_L+273) - (TinPrevious+273)* math.exp(-1/timeConstant) \t\n\t\tb = 1 - math.exp(-1/timeConstant)\n\t\tc = a/b - (Tout+273)\n\t\tif c/Res_tot > internalLoad:\n\t\t\tqsys_h = (c/Res_tot) - internalLoad\n\n\t# if cooling is needed\n\tif TinPrevious >= Ttarget_U:\n\t\ta = (Ttarget_U+273) - (TinPrevious+273)* math.exp(-1/timeConstant)\n\t\tb = 1 - math.exp(-1/timeConstant)\n\t\tc = a/b - (Tout+273)\n\t\tqsys_c = (c/Res_tot) - internalLoad # cooling energy is negative.\n\telse: qsys_c = 0\n\n\treturn [qsys_h, qsys_c]\n\ndef Qcool (Res_tot, internalLoad, Tout, TinPrevious, Ttarget_U, timeConstant): #for hourly calculation\n\tqsys_c = 0\n\tif TinPrevious >= Ttarget_U:\n\t\ta = (Ttarget_U+273) - (TinPrevious+273)* math.exp(-1/timeConstant)\n\t\tb = 1 - math.exp(-1/timeConstant)\n\t\tc = a/b - (Tout+273)\n\t\t# if c/Res_tot<0:\n\t\tqsys_c = (c/Res_tot) - internalLoad # cooling energy is negative.\n\t\t# else: qsys_c = -1 * internalLoad\n\treturn qsys_c\n\ndef Qheat (Res_tot, internalLoad, Tout, TinPrevious, Ttarget_L, timeConstant): #for hourly calculation\n\t# if heating is needed\n\tqsys_h = 0\n\tif TinPrevious<=Ttarget_L:\n\t\ta = (Ttarget_L+273) - (TinPrevious+273)* math.exp(-1/timeConstant) \t\n\t\tb = 1 - math.exp(-1/timeConstant)\n\t\tc = a/b - (Tout+273)\n\t\tif c/Res_tot > internalLoad:\n\t\t\tqsys_h = (c/Res_tot) - internalLoad\n\treturn qsys_h\n\ndef nv_Operable (zone_input, Tin, Tout, wind_angle, wind_speed, Fschedule,operableAreaFraction): #for hourly calculation\n\tL,W,H,WWRs,WWRw,WWRn,WWRe = zone_input[\"L\"], zone_input[\"W\"],zone_input[\"H\"],zone_input[\"WWRs\"],zone_input[\"WWRw\"],zone_input[\"WWRn\"],zone_input[\"WWRe\"]\n\tCwn,Cwe,Cws,Cww=0,0,0,0\n\t#Based on energyplus, ventilation by wind and stack with open area\n\t#Wind ventilation\n\tif wind_angle == 0: Cwn,Cwe,Cws,Cww=0.55,0,0,0\n\telif wind_angle == 90: Cwn,Cwe,Cws,Cww=0,0.55,0,0\n\telif wind_angle == 180: Cwn,Cwe,Cws,Cww=0,0,0.55,0\n\telif wind_angle == 270: Cwn,Cwe,Cws,Cww=0,0,0,0.55\n\telif 00: print (\"no opening scenarios \",WWRn,WWRe,WWRs,WWRw,Cwn,Cwe,Cws,Cww)\n\n\t#buoyancy ventilation\n\tCd = 0.4 + 0.0045*np.absolute(Tin-Tout)\n\tHnpl= H/4 #to be checked based on ASHRAE guidance\n\tg = 9.8\n\teff_operableWin_buoy = ((L*H*WWRs)+(W*H*WWRw)+(L*H*WWRn)+(W*H*WWRe))*operableAreaFraction\n\tQs = Cd*eff_operableWin_buoy*Fschedule*np.sqrt(2*g*Hnpl*np.absolute(Tin-Tout)/(Tout+273))\n\tQ=np.sqrt(np.square(Qw)+np.square(Qs))\n\n\treturn (round(Q,0))\n\n## to store discomfort hours from low temperature and high temperatures.\ndef comfortEval(tempin_, lowerBound, upperBound,occupancy):\n\tdiscomfort_coldhrs, discomfort_hothrs = 0,0\t\n\thr_len = len(tempin_)\n\thours = list(range(0,hr_len))\n\t\n\tfor i in range (0,hr_len):\n\t\tif tempin_[i] > upperBound+0.9 and occupancy[i]>0.25: #\"+0.9\" to match the frequency chart, where all indoor temp between 26 and 26.9 are binned together. 
\t\t\t\n\t\t\tdiscomfort_hothrs+=1\n\t\tif tempin_[i] < lowerBound and occupancy[i]>0.25: \t\t\t\n\t\t\tdiscomfort_coldhrs+=1\n\t#print (wth+' discomfort hours ', discomforthrs)\n\treturn ((discomfort_coldhrs, discomfort_hothrs))\n\n\ndef runRC(toutL,windDL,windVL,qradHS,qradHW,qradHN,qradHE,zone_input,LPD,EPD):\n\tt_inL, t_inL_q, t_inL_q_, qsystemL_h, qsystemL_c, qsystem_h, qsystem_c, occ, nightV, l_e, solarGL, internalGT, heatLT, ach, nvonL, nvind = [],[],[],[],[],0,0,[],[],[],[],[],[],[],[],-100\n\teqpSch, lgtSch = [],[]\n\toccDensityOff = 0.055 #18 m2 per person, \n\tloadPerPerson = 100 #w/person\n\tPPD = occDensityOff*loadPerPerson\n\n\tAfloor = zone_input['W']*zone_input['L']\n\tdepth_daylit = 2.5\n\tdA_perimeter = ((zone_input['WWRw']+zone_input['WWRe'])*zone_input['W'] + (zone_input['WWRs']+zone_input['WWRn'])*zone_input['L'])\n\tdA_penetration = (zone_input['H']/2 + zone_input['H']*depth_daylit/2)\n\tdA_area= dA_penetration*dA_perimeter\n\tdA_floorPercentage = dA_area/Afloor\n\tLPD_floorPercentage = 1-dA_floorPercentage\n\n\tt_in0 = 20 #toutL[0] + 5\n\tt_in0q = t_in0\n\tq_light, q_eqpt = 0,0\n\tt_lowerSetBack, t_upperSetBack = 16,28\n\n\toperableAreaFraction,Fschedule,qL,Qinfilloss = 0.5,1,[],[]\n\n\tdays = int(len(toutL)/24)\n\n\tocc_=[s[1] for s in sch_occOff]\n\tocc_Sat=[s[1] for s in sch_occOff_Sat]\n\tocc_Sun=[s[1] for s in sch_Off_Sun]\n\t# for d in range(1,days+1):\n\t# \tfor i in range(1,6):\n\t# \t\tif d%i == 0:occ.extend(occ_)\n\t# \tif d%6 == 0:\n\t# \t\tocc.extend(occ_Sat)\n\t# \t\tocc_Sat_+=1\n\t# \t\tprint (d, d%6)\n\t# \tif d%7 == 0:occ.extend(occ_Sun)\n\t\n\tocc_week = []\n\tfor d in range (0,5):occ_week.extend(occ_)\n\tocc_week.extend(occ_Sat)\n\tocc_week.extend(occ_Sun)\n\tfor w in range(0,52): occ.extend(occ_week)\n\tocc.extend(occ_)\n\n\teqp_=[s[1] for s in sch_eqpOff]\n\teqp_Sat=[s[1] for s in sch_eqpOff_Sat]\n\teqp_Sun=[s[1] for s in sch_eqpOff_Sun]\n\teqp_week = []\n\tfor d in range (0,5):eqp_week.extend(eqp_)\n\teqp_week.extend(eqp_Sat)\n\teqp_week.extend(eqp_Sun)\n\tfor w in range(0,52): eqpSch.extend(eqp_week)\n\teqpSch.extend(eqp_)\n\t# for d in range(1,days+1):\n\t# \tfor i in range(1,6):\n\t# \t\tif d%i == 0:eqpSch.extend(eqp_)\n\t# \tif d%6 == 0:eqpSch.extend(eqp_Sat)\n\t# \tif d%7 == 0:eqpSch.extend(eqp_Sun)\n\n\tlgt_=[s[1] for s in sch_lgtOff]\n\tlgt_Sat=[s[1] for s in sch_lgtOff_Sat]\n\tlgt_Sun=[s[1] for s in sch_lgtOff_Sun]\n\tlgtSch_week = []\n\tfor d in range (0,5):lgtSch_week.extend(lgt_)\n\tlgtSch_week.extend(lgt_Sat)\n\tlgtSch_week.extend(lgt_Sun)\n\tfor w in range(0,52): lgtSch.extend(lgtSch_week)\n\tlgtSch.extend(lgt_)\n\t# for d in range(1,days+1):\n\t# \tfor i in range(1,6):\n\t# \t\tif d%i == 0:lgtSch.extend(lgt_)\n\t# \tif d%6 == 0:lgtSch.extend(lgt_Sat)\n\t# \tif d%7 == 0:lgtSch.extend(lgt_Sun)\n\n\t# print (len(eqp_week))\n\t# print (len(lgtSch), len(occ), len(eqpSch))\n\tl_e_=[s[1] for s in sch_L_E]\n\tfor i in range(0,days):l_e.extend(l_e_)\n\t\n\tnightV_=[s[1] for s in sch_nightV]\n\tfor i in range(0,days):nightV.extend(nightV_)\n\n\toccPlot = [x*-20 for x in occ]\n\n\tV_dot0 = zone_input['V_dot']\n\n\tmaxVdot = 40*zone_input['W']*zone_input['L']*zone_input['H']/3600 #control for mazimum ventilation rate. 
\n\n\thr = 0\n\twhile hr < len(toutL):\n\t\t#IF NV is turned on:\n\t\tNV_V_dot = 0\n\t\tif zone_input['NVach']>1 and (t_in0q > 20) and (t_in0q > toutL[hr]) and zone_input['sp_lower'] < toutL[hr] < zone_input['sp_upper']:\n\t\t\t# if zone_input['NVach']==5: \n\t\t\t# \tNV_V_dot = zone_input['NVach']*zone_input['W']*zone_input['L']*zone_input['H']/3600\n\t\t\t# \tqL.append(NV_V_dot)\n\t\t\t# \tnvind = -20\n\t\t\t# elif zone_input['NVach']>5:\n\t\t\tNV_V_dot=nv_Operable(zone_input, t_in0q, toutL[hr], windDL[hr], windVL[hr], Fschedule, operableAreaFraction)#*3600\n\t\t\tqL.append(NV_V_dot)\n\t\t\t\n\t\t\tif NV_V_dot>0:nvind = -20\n\t\t\tzone_input['V_dot'] = zone_input['V_dot'] + NV_V_dot\n\t\t\t\n\t\t# qL.append(q_)\n\t\t# elif 5 < toutL[hr] < zone_input['sp_lower']: zone_input['V_dot'] = 0.6*zone_input['W']*zone_input['L']*zone_input['H']/3600\n\t\telse: \n\t\t\tzone_input['V_dot'] = V_dot0\n\t\t\tnvind = -100\n\n\t\tach.append(zone_input['V_dot'])\n\t\t#ventilation heat losses, considered only when within the comfort range\n\t\tif t_in0q>toutL[hr] and zone_input['V_dot']= zone_input['sp_upper'] and zone_input['Ccop']>0:\n\t\t\tzone_input['V_dot'] = V_dot0\n\t\t\tR = totResistance(zone_input)\n\t\t\tt_tau = timeConstant(R,zone_input)\n\t\t\tif occ[hr]==0 and Tin_n_ >= t_upperSetBack: \n\t\t\t\tqcool = Qcool(R, loadBalance, toutL[hr], t_in0q, t_upperSetBack, t_tau)\n\t\t\t\tTin_n_ = t_upperSetBack\n\t\t\telse: \n\t\t\t\tqcool = Qcool(R, loadBalance, toutL[hr], t_in0q, zone_input['sp_upper'], t_tau)\n\t\t\t\tTin_n_ = zone_input['sp_upper']\n\n\t\t\tqsystem_c += qcool\n\t\t\t# qsystemL_c.append(round(qcool/1000,1))\n\t\t\tqsystemL_c.append(-20)\n\t\t\t\n\t\t\t# internalG_Total += qcool\n\t\t\t# Tin_n_ = Tin_new (R, internalG_Total, toutL[hr], t_in0q, t_tau) ##Indoor temperture after cooling\n\t\t\t\n\t\telse: \n\t\t\tqsystemL_c.append(-100)\n\n\t\tif Tin_n_ <= zone_input['sp_lower'] and zone_input['Hcop']>0:\n\t\t\tzone_input['V_dot'] = V_dot0\n\t\t\tR = totResistance(zone_input)\n\t\t\tt_tau = timeConstant(R,zone_input)\n\t\t\tif occ[hr]>0: \n\t\t\t\tqheat = Qheat(R, loadBalance, toutL[hr], t_in0q, zone_input['sp_lower'], t_tau)\n\t\t\t\tTin_n_ = zone_input['sp_lower']\n\t\t\telse: \n\t\t\t\tqheat = Qheat(R, loadBalance, toutL[hr], t_in0q, t_lowerSetBack, t_tau)\n\t\t\t\tTin_n_ = t_lowerSetBack\n\t\t\t\n\t\t\tqsystem_h += qheat\n\t\t\t# qsystemL_h.append(round(qheat/1000,1))\n\t\t\tqsystemL_h.append(-20)\n\t\n\t\t\t# internalG_Total += qheat\n\t\t\t# Tin_n_ = Tin_new (R, internalG_Total, toutL[hr], t_in0q, t_tau) ##Indoor temperture after heating\n\t\t\t\n\t\telse: \n\t\t\tqsystemL_h.append(-100)\n\n\t\tnvonL.append(nvind)\n\t\tt_inL_q.append(round(Tin_n_,1))\n\t\tt_inL_q_.append(Tin_n_)\n\t\tt_in0q = Tin_n_\n\t\thr+=1\n\t\n\thcElecCO2, hcGasCO2, hEnergy, cEnergy, hcElecCost, hcGasCost = 0,0,0,0,0,0\n\tEUI_C, EUI_H = 0, 0\n\tEUI_L = round(q_light/1000)\n\tEUI_E = round(q_eqpt/1000)\n\n\tif zone_input['Hcop']>0: \n\t\tEUI_H = round(qsystem_h/(zone_input['Hcop']*Afloor*1000))\n\t\tif zone_input['Hcop']<1: \n\t\t\thcGasCO2 = EUI_H*zone_input[\"coef_CO2gas\"]\n\t\t\thcGasCost = EUI_H*zone_input[\"coef_Costgas\"]\n\t\telse: \n\t\t\thcElecCO2 = EUI_H*zone_input[\"coef_CO2elec\"] #(kgCO2eq), Cost ($)\n\t\t\thcElecCost = EUI_H*zone_input[\"coef_Costelec\"]\n\tif zone_input['Ccop']>0: \n\t\tEUI_C = abs(round(qsystem_c/(zone_input['Ccop']*Afloor*1000)))\n\t\thcElecCO2 += EUI_C*zone_input[\"coef_CO2elec\"]\n\t\thcElecCost += EUI_C*zone_input[\"coef_Costelec\"]\n\n\tCO2_ELEC = 
round(hcElecCO2+(EUI_L+EUI_E)*zone_input[\"coef_CO2elec\"],1)\n\tCO2_GAS = round(hcGasCO2,1)\n\tCOST_ELEC = round(hcElecCost+(EUI_L+EUI_E)*zone_input[\"coef_Costelec\"],1)\n\tCOST_GAS = round(hcGasCost,1)\n\n\tt_inL_occFreq = []\n\tfor hr in range(0, len(t_inL_q)):\n\t\tif occ[hr]>0.25: t_inL_occFreq.append(t_inL_q[hr])\n\ta,b,c=Qinfilloss[5040:5208],internalGT[5040:5208],solarGL[5040:5208]\n\tbalance = [i-j for i, j in zip(b,a)]\n\tdisHrs = comfortEval(t_inL_q_, zone_input['sp_lower'], zone_input['sp_upper'], occ)\n\t\n\treturn (qsystemL_h, qsystemL_c, disHrs,t_inL_q,EUI_H,EUI_C, EUI_L,EUI_E, nvonL, CO2_ELEC, CO2_GAS, COST_ELEC, COST_GAS,t_inL_occFreq)","sub_path":"climabox.py","file_name":"climabox.py","file_ext":"py","file_size_in_byte":16617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"252240117","text":"m,n,a,b = map(int,input().split())\nd = {}\nans = 0\nfor i in range(n):\n\tfor j in range(m):\n\t\tt = (a+i)**(b+j)\n\t\tif not t in d:\n\t\t\tans += 1\n\t\t\td[t] = True\nprint(ans)\n","sub_path":"51nod/1024.py","file_name":"1024.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"332337115","text":"class Solution:\n def minBitFlips(self, start: int, goal: int) -> int:\n a = \"{0:b}\".format(start)\n b = \"{0:b}\".format(goal)\n n = max(len(a), len(b))\n count = 0\n a = a.zfill(n)\n b = b.zfill(n)\n for i in range(n):\n if a[i] != b[i]:\n count += 1\n \n return count\n","sub_path":"leetcode/minimum-bit-flips-to-convert-number.py","file_name":"minimum-bit-flips-to-convert-number.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"456101578","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-Today INECO LTD,. PART. ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nimport time\nfrom openerp.osv import fields, osv\nimport openerp.addons.decimal_precision as dp\nfrom operator import itemgetter\nfrom openerp.tools.translate import _\n\nclass account_period_close(osv.osv_memory):\n    _inherit = \"account.period.close\"\n    \n    \n    def data_save(self, cr, uid, ids, context=None):\n        \"\"\"\n        This function closes the period\n        @param cr: the current row, from the database cursor,\n        @param uid: the current user’s ID for security checks,\n        @param ids: account period close’s ID or list of IDs\n        \"\"\"\n        period_pool = self.pool.get('account.period')\n        account_move_obj = self.pool.get('account.move')\n        account_account = self.pool.get('account.account')\n        ineco_close_accont_obj = self.pool.get('ineco.close.account')\n        \n        mode = 'done'\n        for form in self.read(cr, uid, ids, context=context):\n            if form['sure']:\n                for id in context['active_ids']:\n                    account_move_ids = account_move_obj.search(cr, uid, [('period_id', '=', id), ('state', '=', 'draft')], context=context)\n                    if account_move_ids:\n                        raise osv.except_osv(_('Invalid Action!'), _('In order to close a period, you must first post related journal entries.'))\n\n                    account_account_ids = account_account.search(cr, uid, [('type', '!=', \"view\")])\n                    account_account_obj = account_account.browse(cr, uid, account_account_ids)\n                    for acc_line in account_account_obj:\n                        debit = 0.00\n                        credit = 0.00\n                        before_balance = 0.00\n                        balance = 0.00\n                        cr.execute('select sum(round(debit,2)) as debit from account_move_line where period_id = %s and account_id = %s',(id,acc_line.id))\n                        row_debit = map(itemgetter(0), cr.fetchall())\n                        cr.execute('select sum(round(credit,2)) as credit from account_move_line where period_id = %s and account_id = %s',(id,acc_line.id))\n                        row_credit = map(itemgetter(0), cr.fetchall())\n                        cr.execute('select round(balance,2) from ineco_close_account where account_id ='+ str(acc_line.id) +' order by id desc limit 1')\n                        row_balance = map(itemgetter(0), cr.fetchall())\n                        \n                        if row_debit != [] and row_debit[0] != None:\n                            debit = row_debit[0]\n                        if row_credit != [] and row_credit[0] != None:\n                            credit = row_credit[0]\n                        if row_balance != [] and row_balance[0] != None:\n                            before_balance = row_balance[0]\n                        \n                        balance = debit - credit + before_balance\n                        ineco_id = ineco_close_accont_obj.create(cr, uid, {\n                            'account_id': acc_line.id,\n                            'period_id': id,\n                            'debit': debit,\n                            'credit': credit,\n                            'balance': balance,\n                            'balance_before' : before_balance,\n                        })\n                \n                cr.execute('update account_journal_period set state=%s where period_id=%s', (mode, id))\n                cr.execute('update account_period set state=%s where id=%s', (mode, id))\n\n        return {'type': 'ir.actions.act_window_close'}\n    \n\nclass account_period(osv.osv):\n    _inherit = \"account.period\"\n    _description = \"Add close account in period\"\n    \n    def _sale_amount(self, cr, uid, ids, name, args, context=None):\n        \n        res = {}\n        invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('journal_id.print_sale_tax','!=',False),('period_tax_id','in',ids),('type','=','out_invoice'),('state','not in',('draft','cancel'))], context=context)\n        invoice_obj = self.pool.get('account.invoice').browse(cr, uid, invoice_ids, context=context) \n        for invoce_sale in self.browse(cr, uid, ids, context=context):\n            res[invoce_sale.id] = {'sale_amount_untaxed': 0.0,\n                                   'sale_amount_tax': 0.0\n                                   }\n            sale_untaxed = 0.0\n            sale_tax = 0.0\n            for line 
in invoice_obj:\n sale_untaxed += line.amount_untaxed\n sale_tax += line.amount_tax\n res[invoce_sale.id]['sale_amount_untaxed'] = sale_untaxed\n res[invoce_sale.id]['sale_amount_tax'] = sale_tax\n return res\n \n def _sale_refund_amount(self, cr, uid, ids, name, args, context=None):\n \n res = {}\n invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('journal_id.print_sale_tax','!=',False),('period_tax_id','in',ids),('type','=','out_refund'),('state','not in',('draft','cancel'))], context=context)\n invoice_obj = self.pool.get('account.invoice').browse(cr, uid, invoice_ids, context=context) \n for invoce_sale in self.browse(cr, uid, ids, context=context):\n res[invoce_sale.id] = {'sale_refund_amount_untaxed': 0.0,\n 'sale_refund_amount_tax': 0.0\n }\n sale_untaxed = 0.0\n sale_tax = 0.0\n for line in invoice_obj:\n sale_untaxed += line.amount_untaxed\n sale_tax += line.amount_tax\n res[invoce_sale.id]['sale_refund_amount_untaxed'] = sale_untaxed\n res[invoce_sale.id]['sale_refund_amount_tax'] = sale_tax\n return res \n \n def _purchase_amount(self, cr, uid, ids, name, args, context=None):\n \n res = {}\n invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('journal_id.print_sale_tax','!=',False),('period_tax_id','in',ids),('type','=','in_invoice'),('state','not in',('draft','cancel'))], context=context)\n invoice_obj = self.pool.get('account.invoice').browse(cr, uid, invoice_ids, context=context) \n for invoce_sale in self.browse(cr, uid, ids, context=context):\n res[invoce_sale.id] = {'purchase_amount_untaxed': 0.0,\n 'purchase_amount_tax': 0.0\n }\n sale_untaxed = 0.0\n sale_tax = 0.0\n for line in invoice_obj:\n sale_untaxed += line.amount_untaxed\n sale_tax += line.amount_tax\n res[invoce_sale.id]['purchase_amount_untaxed'] = sale_untaxed\n res[invoce_sale.id]['purchase_amount_tax'] = sale_tax\n return res\n\n def _purchase_refund_amount(self, cr, uid, ids, name, args, context=None):\n \n res = {}\n invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('journal_id.print_sale_tax','!=',False),('period_tax_id','in',ids),('type','=','in_refund'),('state','not in',('draft','cancel'))], context=context)\n invoice_obj = self.pool.get('account.invoice').browse(cr, uid, invoice_ids, context=context) \n for invoce_sale in self.browse(cr, uid, ids, context=context):\n res[invoce_sale.id] = {'purchase_refund_amount_untaxed': 0.0,\n 'purchase_refund_amount_tax': 0.0\n }\n sale_untaxed = 0.0\n sale_tax = 0.0\n for line in invoice_obj:\n sale_untaxed += line.amount_untaxed\n sale_tax += line.amount_tax\n res[invoce_sale.id]['purchase_refund_amount_untaxed'] = sale_untaxed\n res[invoce_sale.id]['purchase_refund_amount_tax'] = sale_tax\n return res\n \n def _sale_receipt_amount(self, cr, uid, ids, name, args, context=None): \n res = {}\n invoice_ids = self.pool.get('account.voucher').search(cr, uid, [('journal_id.print_sale_tax','!=',False),('period_tax_id','in',ids),('type','=','sale'),('state','not in',('draft','cancel'))], context=context)\n invoice_obj = self.pool.get('account.voucher').browse(cr, uid, invoice_ids, context=context) \n for invoce_sale in self.browse(cr, uid, ids, context=context):\n res[invoce_sale.id] = {'sale_receipt_amount_untaxed': 0.0,\n 'sale_receipt_amount_tax': 0.0\n }\n sale_untaxed = 0.0\n sale_tax = 0.0\n for line in invoice_obj:\n sale_untaxed += line.amount or 0.0\n sale_tax += line.tax_amount or 0.0\n res[invoce_sale.id]['sale_receipt_amount_untaxed'] = sale_untaxed\n res[invoce_sale.id]['sale_receipt_amount_tax'] = sale_tax\n 
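# note: every voucher matched above is summed into each requested period's totals\n        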
return res\n\n def _purchase_receipt_amount(self, cr, uid, ids, name, args, context=None): \n res = {}\n invoice_ids = self.pool.get('account.voucher').search(cr, uid, [('journal_id.print_sale_tax','!=',False),('period_tax_id','in',ids),('type','=','purchase'),('state','not in',('draft','cancel'))], context=context)\n invoice_obj = self.pool.get('account.voucher').browse(cr, uid, invoice_ids, context=context) \n for invoce_sale in self.browse(cr, uid, ids, context=context):\n res[invoce_sale.id] = {'purchase_receipt_amount_untaxed': 0.0,\n 'purchase_receipt_amount_tax': 0.0\n }\n sale_untaxed = 0.0\n sale_tax = 0.0\n for line in invoice_obj:\n sale_untaxed += line.amount or 0.0\n sale_tax += line.tax_amount or 0.0\n res[invoce_sale.id]['purchase_receipt_amount_untaxed'] = sale_untaxed\n res[invoce_sale.id]['purchase_receipt_amount_tax'] = sale_tax\n return res\n \n _columns = {\n 'close_line_ids': fields.one2many('ineco.close.account', 'period_id', 'Account', readonly=True),\n 'customer_invoice_ids': fields.one2many('account.invoice', 'period_tax_id', 'Customer Invoice', domain=[('type','=','out_invoice'),('journal_id.print_sale_tax','!=',False)], readonly=True), \n 'customer_refund_ids': fields.one2many('account.invoice', 'period_tax_id', 'Customer Refund', domain=[('type','=','out_refund'),('journal_id.print_sale_tax','!=',False)], readonly=True), \n 'supplier_invoice_ids': fields.one2many('account.invoice', 'period_tax_id', 'Supplier Invoice', domain=[('type','=','in_invoice'),('journal_id.print_sale_tax','!=',False)], readonly=True), \n 'supplier_refund_ids': fields.one2many('account.invoice', 'period_tax_id', 'Supplier Refund', domain=[('type','=','in_refund'),('journal_id.print_sale_tax','!=',False)], readonly=True),\n 'sale_receipt_ids': fields.one2many('account.voucher', 'period_tax_id', 'Sale Receipt', domain=[('type','=','sale'),('journal_id.print_sale_tax','!=',False)], readonly=True), \n 'purchase_receipt_ids': fields.one2many('account.voucher', 'period_tax_id', 'Purchase Receipt', domain=[('type','=','purchase'),('journal_id.print_sale_tax','!=',False)], readonly=True), \n 'sale_amount_untaxed': fields.function(_sale_amount,digits_compute=dp.get_precision('Account'), string='Amount Untaxed',multi='sums'),\n 'sale_amount_tax': fields.function(_sale_amount,digits_compute=dp.get_precision('Account'), string='Amount Tax',multi='sums'),\n 'sale_refund_amount_untaxed': fields.function(_sale_refund_amount,digits_compute=dp.get_precision('Account'), string='Amount Untaxed',multi='sumsr'),\n 'sale_refund_amount_tax': fields.function(_sale_refund_amount,digits_compute=dp.get_precision('Account'), string='Amount Tax',multi='sumsr'),\n 'purchase_amount_untaxed': fields.function(_purchase_amount,digits_compute=dp.get_precision('Account'), string='Amount Untaxed',multi='sumss'),\n 'purchase_amount_tax': fields.function(_purchase_amount,digits_compute=dp.get_precision('Account'), string='Amount Tax',multi='sumss'),\n 'purchase_refund_amount_untaxed': fields.function(_purchase_refund_amount,digits_compute=dp.get_precision('Account'), string='Amount Untaxed',multi='sumsp'),\n 'purchase_refund_amount_tax': fields.function(_purchase_refund_amount,digits_compute=dp.get_precision('Account'), string='Amount Tax',multi='sumsp'),\n 'sale_receipt_amount_untaxed': fields.function(_sale_receipt_amount,digits_compute=dp.get_precision('Account'), string='Amount Untaxed',multi='sumss1'),\n 'sale_receipt_amount_tax': fields.function(_sale_receipt_amount,digits_compute=dp.get_precision('Account'), 
string='Amount Tax',multi='sumss1'),\n 'purchase_receipt_amount_untaxed': fields.function(_purchase_receipt_amount,digits_compute=dp.get_precision('Account'), string='Amount Untaxed',multi='sumss2'),\n 'purchase_receipt_amount_tax': fields.function(_purchase_receipt_amount,digits_compute=dp.get_precision('Account'), string='Amount Tax',multi='sumss2'),\n 'date_pp30': fields.date('Date Vat'),\n 'date_wht': fields.date('Date WHT'),\n }\n \n def action_draft(self, cr, uid, ids, *args):\n mode = 'draft'\n cr.execute('delete from ineco_close_account where period_id =%s',tuple(ids))\n cr.execute('update account_journal_period set state=%s where period_id in %s', (mode, tuple(ids),))\n cr.execute('update account_period set state=%s where id in %s', (mode, tuple(ids),))\n return True\n\nclass ineco_close_account(osv.osv):\n _name = \"ineco.close.account\"\n _description = \"Close Account for Account Code\" \n _columns = {\n 'name': fields.related('account_id', 'name', string='Account Name', size=256, store=True, type='char'),\n 'code': fields.related('account_id', 'code', string='Account Code', size=64, store=True, type='char' ),\n 'account_id': fields.many2one('account.account', 'Account',required=True,readonly=True), \n 'period_id': fields.many2one('account.period', 'Period',required=True,readonly=True), \n 'debit': fields.float('Debit', required=True, digits_compute= dp.get_precision('Account'), readonly=True),\n 'credit': fields.float('Credit', required=True, digits_compute= dp.get_precision('Account'), readonly=True),\n 'balance_before': fields.float('Balance Before', required=True, digits_compute= dp.get_precision('Account'), readonly=True),\n 'balance': fields.float('Balance', required=True, digits_compute= dp.get_precision('Account'), readonly=True),\n 'company_id': fields.many2one('res.company', 'Company', required=True),\n }\n _defaults = {\n 'balance': 0.00,\n 'debit': 0.00,\n 'credit': 0.00,\n 'balance_before': 0.00,\n 'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account.account', context=c),\n }\n _sql_constraints = [\n ('account_period_uniq', 'unique(account_id, period_id)', 'Account and Period Name must be unique per company!'),\n ] \n \nineco_close_account()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"ineco_thai_account/close_account.py","file_name":"close_account.py","file_ext":"py","file_size_in_byte":16332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"607621188","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019/8/22 8:11\r\n# @Author : Youpeng Li\r\n# @Site : \r\n# @File : 0191_hammingWeight.py\r\n# @Software: PyCharm\r\n\r\n'''\r\n191. Number of 1 Bits\r\n\r\nWrite a function that takes an unsigned integer and return the number of '1' bits it has (also known as the Hamming weight).\r\n\r\nExample 1:\r\nInput: 00000000000000000000000000001011\r\nOutput: 3\r\nExplanation: The input binary string 00000000000000000000000000001011 has a total of three '1' bits.\r\n\r\nExample 2:\r\nInput: 00000000000000000000000010000000\r\nOutput: 1\r\nExplanation: The input binary string 00000000000000000000000010000000 has a total of one '1' bit.\r\n\r\nExample 3:\r\nInput: 11111111111111111111111111111101\r\nOutput: 31\r\nExplanation: The input binary string 11111111111111111111111111111101 has a total of thirty one '1' bits.\r\n\r\nNote:\r\nNote that in some languages such as Java, there is no unsigned integer type. 
In this case,\r\nthe input will be given as signed integer type and should not affect your implementation,\r\nas the internal binary representation of the integer is the same whether it is signed or unsigned.\r\nIn Java, the compiler represents the signed integers using 2's complement notation.\r\nTherefore, in Example 3 above the input represents the signed integer -3.\r\n\r\nFollow up:\r\nIf this function is called many times, how would you optimize it?\r\n'''\r\n\r\nclass Solution(object):\r\n def hammingWeight(self, n):\r\n \"\"\"\r\n :type n: int\r\n :rtype: int\r\n \"\"\"\r\n if not n:\r\n return 0\r\n res, tmp = 0, 1\r\n for i in range(32):\r\n if n & tmp:\r\n res += 1\r\n tmp <<= 1\r\n return res\r\n\r\n def hammingWeight_1(self, n):\r\n \"\"\"\r\n :type n: int\r\n :rtype: int\r\n \"\"\"\r\n mybin = bin(n)\r\n return mybin.count('1')\r\n\r\n def hammingWeight_2(self, n):\r\n \"\"\"\r\n :type n: int\r\n :rtype: int\r\n \"\"\"\r\n count = 0\r\n while n != 0:\r\n if (n ^ (n - 1)) == 1:\r\n count += 1\r\n n = n >> 1\r\n return count\r\n\r\nif __name__ == \"__main__\":\r\n a = Solution()\r\n n = 11\r\n print(a.hammingWeight(n))\r\n print(a.hammingWeight_1(n))\r\n print(a.hammingWeight_2(n))\r\n n = 128\r\n print(a.hammingWeight(n))\r\n print(a.hammingWeight_1(n))\r\n print(a.hammingWeight_2(n))","sub_path":"Solutions/0191_hammingWeight.py","file_name":"0191_hammingWeight.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"433525490","text":"import numpy as np\nimport cv2\nimport math\nimport os\nimport tensorflow as tf\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.callbacks import TensorBoard\nimport time\nimport pickle\nimport datetime as dt\nfrom datetime import datetime\nimport keyboard\n\n\n\ndef findlocation(ListL):\n\n A = 0\n B = 0\n C = 0\n D = 0\n loc = \"temploc\"\n\n for x in ListL:\n if (x == \"A\"):\n A+=1\n if (x == \"B\"):\n B+=1\n if (x == \"C\"):\n C+=1\n if (x == \"D\"):\n D+=1\n\n if A > max(B, C, D):\n loc = \"Bathroom\"\n elif B > max(A, C, D):\n loc = \"SRC\"\n elif C > max(A, B, D):\n loc = \"Other Teacher\"\n elif D > max(A, C, B):\n loc = \"Office\"\n\n return loc\n\n\ndef findrotation(image_1):\n\n orb = cv2.ORB_create()\n cap = cv2.VideoCapture(1)\n image_1 = resize(image_1)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 500)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 500)\n goodmatch = True\n time = True\n ListL = []\n count = 0\n\n for lp in range(4):\n ret, frame = cap.read() # return a single frame in variable `frame\n cv2.imwrite(r\"C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Students\\StudentFound.jpg\", frame)\n\n\n while goodmatch:\n count+=1\n ret,frame = cap.read()\n\n image_2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n kp1, des1 = orb.detectAndCompute(image_1, None)\n kp2, des2 = orb.detectAndCompute(image_2, None)\n\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n matches = bf.match(des1, des2)\n good = []\n\n for m in matches:\n if m.distance < 30:\n good.append(m)\n\n #matches = sorted(matches, key=lambda x: x.distance)\n #good = matches[:15]\n\n # print(good)\n\n img1_p = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n img2_p = 
np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n\n M, mask = cv2.findHomography(img1_p, img2_p, cv2.RANSAC, 5.0)\n\n # print(M)\n\n theta = - math.atan2(M[0, 1], M[0, 0]) * 180 / math.pi\n\n destination(theta, ListL)\n\n print(theta)\n\n img3 = cv2.drawMatches(image_1, kp1, image_2, kp2, good, None, flags=2)\n cv2.imshow(\"output\", img3)\n cv2.waitKey(1)\n if (count == 70):\n goodmatch = False\n\n currentDT = datetime.now()\n ti = (currentDT.strftime(\"%I:%M:%S %p\"))\n print(ti)\n datee = (currentDT.strftime(\"%a, %b %d, %Y\"))\n Final = findlocation(ListL)\n\n while(time):\n if keyboard.is_pressed('b'):\n currentT = datetime.now()\n to = (currentT.strftime(\"%I:%M:%S %p\"))\n print (to)\n time = False\n\n\n return datee, ti, to, Final\n\n\ndef SignInOut(student,datee,ti,to,location):\n sign = open(r\"C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Sign in out sheet.txt\", \"a\")\n\n sign.write(\"/n\" \"Name: {0} \\n\" \"Date: {1} \\n\" \"Time out: {2} \\n\" \"Time in: {3} \\n\" \"Location: {4}\\n\".format(student,datee,ti,to,location) )\n\n\n\n\ndef whichstudent():\n pickle_in = open(r\"\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\TrianData\\X\", \"rb\")\n X = pickle.load(pickle_in)\n\n pickle_in = open(r\"\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\TrianData\\Y\", \"rb\")\n y = pickle.load(pickle_in)\n\n CATEGORIES = [\"Student1\", \"Student2\", \"Student3\", \"Student4\", \"Student5\"]\n\n def prepare(filepath):\n IMG_SIZE = 50 # 50 in txt-based\n img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE) # read in the image, convert to grayscale\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing\n return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1) # return the image with shaping that TF wants.\n\n model = tf.keras.models.load_model(\n r\"C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\TrianData\\64x3-CNN.model\")\n\n prediction = model.predict(\n [prepare(r\"C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Students\\StudentFound.jpg\")])\n return(CATEGORIES[int(prediction[0][0])])\n\n#def AddStudent():\n\ndef AddStudentt():\n\n cap = cv2.VideoCapture(1) # video capture source camera (Here webcam of laptop)\n\n while True:\n ret, frame = cap.read() # return a single frame in variable `frame\n cv2.imshow('img1', frame) # display the captured image\n if cv2.waitKey(1) & 0xFF == ord('s'): # save on pressing 's'\n ret, frame = cap.read() # return a single frame in variable `frame\n cv2.imwrite(r\"C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Students\\Student1.jpg\",frame)\n\n\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\ndef resize(image):\n\n #scale_percent = 10 # percent of original size\n width = 500 #int(image.shape[1] * scale_percent / 100)\n height = 500 #int(image.shape[0] * scale_percent / 100)\n dim = (width, height)\n\n resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA) # resize image\n return resized\n\ndef destination(degree, ListL):\n\n degree = int(degree)\n\n if (-45 <= degree <= 45):\n location = \"A\"\n ListL.append(location)\n\n #print (\"Correct:\" + str(degree))\n\n if (45 < degree <= 135):\n location = \"D\"\n ListL.append(location)\n #print(\"D: Office\")\n #print(\"Correct:\" + str(degree))\n\n if (135 < degree <= 180 or -180 < degree <= -135 ):\n location = \"C\"\n ListL.append(location)\n #print(\"C: Bathroom\")\n #print 
(\"Correct:\" + str(degree))\n\n if ( -135 < degree <= -45 ):\n location = \"B\"\n ListL.append(location)\n #print(\"B: Teacher\")\n #print(\"Correct:\" + str(degree))\n\n\n\n\nfor lp in range(1000):\n Student = whichstudent()\n\n if (Student == \"Student1\"):\n image_1 = cv2.imread(r'C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Students\\Student1.jpg',cv2.IMREAD_GRAYSCALE)\n s = \"Student1\"\n\n if (Student == \"Student2\"):\n image_1 = cv2.imread(r'C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Students\\Student2.jpg',\n cv2.IMREAD_GRAYSCALE)\n s = \"Student2\"\n if (Student == \"Student3\"):\n image_1 = cv2.imread(r'C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Students\\Student3.jpg',\n cv2.IMREAD_GRAYSCALE)\n s = \"Student3\"\n if (Student == \"Student4\"):\n image_1 = cv2.imread(r'C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Students\\Student4.jpg',\n cv2.IMREAD_GRAYSCALE)\n s = \"Student4\"\n if (Student == \"Student5\"):\n image_1 = cv2.imread(r'C:\\Users\\gaell\\OneDrive\\Documents\\SCE- Schepens\\Official project\\Students\\Student5.jpg',\n cv2.IMREAD_GRAYSCALE)\n s = \"Student5\"\n\n d,ti,to,l = findrotation(image_1)\n SignInOut(s,d,ti,to,l)\n\n\n\n\n","sub_path":"Sign in_out program.py","file_name":"Sign in_out program.py","file_ext":"py","file_size_in_byte":7494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"325949567","text":"\nclass CassandraWrite:\n @classmethod\n def object_created(cls):\n \"\"\"\n Wrapper when new object created\n \"\"\"\n return {'data': [],\n 'reason': 'object created successfully',\n 'status_code': 2006}\n\n @classmethod\n def one_row_found(cls, row):\n \"\"\"\n Wrapper when only one row detected\n \"\"\"\n return {'data': row,\n 'reason': 'OK',\n 'status_code': 2007}\n\n @classmethod\n def many_rows_found(cls, rows):\n \"\"\"\n Wrapper when multiple rows detected\n \"\"\"\n return {'data': rows,\n 'reason': 'OK',\n 'status_code': 2008}\n\nclass CassandraRead:\n @classmethod\n def no_rows_found(cls):\n \"\"\"\n Wrapper when no rows detected\n \"\"\"\n return {'data': [],\n 'reason': 'No rows found',\n 'status_code': 2009}\n\n @classmethod\n def one_row_found(cls, row):\n \"\"\"\n Wrapper when only one row detected\n \"\"\"\n return {'data': row,\n 'reason': 'OK',\n 'status_code': 2010}\n\n @classmethod\n def many_rows_found(cls, rows):\n \"\"\"\n Wrapper when multiple rows detected\n \"\"\"\n return {'data': rows,\n 'reason': 'OK',\n 'status_code': 2011}\n\n","sub_path":"connectors/cassandra/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"238165352","text":"\nimport unittest, os\nimport raise_morale\n\nfrom raise_morale import SummonerDto\nfrom raise_morale.etc.exceptions import (RiotException,\n SummonerException, UnauthorizedException)\n\n\nclass GetSummonerByNameTests(unittest.TestCase):\n\n def test_summoner_should_return_summoner_dto(self):\n api_key = os.getenv('API_KEY')\n summoner = raise_morale.get_summoner_by_name('kalibat', api_key)\n self.assertIsInstance(summoner, SummonerDto)\n\n def test_unknown_summoner_name_should_raise_exception(self):\n api_key = os.getenv('API_KEY')\n with self.assertRaises(SummonerException):\n raise_morale.get_summoner_by_name('unknownplayername', api_key)\n\n\nclass GetSummonerByAccountIdTests(unittest.TestCase):\n\n def 
test_account_id_returns_summoner(self):\n api_key = os.getenv('API_KEY')\n summoner = raise_morale.get_summoner_by_name('takizawaaki', api_key)\n summoner_by_id = raise_morale.get_summoner_by_account_id(summoner.account_id, api_key)\n self.assertEqual(summoner.account_id, summoner_by_id.account_id)\n","sub_path":"tests/summoner_tests.py","file_name":"summoner_tests.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"523153842","text":"\"\"\"\nThis component provides support for a virtual light.\n\n\"\"\"\n\nimport logging\nimport pprint\n\nimport voluptuous as vol\n\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.components.light import (\n ATTR_BRIGHTNESS,\n SUPPORT_BRIGHTNESS,\n LightEntity,\n)\nfrom homeassistant.helpers.config_validation import (PLATFORM_SCHEMA)\nfrom . import COMPONENT_DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\nDEPENDENCIES = [COMPONENT_DOMAIN]\n\nCONF_NAME = \"name\"\nCONF_INITIAL_VALUE = \"initial_value\"\nCONF_INITIAL_BRIGHTNESS = \"initial_brightness\"\n\nDEFAULT_INITIAL_VALUE = \"off\"\nDEFAULT_INITIAL_BRIGHTNESS = \"100\"\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_NAME): cv.string,\n vol.Optional(CONF_INITIAL_VALUE, default=DEFAULT_INITIAL_VALUE): cv.string,\n vol.Optional(CONF_INITIAL_BRIGHTNESS, default=DEFAULT_INITIAL_BRIGHTNESS): cv.string,\n})\n\n\nasync def async_setup_platform(_hass, config, async_add_entities, _discovery_info=None):\n lights = [VirtualLight(config)]\n async_add_entities(lights, True)\n\n\nclass VirtualLight(LightEntity):\n\n def __init__(self, config):\n \"\"\"Initialize an Virtual light.\"\"\"\n self._name = config.get(CONF_NAME)\n self._unique_id = self._name.lower().replace(' ', '_')\n self._state = config.get(CONF_INITIAL_VALUE)\n self._brightness = config.get(CONF_INITIAL_BRIGHTNESS)\n _LOGGER.info('VirtualLight: %s created', self._name)\n\n @property\n def unique_id(self):\n \"\"\"Return a unique ID.\"\"\"\n return self._unique_id\n\n @property\n def is_on(self) -> bool:\n \"\"\"Return True if light is on.\"\"\"\n return self._state.lower() == \"on\"\n\n @property\n def supported_features(self):\n \"\"\"Flag features that are supported.\"\"\"\n return SUPPORT_BRIGHTNESS\n\n def turn_on(self, **kwargs):\n \"\"\"Turn the light on.\"\"\"\n brightness = kwargs.get(ATTR_BRIGHTNESS, None)\n if brightness is not None:\n self._brightness = brightness\n\n _LOGGER.info(\"turn_on: {}\".format(pprint.pformat(kwargs)))\n self._state = \"on\"\n\n def turn_off(self, **kwargs):\n \"\"\"Turn the light off.\"\"\"\n _LOGGER.info(\"turn_off: {}\".format(pprint.pformat(kwargs)))\n self._state = \"off\"\n\n @property\n def brightness(self):\n \"\"\"Return the brightness of the light.\"\"\"\n return self._brightness\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes.\"\"\"\n\n attrs = {\n name: value for name, value in (\n ('friendly_name', self._name),\n ('brightness', self._brightness),\n ) if value is not None\n }\n\n return attrs","sub_path":"config/custom_components/virtual/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"256921141","text":"import os.path\nimport pathlib\nimport subprocess\nimport argparse\nimport json\nfrom itertools import groupby\nfrom pathlib import Path\n\n\n# TODO: Handle possible functions called main in 
essence not handsanitizer\n\n\ndef get_filepath_in_output_dir(output_dir: str, input_file: str, ext: str) -> Path:\n input_path = Path(input_file)\n stem = input_path.stem\n return Path(output_dir).joinpath(stem).with_suffix(ext)\n\n\ndirname = os.path.dirname(__file__)\nhandsan_path = dirname + \"/../../handsan\"\n\n\n# main entry point\ndef essence():\n parser = argparse.ArgumentParser()\n parser.add_argument('input', help='input bitcode file')\n parser.add_argument('-o', '--output',\n help='folder in which executables will be saved / or output path for spec file',\n nargs='?', default='output')\n parser.add_argument('-b', '--build', help='build executables for functions',\n action=\"store_true\")\n parser.add_argument('-g', '--generate-spec', help='generates specification of bitcode module', action=\"store_true\")\n parser.add_argument('--no-template', help='prevents the generation of json input templates', action=\"store_true\")\n parser.add_argument('--build-read-none', help='build all functions that have purity level: read none', action=\"store_true\")\n parser.add_argument('--build-write-only', help='build all functions that have purity level: write only', action=\"store_true\")\n parser.add_argument('functions', nargs='*',\n help='functions from the specified bitcode module which need to be build')\n args = parser.parse_args()\n\n input_file = Path(args.input)\n output_dir = args.output\n build_execs = args.build\n generate_spec = args.generate_spec\n generate_input_template = args.no_template != True\n functions_to_build = args.functions\n build_read_none = args.build_read_none\n build_write_only = args.build_write_only\n\n if input_file.suffix == '.c':\n print(\"got .c file as input, you probably meant .bc\", file=sys.stderr)\n return\n\n if input_file.suffix == '.bc':\n if build_execs:\n essence_build(input_file, output_dir, generate_input_template, functions_to_build)\n\n elif build_read_none or build_write_only:\n if build_read_none:\n essence_build_read_none(input_file, output_dir, generate_input_template)\n if build_write_only:\n essence_build_write_only(input_file, output_dir, generate_input_template)\n\n elif generate_spec:\n essence_generate_spec(input_file, output_dir)\n\n else:\n essence_generate_spec(input_file, output_dir)\n path_to_spec_file = get_filepath_in_output_dir(output_dir, input_file, \".spec.json\")\n essence_list_signatures(path_to_spec_file)\n else:\n essence_list_signatures(input_file)\n\n\n\n# # lists function signatures\ndef essence_list_signatures(spec_file: Path):\n with spec_file.open(\"r\") as j:\n contents = json.load(j)\n funcs = contents['functions']\n\n funcs.sort(key=lambda content: content['purity'])\n groups = groupby(funcs, lambda content: content['purity'])\n\n print(\"-------- Functions --------\")\n for purity, funcs in groups:\n print(purity + \":\")\n for func in funcs:\n print(\"\\t\", func['name'] + \":\", func['signature'])\n\n\n# generates and prints spec\ndef essence_generate_spec(bc_file: Path, output: str):\n subprocess.run([handsan_path, \"-o\", output, bc_file])\n\n\n# build the actual functions\ndef essence_build(bc_file: Path, output: str, generate_input_template: bool, function_names: [str]):\n print(\"----------------- building functions --------------------\")\n for func_name in function_names:\n print(f\"building: {func_name}\")\n build_functions_for(bc_file, output, generate_input_template, func_name)\n\n\n\ndef essence_build_read_none(bc_file: Path, output: str, generate_input_template: bool):\n 
essence_build_for_purity_level(bc_file, output, generate_input_template, 'read_none')\n\ndef essence_build_write_only(bc_file: Path, output: str, generate_input_template: bool):\n essence_build_for_purity_level(bc_file, output, generate_input_template, 'write_only')\n\n\ndef essence_build_for_purity_level(bc_file: Path, output: str, generate_input_template: bool, purity_level :str):\n essence_generate_spec(bc_file, output)\n spec_path = get_filepath_in_output_dir(output, bc_file, '.spec.json')\n with spec_path.open(\"r\") as j:\n contents = json.load(j)\n funcs = contents['functions']\n funcs = [func for func in funcs if func['purity'] == purity_level]\n print(\"----------------- building \" + purity_level + \" functions --------------------\")\n for func in funcs:\n print('building', func['name'])\n build_functions_for(bc_file, output, generate_input_template, func['name'])\n\n\n\n\n\ndef build_functions_for(bc_file: Path, output_dir: str, template: bool, func_name: str):\n extracted_bc_path = get_filepath_in_output_dir(output_dir, bc_file.stem, \".extracted.bc\")\n subprocess.run([\"llvm-extract\", bc_file, \"--recursive\", \"-o\",extracted_bc_path, \"--func\", func_name])\n\n if template:\n subprocess.run([handsan_path, \"--build\", \"-o\", output_dir, extracted_bc_path, func_name])\n else:\n subprocess.run([handsan_path, \"--build\", \"--no-template\", \"-o\", output_dir, extracted_bc_path, func_name])\n\n output_obj_file_path = get_filepath_in_output_dir(output_dir, extracted_bc_path, \".o\")\n subprocess.run([\"llc-11\", \"-filetype=obj\", extracted_bc_path, \"-o\", output_obj_file_path])\n\n func_exec_file_path = get_filepath_in_output_dir(output_dir, func_name, \"\")\n func_generated_cpp_file_path = get_filepath_in_output_dir(output_dir, func_name, \".cpp\")\n\n #TODO turn this into a path\n argparse_include_path = dirname + \"/../../vendor/include\"\n subprocess.run(\n [\"clang++\", \"-std=c++17\", output_obj_file_path, func_generated_cpp_file_path, \"-o\",\n func_exec_file_path, \"-I\" + argparse_include_path])\n\n # subprocess.run([\"rm\", extracted_bc_path, output_obj_file_path])\n\n # TODO later\n# def essence_build_from_spec(bc_file: str):\n","sub_path":"src/essence/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"632828932","text":"# Print the Welcome Statements\nprint(\" \")\nprint(\"Welcome to Employee Data Cleaner!\")\nprint('*' * 40)\n\n# Prompt user to select the file they want to run the analysis on\n# Warning about the Resource folder and the file format\nprint(\"WARNING! 
The file you are going to enter has to be properly formatted(Check Constraints in README.md) and is placed in the 'Resources' Folder\")\nprint(\" \")\nfile_name = input(\"Enter the name of the file along with .csv extension: \")\n\nprint(\" \")\nprint(\"************************Starting Data Cleaning************************\")\n\n# import the os module\n# Use path join to create file paths across operating systems\nimport os\ncsvpath = os.path.join('Resources', file_name)\n\n#Lists for storing the Finance Data\nemp_id = []\nnames = []\ndate_of_birth = []\nssn = []\nstate = []\nfirst_name = []\nlast_name = []\nformatted_date_of_birth = []\nstate_abbrv = []\nformatted_ssn = []\n\n#US State Abbreviation\nus_state_dict = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY',\n}\n\n# import csv to parse the csv file\nimport csv\nwith open(csvpath, newline='') as csvfile:\n\n # CSV reader specifies delimiter and variable that holds contents\n csvreader = csv.reader(csvfile, delimiter=',')\n\n # Skipping the Titles\n next(csvreader)\n\n for data in csvreader:\n emp_id.append(data[0])\n names.append(data[1])\n date_of_birth.append(data[2])\n ssn.append(data[3])\n state.append(data[4])\n\n#Module for time delays\nimport time\n\ntime.sleep(2)\n\n#Spliting the Name\nfor name in names:\n f_name,l_name = name.split(\" \")\n first_name.append(f_name)\n last_name.append(l_name)\nprint('Task 1 : Done')\n\n\n#Changing the Dates to MM/DD/YYYY\nfor date in date_of_birth:\n year,month,day = date.split(\"-\")\n new_date = str(month) + '/' + str(day) + '/' + str(year)\n formatted_date_of_birth.append(new_date)\nprint('Task 2 : Done')\n\n#Hiding the SSN Values for Protection\nfor number in ssn:\n hidder = list(number)\n hidder[0] = '*'\n hidder[1] = '*'\n hidder[2] = '*'\n hidder[4] = '*'\n hidder[5] = '*'\n ssn_handler = ''.join(hidder)\n formatted_ssn.append( ssn_handler)\nprint('Task 3 : Done')\n\n#State Abbreviation \nfor state_name in state:\n for full_form,abbrv in us_state_dict.items():\n if state_name == full_form:\n state_abbrv.append(abbrv)\nprint('Task 4 : Done')\n\nprint(\"************************Data Cleaning Finished************************\")\nprint(\"Check the 'Cleaned_Data_Files' Folder\")\nprint(\" \")\n\n#Module for getting the current time\nimport datetime\n\n#New File creator/appender\ncurrent_time_date = datetime.datetime.today().strftime('%d-%m-%y')\noutput_file_name = \"Employee_Data_\" + str(current_time_date) + '.csv'\noutput_path = os.path.join('Cleaned_Data_Files', output_file_name)\n\n#Zipping the new data \nemployees = 
zip(emp_id,first_name,last_name,formatted_date_of_birth,formatted_ssn,state_abbrv)\n\n#Pasting it to the new created file\nwith open(output_path, 'w', newline='') as file:\n csvwriter = csv.writer(file, delimiter=',')\n csvwriter.writerow(['Emp ID','First Name','Last Name','DOB','SSN','State'])\n\n for employee in employees:\n csvwriter.writerow(employee)\n\n\n\n\n\n\n \n\n","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"214459258","text":"import http.server\nimport socketserver\nimport cgi\nimport gzip\nimport logging\nimport mimetypes\nimport os\nimport shutil\nimport socket\nfrom io import BytesIO\nfrom email.utils import formatdate\nfrom urllib.parse import unquote_plus, quote, parse_qs\nfrom xml.sax.saxutils import escape\n\nfrom Cheetah.Template import Template\nimport config\nfrom plugin import GetPlugin\n\nSCRIPTDIR = os.path.dirname(__file__)\n\nPYTIVO_VERSION = '2.6.2'\nSERVER_INFO = \"\"\"\n\n\"\"\" + PYTIVO_VERSION + \"\"\"\npy3Tivo\n\"\"\" + PYTIVO_VERSION + \"\"\"\npyTivo Developers\nhttp://pytivo.sf.net/\n\"\"\"\n\nVIDEO_FORMATS = \"\"\"\n\nvideo/x-tivo-mpeg\n\"\"\"\n\nVIDEO_FORMATS_TS = \"\"\"\n\nvideo/x-tivo-mpeg\nvideo/x-tivo-mpeg-ts\n\"\"\"\n\nBASE_HTML = \"\"\"\n py3Tivo\n\n %s \"\"\"\n\nRELOAD = '
The page will reload in %d seconds.\n'\nUNSUP = '\nUnsupported Command\n\nQuery:\n    %s
'\n\nclass TivoHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):\n def __init__(self, server_address, RequestHandlerClass):\n self.containers = {}\n self.beacon = None\n self.in_service = None\n self.stop = False\n self.restart = False\n self.logger = logging.getLogger('pyTivo')\n http.server.HTTPServer.__init__(self, server_address,\n RequestHandlerClass)\n self.daemon_threads = True\n\n def add_container(self, name, settings):\n if name in self.containers or name == 'TiVoConnect':\n raise Exception(\"Container Name in use\")\n try:\n self.containers[name] = settings\n except KeyError:\n self.logger.error('Unable to add container ' + name)\n\n def reset(self):\n self.containers.clear()\n for section, settings in config.getShares():\n self.add_container(section, settings)\n\n def handle_error(self, request, client_address):\n self.logger.exception('Exception during request from %s',\n client_address)\n\n def set_beacon(self, beacon):\n self.beacon = beacon\n\n def set_service_status(self, status):\n self.in_service = status\n\nclass TivoHTTPHandler(http.server.BaseHTTPRequestHandler):\n def __init__(self, request, client_address, server):\n self.wbufsize = 0x10000\n self.server_version = 'pyTivo/1.0'\n self.protocol_version = 'HTTP/1.1'\n self.sys_version = ''\n self.container = None\n self.cname = None\n\n try:\n http.server.BaseHTTPRequestHandler.__init__(self, request,\n client_address, server)\n except:\n server.logger.exception('Exception initializing the BaseHTTPRequestHandler')\n\n def setup(self):\n \"\"\"\n Called before the handle() method to perform any initialization actions required.\n see https://docs.python.org/3/library/socketserver.html\n \"\"\"\n http.server.BaseHTTPRequestHandler.setup(self)\n\n # This allows pyTivo to die when user selects Stop Transfer on the TiVo\n # (If no request is received within timeout seconds, handle_timeout() will be called,\n # see https://docs.python.org/3/library/socketserver.html#socketserver.BaseServer.handle_request\n # also note that a \"Request timed out:\" info message will be logged.)\n self.request.settimeout(180)\n\n\n def address_port_string(self):\n host, port = self.client_address[:2]\n return \"{}:{}\".format(host, port)\n\n def version_string(self):\n \"\"\" Override version_string() so it doesn't include the Python\n version.\n\n \"\"\"\n return self.server_version\n\n def do_GET(self):\n tsn = self.headers.get('TiVo_TCD_ID',\n self.headers.get('tsn', ''))\n if not self.authorize(tsn):\n return\n\n if tsn and (not config.tivos_found or tsn in config.tivos):\n attr = config.tivos.get(tsn, {})\n updated_tivo = False\n if 'address' not in attr:\n attr['address'] = self.address_string()\n updated_tivo = True\n if 'name' not in attr:\n attr['name'] = self.server.beacon.get_name(attr['address'])\n updated_tivo = True\n config.tivos[tsn] = attr\n if updated_tivo:\n self.server.logger.info('TiVo identified from request: %s %s',\n attr['address'], attr['name'])\n\n if '?' 
in self.path:\n path, opts = self.path.split('?', 1)\n query = parse_qs(opts)\n else:\n path = self.path\n query = {}\n\n if path == '/TiVoConnect':\n self.handle_query(query, tsn)\n else:\n ## Get File\n splitpath = [x for x in unquote_plus(path).split('/') if x]\n if splitpath:\n self.handle_file(query, splitpath)\n else:\n ## Not a file not a TiVo command\n self.infopage()\n\n def do_POST(self):\n tsn = self.headers.get('TiVo_TCD_ID',\n self.headers.get('tsn', ''))\n if not self.authorize(tsn):\n return\n ctype, pdict = cgi.parse_header(self.headers.get('content-type'))\n if ctype == 'multipart/form-data':\n query = cgi.parse_multipart(self.rfile, pdict)\n # I'm not sure if this code works after the python3 conversion\n # there may be some string/bytes issues. Saving settings does not\n # come through here, I'm leaving this debugging line commented\n # out for the time being -mjl 2017-06-01\n #self.server.logger.info(\"POST query: {}\".format(query))\n else:\n length = int(self.headers.get('content-length'))\n qs = self.rfile.read(length).decode('utf-8')\n query = parse_qs(qs, keep_blank_values=1)\n self.handle_query(query, tsn)\n\n def do_command(self, query, command, target, tsn):\n for name, container in config.getShares(tsn):\n if target == name:\n plugin = GetPlugin(container['type'])\n if hasattr(plugin, command):\n self.cname = name\n self.container = container\n method = getattr(plugin, command)\n method(self, query)\n return True\n else:\n break\n return False\n\n def handle_query(self, query, tsn):\n if 'Command' in query and len(query['Command']) >= 1:\n\n command = query['Command'][0]\n\n # If we are looking at the root container\n if (command == 'QueryContainer' and\n (not 'Container' in query or query['Container'][0] == '/')):\n self.root_container()\n return\n\n if 'Container' in query:\n # Dispatch to the container plugin\n basepath = query['Container'][0].split('/')[0]\n if self.do_command(query, command, basepath, tsn):\n return\n\n elif command == 'QueryItem':\n path = query.get('Url', [''])[0]\n splitpath = [x for x in unquote_plus(path).split('/') if x]\n if splitpath and not '..' 
in splitpath:\n if self.do_command(query, command, splitpath[0], tsn):\n return\n\n elif (command == 'QueryFormats' and 'SourceFormat' in query and\n query['SourceFormat'][0].startswith('video')):\n if config.is_ts_capable(tsn):\n self.send_xml(VIDEO_FORMATS_TS)\n else:\n self.send_xml(VIDEO_FORMATS)\n return\n\n elif command == 'QueryServer':\n self.send_xml(SERVER_INFO)\n return\n\n elif command in ('GetActiveTransferCount', 'GetTransferStatus'):\n plugin = GetPlugin('video')\n if hasattr(plugin, command):\n method = getattr(plugin, command)\n method(self, query)\n return True\n\n elif command in ('FlushServer', 'ResetServer'):\n # Does nothing -- included for completeness\n self.send_response(200)\n self.send_header('Content-Length', '0')\n self.end_headers()\n self.wfile.flush()\n return\n\n # If we made it here it means we couldn't match the request to\n # anything.\n self.unsupported(query)\n\n def send_content_file(self, path):\n lmdate = os.path.getmtime(path)\n try:\n handle = open(path, 'rb')\n except:\n self.send_error(404)\n return\n\n # Send the header\n mime = mimetypes.guess_type(path)[0]\n self.send_response(200)\n if mime:\n self.send_header('Content-Type', mime)\n self.send_header('Content-Length', os.path.getsize(path))\n self.send_header('Last-Modified', formatdate(lmdate))\n self.end_headers()\n\n # Send the body of the file\n try:\n shutil.copyfileobj(handle, self.wfile)\n except:\n pass\n handle.close()\n self.wfile.flush()\n\n def handle_file(self, query, splitpath):\n if '..' not in splitpath: # Protect against path exploits\n ## Pass it off to a plugin?\n for name, container in self.server.containers.items():\n if splitpath[0] == name:\n self.cname = name\n self.container = container\n base = os.path.normpath(container['path'])\n path = os.path.join(base, *splitpath[1:])\n plugin = GetPlugin(container['type'])\n plugin.send_file(self, path, query)\n return\n\n ## Serve it from a \"content\" directory?\n base = os.path.join(SCRIPTDIR, *splitpath[:-1])\n path = os.path.join(base, 'content', splitpath[-1])\n\n if os.path.isfile(path):\n self.send_content_file(path)\n return\n\n ## Give up\n self.send_error(404)\n\n def authorize(self, tsn=None):\n # if allowed_clients is empty, we are completely open\n allowed_clients = config.getAllowedClients()\n if not allowed_clients or (tsn and config.isTsnInConfig(tsn)):\n return True\n client_ip = self.client_address[0]\n for allowedip in allowed_clients:\n if client_ip.startswith(allowedip):\n return True\n\n self.send_fixed('Unauthorized.', 'text/plain', 403)\n return False\n\n def log_message(self, format, *args):\n # pylint: disable=redefined-builtin\n\n # we really don't need to log the \"Request timed out:\" messages\n if isinstance(args[0], socket.timeout):\n return\n\n self.server.logger.debug(\"[%s] %s %s\", self.log_date_time_string(),\n self.address_string(), format%args)\n\n def send_fixed(self, page, mime, code=200, refresh=''):\n squeeze = (len(page) > 256 and mime.startswith('text') and\n 'gzip' in self.headers.get('Accept-Encoding', ''))\n if squeeze:\n out = BytesIO()\n gzip.GzipFile(mode='wb', fileobj=out).write(page)\n page = out.getvalue()\n out.close()\n self.send_response(code)\n self.send_header('Content-Type', mime)\n self.send_header('Content-Length', len(page))\n if squeeze:\n self.send_header('Content-Encoding', 'gzip')\n self.send_header('Expires', '0')\n if refresh:\n self.send_header('Refresh', refresh)\n #uncomment for angular development in browser\n 
#self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(page)\n self.wfile.flush()\n\n def send_xml(self, page):\n if not isinstance(page, bytes):\n page = bytes(page, 'utf-8')\n\n self.send_fixed(page, 'text/xml')\n\n def send_json(self, page):\n if not isinstance(page, bytes):\n page = bytes(page, 'utf-8')\n\n self.send_fixed(page, 'application/json; charset=utf-8')\n\n def send_html(self, page, code=200, refresh=''):\n if not isinstance(page, bytes):\n page = bytes(page, 'utf-8')\n\n self.send_fixed(page, 'text/html; charset=utf-8', code, refresh)\n\n def root_container(self):\n tsn = self.headers.get('TiVo_TCD_ID', '')\n tsnshares = config.getShares(tsn)\n tsncontainers = []\n for section, settings in tsnshares:\n try:\n mime = GetPlugin(settings['type']).CONTENT_TYPE\n if mime.split('/')[1] in ('tivo-videos', 'tivo-music',\n 'tivo-photos'):\n settings['content_type'] = mime\n tsncontainers.append((section, settings))\n except Exception as msg:\n self.server.logger.error('%s - %s', section, str(msg))\n t = Template(file=os.path.join(SCRIPTDIR, 'templates', 'root_container.tmpl'))\n if self.server.beacon.bd:\n t.renamed = self.server.beacon.bd.renamed\n else:\n t.renamed = {}\n t.containers = tsncontainers\n t.hostname = socket.gethostname()\n t.escape = escape\n t.quote = quote\n self.send_xml(str(t))\n\n def infopage(self):\n t = Template(file=os.path.join(SCRIPTDIR, 'templates', 'info_page.tmpl'))\n t.version = PYTIVO_VERSION\n t.admin = ''\n\n if config.get_server('tivo_mak') and config.get_togo('path'):\n t.togo = '
Pull from TiVos:
'\n else:\n t.togo = ''\n\n for section, settings in config.getShares():\n plugin_type = settings.get('type')\n if plugin_type == 'settings':\n t.admin += ('Settings
'\n .format(quote(section)))\n elif plugin_type == 'togo' and t.togo:\n for tsn in config.tivos:\n if tsn and 'address' in config.tivos[tsn]:\n t.togo += ('{}
'\n .format(quote(section), config.tivos[tsn]['address'], config.tivos[tsn]['name']))\n\n self.send_html(str(t))\n\n def unsupported(self, query):\n message = UNSUP % '\\n'.join(['
  • %s: %s
  • ' % (key, repr(value))\n for key, value in list(query.items())])\n text = BASE_HTML % message\n self.send_html(text, code=404)\n\n def redir(self, message, seconds=2):\n url = self.headers.get('Referer')\n if url:\n message += RELOAD % (url, seconds)\n refresh = '%d; url=%s' % (seconds, url)\n else:\n refresh = ''\n text = (BASE_HTML % message).encode('utf-8')\n self.send_html(text, refresh=refresh)\n","sub_path":"httpserver.py","file_name":"httpserver.py","file_ext":"py","file_size_in_byte":15873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"83349794","text":"#This function assumes that the TPCT is current examination\n#This will be enforced because the Set Primary CT script will be\n#forced to run prior to any scripts in the import Role being executed\n\nfrom connect import *\nfrom collections import defaultdict\n\n\nclass register_images:\n def __init__(self):\n self.case = get_current(\"Case\")\n self.ui = get_current('ui')\n self.TPCT = get_current(\"Examination\")\n\n \n def perform_registration(self):\n ##Find unique frames of reference\n unique_setup_reference_points = defaultdict(list)\n for exam in self.case.Examinations:\n setup_uid = exam.EquipmentInfo.FrameOfReference\n if setup_uid != self.TPCT.EquipmentInfo.FrameOfReference:\n unique_setup_reference_points[setup_uid].append(exam.Name)\n\n if len(unique_setup_reference_points) == 0:\n show_warning('No scansets found for registration to TPCT. Image registration not performed.')\n #For each unique frame of reference, choose best scan in set \n #to use for registration to primary CT\n #'Best' scan is T1 followed by T2 for MR, CT for PetCT,\n #and just the first scan in the sequence for other/unknown scan type\n for group in unique_setup_reference_points.keys():\n\n registration_exists = self.check_for_existing_registration(self.TPCT.EquipmentInfo.FrameOfReference,self.case.Examinations[unique_setup_reference_points[group][0]].EquipmentInfo.FrameOfReference)\n if registration_exists == True:\n show_warning('Registration already exists for scans ' + ', '.join(unique_setup_reference_points[group]) + '.\\nNew registration not performed for these.\\nDelete existing registration before trying again.')\n elif registration_exists == False:\n \n #Ask user to select scan only if there is more than one scan to choose from in the group\n if len(unique_setup_reference_points[group]) > 1:\n registrationScan = self.user_registration_scan_choice_popup(group)\n else:\n registrationScan = unique_setup_reference_points[group][0]\n\n\n self.case.ComputeRigidImageRegistration(FloatingExaminationName=registrationScan, \n ReferenceExaminationName=self.TPCT.Name,\n UseOnlyTranslations=False,\n HighWeightOnBones=False,\n InitializeImages=True,\n FocusRoisNames=[],\n RegistrationName=None)\n self.ui.TitleBar.MenuItem['Patient modeling'].Button_Patient_modeling.Click()\n self.ui.TabControl_Modules.TabItem['Image registration'].Button_Image_registration.Click()\n else:\n show_warning('Registration already exists. 
New registration not performed.\\nDelete existing registration before trying again.')\n \n \n\n def user_registration_scan_choice_popup(self, group):\n #scan_dict is dictionary in which the keys are the string to display in the\n #dropdown menu that describe the scans, and the values are the exam objects\n #\n #display_list is the list of keys (ie scan descriptors) in scan_dict\n scan_dict = {}\n display_list =['']\n months = ['JAN','FEB','MAR',\"APR\",\"MAY\",'JUN','JUL','AUG','SEP', 'OCT',\n 'NOV','DEC']\n for exam in self.case.Examinations:\n if exam.EquipmentInfo.FrameOfReference == group:\n dcmdata = exam.GetAcquisitionDataFromDicom()\n date_time = exam.GetExaminationDateTime()\n date = str(date_time.Day) + months[date_time.Month] + str(date_time.Year)\n modality = exam.EquipmentInfo.Modality\n name = exam.Name\n description = dcmdata['SeriesModule']['SeriesDescription'] \n key = ' '.join((date, modality, name, description))\n scan_dict[key] = exam\n \n #create the display list\n for key in scan_dict.keys():\n if key not in display_list:\n display_list.append(key)\n\n message = modality + ' ' + date + ' Group: Please select which scan to use for registration for this scan group.'\n registration_scan_popup = popup(message, options = display_list)\n registrationScan = scan_dict[registration_scan_popup['option']].Name\n return registrationScan\n\n \n \n def check_for_existing_registration(self, TPCT_FoR, scan_FoR):\n registration_pairs = [[registration.FromFrameOfReference, registration.ToFrameOfReference] for registration in self.case.Registrations]\n if [scan_FoR, TPCT_FoR] in registration_pairs:\n return True\n else:\n return False\n\n\n\ndef do_task(**options):\n register_images().perform_registration()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"import/register_images.py","file_name":"register_images.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"517605005","text":"# Import required modules\nimport sys, time\nfrom PySide.QtGui import QApplication, QWidget, QIcon, QLabel\n\n# Our main window class\nclass SampleWindow(QWidget):\n # Constructor function\n def __init__(self):\n super(SampleWindow, self).__init__()\n self.initGUI()\n\n def setIconModes(self):\n myIcon1 = QIcon('cell.png')\n myLabel1 = QLabel('sample', self)\n pixmap1 = myIcon1.pixmap(50, 50, QIcon.Active, QIcon.On)\n myLabel1.setPixmap(pixmap1)\n myLabel1.show()\n\n myIcon2 = QIcon('cell.png')\n myLabel2 = QLabel('sample', self)\n pixmap2 = myIcon2.pixmap(50, 50, QIcon.Disabled, QIcon.Off)\n myLabel2.setPixmap(pixmap2)\n myLabel2.move(50, 0)\n myLabel2.show()\n\n myIcon3 = QIcon('cell.png')\n myLabel3 = QLabel('sample', self)\n pixmap3 = myIcon3.pixmap(50, 50, QIcon.Selected, QIcon.On)\n myLabel3.setPixmap(pixmap3)\n myLabel3.move(100, 0)\n myLabel3.show()\n\n myIcon4 = QIcon('cell.png')\n myLabel4 = QLabel('sample', self)\n pixmap4 = myIcon3.pixmap(50, 50, QIcon.Selected, QIcon.On)\n myLabel4.setPixmap(pixmap3)\n myLabel4.move(150, 0)\n myLabel4.show()\n\n def initGUI(self):\n self.setWindowTitle(\"Icon Sample\")\n self.setGeometry(300, 300, 200, 150)\n # Function to set Icon\n appIcon = QIcon('uart.png')\n self.setWindowIcon(appIcon)\n self.setIconModes()\n self.show()\n \n \n \nif __name__ == '__main__':\n # Exception Handling\n try:\n myApp = QApplication(sys.argv)\n myWindow = SampleWindow()\n myApp.exec_()\n sys.exit(0)\n except NameError:\n print(\"Name Error:\", sys.exc_info()[1])\n 
except SystemExit:\n print(\"Closing Window...\")\n except Exception:\n print(sys.exc_info()[1])\n\n\n\n\n\n","sub_path":"multi_icon.py","file_name":"multi_icon.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"569471025","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\n\nclass ErkyContract(models.Model):\n _name = 'erky.purchase.contract'\n _order = \"id desc, date desc\"\n _rec_name = 'contract_no'\n\n STATUS = [('draft', \"Purchase Contract\"),\n ('payment', \"Waiting Payment\"),\n ('done', \"Done\"),\n ('cancel', \"Canceled\")]\n\n def _get_default_currency_id(self):\n return self.env.user.company_id.currency_id.id\n\n def _get_default_tax_id_number(self):\n return self.env.user.company_id.vat\n\n def _get_default_export_partner(self):\n return self.env.user.company_id.partner_id\n\n name = fields.Char(string=\"Sequence\", default=\"New\", readonly=1)\n contract_no = fields.Char(stirng=\"Contract No\", required=1)\n date = fields.Date(\"Date\", default=fields.Date.context_today, required=1)\n importer_id = fields.Many2one(\"res.partner\", string=\"Importer Name\", required=1, domain=[('is_importer', '=', True)])\n phone_no = fields.Char(related=\"importer_id.phone\", store=True, string=\"Phone\")\n exporter_id = fields.Many2one(\"res.partner\", string=\"Exporter Name\", required=1, domain=[('is_exporter', '=', True)], default=_get_default_export_partner)\n importer_street = fields.Char(related=\"importer_id.street\", string='Street')\n importer_street2 = fields.Char(related=\"importer_id.street2\", string='Street2')\n importer_zip = fields.Char(related=\"importer_id.zip\", string='Zip', change_default=True)\n importer_city = fields.Char(related=\"importer_id.city\", string='City')\n importer_state_id = fields.Many2one(related=\"importer_id.state_id\", string='State')\n importer_country_id = fields.Many2one(related=\"importer_id.country_id\", string='Country')\n product_id = fields.Many2one(\"product.product\", \"Product\", required=1, domain=[('type', '=', 'product')])\n product_uom_id = fields.Many2one(\n 'uom.uom', 'Product Unit of Measure', related='product_id.uom_id',\n readonly=True, required=True)\n package_uom_id = fields.Many2one(\n 'uom.uom', 'Package UOM', required=True, domain=[('is_weight_packing', '=', True)])\n qty = fields.Integer(\"Qty\", default=1)\n allowed_percentage = fields.Char(default=\"(10% plus minus allowed)\")\n unit_price = fields.Float(\"Unit Price\", required=1)\n currency_id = fields.Many2one(\"res.currency\", string=\"Currency\", default=_get_default_currency_id, required=1)\n total_amount = fields.Float(\"Total Amount\", compute=\"_compute_amount_total\")\n total_amount_in_importer_curr = fields.Float(\"Total Amount In Importer Currency\", compute=\"_compute_amount_total\")\n importer_port_id = fields.Many2one(\"erky.port\", \"Discharge Port\", required=1, default=lambda self: self.env['erky.port'].search([('default_importer_port', '=', True)], limit=1), domain=[('default_importer_port', '=', True)])\n shipment_method = fields.Many2one(\"erky.shipment.method\", string=\"Shipment Method\")\n payment_method = fields.Many2one(\"erky.payment.method\", string=\"Payment Method\")\n advance_percentage = fields.Integer(string=\"Advance Percentage\")\n payment_percentage = fields.Integer(string=\"Payment Percentage\", default=15)\n payment_account_id = 
fields.Many2one(\"erky.payment.account\", string=\"Account Name\", required=1)\n account_no = fields.Char(related=\"payment_account_id.account_no\", store=True, readonly=1, string=\"Account No\")\n partner_id = fields.Many2one(related=\"payment_account_id.partner_id\", store=True, readonly=1, string=\"Company Name\")\n street = fields.Char(related=\"payment_account_id.street\", store=True, readonly=1, string='Street')\n street2 = fields.Char(related=\"payment_account_id.street2\", store=True, readonly=1, string='Street2')\n zip = fields.Char(related=\"payment_account_id.zip\", store=True, readonly=1, string='Zip', change_default=True)\n city = fields.Char(related=\"payment_account_id.city\", store=True, readonly=1, string='City')\n state_id = fields.Many2one(related=\"payment_account_id.state_id\", store=True, readonly=1, string='State')\n country_id = fields.Many2one(related=\"payment_account_id.country_id\", store=True, readonly=1, string='Country')\n account_bank_id = fields.Many2one(related=\"payment_account_id.bank_id\", store=True, readonly=1, string=\"Bank Name\")\n swift_code = fields.Char(related=\"payment_account_id.swift_code\", store=True, readonly=1, string=\"Swift Code\")\n iban = fields.Char(related=\"payment_account_id.iban\", store=True, readonly=1, string=\"IBAN\")\n account_currency_id = fields.Many2one(related=\"payment_account_id.currency_id\", store=True, readonly=1, string=\"Currency\")\n required_document_ids = fields.Many2many(\"erky.required.document\")\n contract_document_ids = fields.One2many(\"erky.contract.document\", 'purchase_contract_id')\n product_specification_ids = fields.One2many(\"contract.product.specification\", \"contract_id\")\n payment_condition = fields.Text(\"Payment Condition\", default=\"USD (price * qty * ___% = ___) In Advance Payment __% Cash Against Copy of Shipment Documents.\")\n shipment_condition = fields.Text(\"Shipment Condition\", default=\"One Month After Receiving Advance Payment.\")\n packing_condition = fields.Text(\"Packing Condition\", default=\"New pp bags of ___kgs each leaded in ___container.\")\n state = fields.Selection(STATUS, default=\"draft\", readonly=True)\n internal_contract_id = fields.Many2one(\"erky.contract\")\n export_form_ids = fields.One2many(\"erky.export.form\", 'purchase_contract_id')\n export_form_no = fields.Integer(compute=\"compute_export_forms_no\")\n more_attribute_ids = fields.One2many(\"erky.contract.attributes\", \"purchase_contract_id\")\n note = fields.Text(default=\"The undersigned seller and buyer have confirm this contract in accordance with the termsand conditions stipulated below\")\n\n _sql_constraints = [\n ('contract_no_uniq', 'unique(contract_no)', 'The contract no must be unique !'),\n ]\n\n @api.model\n def create(self, vals):\n if vals.get('name', _('New')) == _('New'):\n vals['name'] = self.env['ir.sequence'].next_by_code('erky.contract') or _('New')\n export_form_id = self._context.get('export_form_id', False)\n print (\"export for \", export_form_id, vals)\n res = super(ErkyContract, self).create(vals)\n if export_form_id:\n export_form_id = self.env['erky.export.form'].browse(export_form_id)\n export_form_id.purchase_contract_id = res.id\n return res\n\n\n @api.onchange('product_id')\n def get_default_product_price(self):\n for rec in self:\n rec.unit_price = rec.product_id.lst_price\n\n # @api.onchange(\"package_uom_id\")\n # def get_package_condition(self):\n # for rec in self:\n # rec.packing_condition = \"New pp \" + (rec.package_uom_id.name or \"____\") + \" of \" + 
str((rec.package_uom_id.net_weight_kgs) or \"___\") + (rec.package_uom_id.packing_uom_id.name or \"___\") + \" each leaded in ___container.\"\n\n @api.constrains('qty', 'unit_price')\n def check_qty_and_price(self):\n for rec in self:\n if rec.unit_price <= 0:\n raise ValidationError(\"Unit Price must be greater than zero\")\n if rec.qty <= 0:\n return ValidationError(\"Qty must be greater than zero\")\n\n @api.depends('qty', 'unit_price', 'currency_id')\n def _compute_amount_total(self):\n for rec in self:\n rec.total_amount = rec.qty * rec.unit_price\n\n @api.onchange('qty', 'unit_price', 'payment_percentage', 'advance_percentage')\n def _set_condition_data(self):\n for rec in self:\n total = rec.unit_price * rec.qty * (rec.payment_percentage/100)\n self.payment_condition = rec.currency_id.name + \" ( \" + str(rec.unit_price) + \" * \" + str(rec.qty) + \" * \" + str(rec.payment_percentage) + \"% = \" + str(total) + \" ) In Advance Payment \" + str(rec.advance_percentage) + \" % Cash Against Copy of Shipment Documents.\"\n\n @api.multi\n def action_create_mc_contract(self):\n ctx = self.env.context.copy()\n ctx.update({'default_purchase_contract_id': self.id,\n 'default_name': self.contract_no,\n 'default_importer_id': self.importer_id.id,\n 'default_exporter_id': self.exporter_id.id,\n 'default_product_id': self.product_id.id,\n 'default_qty': self.qty,\n 'default_payment_method': self.payment_method,\n 'default_importer_port_id': self.importer_port_id.id})\n return {\n 'res_model': 'erky.contract',\n 'type': 'ir.actions.act_window',\n 'context': ctx,\n 'view_mode': 'form',\n 'view_type': 'form',\n 'view_id': self.env.ref(\"erky_base.erky_contract_form_view\").id,\n 'target': 'current'\n }\n\n @api.multi\n def action_cancel(self):\n for rec in self:\n rec.state = \"cancel\"\n\n def action_open_export_forms(self):\n res = self.env['ir.actions.act_window'].for_xml_id('erky_base', 'erky_export_form_action')\n res['domain'] = [('id', '=', self.export_form_ids.ids)]\n return res\n\n @api.depends('export_form_ids')\n def compute_export_forms_no(self):\n for rec in self:\n rec.export_form_no = len(rec.export_form_ids.ids)\n\nclass ContractProductSpecification(models.Model):\n _name = \"contract.product.specification\"\n\n name = fields.Many2one(\"product.template.specification\", string=\"Attribute\", required=1)\n value = fields.Char(\"Value\")\n contract_id = fields.Many2one(\"erky.purchase.contract\")\n\n @api.onchange(\"name\")\n def attribute_domain(self):\n product_template_id = self.contract_id.product_id.product_tmpl_id\n self.value = self.name.default_value\n return {'domain': {'name': [('product_template_id', '=', product_template_id.id)]}}\n\nclass NewContractAttribures(models.Model):\n _name = \"erky.contract.attributes\"\n\n sequence = fields.Integer(\"Sequence\")\n purchase_contract_id = fields.Many2one(\"erky.purchase.contract\")\n name = fields.Char(\"Attribute\")\n value = fields.Text(\"Value\")\n\n\n\n\n\n\n\n\n\n","sub_path":"erky_base/models/erky_purchase_contract.py","file_name":"erky_purchase_contract.py","file_ext":"py","file_size_in_byte":10261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"217238493","text":"from flask import Flask, jsonify\nfrom flask_restful import reqparse, abort, Api, Resource\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef get(image):\n result = image\n return jsonify({'result': result})\n\n## Actually setup the Api resource routing here\n\nif __name__ == '__main__':\n 
app.run(debug=True)","sub_path":"web_server.py","file_name":"web_server.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"563642139","text":"import requests\r\nimport re\r\n\r\n\r\nheaders = {\r\n    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\r\n    'referer':'https://www.gushiwen.org/default.aspx?page=1',\r\n}\r\npoems = []\r\n\r\n# Scrape the data\r\ndef parse_pages(url):\r\n    response = requests.get(url,headers=headers)\r\n    text = response.text\r\n    poem = {}\r\n    titles = re.findall(r'    .*?(.*?)',text,re.DOTALL)\r\n    dynasties = re.findall(r'    .*?(.*?)',text,re.DOTALL)\r\n    authors = re.findall(r'    .*?(.*?)',text,re.DOTALL)\r\n    contents_1 = re.findall(r'    (.*?)
    ',text,re.DOTALL)\r\n contents = []\r\n for content in contents_1:\r\n x = re.sub(r'<.*?>',\"\",content)\r\n contents.append(x.strip())\r\n for value in zip(titles,dynasties,authors,contents):\r\n title, dynasty, author, content = value\r\n poem['title'] = title\r\n poem['dynasty'] = dynasty\r\n poem['author'] = author\r\n poem['content'] = content\r\n poems.append(poem)\r\n\r\n\r\ndef main():\r\n pages = int(input(\"输入页码\"))\r\n for page in range(1,pages+1):\r\n url = 'https://www.gushiwen.org/default_%s.aspx' % page\r\n parse_pages(url)\r\n for poem in poems:\r\n print(poem)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"gushiwen.py","file_name":"gushiwen.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"575055961","text":"import numpy as np\nimport random\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport scipy.misc\nimport os\nimport csv\nimport itertools\nimport tensorflow.contrib.slim as slim\nimport cv2\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport imutils\nimport time\n\n#from helper import *\n\nfrom Sim import Sim as sim\nfrom Sim import r_Sim as r_sim\n\nclass Qnetwork():\n\tdef __init__(self,h_size,rnn_cell,myScope):\n\t\t#The network recieves a frame from the game, flattened into an array.\n\t\t#It then resizes it and processes it through four convolutional layers.\n\t\tself.scalarInput = tf.placeholder(shape=[None,21168],dtype=tf.float32)\n\t\tself.imageIn = tf.reshape(self.scalarInput,shape=[-1,84,84,3])\n\t\tself.conv1 = slim.convolution2d( \\\n\t\t\tinputs=self.imageIn,num_outputs=32,\\\n\t\t\tkernel_size=[8,8],stride=[4,4],padding='VALID', \\\n\t\t\tbiases_initializer=None,scope=myScope+'_conv1')\n\t\tself.conv2 = slim.convolution2d( \\\n\t\t\tinputs=self.conv1,num_outputs=64,\\\n\t\t\tkernel_size=[4,4],stride=[2,2],padding='VALID', \\\n\t\t\tbiases_initializer=None,scope=myScope+'_conv2')\n\t\tself.conv3 = slim.convolution2d( \\\n\t\t\tinputs=self.conv2,num_outputs=64,\\\n\t\t\tkernel_size=[3,3],stride=[1,1],padding='VALID', \\\n\t\t\tbiases_initializer=None,scope=myScope+'_conv3')\n\t\tself.conv4 = slim.convolution2d( \\\n\t\t\tinputs=self.conv3,num_outputs=h_size,\\\n\t\t\tkernel_size=[7,7],stride=[1,1],padding='VALID', \\\n\t\t\tbiases_initializer=None,scope=myScope+'_conv4')\n\t\t\n\t\tself.trainLength = tf.placeholder(dtype=tf.int32)\n\t\t#We take the output from the final convolutional layer and send it to a recurrent layer.\n\t\t#The input must be reshaped into [batch x trace x units] for rnn processing, \n\t\t#and then returned to [batch x units] when sent through the upper levles.\n\t\tself.batch_size = tf.placeholder(dtype=tf.int32,shape=[])\n\t\tself.convFlat = tf.reshape(slim.flatten(self.conv4),[self.batch_size,self.trainLength,h_size])\n\t\tself.state_in = rnn_cell.zero_state(self.batch_size, tf.float32)\n\t\tself.rnn,self.rnn_state = tf.nn.dynamic_rnn(\\\n\t\t\t\tinputs=self.convFlat,cell=rnn_cell,dtype=tf.float32,initial_state=self.state_in,scope=myScope+'_rnn')\n\t\tself.rnn = tf.reshape(self.rnn,shape=[-1,h_size])\n\t\t#The output from the recurrent player is then split into separate Value and Advantage streams\n\t\tself.streamA,self.streamV = tf.split(self.rnn,2,1)\n\t\tself.AW = tf.Variable(tf.random_normal([h_size//2,5]))\n\t\tself.VW = tf.Variable(tf.random_normal([h_size//2,1]))\n\t\tself.Advantage = tf.matmul(self.streamA,self.AW)\n\t\tself.Value = 
tf.matmul(self.streamV,self.VW)\n\t\t\n\t\tself.salience = tf.gradients(self.Advantage,self.imageIn)\n\t\t#Then combine them together to get our final Q-values.\n\t\tself.Qout = self.Value + tf.subtract(self.Advantage,tf.reduce_mean(self.Advantage,axis=1,keep_dims=True))\n\t\tself.predict = tf.argmax(self.Qout,1)\n\t\t\n\t\t#Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.\n\t\tself.targetQ = tf.placeholder(shape=[None],dtype=tf.float32)\n\t\tself.actions = tf.placeholder(shape=[None],dtype=tf.int32)\n\t\tself.actions_onehot = tf.one_hot(self.actions,5,dtype=tf.float32)\n\t\t\n\t\tself.Q = tf.reduce_sum(tf.multiply(self.Qout, self.actions_onehot), axis=1)\n\t\t\n\t\tself.td_error = tf.square(self.targetQ - self.Q)\n\t\t\n\t\t#In order to only propogate accurate gradients through the network, we will mask the first\n\t\t#half of the losses for each trace as per Lample & Chatlot 2016\n\t\tself.maskA = tf.zeros([self.batch_size,self.trainLength//2])\n\t\tself.maskB = tf.ones([self.batch_size,self.trainLength//2])\n\t\tself.mask = tf.concat([self.maskA,self.maskB],1)\n\t\tself.mask = tf.reshape(self.mask,[-1])\n\t\tself.loss = tf.reduce_mean(self.td_error * self.mask)\n\t\t\n\t\tself.trainer = tf.train.AdamOptimizer(learning_rate=0.0001)\n\t\tself.updateModel = self.trainer.minimize(self.loss)\n\nclass experience_buffer():\n\tdef __init__(self, buffer_size = 1000):\n\t\tself.buffer = []\n\t\tself.buffer_size = buffer_size\n\t\n\tdef add(self,experience):\n\t\tif len(self.buffer) + 1 >= self.buffer_size:\n\t\t\tself.buffer[0:(1+len(self.buffer))-self.buffer_size] = []\n\t\tself.buffer.append(experience)\n\t\t\t\n\tdef sample(self,batch_size,trace_length):\n\t\tsampled_episodes = random.sample(self.buffer,batch_size)\n\t\tsampledTraces = []\n\t\tfor episode in sampled_episodes:\n\t\t\tpoint = np.random.randint(0,len(episode)+1-trace_length)\n\t\t\tsampledTraces.append(episode[point:point+trace_length])\n\t\tsampledTraces = np.array(sampledTraces)\n\t\treturn np.reshape(sampledTraces,[batch_size*trace_length,5])\n\nclass Agent :\n\tdef __init__(self) :\n\t\t#Setting the training parameters\n\t\tself.batch_size = 4 #How many experience traces to use for each training step.\n\t\tself.trace_length = 8 #How long each experience trace will be when training\n\t\tself.update_freq = 5 #How often to perform a training step.\n\t\tself.y = .99 #Discount factor on the target Q-values\n\t\tself.startE = 1 #Starting chance of random action\n\t\tself.endE = 0.1 #Final chance of random action\n\t\tself.anneling_steps = 10000 #How many steps of training to reduce startE to endE.\n\t\tself.num_episodes = 10000 #How many episodes of game environment to train network with.\n\t\tself.pre_train_steps = 10000 #How many steps of random actions before training begins.\n\t\tself.load_model = True #Whether to load a saved model.\n\t\tself.path = \"./drqn\" #The path to save our model to.\n\t\tself.h_size = 512 #The size of the final convolutional layer before splitting it into Advantage and Value streams.\n\t\tself.max_epLength = 300 #The max allowed length of our episode.\n\t\tself.time_per_step = 1 #Length of each step used in gif creation\n\t\tself.summaryLength = 100 #Number of epidoes to periodically save for analysis\n\t\tself.tau = 0.001\n\n\t\t# for Tracking\n\t\tself.cap = None\n\t\tself.col = -1\n\t\tself.width = -1\n\t\tself.row = -1\n\t\tself.height = -1\n\t\tself.frame = None\n\t\tself.frame2 = None\n\t\tself.inputmode = False\n\t\tself.rectangle = 
False\n\t\tself.trackWindow = None\n\t\tself.roi_hist= None\n\t\tself.roi = None\n\t\tself.caffe_model_path = './MobileNetSSD_deploy.caffemodel'\n\t\tself.prorotxt_path = './MobileNetSSD_deploy.prototxt.txt'\n\t\tself.net = None\n\t\tself.obstacle_points = []\n\t\tself.target_point = None\n\t\tself.obstacle_box_color = (0, 0, 255)\n\t\tself.tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']\n\t\tself.tracker_type = self.tracker_types[1]\n\n\t\t# the original used a chain of independent if-statements with a trailing else,\n\t\t# which always overwrote the chosen tracker with MedianFlow; elif fixes that\n\t\tif self.tracker_type == 'BOOSTING':\n\t\t\tself.tracker = cv2.TrackerBoosting_create()\n\t\telif self.tracker_type == 'MIL':\n\t\t\tself.tracker = cv2.TrackerMIL_create()\n\t\telif self.tracker_type == 'KCF':\n\t\t\tself.tracker = cv2.TrackerKCF_create()\n\t\telif self.tracker_type == 'TLD':\n\t\t\tself.tracker = cv2.TrackerTLD_create()\n\t\telse :\n\t\t\tself.tracker = cv2.TrackerMedianFlow_create()\n\n\t\tself.game = sim(200, True)\n\n\tdef processState(self, states) :\n\t\treturn np.reshape(states,[21168])\n\n\tdef updateTargetGraph(self, tfVars, tau) :\n\t\ttotal_vars = len(tfVars)\n\t\top_holder = []\n\t\tfor idx,var in enumerate(tfVars[0:total_vars//2]):\n\t\t\top_holder.append(tfVars[idx+total_vars//2].assign((var.value()*tau) + ((1-tau)*tfVars[idx+total_vars//2].value())))\n\t\treturn op_holder\n\n\tdef updateTarget(self, op_holder, sess) :\n\t\tfor op in op_holder:\n\t\t\tsess.run(op)\n\n\tdef Check_path(self, path) :\n\t\tif not os.path.exists(path) :\n\t\t\tos.makedirs(path)\n\n\t# def saveToCenter(self, i, rList, bufferArray, summaryLength, h_size, sess, mainQN, time_per_step) :\n\t# with open('.Center/log.csv', 'a') as myfile :\n\t# state_display = (np.zeros([1, h_size]), np.zeros([1, h_size]))\n\t# imagesS = []\n\n\t# for idx, z in enumerate(np.vstack(bufferArray[:, 0])) :\n\n\t# img, state_display = sess.run([])\n\n\n\n\tdef Train(self) :\n\t\ttf.reset_default_graph()\n\t\t#We define the cells for the primary and target q-networks\n\t\tcell = tf.contrib.rnn.BasicLSTMCell(num_units=self.h_size,state_is_tuple=True)\n\t\tcellT = tf.contrib.rnn.BasicLSTMCell(num_units=self.h_size,state_is_tuple=True)\n\t\tmainQN = Qnetwork(self.h_size,cell,'main')\n\t\ttargetQN = Qnetwork(self.h_size,cellT,'target')\n\n\t\tinit = tf.global_variables_initializer()\n\n\t\tsaver = tf.train.Saver(max_to_keep=5)\n\n\t\ttrainables = tf.trainable_variables()\n\n\t\ttargetOps = self.updateTargetGraph(trainables,self.tau)\n\n\t\tmyBuffer = experience_buffer()\n\n\t\t#Set the rate of random action decrease. 
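\n\t\t# NOTE (added comment, not in the original source): with startE=1.0, endE=0.1 and\n\t\t# anneling_steps=10000, stepDrop = (1.0 - 0.1)/10000 = 9e-05 per step, so the\n\t\t# exploration rate e decays linearly from 1.0 to 0.1 over the first 10,000\n\t\t# steps taken after pre-training.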
\n\t\te = self.startE\n\t\tstepDrop = (self.startE - self.endE)/self.anneling_steps\n\n\n\t\t#create lists to contain total rewards and steps per episode\n\t\tjList = []\n\t\trList = []\n\t\ttotal_steps = 0\n\n\t\t#Make a path for our model to be saved in.\n\t\tif not os.path.exists(self.path):\n\t\t\tos.makedirs(self.path)\n\n\t\t##Write the first line of the master log-file for the Control Center\n\t\twith open('./Center/log.csv', 'a') as myfile:\n\t\t\twr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n\t\t\twr.writerow(['Episode','Length','Reward','IMG','LOG','SAL']) \n\t\tself.net = cv2.dnn.readNetFromCaffe(self.prorotxt_path ,self.caffe_model_path)\n\t\tCLASSES = ['bottle', \"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\"sofa\", \"train\", \"tvmonitor\" ]\n\n\t\twith tf.Session() as sess:\n\t\t\tsess.run(init)\n\t\t\tif self.load_model == True:\n\t\t\t\tprint ('Loading Model...')\n\t\t\t\tckpt = tf.train.get_checkpoint_state(self.path)\n\t\t\t\tsaver.restore(sess,ckpt.model_checkpoint_path)\n\n\t\t\tself.updateTarget(targetOps, sess) #Set the target network to be equal to the primary network.\n\t\t\tfor i in range(self.num_episodes):\n\n\t\t\t\tself.col = -1\n\t\t\t\tself.width = -1\n\t\t\t\tself.row = -1\n\t\t\t\tself.height = -1\n\t\t\t\tself.frame = None\n\t\t\t\tself.frame2 = None\n\t\t\t\tself.inputmode = False\n\t\t\t\tself.rectangle = False\n\t\t\t\tself.trackWindow = None\n\t\t\t\tself.roi_hist = None\n\t\t\t\tself.roi = None\n\n\t\t\t\tself.cap = VideoStream('').start()\n\t\t\t\ttime.sleep(2.0)\n\t\t\t\tfps = FPS().start()\n\n\t\t\t\tcv2.namedWindow('frame')\n\t\t\t\tcv2.setMouseCallback('frame', self.onMouse, param = (self.frame, self.frame2))\n\n\t\t\t\tepisodeBuffer = []\n\t\t\t\t#Reset environment and get first new observation\n\t\t\t\tsP = self.game.Reset()\n\t\t\t\ts = self.processState(sP)\n\t\t\t\td = False\n\t\t\t\trAll = 0\n\t\t\t\tj = 0\n\t\t\t\tstate = (np.zeros([1,self.h_size]),np.zeros([1,self.h_size])) #Reset the recurrent layer's hidden state\n\t\t\t\t#The Q-Network\n\t\t\t\twhile j < self.max_epLength: \n\t\t\t\t\tj+=1\n\n\n\t\t\t\t\tis_game_start = False\n\t\t\t\t\tself.frame = self.cap.read()\n\t\t\t\t\t#print(self.frame)\n\n\t\t\t\t\tself.frame = imutils.resize(self.frame, width = 200, height = 200)\n\n\t\t\t\t\t(h, w) = self.frame.shape[:2]\n\t\t\t\t\tblob = cv2.dnn.blobFromImage(cv2.resize(self.frame, (300, 300)), 0.007843, (300, 300), 127.5)\n\n\t\t\t\t\tself.net.setInput(blob)\n\t\t\t\t\tdetections = self.net.forward()\n\n\t\t\t\t\tself.obstacle_points = []\n\t\t\t\t\tfor x in np.arange(0, detections.shape[2]) :\n\t\t\t\t\t\tconfidence = detections[0, 0, x, 2]\n\n\t\t\t\t\t\tif confidence > 0.2 : ### set for changing\n\t\t\t\t\t\t\tidx = int(detections[0, 0, x, 1])\n\t\t\t\t\t\t\tbox = detections[0, 0, x, 3:7] * np.array([w, h, w, h])\n\t\t\t\t\t\t\t(startX, startY, endX, endY) = box.astype('int')\n\n\t\t\t\t\t\t\tlabel = \"{}: {:.2f}%\".format('obstacle', confidence * 100)\n\t\t\t\t\t\t\tcv2.rectangle(self.frame, (startX, startY), (endX, endY), self.obstacle_box_color, 2)\n\t\t\t\t\t\t\tself.obstacle_points.append({'row' : startY, 'col' : startX, 'row_size' : endY - startY, 'col_size' : endX - startX})\n\n\t\t\t\t\tif self.trackWindow is not None :\n\t\t\t\t\t\tok, self.trackWindow = self.tracker.update(self.frame)\n\n\t\t\t\t\t\tif ok :\n\t\t\t\t\t\t\tx, y, w, h = 
self.trackWindow\n\t\t\t\t\t\t\tx, y, w, h = int(x), int(y), int(w), int(h)\n\t\t\t\t\t\t\tself.target_point = {'row' : int((2*y+h)/2), 'col' : int((2*x+w)/2)}\n\t\t\t\t\t\t\tcv2.rectangle(self.frame, (x, y), (x+w, y+h), (0, 255, 0), 3)\n\t\t\t\t\t\t\tis_game_start = True\n\t\t\t\t\t\telse : \n\t\t\t\t\t\t\tcv2.putText(self.frame, \"Tracking failure detected\", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\n\t\t\t\t\t\t\tself.target_point = {'row' : -1, 'col' : -1}\n\n\t\t\t\t\tshow_frame = cv2.resize(self.frame, None, fx = 2, fy = 2)\n\n\t\t\t\t\tcv2.imshow('frame', show_frame)\n\n\t\t\t\t\tprint(self.target_point)\n\n\t\t\t\t\tkey = cv2.waitKey(60) & 0xFF\n\n\t\t\t\t\tif key == ord('i') :\n\t\t\t\t\t\tprint('select target')\n\t\t\t\t\t\tself.inputmode = True\n\t\t\t\t\t\tself.frame2 = self.frame.copy()\n\n\t\t\t\t\t\twhile self.inputmode :\n\t\t\t\t\t\t\tcv2.imshow('frame', self.frame)\n\t\t\t\t\t\t\tcv2.waitKey(0)\n\n\t\t\t\t\tfps.update() ### not sure this is the right place for the FPS update\n\n\t\t\t\t\tif not is_game_start :\n\t\t\t\t\t\tj -= 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse : \n\t\t\t\t\t\tself.game.Update(self.target_point, self.obstacle_points)\n\n\t\t\t\t\t#Choose an action by greedily (with e chance of random action) from the Q-network\n\t\t\t\t\tif np.random.rand(1) < e or total_steps < self.pre_train_steps:\n\t\t\t\t\t\tstate1 = sess.run(mainQN.rnn_state, feed_dict={mainQN.scalarInput:[s/255.0], mainQN.trainLength:1, mainQN.state_in:state, mainQN.batch_size:1})\n\t\t\t\t\t\ta = np.random.randint(0,5)\n\t\t\t\t\telse:\n\t\t\t\t\t\ta, state1 = sess.run([mainQN.predict,mainQN.rnn_state],\\\n\t\t\t\t\t\t\tfeed_dict={mainQN.scalarInput:[s/255.0],mainQN.trainLength:1,mainQN.state_in:state,mainQN.batch_size:1})\n\t\t\t\t\t\ta = a[0]\n\n\t\t\t\t\ts1P,r,d = self.game.Step(a)\n\t\t\t\t\ts1 = self.processState(s1P)\n\t\t\t\t\ttotal_steps += 1\n\t\t\t\t\tepisodeBuffer.append(np.reshape(np.array([s,a,r,s1,d]),[1,5]))\n\t\t\t\t\tif total_steps > self.pre_train_steps:\n\t\t\t\t\t\tif e > self.endE:\n\t\t\t\t\t\t\te -= stepDrop\n\n\t\t\t\t\t\tif total_steps % (self.update_freq) == 0:\n\t\t\t\t\t\t\tself.updateTarget(targetOps,sess)\n\t\t\t\t\t\t\t#Reset the recurrent layer's hidden state\n\t\t\t\t\t\t\tstate_train = (np.zeros([self.batch_size,self.h_size]),np.zeros([self.batch_size,self.h_size])) \n\n\t\t\t\t\t\t\ttrainBatch = myBuffer.sample(self.batch_size, self.trace_length) #Get a random batch of experiences.\n\t\t\t\t\t\t\t#Below we perform the Double-DQN update to the target Q-values
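\n\t\t\t\t\t\t\t# NOTE (added comment, not in the original source): Double DQN decouples action\n\t\t\t\t\t\t\t# selection from evaluation. Q1 picks argmax_a Q_main(s', a) with the online net,\n\t\t\t\t\t\t\t# Q2 = Q_target(s', .) evaluates that action, and end_multiplier = 1 - d zeroes the\n\t\t\t\t\t\t\t# bootstrap term on terminal steps, i.e. targetQ = r + y * Q_target(s', argmax_a Q_main(s', a)) * (1 - d).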
\n\t\t\t\t\t\t\tQ1 = sess.run(mainQN.predict,feed_dict={mainQN.scalarInput:np.vstack(trainBatch[:,3]/255.0),mainQN.trainLength:self.trace_length,mainQN.state_in:state_train,mainQN.batch_size: self.batch_size})\n\t\t\t\t\t\t\tQ2 = sess.run(targetQN.Qout,feed_dict={targetQN.scalarInput:np.vstack(trainBatch[:,3]/255.0),targetQN.trainLength:self.trace_length,targetQN.state_in:state_train,targetQN.batch_size: self.batch_size})\n\t\t\t\t\t\t\tend_multiplier = -(trainBatch[:,4] - 1)\n\t\t\t\t\t\t\tdoubleQ = Q2[range(self.batch_size*self.trace_length),Q1]\n\t\t\t\t\t\t\ttargetQ = trainBatch[:,2] + (self.y*doubleQ * end_multiplier)\n\t\t\t\t\t\t\t#Update the network with our target values.\n\t\t\t\t\t\t\tsess.run(mainQN.updateModel, feed_dict={mainQN.scalarInput:np.vstack(trainBatch[:,0]/255.0),mainQN.targetQ:targetQ, mainQN.actions:trainBatch[:,1],mainQN.trainLength:self.trace_length, mainQN.state_in:state_train,mainQN.batch_size:self.batch_size})\n\t\t\t\t\trAll += r\n\t\t\t\t\ts = s1\n\t\t\t\t\tsP = s1P\n\t\t\t\t\tstate = state1\n\t\t\t\t\tif d == True:\n\n\t\t\t\t\t\tbreak\n\t\t\t\tprint(str(i) + '-th score : ' + str(rAll) + ' / episode : ' + str(j))\n\t\t\t\tself.game.Print_action_log()\n\t\t\t\t#Add the episode to the experience buffer\n\t\t\t\tbufferArray = np.array(episodeBuffer)\n\t\t\t\tepisodeBuffer = list(zip(bufferArray))\n\t\t\t\tmyBuffer.add(episodeBuffer)\n\t\t\t\tjList.append(j)\n\t\t\t\trList.append(rAll)\n\t\t\t\tf = open('graph.txt', 'a')\n\t\t\t\tf.write(str(rAll))\n\t\t\t\tf.write('\\n')\n\t\t\t\tf.close()\n\n\t\t\t\t#Periodically save the model. \n\t\t\t\tif i % 90 == 0 and i != 0:\n\t\t\t\t\tsaver.save(sess,self.path+'/model-'+str(i)+'.cptk')\n\t\t\t\t\tprint (\"Saved Model\")\n\t\t\t\tif len(rList) % self.summaryLength == 0 and len(rList) != 0:\n\t\t\t\t\tprint (total_steps, np.mean(rList[-self.summaryLength:]), e)\n\t\t\t\t\t#self.saveToCenter(i, rList, jList,np.reshape(np.array(episodeBuffer),[len(episodeBuffer),5]),self.summaryLength,self.h_size,sess, mainQN, self.time_per_step)\n\t\t\tsaver.save(sess,self.path+'/model-'+str(i)+'.cptk')\n\n\n\tdef onMouse(self, event, x, y, flags, param) :\n\t\tif self.inputmode : \n\t\t\tif event == cv2.EVENT_LBUTTONDOWN :\n\t\t\t\t#print('DOWN')\n\t\t\t\tself.rectangle = True\n\t\t\t\tself.col, self.row = x, y\n\n\t\t\telif event == cv2.EVENT_MOUSEMOVE :\n\t\t\t\t#print('MOVE')\n\t\t\t\tif self.rectangle :\n\t\t\t\t\t#print('Move - rec+true')\n\t\t\t\t\tself.frame = self.frame2.copy()\n\t\t\t\t\tcv2.rectangle(self.frame, (self.col, self.row), (x, y), (0, 255, 0), 2)\n\t\t\t\t\tcv2.imshow('frame', self.frame)\n\n\t\t\telif event == cv2.EVENT_LBUTTONUP :\n\t\t\t\t#print('UP')\n\t\t\t\tself.inputmode = False\n\t\t\t\tself.rectangle = False\n\t\t\t\tcv2.rectangle(self.frame, (self.col, self.row), (x, y), (0, 255, 0), 2)\n\t\t\t\tself.height, self.width = abs(self.row - y), abs(self.col - x)\n\t\t\t\tself.trackWindow = (self.col, self.row, self.width, self.height)\n\t\t\t\tself.roi = self.frame[self.row : self.row + self.height, self.col : self.col+self.width]\n\t\t\t\tok = self.tracker.init(self.frame, self.trackWindow)\n\t\t\t\t# roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n\t\t\t\t# self.roi_hist = cv2.calcHist([roi], [0], None, [180], [0, 180])\n\t\t\t\t# cv2.normalize(self.roi_hist, self.roi_hist, 0, 255, cv2.NORM_MINMAX)\n\n\t\treturn\n\n\tdef Replay(self) :\n\t\te = 0.01 #The chance of choosing a random action\n\t\tnum_episodes = 10000 #How many episodes of game environment to train network with.\n\t\tload_model = True #Whether to load a saved model.\n\t\tpath = \"./drqn\" #The path to save/load our model to/from.\n\t\th_size = 512 #The size of the final convolutional layer before splitting it into Advantage and Value streams.\n\t\tmax_epLength = 50 #The max allowed length of our episode.\n\t\ttime_per_step = 1 #Length of each step used in gif creation\n\t\tsummaryLength = 100 #Number of episodes to periodically save for analysis\n\n\t\ttf.reset_default_graph()\n\t\tcell = tf.contrib.rnn.BasicLSTMCell(num_units=h_size,state_is_tuple=True)\n\t\tcellT = tf.contrib.rnn.BasicLSTMCell(num_units=h_size,state_is_tuple=True)\n\t\tmainQN = Qnetwork(h_size,cell,'main')\n\t\ttargetQN = Qnetwork(h_size,cellT,'target')\n\n\t\tinit = tf.global_variables_initializer()\n\n\t\tsaver = tf.train.Saver(max_to_keep=2)\n\n\t\tgame = r_sim(200)\n\n\t\t#create lists to contain total rewards and steps per episode\n\t\tjList = 
[]\n\t\trList = []\n\t\ttotal_steps = 0\n\n\t\t#Make a path for our model to be saved in.\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\n\t\t##Write the first line of the master log-file for the Control Center\n\t\twith open('./Center/log.csv', 'a') as myfile:\n\t\t\twr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n\t\t\twr.writerow(['Episode','Length','Reward','IMG','LOG','SAL']) \n\t\tprint('load_detector...')\n\t\tself.net = cv2.dnn.readNetFromCaffe(self.prorotxt_path ,self.caffe_model_path)\n\t\tCLASSES = ['bottle', \"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\"sofa\", \"train\", \"tvmonitor\" ]\n\t\t\t\n\t\twith tf.Session() as sess:\n\t\t\tprint ('Loading Model...')\n\t\t\tckpt = tf.train.get_checkpoint_state(path)\n\t\t\tsaver.restore(sess,ckpt.model_checkpoint_path)\n\t\t\n\t\t\tfor i in range(num_episodes):\n\t\t\t\t#Set Video\n\t\t\t\tself.col = -1\n\t\t\t\tself.width = -1\n\t\t\t\tself.row = -1\n\t\t\t\tself.height = -1\n\t\t\t\tself.frame = None\n\t\t\t\tself.frame2 = None\n\t\t\t\tself.inputmode = False\n\t\t\t\tself.rectangle = False\n\t\t\t\tself.trackWindow = None\n\t\t\t\tself.roi_hist = None\n\t\t\t\tself.roi = None\n\n\t\t\t\tself.cap = VideoStream('http://192.168.137.2:8080/?action=stream').start()\n\t\t\t\ttime.sleep(2.0)\n\t\t\t\tfps = FPS().start()\n\n\t\t\t\tcv2.namedWindow('frame')\n\t\t\t\tcv2.setMouseCallback('frame', self.onMouse, param = (self.frame, self.frame2))\n\n\t\t\t\tepisodeBuffer = []\n\t\t\t\t#Reset environment and get first new observation\n\t\t\t\tsP = game.Reset()\n\t\t\t\ts = self.processState(sP)\n\t\t\t\td = False\n\t\t\t\trAll = 0\n\t\t\t\tj = 0\n\t\t\t\tstate = (np.zeros([1,h_size]),np.zeros([1,h_size]))\n\t\t\t\t#The Q-Network\n\t\t\t\twhile True : #If the agent takes longer than 200 moves to reach either of the blocks, end the trial.\n\t\t\t\t\tj+=1\n\t\t\t\t\t#Choose an action by greedily (with e chance of random action) from the Q-network\n\t\t\t\t\tis_game_start = False\n\t\t\t\t\tself.frame = self.cap.read()\n\t\t\t\t\t#print(self.frame)\n\n\t\t\t\t\tself.frame = imutils.resize(self.frame, width = 200, height = 200)\n\n\t\t\t\t\t(h, w) = self.frame.shape[:2]\n\t\t\t\t\tblob = cv2.dnn.blobFromImage(cv2.resize(self.frame, (300, 300)), 0.007843, (300, 300), 127.5)\n\n\t\t\t\t\tself.net.setInput(blob)\n\t\t\t\t\tdetections = self.net.forward()\n\n\t\t\t\t\tself.obstacle_points = []\n\t\t\t\t\tfor x in np.arange(0, detections.shape[2]) :\n\t\t\t\t\t\tconfidence = detections[0, 0, x, 2]\n\n\t\t\t\t\t\tif confidence > 0.2 : ### set for changing\n\t\t\t\t\t\t\tidx = int(detections[0, 0, x, 1])\n\t\t\t\t\t\t\tbox = detections[0, 0, x, 3:7] * np.array([w, h, w, h])\n\t\t\t\t\t\t\t(startX, startY, endX, endY) = box.astype('int')\n\n\t\t\t\t\t\t\tlabel = \"{}: {:.2f}%\".format('obstacle', confidence * 100)\n\t\t\t\t\t\t\tcv2.rectangle(self.frame, (startX, startY), (endX, endY), self.obstacle_box_color, 2)\n\t\t\t\t\t\t\tself.obstacle_points.append({'row' : startY, 'col' : startX, 'row_size' : endY - startY, 'col_size' : endX - startX})\n\n\t\t\t\t\tif self.trackWindow is not None :\n\t\t\t\t\t\tok, self.trackWindow = self.tracker.update(self.frame)\n\n\t\t\t\t\t\tif ok :\n\t\t\t\t\t\t\tx, y, w, h = self.trackWindow\n\t\t\t\t\t\t\tx, y, w, h = int(x), int(y), int(w), int(h)\n\t\t\t\t\t\t\tself.target_point = {'row' : int((2*y+h)/2), 'col' : 
int((2*x+w)/2)}\n\t\t\t\t\t\t\tcv2.rectangle(self.frame, (x, y), (x+w, y+h), (0, 255, 0), 3)\n\n\t\t\t\t\t\t\tis_game_start = True\n\n\t\t\t\t\t\telse : \n\t\t\t\t\t\t\tcv2.putText(self.frame, \"Tracking failure detected\", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\n\t\t\t\t\t\t\tself.target_point = {'row' : -1, 'col' : -1}\n\n\t\t\t\t\tif self.target_point is not None and self.target_point['row'] == -1 :\n\t\t\t\t\t\tis_game_start = True\n\n\t\t\t\t\tshow_frame = cv2.resize(self.frame, None, fx = 2, fy = 2)\n\n\t\t\t\t\tcv2.imshow('frame', show_frame)\n\n\t\t\t\t\tprint(self.target_point)\n\n\t\t\t\t\tkey = cv2.waitKey(60) & 0xFF\n\n\t\t\t\t\tif key == ord('i') :\n\t\t\t\t\t\tprint('select target')\n\t\t\t\t\t\tself.inputmode = True\n\t\t\t\t\t\tself.frame2 = self.frame.copy()\n\n\t\t\t\t\t\twhile self.inputmode :\n\t\t\t\t\t\t\tcv2.imshow('frame', self.frame)\n\t\t\t\t\t\t\tcv2.waitKey(0)\n\n\t\t\t\t\tfps.update() ### not sure this is the right place for the FPS update\n\n\t\t\t\t\tprint(is_game_start)\n\n\t\t\t\t\tif not is_game_start :\n\t\t\t\t\t\tj -= 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse : \n\t\t\t\t\t\tgame.Update(self.target_point, self.obstacle_points)\n\n\t\t\t\t\ta, state1 = sess.run([mainQN.predict,mainQN.rnn_state],\\\n\t\t\t\t\t\tfeed_dict={mainQN.scalarInput:[s/255.0],mainQN.trainLength:1,\\\n\t\t\t\t\t\tmainQN.state_in:state,mainQN.batch_size:1})\n\n\t\t\t\t\t#a = game.getting_fake_action()\n\n\t\t\t\t\ta = a[0]\n\n\t\t\t\t\tprint('a : ' + str(a))\n\n\t\t\t\t\ts1P,r,d = game.Step(a)\n\t\t\t\t\ts1 = self.processState(s1P)\n\t\t\t\t\ttotal_steps += 1\n\t\t\t\t\tepisodeBuffer.append(np.reshape(np.array([s,a,r,s1,d]),[1,5])) #Save the experience to our episode buffer.\n\t\t\t\t\trAll += r\n\t\t\t\t\ts = s1\n\t\t\t\t\tsP = s1P\n\t\t\t\t\tstate = state1\n\t\t\t\t\tif d == True:\n\n\t\t\t\t\t\tbreak\n\n\t\t\t\tbufferArray = np.array(episodeBuffer)\n\t\t\t\tjList.append(j)\n\t\t\t\trList.append(rAll)\n\n\t\t\t\t#Periodically save the model. \n\t\t\t\tif len(rList) % summaryLength == 0 and len(rList) != 0:\n\t\t\t\t\tprint (total_steps,np.mean(rList[-summaryLength:]), e)\n\t\t\t\t\t# NOTE: saveToCenter is not defined in this file (the helper above is commented out),\n\t\t\t\t\t# so the call is left disabled to avoid a NameError.\n\t\t\t\t\t#saveToCenter(i,rList,jList,np.reshape(np.array(episodeBuffer),[len(episodeBuffer),5]),\\\n\t\t\t\t\t#\tsummaryLength,h_size,sess,mainQN,time_per_step)\n\t\tprint (\"Percent of successful episodes: \" + str(sum(rList)/num_episodes) + \"%\")\n\t\t","sub_path":"Last file/For_replay/DQN_REPLY_R.py","file_name":"DQN_REPLY_R.py","file_ext":"py","file_size_in_byte":22755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"602851944","text":"# *****************************************************************************\n# Copyright (c) 2019, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\nimport numpy as np\nimport time\nimport pandas as pd\nimport sdc\n\n\n@sdc.jit\ndef accel_infer(n):\n\n    t1 = time.time()\n    X = np.random.ranf(n)\n    Y = np.random.ranf(n)\n    Z = np.random.ranf(n)\n\n    df = pd.DataFrame({'X': X, 'Y': Y, 'Z': Z})\n\n    g = 9.81\n    df['accel'] = np.sqrt(df.X**2 + df.Y**2 + (df.Z - g)**2)\n    threshold = df.accel.mean() + 5 * df.accel.std()\n    df['is_brake'] = (df.rolling(10)['accel'].mean() > threshold)\n\n    df.is_brake.fillna(False, inplace=True)\n    checksum = df.is_brake.sum()\n    t2 = time.time()\n    print(\"exec time:\", t2 - t1)\n    return checksum\n\n\nn = 10**8\naccel_infer(n)\n","sub_path":"examples/accel_example.py","file_name":"accel_example.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"192588588","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n# Keep running into errors no matter how much I rewrite this\n\nimport sys\nimport numpy as np\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import Variable, Chain, ChainList, Link, optimizers\nfrom collections import defaultdict\n\nclass MLP(Chain):\n    def __init__(self, d_Vocab, d_Hidden):\n        super(MLP, self).__init__(\n            L1 = L.Linear(d_Vocab, d_Hidden), # map the vocabulary dimension to the hidden dimension\n            L2 = L.Linear(d_Hidden, d_Hidden), # hidden dimension to hidden dimension\n            L3 = L.Linear(d_Hidden, 1) # hidden dimension to the output dimension\n        )\n    def __call__(self, data):\n#    def forward(self,data):\n        h1 = F.tanh(self.L1(data))\n        h2 = F.tanh(self.L2(h1))\n        h3 = self.L3(h2)\n        return h3\n\n\ndef make_w2id(input_file):\n\n    word2id = defaultdict(lambda: len(word2id))\n\n    for line in input_file:\n        label, sentence = line.strip().split(\"\\t\")\n        words = sentence.split(\" \") \n        for word in words:\n            word2id[word]\n\n    input_file.seek(0)\n    return word2id\n\n\ndef make_feature(sentence, word2id):\n\n    d_Vocab = len(word2id)\n    feature = np.zeros(d_Vocab, dtype=np.int32)\n\n    words = sentence.split(\" \")\n    for word in words:\n        position = word2id[word]\n        feature[position] += 1\n\n    return np.array([feature], dtype=np.float32)\n\ndef Neural_Net(input_file, epoch, d_Hidden):\n    word2id = make_w2id(input_file)\n\n    model = L.Classifier(MLP(len(word2id), d_Hidden))\n#    model = MLP(len(word2id), d_Hidden)\n#    print(model.L1.W.data)\n#    print(model.L2.W.data)\n#    print(model.L3.W.data)\n#    for line in input_file:\n#        sentence = line.strip().split(\"\\t\")[1]\n#        feature = Variable(make_feature(sentence, word2id))\n#        print(feature.data)\n#        a = model.forward(feature)\n#        print(a.data)\n\n\n    optimizer = optimizers.SGD()\n    optimizer.setup(model)\n\n    for each in range(epoch):\n        print(\"epoch {}\".format(each+1))\n        for line in input_file:\n            label, sentence = line.strip().split(\"\\t\")\n            label = Variable(np.array([int(label)], dtype=np.int32))\n            feature = Variable(make_feature(sentence, word2id))\n            #print(type(label))\n            #print(type(feature))\n            #print(model)\n            #exit()\n            optimizer.update(model, feature, label)\n\n        
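# NOTE (added comment, not in the original source): rewind the file handle so the\n        # next epoch's inner loop can iterate over the training lines again\n        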
input_file.seek(0)\n\n    return None\n\ndef main():\n    train_file = open(sys.argv[1], \"r\")\n#    test_file = open(sys.argv[2], \"r\")\n    weight = Neural_Net(train_file, epoch=5, d_Hidden=3) # arguments: input file, number of epochs, hidden-layer dimension\n    train_file.close()\n#    test_file.close()\n\nif __name__==\"__main__\":\n    main()\n","sub_path":"NN_chainer_botsu.py","file_name":"NN_chainer_botsu.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"326968758","text":"#Python script to create a combined Firmware + Bootcode hex file\n#Find the length of the hex file\ndef file_len(fname):\n    with open(fname) as f:\n        for i, l in enumerate(f):\n            pass\n    return i + 1\n\n#Enter the Firmware File\nfirm_file=raw_input('Enter the firmware file:')\nfirm_noce = file_len(firm_file) -1\t\t\t\t\t#Read the no of lines in the Firmware File\nfirm_inp = open(firm_file)\t\t\t\t\t\t #Open the Firmware File\nfirm_inp_lines = firm_inp.readlines()\t\t\t\t#Read the lines of the Firmware File\n\n#Open Bootcode file\nboot_file=raw_input('Enter the Bootcode file:')\nboot_noce = file_len(boot_file) -1\t\t\t\t\t#Read the no of lines in the Bootcode File\nboot_inp = open(boot_file)\t\t\t\t\t\t#Open the Bootcode File\nboot_inp_lines = boot_inp.readlines()\t\t\t\t#Read the lines of the Bootcode File\n\ni=0\nbreak_count =0\nwhile(i < 30720):\n\tfirm_line = firm_inp_lines[i]\n\tboot_inp_lines[i] = firm_line\n\ti = i + 1\n\n#Creating Firmware + Bootcode File\nfirm_boot_out = open('firm_boot_code.hex','wb')\nfirm_boot_out.writelines(boot_inp_lines)\nfirm_boot_out.close()\nfirm_inp.close()\nboot_inp.close()\n\t","sub_path":"Flash_download/firm_boot_creation.py","file_name":"firm_boot_creation.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"419964835","text":"import json\nimport sys\n\n\ndef add_common_arguments(parser):\n    parser.add_argument(\"--result-file\", dest=\"result_file\",\n                        help=\"File for storing json results of Universum run. Set it to \\\"${CODE_REPORT_FILE}\\\" \"\n                        \"for running from Universum, variable will be handled during run. 
If you run this \"\n \"script separately from Universum, just name the result file or leave it empty.\")\n\n\ndef analyzers_output(json_file: str, issues_loads) -> None:\n issues = json.dumps(issues_loads, indent=4)\n if json_file:\n open(json_file, \"w\").write(issues)\n else:\n sys.stdout.write(issues)\n","sub_path":"universum/analyzers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"545758597","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.pipeline import Pipeline\nimport numpy as np\n\nimport json\n\ndef train(model, tweet):\n return model.predict(tweet)\n\ndef get_model():\n training, target = (get_training())\n text_clf = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MultinomialNB())\n ])\n\n text_clf_svm = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf-svm', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, random_state=42)),\n ])\n\n return text_clf_svm.fit(training, target)\n\ndef get_training():\n with open(\"bot/training.json\", \"r\") as training_json:\n training_data = training_json.read()\n training_json.close()\n\n training = []\n target = []\n training_data = json.loads(training_data)\n for status in training_data:\n training.append(str(f'{status[\"training\"][\"hasUrl\"]} ' +\n f'{status[\"training\"][\"user\"]} ' + \n f'{status[\"training\"][\"text\"]} ' + \n f'{status[\"training\"][\"isReply\"]}'))\n\n target.append(status[\"target\"])\n\n return training, target\n","sub_path":"bot/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"454004728","text":"\nfrom scanstotext.StaccaredProject import PdfInput\nfrom scanstotext.Tesseract import Tesseract\nfrom optparse import OptionParser\nimport string\nimport random\nimport json\nimport sys\n\n\ndef main():\n input_file = get_opts()\n file_id = get_random_string(8)\n\n texts = []\n\n pdf_texts = extract_pdf_texts(input_file)\n texts.extend(pdf_texts)\n\n ocr_tesseract_texts = extract_ocr_tesseract_texts(input_file)\n texts.extend(ocr_tesseract_texts)\n\n file = {\n \"filename\": input_file,\n \"file_id\": file_id,\n \"texts\": texts\n }\n\n result = [file]\n\n print(json.dumps(result, indent=2, ensure_ascii=False))\n\n\ndef extract_pdf_texts(input_file):\n with PdfInput(input_file) as loader:\n pdf_texts = []\n for page_no in range(1, loader.get_number_of_pages() + 1):\n pdf_text = loader.get_page_text(page_no)\n pdf_texts.append(new_text(page_no, \"PDF_TEXT\", pdf_text))\n return pdf_texts\n\n\ndef extract_ocr_tesseract_texts(input_file):\n texts = []\n with PdfInput(input_file) as loader:\n for page in range(1, loader.get_number_of_pages() + 1):\n with loader.get_page_png_file(page) as page_image_file:\n print(\"tesseract...\", file=sys.stderr)\n text = Tesseract().extract_text(page_image_file.name)\n texts.append(new_text(page, \"OCR_TESSERACT_TEXT\", text))\n return texts\n\n\ndef new_text(page, source, text):\n return {\n \"page\": page,\n \"source\": source,\n \"text\": text\n }\n\n\ndef get_opts():\n parser = OptionParser(usage=\"Usage: %prog \")\n (options, args) = parser.parse_args()\n\n if 
len(args) < 1:\n parser.error(\"Incorrect number of arguments\")\n\n input_file = args[0]\n\n return input_file\n\n\ndef get_random_string(length):\n letters = string.ascii_lowercase\n return ''.join(random.sample(letters, length))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"very_simple_pipeline.py","file_name":"very_simple_pipeline.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"468999234","text":"from mpl_toolkits.mplot3d import Axes3D\r\nfrom sklearn import manifold, datasets\r\nfrom pylab import *\r\nfrom sklearn.decomposition import PCA\r\n\"\"\"\r\nBased on scikits.learn example at\r\nhttp://scikit-learn.org/stable/auto_examples/manifold/plot_swissroll.html\r\n\"\"\"\r\n\r\n# load Swiss roll dataset\r\nX, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)\r\nprint(len(color))\r\n# print(X)\r\n# run Isomap on the points in X with 2 dim output\r\nn_neighbors = 1\r\nY_iso = manifold.Isomap(n_neighbors, 2).fit_transform(X)\r\n\r\n# run PCA on the points in X with 2 dim output\r\npca = PCA(n_components=2)\r\npca.fit(X)\r\npca_score = pca.explained_variance_ratio_\r\n# print(pca_score)\r\nV = pca.components_\r\nY_pca = pca.transform(X)\r\n\r\n# 3D plot\r\nfig = figure()\r\nax = fig.gca(projection='3d')\r\nax.scatter(X[:,0], X[:,1], X[:,2], c=color)\r\n\r\n# 2D projection\r\nfigure()\r\nscatter(Y_iso[:,0], Y_iso[:,1], c=color)\r\n\r\nfigure()\r\nscatter(Y_pca[:,0], Y_pca[:,1], c=color)\r\n\r\nshow()\r\n","sub_path":"11/swiss_roll.py","file_name":"swiss_roll.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"330388056","text":"from django.shortcuts import render,HttpResponse\nfrom django.template import Context,Template\nfrom firstapp.models import People,Article,Comment\nfrom django import forms\n\n\n# Create your views here.\n\nclass CommentForm(forms.Form):\n name = forms.CharField(max_length=50)\n comment = forms.CharField()\ndef index(request):\n print (request)\n print('==='*20)\n print (dir(request))\n queruset = request.GET.get('tag')\n print (queruset)\n if queruset:\n article_list = Article.objects.filter(tag=queruset)\n else:\n article_list = {}\n context = {}\n context['article_list'] = article_list\n index_page = render(request,'first_web_2.html', context)\n #assert False\n return index_page\n\ndef detail(request):\n form = CommentForm\n context = {}\n comment_list = Comment.objects.all()\n context['comment_list'] = comment_list\n context['form'] = form\n return render(request,'detail.html',context)\n","sub_path":"leve2/firstsite/firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"408435188","text":"from time import time\nimport contextlib\n\n\n# Context manager that shows how long a context was active\n@contextlib.contextmanager\ndef timer(name):\n s_t = time()\n yield\n e_t = time()\n print(f\"{name} took {e_t-s_t:.2f}\")\n\n\n# The write to log function writes all stdout (regular print data) to a file.\n# The contextlib.redirect_stdout context wrapper temporarily redirects standard\n# output to a given file handle, in this case the file we just opened for writing.\n\n@contextlib.contextmanager\ndef write_to_log(name):\n with open('%s.txt' % name, 'w') as fh:\n with contextlib.redirect_stdout(fh):\n with timer(name):\n 
yield # this is like a pause :)\n\n\n# Use the context manager as a decorator\n@write_to_log('some_function')\ndef some_function():\n print('This function takes a bit of time to execute')\n print('Do more...')\n\n\nsome_function()\n","sub_path":"Codes/Chapter_06_generator_corutines/generators/11_generator_context_manager.py","file_name":"11_generator_context_manager.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"639420808","text":"\"\"\"\n\nWrite a function that selects all words that have all the same vowels (in any order and/or number) as the first word, including the first word.\n\nExamples\nsame_vowel_group([\"toe\", \"ocelot\", \"maniac\"]) ➞ [\"toe\", \"ocelot\"]\n\nsame_vowel_group([\"many\", \"carriage\", \"emit\", \"apricot\", \"animal\"]) ➞ [\"many\"]\n\nsame_vowel_group([\"hoops\", \"chuff\", \"bot\", \"bottom\"]) ➞ [\"hoops\", \"bot\", \"bottom\"]\nNotes\nWords will contain only lowercase letters, and may contain whitespaces.\nFrequency does not matter (e.g. if the first word is \"loopy\", then you can include words with any number of o's, so long as they only contain o's, \nand not any other vowels).\n\n\"\"\"\n\ndef same_vowel_group(w):\n res = [w[0]]\n vowels = list(set(list(w[0])) & {'a', 'e', 'i', 'o', 'u', 'y'})\n for word in w[1:]:\n if list(set(list(word)) & {'a', 'e', 'i', 'o', 'u', 'y'}) == vowels:\n res.append(word)\n return res\n","sub_path":"Edabit/VowelFamilies.py","file_name":"VowelFamilies.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"234735344","text":"import argparse\n\nimport tensorflow.keras as tfk\nfrom tqdm import trange\n\nfrom midifile.processing import *\nfrom model import create_lstm_rnn_model\nfrom train import load_shubham_training_data, notes_to_matrix\nfrom utils import *\n\nROOT_DIR = root_dir(2, __file__)\nDATA_DIR = path.join(ROOT_DIR, 'data')\nLMD_DIR = path.join(DATA_DIR, 'lmd')\nMAESTRO_DIR = path.join(DATA_DIR, 'maestro')\nDATASET_DIR = MAESTRO_DIR\n\nWEIGHTS_DIR = path.join(ROOT_DIR, 'weights', 'rnn')\nOUTPUTS_DIR = path.join(ROOT_DIR, 'outputs', 'rnn')\n\n\ndef main(training_id: str, gen_seq_len: int, name: str):\n config_filename = path.join(WEIGHTS_DIR, f'{training_id}.config.json')\n weights_filename = path.join(WEIGHTS_DIR, f'{training_id}.weights.hdf5')\n output_filename = path.join(OUTPUTS_DIR, f'{training_id}.{name}.mid')\n assert_all_exist([config_filename, weights_filename])\n\n config = load_dict(config_filename)\n\n print('Loading data.')\n song_filenames = get_maestro_midi_list_by_composer(DATASET_DIR)[config['artist']]\n notes = load_shubham_training_data(song_filenames, config['seq_len'])\n\n song = generate_song(notes, weights_filename, output_filename, config['seq_len'], gen_seq_len)\n song.plot()\n\n\ndef generate_song(data: [str], weights_filename: str, output_filename: str, seq_len: int,\n gen_seq_len: int) -> stream.Stream:\n note_names = sorted(set(data))\n vocab_size = len(note_names)\n\n inputs, _ = notes_to_matrix(data, seq_len)\n\n print('Loading model.')\n model = create_lstm_rnn_model(inputs.shape, vocab_size)\n print(model.summary())\n model.load_weights(weights_filename)\n\n generated = generate_notes(model, inputs, note_names, gen_seq_len)\n song = notes_to_midi(generated)\n song.write('midi', fp=output_filename)\n return song\n\n\ndef generate_notes(model: tfk.Model, inputs: np.ndarray, note_names: [str], 
gen_seq_len: int):\n    vocab_size = len(note_names)\n\n    start_note_idx = np.random.randint(0, len(inputs) - 1)\n    int_to_note = {i: n for i, n in enumerate(note_names)}\n\n    note_enc = list(inputs[start_note_idx])\n    generated = []\n\n    print('Generating notes.')\n    for _ in trange(gen_seq_len):\n        model_input = np.reshape(note_enc, (1, -1, 1)).astype('float32')\n\n        gen_enc = model.predict(model_input)\n        gen_note_idx = np.argmax(gen_enc)\n        gen_note = int_to_note[gen_note_idx]\n        generated.append(gen_note)\n\n        note_enc.append(gen_note_idx / float(vocab_size))\n        note_enc = note_enc[1:len(note_enc)]\n\n    print('Done')\n    return generated\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Song generation.')\n    parser.add_argument('--training_id', type=str, nargs='?')\n    parser.add_argument('--seq_len', type=int, nargs='?', default=150)\n    parser.add_argument('--name', type=str, nargs='?', default='sample')\n    args = parser.parse_args()\n    main(args.training_id or input('Training ID? '), args.seq_len, args.name)\n","sub_path":"src/rnn/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"645431095","text":"import json\nimport timeit\n\nimport requests\nfrom elasticsearch import Elasticsearch\n\nfrom config import root_path\n\n\ndef set_up():\n    es = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\n    check_status = requests.get('http://localhost:9200')\n    if check_status.status_code != 200:\n        raise ConnectionError('Cannot connect to ElasticSearch server')\n    return es\n\n\ndef test_cran():\n    es = set_up()\n    es_version = es.info()['version']['number']\n\n    file_name = es_version + '_cran.txt'\n\n    with (root_path() / 'test' / 'out' / file_name).open(mode='w') as outf:\n        outf.write('query_index result_index score')\n        # outf.write('elastic search version %s \\n' % es_version)\n        es.indices.delete(index='cran', ignore=[404])\n        with (root_path() / 'data' / 'cran' / 'cran_documents.json').open() as f:\n            docs = json.load(f)\n        start_indexing = timeit.default_timer()\n        for d in docs:\n            es.index(index='cran', doc_type='document', id=d['I'], body=d)\n        # outf.write('indexing took %.4f\\n' %(timeit.default_timer() - start_indexing))\n\n        with (root_path() / 'data' / 'cran' / 'cran_queries.json').open() as f:\n            queries = json.load(f)\n\n        for q in queries:\n            q_index = int(q['I'])\n            elastic_query = {\"query\": {\"match\": {\"W\": q['W']}}}\n            res = es.search(index='cran', body=elastic_query)\n            for hit in res['hits']['hits']:\n                outf.write('%d %s %s\\n' %(q_index, hit['_id'], hit['_score']))\n\n\ntest_cran()\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"280581239","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This package contains a scaffold of a handler.\"\"\"\n\nimport logging\nimport pprint\nfrom typing import List, Optional, cast, TYPE_CHECKING\n\nfrom aea.configurations.base import ProtocolId\nfrom aea.mail.base import Envelope\nfrom aea.skills.base import Handler\nfrom aea.protocols.base import Message\nfrom aea.protocols.default.message import DefaultMessage\nfrom aea.protocols.default.serialization import DefaultSerializer\nfrom aea.protocols.fipa.message import FIPAMessage\nfrom aea.protocols.fipa.serialization import FIPASerializer\nfrom aea.protocols.oef.models import Query, Description\nfrom aea.decision_maker.messages.transaction import TransactionMessage\n\nif TYPE_CHECKING:\n from packages.skills.fipa_negotiation.dialogues import Dialogue, Dialogues\n from packages.skills.fipa_negotiation.helpers import generate_transaction_id\n from packages.skills.fipa_negotiation.strategy import Strategy\n from packages.skills.fipa_negotiation.transactions import Transactions\nelse:\n from fipa_negotiation_skill.dialogues import Dialogue, Dialogues\n from fipa_negotiation_skill.helpers import generate_transaction_id\n from fipa_negotiation_skill.strategy import Strategy\n from fipa_negotiation_skill.transactions import Transactions\n\nlogger = logging.getLogger(\"aea.fipa_negotiation_skill\")\n\n\nclass FIPANegotiationHandler(Handler):\n \"\"\"This class implements the fipa negotiation handler.\"\"\"\n\n SUPPORTED_PROTOCOL = FIPAMessage.protocol_id # type: Optional[ProtocolId]\n\n def setup(self) -> None:\n \"\"\"\n Implement the setup.\n\n :return: None\n \"\"\"\n pass\n\n def handle(self, message: Message, sender: str) -> None:\n \"\"\"\n Dispatch message to relevant handler and respond.\n\n :param message: the message\n :param sender: the sender\n :return: None\n \"\"\"\n fipa_msg = cast(FIPAMessage, message)\n fipa_msg_performative = fipa_msg.get(\"performative\") # FIPAMessage.Performative(fipa_msg.get(\"performative\"))\n\n logger.debug(\"[{}]: Identifying dialogue of FIPAMessage={}\".format(self.context.agent_name, fipa_msg))\n dialogues = cast(Dialogues, self.context.dialogues)\n if dialogues.is_belonging_to_registered_dialogue(fipa_msg, sender, self.context.agent_public_key):\n dialogue = dialogues.get_dialogue(fipa_msg, sender, self.context.agent_public_key)\n dialogue.incoming_extend(fipa_msg)\n elif dialogues.is_permitted_for_new_dialogue(fipa_msg, sender):\n dialogue = dialogues.create_opponent_initiated(fipa_msg, sender)\n dialogue.incoming_extend(fipa_msg)\n else:\n logger.debug(\"[{}]: Unidentified dialogue.\".format(self.context.agent_name))\n default_msg = DefaultMessage(type=DefaultMessage.Type.BYTES, content=b'This message belongs to an unidentified dialogue.')\n msg_bytes = DefaultSerializer().encode(default_msg)\n self.context.outbox.put_message(to=sender, sender=self.context.agent_public_key, protocol_id=DefaultMessage.protocol_id, message=msg_bytes)\n return\n\n logger.debug(\"[{}]: Handling FIPAMessage of performative={}\".format(self.context.agent_name, fipa_msg_performative))\n fipa_msg = cast(FIPAMessage, fipa_msg)\n if fipa_msg_performative == FIPAMessage.Performative.CFP:\n self._on_cfp(fipa_msg, dialogue)\n elif fipa_msg_performative == FIPAMessage.Performative.PROPOSE:\n self._on_propose(fipa_msg, dialogue)\n elif fipa_msg_performative == FIPAMessage.Performative.DECLINE:\n self._on_decline(fipa_msg, dialogue)\n elif fipa_msg_performative 
== FIPAMessage.Performative.ACCEPT:\n self._on_accept(fipa_msg, dialogue)\n elif fipa_msg_performative == FIPAMessage.Performative.MATCH_ACCEPT:\n self._on_match_accept(fipa_msg, dialogue)\n\n def teardown(self) -> None:\n \"\"\"\n Implement the handler teardown.\n\n :return: None\n \"\"\"\n dialogues = cast(Dialogues, self.context.dialogues)\n dialogues.reset()\n\n def _on_cfp(self, cfp: FIPAMessage, dialogue: Dialogue) -> None:\n \"\"\"\n Handle a CFP.\n\n :param cfp: the fipa message containing the CFP\n :param dialogue: the dialogue\n\n :return: None\n \"\"\"\n strategy = cast(Strategy, self.context.strategy)\n transactions = cast(Transactions, self.context.transactions)\n ownership_state_after_locks = transactions.ownership_state_after_locks(self.context.ownership_state, is_seller=dialogue.is_seller)\n own_service_description = strategy.get_own_service_description(ownership_state_after_locks, is_supply=dialogue.is_seller)\n new_msg_id = cast(int, cfp.get(\"message_id\")) + 1\n cfp_query = cfp.get(\"query\")\n cfp_query = cast(Query, cfp_query)\n decline = False\n if not cfp_query.check(own_service_description):\n decline = True\n logger.debug(\"[{}]: Current holdings do not satisfy CFP query.\".format(self.context.agent_name))\n else:\n proposal_description = strategy.get_proposal_for_query(cfp_query, self.context.preferences, ownership_state_after_locks, is_seller=dialogue.is_seller, tx_fee=1.0)\n if proposal_description is None:\n decline = True\n logger.debug(\"[{}]: Current strategy does not generate proposal that satisfies CFP query.\".format(self.context.agent_name))\n\n if decline:\n logger.debug(\"[{}]: sending to {} a Decline{}\".format(self.context.agent_name, dialogue.dialogue_label.dialogue_opponent_pbk,\n pprint.pformat({\n \"msg_id\": new_msg_id,\n \"dialogue_id\": cfp.get(\"dialogue_id\"),\n \"origin\": dialogue.dialogue_label.dialogue_opponent_pbk,\n \"target\": cfp.get(\"target\")\n })))\n msg = FIPAMessage(message_id=new_msg_id, dialogue_id=cfp.get(\"dialogue_id\"), performative=FIPAMessage.Performative.DECLINE, target=cfp.get(\"message_id\"))\n dialogue.outgoing_extend(msg)\n msg_bytes = FIPASerializer().encode(msg)\n result = Envelope(to=dialogue.dialogue_label.dialogue_opponent_pbk, sender=self.context.agent_public_key, protocol_id=FIPAMessage.protocol_id, message=msg_bytes)\n dialogues = cast(Dialogues, self.context.dialogues)\n dialogues.dialogue_stats.add_dialogue_endstate(Dialogue.EndState.DECLINED_CFP, dialogue.is_self_initiated)\n else:\n assert proposal_description is not None\n transaction_id = generate_transaction_id(self.context.agent_public_key, dialogue.dialogue_label.dialogue_opponent_pbk, dialogue.dialogue_label, dialogue.is_seller)\n transaction_msg = TransactionMessage(transaction_id=transaction_id,\n sender=self.context.agent_public_key,\n counterparty=dialogue.dialogue_label.dialogue_opponent_pbk,\n currency='FET',\n amount=proposal_description.values['amount'],\n is_sender_buyer=not dialogue.is_seller,\n sender_tx_fee=1,\n counterparty_tx_fee=1,\n quantities_by_good_pbk=proposal_description.values['description'])\n transactions = cast(Transactions, self.context.transactions)\n transactions.add_pending_proposal(dialogue.dialogue_label, new_msg_id, transaction_msg)\n logger.debug(\"[{}]: sending to {} a Propose{}\".format(self.context.agent_name, dialogue.dialogue_label.dialogue_opponent_pbk,\n pprint.pformat({\n \"msg_id\": new_msg_id,\n \"dialogue_id\": cfp.get(\"dialogue_id\"),\n \"origin\": dialogue.dialogue_label.dialogue_opponent_pbk,\n 
\"target\": cfp.get(\"message_id\"),\n \"propose\": proposal_description.values\n })))\n msg = FIPAMessage(performative=FIPAMessage.Performative.PROPOSE, message_id=new_msg_id, dialogue_id=cfp.get(\"dialogue_id\"), target=cfp.get(\"message_id\"), proposal=[proposal_description])\n dialogue.outgoing_extend(msg)\n msg_bytes = FIPASerializer().encode(msg)\n result = Envelope(to=dialogue.dialogue_label.dialogue_opponent_pbk, sender=self.context.agent_public_key, protocol_id=FIPAMessage.protocol_id, message=msg_bytes)\n self.context.outbox.put(result)\n\n def _on_propose(self, propose: FIPAMessage, dialogue: Dialogue) -> None:\n \"\"\"\n Handle a Propose.\n\n :param propose: the message containing the Propose\n :param dialogue: the dialogue\n :return: None\n \"\"\"\n logger.debug(\"[{}]: on propose as {}.\".format(self.context.agent_name, dialogue.role))\n proposals = cast(List[Description], propose.get(\"proposal\"))\n for num, proposal_description in enumerate(proposals):\n if num > 0: continue # TODO: allow for dialogue branching with multiple proposals\n transaction_id = generate_transaction_id(self.context.agent_public_key, dialogue.dialogue_label.dialogue_opponent_pbk, dialogue.dialogue_label, dialogue.is_seller)\n transaction_msg = TransactionMessage(transaction_id=transaction_id,\n sender=self.context.agent_public_key,\n counterparty=dialogue.dialogue_label.dialogue_opponent_pbk,\n currency='FET',\n amount=proposal_description.values['amount'],\n is_sender_buyer=not dialogue.is_seller,\n sender_tx_fee=1,\n counterparty_tx_fee=1,\n quantities_by_good_pbk=proposal_description.values['description'])\n new_msg_id = cast(int, propose.get(\"message_id\")) + 1\n strategy = cast(Strategy, self.context.strategy)\n transactions = cast(Transactions, self.context.transactions)\n ownership_state_after_locks = transactions.ownership_state_after_locks(self.context.ownership_state, is_seller=dialogue.is_seller)\n if strategy.is_profitable_transaction(self.context.preferences, ownership_state_after_locks, transaction_msg):\n logger.debug(\"[{}]: Accepting propose (as {}).\".format(self.context.agent_name, dialogue.role))\n transactions.add_locked_tx(transaction_msg, as_seller=dialogue.is_seller)\n transactions.add_pending_initial_acceptance(dialogue.dialogue_label, new_msg_id, transaction_msg)\n msg = FIPAMessage(message_id=new_msg_id, dialogue_id=propose.get(\"dialogue_id\"), target=propose.get(\"message_id\"), performative=FIPAMessage.Performative.ACCEPT)\n dialogue.outgoing_extend(msg)\n msg_bytes = FIPASerializer().encode(msg)\n result = Envelope(to=dialogue.dialogue_label.dialogue_opponent_pbk, sender=self.context.agent_public_key, protocol_id=FIPAMessage.protocol_id, message=msg_bytes)\n else:\n logger.debug(\"[{}]: Declining propose (as {})\".format(self.context.agent_name, dialogue.role))\n msg = FIPAMessage(message_id=new_msg_id, dialogue_id=propose.get(\"dialogue_id\"), target=propose.get(\"message_id\"), performative=FIPAMessage.Performative.DECLINE)\n dialogue.outgoing_extend(msg)\n msg_bytes = FIPASerializer().encode(msg)\n result = Envelope(to=dialogue.dialogue_label.dialogue_opponent_pbk, sender=self.context.agent_public_key, protocol_id=FIPAMessage.protocol_id, message=msg_bytes)\n dialogues = cast(Dialogues, self.context.dialogues)\n dialogues.dialogue_stats.add_dialogue_endstate(Dialogue.EndState.DECLINED_PROPOSE, dialogue.is_self_initiated)\n self.context.outbox.put(result)\n\n def _on_decline(self, decline: FIPAMessage, dialogue: Dialogue) -> None:\n \"\"\"\n Handle a Decline.\n\n 
:param decline: the Decline message\n :param dialogue: the dialogue\n :return: None\n \"\"\"\n logger.debug(\"[{}]: on_decline: msg_id={}, dialogue_id={}, origin={}, target={}\"\n .format(self.context.agent_name, decline.get(\"message_id\"), decline.get(\"dialogue_id\"), dialogue.dialogue_label.dialogue_opponent_pbk, decline.get(\"target\")))\n target = decline.get(\"target\")\n dialogues = cast(Dialogues, self.context.dialogues)\n if target == 1:\n dialogues.dialogue_stats.add_dialogue_endstate(Dialogue.EndState.DECLINED_CFP, dialogue.is_self_initiated)\n elif target == 2:\n dialogues.dialogue_stats.add_dialogue_endstate(Dialogue.EndState.DECLINED_PROPOSE, dialogue.is_self_initiated)\n transactions = cast(Transactions, self.context.transactions)\n transaction_msg = transactions.pop_pending_proposal(dialogue.dialogue_label, target)\n elif target == 3:\n dialogues.dialogue_stats.add_dialogue_endstate(Dialogue.EndState.DECLINED_ACCEPT, dialogue.is_self_initiated)\n transactions = cast(Transactions, self.context.transactions)\n transaction_msg = transactions.pop_pending_initial_acceptance(dialogue.dialogue_label, target)\n transactions.pop_locked_tx(transaction_msg)\n\n def _on_accept(self, accept: FIPAMessage, dialogue: Dialogue) -> None:\n \"\"\"\n Handle an Accept.\n\n :param accept: the Accept message\n :param dialogue: the dialogue\n :return: None\n \"\"\"\n transactions = cast(Transactions, self.context.transactions)\n assert dialogue.dialogue_label in transactions.pending_proposals \\\n and accept.get(\"target\") in transactions.pending_proposals[dialogue.dialogue_label]\n logger.debug(\"[{}]: on_accept: msg_id={}, dialogue_id={}, origin={}, target={}\"\n .format(self.context.agent_name, accept.get(\"message_id\"), accept.get(\"dialogue_id\"), dialogue.dialogue_label.dialogue_opponent_pbk, accept.get(\"target\")))\n new_msg_id = cast(int, accept.get(\"message_id\")) + 1\n transaction_msg = transactions.pop_pending_proposal(dialogue.dialogue_label, cast(int, accept.get(\"target\")))\n strategy = cast(Strategy, self.context.strategy)\n ownership_state_after_locks = transactions.ownership_state_after_locks(self.context.ownership_state, is_seller=dialogue.is_seller)\n if strategy.is_profitable_transaction(self.context.preferences, ownership_state_after_locks, transaction_msg):\n logger.debug(\"[{}]: Locking the current state (as {}).\".format(self.context.agent_name, dialogue.role))\n transactions.add_locked_tx(transaction_msg, as_seller=dialogue.is_seller)\n self.context.decision_maker_message_queue.put(transaction_msg)\n msg = FIPAMessage(message_id=new_msg_id, dialogue_id=accept.get(\"dialogue_id\"), target=accept.get(\"message_id\"), performative=FIPAMessage.Performative.MATCH_ACCEPT)\n dialogue.outgoing_extend(msg)\n msg_bytes = FIPASerializer().encode(msg)\n result = Envelope(to=dialogue.dialogue_label.dialogue_opponent_pbk, sender=self.context.agent_public_key, protocol_id=FIPAMessage.protocol_id, message=msg_bytes)\n else:\n logger.debug(\"[{}]: Decline the accept (as {}).\".format(self.context.agent_name, dialogue.role))\n msg = FIPAMessage(message_id=new_msg_id, dialogue_id=accept.get(\"dialogue_id\"), target=accept.get(\"message_id\"), performative=FIPAMessage.Performative.DECLINE)\n dialogue.outgoing_extend(msg)\n msg_bytes = FIPASerializer().encode(msg)\n result = Envelope(to=dialogue.dialogue_label.dialogue_opponent_pbk, sender=self.context.agent_public_key, protocol_id=FIPAMessage.protocol_id, message=msg_bytes)\n dialogues = cast(Dialogues, self.context.dialogues)\n 
dialogues.dialogue_stats.add_dialogue_endstate(Dialogue.EndState.DECLINED_ACCEPT, dialogue.is_self_initiated)\n self.context.outbox.put(result)\n\n def _on_match_accept(self, match_accept: FIPAMessage, dialogue: Dialogue) -> None:\n \"\"\"\n Handle a matching Accept.\n\n :param match_accept: the MatchAccept message\n :param dialogue: the dialogue\n :return: None\n \"\"\"\n transactions = cast(Transactions, self.context.transactions)\n assert dialogue.dialogue_label in transactions.pending_initial_acceptances \\\n and match_accept.get(\"target\") in transactions.pending_initial_acceptances[dialogue.dialogue_label]\n logger.debug(\"[{}]: on_match_accept: msg_id={}, dialogue_id={}, origin={}, target={}\"\n .format(self.context.agent_name, match_accept.get(\"message_id\"), match_accept.get(\"dialogue_id\"), dialogue.dialogue_label.dialogue_opponent_pbk, match_accept.get(\"target\")))\n transaction_msg = transactions.pop_pending_initial_acceptance(dialogue.dialogue_label, cast(int, match_accept.get(\"target\")))\n self.context.decision_maker_message_queue.put(transaction_msg)\n","sub_path":"packages/skills/fipa_negotiation/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":18952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"508826717","text":"import unittest\nfrom mock import patch, Mock, ANY\n\nfrom arena_acestream import fetch\n\nclass TestWebFetcher(unittest.TestCase):\n @patch('arena_acestream.web_fetcher.urllib2')\n def test_fetch(self, urllib_mock):\n # GIVEN\n url = 'http://test.com'\n connection_mock = Mock()\n request_mock = Mock()\n urllib_mock.Request.side_effect = [ request_mock ]\n urllib_mock.urlopen.side_effect = [ connection_mock ]\n\n expected_doc = ''\n connection_mock.read.side_effect = [ expected_doc ]\n\n # TEST\n actual_doc = fetch(url)\n\n # EXPECT\n self.assertEqual(actual_doc, expected_doc)\n urllib_mock.Request.assert_called_once_with(url, headers=ANY)\n urllib_mock.urlopen.assert_called_once_with(request_mock)\n connection_mock.read.assert_called_once()","sub_path":"arena_acestream/tests/test_web_fetcher.py","file_name":"test_web_fetcher.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"320504839","text":"import sys\n#sys.path.insert(0, \"/search/odin/Nick/_python_build2\")\nimport time\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom ModelCore import *\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.contrib import lookup\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import array_ops\n\nfrom tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer\nfrom tensorflow.contrib.layers.python.layers.embedding_ops import embedding_lookup_unique\nfrom tensorflow.contrib.rnn import MultiRNNCell, AttentionCellWrapper, GRUCell, LSTMCell, LSTMStateTuple\nfrom tensorflow.python.ops.rnn import dynamic_rnn, bidirectional_dynamic_rnn\nfrom tensorflow.contrib.seq2seq.python.ops import loss\n\nfrom tensorflow.python.layers import core as layers_core\nfrom tensorflow.contrib.session_bundle import exporter\nfrom tensorflow.python.util import nest\n\nimport Nick_plan\nimport logging as 
log\n\ngraphlg = log.getLogger(\"graph\")\nDynamicAttentionWrapper = dynamic_attention_wrapper.DynamicAttentionWrapper\nDynamicAttentionWrapperState = dynamic_attention_wrapper.DynamicAttentionWrapperState \nBahdanau = dynamic_attention_wrapper.BahdanauAttention\nLuong = dynamic_attention_wrapper.LuongAttention\n\ndef relu(x, alpha=0.2, max_value=None):\n\t'''ReLU.\n\t\talpha: slope of negative section.\n\t'''\n\tnegative_part = tf.nn.relu(-x)\n\tx = tf.nn.relu(x)\n\tif max_value is not None:\n\t\tx = tf.clip_by_value(x, tf.cast(0., dtype=tf.float32), tf.cast(max_value, dtype=tf.float32))\n\tx -= tf.constant(alpha, dtype=tf.float32) * negative_part\n\treturn x\n\ndef FeatureMatrix(conv_conf, inps, scope=None, dtype=tf.float32):\n\twith variable_scope.variable_scope(scope) as scope: \n\t\tfor i, each in enumerate(conv_conf):\n\t\t\th, w, ci, co = each[0] \n\t\t\th_s, w_s = each[1]\n\t\t\tph, pw = each[2]\n\t\t\tph_s, pw_s = each[3]\n\t\t\tk = tf.get_variable(\"filter_%d\" % i, [h, w, ci, co], initializer=tf.random_uniform_initializer(-0.4, 0.4))\n\t\t\tconved = tf.nn.conv2d(inps, k, [1, h_s, w_s, 1], padding=\"SAME\")\n\t\t\t#conved = relu(conved)\n\t\t\tconved = tf.nn.tanh(conved)\n\t\t\t# TODO Max pooling (May be Dynamic-k-max-pooling TODO)\n\t\t\tmax_pooled = tf.nn.max_pool(value=conved, ksize=[1, ph, pw, 1], strides=[1, ph, pw, 1], data_format=\"NHWC\", padding=\"SAME\") \n\t\t\tinps = max_pooled\n\treturn inps \n\ndef FC(inputs, h_size, o_size, act):\n\tfc1 = tf.contrib.layers.fully_connected(inputs=inputs, num_outputs=h_size, activation_fn=relu,\n\t\t\t\t\t\t\t\t\t\t\tweights_initializer=tf.random_uniform_initializer(-0.4, 0.4),\n\t\t\t\t\t\t\t\t\t\t\tbiases_initializer=tf.random_uniform_initializer(-0.4, 0.4))\n\tfc2 = tf.contrib.layers.fully_connected(inputs=fc1, num_outputs=o_size, activation_fn=act,\n\t\t\t\t\t\t\t\t\t\t\tweights_initializer=tf.random_uniform_initializer(-0.3, 0.3),\n\t\t\t\t\t\t\t\t\t\t\tbiases_initializer=tf.random_uniform_initializer(-0.4, 0.4))\n\n\treturn fc2\n\ndef CreateMultiRNNCell(cell_name, num_units, num_layers=1, output_keep_prob=1.0, reuse=False):\n\tcells = []\n\tfor i in range(num_layers):\n\t\tif cell_name == \"GRUCell\":\n\t\t\tsingle_cell = GRUCell(num_units=num_units, reuse=reuse)\n\t\telif cell_name == \"LSTMCell\":\n\t\t\tsingle_cell = LSTMCell(num_units=num_units, reuse=reuse)\n\t\telse:\n\t\t\tgraphlg.info(\"Unknown Cell type !\")\n\t\t\texit(0)\n\t\tif output_keep_prob < 1.0:\n\t\t\tsingle_cell = tf.contrib.rnn.DropoutWrapper(single_cell, output_keep_prob=output_keep_prob) \n\t\t\tgraphlg.info(\"Layer %d, Dropout used: output_keep_prob %f\" % (i, output_keep_prob))\n\n\t\t#single_cell = DeviceWrapper(ResidualWrapper(single_cell), device='/gpu:%d' % i)\n\t\t#single_cell = DeviceWrapper(single_cell, device='/gpu:%d' % i)\n\n\t\tcells.append(single_cell)\n\treturn MultiRNNCell(cells) \n\n\nclass RNNClassification(ModelCore):\n\tdef __init__(self, name, job_type=\"single\", task_id=0, dtype=tf.float32):\n\t\tsuper(RNNClassification, self).__init__(name, job_type, task_id, dtype) \n\t\tself.embedding = None\n\t\tself.out_proj = None\n\tdef build(self, for_deploy, variants=\"\"):\n\t\tconf = self.conf\n\t\tname = self.name\n\t\tjob_type = self.job_type\n\t\tdtype = self.dtype\n\t\tself.beam_size = 1 if (not for_deploy or variants==\"score\") else sum(self.conf.beam_splits)\n\n\t\t# Input maps\n\t\tself.in_table = lookup.MutableHashTable(key_dtype=tf.string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t value_dtype=tf.int64,\n\t\t\t\t\t\t\t\t\t\t\t\t\t 
default_value=UNK_ID,\n\t\t\t\t\t\t\t\t\t\t\t\t\t shared_name=\"in_table\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t name=\"in_table\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t checkpoint=True)\n\n\t\tself.enc_str_inps = tf.placeholder(tf.string, shape=(None, conf.input_max_len), name=\"enc_inps\")\n\t\tself.enc_lens = tf.placeholder(tf.int32, shape=[None], name=\"enc_lens\")\n\t\tself.tags = tf.placeholder(tf.int32, shape=[None, conf.tag_num], name=\"tags\")\n\t\tself.down_wgts = tf.placeholder(tf.float32, shape=[None], name=\"down_wgts\")\n\n\t\t# lookup\n\t\tself.enc_inps = self.in_table.lookup(self.enc_str_inps)\n\t\t#self.enc_inps = tf.Print(self.enc_inps, [self.enc_inps], message=\"enc_inps\", summarize=100000)\n\n\t\twith variable_scope.variable_scope(self.model_kind, dtype=dtype) as scope: \n\t\t\t# Create encode graph and get attn states\n\t\t\tgraphlg.info(\"Creating embeddings and embedding enc_inps.\")\n\t\t\twith ops.device(\"/cpu:0\"):\n\t\t\t\tself.embedding = variable_scope.get_variable(\"embedding\", [conf.output_vocab_size, conf.embedding_size], initializer=tf.random_uniform_initializer(-0.08, 0.08))\n\t\t\t\tself.emb_enc_inps = embedding_lookup_unique(self.embedding, self.enc_inps)\n\n\t\t\tgraphlg.info(\"Creating dynamic rnn...\")\n\t\t\tif conf.bidirectional:\n\t\t\t\twith variable_scope.variable_scope(\"encoder\", dtype=dtype) as scope: \n\t\t\t\t\tcell_fw = CreateMultiRNNCell(conf.cell_model, conf.num_units, conf.num_layers, conf.output_keep_prob)\n\t\t\t\t\tcell_bw = CreateMultiRNNCell(conf.cell_model, conf.num_units, conf.num_layers, conf.output_keep_prob)\n\t\t\t\tself.enc_outs, self.enc_states = bidirectional_dynamic_rnn(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcell_fw=cell_fw, cell_bw=cell_bw,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tinputs=self.emb_enc_inps,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsequence_length=self.enc_lens,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdtype=dtype,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tparallel_iterations=16,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tscope=scope)\n\n\t\t\t\tfw_s, bw_s = self.enc_states \n\t\t\t\tself.enc_states = []\n\t\t\t\tfor f, b in zip(fw_s, bw_s):\n\t\t\t\t\tif isinstance(f, LSTMStateTuple):\n\t\t\t\t\t\tself.enc_states.append(LSTMStateTuple(tf.concat([f.c, b.c], axis=1), tf.concat([f.h, b.h], axis=1)))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.enc_states.append(tf.concat([f, b], 1))\n\t\t\t\tself.enc_outs = tf.concat([self.enc_outs[0], self.enc_outs[1]], axis=2)\n\t\t\t\tmem_size = 2 * conf.num_units\n\t\t\t\tenc_state_size = 2 * conf.num_units \n\t\t\telse:\n\t\t\t\twith variable_scope.variable_scope(\"encoder\", dtype=dtype) as scope: \n\t\t\t\t\tcell = CreateMultiRNNCell(conf.cell_model, conf.num_units, conf.num_layers, conf.output_keep_prob)\n\t\t\t\tself.enc_outs, self.enc_states = dynamic_rnn(cell=cell,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tinputs=self.emb_enc_inps,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsequence_length=self.enc_lens,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tparallel_iterations=16,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tscope=scope,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdtype=dtype)\n\t\t\t\tmem_size = conf.num_units\n\t\t\t\tenc_state_size = conf.num_units\n\n\t\tself.enc_outs = tf.expand_dims(self.enc_outs, -1)\n\t\twith variable_scope.variable_scope(\"cnn\", dtype=dtype, reuse=None) as scope: \n\t\t\tfeature_map = FeatureMatrix(conf.conv_conf, self.enc_outs, scope=scope, dtype=dtype)\n\n\t\tvec = tf.contrib.layers.flatten(feature_map)\n\n\t\twith variable_scope.variable_scope(\"fc\", dtype=dtype, reuse=False) as scope: \n\t\t\tfc_out = FC(inputs=vec, h_size=conf.fc_h_size, o_size=conf.tag_num, 
act=relu)\n\t\tself.outputs = fc_out\n\n\t\tif not for_deploy:\n\t\t\t#self.tags = tf.Print(self.tags, [self.tags], message=\"tags\", summarize=10000)\n\t\t\tloss = tf.losses.softmax_cross_entropy(self.tags, self.outputs)\n\t\t\tsee_loss = loss\n\t\t\ttf.summary.scalar(\"loss\", see_loss)\n\t\t\tself.summary_ops = tf.summary.merge_all()\n\t\t\tself.update = self.backprop(loss) \n\n\t\t\tself.train_outputs_map[\"loss\"] = see_loss\n\t\t\tself.train_outputs_map[\"update\"] = self.update\n\n\t\t\tself.fo_outputs_map[\"loss\"] = see_loss\n\n\t\t\tself.debug_outputs_map[\"loss\"] = see_loss\n\t\t\tself.debug_outputs_map[\"outputs\"] = self.outputs,\n\t\t\tself.debug_outputs_map[\"update\"] = self.update\n\t\t\t#saver\n\t\t\tself.trainable_params.extend(tf.trainable_variables())\n\t\t\tself.saver = tf.train.Saver(max_to_keep=conf.max_to_keep)\n\t\telse:\n\t\t\tif variants == \"\":\n\t\t\t\tself.infer_outputs_map[\"tags\"] = tf.nn.softmax(self.outputs)\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t\t#saver\n\t\t\tself.trainable_params.extend(tf.trainable_variables())\n\t\t\tself.saver = tf.train.Saver(max_to_keep=conf.max_to_keep)\n\n\t\t\t# Exporter for serving\n\t\t\tself.model_exporter = exporter.Exporter(self.saver)\n\t\t\tinputs = {\n\t\t\t\t\"enc_inps:0\":self.enc_str_inps,\n\t\t\t\t\"enc_lens:0\":self.enc_lens\n\t\t\t} \n\t\t\toutputs = self.infer_outputs_map\n\t\t\tself.model_exporter.init(\n\t\t\t\ttf.get_default_graph().as_graph_def(),\n\t\t\t\tnamed_graph_signatures={\n\t\t\t\t\t\"inputs\": exporter.generic_signature(inputs),\n\t\t\t\t\t\"outputs\": exporter.generic_signature(outputs)\n\t\t\t\t})\n\t\t\tgraphlg.info(\"Graph done\")\n\t\t\tgraphlg.info(\"\")\n\t\treturn\n\n\tdef get_restorer(self):\n\t\trestorer = tf.train.Saver(self.global_params +\n\t\t\t\t\t\t\t\t self.trainable_params + self.optimizer_params +\n\t\t\t\t\t\t\t\t tf.get_default_graph().get_collection(\"saveable_objects\"))\n\t\treturn restorer\n\n\tdef get_init_ops(self):\n\t\tinit_ops = []\n\t\tif self.conf.embedding_init:\n\t\t\tinit_ops = [tf.variables_initializer(set(self.optimizer_params + self.global_params + self.trainable_params)- set([self.embedding]))]\n\t\t\tw2v = np.load(self.conf.embedding_init)\n\t\t\tinit_ops.append(self.embedding.assign(w2v))\n\t\telse:\n\t\t\tinit_ops = [tf.variables_initializer(set(self.optimizer_params + self.global_params + self.trainable_params))]\n\n\t\tif self.task_id == 0:\n\t\t\tvocab_file = filter(lambda x: re.match(\"vocab\\d+\\.all\", x) != None, os.listdir(self.conf.data_dir))[0]\n\t\t\tf = codecs.open(os.path.join(self.conf.data_dir, vocab_file))\n\t\t\tk = [line.strip() for line in f]\n\t\t\tk = k[0:self.conf.input_vocab_size]\n\t\t\tv = [i for i in range(len(k))]\n\t\t\top_in = self.in_table.insert(constant_op.constant(k), constant_op.constant(v, dtype=tf.int64))\n\t\t\tinit_ops.extend([op_in])\n\t\treturn init_ops\n\n\tdef preproc(self, records, use_seg=True, for_deploy=False, default_wgt=1.0):\n\t\t# parsing\n\t\tdata = []\n\t\tfor each in records:\n\t\t\tif for_deploy:\n\t\t\t\tp = each.strip()\n\t\t\t\twords, _ = tokenize_word(p) if use_seg else (p.split(), None)\n\t\t\t\tp_list = words #re.split(\" +\", p.strip())\n\t\t\t\tdata.append([p_list, len(p_list) + 1, [], 1.0])\n\t\t\telse:\n\t\t\t\tsegs = re.split(\"\\t\", each.strip())\n\t\t\t\tif len(segs) < 2:\n\t\t\t\t\tcontinue\n\t\t\t\tp, tag = segs[0], segs[1]\n\t\t\t\tp_list = re.split(\" +\", p)\n\t\t\t\ttag_list = re.split(\" +\", tag)\n\n\t\t\t\tdown_wgts = segs[-1] if len(segs) > 2 else default_wgt 
\n\t\t\t\tdata.append([p_list, len(p_list) + 1, tag_list, down_wgts])\n\n\t\t# batching\n\t\tconf = self.conf\n\t\tbatch_enc_inps, batch_enc_lens, batch_tags, batch_down_wgts = [], [], [], []\n\t\tfor encs, enc_len, tag, down_wgts in data:\n\t\t\t# Encoder inputs are padded, reversed and then padded to max.\n\t\t\tenc_len = enc_len if enc_len < conf.input_max_len else conf.input_max_len\n\t\t\tencs = encs[0:conf.input_max_len]\n\t\t\tif conf.enc_reverse:\n\t\t\t\tencs = list(reversed(encs + [\"_PAD\"] * (enc_len - len(encs))))\n\t\t\tenc_inps = encs + [\"_PAD\"] * (conf.input_max_len - len(encs))\n\n\t\t\tbatch_enc_inps.append(enc_inps)\n\t\t\tbatch_enc_lens.append(np.int32(enc_len))\n\t\t\tif not for_deploy:\n\t\t\t\t# Merge dec inps and targets \n\t\t\t\tbatch_tags.append(tag)\t\n\t\t\t\tbatch_down_wgts.append(down_wgts)\n\t\tfeed_dict = {\n\t\t\t\"enc_inps:0\": batch_enc_inps,\n\t\t\t\"enc_lens:0\": batch_enc_lens,\n\t\t\t\"tags:0\": batch_tags,\n\t\t\t\"down_wgts:0\": batch_down_wgts\n\t\t}\n\t\tfor k, v in feed_dict.items():\n\t\t\tif not v: \n\t\t\t\tdel feed_dict[k]\n\t\treturn feed_dict\n\n\tdef after_proc(self, out):\n\t\treturn out[\"tags\"][0]\n\t\t\nif __name__ == \"__main__\":\n\tname = \"rnncls-bi-judge_poem\"\n\tmodel = RNNClassification(name)\n\tif len(sys.argv) == 2:\n\t\tgpu = 0\n\tflag = sys.argv[1]\n\t#model(flag, use_seg=False)\n\tmodel(flag, use_seg=False, gpu=1)\n","sub_path":"models/RNNClassification.py","file_name":"RNNClassification.py","file_ext":"py","file_size_in_byte":11940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"201135422","text":"import json\nimport os\nfrom operator import itemgetter\n\nimport requests\nfrom dateutil.relativedelta import relativedelta\nfrom pymongo import MongoClient\nfrom datetime import datetime\n\nstock_no_list = []\n\npath = \"fund/\"\nclient = MongoClient('localhost', 27017)\ndb = client['stock']\ncollectStock = db['stock']\ncollectTWSE = db['twse_list']\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36\"}\n\nurl4 = \"http://www.tpex.org.tw/web/stock/aftertrading/otc_quotes_no1430/stk_wn1430_download.php?l=zh-tw&d=20170522&se=EW&s=0,asc,0\"\nurl2 = \"http://www.twse.com.tw/ch/trading/exchange/MI_INDEX/MI_INDEX.php\"\nurl3 = \"http://www.tpex.org.tw/web/stock/3insti/daily_trade/3itrade_hedge_download.php?l=zh-tw&se=EW&t=D&d=20170523&s=0,asc,0\"\n\nclient = MongoClient('localhost', 27017)\ndb = client['stock']\ncollect = db['stock']\n\n\ndef download_fund(start, end):\n delta = end - start\n for d in range(delta.days):\n\n new_date = startDate + relativedelta(days=d)\n date_str = '{0:%Y%m%d}'.format(new_date)\n url = \"http://www.twse.com.tw/fund/T86?response=json&date={0}&selectType=ALLBUT0999\".format(date_str)\n res = requests.post(url, headers=headers)\n print('Start download date {0} file'.format(date_str))\n try:\n data = json.loads(res.text)\n parse(data)\n except Exception as e:\n print(\"except no {0}\".format(e))\n\n\ndef parse(json_data):\n if json_data['stat'] == 'OK':\n date = json_data['date']\n data = json_data['data']\n\n total_count = len(data)\n for index, item in enumerate(data):\n item = [value.strip().replace(',', '').replace('\"', '').replace('=', '').replace('--', '') for value in\n item]\n\n new_date = datetime.strptime(date + '00:00:00', '%Y%m%d%H:%M:%S')\n stock_no = item[0]\n\n collectStock.update({\"stockNo\": stock_no, \"details.date\": new_date},\n {'$set': 
{\"details.$.foreign_investor_buy\": int(item[2]),\n \"details.$.foreign_investor_sell\": int(item[3]),\n \"details.$.foreign_investor_total\": int(item[4]),\n \"details.$.investment_trust_buy\": int(item[5]),\n \"details.$.investment_trust_sell\": int(item[6]),\n \"details.$.investment_trust_total\": int(item[7]),\n \"details.$.dealer_total\": int(item[8]),\n \"details.$.dealer_buy_self\": int(item[9]),\n \"details.$.dealer_sell_self\": int(item[10]),\n \"details.$.dealer_total_self\": int(item[11]),\n \"details.$.dealer_buy_hedge\": int(item[12]),\n \"details.$.dealer_sell_hedge\": int(item[13]),\n \"details.$.dealer_total_hedge\": int(item[14]),\n 'details.$.institutional_investors_total': int(item[15])}})\n print(' {0}/{1} {2} : '.format(index, total_count, item))\n else:\n print(json_data['stat'])\n\n\ndate_format = \"%m/%d/%Y\"\nstartDate = datetime.strptime('6/12/2017', date_format)\nendDate = datetime.today()\ndownload_fund(startDate, endDate)\n","sub_path":"download_fund.py","file_name":"download_fund.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"154723969","text":"\nimport itertools\n\nif __name__ == \"__main__\":\n\n N = int(input())\n L = list(input().rstrip().split())\n K = int(input())\n num = 0\n den = 0\n for e in itertools.permutations(L, K):\n den += 1\n num += 'a' in e[:K]\n print(num * 1.0 / den)","sub_path":"Python/Iterables_and_Iterators/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"68593089","text":"from ..base.generator_data_storage_base import GeneratorDataStorageBase\nfrom ..base.generator_data_storage_base import get_data_list_name, get_data_name\nfrom ..core.object import Object, AccessSpecifier, Objects\nfrom ..core.function import Function\n\n\nclass GeneratorDataStorage(GeneratorDataStorageBase):\n\n def __init__(self):\n GeneratorDataStorageBase.__init__(self)\n\n def create_shared_method(self):\n GeneratorDataStorageBase.create_shared_method(self)\n\n obj = Object()\n obj.type = self.name\n obj.name = '__xml'\n obj.initial_value = 'NULL'\n obj.is_static = True\n obj.access = AccessSpecifier.private\n self.members.append(obj)\n\n obj = Object()\n obj.type = Objects.STRING\n obj.name = 'PATH_TO_DATA'\n obj.initial_value = '\"assets/data/data.xml\"'\n obj.is_static = True\n self.members.append(obj)\n\n obj = Object()\n obj.type = self.name\n obj.name = '__json'\n obj.initial_value = 'NULL'\n obj.is_static = True\n obj.access = AccessSpecifier.private\n self.members.append(obj)\n\n method = Function()\n method.name = 'deserialize_xml'\n method.args.append(['xml', Objects.VOID])\n self.functions.append(method)\n method = Function()\n method.name = 'deserialize_json'\n method.args.append(['xml', Objects.VOID])\n self.functions.append(method)\n\n method = Function()\n method.name = 'serialize_xml'\n method.args.append(['xml', Objects.VOID])\n self.functions.append(method)\n method = Function()\n method.name = 'serialize_json'\n method.args.append(['xml', Objects.VOID])\n self.functions.append(method)\n\n def get_shared_method_body(self):\n return SHARED_METHOD\n\n def create_getters(self, classes):\n for class_ in classes:\n if class_.is_storage and class_.side in [self.model.side, 'both']:\n map_name = get_data_list_name(get_data_name(class_.name))\n method = Function()\n method.name = 'get' + class_.name\n 
method.args.append(['name', Objects.VOID])\n method.operations.append(PATTERN_GETTER)\n method.operations[0] = method.operations[0].replace('@{array}', map_name)\n method.operations[0] = method.operations[0].replace('@{type}', class_.name)\n self.functions.append(method)\n\n method = Function()\n method.name = 'loadAll' + get_data_list_name(class_.name)\n method.operations.append(PATTERN_LOAD_ALL)\n method.operations[0] = method.operations[0].replace('@{array}', map_name)\n method.operations[0] = method.operations[0].replace('@{type}', class_.name)\n self.functions.append(method)\n\n def add_initialize_function_json(self):\n method = Function()\n method.name = 'initialize_json'\n method.return_type = Objects.VOID\n method.is_const = True\n method.args.append(['content', Objects.STRING])\n method.translated = True\n\n method.operations.append('$json = json_decode(content);')\n method.operations.append('$this->deserialize(json);')\n method.operations.append('$this->_loaded = true;')\n self.functions.append(method)\n\n def add_initialize_function_xml(self):\n method = Function()\n method.name = 'initialize_xml'\n method.return_type = Objects.VOID\n method.is_const = True\n method.args.append(['content', Objects.STRING])\n method.translated = True\n\n method.operations.append('$root = simplexml_load_string(content);')\n method.operations.append('$this->deserialize(root);')\n method.operations.append('$this->_loaded = true;')\n self.functions.append(method)\n\n\nSHARED_METHOD = '''\nif(DataStorage::$__instance == NULL)\n{\n function endsWith($haystack, $needle)\n {\n //https://stackoverflow.com/questions/834303/startswith-and-endswith-functions-in-php\n // search forward starting from end minus needle length characters\n if ($needle === '')\n {\n return true;\n }\n $diff = \\\\strlen($haystack) - \\\\strlen($needle);\n return $diff >= 0 && strpos($haystack, $needle, $diff) !== false;\n }\n\n DataStorage::$__instance = new DataStorage();\n if(endsWith(DataStorage::$PATH_TO_DATA, '.xml'))\n {\n DataStorage::$__xml = simplexml_load_file(DataStorage::$PATH_TO_DATA);\n }\n else if(endsWith(DataStorage::$PATH_TO_DATA, '.json'))\n {\n $string = file_get_contents(DataStorage::$PATH_TO_DATA);\n DataStorage::$__json = json_decode($string);\n }\n\n}\nreturn DataStorage::$__instance;\n'''\n\nPATTERN_GETTER = '''\n if (array_key_exists($name, $this->@{array}))\n {\n return $this->@{array}[$name];\n }\n else if(DataStorage::$__xml != null)\n {\n $data = new @{type}();\n if(DataStorage::$__xml->@{array})\n {\n foreach (DataStorage::$__xml->@{array}->pair as $node)\n {\n if ($node[\"key\"] == $name)\n {\n $this->@{array}[$name] = $data;\n $data->deserialize_xml($node->value);\n }\n }\n }\n return $data;\n }\n else if(DataStorage::$__json != null)\n {\n $data = new @{type}();\n if(DataStorage::$__json->@{array})\n {\n foreach (DataStorage::$__json->@{array} as $node)\n {\n if ($node->key == $name)\n {\n $this->@{array}[$name] = $data;\n $data->deserialize_json($node->value);\n }\n }\n }\n return $data;\n }\n '''\n\nPATTERN_LOAD_ALL = '''\n if(DataStorage::$__xml != null && DataStorage::$__xml->@{array})\n {\n foreach (DataStorage::$__xml->@{array}->pair as $node)\n {\n $name = (string)$node[\"key\"];\n $this->@{array}[$name] = new @{type}();\n $this->@{array}[$name]->deserialize_xml($node->value);\n }\n }\n else if(DataStorage::$__json != null && DataStorage::$__json->@{array})\n {\n foreach (DataStorage::$__json->@{array} as $node)\n {\n $name = (string)$node->key;\n $this->@{array}[$name] = new @{type}();\n 
$this->@{array}[$name]->deserialize_json($node->value);\n }\n }\n '''\n","sub_path":"mlc_tools/module_php/generator_data_storage.py","file_name":"generator_data_storage.py","file_ext":"py","file_size_in_byte":6507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"365209207","text":"# Greatest common divisor\n# https://en.wikipedia.org/wiki/Greatest_common_divisor\ndef gcd(a, b):\n\tif b == 0:\n\t\treturn a\n\treturn gcd(b, a % b)\n\nassert gcd(6, 9) == 3\nassert gcd(2336, 1314) == 146\n\n# Least common multiple\n# https://en.wikipedia.org/wiki/Least_common_multiple\ndef lcm(a, b):\n\treturn (a * b)/gcd(a, b)\n\nassert lcm(6, 9) == 18\nassert lcm(2336, 1314) == 21024\n","sub_path":"lcm.py","file_name":"lcm.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"446333344","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport os\n\nfrom sacred.observers.base import RunObserver\nfrom sacred.serializer import flatten\nfrom sacred.utils import PathType\n\n\nDEFAULT_JSON_PRIORITY = 20\n\n\nclass JSONObserver(RunObserver):\n def __init__(self, base_dir: PathType, _id: int = None, number_format: str = '%03d',\n indent: str = 4, ensure_ascii: bool = False,\n priority: int = DEFAULT_JSON_PRIORITY):\n self.base_dir = base_dir\n self._id = int(_id) if _id is not None else -1\n self.number_format = number_format\n self.indent = indent\n self.ensure_ascii = ensure_ascii\n self.run_dir = None\n self.run_entry = None\n self.config = None\n self.cout = \"\"\n self.priority = priority\n\n def create_dirs(self, name: str, _id: int):\n experiments_dir = self.base_dir.format(name=name, experiment_name=name)\n if not os.path.exists(experiments_dir):\n os.makedirs(experiments_dir)\n\n if _id is None and self._id == -1:\n self._id = sum(1 for e in os.scandir(experiments_dir)\n if e.is_dir() and e.name.isdigit())\n self.run_dir = os.path.join(experiments_dir,\n self.number_format % self._id)\n if not os.path.exists(self.run_dir):\n os.makedirs(self.run_dir)\n\n def queued_event(self, ex_info, command, host_info, queue_time, config,\n meta_info, _id):\n self.create_dirs(ex_info['name'], _id)\n\n self.run_entry = {\n '_id': self.number_format % self._id,\n 'experiment': dict(ex_info),\n 'command': command,\n 'host': dict(host_info),\n 'meta': meta_info,\n 'status': 'QUEUED'\n }\n self.config = config\n self.save()\n return self._id if _id is None else _id\n\n def started_event(self, ex_info, command, host_info, start_time, config,\n meta_info, _id):\n self.create_dirs(ex_info['name'], _id)\n\n self.cout = \"\"\n self.run_entry = {\n '_id': self.number_format % self._id,\n 'experiment': dict(ex_info),\n 'command': command,\n 'host': dict(host_info),\n 'start_time': start_time.isoformat(),\n 'meta': meta_info,\n 'status': 'RUNNING',\n 'resources': [],\n 'artifacts': [],\n 'heartbeat': None,\n }\n self.config = config\n self.save()\n return self._id if _id is None else _id\n\n def heartbeat_event(self, info, captured_out, beat_time, result):\n self.run_entry['heartbeat'] = beat_time.isoformat()\n self.run_entry['result'] = result\n if info:\n self.run_entry['info'] = info\n self.cout = captured_out\n self.save()\n\n def completed_event(self, stop_time, result):\n self.run_entry['stop_time'] = stop_time.isoformat()\n self.run_entry['result'] = result\n self.run_entry['status'] = 'COMPLETED'\n self.save()\n\n def interrupted_event(self, interrupt_time, status):\n 
self.run_entry['stop_time'] = interrupt_time.isoformat()\n        self.run_entry['status'] = status\n        self.save()\n\n    def failed_event(self, fail_time, fail_trace):\n        self.run_entry['stop_time'] = fail_time.isoformat()\n        self.run_entry['status'] = 'FAILED'\n        self.run_entry['fail_trace'] = fail_trace\n        self.save()\n\n    def resource_event(self, filename):\n        self.run_entry['resources'].append(filename)\n        self.save()\n\n    def artifact_event(self, name, filename, metadata=None, content_type=None):\n        self.run_entry['artifacts'].append(name)\n        self.save()\n\n    def log_metrics(self, metrics_by_name, info):\n        \"\"\"Store new measurements into COMMAND-metrics.json.\n        \"\"\"\n        if os.path.exists(self.run_dir):\n            filename = f'{self.run_entry[\"command\"]}-metrics.json'\n\n            metrics = {}\n            metrics_path = os.path.join(self.run_dir, filename)\n            if os.path.exists(metrics_path):\n                with open(metrics_path, 'r', encoding='utf-8') as f:\n                    metrics = json.load(f)\n\n            for k, v in metrics_by_name.items():\n                if k not in metrics:\n                    metrics[k] = {'values': [], 'steps': [], 'timestamps': []}\n\n                metrics[k]['values'] += v['values']\n                metrics[k]['steps'] += v['steps']\n\n                # Manually convert them to avoid passing a datetime dtype handler\n                # when we're trying to convert into json.\n                timestamps = [ts.isoformat() for ts in v['timestamps']]\n                metrics[k]['timestamps'] += timestamps\n            self.save_json(filename, metrics)\n\n    def save(self):\n        if os.path.exists(self.run_dir):\n            self.save_json(f'{self.run_entry[\"command\"]}-run.json', self.run_entry)\n            self.save_json(f'{self.run_entry[\"command\"]}-config.json', self.config)\n\n            filename = f'{self.run_entry[\"command\"]}-cout.txt'\n            with open(os.path.join(self.run_dir, filename), 'wb') as f:\n                f.write(self.cout.encode('utf-8'))\n\n    def save_json(self, filename, obj):\n        with open(os.path.join(self.run_dir, filename), 'w', encoding='utf-8') as f:\n            f.write(json.dumps(flatten(obj), sort_keys=True, indent=self.indent,\n                               ensure_ascii=self.ensure_ascii))\n            f.write('\\n')\n\n    def get_path(self, filename: str) -> str:\n        blacklist = ['-run.json', '-config.json', '-cout.txt', '-metrics.json']\n        for i in blacklist:\n            if filename.endswith(i):\n                raise FileExistsError(\n                    \"You are trying to overwrite a file necessary for the \"\n                    \"JSONObserver. 
The list of blacklisted files is: \"\n f\"{','.join(f'*{i}' for i in blacklist)}\"\n )\n return os.path.join(self.run_dir, filename)\n\n def __eq__(self, other):\n if isinstance(other, JSONObserver):\n return self.run_dir == other.run_dir\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n","sub_path":"sacred/observers/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":6210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"165304230","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sys import argv\n\nscript, var1 = argv\n\nKL_list = []\nfor i in xrange(1, int(var1)+1):\n KL = np.loadtxt(\"chi2_out_%s.dat\" % i)\n KL_list.append(KL[1])\n\nKL_list_6dp = [ '%.6f' % elem for elem in KL_list ]\n#print KL_list_6dp\n\nnp.savetxt('ALL_KL_chi2.dat', KL_list_6dp, fmt=\"%s\")\n","sub_path":"0_Analysis/Scripts/4_collate_KL_CHI2.py","file_name":"4_collate_KL_CHI2.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"136078941","text":"nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\ndef reverse(data):\n numRevers = []\n n = len(data) - 1\n for i in range(n, -1, -1):\n numRevers.append(data[i])\n return numRevers\n\nprint(nums)\nprint(reverse(nums))\n","sub_path":"Creativity/C-1.13_reverseList.py","file_name":"C-1.13_reverseList.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"487874755","text":"import subprocess\nimport os\nimport sys\nimport re\n\nworking_dir = sys.argv[1]\n\nfiles = [os.path.join(working_dir, f) for f in os.listdir(working_dir) if os.path.isfile(os.path.join(working_dir, f)) and (os.path.splitext(f)[1] == '.sv' or os.path.splitext(f)[1] == '.v')]\n\ntest_benches = []\nmodules = []\n\nfor f in files:\n match = [line for line in open(f) if re.match(r'module .*\\(',line)][0].split(' ')[1].split('(')[0]\n if match.endswith('_tb'):\n test_benches.append(match)\n modules.append(f)\n\nfor m in modules:\n print(\"Compiling \" + m)\n subprocess.run(['vlog', '-work', 'work', m])\n #errors_and_warnings = [line for line in open(working_dir + '/transcript') if re.match(r'Errors: [0-9]+ Warnings: [0-9]+', line)]\n #print(errors_and_warnings)\n\nfor tb in test_benches:\n cmd = \"vsim -c work.\" + tb + ' -do \"run -all\"'\n print(\"Simulating \" + tb + \" :: \" + cmd)\n subprocess.run([\"vsim\", \"-c\", \"work.\" + tb, \"-do\", 'run -all'])#, stdout=subprocess.DEVNULL)\n #errors_and_warnings = [line for line in open(working_dir + '/transcript') if re.match(r'Errors: [0-9]+ Warnings: [0-9]+', line)]\n #print(errors_and_warnings)\n\n","sub_path":"regr_testing.py","file_name":"regr_testing.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"385422664","text":"import scipy.sparse as sps\nimport numpy as np\nimport pdb\n\n__all__=['MyGraph','random_graph']\n\nclass MyGraph(object):\n '''\n Attributes:\n :connections: list, i,j,weight.\n :node_positions: 2darray, the positions of graph nodes.\n :must_nodes: list, nodes must be passed.\n '''\n def __init__(self,connections,node_positions,must_nodes=None,must_connections=None):\n self.connections=connections\n self.node_positions=np.asarray(node_positions)\n self.must_nodes=must_nodes if must_nodes is not None else []\n self.must_connections=must_connections if 
must_connections is not None else []\n        il,jl,wl=zip(*self.connections)\n        num_nodes=int(max(max(il),max(jl))+1)\n        if num_nodes!=self.node_positions.shape[0] or self.node_positions.shape[1]!=2:\n            raise ValueError()\n\n        #initialize matrices.\n        il,jl,weights=zip(*self.connections)\n        il,jl=np.concatenate([il,jl]),np.concatenate([jl,il])\n        weights=np.concatenate([weights,weights])\n        self.sparse_matrix=sps.coo_matrix((weights,(il,jl)),dtype='float64')\n        self.dense_matrix=np.zeros([num_nodes]*2)\n        self.dense_matrix[il,jl]=weights\n\n    def __str__(self):\n        return 'Graph(%s nodes, %s legs)\\n %s'%(self.num_nodes,self.num_paths,'\\n '.join(str(con) for con in self.connections))\n\n    @property\n    def num_nodes(self):\n        '''Number of nodes'''\n        return self.dense_matrix.shape[0]\n\n    @property\n    def num_paths(self):\n        '''Number of paths'''\n        return len(self.connections)\n\n    def get_cost(self,path):\n        '''Calculate the cost for given path.'''\n        il,jl=path[:-1],path[1:]\n        diss=self.dense_matrix[il,jl]\n        if any(diss==0): raise ValueError('Invalid Path!')\n        return sum(diss)\n\ndef random_graph(num_nodes,density=1):\n    m=np.random.random([num_nodes]*2)*3\n    m[np.random.random([num_nodes]*2)>density]=0\n    np.fill_diagonal(m,0)\n    il,jl=np.where(m)\n    weights=m[il,jl]\n    return MyGraph(zip(il,jl,weights),node_positions=(np.random.random([num_nodes,2])-0.5)*num_nodes)\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"228872692","text":"# Setup sqlite database to support query logging from web_exec.py\n\nimport os, sqlite3\n\nDB_FILE = 'opt-query-log.sqlite3'\n\ndef create_db():\n    con = sqlite3.connect(DB_FILE)\n    cur = con.cursor()\n\n    cur.execute('''CREATE TABLE query_log\n                   (id INTEGER PRIMARY KEY,\n                    timestamp TEXT,\n                    ip_addr TEXT,\n                    http_user_agent TEXT,\n                    http_referer TEXT,\n                    user_script TEXT,\n                    cumulative_mode INTEGER)''')\n    con.commit()\n    cur.close()\n\n\nif __name__ == \"__main__\":\n    assert not os.path.exists(DB_FILE)\n    create_db()\n    print('Created ' + DB_FILE)\n\n","sub_path":"v3/create_log_db.py","file_name":"create_log_db.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"473816426","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on 2017/3/14 1:53\n\n@author: asnju\n\"\"\"\n\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import Flask\nimport os\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\ndb = SQLAlchemy(app)\n\n\nclass Role(db.Model):\n    __tablename__ = 'roles'\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(64), unique=True)\n    users = db.relationship('User', backref='role', lazy='dynamic')\n\n    def __repr__(self):\n        return '<Role %r>' % self.name\n\n\nclass User(db.Model):\n    __tablename__ = 'users'\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String(64), unique=True, index=True)\n    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))\n\n    def __repr__(self):\n        return '<User %r>' % 
self.username\n\n\n\n\n","sub_path":"db_oper.py","file_name":"db_oper.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"226488637","text":"def to_giraffe(a):\n    translation=\"\"\n    for i in a:\n        #if i=='a' or i=='e' or i=='o' or i=='i' or i=='u':\n        if i in \"AEOIUaouie\":\n            translation+='g'\n        else:\n            translation+=i\n    return translation\n\n#a=input()\n#print(to_giraffe(a))\nprint(to_giraffe(input()))","sub_path":"Not_that_important/giraffe_lang.py","file_name":"giraffe_lang.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"384812959","text":"import urllib.request\n\n# Define commonly used variables\nurl = 'http://www.baidu.com'\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1'}\n# 1. Create the request object\nreq = urllib.request.Request(url,\n                             headers=headers)\n# 2. Get the response object\nres = urllib.request.urlopen(req)\n# 3. Get the response content\nhtml = res.read().decode('utf-8')\n\n# Methods of the response object\n# Get the HTTP response code\nprint(res.getcode())\n# Get the URL of the actual data returned\nprint(res.geturl())\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"15-Crawl/day01/02_Request示例.py","file_name":"02_Request示例.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"644983186","text":"\"\"\"Hermes MQTT server for Rhasspy wakeword with snowboy\"\"\"\nimport asyncio\nimport logging\nimport queue\nimport socket\nimport threading\nimport typing\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom rhasspyhermes.audioserver import AudioFrame\nfrom rhasspyhermes.base import Message\nfrom rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs\nfrom rhasspyhermes.wake import (\n    GetHotwords,\n    Hotword,\n    HotwordDetected,\n    HotwordError,\n    Hotwords,\n    HotwordToggleOff,\n    HotwordToggleOn,\n    HotwordToggleReason,\n)\n\nWAV_HEADER_BYTES = 44\n_LOGGER = logging.getLogger(__name__)\n\n# -----------------------------------------------------------------------------\n\n\n@dataclass\nclass SnowboyModel:\n    \"\"\"Settings for a single snowboy model\"\"\"\n\n    model_path: Path\n    sensitivity: str = \"0.5\"\n    audio_gain: float = 1.0\n    apply_frontend: bool = False\n\n    def float_sensitivity(self) -> float:\n        \"\"\"Get float of first sensitivity value.\"\"\"\n        # 0.5,0.5\n        return float(self.sensitivity.split(\",\")[0])\n\n\n# -----------------------------------------------------------------------------\n\n\nclass WakeHermesMqtt(HermesClient):\n    \"\"\"Hermes MQTT server for Rhasspy wakeword with snowboy.\"\"\"\n\n    def __init__(\n        self,\n        client,\n        models: typing.List[SnowboyModel],\n        wakeword_ids: typing.List[str],\n        model_dirs: typing.Optional[typing.List[Path]] = None,\n        site_ids: typing.Optional[typing.List[str]] = None,\n        enabled: bool = True,\n        sample_rate: int = 16000,\n        sample_width: int = 2,\n        channels: int = 1,\n        chunk_size: int = 960,\n        udp_audio: typing.Optional[typing.List[typing.Tuple[str, int, str]]] = None,\n        udp_chunk_size: int = 2048,\n    ):\n        super().__init__(\n            \"rhasspywake_snowboy_hermes\",\n            client,\n            sample_rate=sample_rate,\n            sample_width=sample_width,\n            channels=channels,\n            site_ids=site_ids,\n        )\n\n        self.subscribe(AudioFrame, HotwordToggleOn, HotwordToggleOff, GetHotwords)\n\n        self.models = models\n        self.wakeword_ids = wakeword_ids\n        self.model_dirs = model_dirs or []\n\n        self.enabled = enabled\n        self.disabled_reasons: 
typing.Set[str] = set()\n\n # Required audio format\n self.sample_rate = sample_rate\n self.sample_width = sample_width\n self.channels = channels\n\n self.chunk_size = chunk_size\n\n # Queue of WAV audio chunks to process (plus site_id)\n self.wav_queue: queue.Queue = queue.Queue()\n\n self.first_audio: bool = True\n self.audio_buffer = bytes()\n\n # Load detector\n self.detectors: typing.List[typing.Any] = []\n self.model_ids: typing.List[str] = []\n\n # Start threads\n threading.Thread(target=self.detection_thread_proc, daemon=True).start()\n\n # Listen for raw audio on UDP too\n self.udp_chunk_size = udp_chunk_size\n\n if udp_audio:\n for udp_host, udp_port, udp_site_id in udp_audio:\n threading.Thread(\n target=self.udp_thread_proc,\n args=(udp_host, udp_port, udp_site_id),\n daemon=True,\n ).start()\n\n # -------------------------------------------------------------------------\n\n def load_detectors(self):\n \"\"\"Load snowboy detectors from models\"\"\"\n from snowboy import snowboydecoder, snowboydetect\n\n self.model_ids = []\n self.detectors = []\n\n for model in self.models:\n assert model.model_path.is_file(), f\"Missing {model.model_path}\"\n _LOGGER.debug(\"Loading snowboy model: %s\", model)\n\n detector = snowboydetect.SnowboyDetect(\n snowboydecoder.RESOURCE_FILE.encode(), str(model.model_path).encode()\n )\n\n detector.SetSensitivity(model.sensitivity.encode())\n detector.SetAudioGain(model.audio_gain)\n detector.ApplyFrontend(model.apply_frontend)\n\n self.detectors.append(detector)\n self.model_ids.append(model.model_path.stem)\n\n # -------------------------------------------------------------------------\n\n async def handle_audio_frame(self, wav_bytes: bytes, site_id: str = \"default\"):\n \"\"\"Process a single audio frame\"\"\"\n self.wav_queue.put((wav_bytes, site_id))\n\n async def handle_detection(\n self, model_index: int, wakeword_id: str, site_id: str = \"default\"\n ) -> typing.AsyncIterable[\n typing.Union[typing.Tuple[HotwordDetected, TopicArgs], HotwordError]\n ]:\n \"\"\"Handle a successful hotword detection\"\"\"\n try:\n assert len(self.model_ids) > model_index, f\"Missing {model_index} in models\"\n\n yield (\n HotwordDetected(\n site_id=site_id,\n model_id=self.model_ids[model_index],\n current_sensitivity=self.models[model_index].float_sensitivity(),\n model_version=\"\",\n model_type=\"personal\",\n ),\n {\"wakeword_id\": wakeword_id},\n )\n except Exception as e:\n _LOGGER.exception(\"handle_detection\")\n yield HotwordError(error=str(e), context=str(model_index), site_id=site_id)\n\n async def handle_get_hotwords(\n self, get_hotwords: GetHotwords\n ) -> typing.AsyncIterable[typing.Union[Hotwords, HotwordError]]:\n \"\"\"Report available hotwords\"\"\"\n try:\n if self.model_dirs:\n # Add all models from model dirs\n model_paths = []\n for model_dir in self.model_dirs:\n if not model_dir.is_dir():\n _LOGGER.warning(\"Model directory missing: %s\", str(model_dir))\n continue\n\n for model_file in model_dir.iterdir():\n if model_file.is_file() and (\n model_file.suffix in [\".umdl\", \".pmdl\"]\n ):\n model_paths.append(model_file)\n else:\n # Add current model(s) only\n model_paths = [Path(model.model_path) for model in self.models]\n\n hotword_models: typing.List[Hotword] = []\n for model_path in model_paths:\n model_words = \" \".join(model_path.with_suffix(\"\").name.split(\"_\"))\n model_type = \"universal\" if model_path.suffix == \".umdl\" else \"personal\"\n\n hotword_models.append(\n Hotword(\n model_id=model_path.name,\n 
model_words=model_words,\n model_type=model_type,\n )\n )\n\n yield Hotwords(\n models=hotword_models, id=get_hotwords.id, site_id=get_hotwords.site_id\n )\n\n except Exception as e:\n _LOGGER.exception(\"handle_get_hotwords\")\n yield HotwordError(\n error=str(e), context=str(get_hotwords), site_id=get_hotwords.site_id\n )\n\n def detection_thread_proc(self):\n \"\"\"Handle WAV audio chunks.\"\"\"\n try:\n while True:\n wav_bytes, site_id = self.wav_queue.get()\n\n if not self.detectors:\n self.load_detectors()\n\n # Extract/convert audio data\n audio_data = self.maybe_convert_wav(wav_bytes)\n\n # Add to persistent buffer\n self.audio_buffer += audio_data\n\n # Process in chunks.\n # Any remaining audio data will be kept in buffer.\n while len(self.audio_buffer) >= self.chunk_size:\n chunk = self.audio_buffer[: self.chunk_size]\n self.audio_buffer = self.audio_buffer[self.chunk_size :]\n\n for detector_index, detector in enumerate(self.detectors):\n # Return is:\n # -2 silence\n # -1 error\n # 0 voice\n # n index n-1\n result_index = detector.RunDetection(chunk)\n\n if result_index > 0:\n # Detection\n if detector_index < len(self.wakeword_ids):\n wakeword_id = self.wakeword_ids[detector_index]\n else:\n wakeword_id = \"\"\n\n if not wakeword_id:\n if detector_index < len(self.models):\n # Use file name\n wakeword_id = self.models[\n detector_index\n ].model_path.stem\n else:\n # Fall back to default\n wakeword_id = \"default\"\n\n _LOGGER.debug(\n \"Wake word detected: %s (site_id=%s)\",\n wakeword_id,\n site_id,\n )\n\n asyncio.run_coroutine_threadsafe(\n self.publish_all(\n self.handle_detection(\n detector_index, wakeword_id, site_id=site_id\n )\n ),\n self.loop,\n )\n except Exception:\n _LOGGER.exception(\"detection_thread_proc\")\n\n # -------------------------------------------------------------------------\n\n def udp_thread_proc(self, host: str, port: int, site_id: str):\n \"\"\"Handle WAV chunks from UDP socket.\"\"\"\n try:\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n udp_socket.bind((host, port))\n _LOGGER.debug(\"Listening for audio on UDP %s:%s\", host, port)\n\n while True:\n wav_bytes, _ = udp_socket.recvfrom(\n self.udp_chunk_size + WAV_HEADER_BYTES\n )\n\n if self.enabled:\n self.wav_queue.put((wav_bytes, site_id))\n except Exception:\n _LOGGER.exception(\"udp_thread_proc\")\n\n # -------------------------------------------------------------------------\n\n async def on_message_blocking(\n self,\n message: Message,\n site_id: typing.Optional[str] = None,\n session_id: typing.Optional[str] = None,\n topic: typing.Optional[str] = None,\n ) -> GeneratorType:\n \"\"\"Received message from MQTT broker.\"\"\"\n # Check enable/disable messages\n if isinstance(message, HotwordToggleOn):\n if message.reason == HotwordToggleReason.UNKNOWN:\n # Always enable on unknown\n self.disabled_reasons.clear()\n else:\n self.disabled_reasons.discard(message.reason)\n\n if self.disabled_reasons:\n _LOGGER.debug(\"Still disabled: %s\", self.disabled_reasons)\n else:\n self.enabled = True\n self.first_audio = True\n _LOGGER.debug(\"Enabled\")\n elif isinstance(message, HotwordToggleOff):\n self.enabled = False\n self.disabled_reasons.add(message.reason)\n _LOGGER.debug(\"Disabled\")\n elif isinstance(message, AudioFrame):\n if self.enabled:\n assert site_id, \"Missing site_id\"\n await self.handle_audio_frame(message.wav_bytes, site_id=site_id)\n elif isinstance(message, GetHotwords):\n async for hotword_result in self.handle_get_hotwords(message):\n yield hotword_result\n 
else:\n _LOGGER.warning(\"Unexpected message: %s\", message)\n","sub_path":"rhasspywake_snowboy_hermes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"625342166","text":"\nimport yaml\n\nfrom unittest import TestCase\nfrom mock import patch, mock_open\nfrom parameterized import parameterized, param\n\nfrom samcli.commands._utils.template import get_template_data\n\n\nclass TestInvokeContext_get_template_data(TestCase):\n\n def test_must_raise_if_file_does_not_exist(self):\n filename = \"filename\"\n\n with self.assertRaises(ValueError) as exception_ctx:\n get_template_data(filename)\n\n ex = exception_ctx.exception\n self.assertEquals(str(ex), \"Template file not found at {}\".format(filename))\n\n @patch(\"samcli.commands._utils.template.yaml_parse\")\n @patch(\"samcli.commands._utils.template.pathlib\")\n def test_must_read_file_and_parse(self, pathlib_mock, yaml_parse_mock):\n filename = \"filename\"\n file_data = \"contents of the file\"\n parse_result = \"parse result\"\n\n pathlib_mock.Path.return_value.exists.return_value = True # Fake that the file exists\n\n m = mock_open(read_data=file_data)\n yaml_parse_mock.return_value = parse_result\n\n with patch(\"samcli.commands._utils.template.open\", m):\n result = get_template_data(filename)\n\n self.assertEquals(result, parse_result)\n\n m.assert_called_with(filename, 'r')\n yaml_parse_mock.assert_called_with(file_data)\n\n @parameterized.expand([\n param(ValueError()),\n param(yaml.YAMLError())\n ])\n @patch(\"samcli.commands._utils.template.yaml_parse\")\n @patch(\"samcli.commands._utils.template.pathlib\")\n def test_must_raise_on_parse_errors(self, exception, pathlib_mock, yaml_parse_mock):\n filename = \"filename\"\n file_data = \"contents of the file\"\n\n pathlib_mock.Path.return_value.exists.return_value = True # Fake that the file exists\n\n m = mock_open(read_data=file_data)\n yaml_parse_mock.side_effect = exception\n\n with patch(\"samcli.commands._utils.template.open\", m):\n\n with self.assertRaises(ValueError) as ex_ctx:\n get_template_data(filename)\n\n actual_exception = ex_ctx.exception\n self.assertTrue(str(actual_exception).startswith(\"Failed to parse template: \"))\n","sub_path":"tests/unit/commands/_utils/test_template.py","file_name":"test_template.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"546593809","text":"import filecmp\nimport os\nfrom tempfile import TemporaryDirectory\n\nimport boto3\nimport botocore\nfrom django.conf import settings\n\nfrom .models import Image\n\n\nclass S3:\n\n def __init__(self):\n self.api = boto3.resource('s3')\n\n def copy_to_production(self, filename):\n \"\"\"\n Copy image from draft to production on S3.\n Production images are located in the root of the bucket.\n Draft images are located in a directory specified by environment\n variable AWS_S3_BUCKET.\n \"\"\"\n copy_source = {\n 'Bucket': settings.AWS_S3_BUCKET,\n 'Key': settings.AWS_S3_DRAFT_DIR + filename,\n }\n self.api.meta.client.copy_object(\n ACL='public-read',\n Bucket=settings.AWS_S3_BUCKET,\n CopySource=copy_source,\n Key=filename,\n )\n\n def delete(self, filename):\n \"\"\"Delete an image from production location.\"\"\"\n self.api.meta.client.delete_object(\n Bucket=settings.AWS_S3_BUCKET,\n Key=filename,\n )\n\n def delete_draft_images(self):\n \"\"\"Delete all draft images 
at once.\"\"\"\n objects = []\n for item in self.iter_objects(prefix=settings.AWS_S3_DRAFT_DIR):\n objects.append({'Key': item['Key']})\n if objects:\n self.api.meta.client.delete_objects(\n Bucket=settings.AWS_S3_BUCKET,\n Delete={'Objects': objects},\n )\n\n def iter_objects(self, prefix=None):\n \"\"\"Iterate over all objects in the bucket.\"\"\"\n kwargs = {'Bucket': settings.AWS_S3_BUCKET}\n if prefix:\n kwargs['Prefix'] = prefix\n while True:\n response = self.api.meta.client.list_objects_v2(**kwargs)\n if 'Contents' not in response:\n break\n for item in response['Contents']:\n yield item\n if response['IsTruncated']:\n kwargs['ContinuationToken'] = response['NextContinuationToken']\n else:\n break\n\n def process_image(self, filename, bundle):\n \"\"\"Upload image file to S3 if needed.\"\"\"\n basename = os.path.basename(filename)\n key = settings.AWS_S3_DRAFT_DIR + basename\n with TemporaryDirectory() as tempdir:\n s3localname = os.path.join(tempdir, basename)\n try:\n # download image from root (production) dir for comparison\n self.api.meta.client.download_file(\n settings.AWS_S3_BUCKET,\n basename,\n s3localname,\n )\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == '404':\n # image does not exist on S3, create a new one\n self.upload_image(filename, key)\n Image.objects.create(\n bundle=bundle,\n filename=basename,\n status=Image.STATUS_NEW,\n )\n return\n else:\n raise\n # image already in production; compare it to local image\n if filecmp.cmp(filename, s3localname):\n # files are the same, no update\n return\n else:\n # files differ, update image\n self.upload_image(filename, key)\n Image.objects.create(\n bundle=bundle,\n filename=basename,\n status=Image.STATUS_CHANGED,\n )\n return\n\n def upload_image(self, filename, key):\n with open(filename, 'rb') as f:\n self.api.meta.client.put_object(\n ACL='public-read',\n Body=f,\n Bucket=settings.AWS_S3_BUCKET,\n Key=key,\n )\n","sub_path":"sfdoc/publish/amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"455594787","text":"from __future__ import division, print_function, absolute_import\n\nimport numpy\nfrom rep.report import metrics\n\n__author__ = 'Alex Rogozhnikov'\n\n\ndef test_optimal_metrics(size=1000):\n prediction = numpy.random.random(size=size)\n random_labels = numpy.random.choice(2, size=size)\n\n def ams_like(s, b):\n return s / (b + 1. / 100. 
/ size)\n\n # setting 'the best event' to be signal\n random_labels[numpy.argmax(prediction)] = 1\n optimal_ams = metrics.OptimalMetric(ams_like)\n proba = numpy.ndarray((len(prediction), 2))\n proba[:, 0] = 1 - prediction\n proba[:, 1] = prediction\n score = optimal_ams(random_labels, proba)\n\n assert score >= 100\n\n\ndef test_logloss(size=1000):\n from sklearn.metrics import log_loss\n prediction = numpy.random.random(size=size)\n random_labels = numpy.random.choice(2, size=size)\n\n proba = numpy.ndarray((len(prediction), 2))\n proba[:, 0] = 1 - prediction\n proba[:, 1] = prediction\n\n loss = metrics.LogLoss().fit(proba, y=random_labels, sample_weight=None)\n value = log_loss(random_labels, prediction)\n value2 = loss(random_labels, proba)\n\n print(value, value2)\n\n assert numpy.allclose(value, value2)\n\n\ndef test_roc_auc(size=1000):\n from sklearn.metrics import roc_auc_score\n prediction = numpy.random.random(size=size)\n random_labels = numpy.random.choice(2, size=size)\n\n proba = numpy.ndarray((len(prediction), 2))\n proba[:, 0] = 1 - prediction\n proba[:, 1] = prediction\n\n roc_auc_metric = metrics.RocAuc().fit(proba, y=random_labels, sample_weight=None)\n value = roc_auc_score(random_labels, prediction)\n value2 = roc_auc_metric(random_labels, proba)\n\n print(value, value2)\n\n assert numpy.allclose(value, value2)\n\n\ndef fpr_tpr(size, prediction):\n from sklearn.metrics import roc_curve\n random_labels = numpy.random.choice(2, size=size)\n\n proba = numpy.ndarray((len(prediction), 2))\n proba[:, 0] = 1 - prediction\n proba[:, 1] = prediction\n sample_weight = numpy.random.random(size=size)\n\n threshold = 0.75\n loss_fpr, loss_tpr = metrics.FPRatTPR(threshold), metrics.TPRatFPR(threshold)\n fprs, tprs, _ = roc_curve(random_labels, prediction, sample_weight=sample_weight)\n value_fpr = loss_fpr(random_labels, proba, sample_weight=sample_weight)\n value_tpr = loss_tpr(random_labels, proba, sample_weight=sample_weight)\n value_fpr2 = fprs[numpy.searchsorted(tprs, threshold)]\n value_tpr2 = tprs[numpy.searchsorted(fprs, threshold) - 1]\n print(value_fpr, value_fpr2)\n print(value_tpr, value_tpr2)\n\n assert numpy.allclose(value_fpr, value_fpr2, atol=1e-3)\n assert numpy.allclose(value_tpr, value_tpr2, atol=1e-3)\n\n\ndef test_fpr_tpr(size=10000):\n prediction = numpy.random.permutation(size)\n fpr_tpr(size, prediction)\n prediction = numpy.ones(size)\n fpr_tpr(size, prediction)\n","sub_path":"tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"244071714","text":"import discord\nimport rename\nimport img_cmd\nimport re\nimport dont_touch\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print(client.user.id)\n print(\"ready\")\n game = discord.Game(\"앙기무링\")\n await client.change_presence(status=discord.Status.online, activity=game)\n\n@client.event\nasync def on_message(message):\n if message.content.startswith(\"!정훈티콘\"):\n await img_cmd.JH_emoticon(message)\n elif message.content.startswith(\"!재원티콘\"):\n await img_cmd.JW_emoticon(message)\n elif len(re.findall(\"현[^현구]*구\", message.content)) != 0:\n await rename.rename(message)\n\nclient.run(dont_touch.token)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"519525225","text":"'''\nCreated on Oct 27, 2019\n\n@author: 
snake91\n'''\n\n\n'''\n checking distribution of max_n Fn(x) - F(x)\n\n'''\nimport numpy as np\n#import matplotlib.pyplot as plt\n\n\nn_size = 10\nn_series = 1000000\nx = np.random.uniform(size = (n_series * n_size)) #for i in range(n_series)]\n\ndef onesample_theor_ks(x):\n \n u = np.linspace(0,1, len(x))\n x = np.sort(x)\n \n diff = np.max(np.abs(x - u))\n \n \n return diff\n\n\ndef twosample_emp_ks(x, y):\n \n x = np.sort(x)\n y = np.sort(y)\n\n diff = np.max(np.abs(x-y))\n \n return diff\n\ndef nsample_theor_ks(x):\n\n u = np.linspace(0,1, len(x[0]))\n x = list(x)\n \n maxList = []\n for i in range(len(x)):\n x[i] = np.sort(x[i])\n \n diff = np.max(np.abs(x[i] - u))\n \n maxList.append(diff)\n \n \n return np.max(maxList)\n\n\n# onesample_theor_statsks = np.array([onesample_theor_ks(x[i]) for i in range(len(x)) ])\n# twosample_emp_statsks = np.array([twosample_emp_ks(x[i], x[i-1]) for i in range(1,len(x))])\n\n# sample_theor_statsks1 = np.array([nsample_theor_ks(x[i]) for i in range(0,len(x))])\n# sample_theor_statsks2 = np.array([nsample_theor_ks(x[i], x[i-1]) for i in range(1,len(x))])\n# sample_theor_statsks3 = np.array([nsample_theor_ks(x[i], x[i-1], x[i-2]) for i in range(2,len(x))])\n# sample_theor_statsks4 = np.array([nsample_theor_ks(x[i], x[i-1], x[i-2], x[i-3]) for i in range(3,len(x))])\n# \n# \n# plt.hist(sample_theor_statsks1, bins = 300, histtype='step', label = 'one sample theor')\n# plt.hist(sample_theor_statsks2, bins = 300, histtype='step', label = 'two sample emp')\n# plt.hist(sample_theor_statsks3, bins = 300, histtype='step', label = 'three sample emp')\n# plt.hist(sample_theor_statsks4, bins = 300, histtype='step', label = 'four sample emp')\n\n# maxid = 10\n# i = 5\n# for i in range(1, maxid):\ndef genStats(nobs, n_series, universe, maxid = 10, nsim = 1000):\n universe = np.random.choice(universe, int(nsim))\n sample = [np.random.choice(universe, size = (nobs)) for i in range(int(nsim))]\n sample_theor_statsks = [nsample_theor_ks(sample[j-int(n_series): j]) for j in range(int(n_series), int(len(sample) - (maxid - nobs)))] \n return sample_theor_statsks\n\nnobs = 100\nn_series = 2 \nrng = x\nmaxid = 50\nnsim = 50000\nres = genStats(nobs, n_series, rng, maxid, nsim) \nprint(\"\")\n# for i in range(len(statsList)):\n# \n# plt.hist(statsList[i], bins = 400, histtype='step', label = 'sample theor ' + str(i))\n# \n# plt.legend()\n# plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"stats/gof/ks/maxmv.py","file_name":"maxmv.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"285949023","text":"from lib.money import Money\nfrom decimal import Decimal \n\nfrom cryptofeed import pairs\n\nfriendly_name = 'Gemini BTC-USD'\n\nprice_decimal_precision = 2\nvolume_decimal_precision = None\nvolume_currency = 'BTC'\ncurrency = 'USD'\n\nbid_string = \"buy\"\nask_string = \"sell\"\norderbook_depth = 100000\n\n# Configurable properties with defaults.\nfee = Decimal('0.0005')\nmarket_order_fee = Decimal('0.0005')\nlimit_order_fee = Decimal('0.0005')\nfiat_balance_tolerance = Money('0.0001', 'USD')\nvolume_balance_tolerance = Money('0.00000001', 'BTC')\nmin_order_size = Money('0.0001', 'USD')\nmax_tick_speed = 2\nuse_cached_orderbook = False\n\nwithdrawal_fee = Money('0.01', 'BTC')\nbtc_credit_limit = Money('0', 'BTC')\n\npairs = [x for x in 
pairs.gemini_pairs().keys()]","sub_path":"app/user_config/exchanges/parameters/gemini_config.py","file_name":"gemini_config.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"405859311","text":"# -*- coding: utf-8 -*-\n\"\"\"\nConvert Halpha map\n\n@author: Tom Williams\n\"\"\"\n\nimport numpy as np\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\n\ndef ellipse(data,x_centre,y_centre,a,b,theta=22.5):\n    theta *= np.pi/180\n    for i in range(data.shape[0]):\n        for j in range(data.shape[1]):\n            x = x_centre-j\n            y = y_centre-i\n            if ((x*np.cos(theta)+y*np.sin(theta))**2/a**2) + ((x*np.sin(theta)-y*np.cos(theta))**2/b**2) > 1:\n                data[i,j] = 0\n    \n    return data\n\ndata,header = fits.getdata('/home/daedalusdata/c1625914/M33/Halpha/m33_ha_imreplace_imedit.fits',header=True)\n\nmapunit = 2e-18 * 4 #account for pixel size\n\ndata *= mapunit\n\n#Convert to solar luminosities\n\ndata *= 8.44e49 #cm^2 term\ndata /= 3.826e33 #Solar luminosity in ergs/s\n\nfits.writeto('/home/daedalusdata/c1625914/M33/masked/halpha.fits',data,header,clobber=True)","sub_path":"skirt/halpha_convert.py","file_name":"halpha_convert.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"153330214","text":"def has_all_parameters(courier):\n    \"\"\"\n    Check whether given courier has all parameters\n    and they are valid.\n    :return bool\n    \"\"\"\n    if courier.get('courier_id', None) is None:\n        return False\n\n    if courier.get('courier_type', None) is None:\n        return False\n\n    if courier.get('regions', None) is None:\n        return False\n\n    if courier.get('working_hours', None) is None:\n        return False\n\n    # Check if all parameters are valid\n\n    checks = (\n        is_courier_id_valid,\n        is_courier_type_valid,\n        are_regions_valid,\n    )\n\n    for check in checks:\n        if not (check(courier)):\n            return False\n\n    return True\n\n\ndef has_bad_property(dictionary):\n    for key in dictionary:\n        if key not in \\\n                ('courier_id', 'courier_type', 'rating',\n                 'regions', 'working_hours', 'earnings',):\n            return True\n    return False\n\n\ndef is_courier_id_valid(courier):\n    return courier['courier_id'] > 0\n\n\ndef is_courier_type_valid(courier):\n    return courier['courier_type'] in ('foot', 'bike', 'car')\n\n\ndef are_regions_valid(courier):\n    bad_regions = list(filter(lambda x: x <= 0, courier['regions']))\n    return len(bad_regions) == 0\n\n\ndef are_working_hours_valid(courier):\n    for working_hour in courier['working_hours']:\n        split_time = working_hour.split('-')\n\n        if len(split_time) < 2:\n            return False\n        for time in split_time:\n            hour, minute = None, None\n            try:\n                hour, minute = time.split(':')\n            except ValueError:\n                # Not in the format HH:MM\n                return False\n\n            if int(hour) >= 24 or int(hour) < 0:\n                return False\n            if int(minute) >= 60 or int(minute) < 0:\n                return False\n    return True\n","sub_path":"candy_delivery_app/server/couriers/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"621414819","text":"bday = { 'anubhav': '26 july', 'jai': '10th august' }\nwhile True:\n\tprint('enter the name of person for b\\'day date or q to exit')\n\tname = input()\n\tif name == 'q':\n\t\tbreak\n\t\n\tif name in bday:\n\t\tprint (bday[name] + ' is b\\'day for ' + name)\n\telse:\n\t\tprint ('I do not have information please add this 
entry')\n\t\tprint ('Please tell his birthdate')\n\t\tday = input()\n\t\tbday[name] = day\n\t\tprint ('database updated!!')\n\t\t\n","sub_path":"bday_dict.py","file_name":"bday_dict.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"137351811","text":"#!/usr/bin/env python3\n\nimport datetime\n\nfrom discord.ext import commands\n\n\nclass Ping:\n \"\"\"Ping command.\"\"\"\n\n @commands.command()\n @commands.cooldown(6, 12)\n async def ping(self, ctx):\n \"\"\"Ping the bot.\"\"\"\n pingtime = int(round((datetime.datetime.utcnow() -\n ctx.message.created_at).total_seconds() * 1000, 0))\n message = f\":ping_pong: {pingtime} ms!\"\n await ctx.send(message)\n\n\ndef setup(bot):\n \"\"\"Set up the extension.\"\"\"\n bot.add_cog(Ping())\n","sub_path":"cogs/core/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"231016840","text":"__copyright__ = \"Copyright (c) 2021 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nimport os\n\nimport click\nfrom jina import Flow\n\nfrom dataset import input_index_data\n\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef config(model_name):\n os.environ['JINA_PARALLEL'] = os.environ.get('JINA_PARALLEL', '1')\n os.environ['JINA_SHARDS'] = os.environ.get('JINA_SHARDS', '1')\n os.environ['JINA_PORT'] = '45678'\n os.environ['JINA_USE_REST_API'] = 'true'\n if model_name == 'clip':\n os.environ['JINA_IMAGE_ENCODER'] = os.environ.get('JINA_IMAGE_ENCODER', 'docker://jinahub/pod.encoder.clipimageencoder:0.0.1-1.0.7')\n os.environ['JINA_TEXT_ENCODER'] = os.environ.get('JINA_TEXT_ENCODER', 'docker://jinahub/pod.encoder.cliptextencoder:0.0.1-1.0.7')\n os.environ['JINA_TEXT_ENCODER_INTERNAL'] = 'yaml/clip/text-encoder.yml'\n elif model_name == 'vse':\n os.environ['JINA_IMAGE_ENCODER'] = os.environ.get('JINA_IMAGE_ENCODER', 'docker://jinahub/pod.encoder.vseimageencoder:0.0.5-1.0.7')\n os.environ['JINA_TEXT_ENCODER'] = os.environ.get('JINA_TEXT_ENCODER', 'docker://jinahub/pod.encoder.vsetextencoder:0.0.6-1.0.7')\n os.environ['JINA_TEXT_ENCODER_INTERNAL'] = 'yaml/vse/text-encoder.yml'\n\n\n@click.command()\n@click.option('--task', '-t', type=click.Choice(['index', 'query'], case_sensitive=False), default='query')\n@click.option('--num_docs', '-n', default=50)\n@click.option('--request_size', '-s', default=16)\n@click.option('--data_set', '-d', type=click.Choice(['f30k', 'f8k'], case_sensitive=False), default='f8k')\n@click.option('--model_name', '-m', type=click.Choice(['clip', 'vse'], case_sensitive=False), default='clip')\ndef main(task, num_docs, request_size, data_set, model_name):\n config(model_name)\n if task == 'index':\n with Flow.load_config('flow-index.yml') as f:\n f.index(\n input_fn=input_index_data(num_docs, request_size, data_set),\n request_size=request_size\n )\n elif task == 'query':\n with Flow.load_config('flow-query.yml') as f:\n f.use_rest_gateway()\n f.block()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cross-modal-search/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"69643059","text":"from utils.dataset import SeismicDataset, ToTensor\r\nfrom utils.models import AccelerationPredictor\r\n\r\nfrom torchvision import transforms\r\nimport os\r\nfrom tqdm import 
tqdm\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom tensorboardX import SummaryWriter\r\nfrom torch import nn\r\n\r\nuse_gpu = torch.cuda.is_available()\r\ndevice = torch.device(\"cuda\" if use_gpu else \"cpu\")\r\n\r\n\r\nclass AverageMeter(object):\r\n def __init__(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n\r\ndef train(model, loader, optimizer, criterion, seismo_mean, seismo_std, velocity_mean, velocity_std,\r\n writer=None, global_step=None, name=None, normalize=True,):\r\n model.train()\r\n train_losses = AverageMeter()\r\n\r\n for idx, batch in enumerate((loader)):\r\n x = torch.FloatTensor(batch['seismogram']).to(device)\r\n y = torch.FloatTensor(batch['velocity']).to(device)\r\n\r\n if normalize:\r\n x = (x - seismo_mean) / seismo_std\r\n else:\r\n x = torch.log(torch.abs(x)) # torch.log(x)\r\n x[1 - torch.isfinite(x)] = 0.0\r\n y = (y - velocity_mean) / velocity_std\r\n\r\n y_pred = model(x)\r\n\r\n loss = criterion(y, y_pred)\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n train_losses.update(loss.item(), x.size(0))\r\n\r\n if writer is not None:\r\n writer.add_scalar(f\"{name}/train_loss.avg\", train_losses.avg, global_step=global_step + idx)\r\n\r\n return train_losses.avg\r\n\r\n\r\ndef validate(model, loader, criterion, seismo_mean, seismo_std, velocity_mean, velocity_std,\r\n writer=None, global_step=None, name=None, normalize=True):\r\n model.eval()\r\n validate_losses = AverageMeter()\r\n\r\n for idx, batch in enumerate((loader)):\r\n x = torch.FloatTensor(batch['seismogram']).to(device)\r\n y = torch.FloatTensor(batch['velocity']).to(device)\r\n\r\n if normalize:\r\n x = (x - seismo_mean) / seismo_std\r\n else:\r\n x = torch.log(torch.abs(x)) # torch.log(x)\r\n x[1 - torch.isfinite(x)] = 0.0\r\n y = (y - velocity_mean) / velocity_std\r\n\r\n y_pred = model(x)\r\n\r\n loss = criterion(y, y_pred)\r\n validate_losses.update(loss.item(), x.size(0))\r\n\r\n if writer is not None:\r\n writer.add_scalar(f\"{name}/val_loss.avg\", validate_losses.avg, global_step=global_step + idx)\r\n return validate_losses.avg\r\n\r\n\r\ndef calculate_mean_and_std(train_dataset):\r\n seismogram_stack = train_dataset[0]['seismogram'][None]\r\n velocity_stack = torch.FloatTensor(train_dataset[0]['velocity'][None])\r\n\r\n for i in range(1, len(train_dataset)):\r\n _el = train_dataset[i]\r\n seismogram_stack = torch.cat([seismogram_stack, (_el['seismogram'][None])], dim=0)\r\n velocity_stack = torch.cat([velocity_stack, torch.FloatTensor(_el['velocity'][None])], dim=0)\r\n\r\n seismo_mean = torch.mean(seismogram_stack).to(device)\r\n seismo_std = torch.std(seismogram_stack).to(device)\r\n\r\n velocity_mean = torch.mean(velocity_stack).to(device)\r\n velocity_std = torch.std(velocity_stack).to(device)\r\n\r\n return seismo_mean, seismo_std, velocity_mean, velocity_std\r\n\r\n\r\nif __name__ == '__main__':\r\n batch_size = 32\r\n experiment_dir_name = 'training_logs/1'\r\n save_model_each = 10\r\n normalize = True\r\n num_epoch = 100\r\n\r\n train_dataset = SeismicDataset(seismo_dir='data/train/raw/',\r\n velocity_dir='data/train/outputs/',\r\n transform=transforms.Compose([ToTensor()]))\r\n val_dataset = SeismicDataset(seismo_dir='data/val/raw/',\r\n 
velocity_dir='data/val/outputs/',\r\n transform=transforms.Compose([ToTensor()]))\r\n\r\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=0, shuffle=True)\r\n val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=0, shuffle=False)\r\n print(\"Dataset is loaded.\")\r\n\r\n seismo_mean, seismo_std, velocity_mean, velocity_std = calculate_mean_and_std(train_dataset)\r\n print(\"Mean and std are calculated.\")\r\n\r\n model = AccelerationPredictor().to(device)\r\n\r\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\r\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.999)\r\n\r\n criterion = nn.MSELoss().to(device)\r\n\r\n writer = SummaryWriter(log_dir=os.path.join(experiment_dir_name, 'logs'))\r\n os.makedirs(experiment_dir_name, exist_ok=True)\r\n\r\n print(\"Training is started:\")\r\n for epoch in tqdm(range(num_epoch)):\r\n\r\n train_loss = train(model=model, loader=train_loader, optimizer=optimizer, criterion=criterion,\r\n writer=writer, global_step=len(train_loader.dataset) * epoch,\r\n name=f\"{experiment_dir_name}_by_batch\", normalize=normalize,\r\n seismo_mean=seismo_mean, seismo_std=seismo_std,\r\n velocity_mean=velocity_mean, velocity_std=velocity_std)\r\n\r\n val_loss = validate(model=model, loader=val_loader, criterion=criterion,\r\n writer=writer, global_step=len(train_loader.dataset) * epoch,\r\n name=f\"{experiment_dir_name}_by_batch\", normalize=normalize,\r\n seismo_mean=seismo_mean, seismo_std=seismo_std,\r\n velocity_mean=velocity_mean, velocity_std=velocity_std)\r\n\r\n model_name = f\"emd_loss_epoch_{epoch}_train_{train_loss}_{val_loss}.pth\"\r\n\r\n if epoch % save_model_each == 0:\r\n torch.save(model.state_dict(), os.path.join(experiment_dir_name, model_name))\r\n\r\n writer.add_scalar(f\"{experiment_dir_name}_by_epoch/train_loss\", train_loss, global_step=epoch)\r\n writer.add_scalar(f\"{experiment_dir_name}_by_epoch/val_loss\", val_loss, global_step=epoch)\r\n\r\n lr_scheduler.step()\r\n\r\n print(\"Epoch: {}, Train: {}, Val: {}\".format(epoch, train_loss, val_loss))\r\n\r\n writer.export_scalars_to_json(os.path.join(experiment_dir_name, 'all_scalars.json'))\r\n writer.close()\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"146439905","text":"import random\nplayer=''\nname=input(\"enter the name:\")\nwhile name.isdigit()==True and len(name)<4:\n name=input(\"please consider your name:\")\nprint(\"User name:\",name)\nage=input(\"enter the age:\")\nwhile age.isalpha()==True and len(age)>18:\n age=input(\"please consider your name:\")\nprint(\"User age:\",age)\npassword=input(\"enter the password:\")\nif password=='admin':\n password=(\"enter the password:\")\ndef menu():\n print(\"MENU,\\n 1-PLAY GAME\\n 2-HIGH SCORE\\n 3-QUIT\")\n choice=input(\"enter the choice:\")\n if choice==1:\n print(\"Let's play\")\n if choice==2:\n print(\"high score\")\n if choice==3:\n\n print(\"quit\")\n\n\nwhile player != 'q':\n\n sum=0\n print('enter the choice:')\n print('R or r for rock\\n P or p for paper\\n S for scissors\\n Q or q to quit')\n player=input('')\n player.lower()\n select = 'rps'\n if player == 'q':\n break\n if player not in select:\n print('invalid choice')\n continue\n player=select.find(player)\n comp=random.randrange(1,4)\n select = ['rock', 'paper', 'scissors']\n print('Computer picked:', [comp])\n if player == 'r':\n player=1\n elif 
player == 'p':\n player=2\n elif player == 's':\n player=3\n if comp==1:\n choice='rock'\n elif comp==2:\n choice='paper'\n elif comp==3:\n choice='scissors'\n if player==1 and comp==3:\n sum = sum+1\n print(\"You win against comp\\n\"+\"score is \",sum)\n elif player==3 and comp==1:\n\n print(\"You lose against comp\\n\")\n #elif player < comp:\n\n print(\"You lost against comp\\n\")\n #elif player > comp:\n sum = sum+1\n print(\"You win against comp\\n\")\n elif player==comp:\n print(\"You tie the against comp\\n\")\n else:\n print(\"Try again\")\n print(\"Your Score:\",sum)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"program of rock paper sccisors.py","file_name":"program of rock paper sccisors.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"550580210","text":"# -*- coding: utf-8 -*-\nimport re\nfrom flask import render_template, request, redirect, flash, session, jsonify\nfrom whoosh import qparser\n\nfrom web.app import app, auth\nfrom web.model import Label, Inspiration, LabelInspirationRelationShip\nfrom web.util import get_whoosh_ix, q\n\n@app.route('/')\ndef main():\n inspiration_list = Inspiration.select().order_by(Inspiration.id.desc()).limit(20)\n labels = Label.select().order_by(Label.count.desc())\n return render_template(\"main.html\",inspiration_list=inspiration_list, labels=labels)\n\n\n@app.route('/login', methods=[\"POST\"])\ndef boring_user_login():\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n user = auth.authenticate(username, password)\n if user:\n auth.login_user(user)\n session.pop('_flashes', None)\n flash('login successfully')\n return redirect(\"/write\")\n\n else:\n flash('Incorrect username or password')\n return redirect(\"/\")\n\n\n@app.route('/write', methods=[\"GET\", \"POST\"])\n@auth.login_required\ndef write_inspiration():\n if request.method == \"GET\":\n labels = Label.select()\n\n return render_template(\"write.html\", labels=labels)\n else:\n user = auth.get_logged_in_user()\n content = request.form.get(\"content\", \"\").strip()\n if len(content) < 5:\n flash(\"no enough words in content area\")\n return redirect(\"/write\")\n \n\n ## make labels\n label_name_set = set(filter(lambda s: len(s.strip()) > 0, request.values.getlist(\"labels\")))\n label_list = [Label.get_or_create(name=label_name)[0] for label_name in label_name_set]\n ## make inspiration\n inspiration = Inspiration.post(inpiration_kwg={\"author\":user.id, \"content\":content}, label_list=label_list)\n ## we can defer this by using message-queue\n q.enqueue(inspiration.make_keyword_index, label_list)\n ## make rs\n flash(\"make inspiration successfully\")\n return redirect(\"/\")\n\n@app.route('/inspiration//modify', methods=[\"GET\", \"POST\"])\n@auth.login_required\ndef modify_inspiration(inspiration_id):\n inspiration = Inspiration.select().where(Inspiration.id==inspiration_id).get()\n if request.method == \"GET\":\n labels = Label.select()\n return render_template(\"modify.html\", \n labels=labels,\n inspiration=inspiration)\n else:\n content = request.form.get(\"content\")\n label_name_set = set(filter(lambda s: len(s.strip()) > 0, request.values.getlist(\"labels\")))\n label_list = [Label.get_or_create(name=label_name)[0] for label_name in label_name_set]\n inspiration.modify(content=content, label_list=label_list)\n q.enqueue(inspiration.remake_keyword_index, label_list)\n flash(\"modify inspiration successfully\")\n return 
redirect(\"/\")\n\n\n@app.route('/api/inspiration/<int:inspiration_id>/modify', methods=[\"POST\"])\n@auth.login_required\ndef api_modify_inspiration(inspiration_id):\n    inspiration = Inspiration.select().where(Inspiration.id==inspiration_id).get()\n    content = request.form.get(\"content\")\n    label_name_set = set(filter(lambda s: len(s.strip()) > 0, request.values.getlist(\"labels\")))\n    label_list = [Label.get_or_create(name=label_name)[0] for label_name in label_name_set]\n    inspiration.modify(content=content, label_list=label_list)\n    q.enqueue(inspiration.remake_keyword_index, label_list)\n    return jsonify(rcode=200)\n\n@app.route('/api/nolabel-inspiration/')\ndef nolabel_inspiration():\n    LIR = LabelInspirationRelationShip\n    LIR_list = LIR.select(LIR.inspiration).distinct()\n\n    inspiration_list = Inspiration.select().where(Inspiration.id.not_in(LIR_list))\n\n    inspiration_json = [inspiration.to_json() for inspiration in inspiration_list]\n\n    return jsonify({\"objects\": inspiration_json})\n\n\n\n\n\n\n\n\n","sub_path":"web/controllers/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"270913915","text":"from element_infos.login_page import LoginPage\nfrom common.base_page import BasePage\nfrom common.conf_utils import conf\nfrom common.set_driver import set_driver\nfrom common.element_yaml_utils import read_yaml\n\nclass MainPage(BasePage):\n    def __init__(self,driver):\n        super().__init__(driver)\n        element = read_yaml('main_page')\n        self.companyname_showbox = element['companyname_showbox']\n        self.myzone_menu = element['myzone_menu']\n        self.product_menu = element['product_menu']\n        self.project_menu = element['project_menu']\n        self.username_showspan = element['username_showspan']\n        # print(self.companyname_showbox)\n\n    def get_companyname(self,text): # get company name\n        value = self.get_element_attribute(self.companyname_showbox,text)\n        print(value)\n\n    def goto_myzone(self): # go to my zone\n        self.click(self.myzone_menu)\n    def goto_product(self): # go to product menu\n        self.click(self.product_menu)\n    def goto_project(self): # go to project menu\n        self.click(self.project_menu)\n    def get_usename(self): # get username\n        value = self.text(self.username_showspan)\n        print(value)\n\n\nif __name__ ==\"__main__\":\n    driver = set_driver(conf.get_chandao_path)\n    login_page = LoginPage(driver)\n    main_page = MainPage(driver)\n    login_page.input_username('admin')\n    login_page.input_password('Lrh19960912')\n    login_page.click_login()\n    main_page.get_companyname('title')\n    main_page.goto_project()\n    main_page.goto_product()\n    main_page.get_usename()\n","sub_path":"element_infos/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"502848051","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 15:44:28 2019\n\n@author: utilisateur\n\"\"\"\n\n# Write a program that checks whether a triangle is right-angled\n\n# If ABC is a right triangle at A, then BC² = AB² + AC².\n\nimport pandas as pd\n\nimport os\n\na = float(input(\"Entrer BC\")) #BC\nb = float(input(\"Entrer AB\")) #AB \nc = float(input(\"Entrer AC\")) #AC\n\n\nv = [a,b,c]\nh = max(v)\n\nif h == a and a**2 == b**2 + c**2 :\n    print(\"Votre triangle est rectangle en BC\")\nelif h == b and b**2 == a**2 + c**2 :\n    print(\"Votre triangle est rectangle en AB\")\nelif h == c and c**2 == a**2 + b**2 :\n    print(\"Votre triangle est rectangle en 
AC\")\nelse : \n    print(\"Votre triangle n'est pas rectangle\")\n\nos.system (\"pause\")\n    \n    \n\n","sub_path":"unclassed/Hypoténuse.py","file_name":"Hypoténuse.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"215254637","text":"import unittest\nfrom ...src.entities.deck import Deck\nfrom ...src.entities.card import Card\nfrom ...src.entities.cardvalue import CardValue\nfrom ...src.entities.cardsuit import CardSuit\nfrom ...src.services.deckshuffler import DeckShuffler\n\nclass DeckShufflerTest(unittest.TestCase):\n\n    def setUp(self):\n        cardList = [Card(CardValue.JOKER, CardSuit.NONE),\n                    Card(CardValue.ACE, CardSuit.SPADES),\n                    Card(CardValue.JACK, CardSuit.DIAMONDS),\n                    Card(CardValue.SEVEN, CardSuit.CLUBS),\n                    Card(CardValue.TWO, CardSuit.HEARTS),]\n        self.testDeck = Deck(cardList)\n        self.unshuffledTestDeck = self.testDeck[:]\n\n    def testShuffle(self):\n        DeckShuffler().shuffle(self.testDeck)\n        self.assertCountEqual(self.testDeck, self.unshuffledTestDeck)\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"jass/tests/services/testdeckshuffler.py","file_name":"testdeckshuffler.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"615926436","text":"#!F:\\python\n# Filename: try_except.py\n# python version: 3.4\n\ntry:\n\ttext = input('Enter something --> ')\nexcept EOFError:\n\tprint('Why did you do an EOF on me?')\nexcept KeyboardInterrupt:\n\tprint('You cancelled the operation.')\nelse:\n\tprint('You entered {0}'.format(text))\n","sub_path":"Python/aByteofPython/try_except.py","file_name":"try_except.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"43901215","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfig = plt.figure()\ndata = [np.random.normal(0, std, 1000) for std in range(1, 6)]\n\nbox = plt.boxplot(data, notch=True, patch_artist=True)\n\ncolors = ['cyan', 'lightblue', 'lightgreen', 'tan', 'pink']\nfor patch, color in zip(box['boxes'], colors):\n    patch.set_facecolor(color)\n\nplt.show()","sub_path":"Literature/Schmidli_Jeremias_Master_Thesis/Old Files/Python/examples/boxplot.py","file_name":"boxplot.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
+{"seq_id":"390433797","text":"\"\"\"\nindex of N/A in one_hot feature 0,41,73,112,129,146\n39 in case of only seoul & not integrate china\n\"\"\"\n\nimport utils\nimport tensorflow as tf\nimport numpy as np\nimport math\nimport os\nimport sys\nimport time\nimport re\nfrom datetime import datetime, timedelta\nimport argparse\nimport properties as p\nimport heatmap\nimport craw_seoul_aqi as aqi\nimport craw_aws as aws\n\nimport process_sp_vector as psv\nfrom baseline_cnnlstm import BaselineModel\nfrom NeuralNet import NeuralNetwork\nfrom adain import Adain\nfrom stack_autoencoder import StackAutoEncoder\nfrom mask_gan import MaskGan\nfrom apgan import APGan\nfrom mask_gan_2 import MaskGan2\nfrom capgan import CAPGan\nfrom tgan import TGAN\nimport matplotlib\nimport matplotlib.pyplot as plt\n# from spark_engine import SparkEngine\nimport district_neighbors as dd\n\n\ndef convert_element_to_grid(self, context):\n    res = []\n    for b in context:\n        res_t = []\n        for t in b:\n            p = heatmap.fill_map(t, self.map, False) \n            res_t.append(p)\n 
res.append(res_t)\n return np.asarray(res, dtype=np.float)\n\n\ndef execute(path, attention_url, url_weight, model, session, saver, batch_size, encoder_length, decoder_length, is_test, train_writer=None, offset=0):\n print(\"==> Loading dataset\")\n dataset = utils.load_file(path)\n global_t = offset\n if dataset:\n dataset = np.asarray(dataset, dtype=np.float32)\n lt = len(dataset)\n train, valid = utils.process_data_grid(lt, batch_size, encoder_length, decoder_length, is_test)\n if attention_url:\n attention_data = utils.load_file(attention_url)\n else:\n attention_data = None\n model.set_data(dataset, train, valid, attention_data)\n model.assign_datasets(session)\n if not is_test:\n best_val_epoch = 0\n best_val_loss = float('inf')\n # best_overall_val_loss = float('inf')\n print('==> starting training')\n train_losses = []\n train_f, valid_f = train_writer\n for epoch in xrange(p.total_iteration):\n print('Epoch {}'.format(epoch))\n start = time.time()\n global_t = offset + epoch\n train_loss, _ = model.run_epoch(session, train, global_t, train_f,train_op=model.train_op, train=True)\n train_losses.append(train_loss)\n print('Training loss: {}'.format(train_loss))\n\n valid_loss, _ = model.run_epoch(session, valid, global_t, train_writer=valid_f)\n print('Validation loss: {}'.format(valid_loss))\n\n if valid_loss < best_val_loss:\n best_val_loss = valid_loss\n best_val_epoch = epoch\n # if best_val_loss < best_overall_val_loss:\n print('Saving weights')\n # best_overall_val_loss = best_val_loss\n saver.save(session, 'weights/%s.weights' % url_weight)\n\n if (epoch - best_val_epoch) > p.early_stopping:\n break\n print('Total time: {}'.format(time.time() - start))\n tm = utils.clear_datetime(datetime.strftime(utils.get_datetime_now(), \"%Y-%m-%d %H:%M:%S\"))\n l_fl = \"train_loss/train_loss_%s_%s\" % (url_weight, tm)\n utils.save_file(l_fl, train_losses)\n else:\n # saver.restore(session, url_weight)\n print('==> running model')\n loss, preds = model.run_epoch(session, model.train, shuffle=False)\n l_str = 'Test mae loss: %.4f' % loss\n print(l_str)\n pt = re.compile(\"weights/([A-Za-z0-9_.]*).weights\")\n name = pt.match(url_weight)\n if name:\n name_s = name.group(1)\n else:\n name_s = url_weight\n utils.save_file(\"test_sp/%s_loss.txt\" % name_s, l_str, use_pickle=False)\n utils.save_file(\"test_sp/%s\" % name_s, preds)\n return global_t\n\n\ndef get_gpu_options():\n gpu_options = None\n device_count = None\n if \"gpu\" in p.device:\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=p.gpu_fraction)\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=p.gpu_devices\n else:\n device_count={\"GPU\":0}\n configs = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options, device_count=device_count)\n return configs\n\n\ndef main(url_feature=\"\", attention_url=\"\", url_weight=\"sp\", batch_size=128, encoder_length=24, embed_size=None, loss=None, decoder_length=24, decoder_size=4, grid_size=25, rnn_layers=1,\n dtype=\"grid\", is_folder=False, is_test=False, use_cnn=True, restore=False):\n model = BaselineModel(encoder_length=encoder_length, encode_vector_size=embed_size, batch_size=batch_size, decode_vector_size=decoder_size, rnn_layers=rnn_layers,\n dtype=dtype, grid_size=grid_size, use_cnn=use_cnn, loss=loss)\n print('==> initializing models')\n with tf.device('/%s' % p.device):\n model.init_ops()\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n utils.assert_url(url_feature)\n tconfig = get_gpu_options()\n sum_dir = 'summaries'\n if not 
utils.check_file(sum_dir):\n os.makedirs(sum_dir)\n\n train_writer = None\n valid_writer = None\n with tf.Session(config=tconfig) as session:\n if not restore:\n session.run(init)\n else:\n print(\"==> Reload pre-trained weights\")\n saver.restore(session, url_weight)\n url_weight = url_weight.split(\"/\")[-1]\n url_weight = url_weight.rstrip(\".weights\")\n \n if not is_test:\n suf = time.strftime(\"%Y.%m.%d_%H.%M\")\n train_writer = tf.summary.FileWriter(sum_dir + \"/\" + url_weight + \"_train\", session.graph, filename_suffix=suf)\n valid_writer = tf.summary.FileWriter(sum_dir + \"/\" + url_weight + \"_valid\", session.graph, filename_suffix=suf)\n\n folders = None\n \n if is_folder:\n folders = os.listdir(url_feature)\n if attention_url:\n a_folders = os.listdir(attention_url)\n folders = zip(folders, a_folders)\n last_epoch = 0\n for i, files in enumerate(folders):\n if attention_url:\n x, y = files\n att_url = os.path.join(attention_url, y)\n print(\"==> Training set (%i, %s, %s)\" % (i + 1, x, y))\n else: \n x = files\n print(\"==> Training set (%i, %s)\" % (i + 1, x))\n last_epoch = execute(os.path.join(url_feature, x), att_url, url_weight, model, session, saver, batch_size, encoder_length, decoder_length, is_test, (train_writer, valid_writer), last_epoch)\n else:\n _ = execute(url_feature, attention_url, url_weight, model, session, saver, batch_size, encoder_length, decoder_length, is_test, (train_writer, valid_writer))\n\n\ndef save_gan_preds(url_weight, preds):\n shape = np.shape(preds)\n pt = re.compile(\"weights/([A-Za-z0-9_.]*).weights\")\n name = pt.match(url_weight)\n if name:\n name_s = name.group(1)\n else: \n name_s = url_weight\n pr_s = shape[0] * p.batch_size\n if shape[-1] == 1 and len(shape) == 4:\n preds = np.reshape(preds, (pr_s, shape[-3], shape[-2]))\n else:\n preds = np.reshape(preds, (pr_s, shape[-2], shape[-1]))\n utils.save_file(\"test_sp/%s\" % name_s, preds)\n\n\ndef execute_gan(path, attention_url, url_weight, model, session, saver, batch_size, encoder_length, decoder_length, is_test, train_writer=None, offset=0, gpu_nums=1):\n if gpu_nums > 1:\n if not is_test:\n _ = model.run_multiple_gpu(session, path, attention_url, url_weight, train_writer, offset, train=True, gpu_nums=gpu_nums)\n else:\n preds = model.run_multiple_gpu(session, path, attention_url, url_weight, train=False, shuffle=False, gpu_nums=gpu_nums)\n save_gan_preds(preds, url_weight)\n else:\n print(\"==> Loading dataset\")\n dataset = utils.load_file(path)\n if dataset:\n dataset = np.asarray(dataset, dtype=np.float32)\n lt = len(dataset)\n train, _ = utils.process_data_grid(lt, batch_size, encoder_length, decoder_length, True)\n attention_data = None\n if attention_url:\n attention_data = utils.load_file(attention_url)\n model.set_data(dataset, train, None, attention_data)\n model.assign_datasets(session)\n if not is_test:\n print('==> starting training')\n train_f = train_writer\n suffix = p.weight_saving_break\n for epoch in xrange(p.total_iteration):\n _ = model.run_epoch(session, train, offset + epoch, train_f, train=True, verbose=False)\n tmp_e = epoch + 1\n if tmp_e % 100 == 0:\n suffix = math.ceil(float(tmp_e) / p.weight_saving_break)\n # utils.update_progress((epoch + 1) * 1.0 / p.total_iteration)\n saver.save(session, 'weights/%s_%i.weights' % (url_weight, suffix))\n saver.save(session, 'weights/%s_%i.weights' % (url_weight, suffix))\n else:\n # saver.restore(session, url_weight)\n print('==> running model')\n preds = model.run_epoch(session, train, train=False, verbose=False, 
shuffle=False)\n save_gan_preds(url_weight, preds)\n\n\ndef train_gan(url_feature=\"\", attention_url=\"\", url_weight=\"sp\", batch_size=128, encoder_length=24, embed_size=None, \n decoder_length=24, decoder_size=4, grid_size=25, is_folder=False, is_test=False, restore=False, model_name=\"APGAN\"):\n if model_name == \"APGAN\":\n model = APGan(encoder_length=encoder_length, encode_vector_size=embed_size, batch_size=batch_size, decode_vector_size=decoder_size, grid_size=grid_size)\n elif model_name == \"MASKGAN\":\n model = MaskGan(encoder_length=encoder_length, encode_vector_size=embed_size, batch_size=batch_size, decode_vector_size=decoder_size, grid_size=grid_size, use_cnn=1)\n elif model_name == \"MASKGAN2\":\n model = MaskGan2(encoder_length=encoder_length, encode_vector_size=embed_size, batch_size=batch_size, decode_vector_size=decoder_size, grid_size=grid_size)\n elif model_name == \"CAPGAN\":\n model = CAPGan(encoder_length=encoder_length, encode_vector_size=embed_size, batch_size=batch_size, decode_vector_size=decoder_size, grid_size=grid_size)\n else:\n model = TGAN(encoder_length=8, decoder_length=8, grid_size=32)\n #dv = p.gpu_devices.split(\",\")\n dv=[1]\n tconfig = get_gpu_options()\n utils.assert_url(url_feature)\n sum_dir = 'summaries'\n saver = None\n if not utils.check_file(sum_dir):\n os.makedirs(sum_dir)\n if \"gpu\" in p.device and len(dv) > 1:\n model.add_placeholders()\n with tf.Session(config=tconfig) as session: \n csn = int(time.time())\n if not is_test:\n url_weight = url_weight.split(\"/\")[-1]\n url_weight = url_weight.rstrip(\".weights\")\n suf = time.strftime(\"%Y.%m.%d_%H.%M\")\n train_writer = tf.summary.FileWriter(\"%s/%s_%i\" % (sum_dir, url_weight, csn), session.graph, filename_suffix=suf)\n folders = None\n if is_folder:\n folders = os.listdir(url_feature)\n if attention_url:\n a_folders = os.listdir(attention_url)\n folders = zip(folders, a_folders)\n for i, files in enumerate(folders):\n if attention_url:\n x, y = files\n att_url = os.path.join(attention_url, y)\n print(\"==> Training set (%i, %s, %s)\" % (i + 1, x, y))\n else: \n x = files\n print(\"==> Training set (%i, %s)\" % (i + 1, x))\n execute_gan(os.path.join(url_feature, x), att_url, url_weight, model, session, saver, batch_size, encoder_length, decoder_length, is_test, train_writer, i * p.total_iteration, gpu_nums=len(dv))\n else:\n execute_gan(url_feature, attention_url, url_weight, model, session, saver, batch_size, encoder_length, decoder_length, is_test, train_writer, gpu_nums=len(dv))\n model.run_multiple_gpu(session, data)\n else:\n print('==> initializing models')\n with tf.device('/%s' % p.device):\n model.init_ops(not is_test)\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n \n train_writer = None\n \n with tf.Session(config=tconfig) as session: \n if not restore:\n session.run(init)\n else:\n print(\"==> Reload pre-trained weights\")\n saver.restore(session, url_weight)\n csn = int(time.time())\n if not is_test:\n url_weight = url_weight.split(\"/\")[-1]\n url_weight = url_weight.rstrip(\".weights\")\n suf = time.strftime(\"%Y.%m.%d_%H.%M\")\n train_writer = tf.summary.FileWriter(\"%s/%s_%i\" % (sum_dir, url_weight, csn), session.graph, filename_suffix=suf)\n folders = None\n if is_folder:\n folders = os.listdir(url_feature)\n if attention_url:\n a_folders = os.listdir(attention_url)\n folders = zip(folders, a_folders)\n for i, files in enumerate(folders):\n if attention_url:\n x, y = files\n att_url = os.path.join(attention_url, y)\n print(\"==> Training 
set (%i, %s, %s)\" % (i + 1, x, y))\n                else: \n                    x = files\n                    print(\"==> Training set (%i, %s)\" % (i + 1, x))\n                execute_gan(os.path.join(url_feature, x), att_url, url_weight, model, session, saver, batch_size, encoder_length, decoder_length, is_test, train_writer, i * p.total_iteration, gpu_nums=len(dv))\n            else:\n                execute_gan(url_feature, attention_url, url_weight, model, session, saver, batch_size, encoder_length, decoder_length, is_test, train_writer, gpu_nums=len(dv))\n    else:\n        print('==> initializing models')\n        with tf.device('/%s' % p.device):\n            model.init_ops(not is_test)\n            init = tf.global_variables_initializer()\n            saver = tf.train.Saver()\n        \n        train_writer = None\n        \n        with tf.Session(config=tconfig) as session: \n            if not restore:\n                session.run(init)\n            else:\n                print(\"==> Reload pre-trained weights\")\n                saver.restore(session, url_weight)\n            csn = int(time.time())\n            if not is_test:\n                url_weight = url_weight.split(\"/\")[-1]\n                url_weight = url_weight.rstrip(\".weights\")\n                suf = time.strftime(\"%Y.%m.%d_%H.%M\")\n                train_writer = tf.summary.FileWriter(\"%s/%s_%i\" % (sum_dir, url_weight, csn), session.graph, filename_suffix=suf)\n            folders = None\n            if is_folder:\n                folders = os.listdir(url_feature)\n                if attention_url:\n                    a_folders = os.listdir(attention_url)\n                    folders = zip(folders, a_folders)\n                for i, files in enumerate(folders):\n                    if attention_url:\n                        x, y = files\n                        att_url = os.path.join(attention_url, y)\n                        print(\"==> Training 
Feed to model\n # model = BaselineModel(encoder_length=encoder_length, encode_vector_size=12, batch_size=1, decoder_length=decoder_length, rnn_layers=1,\n # dtype='grid', grid_size=25, use_cnn=True)\n # model.set_data(sp_vectors, [0], None)\n model = MaskGan(encoder_length=encoder_length, encode_vector_size=15, batch_size=1, decode_vector_size=9, grid_size=25, use_cnn=True)\n model.set_data(sp_vectors, [0], None, china_vectors)\n with tf.device('/%s' % p.device):\n model.init_ops()\n saver = tf.train.Saver()\n \n tconfig = get_gpu_options()\n with tf.Session(config=tconfig) as session:\n model.assign_datasets(session) \n print('==> restore model')\n saver.restore(session, 'weights/%s' % p.prediction_weight)\n print('==> running model')\n preds = model.run_epoch(session, model.train, train=False, verbose=False, shuffle=False)\n preds = np.reshape(preds, (decoder_length, p.grid_size, p.grid_size))\n utils.save_file(\"test_acc/current_preds\", preds) \n aggregate_predictions(preds)\n return preds, timestamp\n return [], []\n \n\n# call neural networks, stack autoencoder, or adain \ndef run_neural_nets(url_feature=\"\", attention_url=\"\", url_weight=\"sp\", encoder_length=24, encoder_size=15, decoder_length=8, decoder_size=9, is_test=False, restore=False, model=\"NN\", pre_train=False):\n if model == \"NN\":\n model = NeuralNetwork(encoder_length=encoder_length, encoder_vector_size=encoder_size, decoder_length=decoder_length, decoder_vector_size=decoder_size)\n elif model == \"SAE\":\n model = StackAutoEncoder(encoder_length=encoder_length, encoder_vector_size=encoder_size, decoder_length=decoder_length, pre_train=pre_train)\n else:\n model = Adain(encoder_length=encoder_length, encoder_vector_size=encoder_size, decoder_length=decoder_length)\n print('==> initializing models')\n with tf.device('/%s' % p.device):\n model.init_model()\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n utils.assert_url(url_feature)\n\n tconfig = get_gpu_options()\n sum_dir = 'summaries'\n if not utils.check_file(sum_dir):\n os.makedirs(sum_dir)\n\n train_writer = None\n with tf.Session(config=tconfig) as session:\n if not restore:\n session.run(init)\n else:\n print(\"==> Reload pre-trained weights\")\n saver.restore(session, url_weight)\n url_weight = url_weight.split(\"/\")[-1]\n url_weight = url_weight.rstrip(\".weights\")\n \n if not is_test:\n suf = time.strftime(\"%Y.%m.%d_%H.%M\")\n train_writer = tf.summary.FileWriter(sum_dir + \"/\" + url_weight + \"_train\", session.graph, filename_suffix=suf)\n valid_writer = tf.summary.FileWriter(sum_dir + \"/\" + url_weight + \"_valid\", session.graph, filename_suffix=suf)\n\n print(\"==> Loading dataset\")\n dataset = utils.load_file(url_feature)\n if dataset:\n dataset = np.asarray(dataset, dtype=np.float32)\n lt = len(dataset)\n st = int(lt/2)\n lt = lt - st\n dataset = dataset[st:,:,:]\n train, valid = utils.process_data_grid(lt, p.batch_size, encoder_length, decoder_length, is_test)\n if attention_url:\n attention_data = utils.load_file(attention_url)\n else:\n attention_data = None\n model.set_data(dataset, train, valid, attention_data, session)\n if not is_test:\n best_val_epoch = 0\n best_val_loss = float('inf')\n # best_overall_val_loss = float('inf')\n print('==> starting training')\n for epoch in xrange(p.total_iteration):\n print('Epoch {}'.format(epoch))\n start = time.time()\n train_loss, _ = model.run_epoch(session, train, epoch, train_writer, train_op=model.train_op, train=True)\n print('Training loss: {}'.format(train_loss))\n\n 
valid_loss, _ = model.run_epoch(session, valid, epoch, valid_writer)\n print('Validation loss: {}'.format(valid_loss))\n\n if valid_loss < best_val_loss:\n best_val_loss = valid_loss\n best_val_epoch = epoch\n print('Saving weights')\n saver.save(session, 'weights/%s.weights' % url_weight)\n\n if (epoch - best_val_epoch) > p.early_stopping:\n break\n print('Total time: {}'.format(time.time() - start))\n else:\n # saver.restore(session, url_weight)\n print('==> running model')\n _, preds = model.run_epoch(session, model.train, shuffle=False)\n pt = re.compile(\"weights/([A-Za-z0-9_.]*).weights\")\n name = pt.match(url_weight)\n if name:\n name_s = name.group(1)\n else:\n name_s = url_weight\n utils.save_file(\"test_sp/%s\" % name_s, preds)\n\n\nif __name__ == \"__main__\":\n # 10110 -> 10080 -> 126 batch \n # python train.py -pr \"vectors/labels\" -f \"vectors/full_data\" -fl \"vectors/full_data_len\" -p \"train_basic_64b_tanh_12h_\" -fw \"basic\" -dc 1 -l mae -r 10 -usp 1 -e 13 -bs 126 -sl 24 -ir 0 \n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--feature\", help=\"a path to datasets (either to a file or a folder)\")\n parser.add_argument(\"-f\", \"--folder\", default=0, type=int, help=\"either train a folder or just train a file\")\n parser.add_argument(\"-w\", \"--url_weight\", type=str, default=\"\")\n parser.add_argument(\"-au\", \"--attention_url\", type=str, default=\"\")\n parser.add_argument(\"-bs\", \"--batch_size\", type=int, default=64)\n parser.add_argument(\"-l\", \"--loss\", default='mse')\n parser.add_argument(\"-e\", \"--embed_size\", type=int, default=15)\n parser.add_argument(\"-el\", \"--encoder_length\", type=int, default=24)\n parser.add_argument(\"-dl\", \"--decoder_length\", type=int, default=24)\n parser.add_argument(\"-ds\", \"--decoder_size\", type=int, default=9)\n parser.add_argument(\"-g\", \"--grid_size\", type=int, default=25, help=\"size of grid\")\n parser.add_argument(\"-dt\", \"--dtype\", default='grid', help=\"dtype is either 'grid' or 'dis' that mean use grid data of just station data\")\n parser.add_argument(\"-t\", \"--is_test\", default=0, help=\"is testing\", type=int)\n parser.add_argument(\"-cnn\", \"--use_cnn\", default=1, help=\"using cnn or not in mining input's vectors\", type=int)\n parser.add_argument(\"-r\", \"--rnn_layers\", default=1, help=\"number of rnn layers\", type=int)\n parser.add_argument(\"-m\", \"--model\", default=\"GAN\")\n parser.add_argument(\"-rs\", \"--restore\", default=0, help=\"Restore pre-trained model\", type=int)\n parser.add_argument(\"-p\", \"--pretrain\", default=0, help=\"Pretrain model: only use of SAE networks\", type=int)\n \n args = parser.parse_args()\n \"\"\" \n sparkEngine = SparkEngine()\n preds, timestamp = get_prediction_real_time(sparkEngine)\n \n \"\"\"\n \n if \"GAN\" in args.model:\n train_gan(args.feature, args.attention_url, args.url_weight, args.batch_size, args.encoder_length, args.embed_size, args.decoder_length, args.decoder_size, \n args.grid_size, is_folder=bool(args.folder), is_test=bool(args.is_test), restore=bool(args.restore), model_name=args.model)\n elif args.model == \"CNN_LSTM\":\n main(args.feature, args.attention_url, args.url_weight, args.batch_size, args.encoder_length, args.embed_size, args.loss, args.decoder_length, args.decoder_size, \n args.grid_size, args.rnn_layers, dtype=args.dtype, is_folder=bool(args.folder), is_test=bool(args.is_test), use_cnn=bool(args.use_cnn), restore=bool(args.restore))\n elif args.model == \"ADAIN\":\n 
run_neural_nets(args.feature, args.attention_url, args.url_weight, args.encoder_length, args.embed_size, args.decoder_length, args.decoder_size, bool(args.is_test), bool(args.restore), args.model)\n elif args.model == \"SAE\":\n run_neural_nets(args.feature, args.attention_url, args.url_weight, args.encoder_length, args.embed_size, args.decoder_length, args.decoder_size, bool(args.is_test), bool(args.restore), args.model, bool(args.pretrain))\n elif args.model == \"NN\":\n run_neural_nets(args.feature, args.attention_url, args.url_weight, args.encoder_length, args.embed_size, args.decoder_length, args.decoder_size, bool(args.is_test), bool(args.restore))\n elif args.model == \"TGAN\":\n train_gan(args.feature, \"\", args.url_weight, args.batch_size, args.encoder_length, 1, args.decoder_length, 1, 32, False, is_test=bool(args.is_test), restore=bool(args.restore), model_name=args.model)","sub_path":"train_sp.py","file_name":"train_sp.py","file_ext":"py","file_size_in_byte":25426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"286051972","text":"import random\n\nfrom game import Person, bcolors\nfrom inventory import Item\nfrom magic import Spell\n\nprint(\"\\n\\n\")\n# Create Black Magic\nfire = Spell(\"Fire\", 25, 600, \"black\")\nlight = Spell(\"Light\", 25, 600, \"black\")\nwater = Spell(\"Water\", 25, 600, \"black\")\nshadow = Spell(\"Shadow\", 25, 600, \"black\")\n\n# Create White Magic\ncure = Spell(\"Cure\", 25, 620, \"white\")\ncure2 = Spell(\"Cure II\", 32, 1500, \"white\")\ncure3 = Spell(\"Cure III\", 50, 6000, \"white\")\n\n\n# Create some Items\npotion = Item(\"Potion\", \"potion\", \"Heals 50 HP\", 50)\nhipotion = Item(\"Hi-Potion\", \"potion\", \"Heals 100 HP\", 100)\nsuperpotion = Item(\"Super Potion\", \"potion\", \"Heals 1000 HP\", 1000)\nelixir = Item(\"Elixir\", \"elixir\",\n \"Fully restores HP/MP of one party members\", 9999)\nhielixir = Item(\"Megaelixir\", \"elixir\", \"Fully restores party's HP/MP\", 9999)\n\ngrenade = Item(\"Grenade\", \"attack\", \"Deals 500 damage\", 500)\n\nplayer_spells = [fire, light, water, cure, cure2]\nenemy_spells = [shadow, light, fire, cure3]\nplayer_items = [{\"item\": potion, \"quantity\": 15}, {\"item\": hipotion, \"quantity\": 5},\n {\"item\": superpotion, \"quantity\": 5}, {\n \"item\": elixir, \"quantity\": 5},\n {\"item\": hielixir, \"quantity\": 2}, {\"item\": grenade, \"quantity\": 5}]\n\n# Instantiate People\nplayer1 = Person(\"Crono\", 3260, 132, 300, 34, player_spells, player_items)\nplayer2 = Person(\"Ayla \", 4160, 138, 311, 34, player_spells, player_items)\nplayer3 = Person(\"Frog \", 3089, 174, 288, 34, player_spells, player_items)\n\nenemy1 = Person(\"Flea \", 1250, 130, 560, 325, enemy_spells, [])\nenemy2 = Person(\"Ozzie \", 18200, 701, 525, 25, enemy_spells, [])\nenemy3 = Person(\"Slash \", 1250, 130, 560, 325, enemy_spells, [])\n\nplayers = [player1, player2, player3]\nenemies = [enemy1, enemy2, enemy3]\n\ndefeated_enemies = 0\ndefeated_players = 0\n\nrunning = True\ni = 0\n\nprint(bcolors.FAIL + bcolors.BOLD + \"AN ENEMY ATTACKS!\" + bcolors.ENDC)\n\nwhile running:\n try:\n print(\"==============================\")\n print(\"\\n\")\n\n print(\"NAME HP MP\")\n for player in players:\n player.get_stats()\n\n print(\"\\n\")\n\n for enemy in enemies:\n enemy.get_enemy_stats()\n\n for player in players:\n player.print_action()\n choice = input(\" Choose action\\n >>> \")\n index = int(choice) - 1\n\n if index == 0:\n dmg = player.generate_damage()\n enemy = 
player.print_target(enemies)\n\n enemies[enemy].take_damage(dmg)\n print(\"\\n\" + \"You attacked \" + enemies[enemy].name.replace(\n \" \", \"\") + \" for\", dmg, \"points of damage.\")\n\n if enemies[enemy].get_hp() == 0:\n print(enemies[enemy].name.replace(\" \", \"\") + \" has died.\")\n del enemies[enemy]\n\n elif index == 1:\n player.print_magic()\n magic_choice = int(input(\" Choose magic: \")) - 1\n\n if magic_choice == -1:\n continue\n\n spell = player.magic[magic_choice]\n magic_dmg = spell.generate_damage()\n\n current_mp = player.get_mp()\n\n if spell.cost > current_mp:\n print(bcolors.FAIL + \"\\nNot enough MP\\n\" + bcolors.ENDC)\n continue\n\n player.reduce_mp(spell.cost)\n\n if spell.type == \"white\":\n player.heal(magic_dmg)\n print(bcolors.OKBLUE + \"\\n\" + spell.name +\n \" heals for\", str(magic_dmg), \"HP.\" + bcolors.ENDC)\n elif spell.type == \"black\":\n\n enemy = player.print_target(enemies)\n\n enemies[enemy].take_damage(magic_dmg)\n\n print(bcolors.OKBLUE + \"\\n\" + spell.name + \" deals\", str(magic_dmg),\n \"points of damage to \" + enemies[enemy].name.replace(\" \", \"\") + bcolors.ENDC)\n\n if enemies[enemy].get_hp() == 0:\n print(enemies[enemy].name + \" has died.\")\n del enemies[enemy]\n defeated_enemies += 1\n\n elif index == 2:\n player.print_item()\n item_choice = int(input(\" Choose item: \")) - 1\n\n if item_choice == -1:\n continue\n\n item = player.items[item_choice][\"item\"]\n\n if player.items[item_choice][\"quantity\"] == 0:\n print(bcolors.FAIL + \"\\n\" + \"None left...\" + bcolors.ENDC)\n continue\n\n player.items[item_choice][\"quantity\"] -= 1\n\n if item.type == \"potion\":\n player.heal(item.prop)\n print(bcolors.OKGREEN + \"\\n\" + item.name +\n \" heals for\", str(item.prop), \"HP\", bcolors.ENDC)\n elif item.type == \"elixir\":\n\n if item.name == \"Megaelixir\":\n for i in players:\n i.hp = i.maxhp\n i.mp = i.maxmp\n else:\n player.hp = player.maxhp\n player.mp = player.maxmp\n print(bcolors.OKGREEN + \"\\n\" + item.name +\n \" fully restores HP/MP\" + bcolors.ENDC)\n elif item.type == \"attack\":\n\n enemy = player.print_target(enemies)\n\n enemies[enemy].take_damage(item.prop)\n\n print(bcolors.FAIL + \"\\n\" + item.name + \" deals\", str(item.prop),\n \" points of damage to \" + enemies[enemy].name.replace(\" \", \"\") + \".\" + bcolors.ENDC)\n\n except ValueError:\n continue\n\n except IndexError:\n continue\n\n except NameError:\n continue\n\n if enemies[enemy].get_hp() < 1:\n print(enemies[enemy].name.replace(\" \", \"\") + \" has died.\")\n del enemies[enemy]\n\n for player in players:\n if player.get_hp() < 1:\n defeated_players += 1\n\n for enemy in enemies:\n if enemy.get_hp() < 1:\n defeated_enemies += 1\n\n # Check if Player won\n if defeated_enemies == 3:\n print(bcolors.OKGREEN + \"You win!\" + bcolors.ENDC)\n running = False\n\n print(\"\\n\")\n # Enemy attack phase\n for enemy in enemies:\n enemy_choice = random.randrange(0, 2)\n\n if enemy_choice == 0:\n target = random.randrange(0, len(players))\n enemy_dmg = enemy.generate_damage()\n\n players[target].take_damage(enemy_dmg)\n print(enemy.name.replace(\" \", \"\") + \" attacks \" +\n players[target].name.replace(\" \", \"\") + \" for\", str(enemy_dmg) + \".\")\n\n elif enemy_choice == 1:\n spell, magic_dmg = enemy.choose_enemy_spell()\n enemy.reduce_mp(spell.cost)\n\n if spell.type == \"white\":\n enemy.heal(magic_dmg)\n print(bcolors.OKBLUE + spell.name + \" heals \" + enemy.name.replace(\" \",\n \"\") + \" for\", str(magic_dmg), \"HP.\" + bcolors.ENDC)\n elif 
spell.type == \"black\":\n\n target = random.randrange(0, len(players))\n\n players[target].take_damage(magic_dmg)\n\n print(bcolors.OKBLUE + \"\\n\" + enemy.name.replace(\" \", \"\") + \"'s \" + spell.name + \" deals\", str(magic_dmg),\n \"points of damage to \" + players[target].name.replace(\" \", \"\") + bcolors.ENDC)\n\n if players[target].get_hp() == 0:\n print(players[target].name.replace(\n \" \", \"\") + \" has died.\")\n del players[target]\n defeated_players += 1\n\n # print(\"Enemy chose\", spell, \"damage is\", magic_dmg)\n\n # Check if Enemy won\n if defeated_players == 3:\n print(bcolors.FAIL + \"Your enemies have defeated you!\" + bcolors.ENDC)\n running = False\n break\n\n print(\"\\n\")\n","sub_path":"Nork/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"302191289","text":"try:\n\timport rhinoscriptsyntax as rs\n\nexcept ImportError:\n compas.raise_if_ironpython()\n\n__author__ = ['Robin Oval']\n__copyright__ = 'Copyright 2018, Block Research Group - ETH Zurich'\n__license__ = 'MIT License'\n__email__ = 'oval@arch.ethz.ch'\n\n\n__all__ = [\n\t'input_add',\n\t'input_delete',\n\t'input_change'\n\t'input_objects',\n]\n\ndef input_add():\n\t\"\"\"Add input to input layer.\n\n\tParameters\n\t----------\n\n\tReturns\n\t-------\n\n\t\"\"\"\n\n\tobjects = rs.GetObjects('input to add')\n\tif objects is not None:\n\t\trs.ObjectLayer(objects, 'input')\n\ndef input_delete():\n\t\"\"\"Delete input from input layer.\n\n\tParameters\n\t----------\n\n\tReturns\n\t-------\n\n\t\"\"\"\n\n\tobjects = rs.GetObjects('input to delete')\n\tif objects is not None:\n\t\trs.ObjectLayer(objects, 'default')\n\n\ndef input_change():\n\t\"\"\"Change input in input layer.\n\n\tParameters\n\t----------\n\n\tReturns\n\t-------\n\n\t\"\"\"\n\n\tinput_delete()\n\tinput_add()\n\ndef input_objects():\n\t\"\"\"Get input objects.\n\n\tParameters\n\t----------\n\n\tReturns\n\t-------\n\tsurfaces : list\n\t\tThe guids of the surfaces\n\tpoints : list\n\t\tThe guids of the points\n\tcurves : list\n\t\tThe guids of the curves\n\n\t\"\"\"\n\n\tinput_objects = rs.ObjectsByLayer('input')\n\tsurfaces = [obj for obj in input_objects if rs.ObjectType(obj) == 8]\n\tpoints = [obj for obj in input_objects if rs.ObjectType(obj) == 1]\n\tcurves = [obj for obj in input_objects if rs.ObjectType(obj) == 4]\n\n\treturn surfaces, points, curves\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == '__main__':\n\n\timport compas","sub_path":"src/compas_pattern/cad/rhino/input_functions.py","file_name":"input_functions.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"515084576","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 13 18:12:57 2019\n\n@author: robert\n\"\"\"\n\nimport sys\n\ntotalCost = 0.0\n\ncost = sys.stdin.readline()\ncpms = float(cost)\nnumLawns = int(sys.stdin.readline())\nfor lawn in sys.stdin:\n wl = lawn.split()\n w = float(wl[0])\n l = float(wl[1])\n cfl = w * l * cpms\n totalCost = totalCost + cfl\n \nprint(totalCost) \n \n ","sub_path":"lawn.py","file_name":"lawn.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} 
+{"seq_id":"71838803","text":"from Acquisition import aq_parent\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom opengever.base.sentry import log_msg_to_sentry\nfrom opengever.base.txnutils import registered_objects\nfrom opengever.base.txnutils import txn_is_dirty\nfrom opengever.dossier.behaviors.dossier import IDossierMarker\nfrom persistent.mapping import PersistentMapping\nfrom plone import api\nfrom zope.annotation import IAnnotations\nimport logging\nimport transaction\n\n\nlogger = logging.getLogger('opengever.dossier')\n\n\nRESOLVE_LOCK_KEY = 'opengever.dossier.resolve_lock'\nRESOLVE_LOCK_LIFETIME = timedelta(hours=24)\n\n\nclass ResolveLock(object):\n \"\"\"Locking mechanism to prevent concurrent dossier resolution.\n\n This mechanism is intended to prevent users from simultaneously triggering\n the 'resolve' transition for a dossier (which would result in degraded\n performance because retries of conflicting transactions).\n\n We do this by issuing a persistent lock that gets committed in its own\n transaction as the very first thing in the resolution process. Further\n attempts at resolving a dossier are then rejected as long as such a lock\n exists (and hasn't expired). Once the dossier is resolved successfully,\n the lock is removed and the removal will be committed again.\n\n In case of an exception, we catch it, abort the transaction, remove the\n lock, commit the removal, and re-reaise the exception to be handled by\n the usual error handling in ZPublisher.\n\n This class implements the necessary primitives for this locking mechanism.\n The high-level implementation of the strategy described above is actually\n done in the view in opengever.dossier.resolve.\n \"\"\"\n\n def __init__(self, context):\n self.context = context\n self.catalog = api.portal.get_tool('portal_catalog')\n\n def acquire(self, commit=False):\n \"\"\"Acquire a resolve lock for a dossier.\n\n Will overwrite a possibly existing expired lock.\n \"\"\"\n self.log(\"Acquiring resolve lock for %s...\" % self.context)\n\n if txn_is_dirty():\n # Acquiring and committing the lock should always be the first\n # thing that's being done when resolving the dossier, otherwise\n # we would be committing unrelated, unexpected changes.\n #\n # Detect if that happens, but still proceed and log to sentry.\n msg = 'Dirty transaction when comitting resolve lock'\n self.log(msg)\n self.log('Registered objects: %r' % registered_objects())\n log_msg_to_sentry(msg, level='warning', extra={\n 'registered_objects': repr(registered_objects())}\n )\n\n ann = IAnnotations(self.context)\n lockinfo = PersistentMapping({\n 'timestamp': datetime.now(),\n 'userid': api.user.get_current().id,\n })\n ann[RESOLVE_LOCK_KEY] = lockinfo\n self.invalidate_cache()\n\n if commit:\n transaction.commit()\n\n self.log(\"Resolve lock acquired.\")\n\n def release(self, commit=False):\n \"\"\"Release a previously acquired lock.\n\n Will raise a KeyError if no lock has been acquired.\n \"\"\"\n self.log(\"Releasing resolve lock...\")\n ann = IAnnotations(self.context)\n del ann[RESOLVE_LOCK_KEY]\n self.invalidate_cache()\n\n if commit:\n transaction.commit()\n\n self.log(\"Resolve lock released for %s\" % self.context)\n\n def invalidate_cache(self):\n \"\"\"Increment catalog counter to invalidate plone.app.caching ETAGs.\n \"\"\"\n self.catalog._increment_counter()\n\n def is_expired(self, lockinfo):\n \"\"\"Determine whether a lock is expired.\n \"\"\"\n ts = lockinfo['timestamp']\n age = datetime.now() - ts\n expired = age > 
RESOLVE_LOCK_LIFETIME\n if expired:\n self.log(\"Resolve lock is expired (age: %s): %s\" % (age, lockinfo))\n return expired\n\n def is_locked(self, recursive=True):\n \"\"\"Determine whether a dossier is currently resolve-locked.\n\n By default also considers a subdossier locked if any of its parent\n dossiers have a lock on them.\n\n If recursive=False is given, only the current dossier is checked for\n a lock (cheaper, this is used to display the state in the byline).\n\n If a lock exists (somewhere) but is older than RESOLVE_LOCK_LIFETIME,\n it is considered expired and treated as if it didn't exist.\n \"\"\"\n item = self.context\n\n while IDossierMarker.providedBy(item):\n\n lockinfo = self.get_lockinfo(item)\n if lockinfo is not None and not self.is_expired(lockinfo):\n self.log(\"%s is resolve locked via lock on %r\" % (self.context, item))\n return True\n\n if not recursive:\n return False\n\n item = aq_parent(item)\n\n return False\n\n def get_lockinfo(self, context):\n return IAnnotations(context).get(RESOLVE_LOCK_KEY)\n\n def log(self, msg):\n \"\"\"Log a message including the current connection identifier.\n \"\"\"\n conn = self.context._p_jar\n logger.info('[%r] %s' % (conn, msg))\n","sub_path":"opengever/dossier/resolve_lock.py","file_name":"resolve_lock.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"170178087","text":"import boto3\nimport json\n\nfrom utils.sqs import SQSManager\nfrom utils.batch import BatchManager\n\ndef lambda_handler(event, context):\n print(json.dumps(event))\n try:\n # get_batch_status() is expected to come from the project's utils\n # modules (not included in this file); see the sketch after this record.\n response_status = get_batch_status(event['batchjob_id'])\n \n output_hash = dict(event)\n output_hash['batchjob_status'] = response_status\n \n return output_hash\n except Exception as e:\n print(\"Exception: {}\".format(e))\n raise e\n\n\n","sub_path":"lambda/get_batch_status.py","file_name":"get_batch_status.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
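lambda/get_batch_status.py above imports BatchManager but calls a get_batch_status() helper that is defined elsewhere in the project and not shown in this dump. A hedged sketch of what such a helper could look like with boto3; the function body, including the 'UNKNOWN' fallback, is an assumption for illustration, not the project's actual code:

import boto3

def get_batch_status(batchjob_id):
    # Ask AWS Batch for the job and return its status string,
    # e.g. 'RUNNING', 'SUCCEEDED' or 'FAILED'.
    jobs = boto3.client('batch').describe_jobs(jobs=[batchjob_id])['jobs']
    return jobs[0]['status'] if jobs else 'UNKNOWN'

+{"seq_id":"333145148","text":"import unittest\r\nimport pytest\r\nimport pickle\r\nfrom sklearn.linear_model import LinearRegression, Lasso, LassoCV, LogisticRegression\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.preprocessing import OneHotEncoder, FunctionTransformer, PolynomialFeatures\r\nfrom sklearn.model_selection import KFold, GroupKFold\r\nfrom econml.dml import DML, LinearDML, SparseLinearDML, KernelDML\r\nfrom econml.dml import NonParamDML, ForestDML\r\nfrom econml.drlearner import DRLearner, SparseLinearDRLearner, LinearDRLearner, ForestDRLearner\r\nfrom econml.ortho_iv import DMLATEIV, ProjectedDMLATEIV, DMLIV, NonParamDMLIV,\\\r\n IntentToTreatDRIV, LinearIntentToTreatDRIV\r\nimport numpy as np\r\nfrom econml.utilities import shape, hstack, vstack, reshape, cross_product\r\nfrom econml.inference import BootstrapInference\r\nfrom contextlib import ExitStack\r\nfrom sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier\r\nimport itertools\r\nfrom econml.sklearn_extensions.linear_model import WeightedLasso, StatsModelsRLM\r\nfrom econml.tests.test_statsmodels import _summarize\r\nimport econml.tests.utilities  # bugfix for assertWarns\r\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\r\n\r\n\r\nclass TestRandomState(unittest.TestCase):\r\n\r\n @staticmethod\r\n def _make_data(n, p):\r\n np.random.seed(1283)\r\n X = np.random.uniform(-1, 1, size=(n, p))\r\n W = 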
np.random.uniform(-1, 1, size=(n, p))\r\n\r\n def true_propensity(x):\r\n return .4 + .1 * (x[:, 0] > 0)\r\n\r\n def true_effect(x):\r\n return .4 + .2 * x[:, 0]\r\n\r\n def true_conf(x):\r\n return x[:, 1]\r\n\r\n T = np.random.binomial(1, true_propensity(X))\r\n Y = true_effect(X) * T + true_conf(X) + np.random.normal(size=(n,))\r\n X_test = np.zeros((100, p))\r\n X_test[:, 0] = np.linspace(-1, 1, 100)\r\n return Y, T, X, W, X_test\r\n\r\n @staticmethod\r\n def _test_random_state(est, X_test, Y, T, **kwargs):\r\n est.fit(Y, T, **kwargs)\r\n te1 = est.effect(X_test)\r\n est.fit(Y, T, **kwargs)\r\n te2 = est.effect(X_test)\r\n est.fit(Y, T, **kwargs)\r\n te3 = est.effect(X_test)\r\n np.testing.assert_array_equal(te1, te2, err_msg='random state fixing does not work')\r\n np.testing.assert_array_equal(te1, te3, err_msg='random state fixing does not work')\r\n\r\n def test_dml_random_state(self):\r\n Y, T, X, W, X_test = TestRandomState._make_data(500, 2)\r\n for est in [\r\n NonParamDML(model_y=RandomForestRegressor(n_estimators=10, max_depth=4, random_state=123),\r\n model_t=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n model_final=RandomForestRegressor(max_depth=3, n_estimators=10, min_samples_leaf=100,\r\n bootstrap=True, random_state=123),\r\n discrete_treatment=True, n_splits=2, random_state=123),\r\n ForestDML(model_y=RandomForestRegressor(n_estimators=10, max_depth=4, random_state=123),\r\n model_t=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n n_estimators=10,\r\n discrete_treatment=True, n_crossfit_splits=2, random_state=123),\r\n LinearDML(model_y=RandomForestRegressor(n_estimators=10, max_depth=4, random_state=123),\r\n model_t=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n discrete_treatment=True, n_splits=2, random_state=123),\r\n SparseLinearDML(discrete_treatment=True, n_splits=2, random_state=123),\r\n KernelDML(discrete_treatment=True, n_splits=2, random_state=123)]:\r\n TestRandomState._test_random_state(est, X_test, Y, T, X=X, W=W)\r\n\r\n def test_dr_random_state(self):\r\n Y, T, X, W, X_test = self._make_data(500, 2)\r\n for est in [\r\n DRLearner(model_final=RandomForestRegressor(max_depth=3, n_estimators=10, min_samples_leaf=100,\r\n bootstrap=True, random_state=123),\r\n n_splits=2, random_state=123),\r\n LinearDRLearner(random_state=123),\r\n SparseLinearDRLearner(n_splits=2, random_state=123),\r\n ForestDRLearner(model_regression=RandomForestRegressor(n_estimators=10, max_depth=4,\r\n random_state=123),\r\n model_propensity=RandomForestClassifier(\r\n n_estimators=10, max_depth=4, random_state=123),\r\n n_crossfit_splits=2, random_state=123)]:\r\n TestRandomState._test_random_state(est, X_test, Y, T, X=X, W=W)\r\n\r\n def test_orthoiv_random_state(self):\r\n Y, T, X, W, X_test = self._make_data(500, 2)\r\n for est in [\r\n DMLATEIV(model_Y_W=RandomForestRegressor(n_estimators=10, max_depth=4, random_state=123),\r\n model_T_W=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n model_Z_W=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n discrete_treatment=True, discrete_instrument=True, n_splits=2, random_state=123),\r\n ProjectedDMLATEIV(model_Y_W=RandomForestRegressor(n_estimators=10, max_depth=4, random_state=123),\r\n model_T_W=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n model_T_WZ=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n discrete_treatment=True, 
discrete_instrument=True, n_splits=2, random_state=123)]:\r\n TestRandomState._test_random_state(est, None, Y, T, W=W, Z=T)\r\n for est in [\r\n DMLIV(model_Y_X=RandomForestRegressor(n_estimators=10, max_depth=4, random_state=123),\r\n model_T_X=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n model_T_XZ=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n model_final=LinearRegression(fit_intercept=False),\r\n discrete_treatment=True, discrete_instrument=True, n_splits=2, random_state=123),\r\n NonParamDMLIV(model_Y_X=RandomForestRegressor(n_estimators=10, max_depth=4, random_state=123),\r\n model_T_X=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n model_T_XZ=RandomForestClassifier(n_estimators=10, max_depth=4, random_state=123),\r\n model_final=LinearRegression(),\r\n discrete_treatment=True, discrete_instrument=True, n_splits=2, random_state=123)]:\r\n TestRandomState._test_random_state(est, X_test, Y, T, X=X, Z=T)\r\n for est in [IntentToTreatDRIV(model_Y_X=RandomForestRegressor(n_estimators=10, max_depth=4, random_state=123),\r\n model_T_XZ=RandomForestClassifier(n_estimators=10,\r\n max_depth=4, random_state=123),\r\n flexible_model_effect=RandomForestRegressor(n_estimators=10,\r\n max_depth=4, random_state=123),\r\n n_splits=2, random_state=123),\r\n LinearIntentToTreatDRIV(model_Y_X=RandomForestRegressor(n_estimators=10,\r\n max_depth=4, random_state=123),\r\n model_T_XZ=RandomForestClassifier(n_estimators=10,\r\n max_depth=4, random_state=123),\r\n flexible_model_effect=RandomForestRegressor(n_estimators=10,\r\n max_depth=4,\r\n random_state=123),\r\n n_splits=2, random_state=123)]:\r\n TestRandomState._test_random_state(est, X_test, Y, T, X=X, W=W, Z=T)\r\n","sub_path":"econml/tests/test_random_state.py","file_name":"test_random_state.py","file_ext":"py","file_size_in_byte":8536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"153023770","text":"import json\nimport requests\nimport pprint\n# https://api.publicapis.org/\n# /entries for a list of apis\n# /random for one random api response\n# /categories for a list of categories\nclass PublicAPI():\n\n \n @staticmethod\n def query_public_api(data: str) -> dict:\n ''' pass in one of 3 args in str form to query the api: \n entries, random or categories '''\n try:\n if data not in ('entries', 'random', 'categories'):\n # unknown endpoint: nothing sensible to query\n return None\n \n response = requests.get('https://api.publicapis.org/' + data.strip())\n \n if response.status_code == 200:\n response_in = json.dumps(response.json())\n response_data = json.loads(response_in)\n if data == 'random':\n PublicAPI.query_data(response_data)\n return response_data\n else:\n print(\"Error querying https://api.publicapis.org/ \" + str(response.status_code))\n except Exception as err:\n print(\"query_public_api err in PublicAPI \" + str(err))\n \n \n @staticmethod\n def open_html_page(data: str) -> str:\n '''opens the html page in string form'''\n try:\n response = requests.get(data.strip())\n # check response\n if response.status_code == 200:\n response_in = json.dumps(response.text)\n response_data = json.loads(response_in)\n return response_data\n except Exception as err:\n print(\"open_html_page error in PublicAPI \" + str(err))\n \n \n @staticmethod\n def query_data(data: dict) -> None:\n try:\n pprint.pprint(data['entries'][0]['Link'])\n store_response = str(data['entries'][0]['Link'])\n print(\"value \" + store_response)\n page_data = PublicAPI.open_html_page(store_response)\n # limit to first x chars\n 
print(page_data[0:1000])\n except Exception as err:\n print(\"query_data error in PublicAPI \" + str(err))\n\n# testing output\n# print(PublicAPI.query_public_api('random'))\n# pprint.pprint(PublicAPI.query_public_api('entries')['entries'][0])","sub_path":"Data Analysis/API/PublicAPI.py","file_name":"PublicAPI.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"4969521","text":"from tkinter import *\n\n# Backend functions\n\n\ndef borrar():\n # clear both inputs (not wired to a button in this version)\n n1.set('')\n n2.set('')\n\n\ndef sumar():\n r.set(float(n1.get()) + float(n2.get()))\n\n\n# Form layout\nroot = Tk()\nroot.config(bd=32) # 32-pixel outer border, looks better\n\n# Three StringVars to hold the two numbers and the result\nn1 = StringVar()\nn2 = StringVar()\nr = StringVar()\n\nLabel(root, text=\"Numero 1\").pack()\nEntry(root, justify=CENTER, textvariable=n1).pack()\n\nLabel(root, text=\"Numero 2\").pack()\nEntry(root, justify=CENTER, textvariable=n2).pack()\n\nLabel(root, text=\"Resultado\").pack()\nEntry(root, justify=CENTER, state=DISABLED, textvariable=r).pack()\n\nLabel(root).pack() # Spacer\n\nButton(root, text=\"Sumar\", command=sumar).pack()\n\nroot.mainloop()\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"366916089","text":"from flask_restful import Resource, reqparse\nfrom models.roupas import RoupasModel\nfrom flask_jwt_extended import jwt_required\nfrom flask import render_template, make_response\nimport sqlite3 # queries are run directly against the SQLite database\n\nroupa = []\n\ndef normalize_path_params(nome=None,\n cor=None,\n preco_min=0,\n preco_max=10000,\n limit = 50,\n offset= 0,**dados):\n if nome:\n return {\n 'preco_min': preco_min,\n 'preco_max': preco_max,\n 'nome': nome,\n 'limit': limit,\n 'offset': offset}\n elif cor:\n return {\n 'preco_min': preco_min,\n 'preco_max': preco_max,\n 'cor': cor,\n 'limit': limit,\n 'offset': offset}\n return {\n 'preco_min': preco_min,\n 'preco_max': preco_max,\n 'limit': limit,\n 'offset': offset}\n\n# path/roupas?cor=vermelha&preco_min=50&preco_max=400 (example path)\n\npath_params = reqparse.RequestParser()\npath_params.add_argument('nome', type=str)\npath_params.add_argument('cor', type=str)\npath_params.add_argument('preco_min', type=float)\npath_params.add_argument('preco_max', type=float)\npath_params.add_argument('limit', type=float) # number of items to show per page\npath_params.add_argument('offset', type=float) # number of items to skip\n\nclass Roupas(Resource):\n def get(self):\n connection = sqlite3.connect('banco.db')\n cursor = connection.cursor()\n\n dados = path_params.parse_args()\n dados_validos = {chave:dados[chave] for chave in dados if dados[chave] is not None} # keep only valid arguments; values that arrive as NULL/None are dropped\n parametros = normalize_path_params(**dados_validos)\n\n if not parametros.get('cor') and parametros.get('nome'):\n consulta = \"SELECT * FROM roupas \\\n WHERE (preco >= ? and preco <= ?) \\\n and nome = ? LIMIT ? OFFSET ?\"\n tupla = tuple([parametros[chave] for chave in parametros]) # picks the arguments up in order\n resultado = cursor.execute(consulta, tupla)\n else:\n consulta = \"SELECT * FROM roupas \\\n WHERE (preco >= ? and preco <= ?) \\\n LIMIT ? 
OFFSET ?\"\n tupla = tuple([parametros[chave] for chave in parametros])\n resultado = cursor.execute(consulta, tupla)\n\n roupas = []\n for linha in resultado:\n roupas.append({\n 'roupa_id': linha[0],\n 'nome':linha[1],\n 'cor':linha[2],\n 'preco':linha[3]\n })\n\n #return {'roupas': [roupa.json() for roupa in RoupasModel.query.all()]}\n #headers = {'Content-Type': 'text/html'}\n #return make_response(render_template('roupas.html', roupas = roupas), 200, headers)\n return {'roupas': roupas}\n\nclass Roupa(Resource):\n argumentos = reqparse.RequestParser()\n argumentos.add_argument('nome', type=str, required=True, help=\"O campo 'nome' não pode ficar em branco\")\n argumentos.add_argument('cor',type=str, required=True, help=\"O campo 'cor' não pode ficar em branco\")\n argumentos.add_argument('preco')\n\n def get(self, roupa_id):\n roupa = RoupasModel.find_roupa(roupa_id)\n if roupa:\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('roupa.html',nome = roupa.nome, cor = roupa.cor, preco = roupa.preco), 200, headers)\n return {'message': 'A Roupa não foi encontrada.'}, 404 #Not Found\n\n #@jwt_required\n def post(self, roupa_id):\n if RoupasModel.find_roupa(roupa_id):\n return {'message': 'A Roupa ID \"{}\" já existe.'.format(roupa_id)}, 400 #bad Request\n dados = Roupa.argumentos.parse_args()\n roupa = RoupasModel(roupa_id, **dados)\n try:\n roupa.save_roupa()\n except:\n return {'message': 'Ocorreu um erro interno ao salvar a Roupa.'}, 500 # Internal Server Error\n return roupa.json()\n\n #@jwt_required\n def put(self, roupa_id):\n dados = Roupa.argumentos.parse_args()\n roupa_encontrada = RoupasModel.find_roupa(roupa_id)\n if roupa_encontrada:\n roupa_encontrada.update_roupa(**dados)\n roupa_encontrada.save_roupa()\n return roupa_encontrada.json(), 200\n roupa = RoupasModel(roupa_id, **dados)\n try:\n roupa.save_roupa()\n except:\n return {'message': 'Ocorreu um erro interno ao salvar a Roupa.'}, 500 # Internal Server Error\n return roupa.json(), 201 #created/criado\n @jwt_required\n def delete(self, roupa_id):\n roupa = RoupasModel.find_roupa(roupa_id)\n if roupa:\n try:\n roupa.delete_roupa()\n except:\n return {'message': 'Ocorreu um erro interno ao deletar a Roupa'}, 500 # Internal Server Error\n return {'message': 'Camisa deletada.'}\n return {'message': 'Camisa não existe.'}, 404\n","sub_path":"resources/roupas.py","file_name":"roupas.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"442500387","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: sherlock\n@contact: sherlockliao01@gmail.com\n模型训练与测试封装版代码\n\"\"\"\n\nimport sys\n\nsys.path.append('.')\n\nfrom fastreid.config import get_cfg\nfrom fastreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch\nfrom fastreid.utils.checkpoint import Checkpointer\n\n\n# 读取配置文件\ndef setup(args):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg\n\n\ndef main(args):\n cfg = setup(args)\n # 模型测试\n if args.eval_only:\n cfg.defrost()\n cfg.MODEL.BACKBONE.PRETRAIN = False\n model = DefaultTrainer.build_model(cfg)\n # 加载预训练模型\n Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model\n\n res = DefaultTrainer.test(cfg, model)\n return res\n # 模型训练\n trainer = DefaultTrainer(cfg)\n\n 
trainer.resume_or_load(resume=args.resume)\n return trainer.train()\n\n\nif __name__ == \"__main__\":\n args = default_argument_parser().parse_args()\n # For debugging only; delete this block for normal use\n # ---\n args.config_file = \"./configs/Market1501/bagtricks_R50.yml\" # config path\n args.eval_only = True # whether to evaluate the model: False = train, True = evaluate\n # ---\n\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n )\n","sub_path":"Deep learning/fast-reid/fast-reid_tutorial/train_net.py","file_name":"train_net.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"218516349","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n\nclass PreciseNumber:\n def __init__(self, number=1.0):\n self.isNear1 = False\n self.number = number\n self.check_if_near_1()\n\n def check_if_near_1(self):\n if abs(self.number - 1) < 0.4:\n self.isNear1 = True\n self.number -= 1.0\n\n def get(self):\n if self.isNear1:\n return self.number + 1.0\n else:\n return self.number\n\n def sqr(self):\n if self.isNear1:\n self.number = self.number * 2 + self.number ** 2\n else:\n self.number **= 2\n self.check_if_near_1()\n\n return self\n\n def sqrt(self, precision=20):\n def taylor_n_step(n):\n return math.factorial(2 * n) / math.factorial(n) / math.factorial(n) * \\\n pow(-1, n) * pow(self.number, n) / (1 - 2 * n) / pow(4, n)\n\n if self.isNear1:\n steps = np.arange(1, precision)\n self.number = sum(map(taylor_n_step, steps))\n else:\n self.number = math.sqrt(self.number)\n self.check_if_near_1()\n\n return self\n\n\nclass PreciseNumberLog:\n def __init__(self, number=1.0):\n self.number = math.log2(number)\n\n def get(self):\n return 2 ** self.number\n\n def sqr(self):\n self.number *= 2\n return self\n\n def sqrt(self):\n self.number /= 2\n return self\n\n\ndef relative_error(x0, x):\n return np.abs(x0 - x) / np.abs(x0)\n\n\ndef f_sqrt_sqr(x, n=52):\n for k in range(n): x=np.sqrt(x)\n for k in range(n): x=x*x\n return x\n\n\ndef f_sqrt_sqr_precise(x_list, n=52):\n results = []\n\n for x in x_list:\n x = PreciseNumber(x)\n\n for k in range(n): x.sqrt()\n for k in range(n): x.sqr()\n\n results.append(x.get())\n\n return results\n\n\ndef f_sqrt_sqr_precise_log(x_list, n=52):\n results = []\n\n for x in x_list:\n x = PreciseNumberLog(x)\n\n for k in range(n): x.sqrt()\n for k in range(n): x.sqr()\n\n results.append(x.get())\n\n return results\n\n\ndef plot_error(x0, err):\n mask = np.logical_and(err > 0, err < np.inf)\n plt.loglog(x0[mask], err[mask], \".k\")\n plt.loglog(x0, [eps] * len(err), \"--r\") # machine epsilon, for comparison\n plt.xlabel(\"$Argument\\;value$\")\n plt.ylabel(\"$Relative\\;error$\")\n plt.show()\n\n\ndef test():\n assert PreciseNumber(1.24).get() == 1.24\n assert relative_error(PreciseNumber(1.5).sqr().get(), 2.25) < 10 ** -8\n\n assert relative_error(1.2, PreciseNumber(1.44).sqrt().get()) < 10 ** -8\n assert relative_error(0.49558046773, PreciseNumber(0.2456).sqrt().get()) < 10 ** -8\n assert relative_error(2.30217288664, PreciseNumber(5.3).sqrt().get()) < 10 ** -8\n\n assert relative_error(1.44, PreciseNumber(1.44).sqrt().sqr().get()) < 10 ** -8\n\n num = PreciseNumber(1.5)\n assert relative_error(1.22474487139, num.sqrt().get()) < 10 ** -8\n assert relative_error(1.1066819197, num.sqrt().get()) < 10 ** -8\n assert relative_error(1.22474487139, num.sqr().get()) < 10 ** -8\n\neps 
= np.finfo(np.double).eps\nprint(\"Машинная точность:\", eps)\n\ntest()\n\nx0 = np.logspace(-4, 4, 100, dtype=np.double)\n\nx = f_sqrt_sqr_precise(x0)\nerr = relative_error(x0, x)\nprint(\"Ошибки sqrt-sqr\", err[:4], \"...\")\nplot_error(x0, err)\n\nx = f_sqrt_sqr_precise_log(x0)\nerr = relative_error(x0, x)\nprint(\"Ошибки sqrt-sqr\", err[:4], \"...\")\nplot_error(x0, err)\n\n","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"298974841","text":"import sys\r\nimport math\r\nimport numpy as np\r\n\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import (QApplication, QHBoxLayout, QOpenGLWidget, QSlider,\r\n QWidget, QPushButton)\r\n\r\nimport OpenGL.GL as gl\r\nimport OpenGL.GLU as glu\r\n\r\nfrom Machine import Machine, Axis\r\nfrom ToolPathCreator import circle\r\n\r\nclass Window(QWidget):\r\n def __init__(self):\r\n super(Window, self).__init__()\r\n\r\n self.glWidget = GLWidget(self)\r\n self.glWidget.move(0,0)\r\n self.glWidget.resize(1920,1080)\r\n self.resize(1920,1080)\r\n\r\n self.setWindowTitle(\"5-Axis Simulator\")\r\n\r\n self.machine = Machine()\r\n self.machine.base = Axis('base')\r\n self.machine.base.loadModel('base_frame.obj')\r\n self.machine.base.setColor(200,200,200)\r\n self.machine.base.setRelativePosition(1.5,-0.85,-1.15)\r\n\r\n x_axis = Axis('x_axis')\r\n x_axis.loadModel('x_axis.obj')\r\n x_axis.setColor(200,25,25)\r\n x_axis.defineMovement(type='linear', axis='x')\r\n x_axis.setRelativePosition(-1.5,4.55,0.35)\r\n\r\n y_axis = Axis('y_axis')\r\n y_axis.loadModel('y_axis.obj')\r\n y_axis.setColor(25,200,25)\r\n y_axis.defineMovement(type='linear', axis='z', negative=True)\r\n y_axis.setRelativePosition(0,0,3.125)\r\n\r\n z_axis = Axis('z_axis')\r\n z_axis.loadModel('z_axis.obj')\r\n z_axis.setColor(25,25,200)\r\n z_axis.defineMovement(type='linear', axis='y')\r\n z_axis.setRelativePosition(0,-0.15,0)\r\n\r\n\r\n a_axis = Axis('a_axis')\r\n a_axis.loadModel('a_axis.obj')\r\n a_axis.setColor(200,200,25)\r\n a_axis.defineMovement(type='rotation', axis='y')\r\n a_axis.setRelativePosition(0, -2.8, 0)\r\n\r\n b_axis = Axis('b_axis')\r\n b_axis.loadModel('b_axis_tool.obj')\r\n b_axis.setColor(200,25,200)\r\n b_axis.defineMovement(type='rotation', axis='z')\r\n b_axis.setRelativePosition(0, 0, 0.8)\r\n b_axis.setToolEndOffset(0,-0.75,0)\r\n\r\n\r\n self.machine.base.addChild(x_axis)\r\n self.machine.base.addChild(y_axis)\r\n x_axis.addChild(z_axis)\r\n\r\n z_axis.addChild(a_axis)\r\n a_axis.addChild(b_axis)\r\n\r\n self.machine.x_axis = x_axis\r\n self.machine.y_axis = y_axis\r\n self.machine.z_axis = z_axis\r\n self.machine.a_axis = a_axis\r\n self.machine.b_axis = b_axis\r\n\r\n self.machine.buildMachinState()\r\n\r\n x_axis.setAxisPositionInMM(0)\r\n y_axis.setAxisPositionInMM(0)\r\n z_axis.setAxisPositionInMM(0)\r\n\r\n a_axis.setAxisPositionInDeg(0)\r\n b_axis.setAxisPositionInDeg(0)\r\n\r\n self.machine.buildMachinState()\r\n self.machine.setToolPath(circle(500, [1.5,2,1]))\r\n self.machine.ready = True\r\n\r\n self.timer = QTimer()\r\n self.timer.timeout.connect(self.timer_step)\r\n self.timer.setInterval(1/60)\r\n self.timer.start()\r\n self.time_step = 0\r\n\r\n\r\n def timer_step(self):\r\n if self.machine.ready:\r\n self.time_step += 1\r\n #self.time_step = 25\r\n if self.time_step >= len(self.machine.tool_path):\r\n self.time_step = 0\r\n\r\n tool_step = 
self.machine.tool_path[int(self.time_step)]\r\n self.machine.calculateDesiredState(tool_step)\r\n self.machine.buildMachinState()\r\n else:\r\n pass\r\n #print('machine not ready')\r\n self.glWidget.update()\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass GLWidget(QOpenGLWidget):\r\n def __init__(self, parent=None):\r\n super(GLWidget, self).__init__(parent)\r\n\r\n format = QSurfaceFormat()\r\n format.setSamples(8)\r\n self.setFormat(format)\r\n\r\n self.xRot = 20*16\r\n self.yRot = -45*16\r\n self.zRot = 0\r\n self.xPos = 0\r\n self.yPos = 0\r\n self.zPos = 0\r\n\r\n self.lastPos = QPoint()\r\n\r\n def getOpenglInfo(self):\r\n info = \"\"\"\r\n Vendor: {0}\r\n Renderer: {1}\r\n OpenGL Version: {2}\r\n Shader Version: {3}\r\n \"\"\".format(\r\n gl.glGetString(gl.GL_VENDOR),\r\n gl.glGetString(gl.GL_RENDERER),\r\n gl.glGetString(gl.GL_VERSION),\r\n gl.glGetString(gl.GL_SHADING_LANGUAGE_VERSION)\r\n )\r\n\r\n return info\r\n\r\n def minimumSizeHint(self):\r\n return QSize(50, 50)\r\n\r\n def sizeHint(self):\r\n return QSize(400, 400)\r\n\r\n def setXRotation(self, angle):\r\n angle = self.normalizeAngle(angle)\r\n if angle != self.xRot:\r\n self.xRot = angle\r\n\r\n def setYRotation(self, angle):\r\n angle = self.normalizeAngle(angle)\r\n if angle != self.yRot:\r\n self.yRot = angle\r\n\r\n def setZRotation(self, angle):\r\n angle = self.normalizeAngle(angle)\r\n if angle != self.zRot:\r\n self.zRot = angle\r\n\r\n def setXPosition(self, pos):\r\n if pos != self.xPos:\r\n self.xPos = pos\r\n\r\n def setYPosition(self, pos):\r\n if pos != self.yPos:\r\n self.yPos = pos\r\n\r\n def setZPosition(self, pos):\r\n if pos != self.zPos:\r\n self.zPos = pos\r\n\r\n def initializeGL(self):\r\n print(self.getOpenglInfo())\r\n self.setClearColor(QColor(255,255,255))\r\n self.axisIndicator = self.makeAxisIndicator(0.75, label=True)\r\n self.baseGrid = self.makeBaseGrid(10, coarse_spacing=1, fine_spacing=0.1)\r\n\r\n gl.glShadeModel(gl.GL_SMOOTH)\r\n gl.glEnable(gl.GL_DEPTH_TEST)\r\n gl.glEnable(gl.GL_CULL_FACE)\r\n gl.glEnable(gl.GL_MULTISAMPLE)\r\n #gl.glCullFace(gl.GL_FRONT)\r\n\r\n def paintGL(self):\r\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\r\n gl.glLoadIdentity()\r\n gl.glTranslated(self.xPos, self.yPos-2.0, self.zPos-12.5)\r\n gl.glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)\r\n gl.glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)\r\n gl.glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)\r\n gl.glCallList(self.baseGrid)\r\n gl.glCallList(self.axisIndicator)\r\n self.drawMachine(self.parent().machine)\r\n self.drawToolPath(self.parent().machine.tool_path)\r\n\r\n def resizeGL(self, width, height):\r\n side = min(width, height)\r\n if side < 0:\r\n return\r\n gl.glViewport((width) // 2, (height - side) // 2, side, side)\r\n gl.glMatrixMode(gl.GL_PROJECTION)\r\n gl.glLoadIdentity()\r\n glu.gluPerspective(45.0, (1920/1080), 0.0001, 10000.0)\r\n gl.glMatrixMode(gl.GL_MODELVIEW)\r\n\r\n def mousePressEvent(self, event):\r\n self.lastPos = event.pos()\r\n\r\n def mouseMoveEvent(self, event):\r\n dx = event.x() - self.lastPos.x()\r\n dy = event.y() - self.lastPos.y()\r\n if event.buttons() & Qt.LeftButton:\r\n self.setXRotation(self.xRot + 8 * dy)\r\n self.setYRotation(self.yRot + 8 * dx)\r\n elif event.buttons() & Qt.RightButton:\r\n self.setZPosition(self.zPos + 0.01 * dx)\r\n elif event.buttons() & Qt.MiddleButton:\r\n self.setXPosition(self.xPos + 0.003 * dx)\r\n self.setYPosition(self.yPos - 0.003 * dy)\r\n self.lastPos = event.pos()\r\n\r\n def makeAxisIndicator(self, size=1.0, label=False):\r\n genList = 
gl.glGenLists(1)\r\n gl.glNewList(genList, gl.GL_COMPILE)\r\n\r\n gl.glLineWidth(3.0)\r\n\r\n gl.glBegin(gl.GL_LINES)\r\n\r\n # draw base x-axis lines\r\n self.setColor(QColor(255, 0, 0))\r\n gl.glVertex3d(0, 0, 0)\r\n gl.glVertex3d(size, 0, 0)\r\n # draw 'X'\r\n gl.glVertex3d(1.05 * size, 0.1 * size, 0)\r\n gl.glVertex3d(1.2 * size, -0.1 * size, 0)\r\n gl.glVertex3d(1.05 * size, -0.1 * size, 0)\r\n gl.glVertex3d(1.2 * size, 0.1 * size, 0)\r\n\r\n # draw base y_axis-lines\r\n self.setColor(QColor(0, 255, 0))\r\n gl.glVertex3d(0, 0, 0)\r\n gl.glVertex3d(0, size, 0)\r\n # draw 'Y'\r\n gl.glVertex3d(0.075 * size, 1.3 * size, 0)\r\n gl.glVertex3d(0, 1.15 * size, 0)\r\n gl.glVertex3d(-0.075 * size, 1.3 * size, 0)\r\n gl.glVertex3d(0, 1.15 * size, 0)\r\n gl.glVertex3d(0, 1.05 * size, 0)\r\n gl.glVertex3d(0, 1.15 * size, 0)\r\n\r\n # draw base z_axis-lines\r\n self.setColor(QColor(0, 0, 255))\r\n gl.glVertex3d(0, 0, 0)\r\n gl.glVertex3d(0, 0, size)\r\n # draw 'Z'\r\n gl.glVertex3d(-0.075*size, 0.1*size, 1.05*size)\r\n gl.glVertex3d(0.075*size, 0.1*size, 1.05*size)\r\n gl.glVertex3d(-0.075 * size, -0.1 * size, 1.05 * size)\r\n gl.glVertex3d(0.075 * size, -0.1 * size, 1.05 * size)\r\n gl.glVertex3d(0.075 * size, 0.1 * size, 1.05 * size)\r\n gl.glVertex3d(-0.075 * size, -0.1 * size, 1.05 * size)\r\n\r\n gl.glEnd()\r\n gl.glLineWidth(1.0)\r\n gl.glEndList()\r\n return genList\r\n\r\n def makeBaseGrid(self, size, coarse_spacing=10.0, fine_spacing=1.0):\r\n genList = gl.glGenLists(1)\r\n gl.glNewList(genList, gl.GL_COMPILE)\r\n gl.glBegin(gl.GL_LINES)\r\n\r\n self.setColor(QColor(150, 150, 150))\r\n # draw base x-axis lines\r\n gl.glVertex3d(-size//2, 0, 0)\r\n gl.glVertex3d(size//2, 0, 0)\r\n # draw base z_axis-lines\r\n gl.glVertex3d(0, 0, size//2)\r\n gl.glVertex3d(0, 0, -size//2)\r\n\r\n for i in range(int((size/2)/coarse_spacing)):\r\n # draw coarse x-axis lines\r\n gl.glVertex3d(-size//2, 0, (i+1)*coarse_spacing)\r\n gl.glVertex3d(size//2, 0, (i+1)*coarse_spacing)\r\n gl.glVertex3d(-size//2, 0, -(i + 1) * coarse_spacing)\r\n gl.glVertex3d(size//2, 0, -(i + 1) * coarse_spacing)\r\n # draw coarse z-axis lines\r\n gl.glVertex3d((i + 1) * coarse_spacing,0,-size//2)\r\n gl.glVertex3d((i + 1) * coarse_spacing,0,size//2)\r\n gl.glVertex3d(-(i + 1) * coarse_spacing,0,-size//2)\r\n gl.glVertex3d(-(i + 1) * coarse_spacing,0,size//2)\r\n\r\n self.setColor(QColor(225, 225, 225))\r\n for i in range(int((size/2)/fine_spacing)):\r\n # draw coarse x-axis lines\r\n gl.glVertex3d(-size//2, 0, (i+1)*fine_spacing)\r\n gl.glVertex3d(size//2, 0, (i+1)*fine_spacing)\r\n gl.glVertex3d(-size//2, 0, -(i + 1) * fine_spacing)\r\n gl.glVertex3d(size//2, 0, -(i + 1) * fine_spacing)\r\n # draw coarse z-axis lines\r\n gl.glVertex3d((i + 1) * fine_spacing,0,-size//2)\r\n gl.glVertex3d((i + 1) * fine_spacing,0,size//2)\r\n gl.glVertex3d(-(i + 1) * fine_spacing,0,-size//2)\r\n gl.glVertex3d(-(i + 1) * fine_spacing,0,size//2)\r\n\r\n gl.glEnd()\r\n gl.glEndList()\r\n return genList\r\n\r\n def drawToolPath(self, tool_path):\r\n gl.glPushMatrix()\r\n offset = np.multiply(self.parent().machine.y_axis.axis_position, self.parent().machine.y_axis.linear_movement)\r\n gl.glTranslatef(offset[0], offset[1], offset[2])\r\n\r\n gl.glBegin(gl.GL_LINES)\r\n self.setColor(QColor(0, 0, 255))\r\n #print('drawing toolpath of length:', len(tool_path))\r\n for i in range(len(tool_path)-1):\r\n #draw tool path\r\n gl.glVertex3d(tool_path[i][0]/1000, tool_path[i][2]/1000, tool_path[i][1]/1000)\r\n gl.glVertex3d(tool_path[i+1][0]/1000, 
tool_path[i+1][2]/1000, tool_path[i+1][1]/1000)\r\n\r\n self.setColor(QColor(255, 0, 0))\r\n for i in range(len(tool_path)):\r\n #draw tool normal\r\n gl.glVertex3d(tool_path[i][0] / 1000, tool_path[i][2] / 1000, tool_path[i][1] / 1000)\r\n gl.glVertex3d(tool_path[i][0+3]+tool_path[i][0] / 1000, tool_path[i][2+3]+tool_path[i][2] / 1000, tool_path[i][1+3]+tool_path[i][1] / 1000)\r\n\r\n gl.glEnd()\r\n\r\n gl.glPopMatrix()\r\n\r\n def drawMachine(self, machine):\r\n gl.glPushMatrix()\r\n\r\n\r\n self.drawAxis(machine.base, 0)\r\n\r\n\r\n gl.glPopMatrix()\r\n\r\n def drawAxis(self, axis, recursion_level):\r\n #print('recursion_level:', recursion_level)\r\n #print('drawing', axis.name)\r\n\r\n position = np.add(axis.relative_translation, np.multiply(axis.axis_position, axis.linear_movement))\r\n gl.glTranslatef(position[0], position[1], position[2])\r\n\r\n #rotation = np.add(axis.relative_rotation, np.multiply(axis.axis_position, axis.rotational_movement))\r\n gl.glRotatef(axis.axis_position, axis.rotational_movement[0], axis.rotational_movement[1], axis.rotational_movement[2])\r\n\r\n gl.glBegin(gl.GL_QUADS)\r\n self.setColor(axis.color)\r\n for vertex in axis.getVertices():\r\n gl.glVertex3f(vertex[0], vertex[1], vertex[2])\r\n gl.glEnd()\r\n\r\n for child in axis.children:\r\n recursion_level += 1\r\n self.drawAxis(child, recursion_level)\r\n recursion_level -= 1\r\n\r\n gl.glTranslatef(-position[0], -position[1], -position[2])\r\n gl.glRotatef(axis.axis_position, -axis.rotational_movement[0], -axis.rotational_movement[1] ,-axis.rotational_movement[2])\r\n\r\n\r\n\r\n\r\n def normalizeAngle(self, angle):\r\n while angle < 0:\r\n angle += 360 * 16\r\n while angle > 360 * 16:\r\n angle -= 360 * 16\r\n return angle\r\n\r\n def setClearColor(self, c):\r\n gl.glClearColor(c.redF(), c.greenF(), c.blueF(), c.alphaF())\r\n\r\n def setColor(self, c):\r\n gl.glColor4f(c.redF(), c.greenF(), c.blueF(), c.alphaF())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n window = Window()\r\n window.show()\r\n sys.exit(app.exec_())","sub_path":"RoboticVisualizer.py","file_name":"RoboticVisualizer.py","file_ext":"py","file_size_in_byte":13555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"495011267","text":"from ContactRegistration.registration.models import UserInfo, Birthplace\n\n\nclass IDAO(object):\n \"\"\" CRUD Interface for DAO objects \"\"\"\n def create(self, bus_object):\n \"\"\" Creates object in data storage \"\"\"\n raise NotImplementedError()\n\n def read(self, object_id):\n \"\"\" Reads object from data storage \"\"\"\n raise NotImplementedError()\n\n def update(self, object_id, new_object):\n \"\"\" Modifies object from data storage \"\"\"\n raise NotImplementedError()\n\n def delete(self, object_id):\n \"\"\" Deletes object from data storage \"\"\"\n raise NotImplementedError()\n\n\nclass BirthplaceDAO(IDAO):\n \"\"\" DAO object for users birthplace \"\"\"\n def create(self, birthplace):\n #create django ORM object\n obj = Birthplace(birthplace=birthplace)\n #saving object in DB using django ORM\n obj.save()\n return obj\n\n def read(self, object_id):\n try:\n obj = Birthplace.objects.get(id=object_id)\n except Birthplace.DoesNotExist:\n # if object not found return None\n obj = None\n return obj\n\n def update(self, object_id, birthplace):\n try:\n obj = Birthplace.objects.get(id=object_id)\n except Birthplace.DoesNotExist:\n # if object not found return False\n return False\n else:\n 
obj.birthplace = birthplace\n obj.save()\n return True\n\n def delete(self, object_id):\n try:\n obj = Birthplace.objects.get(id=object_id)\n except Birthplace.DoesNotExist:\n return False\n else:\n obj.delete()\n return True\n\n\nclass UserInfoDAO(IDAO):\n \"\"\" DAO object for UserInfo object \"\"\"\n def create(self, user_info):\n #create django ORM object\n birthplace_dao = BirthplaceDAO()\n birthplace_obj = birthplace_dao.create(user_info.birthplace)\n\n obj = UserInfo(username=user_info.username,\n age=user_info.age,\n birthplace=birthplace_obj,\n who=user_info.who)\n\n #saving object in DB using django ORM\n obj.save()\n return obj\n\n def read(self, object_id):\n try:\n obj = UserInfo.objects.get(id=object_id)\n except UserInfo.DoesNotExist:\n obj = None\n return obj\n\n @staticmethod\n def read_all():\n \"\"\" Return all objects from data storage \"\"\"\n return UserInfo.objects.all()\n\n def update(self, object_id, user_info):\n try:\n obj = UserInfo.objects.get(id=object_id)\n except UserInfo.DoesNotExist:\n return False\n else:\n obj.username = user_info.username\n obj.age = user_info.age\n obj.who = user_info.who\n if obj.birthplace.birthplace != user_info.birthplace:\n obj.birthplace.birthplace = user_info.birthplace\n obj.birthplace.save()\n obj.save()\n return True\n\n def delete(self, object_id):\n try:\n obj = UserInfo.objects.get(id=object_id)\n except UserInfo.DoesNotExist:\n return False\n else:\n obj.delete()\n return True\n","sub_path":"registration/DAO.py","file_name":"DAO.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"59780224","text":"#!/usr/bin/python\n\nfrom Adafruit_PWM_Servo_Driver import PWM\nimport time\n# ===========================================================================\n# Example Code\n# ===========================================================================\n# Initialise the PWM device using the default address\npwm = PWM(0x40)\n# Note if you'd like more debug output you can instead run:\n#pwm = PWM(0x40, debug=True)\n\nservoMin = 100 # Min pulse length out of 4096\nservoMax = 515 # Max pulse length out of 4096\n\n#def setServoPulse(channel, pulse):\n # pulseLength = 1000000 # 1,000,000 us per second\n #pulseLength /= 60 # 60 Hz\n #print \"%d us per period\" % pulseLength\n #pulseLength /= 4096 # 12 bits of resolution\n #print \"%d us per bit\" % pulseLength\n #pulse *= 1000\n #pulse /= pulseLength\n #pwm.setPWM(channel, 0, pulse)\n\n\n\npwm.setPWMFreq(50) # Set frequency to 60 Hz\npwm.setPWM(0,0,servoMin)\ntime.sleep(1)\n\n\ndef servo(inter, channel):\n\tfor i in range(servoMin,servoMax+1):\n\t\tpwm.setPWM(channel,0,i)\t\t\n\t\tiRange=servoMax-servoMin\n\t\tangle=180.0*(i-100)/iRange\n\t\tprint('angle: {0:.1f} [degree]'.format(angle))\t\n\t\ttime.sleep(inter)\n\ttime.sleep(1)\n\tfor i in range(servoMin,servoMax+1):\n\t\tduty=servoMax+servoMin-i\n\t\tpwm.setPWM(channel,0,duty)\t\t\n#\t\tiRange=servoMax-servoMin\n#\t\tangle=180.0*(i-100)/iRange\n#\t\tprint('angle: {0:.1f} [degree]'.format(angle))\t\n\t\tprint(duty)\n\t\ttime.sleep(inter)\n\ttime.sleep(1)\n\nfor i in range(5,11):\n\tservo(0.001*i,0)\n\n\n#while (True):\n # Change speed of continuous servo on channel O\n # pwm.setPWM(0, 0, servoMin)\n #time.sleep(1)\n #pwm.setPWM(0, 0, servoMax)\n 
#time.sleep(1)\n\n\n\n","sub_path":"Others/Servo/Servo_Example.py","file_name":"Servo_Example.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"513183885","text":"from django.shortcuts import render\nfrom products.models import Product\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\n\n\ndef do_search(request):\n if request.GET['q'] != '':\n products = Product.objects.filter(name__icontains=request.GET['q'])\n paginator = Paginator(products, 24)\n\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'products.html', {'page_obj': page_obj, 'products': products})\n else:\n messages.warning(request, 'Please enter a keyword to search for!')\n products = Product.objects.all()\n paginator = Paginator(products, 24)\n\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'products.html', {'page_obj': page_obj, 'products': products})\n","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"399528273","text":"from django.conf.urls import url\nfrom ShopperBase import views\n\n\nurlpatterns = [\n url(r'^$', views.shopper_list),\n url(r'^(?P[0-9]+)/$', views.shopper_id),\n url(r'^(?P[0-9]+)/upload-image', views.upload_image),\n url(r'^(?P[0-9]+)/batches', views.get_shopper_batches),\n url(r'^documents/$', views.post_documents),\n url(r'^documents/(?P[0-9]+)/$', views.get_documents),\n]\n\n\n\n\n\n\n\n\n\n","sub_path":"ShopperBase/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"483199109","text":"import os\nimport csv\nimport shutil\nfrom settings import RAW_PATH, PROCESSED_PATH, PROCESSED_APPS_PATH\nfrom collections import defaultdict\n\n\ndef fetch_data(path):\n with open(path, 'r') as f:\n data = defaultdict(list)\n reader = csv.DictReader(f)\n for row in reader:\n for (k, v) in row.items():\n if k == 'Time' or \\\n k == 'Duration' or \\\n k == 'Revenue':\n v = float(v)\n data[k].append(v)\n\n return data\n\n\ndef read_raw_data():\n apps = []\n link_data = {}\n orders = {}\n\n with open(RAW_PATH + 'link_data.csv', 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n link_data[row['ApplicationID'].lower()] = row\n\n with open(RAW_PATH + 'orders.csv', 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n if not (row['SessionId'].lower() in orders.keys()):\n orders[row['SessionId'].lower()] = []\n orders[row['SessionId'].lower()].append(row)\n\n with open(RAW_PATH + 'apps.csv', 'r') as f:\n reader = csv.reader(f)\n apps = list(reader)\n\n return apps, link_data, orders\n\n\ndef write(apps):\n file = open(PROCESSED_PATH + 'apps.csv', 'w')\n with file:\n writer = csv.writer(file)\n writer.writerows(apps)\n\n\ndef write_files_by_app_name(apps):\n reset_apps_folder()\n app_names = []\n for app in apps[1:]:\n app_name = app[0].split(\".\")[2]\n path = \"\".join([PROCESSED_APPS_PATH, app_name, \".csv\"])\n if app_name not in app_names:\n app_names.append(app_name)\n write_row(path, apps[0])\n write_row(path, app)\n\n\ndef write_row(path, row):\n file = open(path, 'a')\n with file:\n writer = csv.writer(file)\n writer.writerow(row)\n\n\ndef reset_apps_folder():\n apps_path = 
PROCESSED_APPS_PATH\n if os.path.exists(apps_path):\n shutil.rmtree(apps_path)\n os.mkdir(apps_path)\n","sub_path":"src/utils/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"286770449","text":"# Webscraping of https://sciencemag.org\n# by J. M. Müller 09/2019-06/2020\n\nfrom functions import *\nimport time\nimport json\nimport pandas as pd\n\n# Set Pandas DataFrame Display Width:\ndesired_width = 320\npd.set_option('display.width', desired_width)\npd.set_option('display.max_columns', 15) # Show up to 10 columns in the console output (unlimited: None statt 10)\npd.options.display.float_format = '{:.2f}'.format # float formatting (digits to be shown in console output)\n\n\n# ignore Deprecation Warnings:\ndef warn(*args, **kwargs):\n pass\n\n\nimport warnings\n\nwarnings.warn = warn\n\n# Code below:\nroot_url = 'https://www.sciencemag.org/'\nrunning = True\n\nwith open(\"./config.json\") as file:\n config = json.loads(file.read())\n\nif __name__ == '__main__':\n vimp_msg(\"Welcome to the Science Scraper!\\n\", True)\n if config['cycling']:\n imp_msg(f\"Cycling Mode activated... Refreshing data every {config['cycle time']} minutes.\")\n while running:\n if config['cycling']:\n cycle_start = datetime.datetime.now()\n\n # process Title Page:\n articles = process_title_page(root_url, config['debugging'])\n\n # process Article Pages:\n final_articles = process_article_pages(articles, config['debugging'])\n\n # create term frequency-inverse document frequency matrix:\n tfidf_matrix = get_tfidf_matrix(final_articles, config['keyword number'], config['debugging'])\n\n # Create Console Output:\n create_console_output(final_articles)\n\n # Convert Data to DataFrame and log meta data to Console:\n df = convert_to_dataframe(final_articles, config['keyword number'], config['debugging'])\n\n if config['show plots']:\n create_plots(df, config['cycling'])\n\n if not config['cycling']:\n running = False\n else:\n # Wait for next iteration until Cycle Time is over:\n while datetime.datetime.now() - cycle_start < datetime.timedelta(minutes=config['cycle time']):\n time.sleep(10)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"273759688","text":"import pymongo\nclient = pymongo.MongoClient()\ndb = client.Names\n\n'''\nnamesdata = []\nfor _ in range(2):\n\tsubdata = [input('Enter name: '), input('Enter password: ')]\n\tnamesdata.append(subdata)\nprint(namesdata)\n\n\ndata = {}\nfor i, d in enumerate(namesdata):\n\tsubdata = {}\n\tsubdata['user{}'.format(i)] = {'name' : d[0], 'pass' : d[1],}\n\tdata.update(subdata)\n\n\n'''\n\nnames = db['names-collection']\nnew_name = { \"$set\": { 'Name' : 'Ivan2' }}\n#names_id = names.insert_one(data).inserted_id\nfor i in names.find():\n\tnames.update_one(i, new_name)\nfor i in names.find():\n\tnames.delete_one(i)\n","sub_path":"Learn/MongoDB/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"545142269","text":"import json\n\n\ndef printByCity(city):\n with open('src/by_city.json', encoding=\"utf-8\") as json_file:\n dataCity = json.load(json_file)\n keyDataCity = list(dataCity.keys())\n for i in range(len(keyDataCity)):\n if dataCity[keyDataCity[i]]['region'] == city:\n 
print(dataCity[keyDataCity[i]])\n\n\ndef printByFo(fo):\n with open('src/by_fo.json', encoding=\"utf-8\") as json_file:\n dataRegion = json.load(json_file)\n keyDataFo = list(dataRegion.keys())\n for i in range(len(keyDataFo)):\n if dataRegion[keyDataFo[i]]['region'] == fo:\n print(dataRegion[keyDataFo[i]])\n\n\n# merge the two JSONs: afterwards the main JSON holds BOTH the region AND the federal district (FO)\ndef mergeTwoBy():\n with open('src/by_city.json', encoding=\"utf-8\") as json_file:\n dataCity = json.load(json_file)\n keyDataCity = list(dataCity.keys())\n with open('src/by_fo.json', encoding=\"utf-8\") as json_file:\n dataRegion = json.load(json_file)\n keyDataFo = list(dataRegion.keys())\n for o in range(len(dataCity)):\n for u in range(len(dataRegion)):\n if dataCity[keyDataCity[o]]['vuz_name'] == dataRegion[keyDataFo[u]]['vuz_name']:\n dataCity[keyDataCity[o]].update({'fo': dataRegion[keyDataFo[u]]['region']}) # build the dict directly instead of eval()\n with open('src/json/main.json', 'w',\n encoding=\"utf-8\") as fp:\n json.dump(dataCity, fp, ensure_ascii=False)\n\n\n# print entries matching an arbitrary parameter\ndef printByJson(param, value):\n with open('src/json/main.json', encoding=\"utf-8\") as json_file:\n mainJson = json.load(json_file)\n keyMainJson = list(mainJson.keys())\n for i in range(len(keyMainJson)):\n if mainJson[keyMainJson[i]][param] == value:\n print(mainJson[keyMainJson[i]])\n\nmergeTwoBy()","sub_path":"sk.py","file_name":"sk.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"564130759","text":"def main():\r\n number = False\r\n while not number:\r\n try:\r\n score = int(input(\"Score > \"))\r\n number = True\r\n print(result(score))\r\n except ValueError:\r\n print(\"Only numbers accepted\")\r\n\r\n\r\ndef result(score):\r\n if score < 0 or score > 100:\r\n return \"Invalid score\"\r\n elif 85 <= score <= 100:\r\n return \"HD\"\r\n elif 75 <= score < 85:\r\n return \"D\"\r\n elif 65 <= score < 75:\r\n return \"C\"\r\n elif 50 <= score < 65:\r\n return \"P\"\r\n else:\r\n return \"F\"\r\n\r\n\r\nmain()\r\n","sub_path":"Prac_3/broken_score.py","file_name":"broken_score.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"425276977","text":"import matplotlib.pyplot as plt\nimport torch\n\ndef plot_graph(G, r=.3, c0='#52b', c1='#Cab', size=(10, 10)):\n \"\"\" Plot graph using Laplacian eigenmodes 1 and 2. \"\"\"\n G = G.scalars()\n fig = plt.figure(figsize=size)\n plt.axis('equal')\n plt.axis('off')\n if isinstance(c0, str): c0 = [c0] * G.Nvtx\n if isinstance(c1, str): c1 = [c1] * len(G[1].keys)\n #-- eigenmodes --\n L = G.codiff(1) @ G.diff(0)\n L = L.data.to_dense()\n eigval, eigvec = torch.linalg.eigh(L)\n x = eigvec[:,1:3] * G.Nvtx\n #-- arrows --\n for p, ep in enumerate(G[1].keys):\n i, j = ep\n xi, xj = x[i], x[j]\n add_arrow(xi, xj, r, .1, c1[p])\n #-- vertices --\n for i, xi in enumerate(x):\n add_vertex(xi, r, c0[i], label=str(i)) \n return fig\n\ndef add_vertex(x, r, c, label=None):\n \"\"\" Add vertex at x in current plot. \"\"\"\n circle = plt.Circle((x[0], x[1]), r, color=c)\n plt.gca().add_patch(circle)\n if label:\n plt.text(x[0], x[1], label, ha='center', va='center', color='#fff')\n\ndef add_arrow(xi, xj, r, w, c, label=None):\n \"\"\" Add arrow from xi to xj in current plot. 
\"\"\"\n v = xj - xi\n dv = r * v / v.norm()\n pos = [*(xi + dv), *(v - 2 * dv)]\n plt.arrow(*pos, \n width=.1,\n length_includes_head=True,\n head_width=.3,\n color=c,\n head_length=.6)\n\n \n\n","sub_path":"topos/io/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"425331615","text":"print(\"#####################\\nPart 8 program START.\\n#####################\")\r\n\r\nimport metadata\r\nfrom PIL import Image, ImageDraw\r\nfrom numpy import genfromtxt\r\n\r\nvalid_Lv1meshID = metadata.call_populated_lv1mesh()\r\n\r\ndef check_validity(targetlist,check_from):\r\n\r\n checkkkresult = []\r\n\r\n for (i,target) in enumerate(targetlist):\r\n if check_from.count(target) > 0 : # if list_check_from contains target, we made it!\r\n checkkkresult.append([target,1])\r\n else:\r\n checkkkresult.append([target,0])\r\n #print(result)\r\n return checkkkresult\r\n\r\ndef check_validity2(result):\r\n # this function is used to check\r\n # whether one list contain element on another list\r\n # this is use after check_validity\r\n # as the result of previous function = [ [a,0],[b,1],[c,1] ]\r\n # it is more convient to have a function to check whether \"1\" == result[n][1]\r\n\r\n ValidiyCodeOfResult = []\r\n\r\n for (i,EachSET) in enumerate(result):\r\n ValidiyCodeOfResult.append(EachSET[1])\r\n # so we obtain ValidiyCodeOfResult = [1,1,0,0,1]\r\n # all we need is to count 1 in this list\r\n if ValidiyCodeOfResult.count(1) == 0:\r\n return 0 # means there is no \"1\"\r\n else:\r\n return 1 # means there has \"1\"\r\n\r\ndef new_dictionary(meshid):\r\n key = str(meshid)\r\n value = []\r\n dictionary = {key:value}\r\n return dictionary\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n while True:\r\n query = input(\"Please input the Global Agglomeration's ID the you are interested in : \")\r\n \r\n if query != \"quit\":\r\n \r\n GlobalAggloID = int(query)\r\n \r\n bigpicture = []\r\n with open(\"data/big_picture/bigpicture.csv\" , \"r\") as input_file:\r\n for line in input_file:\r\n NewLine = line.strip()\r\n NewLine = NewLine[:-1]\r\n NewLine = NewLine.strip().split(\",\")\r\n bigpicture.append(NewLine)\r\n\r\n QuestedGlobalAggloINFO = bigpicture[GlobalAggloID]\r\n \r\n QuestedGlobalAggloPopulation = QuestedGlobalAggloINFO[1]\r\n \r\n QuestedGlobalAggloMesh = set()\r\n \r\n for length in range(len(QuestedGlobalAggloINFO)-2):\r\n QuestedGlobalAggloMesh.add(QuestedGlobalAggloINFO[length+2][:4])\r\n \r\n print(\"Global Agglomeration you selected is located in mesh\\n\",\r\n QuestedGlobalAggloMesh ,\"\\n\",\r\n \"with total population of\", int(QuestedGlobalAggloPopulation),\".\")\r\n\r\n QuestedGlobalAggloINFO = QuestedGlobalAggloINFO[2:]\r\n \r\n MeshIDs = []\r\n \r\n for items in QuestedGlobalAggloMesh:\r\n MeshIDs.append([items])\r\n \r\n for element in QuestedGlobalAggloINFO:\r\n for (index,everyMeshID) in enumerate(MeshIDs):\r\n if str(everyMeshID[0]) == str(element[:4]):\r\n newAggloID = element[5:element.index(\"_\",5)]\r\n MeshIDs[index].append(newAggloID)\r\n \r\n # MeshIDs = [ [ meshID_1, AggloID, AggloID, AggloID ],\r\n # [ meshID_2, AggloID, AggloID, AggloID ],\r\n # [ meshID_3, AggloID, AggloID, AggloID ] ]\r\n \r\n CellsCount = 0\r\n \r\n for eachMeshDATAs in MeshIDs:\r\n \r\n meshID = eachMeshDATAs[0]\r\n AggloIDS = eachMeshDATAs[1:]\r\n\r\n RegionalAggloDATAs = []\r\n \r\n with open(\"data/regional_agglomeration_data_sorted/population_rank_\"+str(meshID)+\".csv\" , 
\"r\") as input_file:\r\n for line in input_file:\r\n NewLine = line.strip()\r\n NewLine = NewLine[:-1]\r\n NewLine = NewLine.strip().split(\",\")\r\n RegionalAggloDATAs.append(NewLine)\r\n \r\n NeededDATA = []\r\n \r\n for eachAggloID in AggloIDS:\r\n buffer = RegionalAggloDATAs[int(eachAggloID)]\r\n NeededDATA.append(buffer)\r\n \r\n # NeededDATA = [ [ AggloID_1, cellID, cellID, cellID ],\r\n # [ AggloID_2, cellID, cellID, cellID ],\r\n # [ AggloID_3, cellID, cellID, cellID ] ]\r\n \r\n matrix = [[0 for i in range(320)] for j in range(320)]\r\n\r\n for eachAggloDATA in NeededDATA:\r\n cellDATAs = eachAggloDATA[2:]\r\n \r\n for eachCell in cellDATAs:\r\n CellINFO = eachCell.split(\"_\")\r\n meshID = int(CellINFO[0])\r\n cellY = int(CellINFO[1])\r\n cellX = int(CellINFO[2])\r\n cellPopulation = int(CellINFO[3]) \r\n \r\n matrix[cellY][cellX] = cellPopulation\r\n CellsCount += 1\r\n\r\n filename = str(GlobalAggloID)+\"_\"+str(meshID)\r\n \r\n with open(\"data/map/\"+str(filename)+\".csv\" , \"w\") as output_file:\r\n for each_row in matrix:\r\n for each_field in each_row:\r\n output_file.write(str(each_field)+\",\")\r\n output_file.write(\"\\n\")\r\n\r\n\r\n g = open(\"data/map/\"+str(filename)+\".csv\" , \"r\")\r\n temp = genfromtxt(g, delimiter = ',')\r\n #temp = matrix\r\n im = Image.fromarray(temp).convert('RGB')\r\n pix = im.load()\r\n rows, cols = im.size\r\n for x in range(320):\r\n for y in range(320):\r\n #print(str(x) + \" \" + str(y))\r\n pix[x,y] = (int(temp[y,x] // 256 // 256 % 256),\r\n int(temp[y,x] // 256 % 256),\r\n int(temp[y,x] % 256))\r\n im.save(g.name[0:-4] + '.tif')\r\n print(\"This Agglomeration has an area of\", str(CellsCount), \"cells.\\n\",\r\n \"Around\", CellsCount*(1/16) ,\"sq.km .\") \r\n print(\"Maps are output\")\r\n \r\n elif query == \"quit\":\r\n break\r\n\r\n\r\n\r\n\r\n print(\"#####################\\nPart 8 program END. Big Picture is output.\\n#####################\")\r\n","sub_path":"250mPOP/ProgramPart8_GraphingGlobalAgglomeration.py","file_name":"ProgramPart8_GraphingGlobalAgglomeration.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"40184305","text":"# Copyright (c) 2012-2014 The CEF Python authors. 
All rights reserved.\n# License: New BSD License.\n# Website: http://code.google.com/p/cefpython/\n\n# Create a setup package.\n\nimport sys\nimport os\nimport platform\nimport argparse\nimport re\nimport platform\nimport shutil\nimport glob\n\nBITS = platform.architecture()[0]\nassert (BITS == \"32bit\" or BITS == \"64bit\")\n\nPACKAGE_NAME = \"cefpython1\"\n\nREADME_TEMPLATE = os.getcwd()+r\"/README.txt.template\"\nINIT_TEMPLATE = os.getcwd()+r\"/__init__.py.template\"\nSETUP_TEMPLATE = os.getcwd()+r\"/setup.py.template\"\n\ndef main():\n parser = argparse.ArgumentParser(usage=\"%(prog)s [options]\")\n parser.add_argument(\"-v\", \"--version\", help=\"cefpython version\",\n required=True)\n args = parser.parse_args()\n assert re.search(r\"^\\d+\\.\\d+$\", args.version), (\n \"Invalid version string\")\n\n vars = {}\n vars[\"APP_VERSION\"] = args.version\n\n print(\"Reading template: %s\" % README_TEMPLATE)\n f = open(README_TEMPLATE)\n README_CONTENT = f.read() % vars\n f.close()\n\n print(\"Reading template: %s\" % INIT_TEMPLATE)\n f = open(INIT_TEMPLATE)\n INIT_CONTENT = f.read() % vars\n f.close()\n\n print(\"Reading template: %s\" % SETUP_TEMPLATE)\n f = open(SETUP_TEMPLATE)\n SETUP_CONTENT = f.read() % vars\n f.close()\n\n installer_dir = os.path.dirname(os.path.abspath(__file__))\n\n setup_dir = installer_dir+\"/\"+PACKAGE_NAME+\"-\"+vars[\"APP_VERSION\"]+\"-linux-\"+BITS+\"-setup\"\n print(\"Creating setup dir: \"+setup_dir)\n os.mkdir(setup_dir)\n\n package_dir = setup_dir+\"/\"+PACKAGE_NAME\n print(\"Creating package dir\")\n os.mkdir(package_dir)\n\n print(\"Creating README.txt from template\")\n with open(setup_dir+\"/README.txt\", \"w\") as f:\n f.write(README_CONTENT)\n\n print(\"Creating setup.py from template\")\n with open(setup_dir+\"/setup.py\", \"w\") as f:\n f.write(SETUP_CONTENT)\n\n binaries_dir = os.path.abspath(installer_dir+\"/../binaries_\"+BITS+\"/\")\n print(\"Copying binaries to package dir\")\n ret = os.system(\"cp -rf \"+binaries_dir+\"/* \"+package_dir)\n assert ret == 0\n\n os.chdir(package_dir)\n print(\"Removing .log files from the package dir\")\n ret = os.system(\"rm *.log\")\n # assert ret == 0 - if there are no .log files this assert would fail.\n os.chdir(installer_dir)\n\n print(\"Creating __init__.py from template\")\n with open(package_dir+\"/__init__.py\", \"w\") as f:\n f.write(INIT_CONTENT)\n\n print(\"Creating examples dir in package dir\")\n os.mkdir(package_dir+\"/examples/\")\n\n print(\"Creating wx dir in package dir\")\n os.mkdir(package_dir+\"/wx/\")\n\n print(\"Moving example scripts from package dir to examples dir\")\n examples = glob.glob(package_dir+\"/*.py\")\n for example in examples:\n # Ignore: cefpython_py27.py - dummy API script\n if os.path.basename(example).startswith(\"cefpython_\"):\n continue\n # Ignore: __init__.py\n if os.path.basename(example).startswith(\"__\"):\n continue\n os.rename(example, package_dir+\"/examples/\"+os.path.basename(example))\n ret = os.system(\"mv \"+package_dir+\"/*.html \"+package_dir+\"/examples/\")\n assert ret == 0\n\n print(\"Copying wx-subpackage to wx dir in package dir\")\n wx_subpackage_dir = os.path.abspath(installer_dir+\"/../../wx-subpackage/\")\n ret = os.system(\"cp -rf \"+wx_subpackage_dir+\"/* \"+package_dir+\"/wx/\")\n assert ret == 0\n\n print(\"Moving wx examples from wx/examples to examples/wx\")\n shutil.move(package_dir+\"/wx/examples\", package_dir+\"/wx/wx/\")\n shutil.move(package_dir+\"/wx/wx/\", package_dir+\"/examples/\")\n\n print(\"Copying package dir examples to 
setup dir\")\n ret = os.system(\"cp -rf \"+package_dir+\"/examples/ \"+setup_dir+\"/examples/\")\n assert ret == 0\n\n print(\"Setup Package created.\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cefpython/cef1/linux/installer/make-setup.py","file_name":"make-setup.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"200496602","text":"from sklearn.linear_model import LogisticRegression\n\nfrom utils.hyperparameters import HyperParameters\nfrom .traditional_model import TraditionalModel\n\n\nclass LogisticRegressionModel(TraditionalModel):\n\n def __init__(self, hyper_parameters: HyperParameters, save_folder: str, is_train: bool):\n super().__init__(hyper_parameters, save_folder, is_train)\n\n self._model = None\n self.name = 'logistic-regression'\n\n def make(self, is_train: bool, is_frozen: bool):\n if self.model is not None:\n return\n\n self._model = LogisticRegression(penalty=self.hypers.model_params['penalty'],\n C=self.hypers.model_params['regularization_strength'],\n solver=self.hypers.model_params['solver'],\n max_iter=self.hypers.model_params['max_iters'])\n","sub_path":"src/models/logistic_regression_model.py","file_name":"logistic_regression_model.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"402277769","text":"from gensim import models\nimport logging\nimport numpy as np\nfrom keras.preprocessing import sequence\nimport collections\nimport re\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n# 去除标点符号和特殊符号,停用词\n# 获取训练数据\n\nclass MySentences(object):\n def __init__(self, filename):\n self.filename = filename\n\n def __iter__(self):\n # 读入训练数据\n for line in open(self.filename):\n article = line.replace('\\n', '').split(\" \")\n yield article\n\n\n# 训练word2vec\ndef trainModel(inFile,modelName):\n\n # 读入数据\n sentences = MySentences(inFile)\n\n # 训练\n # 少于min_count次数的单词会被丢弃掉, 默认值为5\n # size = 神经网络的隐藏层的单元数 default value is 100\n # workers= 控制训练的并行:default = 1 worker (no parallelization) 只有在安装了Cython后才有效\n model = models.Word2Vec(sentences,min_count=10,window=10,size = 200,workers=4)\n\n modelFile = modelName + \".mdl\"\n # 存储模型\n model.save(modelFile)\n\n vecFile = modelName + \".bin\"\n # 存储vector\n model.wv.save_word2vec_format(vecFile, binary=True)\n\n\ndef incrementTrain(modelName,files):\n\n modelFile = modelName + \".mdl\"\n vecFile = modelName + \".bin\"\n\n # 读入模型\n model = models.Word2Vec.load(modelFile)\n # 读入增量数据\n for f in files:\n data_set = MySentences(f)\n print(\"increment train word2vec model use:\"+f)\n # 增量训练\n model.train(data_set,total_examples=model.corpus_count, epochs=model.iter)\n # 存储模型\n model.save(modelFile)\n # 存储vector\n model.wv.save_word2vec_format(vecFile, binary=True)\n\ndef main():\n \n # 定义文件路径\n dataPath = \"/media/kinux2347/software/DataScience/bdci360_semi/\"\n mdlPath = \"/media/kinux2347/software/DataScience/bdci360_semi/model/\"\n \n # 训练数据\n inFile = dataPath + \"train/train_all.tsv\"\n modelName = mdlPath + \"w2v_v2\"\n\n \n # 训练词向量模型\n # 训练模型使用全部数据\n trainModel(inFile, modelName)\n\n # 增量训练\n # 增量训练初期化\n # trainModel(trainPart[0], modelName)\n # incrementTrain(modelName,trainPart[1:])\n\nif __name__ == '__main__':\n 
main()\n\n","sub_path":"1126/031_trainW2V.py","file_name":"031_trainW2V.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"167582226","text":"import environ\nimport dj_database_url\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import reverse\nfrom django.utils import six\n\nif six.PY2:\n from urlparse import urlparse, urljoin\nelse:\n from urllib.parse import urlparse, urljoin\n\n\nenv = environ.Env()\nenv.read_env('.env')\n\nBASE_DIR = environ.Path(__file__) - 2\nSECRET_KEY = env('DJANGO_SECRET_KEY')\nDEBUG = env.bool('DJANGO_DEBUG', False)\nALLOWED_HOSTS = ['*']\nINSTALLED_APPS = [\n 'suit',\n 'suit_redactor',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 's3direct',\n 'storages',\n 'sorl.thumbnail',\n 'markdown_deux',\n 'django_rq',\n 'taggit',\n 'constance',\n 'constance.backends.database',\n 'freeradio.core',\n 'freeradio.advertising',\n 'freeradio.talent',\n 'freeradio.traffic',\n 'freeradio.podcasting',\n 'freeradio.music',\n 'freeradio.noticeboard',\n 'freeradio.blog',\n 'freeradio.oembed'\n]\n\nif not DEBUG:\n RAVEN_CONFIG = {\n 'dsn': env('SENTRY_DSN', default='')\n }\n\n INSTALLED_APPS += ('raven.contrib.django.raven_compat',)\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware'\n]\n\nROOT_URLCONF = 'freeradio.urls'\nSECURE_SSL_REDIRECT = env.bool('SECURE_SSL_REDIRECT', False)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n BASE_DIR.path('templates')\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'freeradio.core.context_processors.settings',\n 'freeradio.core.context_processors.home',\n 'freeradio.traffic.context_processors.traffic',\n 'freeradio.noticeboard.context_processors.noticeboard'\n ],\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader'\n ],\n 'debug': DEBUG\n },\n },\n]\n\nWSGI_APPLICATION = 'freeradio.wsgi.application'\nDATABASES = {\n 'default': env.db(\n 'DATABASE_URL',\n default='sqlite:///%s' % BASE_DIR.path('freeradio.sqlite')\n )\n}\n\nDATABASES['default'].update(\n dj_database_url.config(conn_max_age=500)\n)\n\n# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': (\n 'django.contrib.auth.password_validation.'\n 'UserAttributeSimilarityValidator'\n )\n },\n {\n 'NAME': (\n 'django.contrib.auth.password_validation.MinimumLengthValidator'\n )\n },\n {\n 'NAME': (\n 'django.contrib.auth.password_validation.CommonPasswordValidator'\n )\n },\n {\n 'NAME': (\n 'django.contrib.auth.password_validation.NumericPasswordValidator'\n )\n }\n]\n\nLANGUAGE_CODE = 'en-gb'\nTIME_ZONE = 
'Europe/London'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder'\n)\n\nif (\n env('DROPBOX_OAUTH2_TOKEN', default='') and\n env('DROPBOX_ROOT_PATH', default='')\n):\n DEFAULT_FILE_STORAGE = 'storages.backends.dropbox.DropboxStorage'\n STATICFILES_STORAGE = 'freeradio.core.dropbox.DropboxStaticStorage'\n DROPBOX_OAUTH2_TOKEN = env('DROPBOX_OAUTH2_TOKEN', default='')\n DROPBOX_ROOT_PATH = env('DROPBOX_ROOT_PATH', default='')\nelif env('AWS_S3_BUCKET', default=''):\n DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n STATICFILES_STORAGE = 'freeradio.core.storages.S3StaticStorage'\n S3DIRECT_REGION = 'eu-west-1'\n AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default='')\n AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default='')\n AWS_STORAGE_BUCKET_NAME = env('AWS_S3_BUCKET', default='')\n AWS_S3_CUSTOM_DOMAIN = env('AWS_S3_CUSTOM_DOMAIN', default='') or (\n 's3-eu-west-1.amazonaws.com/%s' % AWS_STORAGE_BUCKET_NAME\n )\n\n AWS_PRELOAD_METADATA = True\n AWS_QUERYSTRING_AUTH = False\nelif not DEBUG:\n raise ImproperlyConfigured(\n 'Either a DROPBOX_OAUTH2_TOKEN or AWS_S3_BUCKET must be defined.'\n )\n\nREDIS_URL = env('REDIS_URL', default='redis://127.0.0.1:6379/')\nREDIS_URL_PARTS = urlparse(REDIS_URL)\n\nRQ_QUEUES = {\n 'default': {\n 'URL': REDIS_URL\n }\n}\n\nMEDIA_ROOT = BASE_DIR.path('media')\nSTATIC_ROOT = BASE_DIR.path('staticfiles')\nMEDIA_URL = DEBUG and '/media/' or '//%s/uploads/' % AWS_S3_CUSTOM_DOMAIN\nSTATIC_URL = DEBUG and '/static/' or ('//%s/static/' % AWS_S3_CUSTOM_DOMAIN)\nSITE_ID = env.int('SITE_ID', 1)\n\nS3DIRECT_DESTINATIONS = {\n 'podcast_episodes': {\n 'key': 'podcasts',\n 'allowed': ['audio/mpeg', 'audio/mpeg3', 'audio/x-mpeg-3'],\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': [\n env('REDIS_URL', default='redis://127.0.0.1:6379')\n ],\n 'OPTIONS': {\n 'DB': 0,\n 'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',\n 'CONNECTION_POOL_CLASS_KWARGS': {\n 'max_connections': 50,\n 'timeout': 20\n },\n 'MAX_CONNECTIONS': 1000,\n 'PICKLE_VERSION': -1\n },\n 'KEY_PREFIX': 'cache'\n }\n}\n\nNOTICEBOARD_MODELS = (\n (\n 'blog.Post',\n {\n 'subtitle': u'New to the blog',\n 'title': str,\n 'image_field': 'featured_image',\n 'description_field': 'excerpt',\n 'cta_text': u'Read more',\n 'date_field': 'published'\n }\n ),\n (\n 'music.PlaylistTrack',\n {\n 'subtitle': lambda o: u'New track on %s' % str(o.playlist),\n 'title': lambda o: u'%s - %s' % (o.track.artist, o.track),\n 'image_field': 'image',\n 'cta_text': u'Check it out',\n 'cta_url': lambda o: (\n o.track.purchase_url or\n o.track.artist.url or\n reverse('music:playlist')\n ),\n 'stickiness': 5,\n 'date_field': 'added'\n }\n ),\n (\n 'podcasting.Series',\n {\n 'subtitle': u'New podcast',\n 'title': str,\n 'description_field': 'subtitle',\n 'cta_text': u'Hear the first episode',\n 'stickiness': 7,\n 'image_field': 'artwork'\n }\n ),\n (\n 'podcasting.Episode',\n {\n 'subtitle': lambda o: o.series.name,\n 'title': str,\n 'cta_text': u'Listen now',\n 'stickiness': 4,\n 'image': lambda o: o.series.artwork,\n 'date_field': 'published'\n }\n ),\n (\n 'traffic.Update',\n {\n 'subtitle': lambda o: (\n o.kind == 'episode' and 'Listen again' or 'Show news'\n ),\n 'title': lambda o: str(o.programme),\n 'description_field': 'description',\n 'image_field': 'programme.logo',\n 'cta_text': lambda o: (\n o.kind == 'episode' and 
'Listen' or 'Read more'\n ),\n 'cta_url': lambda o: reverse(\n 'traffic:programme',\n args=[o.programme.slug]\n ),\n 'date_field': 'date'\n }\n )\n)\n\nCKEDITOR_UPLOAD_PATH = 'uploads'\nCKEDITOR_CONFIGS = {\n 'default': {\n 'skin': 'minimalist',\n 'toolbar_Basic': [\n ['Source', '-', 'Bold', 'Italic']\n ],\n 'toolbar_Advanced': [\n {\n 'name': 'basicstyles',\n 'items': [\n 'Format',\n 'Bold',\n 'Italic',\n 'Subscript',\n 'Superscript',\n '-',\n 'RemoveFormat'\n ]\n },\n {\n 'name': 'paragraph',\n 'items': [\n 'NumberedList',\n 'BulletedList',\n '-',\n 'Outdent',\n 'Indent',\n '-',\n 'Blockquote'\n ]\n },\n {\n 'name': 'links',\n 'items': [\n 'Link',\n 'Unlink',\n 'Anchor'\n ]\n },\n {\n 'name': 'insert',\n 'items': [\n 'Image',\n 'HorizontalRule',\n 'Smiley',\n 'SpecialChar',\n ]\n }\n ],\n 'toolbar': 'Advanced',\n 'tabSpaces': 4,\n 'extraPlugins': ','.join(\n [\n 'autogrow',\n 'widget',\n 'lineutils',\n 'clipboard',\n 'dialog',\n 'dialogui',\n 'elementspath'\n ]\n )\n }\n}\n\nTHUMBNAIL_DEBUG = DEBUG\nTHUMBNAIL_KVSTORE = 'sorl.thumbnail.kvstores.redis_kvstore.KVStore'\nTHUMBNAIL_REDIS_PASSWORD = REDIS_URL_PARTS.password\nTHUMBNAIL_REDIS_HOST = REDIS_URL_PARTS.hostname\nTHUMBNAIL_REDIS_PORT = REDIS_URL_PARTS.port\n\nNOTICEBOARD_SIZES = {\n 'blog.post': {\n 768: [2, 2]\n },\n 'music.playlisttrack': {\n 768: [1, 2]\n },\n 'podcasting.series': {\n 768: [3, 2],\n 1200: [2, 2]\n },\n 'podcasting.episode': {\n 768: [3, 1],\n 1200: [2, 1]\n },\n 'traffic.update': {\n 768: [3, 1],\n 1200: [1, 1]\n }\n}\n\nADVERTISEMENT_REGIONS = {\n 'home_01': 'Homepage (before Features)',\n 'home_01': 'Homepage (after Features)',\n 'sidebar': 'Sidebar'\n}\n\n\nCONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'\nCONSTANCE_CONFIG = {\n 'OFFAIR_TEXT': (\n 'Our local music mix',\n 'What to display when there is no live show'\n ),\n 'HOME_TITLE': (\n 'Welcome to Django Free Radio',\n 'Homepage heading text'\n ),\n 'HOME_INTRO': (\n 'You can change this content in the admin site',\n 'Home intro text'\n ),\n 'IOS_APP_URL': (\n 'https://itunes.apple.com/gb/app/brum-radio/id1218461799?mt=8',\n 'iOS app URL'\n ),\n 'ANDROID_APP_URL': (\n 'https://play.google.com/store/apps/details?id=com.ionicframework.combrumradio701094',\n 'Android app URL'\n ),\n 'RADIO_STREAM_URL': (\n env(\n 'RADIO_STREAM_URL',\n default='http://uk3.internet-radio.com:11168/stream'\n ),\n 'Shoutcast stream URL'\n ),\n 'RADIO_NOWPLAYING_URL': (\n env(\n 'RADIO_NOWPLAYING_URL',\n default='https://control.internet-radio.com:2199/external/rpc.php?m=streaminfo.get&username=brumradio&charset=&mountpoint=&rid=brumradio'\n ),\n 'Now-playing XML URL'\n ),\n 'GOOGLE_ANALYTICS_ID': (\n env('GOOGLE_ANALYTICS_ID', default=''),\n 'Google Analytics ID'\n ),\n 'FACEBOOK_APP_ID': (\n env('FACEBOOK_APP_ID', default=''),\n 'Facebook app ID'\n ),\n 'MIXCLOUD_USERNAME': (\n env('MIXCLOUD_USERNAME', default='brumradio'),\n 'MixCloud username'\n ),\n 'MAILCHIMP_FORM_URL': (\n env(\n 'MAILCHIMP_FORM_URL',\n default='//freeradio.us5.list-manage.com/subscribe/post?u=ff5d7e986a83ed1a1e1de6c27&id=19d2b7f483'\n ),\n 'MailChimp subscription form URL'\n )\n}\n","sub_path":"freeradio/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":12258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"453161033","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal as mvn\nfrom matplotlib.patches import Ellipse\n\nnp.random.seed(1234)\n\ndef 
random_posdef(n):\n    A = np.random.rand(n, n)\n    return np.dot(A, A.transpose())\n\n# Parameter initialization ###\nK = 2\npi = [1.0/K for i in range(K)]\nmeans = [[0,0] for i in range(K)]\n#means = [[-0.2,-0.5],[-1.2,-1.6]]\ncovs = [random_posdef(2) for i in range(K)]\n\ngmm_data = np.load('gmm_data.npy')\n\n################# Scatter plot of data ###########\nplt.scatter(gmm_data[:,0],gmm_data[:,1])\nplt.show()\n\n\n############## EM algorithm ##################\n\n## Probability that a given data point came from one of the K clusters \n\ndef em_gmm(data,means,covs,pi,tol, max_iter):\n\tll_old = 0.0\n\t[num_points,dim] = data.shape\n\n\tfor l in range(max_iter):\n\t\n\t### E step #####\n\t#### This part is for evaluating the current responsibilities \n\n\t\tpoints_classes = np.zeros((K,num_points))\n\t\tfor i in range(K):\n\t\t\tfor j in range(num_points):\n\t\t\t\t#print(pi[i])\n\t\t\t\t#print(data[j][0])\n\t\t\t\tpoints_classes[i][j] = pi[i] * mvn(means[i],covs[i]).pdf(data[j])\n\t\t#print(points_classes[0][500])\n\t\tpoints_classes /= points_classes.sum(0) \n\t\t#print(points_classes[0][100])\n\t\t#print(points_classes[0][0])\n\t\t\n\t\t#### M step ############\n\t\tpi = np.zeros(K)\n\t\tfor i in range(K):\n\t\t\tfor j in range(num_points):\n\t\t\t\tpi[i] = pi[i] + points_classes[i][j]\n\t\tpi /= num_points\n\t\n\t\tmeans = np.zeros((K,dim))\n\t\tfor i in range(K):\n\t\t\tfor j in range(num_points):\n\t\t\t\tmeans[i] = means[i] + points_classes[i][j] * data[j]\n\t\t\tmeans[i] /= points_classes[i][:].sum()\n\t\t\t#print(means)\n\n\t\tcovs = np.zeros((K,dim,dim))\n\t\tfor i in range(K):\n\t\t\tfor j in range(num_points):\n\t\t\t\tys = np.reshape(data[j] - means[i],(2,1))\n\t\t\t\tcovs[i] = covs[i] + points_classes[i][j] * np.dot(ys,ys.T)\n\t\t\tcovs[i] /= points_classes[i][:].sum()\n\t\n\t\tll_new = 0.0\n\t\tfor i in range(num_points):\n\t\t\ts = 0\n\t\t\tfor j in range(K):\n\t\t\t\t# evaluate the density at the full 2-D point, consistent with the E step above\n\t\t\t\ts = s + pi[j] * mvn(means[j],covs[j]).pdf(data[i])\n\t\t\tll_new = ll_new + np.log(s)\n\t\tif np.abs(ll_new - ll_old) < tol:\n\t\t\treturn pi, means, covs, ll_new\n\t\tll_old = ll_new\n\treturn pi, means, covs, ll_new\n\nA = em_gmm(gmm_data,means,covs,pi,0.005,100)\nprint(A[1])\nprint(A[2])\nprint(A[3])\n\n\n\ndef plot_ellipse(pos, cov, nstd=2, ax=None, **kwargs):\n    def eigsorted(cov):\n        vals, vecs = np.linalg.eigh(cov)\n        order = vals.argsort()[::-1]\n        return vals[order], vecs[:,order]\n    \n    if ax is None:\n        ax = plt.gca()\n    \n    vals, vecs = eigsorted(cov)\n    theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))\n    \n    # Width and height are \"full\" widths, not radius\n    width, height = 2 * nstd * np.sqrt(abs(vals))\n    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)\n    \n    ax.add_artist(ellip)\n    return ellip \n    \ndef show(X, mu, cov):\n\n    plt.cla()\n    K = len(mu)  # number of clusters\n    colors = ['b', 'k', 'g', 'c', 'm', 'y', 'r']\n    plt.plot(X.T[0], X.T[1], 'm*')\n    for k in range(K):\n        plot_ellipse(mu[k], cov[k], alpha=0.6, color = colors[k % len(colors)]) \n    plt.show()\n\n    \n#fig = plt.figure(figsize = (13, 6))\n#fig.add_subplot(121)\nshow(gmm_data, A[1], A[2])\n#fig.add_subplot(122)\n#plt.plot(np.array(A[3]))\n#plt.show()\n\n#print(A)\n#print(gmm_data.shape)\n","sub_path":"HW5/4a_em_jsolde.py","file_name":"4a_em_jsolde.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"}
{"seq_id":"351510376","text":"import os\n\nimport time\nimport random\nimport argparse\nimport logging\nimport numpy as np\nimport 
torch\nfrom torch import nn, autograd\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torchaudio\nfrom warprnnt_pytorch import RNNTLoss\nfrom models import Transducer\nfrom recurrent import MFCC_\nfrom dataset import (\n CommonVoice, \n YoutubeCaption,\n Synthetic,\n Librispeech,\n TEDLIUM,\n seq_collate, MergedDataset\n)\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader, Dataset\nfrom tokenizer import HuggingFaceTokenizer, CharTokenizer\nfrom augmentation import ConcatFeature, TimeMask, FreqMask, TimeWrap\nfrom tensorboardX import SummaryWriter\nimport json\nimport jiwer\nfrom plot_utils import plot_alignment_to_numpy\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom apex import amp\nimport pytorch_lightning as pl\nfrom train import args\nfrom parts.features import AudioPreprocessing\n\n\nclass ParallelTraining(pl.LightningModule):\n def __init__(self, args):\n super(ParallelTraining, self).__init__()\n if args.tokenizer == 'char':\n _tokenizer = CharTokenizer()\n else:\n print('use BPE 1000')\n _tokenizer = HuggingFaceTokenizer() # use BPE-1000\n audio_feature = args.audio_feat\n if args.concat:\n audio_feature *= 3\n\n self.tokenizer = _tokenizer\n self.loss_fn = RNNTLoss(blank=0)\n self.model = Transducer(audio_feature, _tokenizer.vocab_size,\n args.vocab_dim, # vocab embedding dim\n args.h_dim, # hidden dim\n args.layers, pred_num_layers=args.pred_layers, dropout=args.dropout\n )\n self.latest_alignment = None\n self.steps = 0\n self.epoch = 0\n self.args = args\n self.best_wer = 1000\n\n def warmup_optimizer_step(self, steps):\n if steps < self.args.warmup:\n lr_scale = min(1., float(steps + 1) / self.args.warmup*1.0)\n for pg in self.optimizer.param_groups:\n pg['lr'] = lr_scale * self.args.lr\n \n def forward(self, batch):\n xs, ys, xlen, ylen = batch\n # xs, ys, xlen = xs.cuda(), ys, xlen.cuda()\n self.model.flatten_parameters()\n alignment = self.model(xs, ys, xlen, ylen)\n return alignment\n\n def training_step(self, batch, batch_nb):\n xs, ys, xlen, ylen = batch\n # xs, ys, xlen = xs.cuda(), ys, xlen.cuda()\n if xs.shape[1] != xlen.max():\n xs = xs[:, :xlen.max()]\n ys = ys[:, :ylen.max()]\n self.model.flatten_parameters()\n alignment = self.model(xs, ys, xlen, ylen)\n if batch_nb % 100 == 0:\n self.latest_alignment = alignment.cpu()\n if alignment.shape[1] != xs.shape[1]:\n reduction_ratio = (xs.shape[1]/alignment.shape[1])\n xlen = torch.round(xlen/reduction_ratio).int()\n loss = self.loss_fn(alignment, ys.int(), xlen, ylen)\n\n if batch_nb % 100 == 0:\n lr_val = 0\n for param_group in self.optimizer.param_groups:\n lr_val = param_group['lr']\n self.logger.experiment.add_scalar('lr', lr_val, self.steps)\n\n self.steps += 1\n\n if self.steps < self.args.warmup:\n self.warmup_optimizer_step(self.steps)\n else:\n self.cosine_schedule.step()\n\n return {'loss': loss, 'log': {\n 'loss': loss.item()\n }}\n\n def validation_step(self, batch, batch_nb):\n xs, ys, xlen, ylen = batch\n self.model.flatten_parameters()\n y, nll = self.model.greedy_decode(xs, xlen)\n\n hypothesis = self.tokenizer.decode_plus(y)\n ground_truth = self.tokenizer.decode_plus(ys.cpu().numpy())\n measures = jiwer.compute_measures(ground_truth, hypothesis)\n\n return {'val_loss': nll.mean().item(), 'wer': measures['wer'], 'ground_truth': ground_truth[0], 'hypothesis': hypothesis[0]}\n\n def validation_end(self, outputs):\n # OPTIONAL\n self.logger.experiment.add_text('test', 'This is test', 0)\n\n avg_wer = np.mean([x['wer'] for x in outputs])\n ppl = 
np.mean([x['val_loss'] for x in outputs])\n self.logger.experiment.add_scalar('val/WER', avg_wer, self.steps)\n self.logger.experiment.add_scalar('val/perplexity', ppl, self.steps)\n\n hypothesis, ground_truth = '', ''\n for idx in range(min(5, len(outputs))):\n hypothesis += outputs[idx]['hypothesis']+'\\n\\n'\n ground_truth += outputs[idx]['ground_truth'] + '\\n\\n'\n\n self.logger.experiment.add_text('generated', hypothesis, self.steps)\n self.logger.experiment.add_text('grouth_truth', ground_truth, self.steps)\n if self.latest_alignment != None:\n alignment = self.latest_alignment\n idx = random.randint(0, alignment.size(0) - 1)\n alignment = torch.softmax(alignment[idx], dim=-1)\n alignment[:, :, 0] = 0 # ignore blank token\n alignment = alignment.mean(dim=-1)\n\n self.logger.experiment.add_image(\n \"alignment\",\n plot_alignment_to_numpy(alignment.data.numpy().T),\n self.steps, dataformats='HWC')\n self.logger.experiment.flush()\n\n if self.best_wer > avg_wer:\n print('best checkpoint found!')\n checkpoint = {\n 'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'epoch': self.epoch\n }\n if self.args.apex:\n checkpoint['amp'] = amp.state_dict()\n torch.save(checkpoint, os.path.join(self.args.log_path, str(self.epoch)+'amp_checkpoint.pt'))\n self.best_wer = avg_wer\n\n\n self.plateau_scheduler.step(avg_wer)\n self.epoch += 1\n\n return {'val/WER': torch.tensor(avg_wer),\n 'wer': torch.tensor(avg_wer),\n 'val/perplexity': torch.tensor(ppl) }\n \n def validation_epoch_end(self, outputs):\n avg_wer = np.mean([x['wer'] for x in outputs])\n ppl = np.mean([x['val_loss'] for x in outputs])\n\n hypothesis, ground_truth = '', ''\n for idx in range(5):\n hypothesis += outputs[idx]['hypothesis']+'\\n\\n'\n ground_truth += outputs[idx]['ground_truth'] + '\\n\\n'\n\n writer.add_text('generated', hypothesis, self.steps)\n writer.add_text('grouth_truth', ground_truth, self.steps)\n\n if self.latest_alignment != None:\n alignment = self.latest_alignment\n idx = random.randint(0, alignment.size(0) - 1)\n alignment = torch.softmax(alignment[idx], dim=-1)\n alignment[:, :, 0] = 0 # ignore blank token\n alignment = alignment.mean(dim=-1)\n\n writer.add_image(\n \"alignment\",\n plot_alignment_to_numpy(alignment.data.numpy().T),\n self.steps, dataformats='HWC')\n\n self.logger.experiment.add_scalar('val/WER', avg_wer, self.steps)\n self.logger.experiment.add_scalar('val/perplexity', ppl, self.steps)\n self.logger.experiment.flush()\n\n self.plateau_scheduler.step(avg_wer)\n\n self.epoch += 1\n return {'val/WER': torch.tensor(avg_wer),\n 'val/perplexity': torch.tensor(ppl) }\n\n def configure_optimizers(self):\n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.lr, momentum=0.9)\n lmbda = lambda epoch: 0.97\n scheduler = torch.optim.lr_scheduler.MultiplicativeLR(self.optimizer, lr_lambda=lmbda)\n\n self.plateau_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min', patience=2, factor=0.9)\n self.cosine_schedule = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, 250000 , eta_min=1e-8)\n self.warmup_optimizer_step(0)\n return [self.optimizer], [scheduler]\n\n @pl.data_loader\n def train_dataloader(self):\n\n args = self.args\n transforms_piplines = [\n # torchaudio.transforms.MelSpectrogram(\n # # n_mfcc=args.audio_feat, \n # n_fft=args.n_fft, n_mels=args.audio_feat,\n # # melkwargs={'n_fft':1024, 'win_length': 1024}\n # ),\n # MFCC_(\n # n_mfcc=args.audio_feat, log_mels=True,\n # melkwargs={'n_fft':args.n_fft, 'f_max': 5800, 
'f_min': 20}\n # ),\n AudioPreprocessing(\n normalize='none', sample_rate=16000, window_size=0.02, \n window_stride=0.015, features=args.audio_feat, n_fft=512, log=True,\n feat_type='logfbank', trim_silence=True, window='hann',dither=0.00001, frame_splicing=1, transpose_out=False\n ),\n TimeWrap(),\n TimeMask(T=40, num_masks=5, replace_with_zero=False),\n FreqMask(F=5, num_masks=5, replace_with_zero=False),\n ]\n if args.concat:\n transforms_piplines.append(\n ConcatFeature(merge_size=3)\n )\n transforms = torch.nn.Sequential(*transforms_piplines)\n\n common_voice = CommonVoice(f'{args.data_path}common_voice',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n synthetic = Synthetic(f'{args.data_path}synthetic',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n yt3_dataset = YoutubeCaption(f'{args.data_path}youtube-speech-text/',\n labels='news_meta.csv',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n yt_dataset = YoutubeCaption(f'{args.data_path}youtube-speech-text/',\n labels='bloomberg2_meta.csv',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n yt2_dataset = YoutubeCaption(f'{args.data_path}youtube-speech-text/',\n labels='english2_meta.csv',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n yt3_dataset = YoutubeCaption(f'{args.data_path}youtube-speech-text/',\n labels='life_meta.csv',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n librispeech2 = Librispeech(f'{args.data_path}LibriSpeech/train-other-500/',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n librispeech = Librispeech(f'{args.data_path}LibriSpeech/train-clean-360/',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n tedlium = TEDLIUM(f'{args.data_path}TEDLIUM/TEDLIUM_release1/train/',\n audio_max_length=13,\n transforms=transforms, tokenizer=self.tokenizer)\n # tedlium2 = TEDLIUM(f'{args.data_path}TEDLIUM/TEDLIUM_release-3/data/',\n # audio_max_length=12,\n # transforms=transforms, tokenizer=self.tokenizer)\n dataset = MergedDataset([common_voice, yt_dataset, librispeech, yt3_dataset, tedlium, yt3_dataset, synthetic, librispeech2, yt2_dataset]) \n return DataLoader(dataset, collate_fn=seq_collate, batch_size=args.batch_size, \n num_workers=4, shuffle=True, drop_last=True)\n\n @pl.data_loader\n def val_dataloader(self):\n\n args = self.args\n val_pipeline = [\n # torchaudio.transforms.MelSpectrogram(\n # # n_mfcc=args.audio_feat, \n # n_fft=args.n_fft, n_mels=args.audio_feat,\n # # melkwargs={'n_fft':1024, 'win_length': 1024}\n # ),\n # MFCC_(\n # n_mfcc=args.audio_feat, log_mels=True,\n # melkwargs={'n_fft':args.n_fft, 'f_max': 5800, 'f_min': 20}\n # )\n AudioPreprocessing(\n normalize='none', sample_rate=16000, window_size=0.02, \n window_stride=0.015, features=args.audio_feat, n_fft=512, log=True,\n feat_type='logfbank', trim_silence=True, window='hann',dither=0.00001, frame_splicing=1, transpose_out=False\n ),\n ]\n if args.concat:\n val_pipeline.append(\n ConcatFeature(merge_size=3)\n )\n val_transform = torch.nn.Sequential(*val_pipeline)\n if len(val_pipeline) == 1:\n val_transform = val_pipeline[0]\n _tokenizer = self.tokenizer\n val_dataset = Librispeech(f'{args.data_path}LibriSpeech/test-clean/',\n audio_max_length=14,\n transforms=val_transform, tokenizer=self.tokenizer)\n return DataLoader(val_dataset, collate_fn=seq_collate, batch_size=64, num_workers=4, shuffle=False)\n\n\n\nif __name__ == 
\"__main__\":\n from pytorch_lightning import Trainer\n from pytorch_lightning.callbacks import ModelCheckpoint\n import pickle\n model = ParallelTraining(args)\n # with open('test.pt', 'wb') as f:\n # pickle.dump(model, f)\n params = {\n 'gpus': [0, 1, 2],\n 'distributed_backend': 'ddp',\n 'gradient_clip_val': 10,\n 'accumulate_grad_batches': args.accumulation_steps\n }\n if args.apex:\n print('use apex')\n params['amp_level'] = args.opt_level\n params['precision'] = 16\n\n from datetime import datetime\n cur_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n log_name = '{}-{}'.format(args.name, args.tokenizer)\n log_path = 'logs/{}'.format(log_name)\n os.makedirs(log_path, exist_ok=True)\n with open('logs/{}/vars.json'.format(log_name), 'w') as f:\n json.dump(vars(args), f)\n if args.tokenizer == 'bpe':\n model.tokenizer.token.save(f'logs/{log_name}/BPE')\n else:\n with open('logs/{}/vocab.json'.format(log_name), 'w') as f:\n json.dump(model.tokenizer.token2id, f)\n model.args.log_path = log_path\n logger = pl.loggers.tensorboard.TensorBoardLogger('logs', name=args.name)\n params['logger'] = logger\n\n checkpoint_callback = ModelCheckpoint(\n filepath=log_path,\n save_top_k=True,\n verbose=True,\n monitor='val/perplexity',\n mode='min',\n prefix=''\n )\n params['checkpoint_callback'] = checkpoint_callback\n print(params)\n trainer = Trainer(**params)\n trainer.fit(model)\n","sub_path":"lighting.py","file_name":"lighting.py","file_ext":"py","file_size_in_byte":14246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"566729798","text":"import numpy as np\n\nimport pydoocs\n\n\nclass DoocsServerData:\n\n class InvalidDoocsAddress(Exception):\n pass\n\n class UnknownDataType(Exception):\n pass\n\n IMAGE = \"image\"\n LINE = \"line\"\n VALUE = \"value\"\n UNKNOWN = \"unknown\"\n\n def __init__(self):\n self._address = None\n self._readout = {\"type\": self.UNKNOWN, \"data\": None}\n self._data = tuple()\n\n @property\n def data(self):\n if self.type == self.VALUE:\n self._data += (self._readout[\"data\"], )\n return np.array(self._data)\n else:\n self._data = self._readout[\"data\"]\n return np.array(self._data)\n\n @property\n def type(self):\n if self._readout[\"type\"] == \"IMAGE\":\n return self.IMAGE\n elif self._readout[\"type\"] == \"SPECTRUM\":\n return self.LINE\n elif self._readout[\"type\"] in (\"FLOAT\", ):\n return self.VALUE\n else:\n return self.UNKNOWN\n\n def set_address(self, address):\n self._address = address\n self._validate()\n self._data = tuple()\n\n def _validate(self):\n try:\n self._readout = pydoocs.read(self._address)\n except Exception:\n raise DoocsServerData.InvalidDoocsAddress()\n\n if self.type == self.UNKNOWN:\n raise DoocsServerData.UnknownDataType()\n\n def update(self):\n self._readout = pydoocs.read(self._address)\n","sub_path":"justice_league/materials/doocs_server_data.py","file_name":"doocs_server_data.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"525491436","text":"DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'test.sqlite',\n }\n}\n\nSECRET_KEY = '1'\n\nINSTALLED_APPS = [\n \"django.contrib.contenttypes\",\n \"tests\",\n \"dps\",\n]\n\nROOT_URLCONF = \"tests.urls\"\n\ntry:\n from .dps_settings import *\nexcept ImportError:\n PXPAY_USERID = None\n PXPAY_KEY = 
None\n","sub_path":"tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"93333805","text":"import numpy as np\n\nimport pandas as pd\nimport pandas_datareader.data as web\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport mpl_finance as mpf\n\nclass DayTick():\n # 1日の歩み値を生成する\n def __init__(self, base_price, date):\n p_box = [-2, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2]\n v_box = [1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 6, 10, 15]\n\n # BasePriceからの初期の価格を求める\n initial_price = base_price + base_price * np.random.normal(0, 1) * 0.9 * 0.01\n\n # 前場\n morning = pd.date_range('{0:%Y-%m-%d} 09:00:00'.format(date), '{0:%Y-%m-%d} 11:30:00'.format(date), freq='T')\n df = pd.DataFrame({'p_lot': np.random.randint(0, len(p_box), len(morning)), 'v_lot': np.random.randint(0, len(v_box), len(morning)), 'price': initial_price }, index=morning)\n # 出来高を求める\n df['volume'] = df['v_lot'].map(lambda r: v_box[r] * 100)\n # 価格を求める\n df['p_lot'] = df['p_lot'].map(lambda r: p_box[r])\n for index, row in df.iterrows():\n df.at[index+1, 'price'] = min(max(df.at[index, 'price'] + row.p_lot * base_price * 0.005, base_price - base_price * 0.35), base_price + base_price * 0.35)\n # 出来高が0の行を削除\n df = df[df.volume > 1]\n # 不要な列を削除\n df = df.loc[:, ['price', 'volume']]\n # 前場の終値\n morning_price = df.iloc[-1, 0]\n\n # 後場\n afternoon = pd.date_range('{0:%Y-%m-%d} 12:30:00'.format(date), '{0:%Y-%m-%d} 15:00:00'.format(date), freq='T')\n adf = pd.DataFrame({'p_lot': np.random.randint(0, len(p_box), len(afternoon)), 'v_lot': np.random.randint(0, len(v_box), len(afternoon)), 'price': morning_price}, index=afternoon)\n # 出来高を求める\n adf['volume'] = adf['v_lot'].map(lambda r: v_box[r] * 100)\n # 価格を求める\n adf['p_lot'] = adf['p_lot'].map(lambda r: p_box[r])\n for index, row in adf.iterrows():\n adf.at[index+1, 'price'] = min(max(adf.at[index, 'price'] + row.p_lot * base_price * 0.005, base_price - base_price * 0.35), base_price + base_price * 0.35)\n # 出来高が0の行を削除\n adf = adf[adf.volume > 1]\n # 不要な列を削除\n adf = adf.loc[:, ['price', 'volume']]\n\n # 終値\n self.close = adf.iloc[-1, 0]\n # 前場と後場を統合\n self.df = df.append(adf)\n\n # OHLCV形式にリサンプリング\n def resample_to_ohlcv(self, freq='D'):\n df = pd.concat([self.df['price'].resample(freq).ohlc(), self.df['volume'].resample(freq).sum()], axis=1)\n return df\n\n","sub_path":"finance/stock/day_tick.py","file_name":"day_tick.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"94303774","text":"from django.conf.urls import url\r\nfrom . 
import views\r\nfrom django.http import HttpResponse, HttpRequest\r\nfrom django.views.i18n import JavaScriptCatalog\r\n\r\n\r\napp_name = 'website'\r\n\r\nurlpatterns = [\r\n\r\n url(r'^$', views.ItemView.as_view(), name='item-view'),\r\n url(r'items/$', views.ItemView.as_view(), name='item-view'),\r\n url(r'^items/(?P[0-9]+)/$', views.ItemDetailView.as_view(), name=\"itemdetail\"),\r\n url(r'^item/(?P[0-9]+)/$', views.zarezerwuj_item, name=\"rezerwujitem\"),\r\n url(r'^accounts/profile/', views.profile, name='profile'),\r\n url(r'^items/update/(?P[0-9]+)/$', views.wypozycz, name=\"wypozycz\"),\r\n url(r'^/items/update/profile/', views.profile, name='profile'),\r\n url(r'o-nas/$', views.o_nas, name='o-nas'),\r\n url('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),\r\n url(r'contact_form', views.contact_form),\r\n]\r\n","sub_path":"myproject/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"32129134","text":"\"\"\"bert_ranking\nUsage:\nbert_high_ranking --train ((--text=CSV | --inputs=SAVE) | (--traindata=CSV --testdata=CSV)) [--outputdir=DIR --batchsize=INT --epochs=INT]\nbert_high_ranking --rank (--text=CSV | --inputs=SAVE) [--submission=FILE --cut=VALUE] [--rawfile=FILE] [--outputdir=DIR --batchsize=INT ] [--checkpoint=CKP]\n\nOptions:\n -h Show this screen\n --text=CSV Read texts from a CSV file, will require preprocessing\n --saveinputs=SAVE Save the InputExamples into a SAVE file (pickle)\n --test Generate Inputs as one collection\n --inputs=SAVE Read InputExamples from a SAVE file (pickle)\n --outputdir=DIR Folder for BERT checkpoints [default: bert_output]\n --batchsize=INT Size of training batches [default: 8]\n --epochs=INT Number of training epochs [default: 5]\n --rank Use a trained BERT model for ranking\n --cut=VALUE The score value at which to cut\n --submission=FILE The file to submit to the competition\n --rawfile=FILE File in which to save the raw scores\n --testcases=FILE File in which to save which case_id were selected as test cases\n --checkpoint=CKP Which checkpoint to use (CKP is a number)\n\"\"\"\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport os\nimport pandas as pd\nimport bert\nimport pickle\n\nfrom tensorflow.python.client import device_lib\n\n\nfrom bert import run_classifier\nfrom bert import optimization\nfrom bert import tokenization\n\nfrom sklearn.model_selection import train_test_split\nfrom imblearn.over_sampling import RandomOverSampler\n\nfrom datetime import datetime\nfrom docopt import docopt\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'\nos.environ['TFHUB_CACHE_DIR'] = '/tmp/tfhub'\n\n\nclass BertHighRanker():\n \"\"\"\n This will get a pre-trained BERT, set it to not-trainable and add a DNN classifier with 1 hidden layer\n \"\"\"\n\n # This is a path to an uncased (all lowercase) version of BERT\n BERT_MODEL_HUB = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n SPLIT_ID = 'case_id'\n DATA_COLUMN_A = 'case_text'\n DATA_COLUMN_B = 'candidate_text'\n LABEL_COLUMN = 'candidate_is_noticed'\n LABEL_LIST = (0, 1)\n # We'll set sequences to be at most 512 tokens long.\n MAX_SEQ_LENGTH = 512\n\n def __init__(self, parameters):\n super().__init__()\n self.parameters = parameters\n tf.logging.set_verbosity(tf.logging.INFO)\n\n def create_model(self):\n pass\n\n def create_param_grid(self):\n pass\n\n @staticmethod\n def __create_model__(is_predicting, input_ids, input_mask, 
segment_ids, labels,\n num_labels):\n \"\"\"Creates a classification model.\"\"\"\n bert_module = hub.Module(\n BertHighRanker.BERT_MODEL_HUB,\n trainable=True)\n bert_inputs = dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_outputs\" for token-level output.\n output_layer = bert_outputs[\"pooled_output\"]\n\n x = tf.layers.Dense(units=256, activation=tf.nn.relu, use_bias=True)(output_layer)\n x = tf.layers.Dropout(rate=0.1)(x)\n logits = tf.layers.Dense(units=2, activation=tf.nn.relu, use_bias=True)(x)\n probs = tf.nn.softmax(logits, axis=-1)\n\n with tf.variable_scope(\"loss\"):\n predicted_labels = tf.argmax(probs, axis=-1, output_type=tf.int32) # shape [BATCH_SIZE, 1]\n\n # If we're predicting, we want predicted labels and the probabiltiies.\n if is_predicting:\n return (tf.squeeze(predicted_labels), probs)\n\n # Convert labels into one-hot encoding\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n loss = tf.losses.mean_pairwise_squared_error(labels=one_hot_labels,\n predictions=probs)\n return (loss, predicted_labels, probs)\n\n # model_fn_builder actually creates our model function\n # using the passed parameters for num_labels, learning_rate, etc.\n @staticmethod\n def __model_fn_builder__(num_labels, learning_rate, num_train_steps,\n num_warmup_steps):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, probs) = BertHighRanker.__create_model__(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, probs) = 
BertHighRanker.__create_model__(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn\n\n @staticmethod\n def __create_tokenizer_from_hub_module__():\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(BertHighRanker.BERT_MODEL_HUB)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n\n return bert.tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n\n @staticmethod\n def preprocessdata(X, mode='train'):\n \"\"\"\n Transform the dataset into Bert-edible features. If mode is 'train', then the data is split into\n train/test according to case_id, so that the cases in the test set are not represented in the train set.\n An over-samplind method is used to re-balance the datasets.\n When mode is not 'train', then the data is only transformed to features.\n\n :param X: a Pandas DataFrame with columns 'case_id', 'case_text','candidate_text', 'candidate_is_noticed'\n :param mode: What the data will be used for. If 'train', then it is split and oversampled. If not, then it is just prepared\n :return: a 2-uple (train, test) of lists of bert.run_classifier.InputFeatures\n \"\"\"\n tokenizer = BertHighRanker.__create_tokenizer_from_hub_module__()\n data = X\n\n if mode == 'train':\n # Split so that cases in the test set are not in the training set\n train_cases, test_cases = train_test_split(data['case_id'].unique(), train_size=0.75)\n train = data[data['case_id'].isin(train_cases)]\n test = data[data['case_id'].isin(test_cases)]\n\n ratio = 0.25 # Re-sample until we have the ratio for Minority_Class / Majority Class\n ros = RandomOverSampler(random_state=0, ratio=ratio)\n train_resampled, _ = ros.fit_resample(train, train[BertHighRanker.LABEL_COLUMN])\n test_resampled, _ = ros.fit_resample(test, test[BertHighRanker.LABEL_COLUMN])\n\n train_df = pd.DataFrame(train_resampled, columns=data.columns.values)\n test_df = pd.DataFrame(test_resampled, columns=data.columns.values)\n\n train_InputExamples = train_df.apply(lambda x: bert.run_classifier.InputExample(guid=None,\n text_a=x[BertHighRanker.DATA_COLUMN_A],\n text_b=x[BertHighRanker.DATA_COLUMN_B],\n label=x[BertHighRanker.LABEL_COLUMN]), axis=1)\n test_InputExamples = test_df.apply(lambda x: bert.run_classifier.InputExample(guid=None,\n text_a=x[BertHighRanker.DATA_COLUMN_A],\n text_b=x[BertHighRanker.DATA_COLUMN_B],\n label=x[BertHighRanker.LABEL_COLUMN]), axis=1)\n\n train_features = bert.run_classifier.convert_examples_to_features(\n train_InputExamples,\n BertHighRanker.LABEL_LIST,\n BertHighRanker.MAX_SEQ_LENGTH, tokenizer\n )\n test_features = bert.run_classifier.convert_examples_to_features(\n test_InputExamples,\n BertHighRanker.LABEL_LIST,\n BertHighRanker.MAX_SEQ_LENGTH,\n tokenizer\n )\n\n return train_features, test_features, test_cases\n\n else: # Data is prepared for inference\n data_InputExamples = data.apply(lambda x: bert.run_classifier.InputExample(guid=None,\n text_a=x[BertHighRanker.DATA_COLUMN_A],\n text_b=x[BertHighRanker.DATA_COLUMN_B],\n label=x[BertHighRanker.LABEL_COLUMN] if mode=='prepare' else 0), axis=1)\n 
data_features = bert.run_classifier.convert_examples_to_features(\n data_InputExamples,\n BertHighRanker.LABEL_LIST,\n BertHighRanker.MAX_SEQ_LENGTH, tokenizer\n )\n return data_features\n\n def train_model(self, X=None, train_data=None, test_data=None):\n \"\"\"\n\n :param X: a dataframe with 3 columns : 'case_text', 'candidate_text', 'candidate_is_noticed', which is preprocessed,\n or a 2-uple (train, test) of lists of bert.run_classifyer.InputFeatures\n :return:\n \"\"\"\n # Convert our train and test features to InputFeatures that BERT understands.\n if X is not None:\n if isinstance(X, pd.DataFrame):\n train_features, test_features, _ = self.preprocessdata(X)\n else:\n train_features, test_features = X\n else:\n train_features = self.preprocessdata(train_data, 'prepare')\n test_features = self.preprocessdata(test_data, 'prepare')\n tf.logging.info('Finished with examples_to_features')\n\n # Compute train and warmup steps from batch size\n # These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\n OUTPUT_DIR = self.parameters['--outputdir']\n BATCH_SIZE = int(self.parameters['--batchsize'])\n LEARNING_RATE = 1e-6\n NUM_TRAIN_EPOCHS = int(self.parameters['--epochs'])\n # Warmup is a period of time where hte learning rate\n # is small and gradually increases--usually helps training.\n WARMUP_PROPORTION = 0.1\n # Model configs\n SAVE_CHECKPOINTS_STEPS = 500\n SAVE_SUMMARY_STEPS = 100\n\n # Compute # train and warmup steps from batch size\n num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)\n num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)\n\n # Run multi-GPU : train on all GPUs, evaluate on 1 GPU\n devices = device_lib.list_local_devices()\n gpu_names = [x.name for x in devices if x.device_type=='GPU']\n train_distribute = None\n eval_distribute = None\n if len(gpu_names) == 2:\n train_distribute = tf.contrib.distribute.OneDeviceStrategy(device=gpu_names[0])\n eval_distribute = tf.contrib.distribute.OneDeviceStrategy(device=gpu_names[1])\n\n # Specify output directory and number of checkpoint steps to save\n run_config = tf.estimator.RunConfig(\n model_dir=OUTPUT_DIR,\n save_summary_steps=SAVE_SUMMARY_STEPS,\n save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,\n keep_checkpoint_max=None,\n train_distribute=train_distribute,\n eval_distribute=eval_distribute)\n\n model_fn = BertHighRanker.__model_fn_builder__(\n num_labels=len(BertHighRanker.LABEL_LIST),\n learning_rate=LEARNING_RATE,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps)\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params={\"batch_size\": BATCH_SIZE})\n\n # Create an input function for training. drop_remainder = True for using TPUs.\n train_input_fn = bert.run_classifier.input_fn_builder(\n features=train_features,\n seq_length=BertHighRanker.MAX_SEQ_LENGTH,\n is_training=True,\n drop_remainder=False)\n\n # Create an input function for training. 
drop_remainder = True for using TPUs.\n test_input_fn = bert.run_classifier.input_fn_builder(\n features=test_features,\n seq_length=BertHighRanker.MAX_SEQ_LENGTH,\n is_training=False,\n drop_remainder=False)\n\n #estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n #estimator.evaluate(input_fn=test_input_fn, steps=None)\n\n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=num_train_steps)\n eval_spec = tf.estimator.EvalSpec(input_fn=test_input_fn, steps=None, throttle_secs=60, start_delay_secs=0)\n\n tf.estimator.train_and_evaluate(estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)\n\n def rank(self, X, checkpoint=None):\n \"\"\"\n Rank all candidate cases with regards to relevance to query case.\n X should contain NB_CANDIDATES_PER_CASE rows, each one having case_text the text of the query case, and\n candidate_text the text of the candidate case\n\n :param X: a dataframe with 2 columns : 'case_text', 'candidate_text'\n :return:\n \"\"\"\n # Convert our train and test features to InputFeatures that BERT understands.\n if isinstance(X, pd.DataFrame):\n data_features = self.preprocessdata(X, mode='predict')\n else:\n data_features = X\n tf.logging.info('Finished with examples_to_features')\n\n # Compute train and warmup steps from batch size\n # These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\n OUTPUT_DIR = self.parameters['--outputdir']\n BATCH_SIZE = int(self.parameters['--batchsize'])\n\n # Specify output directory and number of checkpoint steps to save\n run_config = tf.estimator.RunConfig(model_dir=OUTPUT_DIR)\n\n model_fn = BertHighRanker.__model_fn_builder__(\n num_labels=len(BertHighRanker.LABEL_LIST),\n learning_rate=1e-3, # FAKE, will not be used\n num_train_steps=1, # FAKE, will not be used\n num_warmup_steps=1) # FAKE, will not be used\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params={\"batch_size\": BATCH_SIZE})\n\n # Create an input function for training. 
drop_remainder = True for using TPUs.\n predict_input_fn = bert.run_classifier.input_fn_builder(\n features=data_features,\n seq_length=BertHighRanker.MAX_SEQ_LENGTH,\n is_training=False,\n drop_remainder=False)\n\n checkpoint_file = None\n if checkpoint is not None:\n checkpoint_file = os.path.join(OUTPUT_DIR, 'model.ckpt-{}'.format(checkpoint))\n\n tf.logging.info('Starting with Predicting')\n predictions = list(estimator.predict(input_fn=predict_input_fn, checkpoint_path=checkpoint_file))\n scores = [p['probabilities'][1] for p in predictions]\n return scores\n\n\ndef main():\n args = docopt(__doc__, version='COLIEE v1.0')\n\n if args['--train'] is True:\n br = BertHighRanker(parameters=args)\n if args['--inputs'] is not None or args['--text'] is not None:\n if args['--inputs'] is not None:\n train_data = pickle.load(open(args['--inputs'], 'rb'))\n else:\n train_data = pd.read_csv(args['--text'])\n br.train_model(X=train_data)\n else:\n train_data = pd.read_csv(args['--traindata'])\n test_data = pd.read_csv(args['--testdata'])\n br.train_model(train_data=train_data, test_data=test_data)\n\n if args['--rank'] is True:\n br = BertHighRanker(parameters=args)\n if args['--text'] is not None:\n test_data = pd.read_csv(args['--text'])\n else:\n test_data = pickle.load(open(args['--inputs'], 'rb'))\n\n checkpoint = None\n if args['--checkpoint'] is not None:\n checkpoint = int(args['--checkpoint'])\n scores = br.rank(test_data, checkpoint=checkpoint)\n test_data['score'] = scores\n if args['--rawfile'] is not None:\n test_data[['case_id', 'candidate_id', 'score']].to_csv(args['--rawfile'], index=False)\n\n if args['--submission'] is not None:\n test_data['run'] = 'ILPS_BERT'\n final_table = test_data.sort_values(by=['case_id', 'score'], ascending=[True, False])\n with open(args['--submission'], 'w') as submission:\n for _, v in final_table[final_table['score'] > float(args['--cut'])][['case_id', 'candidate_id', 'run']].iterrows():\n submission.write('{:03d} {:03d} {}\\n'.format(v['case_id'], v['candidate_id'], v['run']))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Task_01/bert_high_ranking.py","file_name":"bert_high_ranking.py","file_ext":"py","file_size_in_byte":21000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"507201494","text":"#!/usr/bin/env python3\n\nimport map_generator_strategy\n\nclass DungeonMap():\n\t'''\n\tA dungeon map\n\t'''\n\n\tdef __init__(self, grid, start):\n\t\t'''\n\t\tInitialize a map\n\n\t\tArguments\n\t\t---------\n\t\tgrid - a 2-dimensional array of grid tiles\n\t\tstart - starting coordinates (entrance)\n\n\t\t'''\n\t\tself.grid = grid\n\t\tself.start = start\n\n\tdef __str__(self):\n\t\t'''\n\t\tPrint map as ASCII\n\t\t'''\n\t\ts = \"\"\n\t\tl = [\"\".join([s, \"\".join([str(c) for c in l])]) for l in self.grid]\n\t\treturn \"\\n\".join(l)\n\n\t\t\n\nif __name__ == '__main__':\n\n\tm = map_generator_strategy.hard_coded()\n\tprint(m)\n\n","sub_path":"src/dungeonmap.py","file_name":"dungeonmap.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"279282821","text":"\"\"\"\r\n ██████╗██╗██████╗ ██╗ ██╗███████╗██╗ ██╗\r\n██╔════╝██║██╔══██╗██║ ██║██╔════╝╚██╗ ██╔╝\r\n██║ ██║██████╔╝███████║█████╗ ╚████╔╝ \r\n██║ ██║██╔═══╝ ██╔══██║██╔══╝ ╚██╔╝ \r\n╚██████╗██║██║ ██║ ██║███████╗ ██║ \r\n© Brandon Skerritt\r\nGithub: brandonskerritt\r\n\r\nClass calculates the Chi squared score\r\n\"\"\"\r\nfrom string 
import punctuation\r\nfrom numpy import std\r\nimport sys\r\n\r\nsys.path.append(\"..\")\r\ntry:\r\n import mathsHelper as mh\r\nexcept ModuleNotFoundError:\r\n import ciphey.mathsHelper as mh\r\nfrom loguru import logger\r\nimport cipheycore\r\nimport cipheydists\r\n\r\n# I had a bug where empty string was being added to letter freq dictionary\r\n# this solves it :)\r\npunctuation += \" \"\r\nNUMBERS = \"1234567890\"\r\n\r\n\r\nclass chiSquared:\r\n \"\"\"Class that calculates the Chi squared score and tries to work out what language it might be\r\n to add a new language, go into this class (/app/languageChecker/chisquared.py)\r\n Find \"self.languages\" and add it to the dictionary like \"German\":[0.789, 0.651...]\r\n The list is the letter frequency ordered in alphabetical order \"\"\"\r\n\r\n def __init__(self):\r\n self.language = cipheydists.get_dist(\"twist\")\r\n self.average = 0.0\r\n self.totalDone = 0.0\r\n self.oldAverage = 0.0\r\n self.mh = mh.mathsHelper()\r\n self.highestLanguage = \"\"\r\n self.totalChi = 0.0\r\n self.totalEqual = False\r\n self.chisAsaList = []\r\n\r\n # these are settings that may impact how the program works overall\r\n self.chiSquaredSignificanceThreshold = 0.001 # The p value that we reject below\r\n\r\n def checkChi(self, text):\r\n if text is None:\r\n return False\r\n if type(text) is bytes:\r\n try:\r\n text = text.decode()\r\n except:\r\n return None\r\n \"\"\"Checks to see if the Chi score is good\r\n if it is, it returns True\r\n Call this when you want to determine whether something is likely to be Chi or not\r\n \r\n Arguments:\r\n * text - the text you want to run a Chi Squared score on\r\n \r\n Outputs:\r\n * True - if it has a significantly lower chi squared score\r\n * False - if it doesn't have a significantly lower chi squared score\r\n \"\"\"\r\n # runs after every chi squared to see if it's 1 significantly lower than averae\r\n # the or statement is bc if the program has just started I don't want it to ignore the\r\n # ones at the start\r\n analysis = cipheycore.analyse_string(text)\r\n chisq = cipheycore.chisq_test(analysis, self.language)\r\n logger.debug(f\"Chi-squared p-value is {chisq}\")\r\n return chisq > self.chiSquaredSignificanceThreshold\r\n\r\n def getMostLikelyLanguage(self):\r\n \"\"\"Returns what the most likely language is\r\n Only used when the threshold of checkChi is reached\"\"\"\r\n return self.highestLanguage\r\n","sub_path":"ciphey/languageCheckerMod/chisquared.py","file_name":"chisquared.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"395709081","text":"# coding: utf-8\nimport logging\nimport sys\n\nfrom requests.exceptions import ConnectionError, HTTPError, Timeout\nimport requests\n\nUSER_AGENT = (\n 'Mozilla/5.0 (X11; Linux x86_64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko)'\n ' Chrome/40.0.2214.115 Safari/537.36'\n)\nHEADERS = {'User-Agent': USER_AGENT}\n\n\ndef get_logger(log_level='INFO'):\n logger = logging.getLogger(__name__.split('.')[0])\n logger.setLevel(getattr(logging, log_level))\n FORMAT = '[%(levelname)s]%(filename)s:%(asctime)s %(message)s'\n DATE_FORMAT = '%H:%M:%S'\n formatter = logging.Formatter(fmt=FORMAT, datefmt=DATE_FORMAT)\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger\n\nlogger = get_logger()\n\n\ndef download_html(url):\n \"\"\"\n Return:\n html text if download success, else return None\n \"\"\"\n if not 
url.startswith('http'):\n if url.startswith('www.'):\n url = 'http://' + url\n else:\n url = 'http://www.' + url\n try:\n return get_response(url)\n except (ConnectionError, HTTPError, Timeout) as e:\n logger.error(e)\n # 重试一次\n return get_response(url)\n\n\ndef get_response(url):\n logger.info(\"Get %s\" % url)\n res = requests.get(url, headers=HEADERS)\n if res.status_code in (200, 304):\n return res.text\n logger.error(\"Get %s failed: %d\" % (url, res.status_code))\n return None\n","sub_path":"kindle/read_offline/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"403613479","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Mike\n# @Contact : 597290963@qq.com\n# @Time : 2021/2/15 8:51\n# @File : FindMaxConsecutiveOnes.py\nfrom typing import List\n\n\"\"\"\n给定一个二进制数组, 计算其中最大连续1的个数。\n\n示例 1:\n\"\"\"\n\n\nclass Solution:\n\n def findMaxConsecutiveOnes(self, nums: List[int]) -> int:\n \"\"\"\n 动态规划\n :param nums:\n :return:\n \"\"\"\n ans = 0 if nums[0] != 1 else 1\n max_len = ans\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1] == 1:\n ans += 1\n elif nums[i]:\n ans = 1\n else:\n ans = 0\n max_len = max(max_len, ans)\n\n return max_len\n\n\nif __name__ == '__main__':\n print(Solution().findMaxConsecutiveOnes([1, 1, 0, 1, 1, 1]))\n","sub_path":"datastructure/daily_topic/FindMaxConsecutiveOnes.py","file_name":"FindMaxConsecutiveOnes.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"427383894","text":"from moviepy.editor import VideoFileClip, CompositeVideoClip\nimport os\nimport skimage\n\n\nclass InstaVideoConverter:\n def __init__(self, video, start, end, path='.'):\n self.video = video\n self.vc = VideoFileClip(self.video)\n self.start = start\n self.end = end\n self.path = path\n assert start <= end <= start + 60\n\n def blur(self, image):\n \"\"\" Returns a blurred (radius=2 pixels) version of the image \"\"\"\n return skimage.filters.gaussian(image.astype(float), sigma=6)\n\n def get_crop(self, size, ratio):\n length = size // 100 * ratio\n start = (size - length) // 2\n end = start + length\n return start, end\n\n def crop_ratio(self, clip, ratio):\n w, h = clip.size\n w_crop = self.get_crop(w, ratio)\n h_crop = self.get_crop(h, ratio)\n return clip.crop(x1=w_crop[0], y1=h_crop[0], x2=w_crop[1], y2=h_crop[1])\n\n def get_convert_image(self):\n vc = VideoFileClip(self.video)\n vc.save_frame(f'image_{self.video[:-4]}.jpeg', t=10)\n\n def convert_blur(self):\n subclip = self.vc.subclip(self.start, self.end)\n width, height = subclip.size\n output_file = os.path.join(self.path, 'insta_blur_' + os.path.basename(self.video))\n r = 60\n if width > height:\n margin_size = (width - height) // 2\n clip1 = subclip.margin(top=margin_size, bottom=margin_size)\n clip2 = self.crop_ratio(subclip.crop(x1=0, y1=0, x2=width, y2=margin_size), r).fl_image(self.blur).resize(\n width=width, height=margin_size)\n clip2.audio = None\n clip3 = self.crop_ratio(subclip.crop(x1=0, y1=height - margin_size, x2=width, y2=height), r).fl_image(\n self.blur).resize(width=width, height=margin_size)\n clip3.audio = None\n video = CompositeVideoClip([clip1,\n clip2.set_pos((\"left\", \"top\")),\n clip3.set_pos((0, height + margin_size))])\n\n video.write_videofile(output_file, temp_audiofile=\"temp-audio.m4a\", remove_temp=True, codec=\"libx264\",\n 
audio_codec=\"aac\")\n\n def convert_black(self):\n subclip = self.vc.subclip(self.start, self.end)\n width, height = subclip.size\n output_file = os.path.join(self.path, 'insta_black_' + os.path.basename(self.video))\n\n if width > height:\n margin_size = (width - height) // 2\n video = subclip.margin(top=margin_size, bottom=margin_size)\n video.write_videofile(output_file, temp_audiofile=\"temp-audio.m4a\", remove_temp=True, codec=\"libx264\",\n audio_codec=\"aac\")\n\n def convert_white(self):\n subclip = self.vc.subclip(self.start, self.end)\n width, height = subclip.size\n output_file = os.path.join(self.path, 'insta_white_' + os.path.basename(self.video))\n\n if width > height:\n margin_size = (width - height) // 2\n video = subclip.margin(top=margin_size, bottom=margin_size, color=(255, 255, 255))\n video.write_videofile(output_file, temp_audiofile=\"temp-audio.m4a\", remove_temp=True, codec=\"libx264\",\n audio_codec=\"aac\")\n","sub_path":"service/insta_video_converter.py","file_name":"insta_video_converter.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"186705836","text":"# -*- coding: utf-8 -*-\n\nfrom django.forms import widgets\nfrom django.template.loader import render_to_string\nfrom django.utils.html import escape\nfrom django.utils.safestring import mark_safe\nfrom django.utils.datastructures import MultiValueDict, MergeDict\nfrom shop.models import ItemGroup,Item,Producer,MDContents,MDContentsForm\nfrom order.models import CustomerOrder\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.forms.models import model_to_dict\nfrom member.models import User\nfrom delivery.models import DeliveryPolicy\n\nfrom functools import partial\nimport random\n\nclass HTMLEditorWidget(widgets.Widget):\n def render(self, name, value, attrs=None):\n if attrs is None: attrs = {}\n w = attrs.get('width', '100%')\n h = attrs.get('height','300px')\n return mark_safe(u'' % (name, w, h, escape(value)))\n\n\nclass ScoreFieldWidget(widgets.Widget):\n def render(self, name, value, attrs=None):\n if attrs is None: attrs={}\n html = render_to_string('admin/delivery/scorefield_widget.html',dictionary={'name':name,'value':escape(value)})\n return mark_safe(html)\n\n\nclass PenaltyFieldWidget(widgets.Widget):\n def render(self,name,value,attrs=None):\n if attrs is None: attrs = {}\n html = render_to_string('admin/delivery/penaltyfield_widget.html',dictionary={'name':name,'value':escape(value)})\n return mark_safe(html)\n\n\nclass ItemImageWidget(widgets.Select):\n def render(self, name, value, attrs=None,choices=()):\n if attrs is None : attrs = {}\n try:\n item = Item.objects.select_related().get(id=value)\n except ObjectDoesNotExist:\n return super(ItemImageWidget,self).render(name,value,attrs=attrs,choices=choices)\n else:\n try:\n image_url = item.item_group.desc_image_thumbnail.url\n except ValueError:\n image_url = None\n context = {'name':name,'value':escape(value)}\n if image_url is not None:\n context['image'] = image_url\n html = render_to_string('admin/order/item_image_widget.html',dictionary=context)\n return mark_safe(html)\n\n\nclass ProducerInfoWidget(widgets.Widget):\n def render(self, name, value, attrs=None):\n if attrs is None : attrs = {}\n try:\n producer = Producer.objects.get(id=value)\n except ObjectDoesNotExist:\n producer = Producer.objects.all()[0]\n \n \n html = render_to_string('admin/shop/producer_widget.html',\n 
dictionary={'name':name,'value':value,'md_name':producer.md_name,'producer':producer})\n return html\n \n \nclass ProducerInfoWidgetInline(widgets.Select):\n def render(self, name, value, attrs=None, choices=()):\n if attrs is None : attrs = {}\n try:\n producer = Producer.objects.get(id=value)\n except ObjectDoesNotExist:\n return super(ProducerInfoWidgetInline,self).render(name,value,attrs=attrs,choices=choices)\n else:\n html = render_to_string('admin/shop/producer_widget.html',\n dictionary={'name':name,'value':value,'md_name':producer.md_name,'producer':producer})\n return html\n\n\nclass CustomerOrderDetailWidget(widgets.Widget):\n def render(self, name, value, attrs=None):\n if attrs is None : attrs = {}\n try:\n corder = CustomerOrder.objects.get(id=value)\n except ObjectDoesNotExist:\n return u'주문 오류'\n else:\n oitems = corder.ordereditem_set.all()\n try:\n head_oitem = oitems[0]\n except IndexError:\n desc_text = u''\n else:\n desc_text = head_oitem.name_customer + u'외 ' + unicode(oitems.count() -1) + u'건'\n html = render_to_string('admin/order/customerorder_detail_widget.html',\n dictionary={'name':name,'value':value,'corder':corder,'desc_text':desc_text})\n return html\n\n\nclass CategoryWidget(widgets.Widget):\n mptt_model = None\n html_id = ''\n use_multiple=True\n widget_js_src = (u\"(function(){\\n\"\n u\"$('#DIVID')[0].append_entry = function(num){\\n\"\n u\" var use_multiple=MULTIPLE;\\n\"\n u\" var desc=$('#DIVID')[0].get_prop(num);\\n\"\n u\" var html= '
    ';\\n\"\n                     u\"  html=html + '삭제';\\n\"\n                     u\"  html=html + ''+desc+''\\n\"\n                     u\"  html=html + ''\\n\"\n                     u\"  html=html + '
    ';\\n\"\n                     u\"  if (!use_multiple) $('#DIVLIST').html('');\\n\"\n                     u\"  if ($('#DIVID_entry_'+num).length == 0){ $('#DIVLIST').append(html); }\"\n                     u\"  };\\n\"\n                     u\"$('#DIVID')[0].remove_entry = function(num){\\n\"\n                     u\"  var use_multiple=MULTIPLE;\\n\"\n                     u\"  $('#DIVID_entry_'+num).remove();\\n\"\n                     u\"  };\\n\"\n                     u\"$('#DIVID')[0].set_prop = function(num, desc) {\\n\"\n                     u\"  if ($('#DIVID')[0].data == undefined) $('#DIVID')[0].data={}; \\n\"\n                     u\"  $('#DIVID')[0].data[num] = desc;\\n\"\n                     u\"  }\\n\"\n                     u\"$('#DIVID')[0].get_prop = function(num) { return $('#DIVID')[0].data[num]; }\\n\"\n                     u\"})();\\n\")\n\n    def __init__(self, attrs=None):\n        if self.mptt_model is None:\n            raise Exception('mptt_model is not set')\n        super(CategoryWidget, self).__init__(attrs)\n\n    def render_entries(self, queryset, inherited_name='', num_to_desc=None):\n        res = '
      '\n        js = ''\n        for entry in queryset:\n            res += '
    • %s' % (self.html_id, entry.id, entry.name)\n            cur_inherited_name = inherited_name\n            if cur_inherited_name != '': cur_inherited_name += ' > '\n            cur_inherited_name += entry.name\n\n            if isinstance(num_to_desc, dict): num_to_desc[entry.id] = cur_inherited_name\n\n            line = '$(\"#%s\")[0].set_prop(%d, \"%s\")\\n' % (self.html_id, entry.id, cur_inherited_name)\n            line += '$(\"#%s_%d > a\").on(\"dblclick\", function(e){ if (e.target==this){ $(\"#%s\")[0].append_entry(%d); }});\\n'\n\n            js += line % (self.html_id, entry.id, self.html_id, entry.id )\n\n            if entry.children.count() > 0:\n                tmpjs, tmpres = self.render_entries(entry.children.all(), cur_inherited_name, num_to_desc)\n                js += tmpjs\n                res += tmpres\n        res += '
    '\n        return js, res\n\n    def render(self, name, value, attrs=None): \n        html_id = u'category_widget_%05d' % random.randint(0, 10000)\n        self.html_id = html_id\n\n        entry_js, entry_html = self.render_entries(self.mptt_model.objects.root_nodes())\n        html = u'
    \\n' % (self.html_id)\n        html += u'
    \\n'\n        html += u'
    ' % (self.html_id)\n        html += entry_html \n        html +=u'
    \\n'\n        html +=u'
    \\n'\n\n        html += u'
    \\n' % html_id\n        html += u'
    \\n'\n        html += u'
    \\n'\n\n default_option = u'{\"plugins\":[\"themes\",\"html_data\",\"ui\",\"crrm\"]}'\n options = attrs.get(u'options', default_option) if isinstance(attrs, dict) else default_option\n\n js = u\"$(function(){ $('#%s_tree').jstree(%s).on('loaded.jstree', function(){ $(this).jstree('open_all'); }); });\" % (html_id, options)\n js += u\"(function(){\"+ entry_js + u\"})();\"\n \n commonjs = self.widget_js_src.replace(u'DIVID', html_id)\n commonjs = commonjs.replace(u'DIVLIST', html_id+u'_list')\n commonjs = commonjs.replace(u'NAME', name)\n commonjs = commonjs.replace(u'MULTIPLE', 'true' if self.use_multiple else 'false')\n\n if not isinstance(value, list): value=[value]\n \n initjs = '(function(){\\n'\n for num in value:\n if not num is None:\n initjs += '$(\"#%s\")[0].append_entry(%d);\\n' % (html_id, int(num))\n initjs += '})();\\n'\n\n html += u''\n\n return mark_safe(html)\n\n \n def value_from_datadict(self, data, files, name):\n if self.use_multiple:\n if isinstance(data, (MultiValueDict, MergeDict)):\n return data.getlist(name)\n return data.get(name, None)\n else:\n if isinstance(data, (MultiValueDict, MergeDict)):\n lst = data.getlist(name)\n if len(lst)>0: return int(lst[0])\n else: return None\n return data.get(name, None)\n\nclass SetEmoneyWidget(widgets.Widget):\n def render(self, name, value, attrs=None):\n if attrs is None: attrs = {}\n if value is None: value = ''\n html = render_to_string('admin/shop/set_emoney_widget.html', dictionary={'name':name, 'value':escape(value)})\n return mark_safe(html)\n\nclass MDItemOptionWidget(widgets.Widget):\n def render(self, name, value, attrs=None):\n if attrs is None: attrs = {}\n if value is None: value = ''\n html = render_to_string('admin/shop/md_item_option_widget.html',dictionary={'name':name,'value':escape(value)})\n return mark_safe(html)\n\nclass MDContentsWidget(widgets.Select):\n def render(self, name, value, attrs=None, choices=()):\n if attrs is None: attrs={}\n try:\n if value != '': mdcontents = MDContentsForm(data=model_to_dict(MDContents.objects.get(id=value)))\n else: return super(MDContentsWidget,self).render(name,value,attrs=attrs,choices=choices)\n except ObjectDoesNotExist:\n return super(MDContentsWidget,self).render(name,value,attrs=attrs,choices=choices)\n else:\n widget_html = render_to_string('admin/shop/md_contents_widget.html',\n dictionary={'name': name, 'value': escape(value), 'mdcontents': mdcontents})\n select_widget = super(MDContentsWidget,self).render(name,value,attrs=attrs,choices=choices)\n return mark_safe(widget_html+select_widget)\n\nclass UserDetailWidget(widgets.Select):\n def render(self, name, value, attrs=None, choices=()):\n if attrs is None : attrs= {}\n try:\n user = User.objects.get(id=value)\n except ObjectDoesNotExist as e:\n return super(UserDetailWidget,self).render(name,value,attrs=attrs,choices=choices)\n else:\n return mark_safe(render_to_string('admin/member/user/userdetail_widget.html',\n dictionary={'name': name, 'value': escape(value), 'user': user}))\n\nclass DeliveryPolicyDetailWidget(widgets.Select):\n def render(self, name, value, attrs=None, choices=()):\n if attrs is None : attrs = {}\n try:\n dpolicy = DeliveryPolicy.objects.get(id=value)\n except ObjectDoesNotExist as e:\n return super(DeliveryPolicyDetailWidget, self).render(name,value,attrs=attrs,choices=choices)\n else:\n\n return mark_safe(render_to_string('admin/delivery/dpolicy_detail_widget.html',\n dictionary={'name': name, 'value': escape(value), 'dpolicy': 
dpolicy}))\n\n\n\n\n\n","sub_path":"src/hellonature/hellonature/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":11484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"477549611","text":"'''\njoint chain class\n'''\n\nimport maya.cmds as mc\nfrom . import checks, utils, Jnt\nreload(utils)\n\n\n\nclass jntChain():\n\tdef __init__(\n\t\tself,\n\t\tprefix='c',\n\t\tname='joint',\n\t\tsuffix='jnt',\n\t\tposList=[(0,0,0)]\n\t\t):\n\n\t\tself.prefix = prefix\n\t\tself.name = name\n\t\tself.suffix = suffix\n\t\tself.posList = posList\n\n\t\tself.jnts = []\n\n\tdef build(self):\n\t\t# create joints from pos list\n\t\tfor p in self.posList:\n\t\t\t# clear selection to create joints in root\n\t\t\tmc.select(cl=1)\n\n\t\t\t# get unique name\n\t\t\tfullName = checks.uniqueName(self.prefix, self.name, self.suffix)\n\t\t\tnameSplit = fullName.split('_')\n\t\t\tjnt = Jnt.Jnt(side=nameSplit[0],\n\t\t\t\t\t\tname=nameSplit[1],\n\t\t\t\t\t\tsuffix=nameSplit[-1],\n\t\t\t\t\t\tpos=p)\n\t\t\t# jnt = mc.joint(name=fullName, p=p)\n\t\t\tself.jnts.append(jnt.jnt)\n\n\t\tutils.iterParenting(self.jnts)\n\t\t# orient chain\n\t\tjnt.orientChain()\n\n\tdef displayAxis(self, jnts=[]):\n\t\t# toggle display axis\n\t\tfor jnt in jnts:\n\t\t\tattr = mc.getAttr('{0}.displayLocalAxis'.format(jnt))\n\t\t\tmc.setAttr('{0}.displayLocalAxis'.format(jnt), not attr)\n\n\tdef _unparentChild(self, jnt):\n\t\t# list children of joint\n\t\tchildren = mc.listRelatives(jnt, c=1, pa=1) or []\n\t\t# return unparented child joint\n\t\treturn [ mc.parent(child, w=1)[0] for child in children ]\n\n\tdef _reparentChild(self, jnt, children):\n\t\t# reparent child joint too given joint\n\t\tfor child in children:\n\t\t\tmc.parent(child, jnt)\n","sub_path":"ARig/jointChain.py","file_name":"jointChain.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"627016910","text":"\"\"\"\r\nYou are given a string . 
\r\nThe string contains only lowercase English alphabet characters.\r\n\r\nYour task is to find the top three most common characters in the string .\r\n\r\nInput Format\r\n\r\nA single line of input containing the string .\r\n\r\nConstraints\r\n\r\n\r\nOutput Format\r\n\r\nPrint the three most common characters along with their occurrence count each on a separate line.\r\nSort output in descending order of occurrence count.\r\nIf the occurrence count is the same, sort the characters in ascending order.\r\n\r\nSample Input\r\n\r\naabbbccde\r\nSample Output\r\n\r\nb 3\r\na 2\r\nc 2\r\n\"\"\"\r\n\r\nN = input()\r\nN = list(N)\r\n\r\ndef hash_i(char):\r\n return ord(char)- ord('a')\r\n\r\ndef find_max(iterable):\r\n for i in range(3):\r\n max_index = (max(iterable),iterable.index(max(iterable)))\r\n iterable[max_index[1]] = 0\r\n yield max_index\r\n\r\ndef count_common(N):\r\n arr = [0] * 26\r\n for char in N:\r\n pos = hash_i(char)\r\n arr[pos] += 1\r\n \r\n first_max=next(find_max(arr))\r\n second_max=next(find_max(arr))\r\n third_max=next(find_max(arr))\r\n\r\n print(chr(first_max[1]+ord('a')),first_max[0])\r\n print(chr(second_max[1]+ord('a')),second_max[0])\r\n print(chr(third_max[1]+ord('a')),third_max[0])\r\n\r\ncount_common(N)\r\n","sub_path":".py/abstract_classes.py","file_name":"abstract_classes.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"556973766","text":"import time\nimport os\nimport random\nimport datetime\nimport numpy\nfrom pylsl import StreamInfo, StreamOutlet, local_clock\n\n###FIRST DEFINE OUR PARTICIPANT NUMBER###\n#part_num = '001'\npart_num= str(numpy.genfromtxt('/home/pi/Experiments/Familiarity_Oddball/Participant_Number', dtype='str'))\n\n###setup variable related to pic and trial number here###\ntrials = 40\nlow_rate = 0.8\nhigh_rate = 0.2\n\ntotal_parts = 2\nself_count = 5\nfam_count = 5\nplace_count = 10\n\n###create our stream variables###\ninfo = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')\n\n###next make an outlet to record the streamed data###\noutlet = StreamOutlet(info)\n\n###so, we need 2 master lists\n###one that tells us if the current trial will be a standard or target\n###another that will tell use if we need to show a self, family, or place image\n\n###first we determine how many of each image we need\nself_images = (numpy.zeros((2,total_parts*self_count)))+1\nfam_images = (numpy.zeros((2,total_parts*fam_count)))+2\nplace_images = (numpy.zeros((2,total_parts*place_count)))+3\n\n###now loop through and define our targets and standards###\n\nfor i_img in range(total_parts*self_count):\n if i_img < ((total_parts*self_count)*low_rate):\n self_images[1][i_img] = 1\n elif i_img >= ((total_parts*self_count)*low_rate):\n self_images[1][i_img] = 2\n\nfor i_img in range(total_parts*fam_count):\n if i_img < ((total_parts*fam_count)*low_rate):\n fam_images[1][i_img] = 1\n elif i_img >= ((total_parts*fam_count)*low_rate):\n fam_images[1][i_img] = 2\n\nfor i_img in range(total_parts*10):\n if i_img < ((total_parts*10)*low_rate):\n place_images[1][i_img] = 1\n elif i_img >= ((total_parts*10)*low_rate):\n place_images[1][i_img] = 2\n\n###here we will combine our three matrices### \nimage_order = numpy.concatenate((self_images,fam_images,place_images),axis = 1)\n\n###convert them to a list, pair each of the elements, and then shuffle the order###\nimage_order = list(zip(image_order[0],image_order[1]))\nrandom.shuffle(image_order)\nimage_order, 
trial_order = zip(*image_order)\n\n###setup variables to record times###\ntrig_time = []\ndelay_length = []\npart_list = []\nimage_list = []\n\n###wait for button press to start experiment###\nvid_start = time.time()\ntimestamp = local_clock()\ntime.sleep(10)\noutlet.push_sample([3], timestamp)\n\nfor i_pic in range(trials):\n ###wait for a random amount of time between images###\n delay = ((random.randint(0,500))*0.001)\n delay_length.append(delay)\n ###determine if the trial is a standrad or target###\n ###trial is a target###\n if trial_order[i_pic] == 2:\n ###change current part number to a string of the appropriate format###\n if int(part_num) > 9:\n part_order_temp = '0' + str(int(part_num))\n else:\n part_order_temp = '00' + str(int(part_num))\n\n ###now define our trigger to send with LSL###\n trigger = 2\n ###trial is a standard###\n elif trial_order[i_pic] == 1:\n ###pick a random number between 1 and our total number of participants###\n part_order_temp = random.randint(1,total_parts)\n ###since standards are images from other parts, make sure we are not using the current part number###\n while int(part_num) == part_order_temp:\n part_order_temp = random.randint(1,total_parts)\n ###change part num to an appropriate format###\n if part_order_temp > 9:\n part_order_temp = '0' + str(int(part_order_temp))\n else:\n part_order_temp = '00' + str(int(part_order_temp))\n\n ###now define our trigger to send with LSL###\n trigger = 1\n ###now determine the type of image, and image number, we are showing###\n if image_order[i_pic] == 1:\n image_order_temp = 'S'\n pic_order_temp = random.randint(1,self_count)\n elif image_order[i_pic] == 2:\n image_order_temp = 'F'\n pic_order_temp = random.randint(1,fam_count)\n elif image_order[i_pic] == 3:\n image_order_temp = 'P'\n pic_order_temp = random.randint(1,place_count)\n\n ###change part num to an appropriate format###\n if pic_order_temp > 9:\n pic_order_temp = '0' + str(int(pic_order_temp))\n else:\n pic_order_temp = '00' + str(int(pic_order_temp))\n\n #print('/home/pi/Experiments/Familiarity_Oddball/Images/' + part_order_temp + '_' + image_order_temp + '_' + pic_order_temp + '_image.jpg')\n print(trigger)\n ###record our part and image numbers###\n part_list.append(part_order_temp)\n image_list.append(pic_order_temp)\n ###triggers###\n timestamp = local_clock()\n outlet.push_sample([trigger], timestamp)\n trig_time.append(time.time() - vid_start) \n ###wait for a random amount of time###\n time.sleep(delay)\n\nfilename = \"%s_all_familiarity_p3_trigs_muse\"%(part_num)\nfilename_part = (\"/home/pi/Experiments/Familiarity_Oddball/Data/LSL/Muse/Muse_Recorded_Trig_Info/%s.csv\")%filename\n\nnumpy.savetxt(filename_part, (part_list,image_list,image_order,trial_order,trig_time,delay_length), delimiter=',',fmt=\"%s\") \n\ntime.sleep(5)\nos.remove(\"/home/pi/Experiments/Familiarity_Oddball/Stop_EEG.csv\")\n","sub_path":"Familiarity_Oddball/old/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"474662986","text":"import os\n\nfrom aiohttp_swagger import setup_swagger\nfrom aiopg import sa\nfrom closure_table import (\n auth,\n comments,\n)\nfrom closure_table.settings import DATABASE\n\nBASE_PATH = os.path.abspath(os.path.dirname(__file__))\n\n\ndef setup_app(app):\n setup_db(app)\n setup_routes(app)\n setup_middlewares(app)\n setup_swagger(app, swagger_from_file=os.path.join(BASE_PATH, 'swagger.yaml'))\n\n\ndef 
setup_middlewares(app):\n auth.middlewares.setup_middlewares(app)\n\n\ndef setup_routes(app):\n auth.routes.setup_routes(app)\n comments.routes.setup_routes(app)\n\n\ndef setup_db(app):\n app.on_startup.append(setup_pg)\n app.on_cleanup.append(close_pg)\n\n\nasync def setup_pg(app):\n engine = await sa.create_engine(DATABASE)\n app['db'] = engine\n\n\nasync def close_pg(app):\n app['db'].close()\n await app['db'].wait_closed()\n","sub_path":"src/closure_table/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"297374548","text":"\"\"\"A setuptools based setup module.\n\"\"\"\n\nfrom setuptools import setup\n\n\n__author__ = 'robodasha'\n__email__ = 'damirah@live.com'\n\n\nwith open('README.md') as fp:\n description = fp.read()\n\nsetup(\n name='skeleton',\n version='0.1',\n description='Project skeleton',\n long_description=description,\n license='MIT',\n url='https://github.com/robodasha/skeleton',\n author='Drahomira Herrmannova',\n author_email='damirah@live.com',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development',\n 'Topic :: Utilities'\n ],\n keywords='skeleton setup',\n packages=['skeleton'],\n install_requires=['wheel', 'configparser', 'ordereddict']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"289277326","text":"from flask import Flask, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom send_mail import send_mail\n\napp = Flask(__name__)\n\nENV = 'prod'\n\nif ENV == 'dev':\n app.debug = True\n app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:qwert@localhost/simplebankingsystem'\nelse:\n app.debug = False\n app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://ptyrktzegcnkku:e2aa7f41be85c8d1d9248f5e3e33f37033b40254e5bfc35a0b9f6830ffb8e857@ec2-54-157-66-140.compute-1.amazonaws.com:5432/dcc2ophr2g6god'\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\n\nclass Customers(db.Model):\n __tablename__ = 'Customers'\n id = db.Column(db.Integer, primary_key=True)\n customer = db.Column(db.String(200), unique=True)\n email = db.Column(db.String(200))\n balance = db.Column(db.Integer)\n\n def __init__(self, customer, email, balance):\n self.customer = customer\n self.email = email\n self.balance = balance\n \n\nclass Transfers(db.Model):\n __tablename__ = 'Transfers'\n id = db.Column(db.Integer, primary_key=True)\n customerfrom = db.Column(db.String(200), unique=False)\n customerto = db.Column(db.String(200))\n amount = db.Column(db.Integer)\n\n def __init__(self, customerfrom, customerto, amount):\n self.customerfrom = customerfrom\n self.customerto = customerto\n self.amount = amount\n \n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/customers')\ndef cust():\n if db.session.query(Customers).count() == 0:\n #print(\"lol\")\n cus=[\"customer1\",\"customer2\",\"customer3\",\"customer4\",\"customer5\",\"customer6\",\"customer7\",\"customer8\",\"customer9\",\"customer10\"]\n 
em=['c1@email.com','c2@email.com','c3@email.com','c4@email.com','c5@email.com','c6@email.com','c7@email.com','c8@email.com','c9@email.com','c10@email.com']\n bal=[3836,7347,9090,7654,9090,122,3245,4322,4567,8766]\n for i in range(10):\n customer=cus[i]\n email=em[i]\n balance=bal[i]\n data = Customers(customer,email,balance)\n db.session.add(data)\n db.session.commit()\n users = db.session.query(Customers).all() \n return render_template('customers.html',users=users)\n\n@app.route('/transfers')\ndef transfer():\n tfs = db.session.query(Transfers).all()\n return render_template('transfers.html',tfs=tfs)\n\n@app.route('/customers/')\ndef profile(customer_name):\n\n return render_template('customer_name.html',c_name=customer_name)\n\n\n@app.route('/customers//submit', methods=['POST'])\ndef submit(customer_name):\n if request.method == 'POST':\n customerfrom = customer_name\n customerto = request.form['customerto']\n balance = int(request.form['balance'])\n \n # print(customer, dealer, rating, comments)\n if customerfrom == '' or customerto == '':\n return render_template('index.html', message='Please enter required fields')\n user1 = db.session.query(Customers).filter_by(customer = customerfrom).first()\n #print(user1.balance)\n user2 = db.session.query(Customers).filter_by(customer = customerto).first()\n if user1.balance >= balance:\n \n user1.balance = user1.balance - balance\n db.session.commit()\n user2.balance = user2.balance + balance\n db.session.commit()\n #print(\"done\")\n #session.commit()\n #session.commit()\n #user2.balance = user2.balance + balance\n #session.user1.commit()\n \n data = Transfers(customerfrom, customerto, balance)\n db.session.add(data)\n db.session.commit()\n return render_template('index.html', message='funds transferred succesfully')\n else:\n return render_template('index.html', message='Insuffiecient funds')\n\n # if db.session.query().filter(Feedback.customer == customer).count() == 0:\n # data = Feedback(customer, dealer, rating, comments)\n # db.session.add(data)\n # db.session.commit()\n # send_mail(customer, dealer, rating, comments)\n #return render_template('success.html')\n # return render_template('index.html', message='You have already submitted feedback') ***/\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"123113176","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 07 14:46:38 2017\n\n@author: Frederik Vardinghus\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 10000\n\nT = np.random.uniform(0,3,N)\nW = np.random.uniform(0,1,N)\nZ = T + W\n\ndef MMSE(z):\n if 0 <= z < 1:\n return 0.5*z\n elif 1 <= z <= 3:\n return z-0.5\n elif 3 < z <= 4:\n return 0.5*z+1\n\nhat = np.zeros(N)\nfor i in range(N):\n hat[i] = MMSE(Z[i])\n\nbias = 0\nMSE = 0\nfor i in range(N):\n bias += T[i] - hat[i]\n MSE += (T[i] - hat[i])**2\nbias = bias/float(N)\nMSE = MSE/float(N)\n\n\n\n#==============================================================================\n# LMMSE\n#==============================================================================\n\nEt = 1.5\nEz = 2\nVarz = (3**2)/12. + 1/12.\nCov = 3/4.\n\nh = Varz**(-1)*Cov\nh0 = Et - h*Ez\n\ndef lmmse(z):\n return -3/10. 
+ 9/10.*z\n\nbias_l = T - lmmse(Z)\nMSE_l = (T - lmmse(Z))**2\nbias_l = np.sum(bias_l)/float(N)\nMSE_l = np.sum(MSE_l)/float(N)\n\nprint('Bias af MMSE: %f' %bias)\nprint('MSE af MMSE: %f' %MSE)\nprint('')\nprint('Bias af LMMSE: %f' %bias_l)\nprint('MSE af LMMSE: %f' %MSE_l)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"5. semester/Stochastic processes/Scripts/MMSE.py","file_name":"MMSE.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"195874035","text":"import pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\r\nfrom sklearn.naive_bayes import GaussianNB\r\n\r\ndata_set_path= \"Iris-dataset.csv\"\r\nnames = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']\r\ndata_set = pd.read_csv(data_set_path,names=names)\r\n\r\n\r\narray = data_set.values\r\nX = array[:,0:4] #all rows-data, columns 0,1,2,3\r\ny = array[:,4] #all rows of data, column 4-last column-result column\r\nX_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.20, random_state=1, shuffle=True)\r\n\r\nmodel = GaussianNB()\r\nmodel.fit(X_train, Y_train)\r\npredictions = model.predict(X_test)\r\n\r\nprint(accuracy_score(Y_test, predictions))\r\nprint(confusion_matrix(Y_test, predictions))\r\nprint(classification_report(Y_test, predictions))\r\n","sub_path":"Gaussian Naive bayes (sklearn).py","file_name":"Gaussian Naive bayes (sklearn).py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"27"} +{"seq_id":"594308488","text":"# -*- coding: utf-8 -*-\n\n#########################################################################\n## This is the controller for scevents\n#########################################################################\n\n\ndef find_moderator_id(user_email, moderator_database):\n \"\"\"\n This function takes the user's email and looks through the moderator\n database and sees if the user's email matches one of the entries inside\n the moderator database. 
If it matches, it will return that moderator's id.\n In all cases, it will return a value greater than 1 if the moderator's id\n is found, otherwise, the function returns none.\n \"\"\"\n status = None\n for moderator in moderator_database:\n if user_email != moderator.email:\n continue\n if (user_email == moderator.email) and (moderator.moderator_status == True):\n status = moderator.id\n break\n return status\n\n\ndef index():\n \"\"\"\n This function is responsible for retrieving the table of posts and\n is also called by default/index.html to display the appropriate\n contents of the notes table to the user.\n \"\"\"\n\n # Grabs all the rows in the database.\n posts = db(db.post).select()\n comments = db(db.comment).select()\n\n if auth.user_id is None:\n # If the user id is None, then the user is not logged in.\n response.flash = T(\"You must log in to manage your posts.\")\n\n return dict(posts=posts, comments=comments)\n\n\n@auth.requires_login()\ndef add():\n \"\"\"\n Adds a record to post db.\n \"\"\"\n form = SQLFORM(db.post)\n if form.process().accepted:\n # The form content was valid and is accepted\n session.flash = T(\"Added the new post successfully.\")\n redirect(URL('default', 'index'))\n\n return dict(form=form)\n\n\n@auth.requires_login()\ndef edit():\n \"\"\"\n Edits a record from post db.\n \"\"\"\n\n # Grabs the post id for what the user requested.\n post = db.post(request.args(0))\n\n # Checks to see if the post exists.\n if post is None:\n session.flash = T(\"Invalid Request: Post does not exist.\")\n redirect(URL('default', 'index'))\n\n # Checks to see if the current user is the author of post or a moderator. Returns moderator_id.\n moderator_list = db(db.moderator).select()\n moderator_status = find_moderator_id(auth.user.email, moderator_list)\n if (post.email != auth.user.email) and (moderator_status is None):\n session.flash = T(\"Invalid Request: You are not allowed to edit or delete the post.\")\n redirect(URL('default', 'index'))\n\n # Begins editing of the post here.\n form = SQLFORM(db.post, record=post, deletable=True)\n if form.process().accepted:\n # Shows that edit is done after redirecting to the index.\n session.flash = T(\"Edit is done.\")\n redirect(URL('default', 'moderator'))\n\n # Updates the modified_date in database with the same id as the one in this post.\n db(db.post.id == post.id).update(modified_date = datetime.utcnow())\n\n return dict(form=form)\n\n\n@auth.requires_login()\ndef moderator():\n \"\"\"\n Serves all the comments and posts that were created. To even access this 'page', the\n user needs to be logged in.\n\n Implementation Issue found: Cannot easily use javascript to detect which comment was clicked.\n Currently using the same method used in HW1 to basically redirect to another page to view and\n delete a comment or post that way.\n \"\"\"\n\n # Checks to see if the current user is a moderator. Returns moderator_id.\n moderator_list = db(db.moderator).select()\n moderator_status = find_moderator_id(auth.user.email, moderator_list)\n if moderator_status is None:\n session.flash = \"You are not a moderator.\"\n redirect(URL('default', 'index'))\n\n # Grabs all the rows in the database.\n posts = db(db.post.post_approved=='False').select()\n comments = db(db.comment.comment_approved=='False').select()\n\n return dict(posts=posts, comments=comments)\n\n\ndef post():\n \"\"\"\n Reads a record from post db. Also serves the comments for this post.\n\n Bug: post_content is not displayed correctly. 
If users create linebreaks\n    in their post, it will not show at all on the view side because\n    'linebreak' is supposed to start a new paragraph. Current implementation\n    stuffs all the post_content under one paragraph. Possible solution is\n    to implement support for multiple
    creation on the view side or shove\n the post_content into